Merge "msm: ipa: add support for new uC opcode"
diff --git a/Documentation/devicetree/bindings/display/msm/sde.txt b/Documentation/devicetree/bindings/display/msm/sde.txt
index 863a169..b13a10a 100644
--- a/Documentation/devicetree/bindings/display/msm/sde.txt
+++ b/Documentation/devicetree/bindings/display/msm/sde.txt
@@ -361,17 +361,23 @@
* Current values of src & dst are defined at
include/linux/msm-bus-board.h
+SMMU Subnodes:
+- smmu_sde_****: Child nodes representing sde smmu virtual
+ devices
+
Subnode properties:
-- compatible : Compatible name used in smmu v2.
- smmu_v2 names should be:
- "qcom,smmu-mdp-unsec" - smmu context bank device for
- unsecure mdp domain.
- "qcom,smmu-rot-unsec" - smmu context bank device for
- unsecure rotation domain.
- "qcom,smmu-mdp-sec" - smmu context bank device for
- secure mdp domain.
- "qcom,smmu-rot-sec" - smmu context bank device for
- secure rotation domain.
+- compatible: Compatible names used for smmu devices.
+ names should be:
+ "qcom,smmu_sde_unsec": smmu context bank device
+ for unsecure sde real time domain.
+ "qcom,smmu_sde_sec": smmu context bank device
+ for secure sde real time domain.
+ "qcom,smmu_sde_nrt_unsec": smmu context bank device
+ for unsecure sde non-real time domain.
+ "qcom,smmu_sde_nrt_sec": smmu context bank device
+ for secure sde non-real time domain.
+
+
Please refer to ../../interrupt-controller/interrupts.txt for a general
description of interrupt bindings.
@@ -673,4 +679,14 @@
<1 590 0 160000>,
<1 590 0 320000>;
};
+
+ smmu_kms_unsec: qcom,smmu_kms_unsec_cb {
+ compatible = "qcom,smmu_sde_unsec";
+ iommus = <&mmss_smmu 0>;
+ };
+
+ smmu_kms_sec: qcom,smmu_kms_sec_cb {
+ compatible = "qcom,smmu_sde_sec";
+ iommus = <&mmss_smmu 1>;
+ };
};
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-cci.txt b/Documentation/devicetree/bindings/media/video/msm-cam-cci.txt
index c9aaa00..54365b1 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cam-cci.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-cci.txt
@@ -13,16 +13,16 @@
property defined.
- gpios : should contain phandle to gpio controller node and array of
#gpio-cells specifying specific gpio (controller specific)
-- qcom,gpio-req-tbl-num : should contain index to gpios specific to this sensor
-- qcom,gpio-req-tbl-flags : should contain direction of gpios present in
- qcom,gpio-req-tbl-num property (in the same order)
-- qcom,gpio-req-tbl-label : should contain name of gpios present in
- qcom,gpio-req-tbl-num property (in the same order)
+- gpio-req-tbl-num : should contain index to gpios specific to this sensor
+- gpio-req-tbl-flags : should contain direction of gpios present in
+ gpio-req-tbl-num property (in the same order)
+- gpio-req-tbl-label : should contain name of gpios present in
+ gpio-req-tbl-num property (in the same order)
- clock-names: name of the clocks required for the device
- clock-rates: clock rate in Hz
Optional properties:
-- qcom,cam-vreg-name : name of the voltage regulators required for the device.
+- regulator-names : name of the voltage regulators required for the device.
- gdscr-supply : should contain gdsr regulator used for cci clocks.
- mmagic-supply : should contain mmagic regulator used for mmagic clocks.
@@ -43,18 +43,16 @@
* Qualcomm Technologies, Inc. CCI clock settings
Optional properties:
-- qcom,hw-thigh : should contain high period of the SCL clock in terms of CCI
- clock cycle
-- qcom,hw-tlow : should contain high period of the SCL clock in terms of CCI
- clock cycle
-- qcom,hw-tsu-sto : should contain setup time for STOP condition
-- qcom,hw-tsu-sta : should contain setup time for Repeated START condition
-- qcom,hw-thd-dat : should contain hold time for the data
-- qcom,hw-thd-sta : should contain hold time for START condition
-- qcom,hw-tbuf : should contain free time between a STOP and a START condition
-- qcom,hw-scl-stretch-en : should contain enable or disable clock stretching
-- qcom,hw-trdhld : should contain internal hold time for SDA
-- qcom,hw-tsp : should contain filtering of glitches
+- hw-thigh : should contain high period of the SCL clock in terms of CCI clock cycle
+- hw-tlow : should contain low period of the SCL clock in terms of CCI clock cycle
+- hw-tsu-sto : should contain setup time for STOP condition
+- hw-tsu-sta : should contain setup time for Repeated START condition
+- hw-thd-dat : should contain hold time for the data
+- hw-thd-sta : should contain hold time for START condition
+- hw-tbuf : should contain free time between a STOP and a START condition
+- hw-scl-stretch-en : should contain enable or disable clock stretching
+- hw-trdhld : should contain internal hold time for SDA
+- hw-tsp : should contain filtering of glitches
* Qualcomm Technologies, Inc. MSM Sensor
@@ -64,7 +62,7 @@
- compatible : should be manufacturer name followed by sensor name
- "qcom,camera"
- reg : should contain i2c slave address of the device
-- qcom,csiphy-sd-index : should contain csiphy instance that will used to
+- csiphy-sd-index : should contain csiphy instance that will be used to
receive sensor data
- 0, 1, 2
- cam_vdig-supply : should contain regulator from which digital voltage is
@@ -72,67 +70,69 @@
- cam_vana-supply : should contain regulator from which analog voltage is
supplied
- cam_vio-supply : should contain regulator from which IO voltage is supplied
-- qcom,cam-vreg-name : should contain names of all regulators needed by this
+- regulator-names : should contain names of all regulators needed by this
sensor
- "cam_vdig", "cam_vana", "cam_vio", "cam_vaf"
-- qcom,cam-vreg-min-voltage : should contain minimum voltage level for
- regulators mentioned in qcom,cam-vreg-name property (in the same order)
-- qcom,cam-vreg-max-voltage : should contain maximum voltage level for
- regulators mentioned in qcom,cam-vreg-name property (in the same order)
-- qcom,cam-vreg-op-mode : should contain optimum voltage level for regulators
- mentioned in qcom,cam-vreg-name property (in the same order)
-- qcom,sensor-position-roll : should contain sensor rotational angle with respect
+- rgltr-cntrl-support : boolean property. This property is required
+ if the node uses regulator control parameters, e.g. rgltr-min-voltage
+- rgltr-min-voltage : should contain minimum voltage level for
+ regulators mentioned in regulator-names property (in the same order)
+- rgltr-max-voltage : should contain maximum voltage level for
+ regulators mentioned in regulator-names property (in the same order)
+- rgltr-load-current : should contain the load current in microamps for regulators
+ mentioned in regulator-names property (in the same order)
+- sensor-position-roll : should contain sensor rotational angle with respect
to axis of reference
- 0, 90, 180, 360
-- qcom,sensor-position-pitch : should contain sensor rotational angle with respect
+- sensor-position-pitch : should contain sensor rotational angle with respect
to axis of reference
- 0, 90, 180, 360
-- qcom,sensor-position-yaw : should contain sensor rotational angle with respect
+- sensor-position-yaw : should contain sensor rotational angle with respect
to axis of reference
- 0, 90, 180, 360
Optional properties:
-- qcom,slave-id : should contain i2c slave address, device id address, expected
+- slave-id : should contain i2c slave address, device id address, expected
id read value and device id mask
-- qcom,sensor-name : should contain unique sensor name to differentiate from
+- sensor-name : should contain unique sensor name to differentiate from
other sensor
- "s5k3l1yx"
-- qcom,sensor-mode : should contain sensor mode supported
+- sensor-mode : should contain sensor mode supported
- 0 -> back camera 2D
- 1 -> front camera 2D
- 2 -> back camera 3D
- 3 -> back camera int 3D
-- qcom,sensor-type : should contain format of data that sensor streams
+- sensor-type : should contain format of data that sensor streams
- 0 -> bayer format
- 1 -> yuv format
- qcom,secure : should be enabled to operate the camera in secure mode
- 0, 1
-- qcom,gpio-no-mux : should contain field to indicate whether gpio mux table is
+- gpio-no-mux : should contain field to indicate whether gpio mux table is
available
- 1 if gpio mux is not available, 0 otherwise
- cam_vaf-supply : should contain regulator from which AF voltage is supplied
- gpios : should contain phandle to gpio controller node and array of
- #gpio-cells specifying specific gpio (controller specific)
-- qcom,gpio-reset : should contain index to gpio used by sensors reset_n
-- qcom,gpio-standby : should contain index to gpio used by sensors standby_n
-- qcom,gpio-vio : should contain index to gpio used by sensors io vreg enable
-- qcom,gpio-vana : should contain index to gpio used by sensors analog vreg enable
-- qcom,gpio-vdig : should contain index to gpio used by sensors digital vreg enable
-- qcom,gpio-vaf : should contain index to gpio used by sensors af vreg enable
-- qcom,gpio-af-pwdm : should contain index to gpio used by sensors af pwdm_n
-- qcom,gpio-req-tbl-num : should contain index to gpios specific to this sensor
-- qcom,gpio-req-tbl-flags : should contain direction of gpios present in
- qcom,gpio-req-tbl-num property (in the same order)
-- qcom,gpio-req-tbl-label : should contain name of gpios present in
- qcom,gpio-req-tbl-num property (in the same order)
-- qcom,gpio-set-tbl-num : should contain index of gpios that need to be
+ #gpio-cells specifying specific gpio (controller specific)
+- gpio-reset : should contain index to gpio used by sensors reset_n
+- gpio-standby : should contain index to gpio used by sensors standby_n
+- gpio-vio : should contain index to gpio used by sensors io vreg enable
+- gpio-vana : should contain index to gpio used by sensors analog vreg enable
+- gpio-vdig : should contain index to gpio used by sensors digital vreg enable
+- gpio-vaf : should contain index to gpio used by sensors af vreg enable
+- gpio-af-pwdm : should contain index to gpio used by sensors af pwdm_n
+- gpio-req-tbl-num : should contain index to gpios specific to this sensor
+- gpio-req-tbl-flags : should contain direction of gpios present in
+ gpio-req-tbl-num property (in the same order)
+- gpio-req-tbl-label : should contain name of gpios present in
+ gpio-req-tbl-num property (in the same order)
+- gpio-set-tbl-num : should contain index of gpios that need to be
configured by msm
-- qcom,gpio-set-tbl-flags : should contain value to be configured for the gpios
- present in qcom,gpio-set-tbl-num property (in the same order)
-- qcom,gpio-set-tbl-delay : should contain amount of delay after configuring
+- gpio-set-tbl-flags : should contain value to be configured for the gpios
+ present in gpio-set-tbl-num property (in the same order)
+- gpio-set-tbl-delay : should contain amount of delay after configuring
gpios as specified in gpio_set_tbl_flags property (in the same order)
-- qcom,csi-phy-sel : should contain CSIPHY core instance from which CSID should
+- csi-phy-sel : should contain CSIPHY core instance from which CSID should
receive data
-- qcom,actuator-cam-name : should contain actuator cam name associated with
+- actuator-cam-name : should contain actuator cam name associated with
this sensor
- If actuator does not exist, this property should not be initialized
- If actuator exist, this field should indicate the index of actuator to
@@ -141,39 +141,40 @@
for actuator
- qcom,actuator-vcm-enable : should contain value to be set for actuator vcm
gpio
-- qcom,sensor-position : should contain the mount angle of the camera sensor
+- sensor-position : should contain the mount angle of the camera sensor
- 0 -> back camera
- 1 -> front camera
-- qcom,cci-master : should contain i2c master id to be used for this camera
+- cci-master : should contain i2c master id to be used for this camera
sensor
- 0 -> MASTER 0
- 1 -> MASTER 1
-- qcom,actuator-src : if auto focus is supported by this sensor, this
+- actuator-src : if auto focus is supported by this sensor, this
property should contain phandle of respective actuator node
-- qcom,led-flash-src : if LED flash is supported by this sensor, this
+- led-flash-src : if LED flash is supported by this sensor, this
property should contain phandle of respective LED flash node
- qcom,vdd-cx-supply : should contain regulator from which cx voltage is
supplied
- qcom,vdd-cx-name : should contain names of cx regulator
-- qcom,eeprom-src : if eeprom memory is supported by this sensor, this
+- eeprom-src : if eeprom memory is supported by this sensor, this
property should contain phandle of respective eeprom nodes
-- qcom,ois-src : if optical image stabilization is supported by this sensor,
+- ois-src : if optical image stabilization is supported by this sensor,
this property should contain phandle of respective ois node
-- qcom,ir-led-src : if ir led is supported by this sensor, this property
+- ir-led-src : if ir led is supported by this sensor, this property
should contain phandle of respective ir-led node
- qcom,ir-cut-src : if ir cut is supported by this sensor, this property
should contain phandle of respective ir-cut node
- qcom,special-support-sensors: if only some special sensors are supported
on this board, add sensor name in this property.
-- qcom,clock-rates: clock rate in Hz.
-- qcom,clock-cntl-support: Says whether clock control support is present or not
-- qcom,clock-control: The valid fields are "NO_SET_RATE", "INIT_RATE" and
+- clock-rates: clock rate in Hz.
+- clock-cntl-level: specifies the clock control levels supported by this node.
+- clock-cntl-support: Says whether clock control support is present or not
+- clock-control: The valid fields are "NO_SET_RATE", "INIT_RATE" and
"SET_RATE". "NO_SET_RATE" the corresponding clock is enabled without setting
the rate assuming some other driver has already set it to appropriate rate.
"INIT_RATE" clock rate is not queried assuming some other driver has set
the clock rate and ispif will set the the clock to this rate.
"SET_RATE" clock is enabled and the rate is set to the value specified
- in the property qcom,clock-rates.
+ in the property clock-rates.
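+
+ A minimal sketch of how these properties might fit together (the clock name
+ and rate below are illustrative assumptions, not taken from this binding):
+
+	clock-names = "cam_src_clk";
+	clock-rates = <24000000>;
+	clock-control = "SET_RATE";
+
+ Here "cam_src_clk" is enabled and set to 24 MHz; with "NO_SET_RATE" instead,
+ it would only be enabled at whatever rate another driver has already set.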
* Qualcomm Technologies, Inc. MSM ACTUATOR
@@ -184,20 +185,22 @@
data field which is 0x0
- compatible :
- "qcom,actuator"
-- qcom,cci-master : should contain i2c master id to be used for this camera
+- cci-master : should contain i2c master id to be used for this camera
sensor
- 0 -> MASTER 0
- 1 -> MASTER 1
Optional properties:
-- qcom,cam-vreg-name : should contain names of all regulators needed by this
+- regulator-names : should contain names of all regulators needed by this
actuator
- "cam_vaf"
-- qcom,cam-vreg-min-voltage : should contain minimum voltage level in mcrovolts
- for regulators mentioned in qcom,cam-vreg-name property (in the same order)
-- qcom,cam-vreg-max-voltage : should contain maximum voltage level in mcrovolts
- for regulators mentioned in qcom,cam-vreg-name property (in the same order)
-- qcom,cam-vreg-op-mode : should contain the maximum current in microamps
- required from the regulators mentioned in the qcom,cam-vreg-name property
+- rgltr-cntrl-support : boolean property. This property is required
+ if the node uses regulator control parameters, e.g. rgltr-min-voltage
+- rgltr-min-voltage : should contain minimum voltage level in microvolts
+ for regulators mentioned in regulator-names property (in the same order)
+- rgltr-max-voltage : should contain maximum voltage level in microvolts
+ for regulators mentioned in regulator-names property (in the same order)
+- rgltr-load-current : should contain the maximum current in microamps
+ required from the regulators mentioned in the regulator-names property
(in the same order).
- cam_vaf-supply : should contain regulator from which AF voltage is supplied
@@ -210,22 +213,24 @@
data field which is 0x0
- compatible :
- "qcom,ois"
-- qcom,cci-master : should contain i2c master id to be used for this camera
+- cci-master : should contain i2c master id to be used for this camera
sensor
- 0 -> MASTER 0
- 1 -> MASTER 1
-- qcom,clock-rates: clock rate in Hz.
+- clock-rates: clock rate in Hz.
Optional properties:
-- qcom,cam-vreg-name : should contain names of all regulators needed by this
+- regulator-names : should contain names of all regulators needed by this
ois
- "cam_vaf"
-- qcom,cam-vreg-min-voltage : should contain minimum voltage level in mcrovolts
- for regulators mentioned in qcom,cam-vreg-name property (in the same order)
-- qcom,cam-vreg-max-voltage : should contain maximum voltage level in mcrovolts
- for regulators mentioned in qcom,cam-vreg-name property (in the same order)
-- qcom,cam-vreg-op-mode : should contain the maximum current in microamps
- required from the regulators mentioned in the qcom,cam-vreg-name property
+- rgltr-cntrl-support : boolean property. This property is required
+ if the node uses regulator control parameters, e.g. rgltr-min-voltage
+- rgltr-min-voltage : should contain minimum voltage level in microvolts
+ for regulators mentioned in regulator-names property (in the same order)
+- rgltr-max-voltage : should contain maximum voltage level in microvolts
+ for regulators mentioned in regulator-names property (in the same order)
+- rgltr-load-current : should contain the maximum current in microamps
+ required from the regulators mentioned in the regulator-names property
(in the same order).
- cam_vaf-supply : should contain regulator from which ois voltage is supplied
@@ -234,9 +239,9 @@
led_flash0: qcom,camera-flash@0 {
cell-index = <0>;
compatible = "qcom,camera-flash";
- qcom,flash-source = <&pmi8994_flash0 &pmi8994_flash1>;
- qcom,torch-source = <&pmi8998_torch0 &pmi8998_torch1>;
- qcom,switch-source = <&pmi8998_switch>;
+ flash-source = <&pmi8994_flash0 &pmi8994_flash1>;
+ torch-source = <&pmi8998_torch0 &pmi8998_torch1>;
+ switch-source = <&pmi8998_switch>;
status = "ok";
}
@@ -248,70 +253,71 @@
interrupts = <0 50 0>;
interrupt-names = "cci";
clock-names = "camnoc_axi_clk", "soc_ahb_clk",
- "slow_ahb_src_clk", "cpas_ahb_clk",
- "cci_clk", "cci_clk_src";
- qcom,clock-rates = <0 0 80000000 0 0 37500000>;
+ "slow_ahb_src_clk", "cpas_ahb_clk",
+ "cci_clk", "cci_clk_src";
+ clock-rates = <0 0 80000000 0 0 37500000>;
+ clock-cntl-level = "turbo";
gpios = <&tlmm 17 0>,
<&tlmm 18 0>,
<&tlmm 19 0>,
<&tlmm 20 0>;
- qcom,gpio-tbl-num = <0 1 2 3>;
- qcom,gpio-tbl-flags = <1 1 1 1>;
- qcom,gpio-tbl-label = "CCI_I2C_DATA0",
+ gpio-tbl-num = <0 1 2 3>;
+ gpio-tbl-flags = <1 1 1 1>;
+ gpio-tbl-label = "CCI_I2C_DATA0",
"CCI_I2C_CLK0",
"CCI_I2C_DATA1",
"CCI_I2C_CLK1";
i2c_freq_100Khz: qcom,i2c_standard_mode {
- qcom,hw-thigh = <78>;
- qcom,hw-tlow = <114>;
- qcom,hw-tsu-sto = <28>;
- qcom,hw-tsu-sta = <28>;
- qcom,hw-thd-dat = <10>;
- qcom,hw-thd-sta = <77>;
- qcom,hw-tbuf = <118>;
- qcom,hw-scl-stretch-en = <0>;
- qcom,hw-trdhld = <6>;
- qcom,hw-tsp = <1>;
+ hw-thigh = <78>;
+ hw-tlow = <114>;
+ hw-tsu-sto = <28>;
+ hw-tsu-sta = <28>;
+ hw-thd-dat = <10>;
+ hw-thd-sta = <77>;
+ hw-tbuf = <118>;
+ hw-scl-stretch-en = <0>;
+ hw-trdhld = <6>;
+ hw-tsp = <1>;
status = "ok";
};
i2c_freq_400Khz: qcom,i2c_fast_mode {
- qcom,hw-thigh = <20>;
- qcom,hw-tlow = <28>;
- qcom,hw-tsu-sto = <21>;
- qcom,hw-tsu-sta = <21>;
- qcom,hw-thd-dat = <13>;
- qcom,hw-thd-sta = <18>;
- qcom,hw-tbuf = <25>;
- qcom,hw-scl-stretch-en = <0>;
- qcom,hw-trdhld = <6>;
- qcom,hw-tsp = <3>;
+ hw-thigh = <20>;
+ hw-tlow = <28>;
+ hw-tsu-sto = <21>;
+ hw-tsu-sta = <21>;
+ hw-thd-dat = <13>;
+ hw-thd-sta = <18>;
+ hw-tbuf = <25>;
+ hw-scl-stretch-en = <0>;
+ hw-trdhld = <6>;
+ hw-tsp = <3>;
status = "ok";
};
i2c_freq_custom: qcom,i2c_custom_mode {
- qcom,hw-thigh = <15>;
- qcom,hw-tlow = <28>;
- qcom,hw-tsu-sto = <21>;
- qcom,hw-tsu-sta = <21>;
- qcom,hw-thd-dat = <13>;
- qcom,hw-thd-sta = <18>;
- qcom,hw-tbuf = <25>;
- qcom,hw-scl-stretch-en = <1>;
- qcom,hw-trdhld = <6>;
- qcom,hw-tsp = <3>;
+ hw-thigh = <15>;
+ hw-tlow = <28>;
+ hw-tsu-sto = <21>;
+ hw-tsu-sta = <21>;
+ hw-thd-dat = <13>;
+ hw-thd-sta = <18>;
+ hw-tbuf = <25>;
+ hw-scl-stretch-en = <1>;
+ hw-trdhld = <6>;
+ hw-tsp = <3>;
status = "ok";
};
i2c_freq_1Mhz: qcom,i2c_fast_plus_mode {
- qcom,hw-thigh = <16>;
- qcom,hw-tlow = <22>;
- qcom,hw-tsu-sto = <17>;
- qcom,hw-tsu-sta = <18>;
- qcom,hw-thd-dat = <16>;
- qcom,hw-thd-sta = <15>;
- qcom,hw-tbuf = <19>;
- qcom,hw-scl-stretch-en = <1>;
- qcom,hw-trdhld = <3>;
- qcom,hw-tsp = <3>;
- qcom,cci-clk-src = <37500000>;
+ hw-thigh = <16>;
+ hw-tlow = <22>;
+ hw-tsu-sto = <17>;
+ hw-tsu-sta = <18>;
+ hw-thd-dat = <16>;
+ hw-thd-sta = <15>;
+ hw-tbuf = <19>;
+ hw-scl-stretch-en = <1>;
+ hw-trdhld = <3>;
+ hw-tsp = <3>;
+ cci-clk-src = <37500000>;
status = "ok";
};
@@ -319,34 +325,36 @@
cell-index = <0>;
reg = <0x0>;
compatible = "qcom,actuator";
- qcom,cci-master = <0>;
+ cci-master = <0>;
cam_vaf-supply = <&pmi8998_bob>;
- qcom,cam-vreg-name = "cam_vaf";
- qcom,cam-vreg-min-voltage = <2800000>;
- qcom,cam-vreg-max-voltage = <2800000>;
- qcom,cam-vreg-op-mode = <100000>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2800000>;
+ rgltr-max-voltage = <2800000>;
+ rgltr-load-current = <100000>;
};
qcom,cam-sensor@0 {
cell-index = <0>;
compatible = "qcom,camera";
reg = <0x0>;
- qcom,csiphy-sd-index = <0>;
- qcom,sensor-position-roll = <90>;
- qcom,sensor-position-pitch = <0>;
- qcom,sensor-position-yaw = <180>;
- qcom,secure = <1>;
- qcom,led-flash-src = <&led_flash0>;
- qcom,actuator-src = <&actuator0>;
- qcom,eeprom-src = <&eeprom0>;
+ csiphy-sd-index = <0>;
+ sensor-position-roll = <90>;
+ sensor-position-pitch = <0>;
+ sensor-position-yaw = <180>;
+ secure = <1>;
+ led-flash-src = <&led_flash0>;
+ actuator-src = <&actuator0>;
+ eeprom-src = <&eeprom0>;
cam_vdig-supply = <&pm845_s3>;
cam_vio-supply = <&pm845_lvs1>;
cam_vana-supply = <&pmi8998_bob>;
- qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana";
- qcom,cam-vreg-min-voltage = <0 3312000 1352000>;
- qcom,cam-vreg-max-voltage = <0 3312000 1352000>;
- qcom,cam-vreg-op-mode = <0 80000 105000>;
- qcom,gpio-no-mux = <0>;
+ regulator-names = "cam_vdig", "cam_vio", "cam_vana";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <0 3312000 1352000>;
+ rgltr-max-voltage = <0 3312000 1352000>;
+ rgltr-load-current = <0 80000 105000>;
+ gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk0_active
&cam_sensor_rear_active>;
@@ -355,19 +363,20 @@
gpios = <&tlmm 13 0>,
<&tlmm 80 0>,
<&tlmm 79 0>;
- qcom,gpio-reset = <1>;
- qcom,gpio-standby = <2>;
- qcom,gpio-req-tbl-num = <0 1 2>;
- qcom,gpio-req-tbl-flags = <1 0 0>;
- qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+ gpio-reset = <1>;
+ gpio-standby = <2>;
+ gpio-req-tbl-num = <0 1 2>;
+ gpio-req-tbl-flags = <1 0 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK0",
"CAM_RESET0",
"CAM_VANA";
- qcom,sensor-position = <0>;
- qcom,sensor-mode = <0>;
- qcom,cci-master = <0>;
+ sensor-position = <0>;
+ sensor-mode = <0>;
+ cci-master = <0>;
status = "ok";
clocks = <&clock_mmss clk_mclk0_clk_src>,
<&clock_mmss clk_camss_mclk0_clk>;
clock-names = "cam_src_clk", "cam_clk";
+ clock-cntl-level;
};
};
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-cdm.txt b/Documentation/devicetree/bindings/media/video/msm-cam-cdm.txt
index d62910a..3dc661f 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cam-cdm.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-cdm.txt
@@ -118,6 +118,12 @@
Value type: <string>
Definition: List of Clients supported by CDM HW node.
+- clock-cntl-level
+ Usage: required
+ Value type: <string>
+  Definition: List of strings corresponding to the clock-rates levels.
+ Supported strings: minsvs, lowsvs, svs, svs_l1, nominal, turbo.
+
Example:
qcom,cpas-cdm0@ac48000 {
cell-index = <0>;
@@ -143,5 +149,6 @@
<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
qcom,clock-rates = <0 80000000 80000000 80000000 80000000 80000000>;
cdm-client-names = "ife";
+ clock-cntl-level = "turbo";
status = "ok";
};
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt b/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt
index 62a51cf..99f3ba2 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt
@@ -97,6 +97,12 @@
Value type: <u32>
Definition: List of clocks rates.
+- clock-cntl-level
+ Usage: required
+ Value type: <string>
+  Definition: List of strings corresponding to the clock-rates levels.
+ Supported strings: minsvs, lowsvs, svs, svs_l1, nominal, turbo.
+
- qcom,msm-bus,name
- qcom,msm-bus,num-cases
- qcom,msm-bus,num-paths
@@ -191,6 +197,7 @@
<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
src-clock-name = "slow_ahb_clk_src";
clock-rates = <0 0 0 0 80000000 0>;
+ clock-cntl-level = "turbo";
qcom,msm-bus,name = "cam_ahb";
qcom,msm-bus,num-cases = <4>;
qcom,msm-bus,num-paths = <1>;
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-csiphy.txt b/Documentation/devicetree/bindings/media/video/msm-cam-csiphy.txt
index e8a74b3..dd8668c 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cam-csiphy.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-csiphy.txt
@@ -6,13 +6,15 @@
- "qcom,csiphy-v5.01"
- reg : offset and length of the register set for the device
for the csiphy operating in compatible mode.
+- reg-cam-base : offset of csiphy in camera hw block
- reg-names : should specify relevant names to each reg property defined.
- interrupts : should contain the csiphy interrupt.
- interrupt-names : should specify relevant names to each interrupts
property defined.
- clock-names: name of the clocks required for the device
-- qcom,clock-rates: clock rate in Hz
+- clock-rates: clock rate in Hz
- 0 if appropriate clock is required but doesn't have to apply the rate
+- clock-cntl-level: specifies the clock control levels supported by this node.
Example:
@@ -20,6 +22,7 @@
cell-index = <0>;
compatible = "qcom,csiphy-v1.0", "qcom,csiphy";
reg = <0xac65000 0x200>;
+ reg-cam-base = <0x65000>;
reg-names = "csiphy";
interrupts = <0 477 0>;
interrupt-names = "csiphy";
@@ -28,7 +31,9 @@
"cphy_rx_clk_src", "csiphy0_clk",
"csi0phytimer_clk_src", "csi0phytimer_clk",
"ife_0_csid_clk", "ife_0_csid_clk_src";
- qcom,clock-rates =
+ clock-rates =
<0 0 80000000 0 320000000 0 269333333 0 0 384000000>;
+ clock-cntl-level = "turbo";
+ regulator-names = "gdscr";
status = "ok";
};
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-icp.txt b/Documentation/devicetree/bindings/media/video/msm-cam-icp.txt
index c560a05..28a0920 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cam-icp.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-icp.txt
@@ -111,6 +111,12 @@
Value type: <phandle>
Definition: List of clocks used for CDM HW.
+- clock-cntl-level
+ Usage: required
+ Value type: <string>
+  Definition: List of strings corresponding to the clock-rates levels.
+ Supported strings: minsvs, lowsvs, svs, svs_l1, nominal, turbo.
+
- clock-rates
Usage: required
Value type: <u32>
@@ -157,6 +163,7 @@
<&clock_camcc CAM_CC_ICP_TS_CLK>;
clock-rates = <0 0 0 80000000 0 0 0 0 600000000 0 0>;
+ clock-cntl-level = "turbo";
fw_name = "CAMERA_ICP.elf";
};
@@ -177,6 +184,7 @@
<&clock_camcc CAM_CC_IPE_0_CLK_SRC>;
clock-rates = <80000000 400000000 0 0 600000000>;
+ clock-cntl-level = "turbo";
};
qcom,ipe1 {
@@ -195,7 +203,8 @@
<&clock_camcc CAM_CC_IPE_1_CLK>,
<&clock_camcc CAM_CC_IPE_1_CLK_SRC>;
- clock-rates = <80000000 400000000 0 0 600000000>;
+ clock-rates = <80000000 400000000 0 0 600000000>;
+ clock-cntl-level = "turbo";
};
bps: qcom,bps {
@@ -215,5 +224,6 @@
<&clock_camcc CAM_CC_BPS_CLK_SRC>;
clock-rates = <80000000 400000000 0 0 600000000>;
+ clock-cntl-level = "turbo";
};
diff --git a/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.txt b/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.txt
index 6ac06c1..5b6bd97 100644
--- a/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.txt
+++ b/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.txt
@@ -42,6 +42,8 @@
see:
Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb.txt
- interrupt-names: Corresponding interrupt name to the interrupts property
+- qcom,can-sleep: Boolean flag indicating that processes waiting on SPMI
+ transactions may sleep
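+
+A hedged sketch of how this flag might appear on a PMIC slave node (the node
+name, compatible strings and address below are illustrative assumptions, not
+taken from this patch):
+
+	pm8998@0 {
+		compatible = "qcom,pm8998", "qcom,spmi-pmic";
+		reg = <0x0 SPMI_USID>;
+		qcom,can-sleep;
+	};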
Each child node of SPMI slave id represents a function of the PMIC. In the
example below the rtc device node represents a peripheral of pm8941
diff --git a/Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb-debug.txt b/Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb-debug.txt
index ceac719..2131c33 100644
--- a/Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb-debug.txt
+++ b/Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb-debug.txt
@@ -35,6 +35,19 @@
the corresponding addresses are specified in the reg
property.
+- clocks
+ Usage: optional
+ Value type: <prop-encoded-array>
+ Definition: Clock tuple consisting of a phandle to a clock controller
+ device and the clock ID number for the SPMI debug controller
+ clock.
+
+- clock-names
+ Usage: required if clocks property is specified
+ Value type: <string>
+ Definition: Defines the name of the clock defined in the "clocks"
+ property. This must be "core_clk".
+
- #address-cells
Usage: required
Value type: <u32>
@@ -57,6 +70,8 @@
compatible = "qcom,spmi-pmic-arb-debug";
reg = <0x6b22000 0x60>, <0x7820A8 4>;
reg-names = "core", "fuse";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "core_clk";
qcom,fuse-disable-bit = <12>;
#address-cells = <2>;
#size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/thermal/qti-qmi-cdev.txt b/Documentation/devicetree/bindings/thermal/qti-qmi-cdev.txt
new file mode 100644
index 0000000..51c5eac
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/qti-qmi-cdev.txt
@@ -0,0 +1,113 @@
+QMI thermal mitigation (TMD) cooling devices.
+
+The QMI TMD cooling device is used for various mitigations on remote
+subsystems, including remote processor mitigation, rail voltage restriction,
+etc. This cooling device uses the kernel QTI QMI interface to send mitigation
+messages to the remote subsystem.
+
+Each child node of the QMI TMD devicetree node represents a remote
+subsystem, and each child of a subsystem node represents a separate cooling
+device. At least one remote subsystem node is required, and each subsystem
+node must contain at least one cooling device node.
+
+Properties:
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: should be "qcom,qmi_cooling_devices"
+
+
+Subsystem properties:
+- qcom,instance-id:
+ Usage: required
+ Value type: <integer>
+ Definition: Remote subsystem QMI server instance id to be used for
+ communicating with QMI.
+
+ Minimum one child node is required. Child node name and its alias are
+ At least one child node is required. The child node name and its alias are
+ used as the cooling device name and phandle for that cooling device.
+ cooling device node properties:
+ -qcom,qmi-dev-name:
+ Usage: required
+ Value type: <string>
+ Definition: Remote subsystem device identifier. Below strings
+ are the only acceptable device names,
+ "pa" -> for pa cooling device,
+ "cpuv_restriction_cold" -> for vdd restriction,
+ "cx_vdd_limit" -> for vdd limit,
+ "modem" -> for processor passive cooling device,
+ "modem_current" -> for current limiting device,
+ "modem_bw" -> for bus bandwidth limiting device,
+ "cpr_cold" -> for cpr restriction.
+
+ -#cooling-cells:
+ Usage: required
+ Value type: <integer>
+ Definition: Must be 2. Needed for of_thermal as cooling device
+ identifier. Please refer to
+ <devicetree/bindings/thermal/thermal.txt> for more
+ details.
+Example:
+
+ qmi-tmd-devices {
+ compatible = "qcom,qmi_cooling_devices";
+
+ modem {
+ qcom,instance-id = <0x0>;
+
+ modem_pa: modem_pa {
+ qcom,qmi-dev-name = "pa";
+ #cooling-cells = <2>;
+ };
+
+ modem_proc: modem_proc {
+ qcom,qmi-dev-name = "modem";
+ #cooling-cells = <2>;
+ };
+
+ modem_vdd: modem_vdd {
+ qcom,qmi-dev-name = "cpuv_restriction_cold";
+ #cooling-cells = <2>;
+ };
+
+ modem_current: modem_current {
+ qcom,qmi-dev-name = "modem_current";
+ #cooling-cells = <2>;
+ };
+
+ modem_cpr_cold: modem_cpr_cold {
+ qcom,qmi-dev-name = "cpr_cold";
+ #cooling-cells = <2>;
+ };
+ };
+
+ adsp {
+ qcom,instance-id = <0x1>;
+
+ adsp_vdd: adsp_vdd {
+ qcom,qmi-dev-name = "cpuv_restriction_cold";
+ #cooling-cells = <2>;
+ };
+ };
+
+ cdsp {
+ qcom,instance-id = <0x43>;
+
+ cdsp_vdd: cdsp_vdd {
+ qcom,qmi-dev-name = "cpuv_restriction_cold";
+ #cooling-cells = <2>;
+ };
+ };
+
+ slpi {
+ qcom,instance-id = <0x53>;
+
+ slpi_vdd: slpi_vdd {
+ qcom,qmi-dev-name = "cpuv_restriction_cold";
+ #cooling-cells = <2>;
+ };
+ };
+ };
+
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 8527965..2b576cc 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -3948,6 +3948,13 @@
spia_pedr=
spia_peddr=
+ stack_guard_gap= [MM]
+ override the default stack gap protection. The value
+ is in page units and it defines how many pages prior
+ to (for stacks growing down) resp. after (for stacks
+ growing up) the main stack are reserved for no other
+ mapping. Default value is 256 pages.
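+ For example, stack_guard_gap=512 doubles the default gap: with
+ 4 KiB pages it reserves a 2 MiB gap instead of the default
+ 1 MiB (256 pages).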
+
stacktrace [FTRACE]
Enabled the stack tracer on boot up.
diff --git a/Makefile b/Makefile
index 9e428c5..aebb186 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 9
-SUBLEVEL = 32
+SUBLEVEL = 35
EXTRAVERSION =
NAME = Roaring Lionus
@@ -655,6 +655,12 @@
# Tell gcc to never replace conditional load with a non-conditional one
KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
+# check for 'asm goto'
+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
+ KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
+ KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
+endif
+
include scripts/Makefile.gcc-plugins
ifdef CONFIG_READABLE_ASM
@@ -800,12 +806,6 @@
# use the deterministic mode of AR if available
KBUILD_ARFLAGS := $(call ar-option,D)
-# check for 'asm goto'
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y)
- KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
- KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
-endif
-
include scripts/Makefile.kasan
include scripts/Makefile.extrawarn
include scripts/Makefile.ubsan
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 689dd86..8b90d25 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -71,14 +71,14 @@
GET_CPU_ID r5
cmp r5, 0
mov.nz r0, r5
-#ifdef CONFIG_ARC_SMP_HALT_ON_RESET
- ; Non-Master can proceed as system would be booted sufficiently
- jnz first_lines_of_secondary
-#else
+ bz .Lmaster_proceed
+
; Non-Masters wait for Master to boot enough and bring them up
- jnz arc_platform_smp_wait_to_boot
-#endif
- ; Master falls thru
+ ; when they resume, tail-call to entry point
+ mov blink, @first_lines_of_secondary
+ j arc_platform_smp_wait_to_boot
+
+.Lmaster_proceed:
#endif
; Clear BSS before updating any globals
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index 88674d9..2afbafa 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -90,22 +90,37 @@ void __init smp_cpus_done(unsigned int max_cpus)
*/
static volatile int wake_flag;
+#ifdef CONFIG_ISA_ARCOMPACT
+
+#define __boot_read(f) f
+#define __boot_write(f, v) f = v
+
+#else
+
+#define __boot_read(f) arc_read_uncached_32(&f)
+#define __boot_write(f, v) arc_write_uncached_32(&f, v)
+
+#endif
+
static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
{
BUG_ON(cpu == 0);
- wake_flag = cpu;
+
+ __boot_write(wake_flag, cpu);
}
void arc_platform_smp_wait_to_boot(int cpu)
{
- while (wake_flag != cpu)
+ /* for halt-on-reset, we've waited already */
+ if (IS_ENABLED(CONFIG_ARC_SMP_HALT_ON_RESET))
+ return;
+
+ while (__boot_read(wake_flag) != cpu)
;
- wake_flag = 0;
- __asm__ __volatile__("j @first_lines_of_secondary \n");
+ __boot_write(wake_flag, 0);
}
-
const char *arc_platform_smp_cpuinfo(void)
{
return plat_smp_ops.info ? : "";
diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c
index 2e06d56..cf4ae69 100644
--- a/arch/arc/mm/mmap.c
+++ b/arch/arc/mm/mmap.c
@@ -64,7 +64,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/arch/arm/configs/ezx_defconfig b/arch/arm/configs/ezx_defconfig
index ea316c4..d3f1768 100644
--- a/arch/arm/configs/ezx_defconfig
+++ b/arch/arm/configs/ezx_defconfig
@@ -64,8 +64,8 @@
CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_SCTP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
diff --git a/arch/arm/configs/imote2_defconfig b/arch/arm/configs/imote2_defconfig
index 18e59fe..7f479cd 100644
--- a/arch/arm/configs/imote2_defconfig
+++ b/arch/arm/configs/imote2_defconfig
@@ -56,8 +56,8 @@
CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_SCTP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 66353ca..641334e 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -89,7 +89,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
@@ -140,7 +140,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi
new file mode 100644
index 0000000..8501d80
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi
@@ -0,0 +1,198 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ qcom,lpm-levels {
+ compatible = "qcom,lpm-levels";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,pm-cluster@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ label = "L3";
+ qcom,psci-mode-shift = <4>;
+ qcom,psci-mode-mask = <0xfff>;
+
+ qcom,pm-cluster-level@0 { /* D1 */
+ reg = <0>;
+ label = "l3-wfi";
+ qcom,psci-mode = <0x1>;
+ qcom,latency-us = <51>;
+ qcom,ss-power = <452>;
+ qcom,energy-overhead = <69355>;
+ qcom,time-overhead = <99>;
+ };
+
+ qcom,pm-cluster-level@1 { /* D2 */
+ reg = <1>;
+ label = "l3-dyn-ret";
+ qcom,psci-mode = <0x2>;
+ qcom,latency-us = <659>;
+ qcom,ss-power = <434>;
+ qcom,energy-overhead = <465725>;
+ qcom,time-overhead = <976>;
+ qcom,min-child-idx = <1>;
+ };
+
+ qcom,pm-cluster-level@2 { /* D4, D3 is not supported */
+ reg = <2>;
+ label = "l3-pc";
+ qcom,psci-mode = <0x4>;
+ qcom,latency-us = <3201>;
+ qcom,ss-power = <408>;
+ qcom,energy-overhead = <2421840>;
+ qcom,time-overhead = <5376>;
+ qcom,min-child-idx = <2>;
+ qcom,is-reset;
+ };
+
+ qcom,pm-cluster-level@3 { /* Cx off */
+ reg = <3>;
+ label = "cx-off";
+ qcom,psci-mode = <0x224>;
+ qcom,latency-us = <5562>;
+ qcom,ss-power = <308>;
+ qcom,energy-overhead = <2521840>;
+ qcom,time-overhead = <6376>;
+ qcom,min-child-idx = <3>;
+ qcom,is-reset;
+ qcom,notify-rpm;
+ };
+
+ qcom,pm-cluster-level@4 { /* AOSS sleep */
+ reg = <4>;
+ label = "llcc-off";
+ qcom,psci-mode = <0xC24>;
+ qcom,latency-us = <6562>;
+ qcom,ss-power = <108>;
+ qcom,energy-overhead = <2621840>;
+ qcom,time-overhead = <7376>;
+ qcom,min-child-idx = <3>;
+ qcom,is-reset;
+ qcom,notify-rpm;
+ };
+
+ qcom,pm-cpu@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ qcom,psci-mode-shift = <0>;
+ qcom,psci-mode-mask = <0xf>;
+ qcom,cpu = <&CPU0 &CPU1 &CPU2 &CPU3 &CPU4
+ &CPU5>;
+
+ qcom,pm-cpu-level@0 { /* C1 */
+ reg = <0>;
+ qcom,spm-cpu-mode = "wfi";
+ qcom,psci-cpu-mode = <0x1>;
+ qcom,latency-us = <43>;
+ qcom,ss-power = <454>;
+ qcom,energy-overhead = <38639>;
+ qcom,time-overhead = <83>;
+ };
+
+ qcom,pm-cpu-level@1 { /* C2D */
+ reg = <1>;
+ qcom,spm-cpu-mode = "ret";
+ qcom,psci-cpu-mode = <0x2>;
+ qcom,latency-us = <119>;
+ qcom,ss-power = <449>;
+ qcom,energy-overhead = <78456>;
+ qcom,time-overhead = <167>;
+ };
+
+ qcom,pm-cpu-level@2 { /* C3 */
+ reg = <2>;
+ qcom,spm-cpu-mode = "pc";
+ qcom,psci-cpu-mode = <0x3>;
+ qcom,latency-us = <461>;
+ qcom,ss-power = <436>;
+ qcom,energy-overhead = <418225>;
+ qcom,time-overhead = <885>;
+ qcom,is-reset;
+ qcom,use-broadcast-timer;
+ };
+
+ qcom,pm-cpu-level@3 { /* C4 */
+ reg = <3>;
+ qcom,spm-cpu-mode = "rail-pc";
+ qcom,psci-cpu-mode = <0x4>;
+ qcom,latency-us = <531>;
+ qcom,ss-power = <400>;
+ qcom,energy-overhead = <428225>;
+ qcom,time-overhead = <1000>;
+ qcom,is-reset;
+ qcom,use-broadcast-timer;
+ };
+ };
+
+ qcom,pm-cpu@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ qcom,psci-mode-shift = <0>;
+ qcom,psci-mode-mask = <0xf>;
+ qcom,cpu = <&CPU6 &CPU7>;
+
+ qcom,pm-cpu-level@0 { /* C1 */
+ reg = <0>;
+ qcom,spm-cpu-mode = "wfi";
+ qcom,psci-cpu-mode = <0x1>;
+ qcom,latency-us = <43>;
+ qcom,ss-power = <454>;
+ qcom,energy-overhead = <38639>;
+ qcom,time-overhead = <83>;
+ };
+
+ qcom,pm-cpu-level@1 { /* C2D */
+ reg = <1>;
+ qcom,spm-cpu-mode = "ret";
+ qcom,psci-cpu-mode = <0x2>;
+ qcom,latency-us = <116>;
+ qcom,ss-power = <449>;
+ qcom,energy-overhead = <78456>;
+ qcom,time-overhead = <167>;
+ };
+
+ qcom,pm-cpu-level@2 { /* C3 */
+ reg = <2>;
+ qcom,spm-cpu-mode = "pc";
+ qcom,psci-cpu-mode = <0x3>;
+ qcom,latency-us = <621>;
+ qcom,ss-power = <436>;
+ qcom,energy-overhead = <418225>;
+ qcom,time-overhead = <885>;
+ qcom,is-reset;
+ qcom,use-broadcast-timer;
+ };
+
+ qcom,pm-cpu-level@3 { /* C4 */
+ reg = <3>;
+ qcom,spm-cpu-mode = "rail-pc";
+ qcom,psci-cpu-mode = <0x4>;
+ qcom,latency-us = <1061>;
+ qcom,ss-power = <400>;
+ qcom,energy-overhead = <428225>;
+ qcom,time-overhead = <1000>;
+ qcom,is-reset;
+ qcom,use-broadcast-timer;
+ };
+ };
+ };
+ };
+
+ qcom,rpm-stats@c300000 {
+ compatible = "qcom,rpm-stats";
+ reg = <0xc300000 0x1000>, <0xc3f0004 0x4>;
+ reg-names = "phys_addr_base", "offset_addr";
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
index b7df320..d663bcd 100644
--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
@@ -19,6 +19,7 @@
#include <dt-bindings/clock/qcom,videocc-sdm845.h>
#include <dt-bindings/clock/qcom,cpucc-sdm845.h>
#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/soc/qcom,tcs-mbox.h>
#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
/ {
@@ -990,6 +991,29 @@
};
};
+ cmd_db: qcom,cmd-db@c3f000c {
+ compatible = "qcom,cmd-db";
+ reg = <0xc3f000c 0x8>;
+ };
+
+ apps_rsc: mailbox@179e0000 {
+ compatible = "qcom,tcs-drv";
+ label = "apps_rsc";
+ reg = <0x179e0000 0x100>, <0x179e0d00 0x3000>;
+ interrupts = <0 5 0>;
+ #mbox-cells = <1>;
+ qcom,drv-id = <2>;
+ qcom,tcs-config = <ACTIVE_TCS 2>,
+ <SLEEP_TCS 3>,
+ <WAKE_TCS 3>,
+ <CONTROL_TCS 1>;
+ };
+
+ system_pm {
+ compatible = "qcom,system-pm";
+ mboxes = <&apps_rsc 0>;
+ };
+
dcc: dcc_v2@10a2000 {
compatible = "qcom,dcc_v2";
reg = <0x10a2000 0x1000>,
@@ -1113,6 +1137,7 @@
#include "sdm670-pinctrl.dtsi"
#include "msm-arm-smmu-sdm670.dtsi"
#include "msm-gdsc-sdm845.dtsi"
+#include "sdm670-pm.dtsi"
&usb30_prim_gdsc {
status = "ok";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
index a715025..5db4c35 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
@@ -16,9 +16,9 @@
cell-index = <0>;
reg = <0x00 0x00>;
compatible = "qcom,camera-flash";
- qcom,flash-source = <&pmi8998_flash0 &pmi8998_flash1>;
- qcom,torch-source = <&pmi8998_torch0 &pmi8998_torch1>;
- qcom,switch-source = <&pmi8998_switch0>;
+ flash-source = <&pmi8998_flash0 &pmi8998_flash1>;
+ torch-source = <&pmi8998_torch0 &pmi8998_torch1>;
+ switch-source = <&pmi8998_switch0>;
status = "ok";
};
@@ -26,9 +26,9 @@
cell-index = <1>;
reg = <0x01 0x00>;
compatible = "qcom,camera-flash";
- qcom,flash-source = <&pmi8998_flash2>;
- qcom,torch-source = <&pmi8998_torch2>;
- qcom,switch-source = <&pmi8998_switch1>;
+ flash-source = <&pmi8998_flash2>;
+ torch-source = <&pmi8998_torch2>;
+ switch-source = <&pmi8998_switch1>;
status = "ok";
};
@@ -78,36 +78,39 @@
cell-index = <0>;
reg = <0x0>;
compatible = "qcom,actuator";
- qcom,cci-master = <0>;
+ cci-master = <0>;
cam_vaf-supply = <&actuator_regulator>;
- qcom,cam-vreg-name = "cam_vaf";
- qcom,cam-vreg-min-voltage = <2800000>;
- qcom,cam-vreg-max-voltage = <2800000>;
- qcom,cam-vreg-op-mode = <0>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2800000>;
+ rgltr-max-voltage = <2800000>;
+ rgltr-load-current = <0>;
};
actuator_front: qcom,actuator@1 {
cell-index = <1>;
reg = <0x1>;
compatible = "qcom,actuator";
- qcom,cci-master = <1>;
+ cci-master = <1>;
cam_vaf-supply = <&actuator_regulator>;
- qcom,cam-vreg-name = "cam_vaf";
- qcom,cam-vreg-min-voltage = <2800000>;
- qcom,cam-vreg-max-voltage = <2800000>;
- qcom,cam-vreg-op-mode = <0>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2800000>;
+ rgltr-max-voltage = <2800000>;
+ rgltr-load-current = <0>;
};
ois_rear: qcom,ois@0 {
cell-index = <0>;
reg = <0x0>;
compatible = "qcom,ois";
- qcom,cci-master = <0>;
+ cci-master = <0>;
cam_vaf-supply = <&actuator_regulator>;
- qcom,cam-vreg-name = "cam_vaf";
- qcom,cam-vreg-min-voltage = <2800000>;
- qcom,cam-vreg-max-voltage = <2800000>;
- qcom,cam-vreg-op-mode = <0>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2800000>;
+ rgltr-max-voltage = <2800000>;
+ rgltr-load-current = <0>;
status = "disabled";
};
@@ -119,12 +122,13 @@
cam_vana-supply = <&pmi8998_bob>;
cam_vdig-supply = <&camera_rear_ldo>;
cam_clk-supply = <&titan_top_gdsc>;
- qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
"cam_clk";
- qcom,cam-vreg-min-voltage = <0 3312000 1050000 0>;
- qcom,cam-vreg-max-voltage = <0 3600000 1050000 0>;
- qcom,cam-vreg-op-mode = <0 80000 105000 0>;
- qcom,gpio-no-mux = <0>;
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <0 3312000 1050000 0>;
+ rgltr-max-voltage = <0 3600000 1050000 0>;
+ rgltr-load-current = <0 80000 105000 0>;
+ gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk0_active
&cam_sensor_rear_active>;
@@ -134,22 +138,22 @@
<&tlmm 80 0>,
<&tlmm 79 0>,
<&tlmm 27 0>;
- qcom,gpio-reset = <1>;
- qcom,gpio-vana = <2>;
- qcom,gpio-vaf = <3>;
- qcom,gpio-req-tbl-num = <0 1 2 3>;
- qcom,gpio-req-tbl-flags = <1 0 0 0>;
- qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+ gpio-reset = <1>;
+ gpio-vana = <2>;
+ gpio-vaf = <3>;
+ gpio-req-tbl-num = <0 1 2 3>;
+ gpio-req-tbl-flags = <1 0 0 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK0",
"CAM_RESET0",
"CAM_VANA0",
"CAM_VAF";
- qcom,sensor-position = <0>;
- qcom,sensor-mode = <0>;
- qcom,cci-master = <0>;
+ sensor-position = <0>;
+ sensor-mode = <0>;
+ cci-master = <0>;
status = "ok";
clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
clock-names = "cam_clk";
- qcom,clock-rates = <24000000>;
+ clock-rates = <24000000>;
};
eeprom_rear_aux: qcom,eeprom@1 {
@@ -160,12 +164,13 @@
cam_vio-supply = <&pm8998_lvs1>;
cam_vana-supply = <&pmi8998_bob>;
cam_clk-supply = <&titan_top_gdsc>;
- qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana",
+ regulator-names = "cam_vdig", "cam_vio", "cam_vana",
"cam_clk";
- qcom,cam-vreg-min-voltage = <1050000 0 3312000 0>;
- qcom,cam-vreg-max-voltage = <1050000 0 3600000 0>;
- qcom,cam-vreg-op-mode = <105000 0 80000 0>;
- qcom,gpio-no-mux = <0>;
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <1050000 0 3312000 0>;
+ rgltr-max-voltage = <1050000 0 3600000 0>;
+ rgltr-load-current = <105000 0 80000 0>;
+ gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk2_active
&cam_sensor_rear2_active>;
@@ -174,20 +179,20 @@
gpios = <&tlmm 15 0>,
<&tlmm 9 0>,
<&tlmm 8 0>;
- qcom,gpio-reset = <1>;
- qcom,gpio-vana = <2>;
- qcom,gpio-req-tbl-num = <0 1 2>;
- qcom,gpio-req-tbl-flags = <1 0 0>;
- qcom,gpio-req-tbl-label = "CAMIF_MCLK1",
+ gpio-reset = <1>;
+ gpio-vana = <2>;
+ gpio-req-tbl-num = <0 1 2>;
+ gpio-req-tbl-flags = <1 0 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK1",
"CAM_RESET1",
"CAM_VANA1";
- qcom,sensor-position = <0>;
- qcom,sensor-mode = <0>;
- qcom,cci-master = <1>;
+ sensor-position = <0>;
+ sensor-mode = <0>;
+ cci-master = <1>;
status = "ok";
clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
clock-names = "cam_clk";
- qcom,clock-rates = <24000000>;
+ clock-rates = <24000000>;
};
eeprom_front: qcom,eeprom@2 {
@@ -198,12 +203,13 @@
cam_vana-supply = <&pmi8998_bob>;
cam_vdig-supply = <&camera_ldo>;
cam_clk-supply = <&titan_top_gdsc>;
- qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
"cam_clk";
- qcom,cam-vreg-min-voltage = <0 3312000 1050000 0>;
- qcom,cam-vreg-max-voltage = <0 3600000 1050000 0>;
- qcom,cam-vreg-op-mode = <0 80000 105000 0>;
- qcom,gpio-no-mux = <0>;
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <0 3312000 1050000 0>;
+ rgltr-max-voltage = <0 3600000 1050000 0>;
+ rgltr-load-current = <0 80000 105000 0>;
+ gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk1_active
&cam_sensor_front_active>;
@@ -213,46 +219,47 @@
<&tlmm 28 0>,
<&tlmm 8 0>,
<&tlmm 27 0>;
- qcom,gpio-reset = <1>;
- qcom,gpio-vana = <2>;
- qcom,gpio-vaf = <3>;
- qcom,gpio-req-tbl-num = <0 1 2 3>;
- qcom,gpio-req-tbl-flags = <1 0 0 0>;
- qcom,gpio-req-tbl-label = "CAMIF_MCLK2",
+ gpio-reset = <1>;
+ gpio-vana = <2>;
+ gpio-vaf = <3>;
+ gpio-req-tbl-num = <0 1 2 3>;
+ gpio-req-tbl-flags = <1 0 0 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK2",
"CAM_RESET2",
"CAM_VANA2",
"CAM_VAF";
- qcom,sensor-position = <1>;
- qcom,sensor-mode = <0>;
- qcom,cci-master = <1>;
+ sensor-position = <1>;
+ sensor-mode = <0>;
+ cci-master = <1>;
status = "ok";
clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
clock-names = "cam_clk";
- qcom,clock-rates = <24000000>;
+ clock-rates = <24000000>;
};
qcom,cam-sensor@0 {
cell-index = <0>;
compatible = "qcom,cam-sensor";
reg = <0x0>;
- qcom,csiphy-sd-index = <0>;
- qcom,sensor-position-roll = <90>;
- qcom,sensor-position-pitch = <0>;
- qcom,sensor-position-yaw = <180>;
- qcom,led-flash-src = <&led_flash_rear>;
- qcom,actuator-src = <&actuator_rear>;
- qcom,ois-src = <&ois_rear>;
- qcom,eeprom-src = <&eeprom_rear>;
+ csiphy-sd-index = <0>;
+ sensor-position-roll = <90>;
+ sensor-position-pitch = <0>;
+ sensor-position-yaw = <180>;
+ led-flash-src = <&led_flash_rear>;
+ actuator-src = <&actuator_rear>;
+ ois-src = <&ois_rear>;
+ eeprom-src = <&eeprom_rear>;
cam_vio-supply = <&pm8998_lvs1>;
cam_vana-supply = <&pmi8998_bob>;
cam_vdig-supply = <&camera_rear_ldo>;
cam_clk-supply = <&titan_top_gdsc>;
- qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
"cam_clk";
- qcom,cam-vreg-min-voltage = <0 3312000 1050000 0>;
- qcom,cam-vreg-max-voltage = <0 3600000 1050000 0>;
- qcom,cam-vreg-op-mode = <0 80000 105000 0>;
- qcom,gpio-no-mux = <0>;
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <0 3312000 1050000 0>;
+ rgltr-max-voltage = <0 3600000 1050000 0>;
+ rgltr-load-current = <0 80000 105000 0>;
+ gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk0_active
&cam_sensor_rear_active>;
@@ -261,40 +268,42 @@
gpios = <&tlmm 13 0>,
<&tlmm 80 0>,
<&tlmm 79 0>;
- qcom,gpio-reset = <1>;
- qcom,gpio-vana = <2>;
- qcom,gpio-req-tbl-num = <0 1 2>;
- qcom,gpio-req-tbl-flags = <1 0 0>;
- qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+ gpio-reset = <1>;
+ gpio-vana = <2>;
+ gpio-req-tbl-num = <0 1 2>;
+ gpio-req-tbl-flags = <1 0 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK0",
"CAM_RESET0",
"CAM_VANA";
- qcom,sensor-mode = <0>;
- qcom,cci-master = <0>;
+ sensor-mode = <0>;
+ cci-master = <0>;
status = "ok";
clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
clock-names = "cam_clk";
- qcom,clock-rates = <24000000>;
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
};
qcom,cam-sensor@1 {
cell-index = <1>;
compatible = "qcom,cam-sensor";
reg = <0x1>;
- qcom,csiphy-sd-index = <1>;
- qcom,sensor-position-roll = <90>;
- qcom,sensor-position-pitch = <0>;
- qcom,sensor-position-yaw = <180>;
- qcom,eeprom-src = <&eeprom_rear_aux>;
+ csiphy-sd-index = <1>;
+ sensor-position-roll = <90>;
+ sensor-position-pitch = <0>;
+ sensor-position-yaw = <180>;
+ eeprom-src = <&eeprom_rear_aux>;
cam_vdig-supply = <&camera_ldo>;
cam_vio-supply = <&pm8998_lvs1>;
cam_vana-supply = <&pmi8998_bob>;
cam_clk-supply = <&titan_top_gdsc>;
- qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana",
+ regulator-names = "cam_vdig", "cam_vio", "cam_vana",
"cam_clk";
- qcom,cam-vreg-min-voltage = <1050000 0 3312000 0>;
- qcom,cam-vreg-max-voltage = <1050000 0 3600000 0>;
- qcom,cam-vreg-op-mode = <105000 0 80000 0>;
- qcom,gpio-no-mux = <0>;
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <1050000 0 3312000 0>;
+ rgltr-max-voltage = <1050000 0 3600000 0>;
+ rgltr-load-current = <105000 0 80000 0>;
+ gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk2_active
&cam_sensor_rear2_active>;
@@ -303,42 +312,44 @@
gpios = <&tlmm 15 0>,
<&tlmm 9 0>,
<&tlmm 8 0>;
- qcom,gpio-reset = <1>;
- qcom,gpio-vana = <2>;
- qcom,gpio-req-tbl-num = <0 1 2>;
- qcom,gpio-req-tbl-flags = <1 0 0>;
- qcom,gpio-req-tbl-label = "CAMIF_MCLK1",
+ gpio-reset = <1>;
+ gpio-vana = <2>;
+ gpio-req-tbl-num = <0 1 2>;
+ gpio-req-tbl-flags = <1 0 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK1",
"CAM_RESET1",
"CAM_VANA1";
- qcom,sensor-mode = <0>;
- qcom,cci-master = <1>;
+ sensor-mode = <0>;
+ cci-master = <1>;
status = "ok";
clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
clock-names = "cam_clk";
- qcom,clock-rates = <24000000>;
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
};
qcom,cam-sensor@2 {
cell-index = <2>;
compatible = "qcom,cam-sensor";
reg = <0x02>;
- qcom,csiphy-sd-index = <2>;
- qcom,sensor-position-roll = <90>;
- qcom,sensor-position-pitch = <0>;
- qcom,sensor-position-yaw = <0>;
- qcom,eeprom-src = <&eeprom_front>;
- qcom,actuator-src = <&actuator_front>;
- qcom,led-flash-src = <&led_flash_front>;
+ csiphy-sd-index = <2>;
+ sensor-position-roll = <90>;
+ sensor-position-pitch = <0>;
+ sensor-position-yaw = <0>;
+ eeprom-src = <&eeprom_front>;
+ actuator-src = <&actuator_front>;
+ led-flash-src = <&led_flash_front>;
cam_vio-supply = <&pm8998_lvs1>;
cam_vana-supply = <&pmi8998_bob>;
cam_vdig-supply = <&camera_ldo>;
cam_clk-supply = <&titan_top_gdsc>;
- qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
"cam_clk";
- qcom,cam-vreg-min-voltage = <0 3312000 1050000 0>;
- qcom,cam-vreg-max-voltage = <0 3600000 1050000 0>;
- qcom,cam-vreg-op-mode = <0 80000 105000 0>;
- qcom,gpio-no-mux = <0>;
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <0 3312000 1050000 0>;
+ rgltr-max-voltage = <0 3600000 1050000 0>;
+ rgltr-load-current = <0 80000 105000 0>;
+ gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk1_active
&cam_sensor_front_active>;
@@ -347,18 +358,19 @@
gpios = <&tlmm 14 0>,
<&tlmm 28 0>,
<&tlmm 8 0>;
- qcom,gpio-reset = <1>;
- qcom,gpio-vana = <2>;
- qcom,gpio-req-tbl-num = <0 1 2>;
- qcom,gpio-req-tbl-flags = <1 0 0>;
- qcom,gpio-req-tbl-label = "CAMIF_MCLK2",
+ gpio-reset = <1>;
+ gpio-vana = <2>;
+ gpio-req-tbl-num = <0 1 2>;
+ gpio-req-tbl-flags = <1 0 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK2",
"CAM_RESET2",
"CAM_VANA1";
- qcom,sensor-mode = <0>;
- qcom,cci-master = <1>;
+ sensor-mode = <0>;
+ cci-master = <1>;
status = "ok";
clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
clock-names = "cam_clk";
- qcom,clock-rates = <24000000>;
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
index a715025..5db4c35 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
@@ -16,9 +16,9 @@
cell-index = <0>;
reg = <0x00 0x00>;
compatible = "qcom,camera-flash";
- qcom,flash-source = <&pmi8998_flash0 &pmi8998_flash1>;
- qcom,torch-source = <&pmi8998_torch0 &pmi8998_torch1>;
- qcom,switch-source = <&pmi8998_switch0>;
+ flash-source = <&pmi8998_flash0 &pmi8998_flash1>;
+ torch-source = <&pmi8998_torch0 &pmi8998_torch1>;
+ switch-source = <&pmi8998_switch0>;
status = "ok";
};
@@ -26,9 +26,9 @@
cell-index = <1>;
reg = <0x01 0x00>;
compatible = "qcom,camera-flash";
- qcom,flash-source = <&pmi8998_flash2>;
- qcom,torch-source = <&pmi8998_torch2>;
- qcom,switch-source = <&pmi8998_switch1>;
+ flash-source = <&pmi8998_flash2>;
+ torch-source = <&pmi8998_torch2>;
+ switch-source = <&pmi8998_switch1>;
status = "ok";
};
@@ -78,36 +78,39 @@
cell-index = <0>;
reg = <0x0>;
compatible = "qcom,actuator";
- qcom,cci-master = <0>;
+ cci-master = <0>;
cam_vaf-supply = <&actuator_regulator>;
- qcom,cam-vreg-name = "cam_vaf";
- qcom,cam-vreg-min-voltage = <2800000>;
- qcom,cam-vreg-max-voltage = <2800000>;
- qcom,cam-vreg-op-mode = <0>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2800000>;
+ rgltr-max-voltage = <2800000>;
+ rgltr-load-current = <0>;
};
actuator_front: qcom,actuator@1 {
cell-index = <1>;
reg = <0x1>;
compatible = "qcom,actuator";
- qcom,cci-master = <1>;
+ cci-master = <1>;
cam_vaf-supply = <&actuator_regulator>;
- qcom,cam-vreg-name = "cam_vaf";
- qcom,cam-vreg-min-voltage = <2800000>;
- qcom,cam-vreg-max-voltage = <2800000>;
- qcom,cam-vreg-op-mode = <0>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2800000>;
+ rgltr-max-voltage = <2800000>;
+ rgltr-load-current = <0>;
};
ois_rear: qcom,ois@0 {
cell-index = <0>;
reg = <0x0>;
compatible = "qcom,ois";
- qcom,cci-master = <0>;
+ cci-master = <0>;
cam_vaf-supply = <&actuator_regulator>;
- qcom,cam-vreg-name = "cam_vaf";
- qcom,cam-vreg-min-voltage = <2800000>;
- qcom,cam-vreg-max-voltage = <2800000>;
- qcom,cam-vreg-op-mode = <0>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2800000>;
+ rgltr-max-voltage = <2800000>;
+ rgltr-load-current = <0>;
status = "disabled";
};
@@ -119,12 +122,13 @@
cam_vana-supply = <&pmi8998_bob>;
cam_vdig-supply = <&camera_rear_ldo>;
cam_clk-supply = <&titan_top_gdsc>;
- qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
"cam_clk";
- qcom,cam-vreg-min-voltage = <0 3312000 1050000 0>;
- qcom,cam-vreg-max-voltage = <0 3600000 1050000 0>;
- qcom,cam-vreg-op-mode = <0 80000 105000 0>;
- qcom,gpio-no-mux = <0>;
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <0 3312000 1050000 0>;
+ rgltr-max-voltage = <0 3600000 1050000 0>;
+ rgltr-load-current = <0 80000 105000 0>;
+ gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk0_active
&cam_sensor_rear_active>;
@@ -134,22 +138,22 @@
<&tlmm 80 0>,
<&tlmm 79 0>,
<&tlmm 27 0>;
- qcom,gpio-reset = <1>;
- qcom,gpio-vana = <2>;
- qcom,gpio-vaf = <3>;
- qcom,gpio-req-tbl-num = <0 1 2 3>;
- qcom,gpio-req-tbl-flags = <1 0 0 0>;
- qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+ gpio-reset = <1>;
+ gpio-vana = <2>;
+ gpio-vaf = <3>;
+ gpio-req-tbl-num = <0 1 2 3>;
+ gpio-req-tbl-flags = <1 0 0 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK0",
"CAM_RESET0",
"CAM_VANA0",
"CAM_VAF";
- qcom,sensor-position = <0>;
- qcom,sensor-mode = <0>;
- qcom,cci-master = <0>;
+ sensor-position = <0>;
+ sensor-mode = <0>;
+ cci-master = <0>;
status = "ok";
clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
clock-names = "cam_clk";
- qcom,clock-rates = <24000000>;
+ clock-rates = <24000000>;
};
eeprom_rear_aux: qcom,eeprom@1 {
@@ -160,12 +164,13 @@
cam_vio-supply = <&pm8998_lvs1>;
cam_vana-supply = <&pmi8998_bob>;
cam_clk-supply = <&titan_top_gdsc>;
- qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana",
+ regulator-names = "cam_vdig", "cam_vio", "cam_vana",
"cam_clk";
- qcom,cam-vreg-min-voltage = <1050000 0 3312000 0>;
- qcom,cam-vreg-max-voltage = <1050000 0 3600000 0>;
- qcom,cam-vreg-op-mode = <105000 0 80000 0>;
- qcom,gpio-no-mux = <0>;
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <1050000 0 3312000 0>;
+ rgltr-max-voltage = <1050000 0 3600000 0>;
+ rgltr-load-current = <105000 0 80000 0>;
+ gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk2_active
&cam_sensor_rear2_active>;
@@ -174,20 +179,20 @@
gpios = <&tlmm 15 0>,
<&tlmm 9 0>,
<&tlmm 8 0>;
- qcom,gpio-reset = <1>;
- qcom,gpio-vana = <2>;
- qcom,gpio-req-tbl-num = <0 1 2>;
- qcom,gpio-req-tbl-flags = <1 0 0>;
- qcom,gpio-req-tbl-label = "CAMIF_MCLK1",
+ gpio-reset = <1>;
+ gpio-vana = <2>;
+ gpio-req-tbl-num = <0 1 2>;
+ gpio-req-tbl-flags = <1 0 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK1",
"CAM_RESET1",
"CAM_VANA1";
- qcom,sensor-position = <0>;
- qcom,sensor-mode = <0>;
- qcom,cci-master = <1>;
+ sensor-position = <0>;
+ sensor-mode = <0>;
+ cci-master = <1>;
status = "ok";
clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
clock-names = "cam_clk";
- qcom,clock-rates = <24000000>;
+ clock-rates = <24000000>;
};
eeprom_front: qcom,eeprom@2 {
@@ -198,12 +203,13 @@
cam_vana-supply = <&pmi8998_bob>;
cam_vdig-supply = <&camera_ldo>;
cam_clk-supply = <&titan_top_gdsc>;
- qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
"cam_clk";
- qcom,cam-vreg-min-voltage = <0 3312000 1050000 0>;
- qcom,cam-vreg-max-voltage = <0 3600000 1050000 0>;
- qcom,cam-vreg-op-mode = <0 80000 105000 0>;
- qcom,gpio-no-mux = <0>;
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <0 3312000 1050000 0>;
+ rgltr-max-voltage = <0 3600000 1050000 0>;
+ rgltr-load-current = <0 80000 105000 0>;
+ gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk1_active
&cam_sensor_front_active>;
@@ -213,46 +219,47 @@
<&tlmm 28 0>,
<&tlmm 8 0>,
<&tlmm 27 0>;
- qcom,gpio-reset = <1>;
- qcom,gpio-vana = <2>;
- qcom,gpio-vaf = <3>;
- qcom,gpio-req-tbl-num = <0 1 2 3>;
- qcom,gpio-req-tbl-flags = <1 0 0 0>;
- qcom,gpio-req-tbl-label = "CAMIF_MCLK2",
+ gpio-reset = <1>;
+ gpio-vana = <2>;
+ gpio-vaf = <3>;
+ gpio-req-tbl-num = <0 1 2 3>;
+ gpio-req-tbl-flags = <1 0 0 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK2",
"CAM_RESET2",
"CAM_VANA2",
"CAM_VAF";
- qcom,sensor-position = <1>;
- qcom,sensor-mode = <0>;
- qcom,cci-master = <1>;
+ sensor-position = <1>;
+ sensor-mode = <0>;
+ cci-master = <1>;
status = "ok";
clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
clock-names = "cam_clk";
- qcom,clock-rates = <24000000>;
+ clock-rates = <24000000>;
};
qcom,cam-sensor@0 {
cell-index = <0>;
compatible = "qcom,cam-sensor";
reg = <0x0>;
- qcom,csiphy-sd-index = <0>;
- qcom,sensor-position-roll = <90>;
- qcom,sensor-position-pitch = <0>;
- qcom,sensor-position-yaw = <180>;
- qcom,led-flash-src = <&led_flash_rear>;
- qcom,actuator-src = <&actuator_rear>;
- qcom,ois-src = <&ois_rear>;
- qcom,eeprom-src = <&eeprom_rear>;
+ csiphy-sd-index = <0>;
+ sensor-position-roll = <90>;
+ sensor-position-pitch = <0>;
+ sensor-position-yaw = <180>;
+ led-flash-src = <&led_flash_rear>;
+ actuator-src = <&actuator_rear>;
+ ois-src = <&ois_rear>;
+ eeprom-src = <&eeprom_rear>;
cam_vio-supply = <&pm8998_lvs1>;
cam_vana-supply = <&pmi8998_bob>;
cam_vdig-supply = <&camera_rear_ldo>;
cam_clk-supply = <&titan_top_gdsc>;
- qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
"cam_clk";
- qcom,cam-vreg-min-voltage = <0 3312000 1050000 0>;
- qcom,cam-vreg-max-voltage = <0 3600000 1050000 0>;
- qcom,cam-vreg-op-mode = <0 80000 105000 0>;
- qcom,gpio-no-mux = <0>;
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <0 3312000 1050000 0>;
+ rgltr-max-voltage = <0 3600000 1050000 0>;
+ rgltr-load-current = <0 80000 105000 0>;
+ gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk0_active
&cam_sensor_rear_active>;
@@ -261,40 +268,42 @@
gpios = <&tlmm 13 0>,
<&tlmm 80 0>,
<&tlmm 79 0>;
- qcom,gpio-reset = <1>;
- qcom,gpio-vana = <2>;
- qcom,gpio-req-tbl-num = <0 1 2>;
- qcom,gpio-req-tbl-flags = <1 0 0>;
- qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+ gpio-reset = <1>;
+ gpio-vana = <2>;
+ gpio-req-tbl-num = <0 1 2>;
+ gpio-req-tbl-flags = <1 0 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK0",
"CAM_RESET0",
"CAM_VANA";
- qcom,sensor-mode = <0>;
- qcom,cci-master = <0>;
+ sensor-mode = <0>;
+ cci-master = <0>;
status = "ok";
clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
clock-names = "cam_clk";
- qcom,clock-rates = <24000000>;
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
};
qcom,cam-sensor@1 {
cell-index = <1>;
compatible = "qcom,cam-sensor";
reg = <0x1>;
- qcom,csiphy-sd-index = <1>;
- qcom,sensor-position-roll = <90>;
- qcom,sensor-position-pitch = <0>;
- qcom,sensor-position-yaw = <180>;
- qcom,eeprom-src = <&eeprom_rear_aux>;
+ csiphy-sd-index = <1>;
+ sensor-position-roll = <90>;
+ sensor-position-pitch = <0>;
+ sensor-position-yaw = <180>;
+ eeprom-src = <&eeprom_rear_aux>;
cam_vdig-supply = <&camera_ldo>;
cam_vio-supply = <&pm8998_lvs1>;
cam_vana-supply = <&pmi8998_bob>;
cam_clk-supply = <&titan_top_gdsc>;
- qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana",
+ regulator-names = "cam_vdig", "cam_vio", "cam_vana",
"cam_clk";
- qcom,cam-vreg-min-voltage = <1050000 0 3312000 0>;
- qcom,cam-vreg-max-voltage = <1050000 0 3600000 0>;
- qcom,cam-vreg-op-mode = <105000 0 80000 0>;
- qcom,gpio-no-mux = <0>;
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <1050000 0 3312000 0>;
+ rgltr-max-voltage = <1050000 0 3600000 0>;
+ rgltr-load-current = <105000 0 80000 0>;
+ gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk2_active
&cam_sensor_rear2_active>;
@@ -303,42 +312,44 @@
gpios = <&tlmm 15 0>,
<&tlmm 9 0>,
<&tlmm 8 0>;
- qcom,gpio-reset = <1>;
- qcom,gpio-vana = <2>;
- qcom,gpio-req-tbl-num = <0 1 2>;
- qcom,gpio-req-tbl-flags = <1 0 0>;
- qcom,gpio-req-tbl-label = "CAMIF_MCLK1",
+ gpio-reset = <1>;
+ gpio-vana = <2>;
+ gpio-req-tbl-num = <0 1 2>;
+ gpio-req-tbl-flags = <1 0 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK1",
"CAM_RESET1",
"CAM_VANA1";
- qcom,sensor-mode = <0>;
- qcom,cci-master = <1>;
+ sensor-mode = <0>;
+ cci-master = <1>;
status = "ok";
clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
clock-names = "cam_clk";
- qcom,clock-rates = <24000000>;
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
};
qcom,cam-sensor@2 {
cell-index = <2>;
compatible = "qcom,cam-sensor";
reg = <0x02>;
- qcom,csiphy-sd-index = <2>;
- qcom,sensor-position-roll = <90>;
- qcom,sensor-position-pitch = <0>;
- qcom,sensor-position-yaw = <0>;
- qcom,eeprom-src = <&eeprom_front>;
- qcom,actuator-src = <&actuator_front>;
- qcom,led-flash-src = <&led_flash_front>;
+ csiphy-sd-index = <2>;
+ sensor-position-roll = <90>;
+ sensor-position-pitch = <0>;
+ sensor-position-yaw = <0>;
+ eeprom-src = <&eeprom_front>;
+ actuator-src = <&actuator_front>;
+ led-flash-src = <&led_flash_front>;
cam_vio-supply = <&pm8998_lvs1>;
cam_vana-supply = <&pmi8998_bob>;
cam_vdig-supply = <&camera_ldo>;
cam_clk-supply = <&titan_top_gdsc>;
- qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
"cam_clk";
- qcom,cam-vreg-min-voltage = <0 3312000 1050000 0>;
- qcom,cam-vreg-max-voltage = <0 3600000 1050000 0>;
- qcom,cam-vreg-op-mode = <0 80000 105000 0>;
- qcom,gpio-no-mux = <0>;
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <0 3312000 1050000 0>;
+ rgltr-max-voltage = <0 3600000 1050000 0>;
+ rgltr-load-current = <0 80000 105000 0>;
+ gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk1_active
&cam_sensor_front_active>;
@@ -347,18 +358,19 @@
gpios = <&tlmm 14 0>,
<&tlmm 28 0>,
<&tlmm 8 0>;
- qcom,gpio-reset = <1>;
- qcom,gpio-vana = <2>;
- qcom,gpio-req-tbl-num = <0 1 2>;
- qcom,gpio-req-tbl-flags = <1 0 0>;
- qcom,gpio-req-tbl-label = "CAMIF_MCLK2",
+ gpio-reset = <1>;
+ gpio-vana = <2>;
+ gpio-req-tbl-num = <0 1 2>;
+ gpio-req-tbl-flags = <1 0 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK2",
"CAM_RESET2",
"CAM_VANA1";
- qcom,sensor-mode = <0>;
- qcom,cci-master = <1>;
+ sensor-mode = <0>;
+ cci-master = <1>;
status = "ok";
clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
clock-names = "cam_clk";
- qcom,clock-rates = <24000000>;
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
index 91b8738..cbd495a 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
@@ -22,12 +22,13 @@
compatible = "qcom,csiphy-v1.0", "qcom,csiphy";
reg = <0x0ac65000 0x1000>;
reg-names = "csiphy";
+ reg-cam-base = <0x65000>;
interrupts = <0 477 0>;
interrupt-names = "csiphy";
gdscr-supply = <&titan_top_gdsc>;
- qcom,cam-vreg-name = "gdscr";
- qcom,csi-vdd-voltage = <1200000>;
- qcom,mipi-csi-vdd-supply = <&pm8998_l26>;
+ regulator-names = "gdscr";
+ csi-vdd-voltage = <1200000>;
+ mipi-csi-vdd-supply = <&pm8998_l26>;
clocks = <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
<&clock_camcc CAM_CC_SOC_AHB_CLK>,
<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
@@ -48,8 +49,9 @@
"csi0phytimer_clk",
"ife_0_csid_clk",
"ife_0_csid_clk_src";
- qcom,clock-rates =
- <0 0 80000000 0 320000000 0 269333333 0 0 384000000>;
+ clock-cntl-level = "turbo";
+ clock-rates =
+ <0 0 0 0 320000000 0 269333333 0 0 384000000>;
status = "ok";
};
@@ -58,12 +60,13 @@
compatible = "qcom,csiphy-v1.0", "qcom,csiphy";
reg = <0xac66000 0x1000>;
reg-names = "csiphy";
+ reg-cam-base = <0x66000>;
interrupts = <0 478 0>;
interrupt-names = "csiphy";
gdscr-supply = <&titan_top_gdsc>;
- qcom,cam-vreg-name = "gdscr";
- qcom,csi-vdd-voltage = <1200000>;
- qcom,mipi-csi-vdd-supply = <&pm8998_l26>;
+ regulator-names = "gdscr";
+ csi-vdd-voltage = <1200000>;
+ mipi-csi-vdd-supply = <&pm8998_l26>;
clocks = <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
<&clock_camcc CAM_CC_SOC_AHB_CLK>,
<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
@@ -84,8 +87,9 @@
"csi1phytimer_clk",
"ife_1_csid_clk",
"ife_1_csid_clk_src";
- qcom,clock-rates =
- <0 0 80000000 0 320000000 0 269333333 0 0 384000000>;
+ clock-cntl-level = "turbo";
+ clock-rates =
+ <0 0 0 0 320000000 0 269333333 0 0 384000000>;
status = "ok";
};
@@ -95,12 +99,13 @@
compatible = "qcom,csiphy-v1.0", "qcom,csiphy";
reg = <0xac67000 0x1000>;
reg-names = "csiphy";
+ reg-cam-base = <0x67000>;
interrupts = <0 479 0>;
interrupt-names = "csiphy";
gdscr-supply = <&titan_top_gdsc>;
- qcom,cam-vreg-name = "gdscr";
- qcom,csi-vdd-voltage = <1200000>;
- qcom,mipi-csi-vdd-supply = <&pm8998_l26>;
+ regulator-names = "gdscr";
+ csi-vdd-voltage = <1200000>;
+ mipi-csi-vdd-supply = <&pm8998_l26>;
clocks = <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
<&clock_camcc CAM_CC_SOC_AHB_CLK>,
<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
@@ -121,23 +126,25 @@
"csi2phytimer_clk",
"ife_lite_csid_clk",
"ife_lite_csid_clk_src";
- qcom,clock-rates =
- <0 0 80000000 0 320000000 0 269333333 0 0 384000000>;
+ clock-cntl-level = "turbo";
+ clock-rates =
+ <0 0 0 0 320000000 0 269333333 0 0 384000000>;
status = "ok";
};
cam_cci: qcom,cci@ac4a000 {
cell-index = <0>;
compatible = "qcom,cci";
- reg = <0xac4a000 0x4000>;
#address-cells = <1>;
#size-cells = <0>;
+ reg = <0xac4a000 0x4000>;
reg-names = "cci";
- interrupts = <0 460 0>;
+ reg-cam-base = <0x4a000>;
interrupt-names = "cci";
+ interrupts = <0 460 0>;
status = "ok";
gdscr-supply = <&titan_top_gdsc>;
- qcom,cam-vreg-name = "gdscr";
+ regulator-names = "gdscr";
clocks = <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
<&clock_camcc CAM_CC_SOC_AHB_CLK>,
<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
@@ -150,17 +157,19 @@
"cpas_ahb_clk",
"cci_clk",
"cci_clk_src";
- qcom,clock-rates = <0 0 80000000 0 0 37500000>;
- pinctrl-names = "cci_default", "cci_suspend";
+ src-clock-name = "cci_clk_src";
+ clock-cntl-level = "turbo";
+ clock-rates = <0 0 0 0 0 37500000>;
+ pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cci0_active &cci1_active>;
pinctrl-1 = <&cci0_suspend &cci1_suspend>;
gpios = <&tlmm 17 0>,
<&tlmm 18 0>,
<&tlmm 19 0>,
<&tlmm 20 0>;
- qcom,gpio-tbl-num = <0 1 2 3>;
- qcom,gpio-tbl-flags = <1 1 1 1>;
- qcom,gpio-tbl-label = "CCI_I2C_DATA0",
+ gpio-req-tbl-num = <0 1 2 3>;
+ gpio-req-tbl-flags = <1 1 1 1>;
+ gpio-req-tbl-label = "CCI_I2C_DATA0",
"CCI_I2C_CLK0",
"CCI_I2C_DATA1",
"CCI_I2C_CLK1";
@@ -353,14 +362,28 @@
<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
src-clock-name = "slow_ahb_clk_src";
- clock-rates = <0 0 0 80000000 0 0>;
+ clock-rates = <0 0 0 0 0 0>,
+ <0 0 0 19200000 0 0>,
+ <0 0 0 60000000 0 0>,
+ <0 0 0 66660000 0 0>,
+ <0 0 0 73840000 0 0>,
+ <0 0 0 80000000 0 0>,
+ <0 0 0 80000000 0 0>;
+ clock-cntl-level = "suspend", "minsvs", "lowsvs", "svs",
+ "svs_l1", "nominal", "turbo";
qcom,msm-bus,name = "cam_ahb";
- qcom,msm-bus,num-cases = <4>;
+ qcom,msm-bus,num-cases = <7>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<MSM_BUS_MASTER_AMPSS_M0
MSM_BUS_SLAVE_CAMERA_CFG 0 0>,
<MSM_BUS_MASTER_AMPSS_M0
+ MSM_BUS_SLAVE_CAMERA_CFG 0 180000>,
+ <MSM_BUS_MASTER_AMPSS_M0
+ MSM_BUS_SLAVE_CAMERA_CFG 0 180000>,
+ <MSM_BUS_MASTER_AMPSS_M0
+ MSM_BUS_SLAVE_CAMERA_CFG 0 300000>,
+ <MSM_BUS_MASTER_AMPSS_M0
MSM_BUS_SLAVE_CAMERA_CFG 0 300000>,
<MSM_BUS_MASTER_AMPSS_M0
MSM_BUS_SLAVE_CAMERA_CFG 0 640000>,
@@ -378,7 +401,7 @@
RPMH_REGULATOR_LEVEL_TURBO
RPMH_REGULATOR_LEVEL_TURBO_L1>;
vdd-corner-ahb-mapping = "suspend", "suspend",
- "svs", "svs", "svs", "svs",
+ "minsvs", "lowsvs", "svs", "svs_l1",
"nominal", "nominal", "nominal",
"turbo", "turbo";
client-id-based;
@@ -508,6 +531,7 @@
<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
clock-rates = <0 0 0 0 0>;
+ clock-cntl-level = "svs";
cdm-client-names = "ife";
status = "ok";
};
@@ -555,7 +579,8 @@
<&clock_camcc CAM_CC_IFE_0_CLK_SRC>,
<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
<&clock_camcc CAM_CC_IFE_0_AXI_CLK>;
- clock-rates = <0 0 0 0 0 0 500000000 0 0 0 60000000 0 0>;
+ clock-rates = <0 0 0 0 0 0 500000000 0 0 0 600000000 0 0>;
+ clock-cntl-level = "turbo";
src-clock-name = "ife_csid_clk_src";
status = "ok";
};
@@ -590,6 +615,7 @@
<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
<&clock_camcc CAM_CC_IFE_0_AXI_CLK>;
clock-rates = <0 0 0 0 0 0 600000000 0 0>;
+ clock-cntl-level = "turbo";
src-clock-name = "ife_clk_src";
clock-names-option = "ife_dsp_clk";
clocks-option = <&clock_camcc CAM_CC_IFE_0_DSP_CLK>;
@@ -634,7 +660,8 @@
<&clock_camcc CAM_CC_IFE_1_CLK_SRC>,
<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
<&clock_camcc CAM_CC_IFE_1_AXI_CLK>;
- clock-rates = <0 0 0 0 0 0 500000000 0 0 0 60000000 0 0>;
+ clock-rates = <0 0 0 0 0 0 500000000 0 0 0 600000000 0 0>;
+ clock-cntl-level = "turbo";
src-clock-name = "ife_csid_clk_src";
status = "ok";
};
@@ -669,6 +696,7 @@
<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
<&clock_camcc CAM_CC_IFE_1_AXI_CLK>;
clock-rates = <0 0 0 0 0 0 600000000 0 0>;
+ clock-cntl-level = "turbo";
src-clock-name = "ife_clk_src";
clock-names-option = "ife_dsp_clk";
clocks-option = <&clock_camcc CAM_CC_IFE_1_DSP_CLK>;
@@ -710,7 +738,8 @@
<&clock_camcc CAM_CC_IFE_LITE_CLK>,
<&clock_camcc CAM_CC_IFE_LITE_CLK_SRC>,
<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
- clock-rates = <0 0 0 0 0 0 384000000 0 0 0 40400000 0>;
+ clock-rates = <0 0 0 0 0 0 384000000 0 0 0 404000000 0>;
+ clock-cntl-level = "turbo";
src-clock-name = "ife_csid_clk_src";
status = "ok";
};
@@ -741,7 +770,8 @@
<&clock_camcc CAM_CC_IFE_LITE_CLK>,
<&clock_camcc CAM_CC_IFE_LITE_CLK_SRC>,
<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
- qcom,clock-rates = <0 0 0 0 0 0 404000000 0>;
+ clock-rates = <0 0 0 0 0 0 404000000 0>;
+ clock-cntl-level = "turbo";
src-clock-name = "ife_clk_src";
status = "ok";
};
@@ -790,6 +820,7 @@
<&clock_camcc CAM_CC_ICP_CLK_SRC>;
clock-rates = <0 0 400000000 0 0 0 0 0 600000000>;
+ clock-cntl-level = "turbo";
fw_name = "CAMERA_ICP.elf";
status = "ok";
};
@@ -811,6 +842,7 @@
<&clock_camcc CAM_CC_IPE_0_CLK_SRC>;
clock-rates = <0 0 0 0 600000000>;
+ clock-cntl-level = "turbo";
status = "ok";
};
@@ -831,6 +863,7 @@
<&clock_camcc CAM_CC_IPE_1_CLK_SRC>;
clock-rates = <0 0 0 0 600000000>;
+ clock-cntl-level = "turbo";
status = "ok";
};
@@ -851,6 +884,7 @@
<&clock_camcc CAM_CC_BPS_CLK_SRC>;
clock-rates = <0 0 0 0 600000000>;
+ clock-cntl-level = "turbo";
status = "ok";
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-ion.dtsi b/arch/arm64/boot/dts/qcom/sdm845-ion.dtsi
index 2579819..829dfcc 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-ion.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-ion.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -33,10 +33,10 @@
qcom,ion-heap-type = "DMA";
};
- qcom,ion-heap@13 { /* SPSS HEAP */
+ qcom,ion-heap@13 { /* SECURE SPSS HEAP */
reg = <13>;
- memory-region = <&sp_mem>;
- qcom,ion-heap-type = "DMA";
+ memory-region = <&secure_sp_mem>;
+ qcom,ion-heap-type = "HYP_CMA";
};
qcom,ion-heap@10 { /* SECURE DISPLAY HEAP */
diff --git a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
index ba8e289..b826768 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
@@ -131,19 +131,19 @@
qcom,cpr-voltage-floor =
/* Speed bin 0 */
<568000 568000 568000 568000 568000
- 568000 568000 568000 568000 584000
- 584000 584000 632000 632000 632000
- 632000 672000 996000 996000>,
+ 568000 568000 568000 568000 568000
+ 568000 568000 568000 568000 568000
+ 568000 568000 996000 996000>,
/* Speed bin 1 */
<568000 568000 568000 568000 568000
- 568000 568000 568000 568000 584000
- 584000 584000 632000 632000 632000
- 632000 672000 712000 712000>,
+ 568000 568000 568000 568000 568000
+ 568000 568000 568000 568000 568000
+ 568000 568000 568000 568000>,
/* Speed bin 2 */
<568000 568000 568000 568000 568000
- 568000 568000 568000 568000 584000
- 584000 584000 632000 632000 632000
- 632000 672000 712000 712000>;
+ 568000 568000 568000 568000 568000
+ 568000 568000 568000 568000 568000
+ 568000 568000 568000 568000>;
qcom,cpr-floor-to-ceiling-max-range =
<32000 32000 32000 32000 32000
@@ -317,16 +317,16 @@
qcom,cpr-voltage-floor =
/* Speed bin 0 */
<568000 568000 568000 568000 568000
- 584000 584000 632000 672000 996000
+ 568000 568000 568000 568000 996000
996000>,
/* Speed bin 1 */
<568000 568000 568000 568000 568000
- 584000 584000 632000 672000 712000
- 712000>,
+ 568000 568000 568000 568000 568000
+ 568000>,
/* Speed bin 2 */
<568000 568000 568000 568000 568000
- 584000 584000 632000 672000 712000
- 712000 712000 712000>;
+ 568000 568000 568000 568000 568000
+ 568000 568000 568000>;
qcom,cpr-floor-to-ceiling-max-range =
/* Speed bin 0 */
@@ -557,22 +557,22 @@
/* Speed bin 0 */
<568000 568000 568000 568000 568000
568000 568000 568000 568000 568000
- 584000 584000 632000 632000 632000
- 632000 632000 672000 712000 712000
- 772000 772000>,
+ 568000 568000 568000 568000 568000
+ 568000 568000 568000 568000 568000
+ 568000 568000>,
/* Speed bin 1 */
<568000 568000 568000 568000 568000
568000 568000 568000 568000 568000
- 584000 584000 632000 632000 632000
- 632000 632000 672000 712000 712000
- 772000 772000 772000 772000>,
+ 568000 568000 568000 568000 568000
+ 568000 568000 568000 568000 568000
+ 568000 568000 568000 568000>,
/* Speed bin 2 */
<568000 568000 568000 568000 568000
568000 568000 568000 568000 568000
- 584000 584000 632000 632000 632000
- 632000 632000 672000 712000 712000
- 772000 772000 772000 772000
- 772000>;
+ 568000 568000 568000 568000 568000
+ 568000 568000 568000 568000 568000
+ 568000 568000 568000 568000
+ 568000>;
qcom,cpr-floor-to-ceiling-max-range =
/* Speed bin 0 */
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
index 83feac0..bde64b9 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
@@ -412,6 +412,7 @@
qcom,dp-usbpd-detection = <&pmi8998_pdphy>;
qcom,aux-cfg-settings = [00 13 04 00 0a 26 0a 03 bb 03];
+ qcom,max-pclk-frequency-khz = <576000>;
qcom,core-supply-entries {
#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
index e31f8fd..89f80d4 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
@@ -207,6 +207,12 @@
};
};
+ smmu_sde_sec: qcom,smmu_sde_sec_cb {
+ compatible = "qcom,smmu_sde_sec";
+ iommus = <&apps_smmu 0x881 0x8>,
+ <&apps_smmu 0xc81 0x8>;
+ };
+
/* data and reg bus scale settings */
qcom,sde-data-bus {
qcom,msm-bus,name = "mdss_sde_mnoc";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
index bf72741..95ee14c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
@@ -36,3 +36,8 @@
&clock_videocc {
compatible = "qcom,video_cc-sdm845-v2";
};
+
+&msm_vidc {
+ qcom,allowed-clock-rates = <100000000 200000000 330000000
+ 404000000 444000000 533000000>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 5208152..2c0373d 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -603,7 +603,7 @@
size = <0 0x1400000>;
};
- sp_mem: sp_region { /* SPSS-HLOS ION shared mem */
+ secure_sp_mem: secure_sp_region { /* SPSS-HLOS ION shared mem */
compatible = "shared-dma-pool";
alloc-ranges = <0 0x00000000 0 0xffffffff>; /* 32-bit */
reusable;
@@ -760,6 +760,8 @@
compatible = "qcom,spmi-pmic-arb-debug";
reg = <0x6b22000 0x60>, <0x7820A8 4>;
reg-names = "core", "fuse";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "core_clk";
qcom,fuse-disable-bit = <12>;
#address-cells = <2>;
#size-cells = <0>;
@@ -770,6 +772,7 @@
reg = <0x0 SPMI_USID>;
#address-cells = <2>;
#size-cells = <0>;
+ qcom,can-sleep;
};
qcom,pm8998-debug@1 {
@@ -777,6 +780,7 @@
reg = <0x1 SPMI_USID>;
#address-cells = <2>;
#size-cells = <0>;
+ qcom,can-sleep;
};
qcom,pmi8998-debug@2 {
@@ -784,6 +788,7 @@
reg = <0x2 SPMI_USID>;
#address-cells = <2>;
#size-cells = <0>;
+ qcom,can-sleep;
};
qcom,pmi8998-debug@3 {
@@ -791,6 +796,7 @@
reg = <0x3 SPMI_USID>;
#address-cells = <2>;
#size-cells = <0>;
+ qcom,can-sleep;
};
qcom,pm8005-debug@4 {
@@ -798,6 +804,7 @@
reg = <0x4 SPMI_USID>;
#address-cells = <2>;
#size-cells = <0>;
+ qcom,can-sleep;
};
qcom,pm8005-debug@5 {
@@ -805,6 +812,7 @@
reg = <0x5 SPMI_USID>;
#address-cells = <2>;
#size-cells = <0>;
+ qcom,can-sleep;
};
};
@@ -2761,17 +2769,17 @@
<90 512 80000 640000>,
<90 585 80000 640000>,
<1 676 80000 80000>,
- <143 777 0 150000000>,
+ <143 777 0 150>, /* IB defined for IPA clk in MHz */
/* NOMINAL */
<90 512 206000 960000>,
<90 585 206000 960000>,
<1 676 206000 160000>,
- <143 777 0 300000000>,
+ <143 777 0 300>, /* IB defined for IPA clk in MHz */
/* TURBO */
<90 512 206000 3600000>,
<90 585 206000 3600000>,
<1 676 206000 300000>,
- <143 777 0 355333333>;
+ <143 777 0 355>; /* IB defined for IPA clk in MHz */
qcom,bus-vector-names = "MIN", "SVS", "NOMINAL", "TURBO";
/* IPA RAM mmap */
@@ -2964,6 +2972,61 @@
qcom,vdd-3.3-ch0-config = <3104000 3312000>;
};
+ qmi-tmd-devices {
+ compatible = "qcom,qmi_cooling_devices";
+
+ modem {
+ qcom,instance-id = <0x0>;
+
+ modem_pa: modem_pa {
+ qcom,qmi-dev-name = "pa";
+ #cooling-cells = <2>;
+ };
+
+ modem_proc: modem_proc {
+ qcom,qmi-dev-name = "modem";
+ #cooling-cells = <2>;
+ };
+
+ modem_current: modem_current {
+ qcom,qmi-dev-name = "modem_current";
+ #cooling-cells = <2>;
+ };
+
+ modem_vdd: modem_vdd {
+ qcom,qmi-dev-name = "cpuv_restriction_cold";
+ #cooling-cells = <2>;
+ };
+ };
+
+ adsp {
+ qcom,instance-id = <0x1>;
+
+ adsp_vdd: adsp_vdd {
+ qcom,qmi-dev-name = "cpuv_restriction_cold";
+ #cooling-cells = <2>;
+ };
+ };
+
+ cdsp {
+ qcom,instance-id = <0x43>;
+
+ cdsp_vdd: cdsp_vdd {
+ qcom,qmi-dev-name = "cpuv_restriction_cold";
+ #cooling-cells = <2>;
+ };
+ };
+
+ slpi {
+ qcom,instance-id = <0x53>;
+
+ slpi_vdd: slpi_vdd {
+ qcom,qmi-dev-name = "cpuv_restriction_cold";
+ #cooling-cells = <2>;
+ };
+ };
+ };
+
thermal_zones: thermal-zones {
aoss0-usr {
polling-delay-passive = <0>;
@@ -3823,6 +3886,22 @@
trip = <&aoss0_trip>;
cooling-device = <&ebi_cdev 0 0>;
};
+ modem_vdd_cdev {
+ trip = <&aoss0_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&aoss0_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ cdsp_vdd_cdev {
+ trip = <&aoss0_trip>;
+ cooling-device = <&cdsp_vdd 0 0>;
+ };
+ slpi_vdd_cdev {
+ trip = <&aoss0_trip>;
+ cooling-device = <&slpi_vdd 0 0>;
+ };
};
};
@@ -3864,6 +3943,22 @@
trip = <&cpu0_trip>;
cooling-device = <&ebi_cdev 0 0>;
};
+ modem_vdd_cdev {
+ trip = <&cpu0_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&cpu0_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ cdsp_vdd_cdev {
+ trip = <&cpu0_trip>;
+ cooling-device = <&cdsp_vdd 0 0>;
+ };
+ slpi_vdd_cdev {
+ trip = <&cpu0_trip>;
+ cooling-device = <&slpi_vdd 0 0>;
+ };
};
};
@@ -3905,6 +4000,22 @@
trip = <&cpu1_trip>;
cooling-device = <&ebi_cdev 0 0>;
};
+ modem_vdd_cdev {
+ trip = <&cpu1_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&cpu1_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ cdsp_vdd_cdev {
+ trip = <&cpu1_trip>;
+ cooling-device = <&cdsp_vdd 0 0>;
+ };
+ slpi_vdd_cdev {
+ trip = <&cpu1_trip>;
+ cooling-device = <&slpi_vdd 0 0>;
+ };
};
};
@@ -3946,6 +4057,22 @@
trip = <&cpu2_trip>;
cooling-device = <&ebi_cdev 0 0>;
};
+ modem_vdd_cdev {
+ trip = <&cpu2_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&cpu2_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ cdsp_vdd_cdev {
+ trip = <&cpu2_trip>;
+ cooling-device = <&cdsp_vdd 0 0>;
+ };
+ slpi_vdd_cdev {
+ trip = <&cpu2_trip>;
+ cooling-device = <&slpi_vdd 0 0>;
+ };
};
};
@@ -3987,6 +4114,22 @@
trip = <&cpu3_trip>;
cooling-device = <&ebi_cdev 0 0>;
};
+ modem_vdd_cdev {
+ trip = <&cpu3_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&cpu3_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ cdsp_vdd_cdev {
+ trip = <&cpu3_trip>;
+ cooling-device = <&cdsp_vdd 0 0>;
+ };
+ slpi_vdd_cdev {
+ trip = <&cpu3_trip>;
+ cooling-device = <&slpi_vdd 0 0>;
+ };
};
};
@@ -4028,6 +4171,22 @@
trip = <&l3_0_trip>;
cooling-device = <&ebi_cdev 0 0>;
};
+ modem_vdd_cdev {
+ trip = <&l3_0_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&l3_0_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ cdsp_vdd_cdev {
+ trip = <&l3_0_trip>;
+ cooling-device = <&cdsp_vdd 0 0>;
+ };
+ slpi_vdd_cdev {
+ trip = <&l3_0_trip>;
+ cooling-device = <&slpi_vdd 0 0>;
+ };
};
};
@@ -4069,6 +4228,22 @@
trip = <&l3_1_trip>;
cooling-device = <&ebi_cdev 0 0>;
};
+ modem_vdd_cdev {
+ trip = <&l3_1_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&l3_1_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ cdsp_vdd_cdev {
+ trip = <&l3_1_trip>;
+ cooling-device = <&cdsp_vdd 0 0>;
+ };
+ slpi_vdd_cdev {
+ trip = <&l3_1_trip>;
+ cooling-device = <&slpi_vdd 0 0>;
+ };
};
};
@@ -4110,6 +4285,22 @@
trip = <&cpug0_trip>;
cooling-device = <&ebi_cdev 0 0>;
};
+ modem_vdd_cdev {
+ trip = <&cpug0_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&cpug0_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ cdsp_vdd_cdev {
+ trip = <&cpug0_trip>;
+ cooling-device = <&cdsp_vdd 0 0>;
+ };
+ slpi_vdd_cdev {
+ trip = <&cpug0_trip>;
+ cooling-device = <&slpi_vdd 0 0>;
+ };
};
};
@@ -4151,6 +4342,22 @@
trip = <&cpug1_trip>;
cooling-device = <&ebi_cdev 0 0>;
};
+ modem_vdd_cdev {
+ trip = <&cpug1_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&cpug1_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ cdsp_vdd_cdev {
+ trip = <&cpug1_trip>;
+ cooling-device = <&cdsp_vdd 0 0>;
+ };
+ slpi_vdd_cdev {
+ trip = <&cpug1_trip>;
+ cooling-device = <&slpi_vdd 0 0>;
+ };
};
};
@@ -4192,6 +4399,22 @@
trip = <&cpug2_trip>;
cooling-device = <&ebi_cdev 0 0>;
};
+ modem_vdd_cdev {
+ trip = <&cpug2_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&cpug2_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ cdsp_vdd_cdev {
+ trip = <&cpug2_trip>;
+ cooling-device = <&cdsp_vdd 0 0>;
+ };
+ slpi_vdd_cdev {
+ trip = <&cpug2_trip>;
+ cooling-device = <&slpi_vdd 0 0>;
+ };
};
};
@@ -4233,6 +4456,22 @@
trip = <&cpug3_trip>;
cooling-device = <&ebi_cdev 0 0>;
};
+ modem_vdd_cdev {
+ trip = <&cpug3_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&cpug3_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ cdsp_vdd_cdev {
+ trip = <&cpug3_trip>;
+ cooling-device = <&cdsp_vdd 0 0>;
+ };
+ slpi_vdd_cdev {
+ trip = <&cpug3_trip>;
+ cooling-device = <&slpi_vdd 0 0>;
+ };
};
};
@@ -4274,6 +4513,22 @@
trip = <&gpu0_trip_l>;
cooling-device = <&ebi_cdev 0 0>;
};
+ modem_vdd_cdev {
+ trip = <&gpu0_trip_l>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&gpu0_trip_l>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ cdsp_vdd_cdev {
+ trip = <&gpu0_trip_l>;
+ cooling-device = <&cdsp_vdd 0 0>;
+ };
+ slpi_vdd_cdev {
+ trip = <&gpu0_trip_l>;
+ cooling-device = <&slpi_vdd 0 0>;
+ };
};
};
@@ -4315,6 +4570,22 @@
trip = <&gpu1_trip_l>;
cooling-device = <&ebi_cdev 0 0>;
};
+ modem_vdd_cdev {
+ trip = <&gpu1_trip_l>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&gpu1_trip_l>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ cdsp_vdd_cdev {
+ trip = <&gpu1_trip_l>;
+ cooling-device = <&cdsp_vdd 0 0>;
+ };
+ slpi_vdd_cdev {
+ trip = <&gpu1_trip_l>;
+ cooling-device = <&slpi_vdd 0 0>;
+ };
};
};
@@ -4356,6 +4627,22 @@
trip = <&aoss1_trip>;
cooling-device = <&ebi_cdev 0 0>;
};
+ modem_vdd_cdev {
+ trip = <&aoss1_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&aoss1_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ cdsp_vdd_cdev {
+ trip = <&aoss1_trip>;
+ cooling-device = <&cdsp_vdd 0 0>;
+ };
+ slpi_vdd_cdev {
+ trip = <&aoss1_trip>;
+ cooling-device = <&slpi_vdd 0 0>;
+ };
};
};
@@ -4397,6 +4684,22 @@
trip = <&dsp_trip>;
cooling-device = <&ebi_cdev 0 0>;
};
+ modem_vdd_cdev {
+ trip = <&dsp_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&dsp_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ cdsp_vdd_cdev {
+ trip = <&dsp_trip>;
+ cooling-device = <&cdsp_vdd 0 0>;
+ };
+ slpi_vdd_cdev {
+ trip = <&dsp_trip>;
+ cooling-device = <&slpi_vdd 0 0>;
+ };
};
};
@@ -4438,6 +4741,22 @@
trip = <&ddr_trip>;
cooling-device = <&ebi_cdev 0 0>;
};
+ modem_vdd_cdev {
+ trip = <&ddr_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&ddr_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ cdsp_vdd_cdev {
+ trip = <&ddr_trip>;
+ cooling-device = <&cdsp_vdd 0 0>;
+ };
+ slpi_vdd_cdev {
+ trip = <&ddr_trip>;
+ cooling-device = <&slpi_vdd 0 0>;
+ };
};
};
@@ -4479,6 +4798,22 @@
trip = <&wlan_trip>;
cooling-device = <&ebi_cdev 0 0>;
};
+ modem_vdd_cdev {
+ trip = <&wlan_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&wlan_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ cdsp_vdd_cdev {
+ trip = <&wlan_trip>;
+ cooling-device = <&cdsp_vdd 0 0>;
+ };
+ slpi_vdd_cdev {
+ trip = <&wlan_trip>;
+ cooling-device = <&slpi_vdd 0 0>;
+ };
};
};
@@ -4520,6 +4855,22 @@
trip = <&hvx_trip>;
cooling-device = <&ebi_cdev 0 0>;
};
+ modem_vdd_cdev {
+ trip = <&hvx_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&hvx_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ cdsp_vdd_cdev {
+ trip = <&hvx_trip>;
+ cooling-device = <&cdsp_vdd 0 0>;
+ };
+ slpi_vdd_cdev {
+ trip = <&hvx_trip>;
+ cooling-device = <&slpi_vdd 0 0>;
+ };
};
};
@@ -4561,6 +4912,22 @@
trip = <&camera_trip>;
cooling-device = <&ebi_cdev 0 0>;
};
+ modem_vdd_cdev {
+ trip = <&camera_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&camera_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ cdsp_vdd_cdev {
+ trip = <&camera_trip>;
+ cooling-device = <&cdsp_vdd 0 0>;
+ };
+ slpi_vdd_cdev {
+ trip = <&camera_trip>;
+ cooling-device = <&slpi_vdd 0 0>;
+ };
};
};
@@ -4602,6 +4969,22 @@
trip = <&mmss_trip>;
cooling-device = <&ebi_cdev 0 0>;
};
+ modem_vdd_cdev {
+ trip = <&mmss_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&mmss_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ cdsp_vdd_cdev {
+ trip = <&mmss_trip>;
+ cooling-device = <&cdsp_vdd 0 0>;
+ };
+ slpi_vdd_cdev {
+ trip = <&mmss_trip>;
+ cooling-device = <&slpi_vdd 0 0>;
+ };
};
};
@@ -4643,6 +5026,22 @@
trip = <&mdm_trip>;
cooling-device = <&ebi_cdev 0 0>;
};
+ modem_vdd_cdev {
+ trip = <&mdm_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&mdm_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ cdsp_vdd_cdev {
+ trip = <&mdm_trip>;
+ cooling-device = <&cdsp_vdd 0 0>;
+ };
+ slpi_vdd_cdev {
+ trip = <&mdm_trip>;
+ cooling-device = <&slpi_vdd 0 0>;
+ };
};
};
};
diff --git a/arch/arm64/configs/sdm670_defconfig b/arch/arm64/configs/sdm670_defconfig
new file mode 100644
index 0000000..f28a9a6
--- /dev/null
+++ b/arch/arm64/configs/sdm670_defconfig
@@ -0,0 +1,639 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_FHANDLE is not set
+CONFIG_AUDIT=y
+# CONFIG_AUDITSYSCALL is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_SCHED_WALT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_RCU_NOCB_CPU_ALL=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHEDTUNE=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_SCHED_CORE_CTL=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCHED_TUNE=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_AIO is not set
+# CONFIG_MEMBARRIER is not set
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_CC_STACKPROTECTOR_REGULAR=y
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_SHA512=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_SDM670=y
+CONFIG_PCI=y
+CONFIG_PCI_MSM=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=8
+CONFIG_PREEMPT=y
+CONFIG_HZ_100=y
+CONFIG_CLEANCACHE=y
+CONFIG_CMA=y
+CONFIG_CMA_DEBUGFS=y
+CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_SECCOMP=y
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+# CONFIG_ARM64_VHE is not set
+CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_COMPAT=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_PM_DEBUG=y
+CONFIG_CPU_IDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_BOOST=y
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_CPU_FREQ_MSM=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_L2TP=y
+CONFIG_L2TP_DEBUGFS=y
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=y
+CONFIG_L2TP_ETH=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_RMNET_DATA=y
+CONFIG_RMNET_DATA_FC=y
+CONFIG_RMNET_DATA_DEBUG_PKT=y
+CONFIG_BT=y
+CONFIG_MSM_BT_POWER=y
+CONFIG_CFG80211=y
+CONFIG_CFG80211_INTERNAL_REGDB=y
+# CONFIG_CFG80211_CRDA_SUPPORT is not set
+CONFIG_RFKILL=y
+CONFIG_NFC_NQ=y
+CONFIG_IPC_ROUTER=y
+CONFIG_IPC_ROUTER_SECURITY=y
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_DMA_CMA=y
+CONFIG_ZRAM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_QSEECOM=y
+CONFIG_UID_SYS_STATS=y
+CONFIG_MEMORY_STATE_TIME=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFS_QCOM_ICE=y
+CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=y
+CONFIG_DUMMY=y
+CONFIG_TUN=y
+CONFIG_RNDIS_IPA=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_USB_USBNET=y
+CONFIG_WIL6210=m
+CONFIG_WCNSS_MEM_PRE_ALLOC=y
+CONFIG_CLD_LL_CORE=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_HBTP_INPUT=y
+CONFIG_INPUT_QPNP_POWER_ON=y
+CONFIG_INPUT_UINPUT=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_MSM_GENI=y
+CONFIG_SERIAL_MSM_GENI_CONSOLE=y
+CONFIG_DIAG_CHAR=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
+CONFIG_MSM_ADSPRPC=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_QCOM_GENI=y
+CONFIG_SOUNDWIRE=y
+CONFIG_SPI=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_QCOM_GENI=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SLIMBUS_MSM_NGD=y
+CONFIG_SPMI=y
+CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
+CONFIG_PINCTRL_SDM670=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_RESET_QCOM=y
+CONFIG_QCOM_DLOAD_MODE=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_QPNP_FG_GEN3=y
+CONFIG_SMB1355_SLAVE_CHARGER=y
+CONFIG_QPNP_SMB2=y
+CONFIG_QPNP_QNOVO=y
+CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_STEP_WISE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
+CONFIG_QCOM_SPMI_TEMP_ALARM=y
+CONFIG_THERMAL_QPNP=y
+CONFIG_THERMAL_QPNP_ADC_TM=y
+CONFIG_THERMAL_TSENS=y
+CONFIG_MSM_BCL_PERIPHERAL_CTL=y
+CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_QTI_VIRTUAL_SENSOR=y
+CONFIG_MFD_I2C_PMIC=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_WCD934X_CODEC=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_CPRH_KBSS=y
+CONFIG_REGULATOR_QPNP_LABIBB=y
+CONFIG_REGULATOR_QPNP=y
+CONFIG_REGULATOR_RPMH=y
+CONFIG_REGULATOR_STUB=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_VIDEO_ADV_DEBUG=y
+CONFIG_VIDEO_FIXED_MINOR_RANGES=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_SPECTRA_CAMERA=y
+CONFIG_MSM_VIDC_V4L2=y
+CONFIG_MSM_VIDC_GOVERNORS=y
+CONFIG_MSM_SDE_ROTATOR=y
+CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
+CONFIG_QCOM_KGSL=y
+CONFIG_DRM=y
+CONFIG_DRM_SDE_EVTLOG_DEBUG=y
+CONFIG_DRM_SDE_RSC=y
+CONFIG_FB_VIRTUAL=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_SDM845=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_USB=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_MSM=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_ISP1760_HOST_ROLE=y
+CONFIG_USB_PD_POLICY=y
+CONFIG_QPNP_USB_PDPHY=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_DUAL_ROLE_USB_INTF=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_QUSB_PHY=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_CDEV=y
+CONFIG_USB_CONFIGFS_F_CCID=y
+CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
+CONFIG_MMC=y
+CONFIG_MMC_PERF_PROFILING=y
+CONFIG_MMC_RING_BUFFER=y
+CONFIG_MMC_CLKGATE=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
+CONFIG_MMC_TEST=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_QPNP=y
+CONFIG_LEDS_QPNP_FLASH_V2=y
+CONFIG_LEDS_QPNP_WLED=y
+CONFIG_LEDS_QPNP_HAPTICS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_EDAC=y
+CONFIG_EDAC_MM_EDAC=y
+CONFIG_EDAC_KRYO3XX_ARM64=y
+CONFIG_EDAC_KRYO3XX_ARM64_PANIC_ON_UE=y
+CONFIG_EDAC_QCOM_LLCC=y
+CONFIG_EDAC_QCOM_LLCC_PANIC_ON_UE=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_QPNP=y
+CONFIG_DMADEVICES=y
+CONFIG_QCOM_GPI_DMA=y
+CONFIG_QCOM_GPI_DMA_DEBUG=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_ION=y
+CONFIG_ION_MSM=y
+CONFIG_GSI=y
+CONFIG_IPA3=y
+CONFIG_RMNET_IPA3=y
+CONFIG_IPA_UT=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_QPNP_COINCELL=y
+CONFIG_QPNP_REVID=y
+CONFIG_USB_BAM=y
+CONFIG_MSM_11AD=m
+CONFIG_SEEMP_CORE=y
+CONFIG_QCOM_GENI_SE=y
+CONFIG_CLOCK_QPNP_DIV=y
+CONFIG_MSM_CLK_RPMH=y
+CONFIG_CLOCK_CPU_OSM=y
+CONFIG_MSM_CLK_AOP_QMP=y
+CONFIG_QCOM_MDSS_PLL=y
+CONFIG_REMOTE_SPINLOCK_MSM=y
+CONFIG_MSM_QMP=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
+CONFIG_ARM_SMMU=y
+CONFIG_QCOM_LAZY_MAPPING=y
+CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_DEBUG_TRACKING=y
+CONFIG_IOMMU_TESTS=y
+CONFIG_QCOM_CPUSS_DUMP=y
+CONFIG_QCOM_RUN_QUEUE_STATS=y
+CONFIG_QCOM_LLCC=y
+CONFIG_QCOM_SDM670_LLCC=y
+CONFIG_MSM_SERVICE_LOCATOR=y
+CONFIG_MSM_SERVICE_NOTIFIER=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_MSM_CORE_HANG_DETECT=y
+CONFIG_MSM_GLADIATOR_HANG_DETECT=y
+CONFIG_MSM_GLADIATOR_ERP=y
+CONFIG_QCOM_EUD=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_QCOM_SECURE_BUFFER=y
+CONFIG_QCOM_EARLY_RANDOM=y
+CONFIG_MSM_SMEM=y
+CONFIG_MSM_GLINK=y
+CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
+CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
+CONFIG_MSM_GLINK_SPI_XPRT=y
+CONFIG_MSM_SPCOM=y
+CONFIG_MSM_SPSS_UTILS=y
+CONFIG_TRACER_PKT=y
+CONFIG_QTI_RPMH_API=y
+CONFIG_MSM_SMP2P=y
+CONFIG_MSM_SMP2P_TEST=y
+CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
+CONFIG_MSM_QMI_INTERFACE=y
+CONFIG_MSM_GLINK_PKT=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_PIL_MSS_QDSP6V5=y
+CONFIG_ICNSS=y
+CONFIG_ICNSS_DEBUG=y
+CONFIG_QCOM_COMMAND_DB=y
+CONFIG_MSM_ADSP_LOADER=y
+CONFIG_MSM_PERFORMANCE=y
+CONFIG_MSM_CDSP_LOADER=y
+CONFIG_MSM_AVTIMER=y
+CONFIG_MSM_EVENT_TIMER=y
+CONFIG_MSM_PM=y
+CONFIG_MSM_QBT1000=y
+CONFIG_APSS_CORE_EA=y
+CONFIG_QCOM_DCC_V2=y
+CONFIG_QTI_RPM_STATS_LOG=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_QCOM_BIMC_BWMON=y
+CONFIG_ARM_MEMLAT_MON=y
+CONFIG_QCOMCCI_HWMON=y
+CONFIG_QCOM_M4M_HWMON=y
+CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
+CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON=y
+CONFIG_DEVFREQ_GOV_MEMLAT=y
+CONFIG_DEVFREQ_SIMPLE_DEV=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
+CONFIG_EXTCON_USB_GPIO=y
+CONFIG_IIO=y
+CONFIG_QCOM_RRADC=y
+CONFIG_PWM=y
+CONFIG_PWM_QPNP=y
+CONFIG_ARM_GIC_V3_ACL=y
+CONFIG_PHY_XGENE=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_SENSORS_SSC=y
+CONFIG_MSM_TZ_LOG=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_FUSE_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_EFIVAR_FS=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+CONFIG_PAGE_OWNER=y
+CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_SLUB_DEBUG_PANIC_ON=y
+CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y
+CONFIG_PAGE_POISONING=y
+CONFIG_DEBUG_OBJECTS=y
+CONFIG_DEBUG_OBJECTS_FREE=y
+CONFIG_DEBUG_OBJECTS_TIMERS=y
+CONFIG_DEBUG_OBJECTS_WORK=y
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
+CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
+CONFIG_SLUB_DEBUG_ON=y
+CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000
+CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y
+CONFIG_WQ_WATCHDOG=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_PANIC_ON_SCHED_BUG=y
+CONFIG_PANIC_ON_RT_THROTTLING=y
+CONFIG_SCHEDSTATS=y
+CONFIG_SCHED_STACK_END_CHECK=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_LIST=y
+CONFIG_FAULT_INJECTION=y
+CONFIG_FAIL_PAGE_ALLOC=y
+CONFIG_FAULT_INJECTION_DEBUG_FS=y
+CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
+CONFIG_IPC_LOGGING=y
+CONFIG_QCOM_RTB=y
+CONFIG_QCOM_RTB_SEPARATE_CPUS=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_IRQSOFF_TRACER=y
+CONFIG_PREEMPT_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_CPU_FREQ_SWITCH_PROFILER=y
+CONFIG_LKDTM=y
+CONFIG_MEMTEST=y
+CONFIG_PANIC_ON_DATA_CORRUPTION=y
+CONFIG_ARM64_PTDUMP=y
+CONFIG_PID_IN_CONTEXTIDR=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_SOURCE_ETM4X=y
+CONFIG_CORESIGHT_REMOTE_ETM=y
+CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
+CONFIG_CORESIGHT_QCOM_REPLICATOR=y
+CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_TPDA=y
+CONFIG_CORESIGHT_TPDM=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_CORESIGHT_DUMMY=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
+CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_CRYPTO_CRC32_ARM64=y
+CONFIG_XZ_DEC=y
+CONFIG_QMI_ENCDEC=y
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index 9ec1beb..9f98841 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -330,6 +330,7 @@
CONFIG_QTI_THERMAL_LIMITS_DCVS=y
CONFIG_QTI_VIRTUAL_SENSOR=y
CONFIG_QTI_REG_COOLING_DEVICE=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
CONFIG_MFD_I2C_PMIC=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_WCD9XXX_CODEC_CORE=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index 30d0d4b..cd4cbb1 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -56,7 +56,6 @@
# CONFIG_IOSCHED_DEADLINE is not set
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_SDM845=y
-CONFIG_ARCH_SDM670=y
CONFIG_PCI=y
CONFIG_PCI_MSM=y
CONFIG_SCHED_MC=y
@@ -309,7 +308,6 @@
CONFIG_SPMI=y
CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
CONFIG_PINCTRL_SDM845=y
-CONFIG_PINCTRL_SDM670=y
CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
@@ -337,6 +335,7 @@
CONFIG_QTI_THERMAL_LIMITS_DCVS=y
CONFIG_QTI_VIRTUAL_SENSOR=y
CONFIG_QTI_REG_COOLING_DEVICE=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
CONFIG_MFD_I2C_PMIC=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_WCD9XXX_CODEC_CORE=y
@@ -496,7 +495,6 @@
CONFIG_QCOM_RUN_QUEUE_STATS=y
CONFIG_QCOM_LLCC=y
CONFIG_QCOM_SDM845_LLCC=y
-CONFIG_QCOM_SDM670_LLCC=y
CONFIG_MSM_SERVICE_LOCATOR=y
CONFIG_MSM_SERVICE_NOTIFIER=y
CONFIG_MSM_BOOT_STATS=y
@@ -601,7 +599,6 @@
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_LOCKUP_DETECTOR=y
-CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y
CONFIG_WQ_WATCHDOG=y
CONFIG_PANIC_TIMEOUT=5
CONFIG_PANIC_ON_SCHED_BUG=y
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index a2c2478..4bcfe01 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -217,10 +217,11 @@ void update_vsyscall(struct timekeeper *tk)
/* tkr_mono.cycle_last == tkr_raw.cycle_last */
vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
vdso_data->raw_time_sec = tk->raw_time.tv_sec;
- vdso_data->raw_time_nsec = tk->raw_time.tv_nsec;
+ vdso_data->raw_time_nsec = (tk->raw_time.tv_nsec <<
+ tk->tkr_raw.shift) +
+ tk->tkr_raw.xtime_nsec;
vdso_data->xtime_clock_sec = tk->xtime_sec;
vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec;
- /* tkr_raw.xtime_nsec == 0 */
vdso_data->cs_mono_mult = tk->tkr_mono.mult;
vdso_data->cs_raw_mult = tk->tkr_raw.mult;
/* tkr_mono.shift == tkr_raw.shift */
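Note: update_vsyscall() now publishes raw_time_nsec already left-shifted by tkr_raw.shift, with tkr_raw.xtime_nsec folded in, which is why the companion hunk in gettimeofday.S below can drop its extra lsl. A minimal C sketch of the reader-side arithmetic this enables (struct and function names are illustrative, not the real vDSO data layout):

#include <stdint.h>

/* Sketch only: fields mirror the vdso_data members touched above. */
struct raw_vdso_sketch {
	uint64_t cs_cycle_last;   /* counter value at last update */
	uint64_t cs_raw_mult;     /* tkr_raw.mult */
	uint32_t shift;           /* tkr_raw.shift (== tkr_mono.shift) */
	uint64_t raw_time_sec;
	uint64_t raw_time_nsec;   /* stored pre-shifted by 'shift' */
};

/* CLOCK_MONOTONIC_RAW nanoseconds past raw_time_sec: because the base
 * is already left-shifted, a single right shift at the end suffices. */
static uint64_t raw_nsec_sketch(const struct raw_vdso_sketch *vd,
				uint64_t cycles_now)
{
	uint64_t ns = vd->raw_time_nsec;

	ns += (cycles_now - vd->cs_cycle_last) * vd->cs_raw_mult;
	return ns >> vd->shift;
}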
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
index e00b467..76320e9 100644
--- a/arch/arm64/kernel/vdso/gettimeofday.S
+++ b/arch/arm64/kernel/vdso/gettimeofday.S
@@ -256,7 +256,6 @@
seqcnt_check fail=monotonic_raw
/* All computations are done with left-shifted nsecs. */
- lsl x14, x14, x12
get_nsec_per_sec res=x9
lsl x9, x9, x12
diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
index 1c2a5e2..e93c949 100644
--- a/arch/frv/include/asm/atomic.h
+++ b/arch/frv/include/asm/atomic.h
@@ -139,7 +139,7 @@ static inline void atomic64_dec(atomic64_t *v)
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_inc_return((v)) == 0)
-
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
#define atomic_cmpxchg(v, old, new) (cmpxchg(&(v)->counter, old, new))
#define atomic_xchg(v, new) (xchg(&(v)->counter, new))
@@ -161,6 +161,39 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
return c;
}
+static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
+{
+ long long c, old;
+
+ c = atomic64_read(v);
+ for (;;) {
+ if (unlikely(c == u))
+ break;
+ old = atomic64_cmpxchg(v, c, c + i);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return c != u;
+}
+
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
+{
+ long long c, old, dec;
+
+ c = atomic64_read(v);
+ for (;;) {
+ dec = c - 1;
+ if (unlikely(dec < 0))
+ break;
+ old = atomic64_cmpxchg((v), c, dec);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return dec;
+}
+
#define ATOMIC_OP(op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
index 836f147..efa59f1 100644
--- a/arch/frv/mm/elf-fdpic.c
+++ b/arch/frv/mm/elf-fdpic.c
@@ -74,7 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
addr = PAGE_ALIGN(addr);
vma = find_vma(current->mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
goto success;
}
diff --git a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile
index 2728a9a..145b5ce 100644
--- a/arch/mips/boot/Makefile
+++ b/arch/mips/boot/Makefile
@@ -128,19 +128,19 @@
-DADDR_BITS=$(ADDR_BITS) \
-DADDR_CELLS=$(itb_addr_cells)
-$(obj)/vmlinux.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
$(call if_changed_dep,cpp_its_S,none,vmlinux.bin)
-$(obj)/vmlinux.gz.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.gz.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
$(call if_changed_dep,cpp_its_S,gzip,vmlinux.bin.gz)
-$(obj)/vmlinux.bz2.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.bz2.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
$(call if_changed_dep,cpp_its_S,bzip2,vmlinux.bin.bz2)
-$(obj)/vmlinux.lzma.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.lzma.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
$(call if_changed_dep,cpp_its_S,lzma,vmlinux.bin.lzma)
-$(obj)/vmlinux.lzo.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.lzo.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
$(call if_changed_dep,cpp_its_S,lzo,vmlinux.bin.lzo)
quiet_cmd_itb-image = ITB $@
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 12c7181..c86b66b 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -804,8 +804,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
break;
}
/* Compact branch: BNEZC || JIALC */
- if (insn.i_format.rs)
+ if (!insn.i_format.rs) {
+ /* JIALC: set $31/ra */
regs->regs[31] = epc + 4;
+ }
regs->cp0_epc += 8;
break;
#endif
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index d08ea3f..a44052c 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -92,7 +92,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/arch/mn10300/include/asm/switch_to.h b/arch/mn10300/include/asm/switch_to.h
index 393d311..67e333a 100644
--- a/arch/mn10300/include/asm/switch_to.h
+++ b/arch/mn10300/include/asm/switch_to.h
@@ -16,7 +16,7 @@
struct task_struct;
struct thread_struct;
-#if !defined(CONFIG_LAZY_SAVE_FPU)
+#if defined(CONFIG_FPU) && !defined(CONFIG_LAZY_SAVE_FPU)
struct fpu_state_struct;
extern asmlinkage void fpu_save(struct fpu_state_struct *);
#define switch_fpu(prev, next) \
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index 0a393a0..1d7691f 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -88,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
+ struct vm_area_struct *vma, *prev;
unsigned long task_size = TASK_SIZE;
int do_color_align, last_mmap;
struct vm_unmapped_area_info info;
@@ -115,9 +115,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
else
addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
+ vma = find_vma_prev(mm, addr, &prev);
if (task_size - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)) &&
+ (!prev || addr >= vm_end_gap(prev)))
goto found_addr;
}
@@ -141,7 +142,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
const unsigned long len, const unsigned long pgoff,
const unsigned long flags)
{
- struct vm_area_struct *vma;
+ struct vm_area_struct *vma, *prev;
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
int do_color_align, last_mmap;
@@ -175,9 +176,11 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
addr = COLOR_ALIGN(addr, last_mmap, pgoff);
else
addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
+
+ vma = find_vma_prev(mm, addr, &prev);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)) &&
+ (!prev || addr >= vm_end_gap(prev)))
goto found_addr;
}
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 1ba82ea..2e2fc1e 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1411,10 +1411,8 @@
.align 7
do_hash_page:
#ifdef CONFIG_PPC_STD_MMU_64
- andis. r0,r4,0xa410 /* weird error? */
+ andis. r0,r4,0xa450 /* weird error? */
bne- handle_page_fault /* if not, try to insert a HPTE */
- andis. r0,r4,DSISR_DABRMATCH@h
- bne- handle_dabr_fault
CURRENT_THREAD_INFO(r11, r1)
lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */
andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */
@@ -1438,11 +1436,16 @@
/* Error */
blt- 13f
+
+ /* Reload DSISR into r4 for the DABR check below */
+ ld r4,_DSISR(r1)
#endif /* CONFIG_PPC_STD_MMU_64 */
/* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
-11: ld r4,_DAR(r1)
+11: andis. r0,r4,DSISR_DABRMATCH@h
+ bne- handle_dabr_fault
+ ld r4,_DAR(r1)
ld r5,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_page_fault
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index e785cc9..fe97cbe 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -511,6 +511,15 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
#endif
+ /*
+ * jprobes use jprobe_return() which skips the normal return
+ * path of the function, and this messes up the accounting of the
+ * function graph tracer.
+ *
+ * Pause function graph tracing while performing the jprobe function.
+ */
+ pause_graph_tracing();
+
return 1;
}
@@ -533,6 +542,8 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
* saved regs...
*/
memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
+ /* It's OK to start function graph tracing again */
+ unpause_graph_tracing();
preempt_enable_no_resched();
return 1;
}
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 094deb6..5c02984 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2807,12 +2807,34 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
int r;
int srcu_idx;
+ unsigned long ebb_regs[3] = {}; /* shut up GCC */
if (!vcpu->arch.sane) {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return -EINVAL;
}
+ /*
+ * Don't allow entry with a suspended transaction, because
+ * the guest entry/exit code will lose it.
+ * If the guest has TM enabled, save away their TM-related SPRs
+ * (they will get restored by the TM unavailable interrupt).
+ */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
+ (current->thread.regs->msr & MSR_TM)) {
+ if (MSR_TM_ACTIVE(current->thread.regs->msr)) {
+ run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+ run->fail_entry.hardware_entry_failure_reason = 0;
+ return -EINVAL;
+ }
+ current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
+ current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
+ current->thread.tm_texasr = mfspr(SPRN_TEXASR);
+ current->thread.regs->msr &= ~MSR_TM;
+ }
+#endif
+
kvmppc_core_prepare_to_enter(vcpu);
/* No need to go into the guest when all we'll do is come back out */
@@ -2834,6 +2856,13 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
flush_all_to_thread(current);
+ /* Save userspace EBB register values */
+ if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+ ebb_regs[0] = mfspr(SPRN_EBBHR);
+ ebb_regs[1] = mfspr(SPRN_EBBRR);
+ ebb_regs[2] = mfspr(SPRN_BESCR);
+ }
+
vcpu->arch.wqp = &vcpu->arch.vcore->wq;
vcpu->arch.pgdir = current->mm->pgd;
vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
@@ -2856,6 +2885,13 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
r = kvmppc_xics_rm_complete(vcpu, 0);
} while (is_kvmppc_resume_guest(r));
+ /* Restore userspace EBB register values */
+ if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+ mtspr(SPRN_EBBHR, ebb_regs[0]);
+ mtspr(SPRN_EBBRR, ebb_regs[1]);
+ mtspr(SPRN_BESCR, ebb_regs[2]);
+ }
+
out:
vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
atomic_dec(&vcpu->kvm->arch.vcpus_running);
diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c
index 35254a6..a2b2d97 100644
--- a/arch/powerpc/mm/hugetlbpage-radix.c
+++ b/arch/powerpc/mm/hugetlbpage-radix.c
@@ -65,7 +65,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
/*
diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
index 2f1e443..5bc2845 100644
--- a/arch/powerpc/mm/mmap.c
+++ b/arch/powerpc/mm/mmap.c
@@ -106,7 +106,7 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
@@ -142,7 +142,7 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 2b27458..c4d5c9c 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -105,7 +105,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
if ((mm->task_size - len) < addr)
return 0;
vma = find_vma(mm, addr);
- return (!vma || (addr + len) <= vma->vm_start);
+ return (!vma || (addr + len) <= vm_start_gap(vma));
}
static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
diff --git a/arch/powerpc/perf/perf_regs.c b/arch/powerpc/perf/perf_regs.c
index d24a8a3..28ae8bd 100644
--- a/arch/powerpc/perf/perf_regs.c
+++ b/arch/powerpc/perf/perf_regs.c
@@ -100,5 +100,6 @@ void perf_get_regs_user(struct perf_regs *regs_user,
struct pt_regs *regs_user_copy)
{
regs_user->regs = task_pt_regs(current);
- regs_user->abi = perf_reg_abi(current);
+ regs_user->abi = (regs_user->regs) ? perf_reg_abi(current) :
+ PERF_SAMPLE_REGS_ABI_NONE;
}
diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
index 32c46b4..b53f80f 100644
--- a/arch/powerpc/sysdev/xics/icp-opal.c
+++ b/arch/powerpc/sysdev/xics/icp-opal.c
@@ -130,14 +130,16 @@ static void icp_opal_cause_ipi(int cpu, unsigned long data)
{
int hw_cpu = get_hard_smp_processor_id(cpu);
+ kvmppc_set_host_ipi(cpu, 1);
opal_int_set_mfrr(hw_cpu, IPI_PRIORITY);
}
static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id)
{
- int hw_cpu = hard_smp_processor_id();
+ int cpu = smp_processor_id();
- opal_int_set_mfrr(hw_cpu, 0xff);
+ kvmppc_set_host_ipi(cpu, 0);
+ opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
return smp_ipi_demux();
}
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index c438168..3bc2825 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -240,12 +240,17 @@
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
.Lsie_done:
# some program checks are suppressing. C code (e.g. do_protection_exception)
-# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
-# instructions between sie64a and .Lsie_done should not cause program
-# interrupts. So lets use a nop (47 00 00 00) as a landing pad.
+# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
+# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
+# Other instructions between sie64a and .Lsie_done should not cause program
+# interrupts. So let's use 3 nops as a landing pad for all possible rewinds.
# See also .Lcleanup_sie
-.Lrewind_pad:
- nop 0
+.Lrewind_pad6:
+ nopr 7
+.Lrewind_pad4:
+ nopr 7
+.Lrewind_pad2:
+ nopr 7
.globl sie_exit
sie_exit:
lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
@@ -258,7 +263,9 @@
stg %r14,__SF_EMPTY+16(%r15) # set exit reason code
j sie_exit
- EX_TABLE(.Lrewind_pad,.Lsie_fault)
+ EX_TABLE(.Lrewind_pad6,.Lsie_fault)
+ EX_TABLE(.Lrewind_pad4,.Lsie_fault)
+ EX_TABLE(.Lrewind_pad2,.Lsie_fault)
EX_TABLE(sie_exit,.Lsie_fault)
EXPORT_SYMBOL(sie64a)
EXPORT_SYMBOL(sie_exit)
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 4aa8a7e..f5d7984 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -972,11 +972,12 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
ptr = asce.origin * 4096;
if (asce.r) {
*fake = 1;
+ ptr = 0;
asce.dt = ASCE_TYPE_REGION1;
}
switch (asce.dt) {
case ASCE_TYPE_REGION1:
- if (vaddr.rfx01 > asce.tl && !asce.r)
+ if (vaddr.rfx01 > asce.tl && !*fake)
return PGM_REGION_FIRST_TRANS;
break;
case ASCE_TYPE_REGION2:
@@ -1004,8 +1005,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
union region1_table_entry rfte;
if (*fake) {
- /* offset in 16EB guest memory block */
- ptr = ptr + ((unsigned long) vaddr.rsx << 53UL);
+ ptr += (unsigned long) vaddr.rfx << 53;
rfte.val = ptr;
goto shadow_r2t;
}
@@ -1031,8 +1031,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
union region2_table_entry rste;
if (*fake) {
- /* offset in 8PB guest memory block */
- ptr = ptr + ((unsigned long) vaddr.rtx << 42UL);
+ ptr += (unsigned long) vaddr.rsx << 42;
rste.val = ptr;
goto shadow_r3t;
}
@@ -1059,8 +1058,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
union region3_table_entry rtte;
if (*fake) {
- /* offset in 4TB guest memory block */
- ptr = ptr + ((unsigned long) vaddr.sx << 31UL);
+ ptr += (unsigned long) vaddr.rtx << 31;
rtte.val = ptr;
goto shadow_sgt;
}
@@ -1096,8 +1094,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
union segment_table_entry ste;
if (*fake) {
- /* offset in 2G guest memory block */
- ptr = ptr + ((unsigned long) vaddr.sx << 20UL);
+ ptr += (unsigned long) vaddr.sx << 20;
ste.val = ptr;
goto shadow_pgt;
}
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index eb9df28..812368f 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -98,7 +98,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
@@ -136,7 +136,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index 6777177..7df7d59 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -63,7 +63,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
@@ -113,7 +113,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index fe8b8ee..02e05e2 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -118,7 +118,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
vma = find_vma(mm, addr);
if (task_size - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
@@ -181,7 +181,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
vma = find_vma(mm, addr);
if (task_size - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 4094a51..496fa92 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -85,7 +85,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
void bad_trap(struct pt_regs *regs, long lvl)
{
- char buffer[32];
+ char buffer[36];
siginfo_t info;
if (notify_die(DIE_TRAP, "bad trap", regs,
@@ -116,7 +116,7 @@ void bad_trap(struct pt_regs *regs, long lvl)
void bad_trap_tl1(struct pt_regs *regs, long lvl)
{
- char buffer[32];
+ char buffer[36];
if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
0, lvl, SIGTRAP) == NOTIFY_STOP)
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 988acc8..58cde8d 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -116,7 +116,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
addr = ALIGN(addr, HPAGE_SIZE);
vma = find_vma(mm, addr);
if (task_size - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
if (mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index 77ceaa3..67508b2 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -232,7 +232,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
if (current->mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index cb85222..3bdb917 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -431,11 +431,11 @@ static __initconst const u64 skl_hw_cache_event_ids
[ C(DTLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */
- [ C(RESULT_MISS) ] = 0x608, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
+ [ C(RESULT_MISS) ] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */
- [ C(RESULT_MISS) ] = 0x649, /* DTLB_STORE_MISSES.WALK_COMPLETED */
+ [ C(RESULT_MISS) ] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0x0,
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index a55ed63..1119414 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -140,7 +140,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (end - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
@@ -183,7 +183,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index 2ae8584..fe342e8 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -144,7 +144,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
if (mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 6b7ce62..aca6295 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -100,5 +100,6 @@ void __init initmem_init(void)
printk(KERN_DEBUG "High memory starts at vaddr %08lx\n",
(ulong) pfn_to_kaddr(highstart_pfn));
+ __vmalloc_start_set = true;
setup_bootmem_allocator();
}
diff --git a/arch/xtensa/include/asm/irq.h b/arch/xtensa/include/asm/irq.h
index f71f88e..19707db 100644
--- a/arch/xtensa/include/asm/irq.h
+++ b/arch/xtensa/include/asm/irq.h
@@ -29,7 +29,8 @@ static inline void variant_irq_disable(unsigned int irq) { }
# define PLATFORM_NR_IRQS 0
#endif
#define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS
-#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS)
+#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS + 1)
+#define XTENSA_PIC_LINUX_IRQ(hwirq) ((hwirq) + 1)
#if VARIANT_NR_IRQS == 0
static inline void variant_init_irq(void) { }
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index 4ac3d23..4416944 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -34,11 +34,6 @@ asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
{
int irq = irq_find_mapping(NULL, hwirq);
- if (hwirq >= NR_IRQS) {
- printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
- __func__, hwirq);
- }
-
#ifdef CONFIG_DEBUG_STACKOVERFLOW
/* Debugging check for stack overflow: is there less than 1KB free? */
{
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
index 83cf496..3aaaae1 100644
--- a/arch/xtensa/kernel/syscall.c
+++ b/arch/xtensa/kernel/syscall.c
@@ -87,7 +87,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
/* At this point: (!vmm || addr < vmm->vm_end). */
if (TASK_SIZE - len < addr)
return -ENOMEM;
- if (!vmm || addr + len <= vmm->vm_start)
+ if (!vmm || addr + len <= vm_start_gap(vmm))
return addr;
addr = vmm->vm_end;
if (flags & MAP_SHARED)
diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
index dbeea2b..1fda7e2 100644
--- a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
+++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
@@ -24,16 +24,18 @@
/* Interrupt configuration. */
-#define PLATFORM_NR_IRQS 10
+#define PLATFORM_NR_IRQS 0
/* Default assignment of LX60 devices to external interrupts. */
#ifdef CONFIG_XTENSA_MX
#define DUART16552_INTNUM XCHAL_EXTINT3_NUM
#define OETH_IRQ XCHAL_EXTINT4_NUM
+#define C67X00_IRQ XCHAL_EXTINT8_NUM
#else
#define DUART16552_INTNUM XCHAL_EXTINT0_NUM
#define OETH_IRQ XCHAL_EXTINT1_NUM
+#define C67X00_IRQ XCHAL_EXTINT5_NUM
#endif
/*
@@ -63,5 +65,5 @@
#define C67X00_PADDR (XCHAL_KIO_PADDR + 0x0D0D0000)
#define C67X00_SIZE 0x10
-#define C67X00_IRQ 5
+
#endif /* __XTENSA_XTAVNET_HARDWARE_H */
diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c
index 779be72..42285f3 100644
--- a/arch/xtensa/platforms/xtfpga/setup.c
+++ b/arch/xtensa/platforms/xtfpga/setup.c
@@ -175,8 +175,8 @@ static struct resource ethoc_res[] = {
.flags = IORESOURCE_MEM,
},
[2] = { /* IRQ number */
- .start = OETH_IRQ,
- .end = OETH_IRQ,
+ .start = XTENSA_PIC_LINUX_IRQ(OETH_IRQ),
+ .end = XTENSA_PIC_LINUX_IRQ(OETH_IRQ),
.flags = IORESOURCE_IRQ,
},
};
@@ -213,8 +213,8 @@ static struct resource c67x00_res[] = {
.flags = IORESOURCE_MEM,
},
[1] = { /* IRQ number */
- .start = C67X00_IRQ,
- .end = C67X00_IRQ,
+ .start = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ),
+ .end = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ),
.flags = IORESOURCE_IRQ,
},
};
@@ -247,7 +247,7 @@ static struct resource serial_resource = {
static struct plat_serial8250_port serial_platform_data[] = {
[0] = {
.mapbase = DUART16552_PADDR,
- .irq = DUART16552_INTNUM,
+ .irq = XTENSA_PIC_LINUX_IRQ(DUART16552_INTNUM),
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
UPF_IOREMAP,
.iotype = XCHAL_HAVE_BE ? UPIO_MEM32BE : UPIO_MEM32,
diff --git a/block/partitions/msdos.c b/block/partitions/msdos.c
index 93e7c1b..5610cd5 100644
--- a/block/partitions/msdos.c
+++ b/block/partitions/msdos.c
@@ -300,6 +300,8 @@ static void parse_bsd(struct parsed_partitions *state,
continue;
bsd_start = le32_to_cpu(p->p_offset);
bsd_size = le32_to_cpu(p->p_size);
+ if (memcmp(flavour, "bsd\0", 4) == 0)
+ bsd_start += offset;
if (offset == bsd_start && size == bsd_size)
/* full parent partition, we have it already */
continue;
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 23f3b95..147d2e3 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -889,13 +889,13 @@ int __pm_runtime_idle(struct device *dev, int rpmflags)
unsigned long flags;
int retval;
- might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
-
if (rpmflags & RPM_GET_PUT) {
if (!atomic_dec_and_test(&dev->power.usage_count))
return 0;
}
+ might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
spin_lock_irqsave(&dev->power.lock, flags);
retval = rpm_idle(dev, rpmflags);
spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -921,13 +921,13 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags)
unsigned long flags;
int retval;
- might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
-
if (rpmflags & RPM_GET_PUT) {
if (!atomic_dec_and_test(&dev->power.usage_count))
return 0;
}
+ might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
spin_lock_irqsave(&dev->power.lock, flags);
retval = rpm_suspend(dev, rpmflags);
spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -952,7 +952,8 @@ int __pm_runtime_resume(struct device *dev, int rpmflags)
unsigned long flags;
int retval;
- might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+ might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
+ dev->power.runtime_status != RPM_ACTIVE);
if (rpmflags & RPM_GET_PUT)
atomic_inc(&dev->power.usage_count);
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 4a80ee7..c42202d 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -1436,34 +1436,35 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
static void make_response(struct xen_blkif_ring *ring, u64 id,
unsigned short op, int st)
{
- struct blkif_response resp;
+ struct blkif_response *resp;
unsigned long flags;
union blkif_back_rings *blk_rings;
int notify;
- resp.id = id;
- resp.operation = op;
- resp.status = st;
-
spin_lock_irqsave(&ring->blk_ring_lock, flags);
blk_rings = &ring->blk_rings;
/* Place on the response ring for the relevant domain. */
switch (ring->blkif->blk_protocol) {
case BLKIF_PROTOCOL_NATIVE:
- memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
- &resp, sizeof(resp));
+ resp = RING_GET_RESPONSE(&blk_rings->native,
+ blk_rings->native.rsp_prod_pvt);
break;
case BLKIF_PROTOCOL_X86_32:
- memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
- &resp, sizeof(resp));
+ resp = RING_GET_RESPONSE(&blk_rings->x86_32,
+ blk_rings->x86_32.rsp_prod_pvt);
break;
case BLKIF_PROTOCOL_X86_64:
- memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
- &resp, sizeof(resp));
+ resp = RING_GET_RESPONSE(&blk_rings->x86_64,
+ blk_rings->x86_64.rsp_prod_pvt);
break;
default:
BUG();
}
+
+ resp->id = id;
+ resp->operation = op;
+ resp->status = st;
+
blk_rings->common.rsp_prod_pvt++;
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
spin_unlock_irqrestore(&ring->blk_ring_lock, flags);
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index dea61f6..ecb35fe 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -75,9 +75,8 @@ extern unsigned int xenblk_max_queues;
struct blkif_common_request {
char dummy;
};
-struct blkif_common_response {
- char dummy;
-};
+
+/* i386 protocol version */
struct blkif_x86_32_request_rw {
uint8_t nr_segments; /* number of segments */
@@ -129,14 +128,6 @@ struct blkif_x86_32_request {
} u;
} __attribute__((__packed__));
-/* i386 protocol version */
-#pragma pack(push, 4)
-struct blkif_x86_32_response {
- uint64_t id; /* copied from request */
- uint8_t operation; /* copied from request */
- int16_t status; /* BLKIF_RSP_??? */
-};
-#pragma pack(pop)
/* x86_64 protocol version */
struct blkif_x86_64_request_rw {
@@ -193,18 +184,12 @@ struct blkif_x86_64_request {
} u;
} __attribute__((__packed__));
-struct blkif_x86_64_response {
- uint64_t __attribute__((__aligned__(8))) id;
- uint8_t operation; /* copied from request */
- int16_t status; /* BLKIF_RSP_??? */
-};
-
DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
- struct blkif_common_response);
+ struct blkif_response);
DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
- struct blkif_x86_32_response);
+ struct blkif_response __packed);
DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
- struct blkif_x86_64_response);
+ struct blkif_response);
union blkif_back_rings {
struct blkif_back_ring native;
@@ -281,6 +266,7 @@ struct xen_blkif_ring {
wait_queue_head_t wq;
atomic_t inflight;
+ bool active;
/* One thread per blkif ring. */
struct task_struct *xenblkd;
unsigned int waiting_reqs;
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 3cc6d1d..9b69fe4 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -159,7 +159,7 @@ static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
init_waitqueue_head(&ring->shutdown_wq);
ring->blkif = blkif;
ring->st_print = jiffies;
- xen_blkif_get(blkif);
+ ring->active = true;
}
return 0;
@@ -249,6 +249,9 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
struct xen_blkif_ring *ring = &blkif->rings[r];
unsigned int i = 0;
+ if (!ring->active)
+ continue;
+
if (ring->xenblkd) {
kthread_stop(ring->xenblkd);
wake_up(&ring->shutdown_wq);
@@ -296,7 +299,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
BUG_ON(ring->free_pages_num != 0);
BUG_ON(ring->persistent_gnt_c != 0);
WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
- xen_blkif_put(blkif);
+ ring->active = false;
}
blkif->nr_ring_pages = 0;
/*
diff --git a/drivers/bluetooth/btfm_slim.c b/drivers/bluetooth/btfm_slim.c
index dc9bb0b..f50bf6f 100644
--- a/drivers/bluetooth/btfm_slim.c
+++ b/drivers/bluetooth/btfm_slim.c
@@ -127,7 +127,7 @@ int btfm_slim_enable_ch(struct btfmslim *btfmslim, struct btfmslim_ch *ch,
if (!btfmslim || !ch)
return -EINVAL;
- BTFMSLIM_DBG("port:%d", ch->port);
+ BTFMSLIM_DBG("port: %d ch: %d", ch->port, ch->ch);
/* Define the channel with below parameters */
prop.prot = SLIM_AUTO_ISO;
diff --git a/drivers/bluetooth/btfm_slim.h b/drivers/bluetooth/btfm_slim.h
index 00d46a5..161be78 100644
--- a/drivers/bluetooth/btfm_slim.h
+++ b/drivers/bluetooth/btfm_slim.h
@@ -13,7 +13,7 @@
#define BTFM_SLIM_H
#include <linux/slimbus/slimbus.h>
-#define BTFMSLIM_DBG(fmt, arg...) pr_debug("%s: " fmt "\n", __func__, ## arg)
+#define BTFMSLIM_DBG(fmt, arg...) pr_debug(fmt "\n", ## arg)
#define BTFMSLIM_INFO(fmt, arg...) pr_info("%s: " fmt "\n", __func__, ## arg)
#define BTFMSLIM_ERR(fmt, arg...) pr_err("%s: " fmt "\n", __func__, ## arg)
diff --git a/drivers/bluetooth/btfm_slim_codec.c b/drivers/bluetooth/btfm_slim_codec.c
index 86760cd..73a789c 100644
--- a/drivers/bluetooth/btfm_slim_codec.c
+++ b/drivers/bluetooth/btfm_slim_codec.c
@@ -54,8 +54,8 @@ static int btfm_slim_dai_startup(struct snd_pcm_substream *substream,
int ret;
struct btfmslim *btfmslim = dai->dev->platform_data;
- BTFMSLIM_DBG("substream = %s stream = %d",
- substream->name, substream->stream);
+ BTFMSLIM_DBG("substream = %s stream = %d dai name = %s",
+ substream->name, substream->stream, dai->name);
ret = btfm_slim_hw_init(btfmslim);
return ret;
}
@@ -65,8 +65,8 @@ static void btfm_slim_dai_shutdown(struct snd_pcm_substream *substream,
{
struct btfmslim *btfmslim = dai->dev->platform_data;
- BTFMSLIM_DBG("substream = %s stream = %d",
- substream->name, substream->stream);
+ BTFMSLIM_DBG("substream = %s stream = %d dai name = %s",
+ substream->name, substream->stream, dai->name);
btfm_slim_hw_deinit(btfmslim);
}
@@ -74,7 +74,7 @@ static int btfm_slim_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
- BTFMSLIM_DBG("dai_name = %s DAI-ID %x rate %d num_ch %d",
+ BTFMSLIM_DBG("dai name = %s DAI-ID %x rate %d num_ch %d",
dai->name, dai->id, params_rate(params),
params_channels(params));
@@ -89,7 +89,7 @@ int btfm_slim_dai_prepare(struct snd_pcm_substream *substream,
struct btfmslim_ch *ch;
uint8_t rxport, grp = false, nchan = 1;
- BTFMSLIM_DBG("dai->name:%s, dai->id: %d, dai->rate: %d", dai->name,
+ BTFMSLIM_DBG("dai name: %s, dai->id: %d, dai->rate: %d", dai->name,
dai->id, dai->rate);
switch (dai->id) {
@@ -137,7 +137,7 @@ int btfm_slim_dai_hw_free(struct snd_pcm_substream *substream,
struct btfmslim_ch *ch;
uint8_t rxport, grp = false, nchan = 1;
- BTFMSLIM_DBG("dai->name:%s, dai->id: %d, dai->rate: %d", dai->name,
+ BTFMSLIM_DBG("dai name: %s, dai->id: %d, dai->rate: %d", dai->name,
dai->id, dai->rate);
switch (dai->id) {
@@ -387,7 +387,7 @@ static struct snd_soc_dai_driver btfmslim_dai[] = {
static struct snd_soc_codec_driver btfmslim_codec = {
.probe = btfm_slim_codec_probe,
.remove = btfm_slim_codec_remove,
- .read = btfm_slim_codec_read,
+ .read = btfm_slim_codec_read,
.write = btfm_slim_codec_write,
};
diff --git a/drivers/bluetooth/btfm_slim_wcn3990.c b/drivers/bluetooth/btfm_slim_wcn3990.c
index c2d5b7b..72e28da 100644
--- a/drivers/bluetooth/btfm_slim_wcn3990.c
+++ b/drivers/bluetooth/btfm_slim_wcn3990.c
@@ -76,7 +76,7 @@ int btfm_slim_chrk_enable_port(struct btfmslim *btfmslim, uint8_t port_num,
uint8_t reg_val = 0;
uint16_t reg;
- BTFMSLIM_DBG("enable(%d)", enable);
+ BTFMSLIM_DBG("port(%d) enable(%d)", port_num, enable);
if (rxport) {
/* Port enable */
reg = CHRK_SB_PGD_PORT_RX_CFGN(port_num - 0x10);
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
index 946025a..84eca4f 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -295,6 +295,8 @@ static int tpm_ibmvtpm_remove(struct vio_dev *vdev)
}
kfree(ibmvtpm);
+ /* For tpm_ibmvtpm_get_desired_dma */
+ dev_set_drvdata(&vdev->dev, NULL);
return 0;
}
@@ -309,13 +311,16 @@ static int tpm_ibmvtpm_remove(struct vio_dev *vdev)
static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev)
{
struct tpm_chip *chip = dev_get_drvdata(&vdev->dev);
- struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
+ struct ibmvtpm_dev *ibmvtpm;
- /* ibmvtpm initializes at probe time, so the data we are
- * asking for may not be set yet. Estimate that 4K required
- * for TCE-mapped buffer in addition to CRQ.
- */
- if (!ibmvtpm)
+ /*
+ * ibmvtpm initializes at probe time, so the data we are
+ * asking for may not be set yet. Estimate that 4K required
+ * for TCE-mapped buffer in addition to CRQ.
+ */
+ if (chip)
+ ibmvtpm = dev_get_drvdata(&chip->dev);
+ else
return CRQ_RES_BUF_SIZE + PAGE_SIZE;
return CRQ_RES_BUF_SIZE + ibmvtpm->rtce_size;
diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c
index 78e0ae5..f12f03d 100644
--- a/drivers/clk/qcom/clk-cpu-osm.c
+++ b/drivers/clk/qcom/clk-cpu-osm.c
@@ -573,55 +573,11 @@ static int clk_osm_enable(struct clk_hw *hw)
}
const struct clk_ops clk_ops_cpu_osm = {
- .enable = clk_osm_enable,
.round_rate = clk_osm_round_rate,
.list_rate = clk_osm_list_rate,
.debug_init = clk_debug_measure_add,
};
-static struct clk_ops clk_ops_core;
-
-static int cpu_clk_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- struct clk_osm *cpuclk = to_clk_osm(hw);
- struct clk_hw *p_hw = clk_hw_get_parent(hw);
- struct clk_osm *parent = to_clk_osm(p_hw);
- int index = 0;
- unsigned long r_rate;
-
- if (!cpuclk || !parent)
- return -EINVAL;
-
- r_rate = clk_osm_round_rate(p_hw, rate, NULL);
-
- if (rate != r_rate) {
- pr_err("invalid requested rate=%ld\n", rate);
- return -EINVAL;
- }
-
- /* Convert rate to table index */
- index = clk_osm_search_table(parent->osm_table,
- parent->num_entries, r_rate);
- if (index < 0) {
- pr_err("cannot set %s to %lu\n", clk_hw_get_name(hw), rate);
- return -EINVAL;
- }
- pr_debug("rate: %lu --> index %d\n", rate, index);
- /*
- * Choose index and send request to OSM hardware.
- * TODO: Program INACTIVE_OS_REQUEST if needed.
- */
- clk_osm_write_reg(parent, index,
- DCVS_PERF_STATE_DESIRED_REG(cpuclk->core_num),
- OSM_BASE);
-
- /* Make sure the write goes through before proceeding */
- clk_osm_mb(parent, OSM_BASE);
-
- return 0;
-}
-
static int l3_clk_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
@@ -657,38 +613,6 @@ static int l3_clk_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static long cpu_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
-{
- struct clk_hw *parent_hw = clk_hw_get_parent(hw);
-
- if (!parent_hw)
- return -EINVAL;
-
- return clk_hw_round_rate(parent_hw, rate);
-}
-
-static unsigned long cpu_clk_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct clk_osm *cpuclk = to_clk_osm(hw);
- struct clk_hw *p_hw = clk_hw_get_parent(hw);
- struct clk_osm *parent = to_clk_osm(p_hw);
- int index = 0;
-
- if (!cpuclk || !parent)
- return -EINVAL;
-
- index = clk_osm_read_reg(parent,
- DCVS_PERF_STATE_DESIRED_REG(cpuclk->core_num));
-
- pr_debug("%s: Index %d, freq %ld\n", __func__, index,
- parent->osm_table[index].frequency);
-
- /* Convert index to frequency */
- return parent->osm_table[index].frequency;
-}
-
static unsigned long l3_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
@@ -759,7 +683,7 @@ static struct clk_osm cpu0_pwrcl_clk = {
.name = "cpu0_pwrcl_clk",
.parent_names = (const char *[]){ "pwrcl_clk" },
.num_parents = 1,
- .ops = &clk_ops_core,
+ .ops = &clk_dummy_ops,
},
};
@@ -771,8 +695,7 @@ static struct clk_osm cpu1_pwrcl_clk = {
.name = "cpu1_pwrcl_clk",
.parent_names = (const char *[]){ "pwrcl_clk" },
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_ops_core,
+ .ops = &clk_dummy_ops,
},
};
@@ -784,8 +707,7 @@ static struct clk_osm cpu2_pwrcl_clk = {
.name = "cpu2_pwrcl_clk",
.parent_names = (const char *[]){ "pwrcl_clk" },
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_ops_core,
+ .ops = &clk_dummy_ops,
},
};
@@ -797,8 +719,7 @@ static struct clk_osm cpu3_pwrcl_clk = {
.name = "cpu3_pwrcl_clk",
.parent_names = (const char *[]){ "pwrcl_clk" },
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_ops_core,
+ .ops = &clk_dummy_ops,
},
};
@@ -816,7 +737,7 @@ static struct clk_osm cpu4_perfcl_clk = {
.name = "cpu4_perfcl_clk",
.parent_names = (const char *[]){ "perfcl_clk" },
.num_parents = 1,
- .ops = &clk_ops_core,
+ .ops = &clk_dummy_ops,
},
};
@@ -828,8 +749,7 @@ static struct clk_osm cpu5_perfcl_clk = {
.name = "cpu5_perfcl_clk",
.parent_names = (const char *[]){ "perfcl_clk" },
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_ops_core,
+ .ops = &clk_dummy_ops,
},
};
@@ -841,8 +761,7 @@ static struct clk_osm cpu6_perfcl_clk = {
.name = "cpu6_perfcl_clk",
.parent_names = (const char *[]){ "perfcl_clk" },
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_ops_core,
+ .ops = &clk_dummy_ops,
},
};
@@ -854,8 +773,7 @@ static struct clk_osm cpu7_perfcl_clk = {
.name = "cpu7_perfcl_clk",
.parent_names = (const char *[]){ "perfcl_clk" },
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_ops_core,
+ .ops = &clk_dummy_ops,
},
};
@@ -3348,11 +3266,6 @@ static int clk_cpu_osm_driver_probe(struct platform_device *pdev)
clk_osm_write_reg(&perfcl_clk, val, CORE_DCVS_CTRL, OSM_BASE);
}
- clk_ops_core = clk_dummy_ops;
- clk_ops_core.set_rate = cpu_clk_set_rate;
- clk_ops_core.round_rate = cpu_clk_round_rate;
- clk_ops_core.recalc_rate = cpu_clk_recalc_rate;
-
rc = clk_osm_acd_init(&l3_clk);
if (rc) {
pr_err("failed to initialize ACD for L3, rc=%d\n", rc);
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index 8ca07fe..0cca360 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -556,7 +556,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(lcd0_ch1_clk, "lcd0-ch1", lcd_ch1_parents,
0x12c, 0, 4, 24, 3, BIT(31),
CLK_SET_RATE_PARENT);
static SUNXI_CCU_M_WITH_MUX_GATE(lcd1_ch1_clk, "lcd1-ch1", lcd_ch1_parents,
- 0x12c, 0, 4, 24, 3, BIT(31),
+ 0x130, 0, 4, 24, 3, BIT(31),
CLK_SET_RATE_PARENT);
static const char * const csi_sclk_parents[] = { "pll-video0", "pll-video1",
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index e8e16a5..0fe2518 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -169,8 +169,8 @@ static ssize_t store_down_threshold(struct gov_attr_set *attr_set,
int ret;
ret = sscanf(buf, "%u", &input);
- /* cannot be lower than 11 otherwise freq will not fall */
- if (ret != 1 || input < 11 || input > 100 ||
+ /* cannot be lower than 1 otherwise freq will not fall */
+ if (ret != 1 || input < 1 || input > 100 ||
input >= dbs_data->up_threshold)
return -EINVAL;
diff --git a/drivers/devfreq/governor_msm_adreno_tz.c b/drivers/devfreq/governor_msm_adreno_tz.c
index e8bfff2..3c50c4e 100644
--- a/drivers/devfreq/governor_msm_adreno_tz.c
+++ b/drivers/devfreq/governor_msm_adreno_tz.c
@@ -236,7 +236,7 @@ static int tz_init_ca(struct devfreq_msm_adreno_tz_data *priv)
{
unsigned int tz_ca_data[2];
struct scm_desc desc = {0};
- unsigned int *tz_buf;
+ u8 *tz_buf;
int ret;
/* Set data for TZ */
@@ -281,7 +281,7 @@ static int tz_init(struct devfreq_msm_adreno_tz_data *priv,
scm_is_call_available(SCM_SVC_DCVS, TZ_UPDATE_ID_64) &&
scm_is_call_available(SCM_SVC_DCVS, TZ_RESET_ID_64)) {
struct scm_desc desc = {0};
- unsigned int *tz_buf;
+ u8 *tz_buf;
if (!is_scm_armv8()) {
ret = scm_call(SCM_SVC_DCVS, TZ_INIT_ID_64,
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index e18dc59..6204cc3 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -251,8 +251,11 @@ static void bcm2835_dma_create_cb_set_length(
*/
/* have we filled in period_length yet? */
- if (*total_len + control_block->length < period_len)
+ if (*total_len + control_block->length < period_len) {
+ /* update number of bytes in this period so far */
+ *total_len += control_block->length;
return;
+ }
/* calculate the length that remains to reach period_length */
control_block->length = period_len - *total_len;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 8e6bf54..f8fdbd1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -693,6 +693,10 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",
adev->clock.default_dispclk / 100);
adev->clock.default_dispclk = 60000;
+ } else if (adev->clock.default_dispclk <= 60000) {
+ DRM_INFO("Changing default dispclk from %dMhz to 625Mhz\n",
+ adev->clock.default_dispclk / 100);
+ adev->clock.default_dispclk = 62500;
}
adev->clock.dp_extclk =
le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
index f7d236f..57fbde1 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
@@ -164,7 +164,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
- ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
+ ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;
memset(&args, 0, sizeof(args));
@@ -177,7 +177,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev)
{
int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
- ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
+ ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;
memset(&args, 0, sizeof(args));
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 42448c7..db9b79a 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1290,8 +1290,11 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
- line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+ active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+ (u32)mode->clock);
+ line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+ (u32)mode->clock);
+ line_time = min(line_time, (u32)65535);
/* watermark for high clocks */
if (adev->pm.dpm_enabled) {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 904dabd..36d5128 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -1257,8 +1257,11 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
- line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+ active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+ (u32)mode->clock);
+ line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+ (u32)mode->clock);
+ line_time = min(line_time, (u32)65535);
/* watermark for high clocks */
if (adev->pm.dpm_enabled) {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 6d02bdb..75689a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -1054,8 +1054,11 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
fixed20_12 a, b, c;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
- line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+ active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+ (u32)mode->clock);
+ line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+ (u32)mode->clock);
+ line_time = min(line_time, (u32)65535);
priority_a_cnt = 0;
priority_b_cnt = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index b1fb601..ba2321e 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1211,8 +1211,11 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
- line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+ active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+ (u32)mode->clock);
+ line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+ (u32)mode->clock);
+ line_time = min(line_time, (u32)65535);
/* watermark for high clocks */
if (adev->pm.dpm_enabled) {
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 908011d..7abda94 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -113,6 +113,7 @@ struct ast_private {
struct ttm_bo_kmap_obj cache_kmap;
int next_cursor;
bool support_wide_screen;
+ bool DisableP2A;
enum ast_tx_chip tx_chip_type;
u8 dp501_maxclk;
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index f75c642..533e762 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -124,6 +124,12 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
} else
*need_post = false;
+ /* Check P2A Access */
+ ast->DisableP2A = true;
+ data = ast_read32(ast, 0xf004);
+ if (data != 0xFFFFFFFF)
+ ast->DisableP2A = false;
+
/* Check if we support wide screen */
switch (ast->chip) {
case AST1180:
@@ -140,15 +146,17 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
ast->support_wide_screen = true;
else {
ast->support_wide_screen = false;
- /* Read SCU7c (silicon revision register) */
- ast_write32(ast, 0xf004, 0x1e6e0000);
- ast_write32(ast, 0xf000, 0x1);
- data = ast_read32(ast, 0x1207c);
- data &= 0x300;
- if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
- ast->support_wide_screen = true;
- if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
- ast->support_wide_screen = true;
+ if (ast->DisableP2A == false) {
+ /* Read SCU7c (silicon revision register) */
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ data = ast_read32(ast, 0x1207c);
+ data &= 0x300;
+ if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
+ ast->support_wide_screen = true;
+ if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
+ ast->support_wide_screen = true;
+ }
}
break;
}
@@ -216,80 +224,81 @@ static int ast_get_dram_info(struct drm_device *dev)
uint32_t data, data2;
uint32_t denum, num, div, ref_pll;
- ast_write32(ast, 0xf004, 0x1e6e0000);
- ast_write32(ast, 0xf000, 0x1);
-
-
- ast_write32(ast, 0x10000, 0xfc600309);
-
- do {
- if (pci_channel_offline(dev->pdev))
- return -EIO;
- } while (ast_read32(ast, 0x10000) != 0x01);
- data = ast_read32(ast, 0x10004);
-
- if (data & 0x40)
+ if (ast->DisableP2A)
+ {
ast->dram_bus_width = 16;
+ ast->dram_type = AST_DRAM_1Gx16;
+ ast->mclk = 396;
+ }
else
- ast->dram_bus_width = 32;
+ {
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ data = ast_read32(ast, 0x10004);
- if (ast->chip == AST2300 || ast->chip == AST2400) {
- switch (data & 0x03) {
- case 0:
- ast->dram_type = AST_DRAM_512Mx16;
- break;
- default:
- case 1:
- ast->dram_type = AST_DRAM_1Gx16;
+ if (data & 0x40)
+ ast->dram_bus_width = 16;
+ else
+ ast->dram_bus_width = 32;
+
+ if (ast->chip == AST2300 || ast->chip == AST2400) {
+ switch (data & 0x03) {
+ case 0:
+ ast->dram_type = AST_DRAM_512Mx16;
+ break;
+ default:
+ case 1:
+ ast->dram_type = AST_DRAM_1Gx16;
+ break;
+ case 2:
+ ast->dram_type = AST_DRAM_2Gx16;
+ break;
+ case 3:
+ ast->dram_type = AST_DRAM_4Gx16;
+ break;
+ }
+ } else {
+ switch (data & 0x0c) {
+ case 0:
+ case 4:
+ ast->dram_type = AST_DRAM_512Mx16;
+ break;
+ case 8:
+ if (data & 0x40)
+ ast->dram_type = AST_DRAM_1Gx16;
+ else
+ ast->dram_type = AST_DRAM_512Mx32;
+ break;
+ case 0xc:
+ ast->dram_type = AST_DRAM_1Gx32;
+ break;
+ }
+ }
+
+ data = ast_read32(ast, 0x10120);
+ data2 = ast_read32(ast, 0x10170);
+ if (data2 & 0x2000)
+ ref_pll = 14318;
+ else
+ ref_pll = 12000;
+
+ denum = data & 0x1f;
+ num = (data & 0x3fe0) >> 5;
+ data = (data & 0xc000) >> 14;
+ switch (data) {
+ case 3:
+ div = 0x4;
break;
case 2:
- ast->dram_type = AST_DRAM_2Gx16;
+ case 1:
+ div = 0x2;
break;
- case 3:
- ast->dram_type = AST_DRAM_4Gx16;
+ default:
+ div = 0x1;
break;
}
- } else {
- switch (data & 0x0c) {
- case 0:
- case 4:
- ast->dram_type = AST_DRAM_512Mx16;
- break;
- case 8:
- if (data & 0x40)
- ast->dram_type = AST_DRAM_1Gx16;
- else
- ast->dram_type = AST_DRAM_512Mx32;
- break;
- case 0xc:
- ast->dram_type = AST_DRAM_1Gx32;
- break;
- }
+ ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
}
-
- data = ast_read32(ast, 0x10120);
- data2 = ast_read32(ast, 0x10170);
- if (data2 & 0x2000)
- ref_pll = 14318;
- else
- ref_pll = 12000;
-
- denum = data & 0x1f;
- num = (data & 0x3fe0) >> 5;
- data = (data & 0xc000) >> 14;
- switch (data) {
- case 3:
- div = 0x4;
- break;
- case 2:
- case 1:
- div = 0x2;
- break;
- default:
- div = 0x1;
- break;
- }
- ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
return 0;
}
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 30672a3d..270e8fb 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -375,12 +375,20 @@ void ast_post_gpu(struct drm_device *dev)
ast_enable_mmio(dev);
ast_set_def_ext_reg(dev);
- if (ast->chip == AST2300 || ast->chip == AST2400)
- ast_init_dram_2300(dev);
- else
- ast_init_dram_reg(dev);
+ if (ast->DisableP2A == false)
+ {
+ if (ast->chip == AST2300 || ast->chip == AST2400)
+ ast_init_dram_2300(dev);
+ else
+ ast_init_dram_reg(dev);
- ast_init_3rdtx(dev);
+ ast_init_3rdtx(dev);
+ }
+ else
+ {
+ if (ast->tx_chip_type != AST_TX_NONE)
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */
+ }
}
/* AST 2300 DRAM settings */
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 2db7fb5..0e934a9 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -225,6 +225,7 @@ int drm_connector_init(struct drm_device *dev,
INIT_LIST_HEAD(&connector->probed_modes);
INIT_LIST_HEAD(&connector->modes);
+ mutex_init(&connector->mutex);
connector->edid_blob_ptr = NULL;
connector->status = connector_status_unknown;
@@ -359,6 +360,8 @@ void drm_connector_cleanup(struct drm_connector *connector)
connector->funcs->atomic_destroy_state(connector,
connector->state);
+ mutex_destroy(&connector->mutex);
+
memset(connector, 0, sizeof(*connector));
}
EXPORT_SYMBOL(drm_connector_cleanup);
@@ -374,14 +377,18 @@ EXPORT_SYMBOL(drm_connector_cleanup);
*/
int drm_connector_register(struct drm_connector *connector)
{
- int ret;
+ int ret = 0;
- if (connector->registered)
+ if (!connector->dev->registered)
return 0;
+ mutex_lock(&connector->mutex);
+ if (connector->registered)
+ goto unlock;
+
ret = drm_sysfs_connector_add(connector);
if (ret)
- return ret;
+ goto unlock;
ret = drm_debugfs_connector_add(connector);
if (ret) {
@@ -397,12 +404,14 @@ int drm_connector_register(struct drm_connector *connector)
drm_mode_object_register(connector->dev, &connector->base);
connector->registered = true;
- return 0;
+ goto unlock;
err_debugfs:
drm_debugfs_connector_remove(connector);
err_sysfs:
drm_sysfs_connector_remove(connector);
+unlock:
+ mutex_unlock(&connector->mutex);
return ret;
}
EXPORT_SYMBOL(drm_connector_register);
@@ -415,8 +424,11 @@ EXPORT_SYMBOL(drm_connector_register);
*/
void drm_connector_unregister(struct drm_connector *connector)
{
- if (!connector->registered)
+ mutex_lock(&connector->mutex);
+ if (!connector->registered) {
+ mutex_unlock(&connector->mutex);
return;
+ }
if (connector->funcs->early_unregister)
connector->funcs->early_unregister(connector);
@@ -425,6 +437,7 @@ void drm_connector_unregister(struct drm_connector *connector)
drm_debugfs_connector_remove(connector);
connector->registered = false;
+ mutex_unlock(&connector->mutex);
}
EXPORT_SYMBOL(drm_connector_unregister);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 0f2fa90..362b8cd 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -710,6 +710,8 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
if (ret)
goto err_minors;
+ dev->registered = true;
+
if (dev->driver->load) {
ret = dev->driver->load(dev, flags);
if (ret)
@@ -749,6 +751,8 @@ void drm_dev_unregister(struct drm_device *dev)
drm_lastclose(dev);
+ dev->registered = false;
+
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_modeset_unregister_all(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ca6efb6..7513e76 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1199,6 +1199,15 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_free_priv;
pci_set_drvdata(pdev, &dev_priv->drm);
+ /*
+ * Disable the system suspend direct complete optimization, which can
+ * leave the device suspended skipping the driver's suspend handlers
+ * if the device was already runtime suspended. This is needed due to
+ * the difference in our runtime and system suspend sequence and
+ * because the HDA driver may require us to enable the audio power
+ * domain during system suspend.
+ */
+ pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
ret = i915_driver_init_early(dev_priv, ent);
if (ret < 0)
diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h
index c0cb297..2cfe96d3 100644
--- a/drivers/gpu/drm/i915/i915_pvinfo.h
+++ b/drivers/gpu/drm/i915/i915_pvinfo.h
@@ -36,10 +36,6 @@
#define VGT_VERSION_MAJOR 1
#define VGT_VERSION_MINOR 0
-#define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor))
-#define INTEL_VGT_IF_VERSION \
- INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR)
-
/*
* notifications from guest to vgpu device model
*/
@@ -55,8 +51,8 @@ enum vgt_g2v_type {
struct vgt_if {
u64 magic; /* VGT_MAGIC */
- uint16_t version_major;
- uint16_t version_minor;
+ u16 version_major;
+ u16 version_minor;
u32 vgt_id; /* ID of vGT instance */
u32 rsv1[12]; /* pad to offset 0x40 */
/*
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index dae340c..125adcc 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -60,8 +60,8 @@
*/
void i915_check_vgpu(struct drm_i915_private *dev_priv)
{
- uint64_t magic;
- uint32_t version;
+ u64 magic;
+ u16 version_major;
BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
@@ -69,10 +69,8 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv)
if (magic != VGT_MAGIC)
return;
- version = INTEL_VGT_IF_VERSION_ENCODE(
- __raw_i915_read16(dev_priv, vgtif_reg(version_major)),
- __raw_i915_read16(dev_priv, vgtif_reg(version_minor)));
- if (version != INTEL_VGT_IF_VERSION) {
+ version_major = __raw_i915_read16(dev_priv, vgtif_reg(version_major));
+ if (version_major < VGT_VERSION_MAJOR) {
DRM_INFO("VGT interface version mismatch!\n");
return;
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 5dc6082..f8efd20 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2253,6 +2253,9 @@ void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
intel_fill_fb_ggtt_view(&view, fb, rotation);
vma = i915_gem_object_to_ggtt(obj, &view);
+ if (WARN_ON_ONCE(!vma))
+ return;
+
i915_vma_unpin_fence(vma);
i915_gem_object_unpin_from_display_plane(vma);
}
@@ -13764,6 +13767,15 @@ static void update_scanline_offset(struct intel_crtc *crtc)
* type. For DP ports it behaves like most other platforms, but on HDMI
* there's an extra 1 line difference. So we need to add two instead of
* one to the value.
+ *
+ * On VLV/CHV DSI the scanline counter would appear to increment
+ * approx. 1/3 of a scanline before start of vblank. Unfortunately
+ * that means we can't tell whether we're in vblank or not while
+ * we're on that particular line. We must still set scanline_offset
+ * to 1 so that the vblank timestamps come out correct when we query
+ * the scanline counter from within the vblank interrupt handler.
+ * However if queried just before the start of vblank we'll get an
+ * answer that's slightly in the future.
*/
if (IS_GEN2(dev)) {
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 2c6d59d..49de476 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4114,11 +4114,19 @@ skl_compute_wm(struct drm_atomic_state *state)
struct drm_crtc_state *cstate;
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct skl_wm_values *results = &intel_state->wm_results;
+ struct drm_device *dev = state->dev;
struct skl_pipe_wm *pipe_wm;
bool changed = false;
int ret, i;
/*
+ * When we distrust bios wm we always need to recompute to set the
+ * expected DDB allocations for each CRTC.
+ */
+ if (to_i915(dev)->wm.distrust_bios_wm)
+ changed = true;
+
+ /*
* If this transaction isn't actually touching any CRTC's, don't
* bother with watermark calculation. Note that if we pass this
* test, we're guaranteed to hold at least one CRTC state mutex,
@@ -4128,6 +4136,7 @@ skl_compute_wm(struct drm_atomic_state *state)
*/
for_each_crtc_in_state(state, crtc, cstate, i)
changed = true;
+
if (!changed)
return 0;
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index dbed12c..64f4e2e 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -81,10 +81,13 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
*/
void intel_pipe_update_start(struct intel_crtc *crtc)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
long timeout = msecs_to_jiffies_timeout(1);
int scanline, min, max, vblank_start;
wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
+ bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI);
DEFINE_WAIT(wait);
vblank_start = adjusted_mode->crtc_vblank_start;
@@ -136,6 +139,24 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
drm_crtc_vblank_put(&crtc->base);
+ /*
+ * On VLV/CHV DSI the scanline counter would appear to
+ * increment approx. 1/3 of a scanline before start of vblank.
+ * The registers still get latched at start of vblank however.
+ * This means we must not write any registers on the first
+ * line of vblank (since not the whole line is actually in
+ * vblank). And unfortunately we can't use the interrupt to
+ * wait here since it will fire too soon. We could use the
+ * frame start interrupt instead since it will fire after the
+ * critical scanline, but that would require more changes
+ * in the interrupt code. So for now we'll just do the nasty
+ * thing and poll for the bad scanline to pass us by.
+ *
+ * FIXME figure out if BXT+ DSI suffers from this as well
+ */
+ while (need_vlv_dsi_wa && scanline == vblank_start)
+ scanline = intel_get_crtc_scanline(crtc);
+
crtc->debug.scanline_start = scanline;
crtc->debug.start_vbl_time = ktime_get();
crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 0e8c4d9..e097780 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1061,7 +1061,7 @@ static int mtk_hdmi_setup_vendor_specific_infoframe(struct mtk_hdmi *hdmi,
}
err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer));
- if (err) {
+ if (err < 0) {
dev_err(hdmi->dev, "Failed to pack vendor infoframe: %zd\n",
err);
return err;
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index b468d2a..961d47f 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -64,7 +64,7 @@ int adreno_hw_init(struct msm_gpu *gpu)
DBG("%s", gpu->name);
- ret = msm_gem_get_iova(gpu->rb->bo, gpu->id, &gpu->rb_iova);
+ ret = msm_gem_get_iova(gpu->rb->bo, gpu->aspace, &gpu->rb_iova);
if (ret) {
gpu->rb_iova = 0;
dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
@@ -406,7 +406,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
return -ENOMEM;
}
- ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->id,
+ ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->aspace,
&adreno_gpu->memptrs_iova);
if (ret) {
dev_err(drm->dev, "could not map memptrs: %d\n", ret);
@@ -423,8 +423,7 @@ void adreno_gpu_cleanup(struct adreno_gpu *gpu)
msm_gem_put_vaddr(gpu->memptrs_bo);
if (gpu->memptrs_iova)
- msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
-
+ msm_gem_put_iova(gpu->memptrs_bo, gpu->base.aspace);
drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
}
release_firmware(gpu->pm4);
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index a3c6f58..5a33fdb 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -261,6 +261,7 @@ static const struct component_ops dp_display_comp_ops = {
static int dp_display_process_hpd_high(struct dp_display_private *dp)
{
int rc = 0;
+ u32 max_pclk_from_edid = 0;
rc = dp->panel->read_dpcd(dp->panel);
if (rc)
@@ -269,6 +270,11 @@ static int dp_display_process_hpd_high(struct dp_display_private *dp)
sde_get_edid(dp->dp_display.connector, &dp->aux->drm_aux->ddc,
(void **)&dp->panel->edid_ctrl);
+ max_pclk_from_edid = dp->panel->get_max_pclk(dp->panel);
+
+ dp->dp_display.max_pclk_khz = min(max_pclk_from_edid,
+ dp->parser->max_pclk_khz);
+
dp->dp_display.is_connected = true;
drm_helper_hpd_irq_event(dp->dp_display.connector->dev);
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
index 877287a..3caa277 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.h
+++ b/drivers/gpu/drm/msm/dp/dp_display.h
@@ -29,6 +29,7 @@ struct dp_display {
struct dp_bridge *bridge;
struct drm_connector *connector;
bool is_connected;
+ u32 max_pclk_khz;
int (*enable)(struct dp_display *dp_display);
int (*post_enable)(struct dp_display *dp_display);
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index 78c04c4..91aafdd 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -450,5 +450,17 @@ enum drm_mode_status dp_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode,
void *display)
{
- return MODE_OK;
+ struct dp_display *dp_disp;
+
+ if (!mode || !display) {
+ pr_err("invalid params\n");
+ return MODE_ERROR;
+ }
+
+ dp_disp = display;
+
+ if (mode->clock > dp_disp->max_pclk_khz)
+ return MODE_BAD;
+ else
+ return MODE_OK;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index fed1dbb..4496e9a 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -72,6 +72,34 @@ static int dp_panel_read_dpcd(struct dp_panel *dp_panel)
return rc;
}
+static u32 dp_panel_get_max_pclk(struct dp_panel *dp_panel)
+{
+ struct dp_panel_private *panel;
+ struct drm_dp_link *dp_link;
+ u32 bpc, bpp, max_data_rate_khz, max_pclk_rate_khz;
+ const u8 num_components = 3;
+
+ if (!dp_panel) {
+ pr_err("invalid input\n");
+ return 0;
+ }
+
+ panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+ dp_link = &dp_panel->dp_link;
+
+ bpc = sde_get_sink_bpc(dp_panel->edid_ctrl);
+ bpp = bpc * num_components;
+
+ max_data_rate_khz = (dp_link->num_lanes * dp_link->rate * 8);
+ max_pclk_rate_khz = max_data_rate_khz / bpp;
+
+ pr_debug("bpp=%d, max_lane_cnt=%d\n", bpp, dp_link->num_lanes);
+ pr_debug("max_data_rate=%dKHz, max_pclk_rate=%dKHz\n",
+ max_data_rate_khz, max_pclk_rate_khz);
+
+ return max_pclk_rate_khz;
+}
+
static int dp_panel_timing_cfg(struct dp_panel *dp_panel)
{
int rc = 0;
@@ -276,6 +304,7 @@ struct dp_panel *dp_panel_get(struct device *dev, struct dp_aux *aux,
dp_panel->timing_cfg = dp_panel_timing_cfg;
dp_panel->read_dpcd = dp_panel_read_dpcd;
dp_panel->get_link_rate = dp_panel_get_link_rate;
+ dp_panel->get_max_pclk = dp_panel_get_max_pclk;
return dp_panel;
error:
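
As a rough sanity check of the dp_panel_get_max_pclk() arithmetic above (the link parameters below are hypothetical, not taken from this patch):

	/*
	 * Illustrative only: same math as dp_panel_get_max_pclk(), with
	 * made-up link parameters. dp_link->rate is the per-lane link symbol
	 * clock in kHz and each symbol carries 8 data bits, so the result
	 * lands directly in kHz of pixel clock.
	 */
	u32 lanes = 4, rate = 270000;                 /* 4 lanes at HBR        */
	u32 bpc = 8, bpp = bpc * 3;                   /* 24 bpp, RGB           */
	u32 max_data_rate_khz = lanes * rate * 8;     /* 8,640,000             */
	u32 max_pclk_khz = max_data_rate_khz / bpp;   /* 360,000 -> 360 MHz    */

dp_connector_mode_valid() then rejects any mode whose mode->clock exceeds the smaller of this value and the parser's max_pclk_khz limit, as set up in dp_display_process_hpd_high().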
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
index 5852c70..b63c51f 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.h
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -46,6 +46,7 @@ struct dp_panel {
struct dp_panel_info pinfo;
u32 vic;
+ u32 max_pclk_khz;
int (*sde_edid_register)(struct dp_panel *dp_panel);
void (*sde_edid_deregister)(struct dp_panel *dp_panel);
@@ -53,6 +54,7 @@ struct dp_panel {
int (*timing_cfg)(struct dp_panel *dp_panel);
int (*read_dpcd)(struct dp_panel *dp_panel);
u32 (*get_link_rate)(struct dp_panel *dp_panel);
+ u32 (*get_max_pclk)(struct dp_panel *dp_panel);
};
struct dp_panel *dp_panel_get(struct device *dev, struct dp_aux *aux,
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index e2a348d..b2aef9c 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -24,6 +24,7 @@
#include "msm_drv.h"
#include "msm_kms.h"
#include "msm_gpu.h"
+#include "msm_mmu.h"
#include "dsi_ctrl.h"
#include "dsi_ctrl_hw.h"
#include "dsi_clk.h"
@@ -252,6 +253,16 @@ static int dsi_ctrl_debugfs_deinit(struct dsi_ctrl *dsi_ctrl)
return 0;
}
+static inline struct msm_gem_address_space*
+dsi_ctrl_get_aspace(struct dsi_ctrl *dsi_ctrl,
+ int domain)
+{
+ if (!dsi_ctrl || !dsi_ctrl->drm_dev)
+ return NULL;
+
+ return msm_gem_smmu_address_space_get(dsi_ctrl->drm_dev, domain);
+}
+
static int dsi_ctrl_check_state(struct dsi_ctrl *dsi_ctrl,
enum dsi_ctrl_driver_ops op,
u32 op_state)
@@ -1170,8 +1181,17 @@ static int dsi_ctrl_drv_state_init(struct dsi_ctrl *dsi_ctrl)
static int dsi_ctrl_buffer_deinit(struct dsi_ctrl *dsi_ctrl)
{
+ struct msm_gem_address_space *aspace = NULL;
+
if (dsi_ctrl->tx_cmd_buf) {
- msm_gem_put_iova(dsi_ctrl->tx_cmd_buf, 0);
+ aspace = dsi_ctrl_get_aspace(dsi_ctrl,
+ MSM_SMMU_DOMAIN_UNSECURE);
+ if (!aspace) {
+ pr_err("failed to get address space\n");
+ return -ENOMEM;
+ }
+
+ msm_gem_put_iova(dsi_ctrl->tx_cmd_buf, aspace);
msm_gem_free_object(dsi_ctrl->tx_cmd_buf);
dsi_ctrl->tx_cmd_buf = NULL;
@@ -1184,6 +1204,13 @@ int dsi_ctrl_buffer_init(struct dsi_ctrl *dsi_ctrl)
{
int rc = 0;
u32 iova = 0;
+ struct msm_gem_address_space *aspace = NULL;
+
+ aspace = dsi_ctrl_get_aspace(dsi_ctrl, MSM_SMMU_DOMAIN_UNSECURE);
+ if (!aspace) {
+ pr_err("failed to get address space\n");
+ return -ENOMEM;
+ }
dsi_ctrl->tx_cmd_buf = msm_gem_new(dsi_ctrl->drm_dev,
SZ_4K,
@@ -1198,7 +1225,7 @@ int dsi_ctrl_buffer_init(struct dsi_ctrl *dsi_ctrl)
dsi_ctrl->cmd_buffer_size = SZ_4K;
- rc = msm_gem_get_iova(dsi_ctrl->tx_cmd_buf, 0, &iova);
+ rc = msm_gem_get_iova(dsi_ctrl->tx_cmd_buf, aspace, &iova);
if (rc) {
pr_err("failed to get iova, rc=%d\n", rc);
(void)dsi_ctrl_buffer_deinit(dsi_ctrl);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index 52b1dcb..b61bfde 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -20,6 +20,7 @@
#include "msm_drv.h"
#include "sde_connector.h"
+#include "msm_mmu.h"
#include "dsi_display.h"
#include "dsi_panel.h"
#include "dsi_ctrl.h"
@@ -1321,6 +1322,7 @@ static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
{
struct dsi_display *display = to_dsi_display(host);
struct dsi_display_ctrl *display_ctrl;
+ struct msm_gem_address_space *aspace = NULL;
int rc = 0, cnt = 0;
if (!host || !msg) {
@@ -1363,7 +1365,16 @@ static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
pr_err("value of display->tx_cmd_buf is NULL");
goto error_disable_cmd_engine;
}
- rc = msm_gem_get_iova(display->tx_cmd_buf, 0,
+
+ aspace = msm_gem_smmu_address_space_get(display->drm_dev,
+ MSM_SMMU_DOMAIN_UNSECURE);
+ if (!aspace) {
+ pr_err("failed to get aspace\n");
+ rc = -EINVAL;
+ goto free_gem;
+ }
+
+ rc = msm_gem_get_iova(display->tx_cmd_buf, aspace,
&(display->cmd_buffer_iova));
if (rc) {
pr_err("failed to get the iova rc %d\n", rc);
@@ -1419,7 +1430,7 @@ static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
}
return rc;
put_iova:
- msm_gem_put_iova(display->tx_cmd_buf, 0);
+ msm_gem_put_iova(display->tx_cmd_buf, aspace);
free_gem:
msm_gem_free_object(display->tx_cmd_buf);
error:
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 9527daf..75e98dc 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -133,7 +133,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
container_of(work, struct mdp4_crtc, unref_cursor_work);
struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
- msm_gem_put_iova(val, mdp4_kms->id);
+ msm_gem_put_iova(val, mdp4_kms->aspace);
drm_gem_object_unreference_unlocked(val);
}
@@ -378,7 +378,8 @@ static void update_cursor(struct drm_crtc *crtc)
if (next_bo) {
/* take a obj ref + iova ref when we start scanning out: */
drm_gem_object_reference(next_bo);
- msm_gem_get_iova_locked(next_bo, mdp4_kms->id, &iova);
+ msm_gem_get_iova_locked(next_bo, mdp4_kms->aspace,
+ &iova);
/* enable cursor: */
mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
@@ -435,7 +436,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
}
if (cursor_bo) {
- ret = msm_gem_get_iova(cursor_bo, mdp4_kms->id, &iova);
+ ret = msm_gem_get_iova(cursor_bo, mdp4_kms->aspace, &iova);
if (ret)
goto fail;
} else {
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 80b49a1..acee5da 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -184,7 +184,7 @@ static void mdp4_destroy(struct msm_kms *kms)
}
if (mdp4_kms->blank_cursor_iova)
- msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
+ msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->aspace);
drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
if (mdp4_kms->rpm_enabled)
@@ -582,13 +582,6 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
aspace = NULL;
}
- mdp4_kms->id = msm_register_address_space(dev, aspace);
- if (mdp4_kms->id < 0) {
- ret = mdp4_kms->id;
- dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret);
- goto fail;
- }
-
ret = modeset_init(mdp4_kms);
if (ret) {
dev_err(dev->dev, "modeset_init failed: %d\n", ret);
@@ -605,7 +598,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
goto fail;
}
- ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id,
+ ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->aspace,
&mdp4_kms->blank_cursor_iova);
if (ret) {
dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index 1fe35b2..f9dcadf 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -33,8 +33,6 @@ struct mdp4_kms {
int rev;
/* mapper-id used to request GEM buffer mapped for scanout: */
- int id;
-
void __iomem *mmio;
struct regulator *vdd;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 3903dbc..934992e 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -109,7 +109,7 @@ static int mdp4_plane_prepare_fb(struct drm_plane *plane,
return 0;
DBG("%s: prepare: FB[%u]", mdp4_plane->name, fb->base.id);
- return msm_framebuffer_prepare(fb, mdp4_kms->id);
+ return msm_framebuffer_prepare(fb, mdp4_kms->aspace);
}
static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
@@ -123,7 +123,7 @@ static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
return;
DBG("%s: cleanup: FB[%u]", mdp4_plane->name, fb->base.id);
- msm_framebuffer_cleanup(fb, mdp4_kms->id);
+ msm_framebuffer_cleanup(fb, mdp4_kms->aspace);
}
@@ -172,13 +172,13 @@ static void mdp4_plane_set_scanout(struct drm_plane *plane,
MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe),
- msm_framebuffer_iova(fb, mdp4_kms->id, 0));
+ msm_framebuffer_iova(fb, mdp4_kms->aspace, 0));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP1_BASE(pipe),
- msm_framebuffer_iova(fb, mdp4_kms->id, 1));
+ msm_framebuffer_iova(fb, mdp4_kms->aspace, 1));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP2_BASE(pipe),
- msm_framebuffer_iova(fb, mdp4_kms->id, 2));
+ msm_framebuffer_iova(fb, mdp4_kms->aspace, 2));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP3_BASE(pipe),
- msm_framebuffer_iova(fb, mdp4_kms->id, 3));
+ msm_framebuffer_iova(fb, mdp4_kms->aspace, 3));
plane->fb = fb;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index c205c36..15e7da2 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -171,7 +171,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
container_of(work, struct mdp5_crtc, unref_cursor_work);
struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
- msm_gem_put_iova(val, mdp5_kms->id);
+ msm_gem_put_iova(val, mdp5_kms->aspace);
drm_gem_object_unreference_unlocked(val);
}
@@ -525,7 +525,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
if (!cursor_bo)
return -ENOENT;
- ret = msm_gem_get_iova(cursor_bo, mdp5_kms->id, &cursor_addr);
+ ret = msm_gem_get_iova(cursor_bo, mdp5_kms->aspace, &cursor_addr);
if (ret)
return -EINVAL;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index f022967..d97e4ef 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -640,13 +640,6 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
aspace = NULL;
}
- mdp5_kms->id = msm_register_address_space(dev, aspace);
- if (mdp5_kms->id < 0) {
- ret = mdp5_kms->id;
- dev_err(&pdev->dev, "failed to register mdp5 iommu: %d\n", ret);
- goto fail;
- }
-
ret = modeset_init(mdp5_kms);
if (ret) {
dev_err(&pdev->dev, "modeset_init failed: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 623ac07..f21e912 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -38,7 +38,6 @@ struct mdp5_kms {
/* mapper-id used to request GEM buffer mapped for scanout: */
- int id;
struct msm_gem_address_space *aspace;
struct mdp5_smp *smp;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 5e67e8b..88e5d06 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -261,7 +261,7 @@ static int mdp5_plane_prepare_fb(struct drm_plane *plane,
return 0;
DBG("%s: prepare: FB[%u]", mdp5_plane->name, fb->base.id);
- return msm_framebuffer_prepare(fb, mdp5_kms->id);
+ return msm_framebuffer_prepare(fb, mdp5_kms->aspace);
}
static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
@@ -275,7 +275,7 @@ static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
return;
DBG("%s: cleanup: FB[%u]", mdp5_plane->name, fb->base.id);
- msm_framebuffer_cleanup(fb, mdp5_kms->id);
+ msm_framebuffer_cleanup(fb, mdp5_kms->aspace);
}
static int mdp5_plane_atomic_check(struct drm_plane *plane,
@@ -398,13 +398,13 @@ static void set_scanout_locked(struct drm_plane *plane,
MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe),
- msm_framebuffer_iova(fb, mdp5_kms->id, 0));
+ msm_framebuffer_iova(fb, mdp5_kms->aspace, 0));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe),
- msm_framebuffer_iova(fb, mdp5_kms->id, 1));
+ msm_framebuffer_iova(fb, mdp5_kms->aspace, 1));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe),
- msm_framebuffer_iova(fb, mdp5_kms->id, 2));
+ msm_framebuffer_iova(fb, mdp5_kms->aspace, 2));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
- msm_framebuffer_iova(fb, mdp5_kms->id, 3));
+ msm_framebuffer_iova(fb, mdp5_kms->aspace, 3));
plane->fb = fb;
}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 6a2d239..810d0d6 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -151,20 +151,6 @@ static const struct drm_mode_config_funcs mode_config_funcs = {
.atomic_commit = msm_atomic_commit,
};
-int msm_register_address_space(struct drm_device *dev,
- struct msm_gem_address_space *aspace)
-{
- struct msm_drm_private *priv = dev->dev_private;
- int idx = priv->num_aspaces++;
-
- if (WARN_ON(idx >= ARRAY_SIZE(priv->aspace)))
- return -EINVAL;
-
- priv->aspace[idx] = aspace;
-
- return idx;
-}
-
#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
static bool reglog = false;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
@@ -1933,6 +1919,30 @@ static int add_display_components(struct device *dev,
return ret;
}
+struct msm_gem_address_space *
+msm_gem_smmu_address_space_get(struct drm_device *dev,
+ unsigned int domain)
+{
+ struct msm_drm_private *priv = NULL;
+ struct msm_kms *kms;
+ const struct msm_kms_funcs *funcs;
+
+ if ((!dev) || (!dev->dev_private))
+ return NULL;
+
+ priv = dev->dev_private;
+ kms = priv->kms;
+ if (!kms)
+ return NULL;
+
+ funcs = kms->funcs;
+
+ if ((!funcs) || (!funcs->get_address_space))
+ return NULL;
+
+ return funcs->get_address_space(priv->kms, domain);
+}
+
/*
* We don't know what's the best binding to link the gpu with the drm device.
* Fow now, we just hunt for all the possible gpus that we support, and add them
@@ -2044,6 +2054,7 @@ void __exit adreno_unregister(void)
static int __init msm_drm_register(void)
{
DBG("init");
+ msm_smmu_driver_init();
msm_dsi_register();
msm_edp_register();
msm_hdmi_register();
@@ -2059,6 +2070,7 @@ static void __exit msm_drm_unregister(void)
adreno_unregister();
msm_edp_unregister();
msm_dsi_unregister();
+ msm_smmu_driver_cleanup();
}
module_init(msm_drm_register);
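
A minimal usage sketch of the path this adds (hypothetical caller, error handling trimmed): msm_gem_smmu_address_space_get() resolves an address space through the kms get_address_space() callback, and the aspace pointer then replaces the old integer id in the GEM pin/unpin calls:

	/*
	 * Sketch only: mirrors how dsi_ctrl.c and sde_hw_reg_dma_v1.c use the
	 * new helper in this series; "bo" is a hypothetical GEM object.
	 */
	struct msm_gem_address_space *aspace;
	uint32_t iova;
	int ret;

	aspace = msm_gem_smmu_address_space_get(drm_dev, MSM_SMMU_DOMAIN_UNSECURE);
	if (!aspace)
		return -EINVAL;

	ret = msm_gem_get_iova(bo, aspace, &iova);   /* pin into that aspace */
	if (ret)
		return ret;

	/* ... program iova into hardware ... */

	msm_gem_put_iova(bo, aspace);                /* matching put */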
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 665ed365..0d1605d 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -119,6 +119,7 @@ enum msm_mdp_plane_property {
PLANE_PROP_ROTATION,
PLANE_PROP_BLEND_OP,
PLANE_PROP_SRC_CONFIG,
+ PLANE_PROP_FB_TRANSLATION_MODE,
/* total # of properties */
PLANE_PROP_COUNT
@@ -145,6 +146,7 @@ enum msm_mdp_crtc_property {
CRTC_PROP_ROT_PREFILL_BW,
CRTC_PROP_ROT_CLK,
CRTC_PROP_ROI_V1,
+ CRTC_PROP_SECURITY_LEVEL,
/* total # of properties */
CRTC_PROP_COUNT
@@ -642,8 +644,6 @@ int msm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool nonblock);
void msm_gem_submit_free(struct msm_gem_submit *submit);
-int msm_register_address_space(struct drm_device *dev,
- struct msm_gem_address_space *aspace);
void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt,
void *priv);
@@ -662,6 +662,10 @@ struct msm_gem_address_space *
msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu,
const char *name);
+struct msm_gem_address_space *
+msm_gem_smmu_address_space_get(struct drm_device *dev,
+ unsigned int domain);
+
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file);
@@ -673,13 +677,16 @@ int msm_gem_mmap_obj(struct drm_gem_object *obj,
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
-int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
- uint32_t *iova);
-int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova);
-uint32_t msm_gem_iova(struct drm_gem_object *obj, int id);
+int msm_gem_get_iova_locked(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint32_t *iova);
+int msm_gem_get_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint32_t *iova);
+uint32_t msm_gem_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace);
struct page **msm_gem_get_pages(struct drm_gem_object *obj);
void msm_gem_put_pages(struct drm_gem_object *obj);
-void msm_gem_put_iova(struct drm_gem_object *obj, int id);
+void msm_gem_put_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace);
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
@@ -716,9 +723,12 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
struct dma_buf *dmabuf, struct sg_table *sgt);
void msm_framebuffer_set_kmap(struct drm_framebuffer *fb, bool enable);
-int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id);
-void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id);
-uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane);
+int msm_framebuffer_prepare(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace);
+void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace);
+uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace, int plane);
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 918427a..0a9f12d 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -148,14 +148,15 @@ static void msm_framebuffer_kunmap(struct drm_framebuffer *fb)
* should be fine, since only the scanout (mdpN) side of things needs
* this, the gpu doesn't care about fb's.
*/
-int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id)
+int msm_framebuffer_prepare(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int ret, i, n = drm_format_num_planes(fb->pixel_format);
uint32_t iova;
for (i = 0; i < n; i++) {
- ret = msm_gem_get_iova(msm_fb->planes[i], id, &iova);
+ ret = msm_gem_get_iova(msm_fb->planes[i], aspace, &iova);
DBG("FB[%u]: iova[%d]: %08x (%d)", fb->base.id, i, iova, ret);
if (ret)
return ret;
@@ -167,7 +168,8 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id)
return 0;
}
-void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id)
+void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int i, n = drm_format_num_planes(fb->pixel_format);
@@ -176,15 +178,16 @@ void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id)
msm_framebuffer_kunmap(fb);
for (i = 0; i < n; i++)
- msm_gem_put_iova(msm_fb->planes[i], id);
+ msm_gem_put_iova(msm_fb->planes[i], aspace);
}
-uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane)
+uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace, int plane)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
if (!msm_fb->planes[plane])
return 0;
- return msm_gem_iova(msm_fb->planes[plane], id) + fb->offsets[plane];
+ return msm_gem_iova(msm_fb->planes[plane], aspace) + fb->offsets[plane];
}
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 43e2a26..a7d06d1 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -290,22 +290,63 @@ uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
return offset;
}
+static void obj_remove_domain(struct msm_gem_vma *domain)
+{
+ if (domain) {
+ list_del(&domain->list);
+ kfree(domain);
+ }
+}
+
static void
put_iova(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct msm_drm_private *priv = obj->dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- int id;
+ struct msm_gem_vma *domain, *tmp;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
- msm_gem_unmap_vma(priv->aspace[id], &msm_obj->domain[id],
- msm_obj->sgt, get_dmabuf_ptr(obj));
+ list_for_each_entry_safe(domain, tmp, &msm_obj->domains, list) {
+ if (iommu_present(&platform_bus_type)) {
+ msm_gem_unmap_vma(domain->aspace, domain,
+ msm_obj->sgt, get_dmabuf_ptr(obj));
+ }
+
+ obj_remove_domain(domain);
}
}
+static struct msm_gem_vma *obj_add_domain(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+
+ if (!domain)
+ return ERR_PTR(-ENOMEM);
+
+ domain->aspace = aspace;
+
+ list_add_tail(&domain->list, &msm_obj->domains);
+
+ return domain;
+}
+
+static struct msm_gem_vma *obj_get_domain(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *domain;
+
+ list_for_each_entry(domain, &msm_obj->domains, list) {
+ if (domain->aspace == aspace)
+ return domain;
+ }
+
+ return NULL;
+}
+
/* should be called under struct_mutex.. although it can be called
* from atomic context without struct_mutex to acquire an extra
* iova ref if you know one is already held.
@@ -313,51 +354,65 @@ put_iova(struct drm_gem_object *obj)
* That means when I do eventually need to add support for unpinning
* the refcnt counter needs to be atomic_t.
*/
-int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
- uint32_t *iova)
+int msm_gem_get_iova_locked(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint32_t *iova)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct page **pages;
+ struct msm_gem_vma *domain;
int ret = 0;
- if (!msm_obj->domain[id].iova) {
- struct msm_drm_private *priv = obj->dev->dev_private;
- struct page **pages = get_pages(obj);
+ if (!iommu_present(&platform_bus_type)) {
+ pages = get_pages(obj);
if (IS_ERR(pages))
return PTR_ERR(pages);
- if (iommu_present(&platform_bus_type)) {
- ret = msm_gem_map_vma(priv->aspace[id],
- &msm_obj->domain[id], msm_obj->sgt,
- get_dmabuf_ptr(obj),
- msm_obj->flags);
- } else {
- msm_obj->domain[id].iova = physaddr(obj);
- }
+ *iova = physaddr(obj);
+ return 0;
}
- if (!ret)
- *iova = msm_obj->domain[id].iova;
+ domain = obj_get_domain(obj, aspace);
+
+ if (!domain) {
+ domain = obj_add_domain(obj, aspace);
+ if (IS_ERR(domain))
+ return PTR_ERR(domain);
+
+ pages = get_pages(obj);
+ if (IS_ERR(pages)) {
+ obj_remove_domain(domain);
+ return PTR_ERR(pages);
+ }
+
+ ret = msm_gem_map_vma(aspace, domain, msm_obj->sgt,
+ get_dmabuf_ptr(obj),
+ msm_obj->flags);
+ }
+
+ if (!ret && domain)
+ *iova = domain->iova;
+ else
+ obj_remove_domain(domain);
return ret;
}
/* get iova, taking a reference. Should have a matching put */
-int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
+int msm_gem_get_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint32_t *iova)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *domain;
int ret;
- /* this is safe right now because we don't unmap until the
- * bo is deleted:
- */
- if (msm_obj->domain[id].iova) {
- *iova = msm_obj->domain[id].iova;
+ domain = obj_get_domain(obj, aspace);
+ if (domain) {
+ *iova = domain->iova;
return 0;
}
mutex_lock(&obj->dev->struct_mutex);
- ret = msm_gem_get_iova_locked(obj, id, iova);
+ ret = msm_gem_get_iova_locked(obj, aspace, iova);
mutex_unlock(&obj->dev->struct_mutex);
return ret;
}
@@ -365,14 +420,18 @@ int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
/* get iova without taking a reference, used in places where you have
* already done a 'msm_gem_get_iova()'.
*/
-uint32_t msm_gem_iova(struct drm_gem_object *obj, int id)
+uint32_t msm_gem_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- WARN_ON(!msm_obj->domain[id].iova);
- return msm_obj->domain[id].iova;
+ struct msm_gem_vma *domain = obj_get_domain(obj, aspace);
+
+ WARN_ON(!domain);
+
+ return domain ? domain->iova : 0;
}
-void msm_gem_put_iova(struct drm_gem_object *obj, int id)
+void msm_gem_put_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
{
// XXX TODO ..
// NOTE: probably don't need a _locked() version.. we wouldn't
@@ -624,6 +683,7 @@ static void describe_fence(struct fence *fence, const char *type,
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *domain;
struct reservation_object *robj = msm_obj->resv;
struct reservation_object_list *fobj;
struct fence *fence;
@@ -666,6 +726,12 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
if (fence)
describe_fence(fence, "Exclusive", m);
rcu_read_unlock();
+
+ /* FIXME: we need to print the address space here too */
+ list_for_each_entry(domain, &msm_obj->domains, list)
+ seq_printf(m, " %08llx", domain->iova);
+
+ seq_puts(m, "\n");
}
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
@@ -783,8 +849,13 @@ static int msm_gem_new_impl(struct drm_device *dev,
if (!msm_obj)
return -ENOMEM;
- if (use_vram)
- msm_obj->vram_node = &msm_obj->domain[0].node;
+ if (use_vram) {
+ struct msm_gem_vma *domain = obj_add_domain(&msm_obj->base,
+ NULL);
+
+ if (!IS_ERR(domain))
+ msm_obj->vram_node = &domain->node;
+ }
msm_obj->flags = flags;
msm_obj->madv = MSM_MADV_WILLNEED;
@@ -797,6 +868,8 @@ static int msm_gem_new_impl(struct drm_device *dev,
}
INIT_LIST_HEAD(&msm_obj->submit_entry);
+ INIT_LIST_HEAD(&msm_obj->domains);
+
list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
*obj = &msm_obj->base;
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index b176c11..9d41a00 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -44,7 +44,9 @@ struct msm_gem_address_space {
struct msm_gem_vma {
/* Node used by the GPU address space, but not the SDE address space */
struct drm_mm_node node;
+ struct msm_gem_address_space *aspace;
uint64_t iova;
+ struct list_head list;
};
struct msm_gem_object {
@@ -84,7 +86,7 @@ struct msm_gem_object {
struct sg_table *sgt;
void *vaddr;
- struct msm_gem_vma domain[NUM_DOMAINS];
+ struct list_head domains;
/* normally (resv == &_resv) except for imported bo's */
struct reservation_object *resv;
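
With the per-object domain list above, one GEM object can hold a separate msm_gem_vma per address space. A small sketch of what that permits (hypothetical bo and address spaces, return codes ignored for brevity):

	/*
	 * Sketch only: the same buffer pinned into two SMMU address spaces,
	 * each pin producing its own iova and its own entry on
	 * msm_obj->domains.
	 */
	uint32_t rt_iova, nrt_iova;

	msm_gem_get_iova(bo, rt_aspace, &rt_iova);    /* adds a vma for rt_aspace */
	msm_gem_get_iova(bo, nrt_aspace, &nrt_iova);  /* second vma, own iova     */

	WARN_ON(msm_gem_iova(bo, rt_aspace) != rt_iova);  /* lookup, no new ref */

	msm_gem_put_iova(bo, rt_aspace);
	msm_gem_put_iova(bo, nrt_aspace);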
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index b6a0f37..8d727fe 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -157,7 +157,7 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
struct msm_gem_object *msm_obj = submit->bos[i].obj;
if (submit->bos[i].flags & BO_PINNED)
- msm_gem_put_iova(&msm_obj->base, submit->gpu->id);
+ msm_gem_put_iova(&msm_obj->base, submit->gpu->aspace);
if (submit->bos[i].flags & BO_LOCKED)
ww_mutex_unlock(&msm_obj->resv->lock);
@@ -245,7 +245,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
/* if locking succeeded, pin bo: */
ret = msm_gem_get_iova_locked(&msm_obj->base,
- submit->gpu->id, &iova);
+ submit->gpu->aspace, &iova);
if (ret)
break;
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index ded4226..49d9e10 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -457,7 +457,7 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
struct msm_gem_object *msm_obj = submit->bos[i].obj;
/* move to inactive: */
msm_gem_move_to_inactive(&msm_obj->base);
- msm_gem_put_iova(&msm_obj->base, gpu->id);
+ msm_gem_put_iova(&msm_obj->base, gpu->aspace);
drm_gem_object_unreference(&msm_obj->base);
}
@@ -494,6 +494,8 @@ static void retire_worker(struct work_struct *work)
mutex_lock(&dev->struct_mutex);
retire_submits(gpu);
+
+ retire_submits(gpu);
mutex_unlock(&dev->struct_mutex);
if (!msm_gpu_active(gpu))
@@ -538,8 +540,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
/* submit takes a reference to the bo and iova until retired: */
drm_gem_object_reference(&msm_obj->base);
msm_gem_get_iova_locked(&msm_obj->base,
- submit->gpu->id, &iova);
-
+ submit->gpu->aspace, &iova);
if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
@@ -674,8 +675,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
} else {
dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
}
- gpu->id = msm_register_address_space(drm, gpu->aspace);
-
/* Create ringbuffer: */
mutex_lock(&drm->struct_mutex);
@@ -706,7 +705,7 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
if (gpu->rb) {
if (gpu->rb_iova)
- msm_gem_put_iova(gpu->rb->bo, gpu->id);
+ msm_gem_put_iova(gpu->rb->bo, gpu->aspace);
msm_ringbuffer_destroy(gpu->rb);
}
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index c6bf5d6..13ecd72 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -99,7 +99,6 @@ struct msm_gpu {
int irq;
struct msm_gem_address_space *aspace;
- int id;
/* Power Control: */
struct regulator *gpu_reg, *gpu_cx;
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index eed0f1b..eb10d6b 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -93,6 +93,10 @@ struct msm_kms_funcs {
struct drm_mode_object *obj, u32 event, bool en);
/* cleanup: */
void (*destroy)(struct msm_kms *kms);
+ /* get address space */
+ struct msm_gem_address_space *(*get_address_space)(
+ struct msm_kms *kms,
+ unsigned int domain);
};
struct msm_kms {
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index dc7e5a6..5af26e2 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -63,4 +63,8 @@ struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain);
struct msm_mmu *msm_smmu_new(struct device *dev,
enum msm_mmu_domain_type domain);
+/* SDE SMMU driver initialization and cleanup functions */
+int __init msm_smmu_driver_init(void);
+void __exit msm_smmu_driver_cleanup(void);
+
#endif /* __MSM_MMU_H__ */
diff --git a/drivers/gpu/drm/msm/msm_smmu.c b/drivers/gpu/drm/msm/msm_smmu.c
index e3d2e34..50b81bb 100644
--- a/drivers/gpu/drm/msm/msm_smmu.c
+++ b/drivers/gpu/drm/msm/msm_smmu.c
@@ -286,8 +286,8 @@ static struct msm_smmu_domain msm_smmu_domains[MSM_SMMU_DOMAIN_MAX] = {
},
[MSM_SMMU_DOMAIN_SECURE] = {
.label = "mdp_s",
- .va_start = 0,
- .va_size = SZ_4G,
+ .va_start = SZ_128K,
+ .va_size = SZ_4G - SZ_128K,
.secure = true,
},
[MSM_SMMU_DOMAIN_NRT_UNSECURE] = {
@@ -298,20 +298,20 @@ static struct msm_smmu_domain msm_smmu_domains[MSM_SMMU_DOMAIN_MAX] = {
},
[MSM_SMMU_DOMAIN_NRT_SECURE] = {
.label = "rot_s",
- .va_start = 0,
- .va_size = SZ_4G,
+ .va_start = SZ_128K,
+ .va_size = SZ_4G - SZ_128K,
.secure = true,
},
};
static const struct of_device_id msm_smmu_dt_match[] = {
- { .compatible = "qcom,smmu-mdp-unsec",
+ { .compatible = "qcom,smmu_sde_unsec",
.data = &msm_smmu_domains[MSM_SMMU_DOMAIN_UNSECURE] },
- { .compatible = "qcom,smmu-mdp-sec",
+ { .compatible = "qcom,smmu_sde_sec",
.data = &msm_smmu_domains[MSM_SMMU_DOMAIN_SECURE] },
- { .compatible = "qcom,smmu-rot-unsec",
+ { .compatible = "qcom,smmu_sde_nrt_unsec",
.data = &msm_smmu_domains[MSM_SMMU_DOMAIN_NRT_UNSECURE] },
- { .compatible = "qcom,smmu-rot-sec",
+ { .compatible = "qcom,smmu_sde_nrt_sec",
.data = &msm_smmu_domains[MSM_SMMU_DOMAIN_NRT_SECURE] },
{}
};
@@ -535,7 +535,7 @@ static struct platform_driver msm_smmu_driver = {
},
};
-static int __init msm_smmu_driver_init(void)
+int __init msm_smmu_driver_init(void)
{
int ret;
@@ -545,13 +545,11 @@ static int __init msm_smmu_driver_init(void)
return ret;
}
-module_init(msm_smmu_driver_init);
-static void __exit msm_smmu_driver_cleanup(void)
+void __exit msm_smmu_driver_cleanup(void)
{
platform_driver_unregister(&msm_smmu_driver);
}
-module_exit(msm_smmu_driver_cleanup);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MSM SMMU driver");
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index 2970b28..c3c5a13 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -327,8 +327,7 @@ static void _sde_connector_destroy_fb(struct sde_connector *c_conn,
return;
}
- msm_framebuffer_cleanup(c_state->out_fb,
- c_state->mmu_id);
+ msm_framebuffer_cleanup(c_state->out_fb, c_state->aspace);
drm_framebuffer_unreference(c_state->out_fb);
c_state->out_fb = NULL;
@@ -432,7 +431,7 @@ sde_connector_atomic_duplicate_state(struct drm_connector *connector)
if (c_state->out_fb) {
drm_framebuffer_reference(c_state->out_fb);
rc = msm_framebuffer_prepare(c_state->out_fb,
- c_state->mmu_id);
+ c_state->aspace);
if (rc)
SDE_ERROR("failed to prepare fb, %d\n", rc);
}
@@ -652,14 +651,14 @@ static int sde_connector_atomic_set_property(struct drm_connector *connector,
c_conn->fb_kmap);
if (c_state->out_fb->flags & DRM_MODE_FB_SECURE)
- c_state->mmu_id =
- c_conn->mmu_id[SDE_IOMMU_DOMAIN_SECURE];
+ c_state->aspace =
+ c_conn->aspace[SDE_IOMMU_DOMAIN_SECURE];
else
- c_state->mmu_id =
- c_conn->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE];
+ c_state->aspace =
+ c_conn->aspace[SDE_IOMMU_DOMAIN_UNSECURE];
rc = msm_framebuffer_prepare(c_state->out_fb,
- c_state->mmu_id);
+ c_state->aspace);
if (rc)
SDE_ERROR("prep fb failed, %d\n", rc);
}
@@ -1010,18 +1009,17 @@ struct drm_connector *sde_connector_init(struct drm_device *dev,
c_conn->lp_mode = 0;
c_conn->last_panel_power_mode = SDE_MODE_DPMS_ON;
- /* cache mmu_id's for later */
sde_kms = to_sde_kms(priv->kms);
if (sde_kms->vbif[VBIF_NRT]) {
- c_conn->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] =
- sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_UNSECURE];
- c_conn->mmu_id[SDE_IOMMU_DOMAIN_SECURE] =
- sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_SECURE];
+ c_conn->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
+ sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_UNSECURE];
+ c_conn->aspace[SDE_IOMMU_DOMAIN_SECURE] =
+ sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_SECURE];
} else {
- c_conn->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] =
- sde_kms->mmu_id[MSM_SMMU_DOMAIN_UNSECURE];
- c_conn->mmu_id[SDE_IOMMU_DOMAIN_SECURE] =
- sde_kms->mmu_id[MSM_SMMU_DOMAIN_SECURE];
+ c_conn->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
+ sde_kms->aspace[MSM_SMMU_DOMAIN_UNSECURE];
+ c_conn->aspace[SDE_IOMMU_DOMAIN_SECURE] =
+ sde_kms->aspace[MSM_SMMU_DOMAIN_SECURE];
}
if (ops)
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index 497d0db..2318756 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -240,7 +240,7 @@ struct sde_connector {
struct drm_panel *panel;
void *display;
- int mmu_id[SDE_IOMMU_DOMAIN_MAX];
+ struct msm_gem_address_space *aspace[SDE_IOMMU_DOMAIN_MAX];
char name[SDE_CONNECTOR_NAME_SIZE];
@@ -304,14 +304,14 @@ struct sde_connector {
* struct sde_connector_state - private connector status structure
* @base: Base drm connector structure
* @out_fb: Pointer to output frame buffer, if applicable
- * @mmu_id: MMU ID for accessing frame buffer objects, if applicable
+ * @aspace: Address space for accessing frame buffer objects, if applicable
* @property_values: Local cache of current connector property values
* @rois: Regions of interest structure for mapping CRTC to Connector output
*/
struct sde_connector_state {
struct drm_connector_state base;
struct drm_framebuffer *out_fb;
- int mmu_id;
+ struct msm_gem_address_space *aspace;
uint64_t property_values[CONNECTOR_PROP_COUNT];
struct msm_roi_list rois;
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 923297f..ed6178a 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -2998,6 +2998,10 @@ static void sde_crtc_install_properties(struct drm_crtc *crtc,
struct drm_device *dev;
struct sde_kms_info *info;
struct sde_kms *sde_kms;
+ static const struct drm_prop_enum_list e_secure_level[] = {
+ {SDE_DRM_SEC_NON_SEC, "sec_and_non_sec"},
+ {SDE_DRM_SEC_ONLY, "sec_only"},
+ };
SDE_DEBUG("\n");
@@ -3071,6 +3075,11 @@ static void sde_crtc_install_properties(struct drm_crtc *crtc,
msm_property_install_volatile_range(&sde_crtc->property_info,
"sde_drm_roi_v1", 0x0, 0, ~0, 0, CRTC_PROP_ROI_V1);
+ msm_property_install_enum(&sde_crtc->property_info, "security_level",
+ 0x0, 0, e_secure_level,
+ ARRAY_SIZE(e_secure_level),
+ CRTC_PROP_SECURITY_LEVEL);
+
sde_kms_info_reset(info);
if (catalog->has_dim_layer) {
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
index b173876..4b12651 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -333,7 +333,7 @@ struct sde_encoder_phys_cmd {
* @wb_fmt: Writeback pixel format
* @frame_count: Counter of completed writeback operations
* @kickoff_count: Counter of issued writeback operations
- * @mmu_id: mmu identifier for non-secure/secure domain
+ * @aspace: address space identifier for non-secure/secure domain
* @wb_dev: Pointer to writeback device
* @start_time: Start time of writeback latest request
* @end_time: End time of writeback latest request
@@ -355,7 +355,7 @@ struct sde_encoder_phys_wb {
const struct sde_format *wb_fmt;
u32 frame_count;
u32 kickoff_count;
- int mmu_id[SDE_IOMMU_DOMAIN_MAX];
+ struct msm_gem_address_space *aspace[SDE_IOMMU_DOMAIN_MAX];
struct sde_wb_device *wb_dev;
ktime_t start_time;
ktime_t end_time;
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
index 54c1397..875d99d 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
@@ -250,7 +250,8 @@ static void sde_encoder_phys_wb_setup_fb(struct sde_encoder_phys *phys_enc,
struct sde_hw_wb_cfg *wb_cfg;
struct sde_hw_wb_cdp_cfg *cdp_cfg;
const struct msm_format *format;
- int ret, mmu_id;
+ int ret;
+ struct msm_gem_address_space *aspace;
if (!phys_enc || !phys_enc->sde_kms || !phys_enc->sde_kms->catalog) {
SDE_ERROR("invalid encoder\n");
@@ -264,9 +265,9 @@ static void sde_encoder_phys_wb_setup_fb(struct sde_encoder_phys *phys_enc,
wb_cfg->intf_mode = phys_enc->intf_mode;
wb_cfg->is_secure = (fb->flags & DRM_MODE_FB_SECURE) ? true : false;
- mmu_id = (wb_cfg->is_secure) ?
- wb_enc->mmu_id[SDE_IOMMU_DOMAIN_SECURE] :
- wb_enc->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE];
+ aspace = (wb_cfg->is_secure) ?
+ wb_enc->aspace[SDE_IOMMU_DOMAIN_SECURE] :
+ wb_enc->aspace[SDE_IOMMU_DOMAIN_UNSECURE];
SDE_DEBUG("[fb_secure:%d]\n", wb_cfg->is_secure);
@@ -288,7 +289,7 @@ static void sde_encoder_phys_wb_setup_fb(struct sde_encoder_phys *phys_enc,
wb_cfg->roi = *wb_roi;
if (hw_wb->caps->features & BIT(SDE_WB_XY_ROI_OFFSET)) {
- ret = sde_format_populate_layout(mmu_id, fb, &wb_cfg->dest);
+ ret = sde_format_populate_layout(aspace, fb, &wb_cfg->dest);
if (ret) {
SDE_DEBUG("failed to populate layout %d\n", ret);
return;
@@ -297,7 +298,7 @@ static void sde_encoder_phys_wb_setup_fb(struct sde_encoder_phys *phys_enc,
wb_cfg->dest.height = fb->height;
wb_cfg->dest.num_planes = wb_cfg->dest.format->num_planes;
} else {
- ret = sde_format_populate_layout_with_roi(mmu_id, fb, wb_roi,
+ ret = sde_format_populate_layout_with_roi(aspace, fb, wb_roi,
&wb_cfg->dest);
if (ret) {
/* this error should be detected during atomic_check */
@@ -914,12 +915,19 @@ static int _sde_encoder_phys_wb_init_internal_fb(
struct drm_mode_fb_cmd2 mode_cmd;
uint32_t size;
int nplanes, i, ret;
+ struct msm_gem_address_space *aspace;
if (!wb_enc || !wb_enc->base.parent || !wb_enc->base.sde_kms) {
SDE_ERROR("invalid params\n");
return -EINVAL;
}
+ aspace = wb_enc->base.sde_kms->aspace[SDE_IOMMU_DOMAIN_UNSECURE];
+ if (!aspace) {
+ SDE_ERROR("invalid address space\n");
+ return -EINVAL;
+ }
+
dev = wb_enc->base.sde_kms->dev;
if (!dev) {
SDE_ERROR("invalid dev\n");
@@ -974,8 +982,7 @@ static int _sde_encoder_phys_wb_init_internal_fb(
}
/* prepare the backing buffer now so that it's available later */
- ret = msm_framebuffer_prepare(fb,
- wb_enc->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE]);
+ ret = msm_framebuffer_prepare(fb, aspace);
if (!ret)
wb_enc->fb_disable = fb;
return ret;
@@ -1234,15 +1241,15 @@ struct sde_encoder_phys *sde_encoder_phys_wb_init(
phys_enc = &wb_enc->base;
if (p->sde_kms->vbif[VBIF_NRT]) {
- wb_enc->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] =
- p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_UNSECURE];
- wb_enc->mmu_id[SDE_IOMMU_DOMAIN_SECURE] =
- p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_SECURE];
+ wb_enc->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
+ p->sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_UNSECURE];
+ wb_enc->aspace[SDE_IOMMU_DOMAIN_SECURE] =
+ p->sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_SECURE];
} else {
- wb_enc->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] =
- p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_UNSECURE];
- wb_enc->mmu_id[SDE_IOMMU_DOMAIN_SECURE] =
- p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_SECURE];
+ wb_enc->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
+ p->sde_kms->aspace[MSM_SMMU_DOMAIN_UNSECURE];
+ wb_enc->aspace[SDE_IOMMU_DOMAIN_SECURE] =
+ p->sde_kms->aspace[MSM_SMMU_DOMAIN_SECURE];
}
hw_mdp = sde_rm_get_mdp(&p->sde_kms->rm);
diff --git a/drivers/gpu/drm/msm/sde/sde_formats.c b/drivers/gpu/drm/msm/sde/sde_formats.c
index c3477b5..04c9e79 100644
--- a/drivers/gpu/drm/msm/sde/sde_formats.c
+++ b/drivers/gpu/drm/msm/sde/sde_formats.c
@@ -818,7 +818,7 @@ uint32_t sde_format_get_framebuffer_size(
}
static int _sde_format_populate_addrs_ubwc(
- int mmu_id,
+ struct msm_gem_address_space *aspace,
struct drm_framebuffer *fb,
struct sde_hw_fmt_layout *layout)
{
@@ -830,7 +830,7 @@ static int _sde_format_populate_addrs_ubwc(
return -EINVAL;
}
- base_addr = msm_framebuffer_iova(fb, mmu_id, 0);
+ base_addr = msm_framebuffer_iova(fb, aspace, 0);
if (!base_addr) {
DRM_ERROR("failed to retrieve base addr\n");
return -EFAULT;
@@ -909,7 +909,7 @@ static int _sde_format_populate_addrs_ubwc(
}
static int _sde_format_populate_addrs_linear(
- int mmu_id,
+ struct msm_gem_address_space *aspace,
struct drm_framebuffer *fb,
struct sde_hw_fmt_layout *layout)
{
@@ -926,7 +926,7 @@ static int _sde_format_populate_addrs_linear(
/* Populate addresses for simple formats here */
for (i = 0; i < layout->num_planes; ++i) {
- layout->plane_addr[i] = msm_framebuffer_iova(fb, mmu_id, i);
+ layout->plane_addr[i] = msm_framebuffer_iova(fb, aspace, i);
if (!layout->plane_addr[i]) {
DRM_ERROR("failed to retrieve base addr\n");
return -EFAULT;
@@ -937,7 +937,7 @@ static int _sde_format_populate_addrs_linear(
}
int sde_format_populate_layout(
- int mmu_id,
+ struct msm_gem_address_space *aspace,
struct drm_framebuffer *fb,
struct sde_hw_fmt_layout *layout)
{
@@ -969,9 +969,9 @@ int sde_format_populate_layout(
/* Populate the addresses given the fb */
if (SDE_FORMAT_IS_UBWC(layout->format) ||
SDE_FORMAT_IS_TILE(layout->format))
- ret = _sde_format_populate_addrs_ubwc(mmu_id, fb, layout);
+ ret = _sde_format_populate_addrs_ubwc(aspace, fb, layout);
else
- ret = _sde_format_populate_addrs_linear(mmu_id, fb, layout);
+ ret = _sde_format_populate_addrs_linear(aspace, fb, layout);
/* check if anything changed */
if (!ret && !memcmp(plane_addr, layout->plane_addr, sizeof(plane_addr)))
@@ -1013,14 +1013,14 @@ static void _sde_format_calc_offset_linear(struct sde_hw_fmt_layout *source,
}
int sde_format_populate_layout_with_roi(
- int mmu_id,
+ struct msm_gem_address_space *aspace,
struct drm_framebuffer *fb,
struct sde_rect *roi,
struct sde_hw_fmt_layout *layout)
{
int ret;
- ret = sde_format_populate_layout(mmu_id, fb, layout);
+ ret = sde_format_populate_layout(aspace, fb, layout);
if (ret || !roi)
return ret;
diff --git a/drivers/gpu/drm/msm/sde/sde_formats.h b/drivers/gpu/drm/msm/sde/sde_formats.h
index 40aab22..2333a72 100644
--- a/drivers/gpu/drm/msm/sde/sde_formats.h
+++ b/drivers/gpu/drm/msm/sde/sde_formats.h
@@ -14,6 +14,7 @@
#define _SDE_FORMATS_H
#include <drm/drm_fourcc.h>
+#include "msm_gem.h"
#include "sde_hw_mdss.h"
/**
@@ -103,7 +104,7 @@ int sde_format_check_modified_format(
/**
* sde_format_populate_layout - populate the given format layout based on
* mmu, fb, and format found in the fb
- * @mmu_id: mmu id handle
+ * @aspace: address space pointer
* @fb: framebuffer pointer
* @fmtl: format layout structure to populate
*
@@ -111,14 +112,14 @@ int sde_format_check_modified_format(
* are the same as before or 0 if new addresses were populated
*/
int sde_format_populate_layout(
- int mmu_id,
+ struct msm_gem_address_space *aspace,
struct drm_framebuffer *fb,
struct sde_hw_fmt_layout *fmtl);
/**
* sde_format_populate_layout_with_roi - populate the given format layout
* based on mmu, fb, roi, and format found in the fb
- * @mmu_id: mmu id handle
+ * @aspace: address space pointer
* @fb: framebuffer pointer
* @roi: region of interest (optional)
* @fmtl: format layout structure to populate
@@ -126,7 +127,7 @@ int sde_format_populate_layout(
* Return: error code on failure, 0 on success
*/
int sde_format_populate_layout_with_roi(
- int mmu_id,
+ struct msm_gem_address_space *aspace,
struct drm_framebuffer *fb,
struct sde_rect *roi,
struct sde_hw_fmt_layout *fmtl);
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c
index dbd435b..9bc9837 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c
@@ -13,6 +13,7 @@
#include "sde_hw_ctl.h"
#include "sde_hw_reg_dma_v1.h"
#include "msm_drv.h"
+#include "msm_mmu.h"
#define GUARD_BYTES (BIT(8) - 1)
#define ALIGNED_OFFSET (U32_MAX & ~(GUARD_BYTES))
@@ -582,6 +583,7 @@ static struct sde_reg_dma_buffer *alloc_reg_dma_buf_v1(u32 size)
struct sde_reg_dma_buffer *dma_buf = NULL;
u32 iova_aligned, offset;
u32 rsize = size + GUARD_BYTES;
+ struct msm_gem_address_space *aspace = NULL;
int rc = 0;
if (!size || SIZE_DWORD(size) > MAX_DWORDS_SZ) {
@@ -602,7 +604,15 @@ static struct sde_reg_dma_buffer *alloc_reg_dma_buf_v1(u32 size)
goto fail;
}
- rc = msm_gem_get_iova(dma_buf->buf, 0, &dma_buf->iova);
+ aspace = msm_gem_smmu_address_space_get(reg_dma->drm_dev,
+ MSM_SMMU_DOMAIN_UNSECURE);
+ if (!aspace) {
+ DRM_ERROR("failed to get aspace\n");
+ rc = -EINVAL;
+ goto free_gem;
+ }
+
+ rc = msm_gem_get_iova(dma_buf->buf, aspace, &dma_buf->iova);
if (rc) {
DRM_ERROR("failed to get the iova rc %d\n", rc);
goto free_gem;
@@ -625,7 +635,7 @@ static struct sde_reg_dma_buffer *alloc_reg_dma_buf_v1(u32 size)
return dma_buf;
put_iova:
- msm_gem_put_iova(dma_buf->buf, 0);
+ msm_gem_put_iova(dma_buf->buf, aspace);
free_gem:
msm_gem_free_object(dma_buf->buf);
fail:
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index 78ea685..abb378d 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -1414,6 +1414,29 @@ static int sde_kms_atomic_check(struct msm_kms *kms,
return drm_atomic_helper_check(dev, state);
}
+static struct msm_gem_address_space*
+_sde_kms_get_address_space(struct msm_kms *kms,
+ unsigned int domain)
+{
+ struct sde_kms *sde_kms;
+
+ if (!kms) {
+ SDE_ERROR("invalid kms\n");
+ return NULL;
+ }
+
+ sde_kms = to_sde_kms(kms);
+ if (!sde_kms) {
+ SDE_ERROR("invalid sde_kms\n");
+ return NULL;
+ }
+
+ if (domain >= MSM_SMMU_DOMAIN_MAX)
+ return NULL;
+
+ return sde_kms->aspace[domain];
+}
+
static const struct msm_kms_funcs kms_funcs = {
.hw_init = sde_kms_hw_init,
.postinit = sde_kms_postinit,
@@ -1436,6 +1459,7 @@ static const struct msm_kms_funcs kms_funcs = {
.round_pixclk = sde_kms_round_pixclk,
.destroy = sde_kms_destroy,
.register_events = _sde_kms_register_events,
+ .get_address_space = _sde_kms_get_address_space,
};
/* the caller api needs to turn on clock before calling it */
@@ -1449,17 +1473,17 @@ static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms)
struct msm_mmu *mmu;
int i;
- for (i = ARRAY_SIZE(sde_kms->mmu_id) - 1; i >= 0; i--) {
- mmu = sde_kms->aspace[i]->mmu;
-
- if (!mmu)
+ for (i = ARRAY_SIZE(sde_kms->aspace) - 1; i >= 0; i--) {
+ if (!sde_kms->aspace[i])
continue;
+ mmu = sde_kms->aspace[i]->mmu;
+
mmu->funcs->detach(mmu, (const char **)iommu_ports,
ARRAY_SIZE(iommu_ports));
msm_gem_address_space_destroy(sde_kms->aspace[i]);
- sde_kms->mmu_id[i] = 0;
+ sde_kms->aspace[i] = NULL;
}
return 0;
@@ -1499,17 +1523,6 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
goto fail;
}
- sde_kms->mmu_id[i] = msm_register_address_space(sde_kms->dev,
- aspace);
- if (sde_kms->mmu_id[i] < 0) {
- ret = sde_kms->mmu_id[i];
- SDE_ERROR("failed to register sde iommu %d: %d\n",
- i, ret);
- mmu->funcs->detach(mmu, (const char **)iommu_ports,
- ARRAY_SIZE(iommu_ports));
- msm_gem_address_space_destroy(aspace);
- goto fail;
- }
}
return 0;
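
[Editor's aside, not part of the diff: the hunks above drop the per-domain mmu_id registration in favor of direct aspace bookkeeping and expose it through the new get_address_space kms callback. A minimal sketch of how a consumer could resolve an address space through that callback, assuming a struct msm_drm_private with a valid kms pointer; the helper name is hypothetical.]

	static struct msm_gem_address_space *
	example_get_unsecure_aspace(struct msm_drm_private *priv)
	{
		struct msm_kms *kms = priv->kms;

		/* the callback is the one installed in kms_funcs above */
		if (!kms || !kms->funcs->get_address_space)
			return NULL;

		return kms->funcs->get_address_space(kms,
				MSM_SMMU_DOMAIN_UNSECURE);
	}
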
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
index 0c5c286..d818fdf 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.h
+++ b/drivers/gpu/drm/msm/sde/sde_kms.h
@@ -160,7 +160,6 @@ struct sde_kms {
struct sde_mdss_cfg *catalog;
struct msm_gem_address_space *aspace[MSM_SMMU_DOMAIN_MAX];
- int mmu_id[MSM_SMMU_DOMAIN_MAX];
struct sde_power_client *core_client;
struct ion_client *iclient;
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index 2a98af4..581b26e 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -115,6 +115,7 @@ enum sde_plane_sclcheck_state {
/*
* struct sde_plane - local sde plane structure
+ * @aspace: address space pointer
* @csc_cfg: Decoded user configuration for csc
* @csc_usr_ptr: Points to csc_cfg if valid user config available
* @csc_ptr: Points to sde_csc_cfg structure to use for current
@@ -129,8 +130,6 @@ enum sde_plane_sclcheck_state {
struct sde_plane {
struct drm_plane base;
- int mmu_id;
-
struct mutex lock;
enum sde_sspp pipe;
@@ -867,12 +866,62 @@ int sde_plane_wait_input_fence(struct drm_plane *plane, uint32_t wait_ms)
return ret;
}
+/**
+ * _sde_plane_get_aspace: gets the address space based on the
+ * fb_translation mode property
+ */
+static int _sde_plane_get_aspace(
+ struct sde_plane *psde,
+ struct sde_plane_state *pstate,
+ struct msm_gem_address_space **aspace)
+{
+ struct sde_kms *kms;
+ int mode;
+
+ if (!psde || !pstate || !aspace) {
+ SDE_ERROR("invalid parameters\n");
+ return -EINVAL;
+ }
+
+ kms = _sde_plane_get_kms(&psde->base);
+ if (!kms) {
+ SDE_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+
+ mode = sde_plane_get_property(pstate,
+ PLANE_PROP_FB_TRANSLATION_MODE);
+
+ switch (mode) {
+ case SDE_DRM_FB_NON_SEC:
+ *aspace = kms->aspace[MSM_SMMU_DOMAIN_UNSECURE];
+		if (!*aspace)
+ return -EINVAL;
+ break;
+ case SDE_DRM_FB_SEC:
+ *aspace = kms->aspace[MSM_SMMU_DOMAIN_SECURE];
+		if (!*aspace)
+ return -EINVAL;
+ break;
+ case SDE_DRM_FB_SEC_DIR_TRANS:
+ case SDE_DRM_FB_NON_SEC_DIR_TRANS:
+ *aspace = NULL;
+ break;
+ default:
+ SDE_ERROR("invalid fb_translation mode:%d\n", mode);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
static inline void _sde_plane_set_scanout(struct drm_plane *plane,
struct sde_plane_state *pstate,
struct sde_hw_pipe_cfg *pipe_cfg,
struct drm_framebuffer *fb)
{
struct sde_plane *psde;
+ struct msm_gem_address_space *aspace = NULL;
int ret;
if (!plane || !pstate || !pipe_cfg || !fb) {
@@ -888,7 +937,13 @@ static inline void _sde_plane_set_scanout(struct drm_plane *plane,
return;
}
- ret = sde_format_populate_layout(psde->mmu_id, fb, &pipe_cfg->layout);
+ ret = _sde_plane_get_aspace(psde, pstate, &aspace);
+ if (ret) {
+ SDE_ERROR_PLANE(psde, "Failed to get aspace %d\n", ret);
+ return;
+ }
+
+ ret = sde_format_populate_layout(aspace, fb, &pipe_cfg->layout);
if (ret == -EAGAIN)
SDE_DEBUG_PLANE(psde, "not updating same src addrs\n");
else if (ret)
@@ -1741,6 +1796,7 @@ static int sde_plane_rot_submit_command(struct drm_plane *plane,
struct drm_crtc_state *cstate;
struct sde_crtc_state *sde_cstate;
int ret, i;
+ int fb_mode;
if (!plane || !state || !state->fb || !rstate->rot_hw) {
SDE_ERROR("invalid parameters\n");
@@ -1764,7 +1820,14 @@ static int sde_plane_rot_submit_command(struct drm_plane *plane,
rot_cmd->rot90 = rstate->rot90;
rot_cmd->hflip = rstate->hflip;
rot_cmd->vflip = rstate->vflip;
- rot_cmd->secure = state->fb->flags & DRM_MODE_FB_SECURE ? true : false;
+ fb_mode = sde_plane_get_property(pstate,
+ PLANE_PROP_FB_TRANSLATION_MODE);
+ if ((fb_mode == SDE_DRM_FB_SEC) ||
+ (fb_mode == SDE_DRM_FB_SEC_DIR_TRANS))
+ rot_cmd->secure = true;
+ else
+ rot_cmd->secure = false;
+
rot_cmd->prefill_bw = sde_crtc_get_property(sde_cstate,
CRTC_PROP_ROT_PREFILL_BW);
rot_cmd->clkrate = sde_crtc_get_property(sde_cstate,
@@ -1801,7 +1864,7 @@ static int sde_plane_rot_submit_command(struct drm_plane *plane,
struct sde_hw_fmt_layout layout;
memset(&layout, 0, sizeof(struct sde_hw_fmt_layout));
- sde_format_populate_layout(rstate->mmu_id, state->fb,
+ sde_format_populate_layout(rstate->aspace, state->fb,
&layout);
for (i = 0; i < ARRAY_SIZE(rot_cmd->src_iova); i++) {
rot_cmd->src_iova[i] = layout.plane_addr[i];
@@ -1810,7 +1873,7 @@ static int sde_plane_rot_submit_command(struct drm_plane *plane,
rot_cmd->src_planes = layout.num_planes;
memset(&layout, 0, sizeof(struct sde_hw_fmt_layout));
- sde_format_populate_layout(rstate->mmu_id, rstate->out_fb,
+ sde_format_populate_layout(rstate->aspace, rstate->out_fb,
&layout);
for (i = 0; i < ARRAY_SIZE(rot_cmd->dst_iova); i++) {
rot_cmd->dst_iova[i] = layout.plane_addr[i];
@@ -2003,11 +2066,7 @@ static int sde_plane_rot_prepare_fb(struct drm_plane *plane,
SDE_DEBUG("plane%d.%d allocate fb/fbo\n", plane->base.id,
new_rstate->sequence_id);
-
- if (new_state->fb->flags & DRM_MODE_FB_SECURE)
- new_rstate->mmu_id = MSM_SMMU_DOMAIN_SECURE;
- else
- new_rstate->mmu_id = MSM_SMMU_DOMAIN_UNSECURE;
+ new_rstate->aspace = new_pstate->aspace;
/* check if out_fb is already attached to rotator */
new_rstate->out_fbo = sde_kms_fbo_alloc(plane->dev, fb_w, fb_h,
@@ -2046,7 +2105,7 @@ static int sde_plane_rot_prepare_fb(struct drm_plane *plane,
}
/* prepare rotator input buffer */
- ret = msm_framebuffer_prepare(new_state->fb, new_rstate->mmu_id);
+ ret = msm_framebuffer_prepare(new_state->fb, new_rstate->aspace);
if (ret) {
SDE_ERROR("failed to prepare input framebuffer\n");
goto error_prepare_input_buffer;
@@ -2058,7 +2117,7 @@ static int sde_plane_rot_prepare_fb(struct drm_plane *plane,
new_rstate->sequence_id);
ret = msm_framebuffer_prepare(new_rstate->out_fb,
- new_rstate->mmu_id);
+ new_rstate->aspace);
if (ret) {
SDE_ERROR("failed to prepare inline framebuffer\n");
goto error_prepare_output_buffer;
@@ -2068,7 +2127,7 @@ static int sde_plane_rot_prepare_fb(struct drm_plane *plane,
return 0;
error_prepare_output_buffer:
- msm_framebuffer_cleanup(new_state->fb, new_rstate->mmu_id);
+ msm_framebuffer_cleanup(new_state->fb, new_rstate->aspace);
error_prepare_input_buffer:
sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FB,
(u64) &new_rstate->rot_hw->base);
@@ -2124,7 +2183,7 @@ static void sde_plane_rot_cleanup_fb(struct drm_plane *plane,
if (sde_plane_enabled(old_state)) {
if (old_rstate->out_fb) {
msm_framebuffer_cleanup(old_rstate->out_fb,
- old_rstate->mmu_id);
+ old_rstate->aspace);
sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FB,
(u64) &old_rstate->rot_hw->base);
old_rstate->out_fb = NULL;
@@ -2133,7 +2192,7 @@ static void sde_plane_rot_cleanup_fb(struct drm_plane *plane,
old_rstate->out_fbo = NULL;
}
- msm_framebuffer_cleanup(old_state->fb, old_rstate->mmu_id);
+ msm_framebuffer_cleanup(old_state->fb, old_rstate->aspace);
}
}
@@ -2640,8 +2699,10 @@ static int sde_plane_prepare_fb(struct drm_plane *plane,
{
struct drm_framebuffer *fb = new_state->fb;
struct sde_plane *psde = to_sde_plane(plane);
+ struct sde_plane_state *pstate = to_sde_plane_state(new_state);
struct sde_plane_rot_state *new_rstate;
struct sde_hw_fmt_layout layout;
+ struct msm_gem_address_space *aspace;
int ret;
if (!new_state->fb)
@@ -2649,6 +2710,14 @@ static int sde_plane_prepare_fb(struct drm_plane *plane,
SDE_DEBUG_PLANE(psde, "FB[%u]\n", fb->base.id);
+ ret = _sde_plane_get_aspace(psde, pstate, &aspace);
+ if (ret) {
+ SDE_ERROR_PLANE(psde, "Failed to get aspace\n");
+ return ret;
+ }
+
+	/* cache aspace */
+ pstate->aspace = aspace;
ret = sde_plane_rot_prepare_fb(plane, new_state);
if (ret) {
SDE_ERROR("failed to prepare rot framebuffer\n");
@@ -2657,14 +2726,14 @@ static int sde_plane_prepare_fb(struct drm_plane *plane,
new_rstate = &to_sde_plane_state(new_state)->rot;
- ret = msm_framebuffer_prepare(new_rstate->out_fb, new_rstate->mmu_id);
+ ret = msm_framebuffer_prepare(new_rstate->out_fb, pstate->aspace);
if (ret) {
SDE_ERROR("failed to prepare framebuffer\n");
return ret;
}
/* validate framebuffer layout before commit */
- ret = sde_format_populate_layout(new_rstate->mmu_id,
+ ret = sde_format_populate_layout(pstate->aspace,
new_rstate->out_fb, &layout);
if (ret) {
SDE_ERROR_PLANE(psde, "failed to get format layout, %d\n", ret);
@@ -2687,7 +2756,7 @@ static void sde_plane_cleanup_fb(struct drm_plane *plane,
old_rstate = &to_sde_plane_state(old_state)->rot;
- msm_framebuffer_cleanup(old_rstate->out_fb, old_rstate->mmu_id);
+ msm_framebuffer_cleanup(old_rstate->out_fb, old_rstate->aspace);
sde_plane_rot_cleanup_fb(plane, old_state);
}
@@ -3134,7 +3203,7 @@ void sde_plane_flush(struct drm_plane *plane)
static int sde_plane_sspp_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- uint32_t nplanes, src_flags;
+ uint32_t nplanes, src_flags = 0x0;
struct sde_plane *psde;
struct drm_plane_state *state;
struct sde_plane_state *pstate;
@@ -3147,6 +3216,7 @@ static int sde_plane_sspp_atomic_update(struct drm_plane *plane,
const struct sde_rect *crtc_roi;
bool q16_data = true;
int idx;
+ int mode;
if (!plane) {
SDE_ERROR("invalid plane\n");
@@ -3228,6 +3298,9 @@ static int sde_plane_sspp_atomic_update(struct drm_plane *plane,
case PLANE_PROP_BLEND_OP:
/* no special action required */
break;
+ case PLANE_PROP_FB_TRANSLATION_MODE:
+ pstate->dirty |= SDE_PLANE_DIRTY_FB_TRANSLATION_MODE;
+ break;
case PLANE_PROP_PREFILL_SIZE:
case PLANE_PROP_PREFILL_TIME:
pstate->dirty |= SDE_PLANE_DIRTY_PERF;
@@ -3275,6 +3348,12 @@ static int sde_plane_sspp_atomic_update(struct drm_plane *plane,
psde->is_rt_pipe = (sde_crtc_get_client_type(crtc) != NRT_CLIENT);
_sde_plane_set_qos_ctrl(plane, false, SDE_PLANE_QOS_PANIC_CTRL);
+ /* update secure session flag */
+ mode = sde_plane_get_property(pstate, PLANE_PROP_FB_TRANSLATION_MODE);
+ if ((mode == SDE_DRM_FB_SEC) ||
+ (mode == SDE_DRM_FB_SEC_DIR_TRANS))
+ src_flags |= SDE_SSPP_SECURE_OVERLAY_SESSION;
+
/* update roi config */
if (pstate->dirty & SDE_PLANE_DIRTY_RECTS) {
POPULATE_RECT(&src, rstate->out_src_x, rstate->out_src_y,
@@ -3352,9 +3431,9 @@ static int sde_plane_sspp_atomic_update(struct drm_plane *plane,
pstate->multirect_mode);
}
- if ((pstate->dirty & SDE_PLANE_DIRTY_FORMAT) &&
+ if (((pstate->dirty & SDE_PLANE_DIRTY_FORMAT) ||
+ (src_flags & SDE_SSPP_SECURE_OVERLAY_SESSION)) &&
psde->pipe_hw->ops.setup_format) {
- src_flags = 0x0;
SDE_DEBUG_PLANE(psde, "rotation 0x%X\n", rstate->out_rotation);
if (rstate->out_rotation & DRM_REFLECT_X)
src_flags |= SDE_SSPP_FLIP_LR;
@@ -3504,6 +3583,12 @@ static void _sde_plane_install_properties(struct drm_plane *plane,
static const struct drm_prop_enum_list e_src_config[] = {
{SDE_DRM_DEINTERLACE, "deinterlace"}
};
+ static const struct drm_prop_enum_list e_fb_translation_mode[] = {
+ {SDE_DRM_FB_NON_SEC, "non_sec"},
+ {SDE_DRM_FB_SEC, "sec"},
+ {SDE_DRM_FB_NON_SEC_DIR_TRANS, "non_sec_direct_translation"},
+ {SDE_DRM_FB_SEC_DIR_TRANS, "sec_direct_translation"},
+ };
const struct sde_format_extended *format_list;
struct sde_format_extended *virt_format_list = NULL;
struct sde_kms_info *info;
@@ -3722,6 +3807,12 @@ static void _sde_plane_install_properties(struct drm_plane *plane,
msm_property_install_blob(&psde->property_info, feature_name, 0,
PLANE_PROP_FOLIAGE_COLOR);
}
+
+ msm_property_install_enum(&psde->property_info, "fb_translation_mode",
+ 0x0,
+ 0, e_fb_translation_mode,
+ ARRAY_SIZE(e_fb_translation_mode),
+ PLANE_PROP_FB_TRANSLATION_MODE);
}
static inline void _sde_plane_set_csc_v1(struct sde_plane *psde, void *usr_ptr)
@@ -4481,7 +4572,6 @@ struct drm_plane *sde_plane_init(struct drm_device *dev,
/* cache local stuff for later */
plane = &psde->base;
psde->pipe = pipe;
- psde->mmu_id = kms->mmu_id[MSM_SMMU_DOMAIN_UNSECURE];
psde->is_virtual = (master_plane_id != 0);
psde->scaler_check_state = SDE_PLANE_SCLCHECK_NONE;
INIT_LIST_HEAD(&psde->mplane_list);
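
[Editor's aside, not part of the diff: the plane now exposes an "fb_translation_mode" enum property ("non_sec", "sec", "non_sec_direct_translation", "sec_direct_translation") and _sde_plane_get_aspace() picks the secure or unsecure address space from it. A hedged userspace sketch of selecting the "sec" mode through libdrm; plane_id, prop_id and sec_value are assumed to have been discovered with drmModeObjectGetProperties()/drmModeGetProperty().]

	#include <stdint.h>
	#include <xf86drmMode.h>

	/* Hypothetical helper: sec_value is the enum value whose name is "sec"
	 * in the property's enum list; error handling is left to the caller. */
	static int example_mark_plane_secure(int fd, uint32_t plane_id,
					     uint32_t prop_id, uint64_t sec_value)
	{
		return drmModeObjectSetProperty(fd, plane_id,
						DRM_MODE_OBJECT_PLANE,
						prop_id, sec_value);
	}
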
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.h b/drivers/gpu/drm/msm/sde/sde_plane.h
index f83a891..ccbf005 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.h
+++ b/drivers/gpu/drm/msm/sde/sde_plane.h
@@ -34,7 +34,7 @@
* @rot90: true if rotation of 90 degree is required
* @hflip: true if horizontal flip is required
* @vflip: true if vertical flip is required
- * @mmu_id: iommu identifier for input/output buffers
+ * @aspace: pointer to address space for input/output buffers
* @rot_cmd: rotator configuration command
* @nplane: total number of drm plane attached to rotator
* @in_fb: input fb attached to rotator
@@ -64,7 +64,7 @@ struct sde_plane_rot_state {
bool rot90;
bool hflip;
bool vflip;
- u32 mmu_id;
+ struct msm_gem_address_space *aspace;
struct sde_hw_rot_cmd rot_cmd;
int nplane;
/* input */
@@ -96,6 +96,7 @@ struct sde_plane_rot_state {
#define SDE_PLANE_DIRTY_FORMAT 0x2
#define SDE_PLANE_DIRTY_SHARPEN 0x4
#define SDE_PLANE_DIRTY_PERF 0x8
+#define SDE_PLANE_DIRTY_FB_TRANSLATION_MODE 0x10
#define SDE_PLANE_DIRTY_ALL 0xFFFFFFFF
/**
@@ -103,6 +104,7 @@ struct sde_plane_rot_state {
* @base: base drm plane state object
* @property_values: cached plane property values
* @property_blobs: blob properties
+ * @aspace: pointer to address space for input/output buffers
* @input_fence: dereferenced input fence pointer
* @stage: assigned by crtc blender
* @excl_rect: exclusion rect values
@@ -116,6 +118,7 @@ struct sde_plane_state {
struct drm_plane_state base;
uint64_t property_values[PLANE_PROP_COUNT];
struct drm_property_blob *property_blobs[PLANE_PROP_BLOBCOUNT];
+ struct msm_gem_address_space *aspace;
void *input_fence;
enum sde_stage stage;
struct sde_rect excl_rect;
diff --git a/drivers/gpu/drm/msm/sde_edid_parser.c b/drivers/gpu/drm/msm/sde_edid_parser.c
index 12165e8..db36069 100644
--- a/drivers/gpu/drm/msm/sde_edid_parser.c
+++ b/drivers/gpu/drm/msm/sde_edid_parser.c
@@ -484,6 +484,40 @@ int _sde_edid_update_modes(struct drm_connector *connector,
return rc;
}
+u32 sde_get_sink_bpc(void *input)
+{
+ struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(input);
+ struct edid *edid = edid_ctrl->edid;
+
+ if ((edid->revision < 3) || !(edid->input & DRM_EDID_INPUT_DIGITAL))
+ return 0;
+
+ if (edid->revision < 4) {
+ if (edid->input & DRM_EDID_DIGITAL_TYPE_DVI)
+ return 8;
+ else
+ return 0;
+ }
+
+ switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) {
+ case DRM_EDID_DIGITAL_DEPTH_6:
+ return 6;
+ case DRM_EDID_DIGITAL_DEPTH_8:
+ return 8;
+ case DRM_EDID_DIGITAL_DEPTH_10:
+ return 10;
+ case DRM_EDID_DIGITAL_DEPTH_12:
+ return 12;
+ case DRM_EDID_DIGITAL_DEPTH_14:
+ return 14;
+ case DRM_EDID_DIGITAL_DEPTH_16:
+ return 16;
+ case DRM_EDID_DIGITAL_DEPTH_UNDEF:
+ default:
+ return 0;
+ }
+}
+
bool sde_detect_hdmi_monitor(void *input)
{
struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(input);
diff --git a/drivers/gpu/drm/msm/sde_edid_parser.h b/drivers/gpu/drm/msm/sde_edid_parser.h
index 1143dc2..eb68439 100644
--- a/drivers/gpu/drm/msm/sde_edid_parser.h
+++ b/drivers/gpu/drm/msm/sde_edid_parser.h
@@ -136,6 +136,14 @@ void sde_free_edid(void **edid_ctrl);
bool sde_detect_hdmi_monitor(void *edid_ctrl);
/**
+ * sde_get_sink_bpc() - return the bpc of sink device.
+ * @edid_ctrl: Handle to the edid_ctrl structure.
+ *
+ * Return: bpc supported by the sink.
+ */
+u32 sde_get_sink_bpc(void *edid_ctrl);
+
+/**
* _sde_edid_update_modes() - populate EDID modes.
* @edid_ctrl: Handle to the edid_ctrl structure.
*
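
[Editor's aside, not part of the diff: sde_get_sink_bpc() only parses the EDID input descriptor and no caller appears in the hunks above. A hypothetical consumer, e.g. a sink-capability check before enabling a deep-color output format:]

	/* returns true if the sink advertises at least 10 bits per color;
	 * 0 from sde_get_sink_bpc() means no usable digital bit depth was found */
	static bool example_sink_supports_deep_color(void *edid_ctrl)
	{
		return sde_get_sink_bpc(edid_ctrl) >= 10;
	}
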
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index afbf557..2c2b86d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -24,6 +24,7 @@
*
*/
+#include <acpi/video.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
@@ -358,6 +359,57 @@ static struct nouveau_drm_prop_enum_list dither_depth[] = {
} \
} while(0)
+static void
+nouveau_display_hpd_work(struct work_struct *work)
+{
+ struct nouveau_drm *drm = container_of(work, typeof(*drm), hpd_work);
+
+ pm_runtime_get_sync(drm->dev->dev);
+
+ drm_helper_hpd_irq_event(drm->dev);
+ /* enable polling for external displays */
+ drm_kms_helper_poll_enable(drm->dev);
+
+ pm_runtime_mark_last_busy(drm->dev->dev);
+ pm_runtime_put_sync(drm->dev->dev);
+}
+
+#ifdef CONFIG_ACPI
+
+/*
+ * Hans de Goede: This define belongs in acpi/video.h, I've submitted a patch
+ * to the acpi subsys to move it there from drivers/acpi/acpi_video.c .
+ * This should be dropped once that is merged.
+ */
+#ifndef ACPI_VIDEO_NOTIFY_PROBE
+#define ACPI_VIDEO_NOTIFY_PROBE 0x81
+#endif
+
+static int
+nouveau_display_acpi_ntfy(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct nouveau_drm *drm = container_of(nb, typeof(*drm), acpi_nb);
+ struct acpi_bus_event *info = data;
+
+ if (!strcmp(info->device_class, ACPI_VIDEO_CLASS)) {
+ if (info->type == ACPI_VIDEO_NOTIFY_PROBE) {
+ /*
+ * This may be the only indication we receive of a
+ * connector hotplug on a runtime suspended GPU,
+ * schedule hpd_work to check.
+ */
+ schedule_work(&drm->hpd_work);
+
+ /* acpi-video should not generate keypresses for this */
+ return NOTIFY_BAD;
+ }
+ }
+
+ return NOTIFY_DONE;
+}
+#endif
+
int
nouveau_display_init(struct drm_device *dev)
{
@@ -370,9 +422,6 @@ nouveau_display_init(struct drm_device *dev)
if (ret)
return ret;
- /* enable polling for external displays */
- drm_kms_helper_poll_enable(dev);
-
/* enable hotplug interrupts */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct nouveau_connector *conn = nouveau_connector(connector);
@@ -537,6 +586,12 @@ nouveau_display_create(struct drm_device *dev)
}
nouveau_backlight_init(dev);
+ INIT_WORK(&drm->hpd_work, nouveau_display_hpd_work);
+#ifdef CONFIG_ACPI
+ drm->acpi_nb.notifier_call = nouveau_display_acpi_ntfy;
+ register_acpi_notifier(&drm->acpi_nb);
+#endif
+
return 0;
vblank_err:
@@ -552,6 +607,9 @@ nouveau_display_destroy(struct drm_device *dev)
{
struct nouveau_display *disp = nouveau_display(dev);
+#ifdef CONFIG_ACPI
+ unregister_acpi_notifier(&nouveau_drm(dev)->acpi_nb);
+#endif
nouveau_backlight_exit(dev);
nouveau_display_vblank_fini(dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 3100fd88..42829a9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -483,6 +483,9 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
pm_runtime_allow(dev->dev);
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put(dev->dev);
+ } else {
+ /* enable polling for external displays */
+ drm_kms_helper_poll_enable(dev);
}
return 0;
@@ -761,7 +764,7 @@ nouveau_pmops_runtime_resume(struct device *dev)
pci_set_master(pdev);
ret = nouveau_do_resume(drm_dev, true);
- drm_kms_helper_poll_enable(drm_dev);
+
/* do magic */
nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 822a021..1e7f1e3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -37,6 +37,8 @@
* - implemented limited ABI16/NVIF interop
*/
+#include <linux/notifier.h>
+
#include <nvif/client.h>
#include <nvif/device.h>
#include <nvif/ioctl.h>
@@ -161,6 +163,12 @@ struct nouveau_drm {
struct nvbios vbios;
struct nouveau_display *display;
struct backlight_device *backlight;
+ struct work_struct hpd_work;
+ struct work_struct fbcon_work;
+ int fbcon_new_state;
+#ifdef CONFIG_ACPI
+ struct notifier_block acpi_nb;
+#endif
/* power management */
struct nouveau_hwmon *hwmon;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 9f56927..2b79e27 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -491,19 +491,43 @@ static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
.fb_probe = nouveau_fbcon_create,
};
+static void
+nouveau_fbcon_set_suspend_work(struct work_struct *work)
+{
+ struct nouveau_drm *drm = container_of(work, typeof(*drm), fbcon_work);
+ int state = READ_ONCE(drm->fbcon_new_state);
+
+ if (state == FBINFO_STATE_RUNNING)
+ pm_runtime_get_sync(drm->dev->dev);
+
+ console_lock();
+ if (state == FBINFO_STATE_RUNNING)
+ nouveau_fbcon_accel_restore(drm->dev);
+ drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
+ if (state != FBINFO_STATE_RUNNING)
+ nouveau_fbcon_accel_save_disable(drm->dev);
+ console_unlock();
+
+ if (state == FBINFO_STATE_RUNNING) {
+ pm_runtime_mark_last_busy(drm->dev->dev);
+ pm_runtime_put_sync(drm->dev->dev);
+ }
+}
+
void
nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- if (drm->fbcon) {
- console_lock();
- if (state == FBINFO_STATE_RUNNING)
- nouveau_fbcon_accel_restore(dev);
- drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
- if (state != FBINFO_STATE_RUNNING)
- nouveau_fbcon_accel_save_disable(dev);
- console_unlock();
- }
+
+ if (!drm->fbcon)
+ return;
+
+ drm->fbcon_new_state = state;
+ /* Since runtime resume can happen as a result of a sysfs operation,
+ * it's possible we already have the console locked. So handle fbcon
+	 * init/deinit from a separate work thread
+ */
+ schedule_work(&drm->fbcon_work);
}
int
@@ -524,6 +548,7 @@ nouveau_fbcon_init(struct drm_device *dev)
fbcon->dev = dev;
drm->fbcon = fbcon;
+ INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work);
drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 64c4ce7..75e1f09 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -100,6 +100,7 @@ struct nv84_fence_priv {
struct nouveau_bo *bo;
struct nouveau_bo *bo_gart;
u32 *suspend;
+ struct mutex mutex;
};
u64 nv84_fence_crtc(struct nouveau_channel *, int);
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c
index 08f9c6f..1fba386 100644
--- a/drivers/gpu/drm/nouveau/nouveau_usif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_usif.c
@@ -313,7 +313,8 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
if (!(ret = nvif_unpack(-ENOSYS, &data, &size, argv->v0, 0, 0, true))) {
/* block access to objects not created via this interface */
owner = argv->v0.owner;
- if (argv->v0.object == 0ULL)
+ if (argv->v0.object == 0ULL &&
+ argv->v0.type != NVIF_IOCTL_V0_DEL)
argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */
else
argv->v0.owner = NVDRM_OBJECT_USIF;
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 18bde9d..90a5dd6 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -121,8 +121,10 @@ nv84_fence_context_del(struct nouveau_channel *chan)
}
nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence);
+ mutex_lock(&priv->mutex);
nouveau_bo_vma_del(priv->bo, &fctx->vma_gart);
nouveau_bo_vma_del(priv->bo, &fctx->vma);
+ mutex_unlock(&priv->mutex);
nouveau_fence_context_del(&fctx->base);
chan->fence = NULL;
nouveau_fence_context_free(&fctx->base);
@@ -148,11 +150,13 @@ nv84_fence_context_new(struct nouveau_channel *chan)
fctx->base.sync32 = nv84_fence_sync32;
fctx->base.sequence = nv84_fence_read(chan);
+ mutex_lock(&priv->mutex);
ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma);
if (ret == 0) {
ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm,
&fctx->vma_gart);
}
+ mutex_unlock(&priv->mutex);
/* map display semaphore buffers into channel's vm */
for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
@@ -232,6 +236,8 @@ nv84_fence_create(struct nouveau_drm *drm)
priv->base.context_base = fence_context_alloc(priv->base.contexts);
priv->base.uevent = true;
+ mutex_init(&priv->mutex);
+
/* Use VRAM if there is any ; otherwise fallback to system memory */
domain = drm->device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM :
/*
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 432480f..3178ba0 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -3393,6 +3393,13 @@ void radeon_combios_asic_init(struct drm_device *dev)
rdev->pdev->subsystem_vendor == 0x103c &&
rdev->pdev->subsystem_device == 0x280a)
return;
+	/* quirk for rs4xx Toshiba Satellite L20-183 laptop to make it resume
+ * - it hangs on resume inside the dynclk 1 table.
+ */
+ if (rdev->family == CHIP_RS400 &&
+ rdev->pdev->subsystem_vendor == 0x1179 &&
+ rdev->pdev->subsystem_device == 0xff31)
+ return;
/* DYN CLK 1 */
table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 621af06..3b21ca5 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -136,6 +136,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = {
* https://bugzilla.kernel.org/show_bug.cgi?id=51381
*/
{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
+ /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
+ * https://bugs.freedesktop.org/show_bug.cgi?id=101491
+ */
+ { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
/* macbook pro 8.2 */
{ PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
{ 0, 0, 0, 0, 0 },
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 3f6704c..ec9023b 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -313,6 +313,14 @@ void vc4_free_object(struct drm_gem_object *gem_bo)
goto out;
}
+ /* If this object was partially constructed but CMA allocation
+ * had failed, just free it.
+ */
+ if (!bo->base.vaddr) {
+ vc4_bo_destroy(bo);
+ goto out;
+ }
+
cache_list = vc4_get_cache_list_for_size(dev, gem_bo->size);
if (!cache_list) {
vc4_bo_destroy(bo);
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 73c0d71..c02046a 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -34,6 +34,8 @@
#include "kgsl_trace.h"
#include "kgsl_pwrctrl.h"
+#define CP_APERTURE_REG 0
+
#define _IOMMU_PRIV(_mmu) (&((_mmu)->priv.iommu))
#define ADDR_IN_GLOBAL(_a) \
@@ -1220,6 +1222,19 @@ void _enable_gpuhtw_llc(struct kgsl_mmu *mmu, struct kgsl_iommu_pt *iommu_pt)
"System cache not enabled for GPU pagetable walks: %d\n", ret);
}
+static int program_smmu_aperture(unsigned int cb, unsigned int aperture_reg)
+{
+ struct scm_desc desc = {0};
+
+ desc.args[0] = 0xFFFF0000 | ((aperture_reg & 0xff) << 8) | (cb & 0xff);
+ desc.args[1] = 0xFFFFFFFF;
+ desc.args[2] = 0xFFFFFFFF;
+ desc.args[3] = 0xFFFFFFFF;
+ desc.arginfo = SCM_ARGS(4);
+
+ return scm_call2(SCM_SIP_FNID(SCM_SVC_MP, 0x1B), &desc);
+}
+
static int _init_global_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
{
int ret = 0;
@@ -1260,6 +1275,15 @@ static int _init_global_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
goto done;
}
+ if (!MMU_FEATURE(mmu, KGSL_MMU_GLOBAL_PAGETABLE)) {
+ ret = program_smmu_aperture(cb_num, CP_APERTURE_REG);
+ if (ret) {
+ pr_err("SMMU aperture programming call failed with error %d\n",
+ ret);
+ return ret;
+ }
+ }
+
ctx->cb_num = cb_num;
ctx->regbase = iommu->regbase + KGSL_IOMMU_CB0_OFFSET
+ (cb_num << KGSL_IOMMU_CB_SHIFT);
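
[Editor's aside, not part of the diff: program_smmu_aperture() packs the context bank into the low byte of the first SCM argument and the aperture register index into the next byte. For example, with cb_num == 1 and CP_APERTURE_REG == 0, the call above sends desc.args[0] = 0xFFFF0000 | (0 << 8) | 1 = 0xFFFF0001, with the remaining arguments left as the 0xFFFFFFFF fill values.]
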
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index da93077..cfca43f 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -311,6 +311,9 @@
#define USB_VENDOR_ID_DELCOM 0x0fc5
#define USB_DEVICE_ID_DELCOM_VISUAL_IND 0xb080
+#define USB_VENDOR_ID_DELL 0x413c
+#define USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE 0x301a
+
#define USB_VENDOR_ID_DELORME 0x1163
#define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100
#define USB_DEVICE_ID_DELORME_EM_LT20 0x0200
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 97dbb25..2b16207 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -81,6 +81,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_STRAFE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index e34d82e..c21ca7b 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -58,7 +58,7 @@
#define SMBSLVDAT (0xC + piix4_smba)
/* count for request_region */
-#define SMBIOSIZE 8
+#define SMBIOSIZE 9
/* PCI Address Constants */
#define SMBBA 0x090
@@ -592,6 +592,8 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
u8 port;
int retval;
+ mutex_lock(&piix4_mutex_sb800);
+
/* Request the SMBUS semaphore, avoid conflicts with the IMC */
smbslvcnt = inb_p(SMBSLVCNT);
do {
@@ -605,10 +607,10 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
usleep_range(1000, 2000);
} while (--retries);
/* SMBus is still owned by the IMC, we give up */
- if (!retries)
+ if (!retries) {
+ mutex_unlock(&piix4_mutex_sb800);
return -EBUSY;
-
- mutex_lock(&piix4_mutex_sb800);
+ }
outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX);
smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
@@ -623,11 +625,11 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
outb_p(smba_en_lo, SB800_PIIX4_SMB_IDX + 1);
- mutex_unlock(&piix4_mutex_sb800);
-
/* Release the semaphore */
outb_p(smbslvcnt | 0x20, SMBSLVCNT);
+ mutex_unlock(&piix4_mutex_sb800);
+
return retval;
}
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index 2de1f52..62b0dec 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -484,7 +484,7 @@ static int tiadc_probe(struct platform_device *pdev)
return -EINVAL;
}
- indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*indio_dev));
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc_dev));
if (indio_dev == NULL) {
dev_err(&pdev->dev, "failed to allocate iio device\n");
return -ENOMEM;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index b9fcbf1..5faea37 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -41,6 +41,7 @@ static const int accel_scale[] = {598, 1196, 2392, 4785};
static const struct inv_mpu6050_reg_map reg_set_6500 = {
.sample_rate_div = INV_MPU6050_REG_SAMPLE_RATE_DIV,
.lpf = INV_MPU6050_REG_CONFIG,
+ .accel_lpf = INV_MPU6500_REG_ACCEL_CONFIG_2,
.user_ctrl = INV_MPU6050_REG_USER_CTRL,
.fifo_en = INV_MPU6050_REG_FIFO_EN,
.gyro_config = INV_MPU6050_REG_GYRO_CONFIG,
@@ -205,6 +206,37 @@ int inv_mpu6050_set_power_itg(struct inv_mpu6050_state *st, bool power_on)
EXPORT_SYMBOL_GPL(inv_mpu6050_set_power_itg);
/**
+ * inv_mpu6050_set_lpf_regs() - set low pass filter registers, chip dependent
+ *
+ * MPU60xx/MPU9150 use only 1 register for accelerometer + gyroscope
+ * MPU6500 and above have a dedicated register for accelerometer
+ */
+static int inv_mpu6050_set_lpf_regs(struct inv_mpu6050_state *st,
+ enum inv_mpu6050_filter_e val)
+{
+ int result;
+
+ result = regmap_write(st->map, st->reg->lpf, val);
+ if (result)
+ return result;
+
+ switch (st->chip_type) {
+ case INV_MPU6050:
+ case INV_MPU6000:
+ case INV_MPU9150:
+ /* old chips, nothing to do */
+ result = 0;
+ break;
+ default:
+ /* set accel lpf */
+ result = regmap_write(st->map, st->reg->accel_lpf, val);
+ break;
+ }
+
+ return result;
+}
+
+/**
* inv_mpu6050_init_config() - Initialize hardware, disable FIFO.
*
* Initial configuration:
@@ -227,8 +259,7 @@ static int inv_mpu6050_init_config(struct iio_dev *indio_dev)
if (result)
return result;
- d = INV_MPU6050_FILTER_20HZ;
- result = regmap_write(st->map, st->reg->lpf, d);
+ result = inv_mpu6050_set_lpf_regs(st, INV_MPU6050_FILTER_20HZ);
if (result)
return result;
@@ -531,6 +562,8 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
 * would be aliasing. This function basically searches for the
 * correct low pass parameters based on the fifo rate, e.g.,
* sampling frequency.
+ *
+ * lpf is set automatically when setting sampling rate to avoid any aliasing.
*/
static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate)
{
@@ -546,7 +579,7 @@ static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate)
while ((h < hz[i]) && (i < ARRAY_SIZE(d) - 1))
i++;
data = d[i];
- result = regmap_write(st->map, st->reg->lpf, data);
+ result = inv_mpu6050_set_lpf_regs(st, data);
if (result)
return result;
st->chip_config.lpf = data;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index f0e8c5d..d851581 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -28,6 +28,7 @@
* struct inv_mpu6050_reg_map - Notable registers.
* @sample_rate_div: Divider applied to gyro output rate.
* @lpf: Configures internal low pass filter.
+ * @accel_lpf: Configures accelerometer low pass filter.
* @user_ctrl: Enables/resets the FIFO.
* @fifo_en: Determines which data will appear in FIFO.
* @gyro_config: gyro config register.
@@ -47,6 +48,7 @@
struct inv_mpu6050_reg_map {
u8 sample_rate_div;
u8 lpf;
+ u8 accel_lpf;
u8 user_ctrl;
u8 fifo_en;
u8 gyro_config;
@@ -187,6 +189,7 @@ struct inv_mpu6050_state {
#define INV_MPU6050_FIFO_THRESHOLD 500
/* mpu6500 registers */
+#define INV_MPU6500_REG_ACCEL_CONFIG_2 0x1D
#define INV_MPU6500_REG_ACCEL_OFFSET 0x77
/* delay time in milliseconds */
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index 55df9a7..44e46c1 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -227,7 +227,7 @@ static const struct iio_chan_spec st_press_1_channels[] = {
.address = ST_PRESS_1_OUT_XL_ADDR,
.scan_index = 0,
.scan_type = {
- .sign = 'u',
+ .sign = 's',
.realbits = 24,
.storagebits = 32,
.endianness = IIO_LE,
@@ -240,7 +240,7 @@ static const struct iio_chan_spec st_press_1_channels[] = {
.address = ST_TEMP_1_OUT_L_ADDR,
.scan_index = 1,
.scan_type = {
- .sign = 'u',
+ .sign = 's',
.realbits = 16,
.storagebits = 16,
.endianness = IIO_LE,
@@ -259,7 +259,7 @@ static const struct iio_chan_spec st_press_lps001wp_channels[] = {
.address = ST_PRESS_LPS001WP_OUT_L_ADDR,
.scan_index = 0,
.scan_type = {
- .sign = 'u',
+ .sign = 's',
.realbits = 16,
.storagebits = 16,
.endianness = IIO_LE,
@@ -273,7 +273,7 @@ static const struct iio_chan_spec st_press_lps001wp_channels[] = {
.address = ST_TEMP_LPS001WP_OUT_L_ADDR,
.scan_index = 1,
.scan_type = {
- .sign = 'u',
+ .sign = 's',
.realbits = 16,
.storagebits = 16,
.endianness = IIO_LE,
@@ -291,7 +291,7 @@ static const struct iio_chan_spec st_press_lps22hb_channels[] = {
.address = ST_PRESS_1_OUT_XL_ADDR,
.scan_index = 0,
.scan_type = {
- .sign = 'u',
+ .sign = 's',
.realbits = 24,
.storagebits = 32,
.endianness = IIO_LE,
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index 268210e..24fb543 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -269,8 +269,6 @@ static irqreturn_t as3935_interrupt_handler(int irq, void *private)
static void calibrate_as3935(struct as3935_state *st)
{
- mutex_lock(&st->lock);
-
/* mask disturber interrupt bit */
as3935_write(st, AS3935_INT, BIT(5));
@@ -280,8 +278,6 @@ static void calibrate_as3935(struct as3935_state *st)
mdelay(2);
as3935_write(st, AS3935_TUNE_CAP, (st->tune_cap / TUNE_CAP_DIV));
-
- mutex_unlock(&st->lock);
}
#ifdef CONFIG_PM_SLEEP
@@ -318,6 +314,8 @@ static int as3935_resume(struct device *dev)
val &= ~AS3935_AFE_PWR_BIT;
ret = as3935_write(st, AS3935_AFE_GAIN, val);
+ calibrate_as3935(st);
+
err_resume:
mutex_unlock(&st->lock);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 11bfa27..282c9fb 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1105,13 +1105,13 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
* pretend we don't support reading the HCA's core clock. This is also
* forced by mmap function.
*/
- if (PAGE_SIZE <= 4096 &&
- field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
- resp.comp_mask |=
- MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
- resp.hca_core_clock_offset =
- offsetof(struct mlx5_init_seg, internal_timer_h) %
- PAGE_SIZE;
+ if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
+ if (PAGE_SIZE <= 4096) {
+ resp.comp_mask |=
+ MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
+ resp.hca_core_clock_offset =
+ offsetof(struct mlx5_init_seg, internal_timer_h) % PAGE_SIZE;
+ }
resp.response_length += sizeof(resp.hca_core_clock_offset) +
sizeof(resp.reserved2);
}
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 7b74d09..58e92bc 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -792,6 +792,9 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
if (device_create_file(&dev->ibdev.dev, qedr_attributes[i]))
goto sysfs_err;
+ if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+ qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
+
DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
return dev;
@@ -824,11 +827,10 @@ static void qedr_remove(struct qedr_dev *dev)
ib_dealloc_device(&dev->ibdev);
}
-static int qedr_close(struct qedr_dev *dev)
+static void qedr_close(struct qedr_dev *dev)
{
- qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
-
- return 0;
+ if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+ qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR);
}
static void qedr_shutdown(struct qedr_dev *dev)
@@ -837,6 +839,12 @@ static void qedr_shutdown(struct qedr_dev *dev)
qedr_remove(dev);
}
+static void qedr_open(struct qedr_dev *dev)
+{
+ if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+ qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
+}
+
static void qedr_mac_address_change(struct qedr_dev *dev)
{
union ib_gid *sgid = &dev->sgid_tbl[0];
@@ -863,7 +871,7 @@ static void qedr_mac_address_change(struct qedr_dev *dev)
ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);
- qedr_ib_dispatch_event(dev, 1, IB_EVENT_GID_CHANGE);
+ qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE);
if (rc)
DP_ERR(dev, "Error updating mac filter\n");
@@ -877,7 +885,7 @@ static void qedr_notify(struct qedr_dev *dev, enum qede_roce_event event)
{
switch (event) {
case QEDE_UP:
- qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
+ qedr_open(dev);
break;
case QEDE_DOWN:
qedr_close(dev);
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index 620badd..f669d0b 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -113,6 +113,8 @@ struct qedr_device_attr {
struct qed_rdma_events events;
};
+#define QEDR_ENET_STATE_BIT (0)
+
struct qedr_dev {
struct ib_device ibdev;
struct qed_dev *cdev;
@@ -153,6 +155,8 @@ struct qedr_dev {
struct qedr_cq *gsi_sqcq;
struct qedr_cq *gsi_rqcq;
struct qedr_qp *gsi_qp;
+
+ unsigned long enet_state;
};
#define QEDR_MAX_SQ_PBL (0x8000)
@@ -188,6 +192,7 @@ struct qedr_dev {
#define QEDR_ROCE_MAX_CNQ_SIZE (0x4000)
#define QEDR_MAX_PORT (1)
+#define QEDR_PORT (1)
#define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index a615142..4ba019e 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -471,8 +471,6 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
struct ib_ucontext *context, struct ib_udata *udata)
{
struct qedr_dev *dev = get_qedr_dev(ibdev);
- struct qedr_ucontext *uctx = NULL;
- struct qedr_alloc_pd_uresp uresp;
struct qedr_pd *pd;
u16 pd_id;
int rc;
@@ -489,21 +487,33 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
if (!pd)
return ERR_PTR(-ENOMEM);
- dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
+ rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
+ if (rc)
+ goto err;
- uresp.pd_id = pd_id;
pd->pd_id = pd_id;
if (udata && context) {
+ struct qedr_alloc_pd_uresp uresp;
+
+ uresp.pd_id = pd_id;
+
rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
- if (rc)
+ if (rc) {
DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
- uctx = get_qedr_ucontext(context);
- uctx->pd = pd;
- pd->uctx = uctx;
+ dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
+ goto err;
+ }
+
+ pd->uctx = get_qedr_ucontext(context);
+ pd->uctx->pd = pd;
}
return &pd->ibpd;
+
+err:
+ kfree(pd);
+ return ERR_PTR(rc);
}
int qedr_dealloc_pd(struct ib_pd *ibpd)
@@ -1719,6 +1729,14 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
/* ERR->XXX */
switch (new_state) {
case QED_ROCE_QP_STATE_RESET:
+ if ((qp->rq.prod != qp->rq.cons) ||
+ (qp->sq.prod != qp->sq.cons)) {
+ DP_NOTICE(dev,
+ "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
+ qp->rq.prod, qp->rq.cons, qp->sq.prod,
+ qp->sq.cons);
+ status = -EINVAL;
+ }
break;
default:
status = -EINVAL;
@@ -2014,7 +2032,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
qp_attr->cap.max_recv_wr = qp->rq.max_wr;
qp_attr->cap.max_send_sge = qp->sq.max_sges;
qp_attr->cap.max_recv_sge = qp->rq.max_sges;
- qp_attr->cap.max_inline_data = qp->max_inline_data;
+ qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
qp_init_attr->cap = qp_attr->cap;
 memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0],
@@ -3220,9 +3238,10 @@ static int qedr_poll_cq_req(struct qedr_dev *dev,
IB_WC_SUCCESS, 0);
break;
case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
- DP_ERR(dev,
- "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
- cq->icid, qp->icid);
+ if (qp->state != QED_ROCE_QP_STATE_ERR)
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
IB_WC_WR_FLUSH_ERR, 0);
break;
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index e7b96f1..5be14ad 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -788,6 +788,13 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"),
},
},
+ {
+ /* Fujitsu UH554 laptop */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK UH544"),
+ },
+ },
{ }
};
diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c
index bb3ac5f..72a391e 100644
--- a/drivers/irqchip/irq-xtensa-mx.c
+++ b/drivers/irqchip/irq-xtensa-mx.c
@@ -142,7 +142,7 @@ static struct irq_chip xtensa_mx_irq_chip = {
int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent)
{
struct irq_domain *root_domain =
- irq_domain_add_legacy(NULL, NR_IRQS, 0, 0,
+ irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
&xtensa_mx_irq_domain_ops,
&xtensa_mx_irq_chip);
irq_set_default_host(root_domain);
diff --git a/drivers/irqchip/irq-xtensa-pic.c b/drivers/irqchip/irq-xtensa-pic.c
index 472ae17..f728755 100644
--- a/drivers/irqchip/irq-xtensa-pic.c
+++ b/drivers/irqchip/irq-xtensa-pic.c
@@ -89,7 +89,7 @@ static struct irq_chip xtensa_irq_chip = {
int __init xtensa_pic_init_legacy(struct device_node *interrupt_parent)
{
struct irq_domain *root_domain =
- irq_domain_add_legacy(NULL, NR_IRQS, 0, 0,
+ irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
&xtensa_irq_domain_ops, &xtensa_irq_chip);
irq_set_default_host(root_domain);
return 0;
diff --git a/drivers/media/platform/msm/camera/Makefile b/drivers/media/platform/msm/camera/Makefile
index 2e71d05..99bd263 100644
--- a/drivers/media/platform/msm/camera/Makefile
+++ b/drivers/media/platform/msm/camera/Makefile
@@ -7,4 +7,4 @@
obj-$(CONFIG_SPECTRA_CAMERA) += cam_cdm/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_isp/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_module/
-obj-$(CONFIG_SPECTRA_CAMERA) += icp/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_icp/
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c
index 1105d2c..6009c25 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c
@@ -669,7 +669,8 @@ int cam_hw_cdm_init(void *hw_priv,
soc_info = &cdm_hw->soc_info;
cdm_core = (struct cam_cdm *)cdm_hw->core_info;
- rc = cam_soc_util_enable_platform_resource(soc_info, true, true);
+ rc = cam_soc_util_enable_platform_resource(soc_info, true,
+ CAM_SVS_VOTE, true);
if (rc) {
pr_err("Enable platform failed\n");
goto end;
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.c b/drivers/media/platform/msm/camera/cam_core/cam_context.c
index 17b3c7c..fac8900 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context.c
@@ -34,7 +34,7 @@ static int cam_context_handle_hw_event(void *context, uint32_t evt_id,
return rc;
}
-int cam_context_handle_get_dev_info(struct cam_context *ctx,
+int cam_context_handle_crm_get_dev_info(struct cam_context *ctx,
struct cam_req_mgr_device_info *info)
{
int rc;
@@ -63,7 +63,7 @@ int cam_context_handle_get_dev_info(struct cam_context *ctx,
return rc;
}
-int cam_context_handle_link(struct cam_context *ctx,
+int cam_context_handle_crm_link(struct cam_context *ctx,
struct cam_req_mgr_core_dev_link_setup *link)
{
int rc;
@@ -91,7 +91,7 @@ int cam_context_handle_link(struct cam_context *ctx,
return rc;
}
-int cam_context_handle_unlink(struct cam_context *ctx,
+int cam_context_handle_crm_unlink(struct cam_context *ctx,
struct cam_req_mgr_core_dev_link_setup *unlink)
{
int rc;
@@ -120,7 +120,7 @@ int cam_context_handle_unlink(struct cam_context *ctx,
return rc;
}
-int cam_context_handle_apply_req(struct cam_context *ctx,
+int cam_context_handle_crm_apply_req(struct cam_context *ctx,
struct cam_req_mgr_apply_request *apply)
{
int rc;
@@ -149,6 +149,29 @@ int cam_context_handle_apply_req(struct cam_context *ctx,
return rc;
}
+int cam_context_handle_crm_flush_req(struct cam_context *ctx,
+ struct cam_req_mgr_flush_request *flush)
+{
+ int rc;
+
+ if (!ctx->state_machine) {
+ pr_err("%s: Context is not ready\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&ctx->ctx_mutex);
+ if (ctx->state_machine[ctx->state].crm_ops.flush_req) {
+ rc = ctx->state_machine[ctx->state].crm_ops.flush_req(ctx,
+ flush);
+ } else {
+ pr_err("%s: No crm flush req in dev %d, state %d\n",
+ __func__, ctx->dev_hdl, ctx->state);
+ rc = -EPROTO;
+ }
+ mutex_unlock(&ctx->ctx_mutex);
+
+ return rc;
+}
int cam_context_handle_acquire_dev(struct cam_context *ctx,
struct cam_acquire_dev_cmd *cmd)
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.h b/drivers/media/platform/msm/camera/cam_core/cam_context.h
index 37a5c03..7f0fb7f 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context.h
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context.h
@@ -100,6 +100,7 @@ struct cam_ctx_ioctl_ops {
* @link: Link the context
* @unlink: Unlink the context
* @apply_req: Apply setting for the context
+ * @flush_req: Flush request to remove request ids
*
*/
struct cam_ctx_crm_ops {
@@ -111,6 +112,8 @@ struct cam_ctx_crm_ops {
struct cam_req_mgr_core_dev_link_setup *unlink);
int (*apply_req)(struct cam_context *ctx,
struct cam_req_mgr_apply_request *apply);
+ int (*flush_req)(struct cam_context *ctx,
+ struct cam_req_mgr_flush_request *flush);
};
@@ -182,7 +185,7 @@ struct cam_context {
};
/**
- * cam_context_handle_get_dev_info()
+ * cam_context_handle_crm_get_dev_info()
*
* @brief: Handle get device information command
*
@@ -190,11 +193,11 @@ struct cam_context {
* @info: Device information returned
*
*/
-int cam_context_handle_get_dev_info(struct cam_context *ctx,
+int cam_context_handle_crm_get_dev_info(struct cam_context *ctx,
struct cam_req_mgr_device_info *info);
/**
- * cam_context_handle_link()
+ * cam_context_handle_crm_link()
*
* @brief: Handle link command
*
@@ -202,11 +205,11 @@ int cam_context_handle_get_dev_info(struct cam_context *ctx,
* @link: Link command payload
*
*/
-int cam_context_handle_link(struct cam_context *ctx,
+int cam_context_handle_crm_link(struct cam_context *ctx,
struct cam_req_mgr_core_dev_link_setup *link);
/**
- * cam_context_handle_unlink()
+ * cam_context_handle_crm_unlink()
*
* @brief: Handle unlink command
*
@@ -214,11 +217,11 @@ int cam_context_handle_link(struct cam_context *ctx,
* @unlink: Unlink command payload
*
*/
-int cam_context_handle_unlink(struct cam_context *ctx,
+int cam_context_handle_crm_unlink(struct cam_context *ctx,
struct cam_req_mgr_core_dev_link_setup *unlink);
/**
- * cam_context_handle_apply_req()
+ * cam_context_handle_crm_apply_req()
*
* @brief: Handle apply request command
*
@@ -226,9 +229,20 @@ int cam_context_handle_unlink(struct cam_context *ctx,
* @apply: Apply request command payload
*
*/
-int cam_context_handle_apply_req(struct cam_context *ctx,
+int cam_context_handle_crm_apply_req(struct cam_context *ctx,
struct cam_req_mgr_apply_request *apply);
+/**
+ * cam_context_handle_crm_flush_req()
+ *
+ * @brief: Handle flush request command
+ *
+ * @ctx: Object pointer for cam_context
+ * @flush: Flush request command payload
+ *
+ */
+int cam_context_handle_crm_flush_req(struct cam_context *ctx,
+		struct cam_req_mgr_flush_request *flush);
/**
* cam_context_handle_acquire_dev()
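
[Editor's aside, not part of the diff: cam_context_handle_crm_flush_req() is a dispatcher; it takes ctx_mutex and routes to the per-state crm_ops.flush_req handler, mirroring the existing apply/link/unlink dispatchers. A minimal, hypothetical sketch of a sub-device CRM callback forwarding a flush to its context; how the device resolves its cam_context from the flush payload is driver specific and not shown.]

	static int example_dev_flush_req(struct cam_context *ctx,
		struct cam_req_mgr_flush_request *flush)
	{
		if (!ctx || !flush)
			return -EINVAL;

		/* forward to the state machine through the new dispatcher */
		return cam_context_handle_crm_flush_req(ctx, flush);
	}
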
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
index 2a0c4a7..edd2e11 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
@@ -28,48 +28,45 @@
int cam_context_buf_done_from_hw(struct cam_context *ctx,
void *done_event_data, uint32_t bubble_state)
{
- int rc = 0;
- int i, j;
+ int j;
+ int result;
struct cam_ctx_request *req;
struct cam_hw_done_event_data *done =
(struct cam_hw_done_event_data *)done_event_data;
if (list_empty(&ctx->active_req_list)) {
pr_err("Buf done with no active request\n");
- rc = -EINVAL;
- goto end;
+ return -EIO;
}
req = list_first_entry(&ctx->active_req_list,
struct cam_ctx_request, list);
- for (i = 0; i < done->num_handles; i++) {
- for (j = 0; j < req->num_out_map_entries; j++) {
- if (done->resource_handle[i] ==
- req->out_map_entries[j].resource_handle)
- break;
- }
+ if (done->request_id != req->request_id) {
+ pr_err("mismatch: done request [%lld], active request [%lld]\n",
+ done->request_id, req->request_id);
+ return -EIO;
+ }
- if (j == req->num_out_map_entries) {
- pr_err("Can not find matching lane handle 0x%x\n",
- done->resource_handle[i]);
- rc = -EINVAL;
- continue;
- }
+ if (!req->num_out_map_entries) {
+ pr_err("active request with no output fence objects to signal\n");
+ return -EIO;
+ }
- cam_sync_signal(req->out_map_entries[j].sync_id,
- CAM_SYNC_STATE_SIGNALED_SUCCESS);
- req->num_out_acked++;
+ list_del_init(&req->list);
+ if (!bubble_state)
+ result = CAM_SYNC_STATE_SIGNALED_SUCCESS;
+ else
+ result = CAM_SYNC_STATE_SIGNALED_ERROR;
+
+ for (j = 0; j < req->num_out_map_entries; j++) {
+ cam_sync_signal(req->out_map_entries[j].sync_id, result);
req->out_map_entries[j].sync_id = -1;
}
- if (req->num_out_acked == req->num_out_map_entries) {
- list_del_init(&req->list);
- list_add_tail(&req->list, &ctx->free_req_list);
- }
+ list_add_tail(&req->list, &ctx->free_req_list);
-end:
- return rc;
+ return 0;
}
int cam_context_apply_req_to_hw(struct cam_context *ctx,
@@ -114,64 +111,7 @@ int cam_context_apply_req_to_hw(struct cam_context *ctx,
return rc;
}
-int32_t cam_context_release_dev_to_hw(struct cam_context *ctx,
- struct cam_release_dev_cmd *cmd)
-{
- int rc = 0;
- int i;
- struct cam_hw_release_args arg;
- struct cam_ctx_request *req;
-
- if (!ctx->hw_mgr_intf) {
- pr_err("HW interface is not ready\n");
- rc = -EFAULT;
- goto end;
- }
-
- if (ctx->ctxt_to_hw_map) {
- arg.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
- ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv,
- &arg);
- ctx->ctxt_to_hw_map = NULL;
- }
-
- ctx->session_hdl = 0;
- ctx->dev_hdl = 0;
- ctx->link_hdl = 0;
-
- while (!list_empty(&ctx->active_req_list)) {
- req = list_first_entry(&ctx->active_req_list,
- struct cam_ctx_request, list);
- list_del_init(&req->list);
- pr_warn("signal fence in active list. fence num %d\n",
- req->num_out_map_entries);
- for (i = 0; i < req->num_out_map_entries; i++) {
- if (req->out_map_entries[i].sync_id != -1)
- cam_sync_signal(req->out_map_entries[i].sync_id,
- CAM_SYNC_STATE_SIGNALED_ERROR);
- }
- list_add_tail(&req->list, &ctx->free_req_list);
- }
-
- /* flush the pending queue */
- while (!list_empty(&ctx->pending_req_list)) {
- req = list_first_entry(&ctx->pending_req_list,
- struct cam_ctx_request, list);
- list_del_init(&req->list);
- pr_debug("signal fence in pending list. fence num %d\n",
- req->num_out_map_entries);
- for (i = 0; i < req->num_out_map_entries; i++)
- if (req->out_map_entries[i].sync_id != -1)
- cam_sync_signal(req->out_map_entries[i].sync_id,
- CAM_SYNC_STATE_SIGNALED_ERROR);
- list_add_tail(&req->list, &ctx->free_req_list);
- }
-
-end:
- return rc;
-}
-
-void cam_context_sync_callback(int32_t sync_obj, int status, void *data)
+static void cam_context_sync_callback(int32_t sync_obj, int status, void *data)
{
struct cam_context *ctx = data;
struct cam_ctx_request *req = NULL;
@@ -195,6 +135,67 @@ void cam_context_sync_callback(int32_t sync_obj, int status, void *data)
}
}
+int32_t cam_context_release_dev_to_hw(struct cam_context *ctx,
+ struct cam_release_dev_cmd *cmd)
+{
+ int i;
+ struct cam_hw_release_args arg;
+ struct cam_ctx_request *req;
+
+ if ((!ctx->hw_mgr_intf) || (!ctx->hw_mgr_intf->hw_release)) {
+ pr_err("HW interface is not ready\n");
+ return -EINVAL;
+ }
+
+ arg.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+ if ((list_empty(&ctx->active_req_list)) &&
+ (list_empty(&ctx->pending_req_list)))
+ arg.active_req = false;
+ else
+ arg.active_req = true;
+
+ ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, &arg);
+ ctx->ctxt_to_hw_map = NULL;
+
+ ctx->session_hdl = 0;
+ ctx->dev_hdl = 0;
+ ctx->link_hdl = 0;
+
+ while (!list_empty(&ctx->active_req_list)) {
+ req = list_first_entry(&ctx->active_req_list,
+ struct cam_ctx_request, list);
+ list_del_init(&req->list);
+ pr_debug("signal fence in active list. fence num %d\n",
+ req->num_out_map_entries);
+ for (i = 0; i < req->num_out_map_entries; i++) {
+ if (req->out_map_entries[i].sync_id > 0)
+ cam_sync_signal(req->out_map_entries[i].sync_id,
+ CAM_SYNC_STATE_SIGNALED_ERROR);
+ }
+ list_add_tail(&req->list, &ctx->free_req_list);
+ }
+
+ while (!list_empty(&ctx->pending_req_list)) {
+ req = list_first_entry(&ctx->pending_req_list,
+ struct cam_ctx_request, list);
+ list_del_init(&req->list);
+ for (i = 0; i < req->num_in_map_entries; i++)
+ if (req->in_map_entries[i].sync_id > 0)
+ cam_sync_deregister_callback(
+ cam_context_sync_callback, ctx,
+ req->in_map_entries[i].sync_id);
+ pr_debug("signal out fence in pending list. fence num %d\n",
+ req->num_out_map_entries);
+ for (i = 0; i < req->num_out_map_entries; i++)
+ if (req->out_map_entries[i].sync_id > 0)
+ cam_sync_signal(req->out_map_entries[i].sync_id,
+ CAM_SYNC_STATE_SIGNALED_ERROR);
+ list_add_tail(&req->list, &ctx->free_req_list);
+ }
+
+ return 0;
+}
+
int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx,
struct cam_config_dev_cmd *cmd)
{
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_core_defs.h b/drivers/media/platform/msm/camera/cam_core/cam_core_defs.h
index 3498836..45d989f 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_core_defs.h
+++ b/drivers/media/platform/msm/camera/cam_core/cam_core_defs.h
@@ -9,8 +9,9 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
-#ifndef _CAM_REQ_MGR_CORE_DEFS_H_
-#define _CAM_REQ_MGR_CORE_DEFS_H_
+
+#ifndef _CAM_CORE_DEFS_H_
+#define _CAM_CORE_DEFS_H_
#define CAM_CORE_TRACE_ENABLE 0
@@ -40,5 +41,5 @@
__func__, __LINE__, ##args)
#endif
-#endif /* _CAM_REQ_MGR_CORE_DEFS_H_ */
+#endif /* _CAM_CORE_DEFS_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h
index f72a1d7..aab75d5 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h
+++ b/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h
@@ -51,7 +51,7 @@ struct cam_hw_update_entry {
* struct cam_hw_fence_map_entry - Entry for the resource to sync id map
*
* @resrouce_handle: Resource port id for the buffer
- * @sync_id: Synce id
+ * @sync_id: Sync id
*
*/
struct cam_hw_fence_map_entry {
@@ -65,12 +65,14 @@ struct cam_hw_fence_map_entry {
* @num_handles: number of handles in the event
* @resrouce_handle: list of the resource handle
* @timestamp: time stamp
+ * @request_id: request identifier
*
*/
struct cam_hw_done_event_data {
uint32_t num_handles;
uint32_t resource_handle[CAM_NUM_OUT_PER_COMP_IRQ_MAX];
struct timeval timestamp;
+ uint64_t request_id;
};
/**
@@ -95,10 +97,12 @@ struct cam_hw_acquire_args {
* struct cam_hw_release_args - Payload for release command
*
* @ctxt_to_hw_map: HW context from the acquire
+ * @active_req: Active request flag
*
*/
struct cam_hw_release_args {
void *ctxt_to_hw_map;
+ bool active_req;
};
/**
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_node.c b/drivers/media/platform/msm/camera/cam_core/cam_node.c
index 74a94b2..ab4c25d 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_node.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_node.c
@@ -201,7 +201,7 @@ static int __cam_node_handle_release_dev(struct cam_node *node,
return rc;
}
-static int __cam_node_get_dev_info(struct cam_req_mgr_device_info *info)
+static int __cam_node_crm_get_dev_info(struct cam_req_mgr_device_info *info)
{
struct cam_context *ctx = NULL;
@@ -214,10 +214,11 @@ static int __cam_node_get_dev_info(struct cam_req_mgr_device_info *info)
__func__, info->dev_hdl);
return -EINVAL;
}
- return cam_context_handle_get_dev_info(ctx, info);
+ return cam_context_handle_crm_get_dev_info(ctx, info);
}
-static int __cam_node_link_setup(struct cam_req_mgr_core_dev_link_setup *setup)
+static int __cam_node_crm_link_setup(
+ struct cam_req_mgr_core_dev_link_setup *setup)
{
int rc;
struct cam_context *ctx = NULL;
@@ -233,14 +234,14 @@ static int __cam_node_link_setup(struct cam_req_mgr_core_dev_link_setup *setup)
}
if (setup->link_enable)
- rc = cam_context_handle_link(ctx, setup);
+ rc = cam_context_handle_crm_link(ctx, setup);
else
- rc = cam_context_handle_unlink(ctx, setup);
+ rc = cam_context_handle_crm_unlink(ctx, setup);
return rc;
}
-static int __cam_node_apply_req(struct cam_req_mgr_apply_request *apply)
+static int __cam_node_crm_apply_req(struct cam_req_mgr_apply_request *apply)
{
struct cam_context *ctx = NULL;
@@ -254,7 +255,26 @@ static int __cam_node_apply_req(struct cam_req_mgr_apply_request *apply)
return -EINVAL;
}
- return cam_context_handle_apply_req(ctx, apply);
+ return cam_context_handle_crm_apply_req(ctx, apply);
+}
+
+static int __cam_node_crm_flush_req(struct cam_req_mgr_flush_request *flush)
+{
+ struct cam_context *ctx = NULL;
+
+ if (!flush) {
+ pr_err("%s: Invalid flush request payload\n", __func__);
+ return -EINVAL;
+ }
+
+ ctx = (struct cam_context *) cam_get_device_priv(flush->dev_hdl);
+ if (!ctx) {
+ pr_err("%s: Can not get context for handle %d\n",
+ __func__, flush->dev_hdl);
+ return -EINVAL;
+ }
+
+ return cam_context_handle_crm_flush_req(ctx, flush);
}
int cam_node_deinit(struct cam_node *node)
@@ -283,9 +303,10 @@ int cam_node_init(struct cam_node *node, struct cam_hw_mgr_intf *hw_mgr_intf,
strlcpy(node->name, name, sizeof(node->name));
memcpy(&node->hw_mgr_intf, hw_mgr_intf, sizeof(node->hw_mgr_intf));
- node->crm_node_intf.apply_req = __cam_node_apply_req;
- node->crm_node_intf.get_dev_info = __cam_node_get_dev_info;
- node->crm_node_intf.link_setup = __cam_node_link_setup;
+ node->crm_node_intf.apply_req = __cam_node_crm_apply_req;
+ node->crm_node_intf.get_dev_info = __cam_node_crm_get_dev_info;
+ node->crm_node_intf.link_setup = __cam_node_crm_link_setup;
+ node->crm_node_intf.flush_req = __cam_node_crm_flush_req;
mutex_init(&node->list_mutex);
INIT_LIST_HEAD(&node->free_ctx_list);
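
The new flush_req entry registered above follows the same handle-to-context dispatch as the other CRM callbacks in this file: validate the payload, resolve the device handle to a context, forward to the context handler. A small stand-alone model of that pattern is sketched below, with cam_get_device_priv() replaced by a table lookup and all types simplified; it is illustrative only.

/* Model of the CRM callback dispatch shape (stand-in types). */
#include <stddef.h>
#include <stdio.h>

struct flush_request { int dev_hdl; long long req_id; };
struct context { int dev_hdl; };

/* Stand-in for cam_get_device_priv(). */
static struct context *get_device_priv(int dev_hdl,
					struct context *table, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (table[i].dev_hdl == dev_hdl)
			return &table[i];
	return NULL;
}

/* Stand-in for cam_context_handle_crm_flush_req(). */
static int context_handle_flush(struct context *ctx,
				struct flush_request *flush)
{
	printf("ctx %d: flush request %lld\n", ctx->dev_hdl, flush->req_id);
	return 0;
}

/* Shape of __cam_node_crm_flush_req(): validate, resolve, forward. */
static int node_flush_req(struct flush_request *flush,
			  struct context *table, size_t n)
{
	struct context *ctx;

	if (!flush)
		return -1;		/* -EINVAL */
	ctx = get_device_priv(flush->dev_hdl, table, n);
	if (!ctx)
		return -1;		/* -EINVAL */
	return context_handle_flush(ctx, flush);
}

int main(void)
{
	struct context ctxs[2] = { { 1 }, { 2 } };
	struct flush_request flush = { 2, 100 };

	return node_flush_req(&flush, ctxs, 2);
}
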
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_subdev.c b/drivers/media/platform/msm/camera/cam_core/cam_subdev.c
index a89981d..8664ce8 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_subdev.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_subdev.c
@@ -129,16 +129,12 @@ int cam_subdev_probe(struct cam_subdev *sd, struct platform_device *pdev,
int rc;
struct cam_node *node = NULL;
- if (!sd || !pdev || !name) {
- rc = -EINVAL;
- goto err;
- }
+ if (!sd || !pdev || !name)
+ return -EINVAL;
node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node) {
- rc = -ENOMEM;
- goto err;
- }
+ if (!node)
+ return -ENOMEM;
/* Setup camera v4l2 subdevice */
sd->pdev = pdev;
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
index 9a30d64..813f392 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
@@ -665,7 +665,8 @@ static int cam_cpas_util_get_ahb_level(struct cam_hw_info *cpas_hw,
}
static int cam_cpas_util_apply_client_ahb_vote(struct cam_hw_info *cpas_hw,
- struct cam_cpas_client *cpas_client, struct cam_ahb_vote *ahb_vote)
+ struct cam_cpas_client *cpas_client, struct cam_ahb_vote *ahb_vote,
+ enum cam_vote_level *applied_level)
{
struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
struct cam_cpas_bus_client *ahb_bus_client = &cpas_core->ahb_bus_client;
@@ -710,9 +711,21 @@ static int cam_cpas_util_apply_client_ahb_vote(struct cam_hw_info *cpas_hw,
rc = cam_cpas_util_vote_bus_client_level(ahb_bus_client,
highest_level);
- if (rc)
+ if (rc) {
pr_err("Failed in ahb vote, level=%d, rc=%d\n",
highest_level, rc);
+ goto unlock_bus_client;
+ }
+
+ rc = cam_soc_util_set_clk_rate_level(&cpas_hw->soc_info, highest_level);
+ if (rc) {
+ pr_err("Failed in scaling clock rate level %d for AHB\n",
+ highest_level);
+ goto unlock_bus_client;
+ }
+
+ if (applied_level)
+ *applied_level = highest_level;
unlock_bus_client:
mutex_unlock(&ahb_bus_client->lock);
@@ -748,7 +761,7 @@ static int cam_cpas_hw_update_ahb_vote(struct cam_hw_info *cpas_hw,
cpas_core->cpas_client[client_indx]->ahb_level);
rc = cam_cpas_util_apply_client_ahb_vote(cpas_hw,
- cpas_core->cpas_client[client_indx], ahb_vote);
+ cpas_core->cpas_client[client_indx], ahb_vote, NULL);
unlock_client:
mutex_unlock(&cpas_core->client_mutex[client_indx]);
@@ -765,6 +778,7 @@ static int cam_cpas_hw_start(void *hw_priv, void *start_args,
struct cam_cpas_client *cpas_client;
struct cam_ahb_vote *ahb_vote;
struct cam_axi_vote *axi_vote;
+ enum cam_vote_level applied_level = CAM_SVS_VOTE;
int rc;
if (!hw_priv || !start_args) {
@@ -820,7 +834,7 @@ static int cam_cpas_hw_start(void *hw_priv, void *start_args,
client_indx, ahb_vote->type, ahb_vote->vote.level,
cpas_client->ahb_level);
rc = cam_cpas_util_apply_client_ahb_vote(cpas_hw, cpas_client,
- ahb_vote);
+ ahb_vote, &applied_level);
if (rc)
goto done;
@@ -833,7 +847,8 @@ static int cam_cpas_hw_start(void *hw_priv, void *start_args,
goto done;
if (cpas_core->streamon_clients == 0) {
- rc = cam_cpas_soc_enable_resources(&cpas_hw->soc_info);
+ rc = cam_cpas_soc_enable_resources(&cpas_hw->soc_info,
+ applied_level);
if (rc) {
pr_err("enable_resorce failed, rc=%d\n", rc);
goto done;
@@ -932,7 +947,7 @@ static int cam_cpas_hw_stop(void *hw_priv, void *stop_args,
ahb_vote.type = CAM_VOTE_ABSOLUTE;
ahb_vote.vote.level = CAM_SUSPEND_VOTE;
rc = cam_cpas_util_apply_client_ahb_vote(cpas_hw, cpas_client,
- &ahb_vote);
+ &ahb_vote, NULL);
if (rc)
goto done;
@@ -1383,7 +1398,7 @@ int cam_cpas_hw_probe(struct platform_device *pdev,
if (rc)
goto axi_cleanup;
- rc = cam_cpas_soc_enable_resources(&cpas_hw->soc_info);
+ rc = cam_cpas_soc_enable_resources(&cpas_hw->soc_info, CAM_SVS_VOTE);
if (rc) {
pr_err("failed in soc_enable_resources, rc=%d\n", rc);
goto remove_default_vote;
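
The net effect of the AHB changes in this file: after computing the highest level requested across clients and voting the bus, the same level is now also applied to the clock rate, and the level actually applied is reported back so hw_start() can hand it to soc_enable_resources(). The compact sketch below models that flow with stub helpers standing in for the bus and cam_soc_util calls; it is a sketch, not the driver code.

/* Sketch of the updated AHB vote flow (stub helpers, simplified types). */
enum vote_level { SUSPEND_VOTE, SVS_VOTE, NOMINAL_VOTE, TURBO_VOTE };

/* Stand-ins for the bus vote and cam_soc_util clock calls. */
static int vote_bus_client_level(enum vote_level level) { (void)level; return 0; }
static int set_clk_rate_level(enum vote_level level)    { (void)level; return 0; }

static int apply_client_ahb_vote(const enum vote_level *client_levels, int n,
				 enum vote_level *applied_level)
{
	enum vote_level highest = SUSPEND_VOTE;
	int i, rc;

	for (i = 0; i < n; i++)
		if (client_levels[i] > highest)
			highest = client_levels[i];

	rc = vote_bus_client_level(highest);
	if (rc)
		return rc;

	/* new: AHB clock rate now tracks the voted level */
	rc = set_clk_rate_level(highest);
	if (rc)
		return rc;

	/* new: optionally report the level that was applied */
	if (applied_level)
		*applied_level = highest;
	return 0;
}

int main(void)
{
	enum vote_level levels[2] = { SVS_VOTE, NOMINAL_VOTE };
	enum vote_level applied;

	/* hw_start() would then pass 'applied' to soc_enable_resources() */
	return apply_client_ahb_vote(levels, 2, &applied);
}
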
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
index 0c71ece..09c2ae5 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
@@ -22,26 +22,6 @@
#include "cam_cpas_hw.h"
#include "cam_cpas_soc.h"
-static int cam_cpas_get_vote_level_from_string(const char *string,
- enum cam_vote_level *vote_level)
-{
- if (!vote_level || !string)
- return -EINVAL;
-
- if (strnstr("suspend", string, strlen(string)))
- *vote_level = CAM_SUSPEND_VOTE;
- else if (strnstr("svs", string, strlen(string)))
- *vote_level = CAM_SVS_VOTE;
- else if (strnstr("nominal", string, strlen(string)))
- *vote_level = CAM_NOMINAL_VOTE;
- else if (strnstr("turbo", string, strlen(string)))
- *vote_level = CAM_TURBO_VOTE;
- else
- *vote_level = CAM_SVS_VOTE;
-
- return 0;
-}
-
int cam_cpas_get_custom_dt_info(struct platform_device *pdev,
struct cam_cpas_private_soc *soc_private)
{
@@ -130,7 +110,7 @@ int cam_cpas_get_custom_dt_info(struct platform_device *pdev,
return -ENODEV;
}
- rc = cam_cpas_get_vote_level_from_string(ahb_string,
+ rc = cam_soc_util_get_level_from_string(ahb_string,
&soc_private->vdd_ahb[i].ahb_level);
if (rc) {
pr_err("invalid ahb-string at index=%d\n", i);
@@ -207,11 +187,13 @@ int cam_cpas_soc_deinit_resources(struct cam_hw_soc_info *soc_info)
return rc;
}
-int cam_cpas_soc_enable_resources(struct cam_hw_soc_info *soc_info)
+int cam_cpas_soc_enable_resources(struct cam_hw_soc_info *soc_info,
+ enum cam_vote_level default_level)
{
int rc = 0;
- rc = cam_soc_util_enable_platform_resource(soc_info, true, true);
+ rc = cam_soc_util_enable_platform_resource(soc_info, true,
+ default_level, true);
if (rc)
pr_err("enable platform resource failed, rc=%d\n", rc);
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
index d3dfbbd..b2ad513 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
@@ -59,6 +59,7 @@ struct cam_cpas_private_soc {
int cam_cpas_soc_init_resources(struct cam_hw_soc_info *soc_info,
irq_handler_t vfe_irq_handler, void *irq_data);
int cam_cpas_soc_deinit_resources(struct cam_hw_soc_info *soc_info);
-int cam_cpas_soc_enable_resources(struct cam_hw_soc_info *soc_info);
+int cam_cpas_soc_enable_resources(struct cam_hw_soc_info *soc_info,
+ enum cam_vote_level default_level);
int cam_cpas_soc_disable_resources(struct cam_hw_soc_info *soc_info);
#endif /* _CAM_CPAS_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h b/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
index 27b8504..801d09d 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
@@ -17,6 +17,7 @@
#include <linux/platform_device.h>
#include <media/cam_cpas.h>
+#include "cam_soc_util.h"
#define CAM_HW_IDENTIFIER_LENGTH 128
@@ -101,7 +102,7 @@ struct cam_cpas_register_params {
};
/**
- * enum cam_vote_level - Enum for voting type
+ * enum cam_vote_type - Enum for voting type
*
* @CAM_VOTE_ABSOLUTE : Absolute vote
* @CAM_VOTE_DYNAMIC : Dynamic vote
@@ -112,21 +113,6 @@ enum cam_vote_type {
};
/**
- * enum cam_vote_level - Enum for voting level
- *
- * @CAM_SUSPEND_VOTE : Suspend vote
- * @CAM_SVS_VOTE : SVS vote
- * @CAM_NOMINAL_VOTE : Nominal vote
- * @CAM_TURBO_VOTE : Turbo vote
- */
-enum cam_vote_level {
- CAM_SUSPEND_VOTE,
- CAM_SVS_VOTE,
- CAM_NOMINAL_VOTE,
- CAM_TURBO_VOTE,
-};
-
-/**
* struct cam_ahb_vote : AHB vote
*
* @type : AHB voting type.
diff --git a/drivers/media/platform/msm/camera/icp/Makefile b/drivers/media/platform/msm/camera/cam_icp/Makefile
similarity index 63%
rename from drivers/media/platform/msm/camera/icp/Makefile
rename to drivers/media/platform/msm/camera/cam_icp/Makefile
index c42b162..b35e4e4 100644
--- a/drivers/media/platform/msm/camera/icp/Makefile
+++ b/drivers/media/platform/msm/camera/cam_icp/Makefile
@@ -2,13 +2,12 @@
ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
-ccflags-y += -Idrivers/media/platform/msm/camera/icp
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/include
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/fw_inc
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/fw_inc
ccflags-y += -Idrivers/media/platform/msm/camera
ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
-
obj-$(CONFIG_SPECTRA_CAMERA) += icp_hw/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_icp_subdev.o cam_icp_context.o hfi.o
diff --git a/drivers/media/platform/msm/camera/icp/cam_icp_context.c b/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
similarity index 98%
rename from drivers/media/platform/msm/camera/icp/cam_icp_context.c
rename to drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
index 41290f4..2311f66 100644
--- a/drivers/media/platform/msm/camera/icp/cam_icp_context.c
+++ b/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
@@ -107,7 +107,7 @@ static int __cam_icp_release_dev_in_ready(struct cam_context *ctx,
static int __cam_icp_handle_buf_done_in_ready(void *ctx,
uint32_t evt_id, void *done)
{
- return cam_context_buf_done_from_hw(ctx, done, 0);
+ return cam_context_buf_done_from_hw(ctx, done, evt_id);
}
static struct cam_ctx_ops
diff --git a/drivers/media/platform/msm/camera/icp/cam_icp_context.h b/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.h
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/cam_icp_context.h
rename to drivers/media/platform/msm/camera/cam_icp/cam_icp_context.h
diff --git a/drivers/media/platform/msm/camera/icp/cam_icp_subdev.c b/drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c
similarity index 98%
rename from drivers/media/platform/msm/camera/icp/cam_icp_subdev.c
rename to drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c
index 703561d..69c2e03 100644
--- a/drivers/media/platform/msm/camera/icp/cam_icp_subdev.c
+++ b/drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c
@@ -142,8 +142,6 @@ static int cam_icp_probe(struct platform_device *pdev)
return -EINVAL;
}
- memset(&g_icp_dev, 0, sizeof(g_icp_dev));
-
g_icp_dev.sd.pdev = pdev;
g_icp_dev.sd.internal_ops = &cam_icp_subdev_internal_ops;
rc = cam_subdev_probe(&g_icp_dev.sd, pdev, CAM_ICP_DEV_NAME,
diff --git a/drivers/media/platform/msm/camera/icp/fw_inc/hfi_intf.h b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h
similarity index 95%
rename from drivers/media/platform/msm/camera/icp/fw_inc/hfi_intf.h
rename to drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h
index 0ffea5b..9150795 100644
--- a/drivers/media/platform/msm/camera/icp/fw_inc/hfi_intf.h
+++ b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h
@@ -98,6 +98,13 @@ void hfi_send_system_cmd(uint32_t type, uint64_t data, uint32_t size);
* @icp_base: icp base address
*/
void cam_hfi_enable_cpu(void __iomem *icp_base);
+
+/**
+ * cam_hfi_disable_cpu() - disable A5 CPU
+ * @icp_base: icp base address
+ */
+void cam_hfi_disable_cpu(void __iomem *icp_base);
+
/**
* cam_hfi_deinit() - cleanup HFI
*/
diff --git a/drivers/media/platform/msm/camera/icp/fw_inc/hfi_reg.h b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_reg.h
similarity index 99%
rename from drivers/media/platform/msm/camera/icp/fw_inc/hfi_reg.h
rename to drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_reg.h
index ff6b72a..04e3c85 100644
--- a/drivers/media/platform/msm/camera/icp/fw_inc/hfi_reg.h
+++ b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_reg.h
@@ -49,6 +49,7 @@
#define ICP_CSR_EN_CLKGATE_WFI (1 << 12)
#define ICP_CSR_EDBGRQ (1 << 14)
#define ICP_CSR_DBGSWENABLE (1 << 22)
+#define ICP_CSR_A5_STATUS_WFI (1 << 7)
/* start of Queue table and queues */
#define MAX_ICP_HFI_QUEUES 4
diff --git a/drivers/media/platform/msm/camera/icp/fw_inc/hfi_session_defs.h b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_session_defs.h
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/fw_inc/hfi_session_defs.h
rename to drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_session_defs.h
diff --git a/drivers/media/platform/msm/camera/icp/fw_inc/hfi_sys_defs.h b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_sys_defs.h
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/fw_inc/hfi_sys_defs.h
rename to drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_sys_defs.h
diff --git a/drivers/media/platform/msm/camera/icp/hfi.c b/drivers/media/platform/msm/camera/cam_icp/hfi.c
similarity index 97%
rename from drivers/media/platform/msm/camera/icp/hfi.c
rename to drivers/media/platform/msm/camera/cam_icp/hfi.c
index 170c8cf..b763a39 100644
--- a/drivers/media/platform/msm/camera/icp/hfi.c
+++ b/drivers/media/platform/msm/camera/cam_icp/hfi.c
@@ -302,6 +302,19 @@ int hfi_get_hw_caps(void *query_buf)
return 0;
}
+void cam_hfi_disable_cpu(void __iomem *icp_base)
+{
+ uint32_t data;
+ uint32_t val;
+
+ data = cam_io_r(icp_base + HFI_REG_A5_CSR_A5_STATUS);
+ /* Add waiting logic in case it is not idle */
+ if (data & ICP_CSR_A5_STATUS_WFI) {
+ val = cam_io_r(icp_base + HFI_REG_A5_CSR_A5_CONTROL);
+ val &= ~(ICP_FLAG_CSR_A5_EN | ICP_FLAG_CSR_WAKE_UP_EN);
+ cam_io_w(val, icp_base + HFI_REG_A5_CSR_A5_CONTROL);
+ }
+}
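
The helper above only pulls the A5 out of run state when the status register already reports WFI; the remaining TODO is to poll for idleness otherwise. Below is a tiny stand-alone model of that gate using plain pointers in place of the mapped registers; the control-register flag values are placeholders for illustration, not the real hfi_reg.h definitions.

/* Model of the WFI-gated CPU disable (placeholder flag values). */
#include <stdint.h>

#define A5_STATUS_WFI	(1u << 7)	/* mirrors ICP_CSR_A5_STATUS_WFI */
#define A5_CSR_EN	(1u << 9)	/* placeholder for ICP_FLAG_CSR_A5_EN */
#define A5_WAKE_UP_EN	(1u << 4)	/* placeholder for ICP_FLAG_CSR_WAKE_UP_EN */

static void disable_cpu(volatile uint32_t *status, volatile uint32_t *control)
{
	uint32_t val;

	if (!(*status & A5_STATUS_WFI))
		return;		/* TODO in the driver: wait until idle */

	val = *control;
	val &= ~(A5_CSR_EN | A5_WAKE_UP_EN);
	*control = val;
}

int main(void)
{
	uint32_t status = A5_STATUS_WFI;
	uint32_t control = A5_CSR_EN | A5_WAKE_UP_EN;

	disable_cpu(&status, &control);
	return control != 0;	/* 0 on success: both enable bits cleared */
}
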
void cam_hfi_enable_cpu(void __iomem *icp_base)
{
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/Makefile b/drivers/media/platform/msm/camera/cam_icp/icp_hw/Makefile
similarity index 60%
rename from drivers/media/platform/msm/camera/icp/icp_hw/Makefile
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/Makefile
index 8e95286..5276340 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/Makefile
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/Makefile
@@ -1,9 +1,9 @@
ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
-ccflags-y += -Idrivers/media/platform/msm/camera/icp
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/include
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr
ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
obj-$(CONFIG_SPECTRA_CAMERA) += icp_hw_mgr/ a5_hw/ ipe_hw/ bps_hw/
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/Makefile b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/Makefile
new file mode 100644
index 0000000..5f4f9fa
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/Makefile
@@ -0,0 +1,11 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/fw_inc
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += a5_dev.o a5_core.o a5_soc.o
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
similarity index 99%
rename from drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_core.c
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
index 39eacd8..9f6f940 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_core.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
@@ -272,7 +272,7 @@ int cam_a5_init_hw(void *device_priv,
}
cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE;
- cpas_vote.ahb_vote.vote.level = CAM_TURBO_VOTE;
+ cpas_vote.ahb_vote.vote.level = CAM_SVS_VOTE;
cpas_vote.axi_vote.compressed_bw = ICP_TURBO_VOTE;
cpas_vote.axi_vote.uncompressed_bw = ICP_TURBO_VOTE;
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_core.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.h
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_core.h
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.h
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_dev.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_dev.c
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_dev.c
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_dev.c
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_soc.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.c
similarity index 96%
rename from drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_soc.c
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.c
index d12b3b6..a98f01f 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_soc.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.c
@@ -82,7 +82,8 @@ int cam_a5_enable_soc_resources(struct cam_hw_soc_info *soc_info)
{
int rc = 0;
- rc = cam_soc_util_enable_platform_resource(soc_info, true, true);
+ rc = cam_soc_util_enable_platform_resource(soc_info, true,
+ CAM_TURBO_VOTE, true);
if (rc)
pr_err("%s: enable platform failed\n", __func__);
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_soc.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.h
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_soc.h
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.h
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/Makefile b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/Makefile
new file mode 100644
index 0000000..114e4a1
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/Makefile
@@ -0,0 +1,11 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/fw_inc
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += bps_dev.o bps_core.o bps_soc.o
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c
similarity index 98%
rename from drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_core.c
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c
index 91652d7..cabdc8a 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_core.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c
@@ -74,7 +74,7 @@ int cam_bps_init_hw(void *device_priv,
}
cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE;
- cpas_vote.ahb_vote.vote.level = CAM_TURBO_VOTE;
+ cpas_vote.ahb_vote.vote.level = CAM_SVS_VOTE;
cpas_vote.axi_vote.compressed_bw = ICP_TURBO_VOTE;
cpas_vote.axi_vote.uncompressed_bw = ICP_TURBO_VOTE;
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_core.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.h
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_core.h
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.h
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_dev.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_dev.c
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_dev.c
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_dev.c
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_soc.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c
similarity index 95%
rename from drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_soc.c
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c
index 76884bf..8a3c7ac 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_soc.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c
@@ -66,7 +66,8 @@ int cam_bps_enable_soc_resources(struct cam_hw_soc_info *soc_info)
{
int rc = 0;
- rc = cam_soc_util_enable_platform_resource(soc_info, true, false);
+ rc = cam_soc_util_enable_platform_resource(soc_info, true,
+ CAM_TURBO_VOTE, false);
if (rc)
pr_err("%s: enable platform failed\n", __func__);
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_soc.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.h
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_soc.h
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.h
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/Makefile b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/Makefile
similarity index 63%
rename from drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/Makefile
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/Makefile
index 4a6c3c0..71afea4 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/Makefile
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/Makefile
@@ -1,16 +1,16 @@
ccflags-y += -Idrivers/media/platform/msm/camera/isp/isp_hw/include
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/include
ccflags-y += -Idrivers/media/platform/msm/camera/isp/isp_hw/hw_utils/include
ccflags-y += -Idrivers/media/platform/msm/camera/isp/isp_hw/isp_hw_mgr/
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/fw_inc/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/fw_inc/
ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr/
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include/
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/a5_hw/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/
ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
obj-$(CONFIG_SPECTRA_CAMERA) += cam_icp_hw_mgr.o
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
similarity index 71%
rename from drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
index 677c24e..fe719c7 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -26,7 +26,6 @@
#include <linux/debugfs.h>
#include <media/cam_defs.h>
#include <media/cam_icp.h>
-#include <linux/debugfs.h>
#include "cam_sync_api.h"
#include "cam_packet_util.h"
@@ -89,10 +88,8 @@ static int cam_icp_mgr_process_cmd(void *priv, void *data)
task_data = (struct hfi_cmd_work_data *)data;
rc = hfi_write_cmd(task_data->data);
- if (rc < 0)
- pr_err("unable to write\n");
-
ICP_DBG("task type : %u, rc : %d\n", task_data->type, rc);
+
return rc;
}
@@ -100,12 +97,12 @@ static int cam_icp_mgr_process_msg_frame_process(uint32_t *msg_ptr)
{
int i;
uint32_t idx;
- uint32_t request_id;
+ uint64_t request_id;
struct cam_icp_hw_ctx_data *ctx_data = NULL;
struct hfi_msg_ipebps_async_ack *ioconfig_ack = NULL;
struct hfi_msg_frame_process_done *frame_done;
struct hfi_frame_process_info *hfi_frame_process;
- struct cam_hw_done_event_data buf_data;
+ struct cam_hw_done_event_data buf_data;
ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
if (ioconfig_ack->err_type != HFI_ERR_SYS_NONE) {
@@ -119,12 +116,9 @@ static int cam_icp_mgr_process_msg_frame_process(uint32_t *msg_ptr)
pr_err("result : %u\n", frame_done->result);
return -EIO;
}
- ICP_DBG("result : %u\n", frame_done->result);
ctx_data = (struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
request_id = ioconfig_ack->user_data2;
- ICP_DBG("ctx : %pK, request_id :%d\n",
- (void *)ctx_data->context_priv, request_id);
hfi_frame_process = &ctx_data->hfi_frame_process;
for (i = 0; i < CAM_FRAME_CMD_MAX; i++)
@@ -132,22 +126,17 @@ static int cam_icp_mgr_process_msg_frame_process(uint32_t *msg_ptr)
break;
if (i >= CAM_FRAME_CMD_MAX) {
- pr_err("unable to find pkt in ctx data for req_id =%d\n",
+ pr_err("unable to find pkt in ctx data for req_id =%lld\n",
request_id);
return -EINVAL;
}
idx = i;
- /* send event to ctx this needs to be done in msg handler */
- buf_data.num_handles = hfi_frame_process->num_out_resources[idx];
- for (i = 0; i < buf_data.num_handles; i++)
- buf_data.resource_handle[i] =
- hfi_frame_process->out_resource[idx][i];
-
- ctx_data->ctxt_event_cb(ctx_data->context_priv, 0, &buf_data);
+ buf_data.request_id = hfi_frame_process->request_id[idx];
+ ctx_data->ctxt_event_cb(ctx_data->context_priv, false, &buf_data);
/* now release memory for hfi frame process command */
- ICP_DBG("matching request id: %d\n",
+ ICP_DBG("matching request id: %lld\n",
hfi_frame_process->request_id[idx]);
mutex_lock(&ctx_data->hfi_frame_process.lock);
hfi_frame_process->request_id[idx] = 0;
@@ -176,6 +165,11 @@ static int cam_icp_mgr_process_msg_config_io(uint32_t *msg_ptr)
}
ctx_data =
(struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
+ if (!ctx_data) {
+ pr_err("wrong ctx data from IPE response\n");
+ return -EINVAL;
+ }
+
mutex_lock(&ctx_data->ctx_mutex);
ctx_data->scratch_mem_size = ipe_config_ack->scratch_mem_size;
mutex_unlock(&ctx_data->ctx_mutex);
@@ -191,6 +185,10 @@ static int cam_icp_mgr_process_msg_config_io(uint32_t *msg_ptr)
}
ctx_data =
(struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
+ if (!ctx_data) {
+ pr_err("wrong ctx data from BPS response\n");
+ return -EINVAL;
+ }
}
complete(&ctx_data->wait_complete);
@@ -253,29 +251,25 @@ static int cam_icp_mgr_process_indirect_ack_msg(uint32_t *msg_ptr)
{
int rc;
- switch (msg_ptr[ICP_PACKET_IPCODE]) {
+ switch (msg_ptr[ICP_PACKET_OPCODE]) {
case HFI_IPEBPS_CMD_OPCODE_IPE_CONFIG_IO:
case HFI_IPEBPS_CMD_OPCODE_BPS_CONFIG_IO:
ICP_DBG("received HFI_IPEBPS_CMD_OPCODE_IPE/BPS_CONFIG_IO:\n");
rc = cam_icp_mgr_process_msg_config_io(msg_ptr);
- if (rc < 0) {
- pr_err("error in process_msg_config_io\n");
+ if (rc)
return rc;
- }
break;
case HFI_IPEBPS_CMD_OPCODE_IPE_FRAME_PROCESS:
case HFI_IPEBPS_CMD_OPCODE_BPS_FRAME_PROCESS:
ICP_DBG("received OPCODE_IPE/BPS_FRAME_PROCESS:\n");
rc = cam_icp_mgr_process_msg_frame_process(msg_ptr);
- if (rc < 0) {
- pr_err("error in msg_frame_process\n");
+ if (rc)
return rc;
- }
break;
default:
pr_err("Invalid opcode : %u\n",
- msg_ptr[ICP_PACKET_IPCODE]);
+ msg_ptr[ICP_PACKET_OPCODE]);
break;
}
@@ -286,23 +280,27 @@ static int cam_icp_mgr_process_direct_ack_msg(uint32_t *msg_ptr)
{
struct cam_icp_hw_ctx_data *ctx_data = NULL;
struct hfi_msg_ipebps_async_ack *ioconfig_ack = NULL;
+ int rc = 0;
- if (msg_ptr[ICP_PACKET_IPCODE] ==
- HFI_IPEBPS_CMD_OPCODE_IPE_DESTROY ||
- msg_ptr[ICP_PACKET_IPCODE] ==
- HFI_IPEBPS_CMD_OPCODE_BPS_DESTROY) {
- ICP_DBG("received HFI_IPEBPS_CMD_OPCODE_IPE/BPS_DESTROY:\n");
+ switch (msg_ptr[ICP_PACKET_OPCODE]) {
+ case HFI_IPEBPS_CMD_OPCODE_IPE_DESTROY:
+ case HFI_IPEBPS_CMD_OPCODE_BPS_DESTROY:
+ case HFI_IPEBPS_CMD_OPCODE_IPE_ABORT:
+ case HFI_IPEBPS_CMD_OPCODE_BPS_ABORT:
+ ICP_DBG("received IPE/BPS_DESTROY/ABORT:\n");
ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
ctx_data =
(struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
complete(&ctx_data->wait_complete);
-
- } else {
- pr_err("Invalid opcode : %u\n", msg_ptr[ICP_PACKET_IPCODE]);
- return -EINVAL;
+ break;
+ default:
+ pr_err("Invalid opcode : %u\n",
+ msg_ptr[ICP_PACKET_OPCODE]);
+ rc = -EINVAL;
+ break;
}
- return 0;
+ return rc;
}
static int32_t cam_icp_mgr_process_msg(void *priv, void *data)
@@ -341,27 +339,19 @@ static int32_t cam_icp_mgr_process_msg(void *priv, void *data)
case HFI_MSG_SYS_PING_ACK:
ICP_DBG("received HFI_MSG_SYS_PING_ACK\n");
rc = cam_icp_mgr_process_msg_ping_ack(msg_ptr);
- if (rc)
- pr_err("fail process PING_ACK\n");
break;
case HFI_MSG_IPEBPS_CREATE_HANDLE_ACK:
ICP_DBG("received HFI_MSG_IPEBPS_CREATE_HANDLE_ACK\n");
rc = cam_icp_mgr_process_msg_create_handle(msg_ptr);
- if (rc)
- pr_err("fail process CREATE_HANDLE_ACK\n");
break;
case HFI_MSG_IPEBPS_ASYNC_COMMAND_INDIRECT_ACK:
rc = cam_icp_mgr_process_indirect_ack_msg(msg_ptr);
- if (rc)
- pr_err("fail process INDIRECT_ACK\n");
break;
case HFI_MSG_IPEBPS_ASYNC_COMMAND_DIRECT_ACK:
rc = cam_icp_mgr_process_direct_ack_msg(msg_ptr);
- if (rc)
- pr_err("fail process DIRECT_ACK\n");
break;
case HFI_MSG_EVENT_NOTIFY:
@@ -416,27 +406,44 @@ static void cam_icp_free_hfi_mem(void)
cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.sec_heap);
}
-static int cam_icp_allocate_hfi_mem(void)
+static int cam_icp_alloc_shared_mem(struct cam_mem_mgr_memory_desc *qtbl)
{
int rc;
struct cam_mem_mgr_request_desc alloc;
struct cam_mem_mgr_memory_desc out;
- dma_addr_t iova;
+
+ memset(&alloc, 0, sizeof(alloc));
+ memset(&out, 0, sizeof(out));
+ alloc.size = SZ_1M;
+ alloc.align = 0;
+ alloc.region = CAM_MEM_MGR_REGION_SHARED;
+ alloc.smmu_hdl = icp_hw_mgr.iommu_hdl;
+ rc = cam_mem_mgr_request_mem(&alloc, &out);
+ if (rc)
+ return rc;
+
+ *qtbl = out;
+ ICP_DBG("kva = %llX\n", out.kva);
+ ICP_DBG("qtbl IOVA = %X\n", out.iova);
+ ICP_DBG("SMMU HDL = %X\n", out.smmu_hdl);
+ ICP_DBG("MEM HDL = %X\n", out.mem_handle);
+ ICP_DBG("length = %lld\n", out.len);
+ ICP_DBG("region = %d\n", out.region);
+
+ return rc;
+}
+
+static int cam_icp_allocate_fw_mem(void)
+{
+ int rc;
uint64_t kvaddr;
size_t len;
-
- rc = cam_smmu_get_region_info(icp_hw_mgr.iommu_hdl,
- CAM_MEM_MGR_REGION_SHARED,
- &icp_hw_mgr.hfi_mem.shmem);
- if (rc)
- return -ENOMEM;
+ dma_addr_t iova;
rc = cam_smmu_alloc_firmware(icp_hw_mgr.iommu_hdl,
&iova, &kvaddr, &len);
- if (rc < 0) {
- pr_err("Unable to allocate FW memory\n");
+ if (rc)
return -ENOMEM;
- }
icp_hw_mgr.hfi_mem.fw_buf.len = len;
icp_hw_mgr.hfi_mem.fw_buf.kva = kvaddr;
@@ -447,112 +454,58 @@ static int cam_icp_allocate_hfi_mem(void)
ICP_DBG("IOVA = %llX\n", iova);
ICP_DBG("length = %zu\n", len);
- memset(&alloc, 0, sizeof(alloc));
- memset(&out, 0, sizeof(out));
- alloc.size = SZ_1M;
- alloc.align = 0;
- alloc.region = CAM_MEM_MGR_REGION_SHARED;
- alloc.smmu_hdl = icp_hw_mgr.iommu_hdl;
- rc = cam_mem_mgr_request_mem(&alloc, &out);
- if (rc < 0) {
+ return rc;
+}
+
+static int cam_icp_allocate_hfi_mem(void)
+{
+ int rc;
+
+ rc = cam_smmu_get_region_info(icp_hw_mgr.iommu_hdl,
+ CAM_MEM_MGR_REGION_SHARED,
+ &icp_hw_mgr.hfi_mem.shmem);
+ if (rc) {
+ pr_err("Unable to get shared memory info\n");
+ return rc;
+ }
+
+ rc = cam_icp_allocate_fw_mem();
+ if (rc) {
+ pr_err("Unable to allocate FW memory\n");
+ return rc;
+ }
+
+ rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.qtbl);
+ if (rc) {
pr_err("Unable to allocate qtbl memory\n");
goto qtbl_alloc_failed;
}
- icp_hw_mgr.hfi_mem.qtbl = out;
- ICP_DBG("kva = %llX\n", out.kva);
- ICP_DBG("qtbl IOVA = %X\n", out.iova);
- ICP_DBG("SMMU HDL = %X\n", out.smmu_hdl);
- ICP_DBG("MEM HDL = %X\n", out.mem_handle);
- ICP_DBG("length = %lld\n", out.len);
- ICP_DBG("region = %d\n", out.region);
-
- /* Allocate memory for cmd queue */
- memset(&alloc, 0, sizeof(alloc));
- memset(&out, 0, sizeof(out));
- alloc.size = SZ_1M;
- alloc.align = 0;
- alloc.region = CAM_MEM_MGR_REGION_SHARED;
- alloc.smmu_hdl = icp_hw_mgr.iommu_hdl;
- rc = cam_mem_mgr_request_mem(&alloc, &out);
- if (rc < 0) {
+ rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.cmd_q);
+ if (rc) {
pr_err("Unable to allocate cmd q memory\n");
goto cmd_q_alloc_failed;
}
- icp_hw_mgr.hfi_mem.cmd_q = out;
- ICP_DBG("kva = %llX\n", out.kva);
- ICP_DBG("cmd_q IOVA = %X\n", out.iova);
- ICP_DBG("SMMU HDL = %X\n", out.smmu_hdl);
- ICP_DBG("MEM HDL = %X\n", out.mem_handle);
- ICP_DBG("length = %lld\n", out.len);
- ICP_DBG("region = %d\n", out.region);
-
- /* Allocate memory for msg queue */
- memset(&alloc, 0, sizeof(alloc));
- memset(&out, 0, sizeof(out));
- alloc.size = SZ_1M;
- alloc.align = 0;
- alloc.region = CAM_MEM_MGR_REGION_SHARED;
- alloc.smmu_hdl = icp_hw_mgr.iommu_hdl;
- rc = cam_mem_mgr_request_mem(&alloc, &out);
- if (rc < 0) {
+ rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.msg_q);
+ if (rc) {
pr_err("Unable to allocate msg q memory\n");
goto msg_q_alloc_failed;
}
- icp_hw_mgr.hfi_mem.msg_q = out;
- ICP_DBG("kva = %llX\n", out.kva);
- ICP_DBG("msg_q IOVA = %X\n", out.iova);
- ICP_DBG("SMMU HDL = %X\n", out.smmu_hdl);
- ICP_DBG("MEM HDL = %X\n", out.mem_handle);
- ICP_DBG("length = %lld\n", out.len);
- ICP_DBG("region = %d\n", out.region);
-
- /* Allocate memory for dbg queue */
- memset(&alloc, 0, sizeof(alloc));
- memset(&out, 0, sizeof(out));
- alloc.size = SZ_1M;
- alloc.align = 0;
- alloc.region = CAM_MEM_MGR_REGION_SHARED;
- alloc.smmu_hdl = icp_hw_mgr.iommu_hdl;
- rc = cam_mem_mgr_request_mem(&alloc, &out);
- if (rc < 0) {
+ rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.dbg_q);
+ if (rc) {
pr_err("Unable to allocate dbg q memory\n");
goto dbg_q_alloc_failed;
}
- icp_hw_mgr.hfi_mem.dbg_q = out;
- ICP_DBG("kva = %llX\n", out.kva);
- ICP_DBG("dbg_q IOVA = %X\n", out.iova);
- ICP_DBG("SMMU HDL = %X\n", out.smmu_hdl);
- ICP_DBG("MEM HDL = %X\n", out.mem_handle);
- ICP_DBG("length = %lld\n", out.len);
- ICP_DBG("region = %d\n", out.region);
-
- /* Allocate memory for sec heap queue */
- memset(&alloc, 0, sizeof(alloc));
- memset(&out, 0, sizeof(out));
- alloc.size = SZ_1M;
- alloc.align = 0;
- alloc.region = CAM_MEM_MGR_REGION_SHARED;
- alloc.smmu_hdl = icp_hw_mgr.iommu_hdl;
- rc = cam_mem_mgr_request_mem(&alloc, &out);
- if (rc < 0) {
+ rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.sec_heap);
+ if (rc) {
pr_err("Unable to allocate sec heap q memory\n");
goto sec_heap_alloc_failed;
}
- icp_hw_mgr.hfi_mem.sec_heap = out;
-
- ICP_DBG("kva = %llX\n", out.kva);
- ICP_DBG("sec_heap IOVA = %X\n", out.iova);
- ICP_DBG("SMMU HDL = %X\n", out.smmu_hdl);
- ICP_DBG("MEM HDL = %X\n", out.mem_handle);
- ICP_DBG("length = %lld\n", out.len);
- ICP_DBG("region = %d\n", out.region);
return rc;
-
sec_heap_alloc_failed:
cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.dbg_q);
dbg_q_alloc_failed:
@@ -563,20 +516,17 @@ static int cam_icp_allocate_hfi_mem(void)
cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.qtbl);
qtbl_alloc_failed:
cam_smmu_dealloc_firmware(icp_hw_mgr.iommu_hdl);
- pr_err("returned with error : %d\n", rc);
-
return rc;
}
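
The refactor above collapses five copies of the 1 MB shared-region request into one helper and keeps the usual goto-based unwind on failure. A compact user-space model of that shape is sketched below, with malloc standing in for cam_mem_mgr_request_mem(); it is illustrative only.

/* Model of the helper-plus-unwind allocation shape (stand-in allocator). */
#include <stdlib.h>

struct mem_desc {
	void *kva;
	size_t len;
};

/* Stand-in for cam_icp_alloc_shared_mem()/cam_mem_mgr_request_mem(). */
static int alloc_shared(struct mem_desc *out, size_t size)
{
	out->kva = malloc(size);
	if (!out->kva)
		return -1;
	out->len = size;
	return 0;
}

static void release_mem(struct mem_desc *d)
{
	free(d->kva);
	d->kva = NULL;
	d->len = 0;
}

static int allocate_hfi_mem(struct mem_desc *qtbl, struct mem_desc *cmd_q,
			    struct mem_desc *msg_q)
{
	if (alloc_shared(qtbl, 1 << 20))
		return -1;
	if (alloc_shared(cmd_q, 1 << 20))
		goto cmd_q_failed;
	if (alloc_shared(msg_q, 1 << 20))
		goto msg_q_failed;
	return 0;

msg_q_failed:
	release_mem(cmd_q);
cmd_q_failed:
	release_mem(qtbl);
	return -1;
}

int main(void)
{
	struct mem_desc qtbl, cmd_q, msg_q;

	return allocate_hfi_mem(&qtbl, &cmd_q, &msg_q);
}
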
static int cam_icp_mgr_get_free_ctx(struct cam_icp_hw_mgr *hw_mgr)
{
int i = 0;
- int num_ctx = CAM_ICP_CTX_MAX;
- for (i = 0; i < num_ctx; i++) {
+ for (i = 0; i < CAM_ICP_CTX_MAX; i++) {
mutex_lock(&hw_mgr->ctx_data[i].ctx_mutex);
- if (hw_mgr->ctx_data[i].in_use == 0) {
- hw_mgr->ctx_data[i].in_use = 1;
+ if (hw_mgr->ctx_data[i].in_use == false) {
+ hw_mgr->ctx_data[i].in_use = true;
mutex_unlock(&hw_mgr->ctx_data[i].ctx_mutex);
break;
}
@@ -586,22 +536,87 @@ static int cam_icp_mgr_get_free_ctx(struct cam_icp_hw_mgr *hw_mgr)
return i;
}
+static void cam_icp_mgr_put_ctx(struct cam_icp_hw_ctx_data *ctx_data)
+{
+ mutex_lock(&ctx_data->ctx_mutex);
+ ctx_data->in_use = false;
+ mutex_unlock(&ctx_data->ctx_mutex);
+}
+
+static int cam_icp_mgr_abort_handle(
+ struct cam_icp_hw_ctx_data *ctx_data)
+{
+ int rc = 0;
+ int timeout = 5000;
+ struct hfi_cmd_work_data *task_data;
+ struct hfi_cmd_ipebps_async abort_cmd;
+ struct crm_workq_task *task;
+ unsigned long rem_jiffies;
+
+ task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+ if (!task)
+ return -ENOMEM;
+
+ abort_cmd.size =
+ sizeof(struct hfi_cmd_ipebps_async) +
+ sizeof(struct hfi_cmd_abort_destroy) -
+ sizeof(abort_cmd.payload.direct);
+ abort_cmd.pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_DIRECT;
+ if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
+ abort_cmd.opcode = HFI_IPEBPS_CMD_OPCODE_BPS_ABORT;
+ else
+ abort_cmd.opcode = HFI_IPEBPS_CMD_OPCODE_IPE_ABORT;
+
+ reinit_completion(&ctx_data->wait_complete);
+ abort_cmd.num_fw_handles = 1;
+ abort_cmd.fw_handles[0] = ctx_data->fw_handle;
+ abort_cmd.user_data1 = (uint64_t)ctx_data;
+ abort_cmd.user_data2 = (uint64_t)0x0;
+ memcpy(abort_cmd.payload.direct, &ctx_data->temp_payload,
+ sizeof(uint32_t));
+
+ task_data = (struct hfi_cmd_work_data *)task->payload;
+ task_data->data = (void *)&abort_cmd;
+ task_data->request_id = 0;
+ task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
+ task->process_cb = cam_icp_mgr_process_cmd;
+ rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+ CRM_TASK_PRIORITY_0);
+ if (rc)
+ return rc;
+
+ ICP_DBG("fw_handle = %x ctx_data = %pK\n",
+ ctx_data->fw_handle, ctx_data);
+ rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
+ msecs_to_jiffies((timeout)));
+ if (!rem_jiffies) {
+ rc = -ETIMEDOUT;
+ ICP_DBG("FW timeout/err in abort handle command\n");
+ }
+
+ return rc;
+}
+
static int cam_icp_mgr_destroy_handle(
- struct cam_icp_hw_ctx_data *ctx_data,
- struct crm_workq_task *task)
+ struct cam_icp_hw_ctx_data *ctx_data)
{
int rc = 0;
int timeout = 5000;
struct hfi_cmd_work_data *task_data;
struct hfi_cmd_ipebps_async destroy_cmd;
+ struct crm_workq_task *task;
unsigned long rem_jiffies;
+ task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+ if (!task)
+ return -ENOMEM;
+
destroy_cmd.size =
sizeof(struct hfi_cmd_ipebps_async) +
sizeof(struct ipe_bps_destroy) -
sizeof(destroy_cmd.payload.direct);
destroy_cmd.pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_DIRECT;
- if (ctx_data->icp_dev_acquire_info.dev_type == CAM_ICP_RES_TYPE_BPS)
+ if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
destroy_cmd.opcode = HFI_IPEBPS_CMD_OPCODE_BPS_DESTROY;
else
destroy_cmd.opcode = HFI_IPEBPS_CMD_OPCODE_IPE_DESTROY;
@@ -619,14 +634,18 @@ static int cam_icp_mgr_destroy_handle(
task_data->request_id = 0;
task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
task->process_cb = cam_icp_mgr_process_cmd;
- cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr, CRM_TASK_PRIORITY_0);
+ rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+ CRM_TASK_PRIORITY_0);
+ if (rc)
+ return rc;
+
ICP_DBG("fw_handle = %x ctx_data = %pK\n",
ctx_data->fw_handle, ctx_data);
rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
msecs_to_jiffies((timeout)));
if (!rem_jiffies) {
rc = -ETIMEDOUT;
- pr_err("FW response timeout: %d\n", rc);
+ ICP_DBG("FW response timeout: %d\n", rc);
}
return rc;
@@ -634,7 +653,6 @@ static int cam_icp_mgr_destroy_handle(
static int cam_icp_mgr_release_ctx(struct cam_icp_hw_mgr *hw_mgr, int ctx_id)
{
- struct crm_workq_task *task;
int i = 0;
if (ctx_id >= CAM_ICP_CTX_MAX) {
@@ -650,13 +668,11 @@ static int cam_icp_mgr_release_ctx(struct cam_icp_hw_mgr *hw_mgr, int ctx_id)
}
mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
- task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
- if (task)
- cam_icp_mgr_destroy_handle(&hw_mgr->ctx_data[ctx_id], task);
+ cam_icp_mgr_destroy_handle(&hw_mgr->ctx_data[ctx_id]);
mutex_lock(&hw_mgr->hw_mgr_mutex);
mutex_lock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
- hw_mgr->ctx_data[ctx_id].in_use = 0;
+ hw_mgr->ctx_data[ctx_id].in_use = false;
hw_mgr->ctx_data[ctx_id].fw_handle = 0;
hw_mgr->ctx_data[ctx_id].scratch_mem_size = 0;
mutex_lock(&hw_mgr->ctx_data[ctx_id].hfi_frame_process.lock);
@@ -664,52 +680,22 @@ static int cam_icp_mgr_release_ctx(struct cam_icp_hw_mgr *hw_mgr, int ctx_id)
clear_bit(i, hw_mgr->ctx_data[ctx_id].hfi_frame_process.bitmap);
mutex_unlock(&hw_mgr->ctx_data[ctx_id].hfi_frame_process.lock);
mutex_destroy(&hw_mgr->ctx_data[ctx_id].hfi_frame_process.lock);
- mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
kfree(hw_mgr->ctx_data[ctx_id].hfi_frame_process.bitmap);
+ hw_mgr->ctxt_cnt--;
+ kfree(hw_mgr->ctx_data[ctx_id].icp_dev_acquire_info);
+ hw_mgr->ctx_data[ctx_id].icp_dev_acquire_info = NULL;
+ mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
mutex_unlock(&hw_mgr->hw_mgr_mutex);
return 0;
}
-static int cam_icp_mgr_get_ctx_from_fw_handle(struct cam_icp_hw_mgr *hw_mgr,
- uint32_t fw_handle)
+static void cam_icp_mgr_device_deinit(struct cam_icp_hw_mgr *hw_mgr)
{
- int ctx_id;
-
- for (ctx_id = 0; ctx_id < CAM_ICP_CTX_MAX; ctx_id++) {
- mutex_lock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
- if (hw_mgr->ctx_data[ctx_id].in_use) {
- if (hw_mgr->ctx_data[ctx_id].fw_handle == fw_handle) {
- mutex_unlock(
- &hw_mgr->ctx_data[ctx_id].ctx_mutex);
- return ctx_id;
- }
- }
- mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
- }
- ICP_DBG("Invalid fw handle to get ctx\n");
-
- return -EINVAL;
-}
-
-static int cam_icp_mgr_hw_close(void *hw_priv, void *hw_close_args)
-{
- struct cam_icp_hw_mgr *hw_mgr = hw_priv;
struct cam_hw_intf *a5_dev_intf = NULL;
struct cam_hw_intf *ipe0_dev_intf = NULL;
struct cam_hw_intf *ipe1_dev_intf = NULL;
struct cam_hw_intf *bps_dev_intf = NULL;
- struct cam_icp_a5_set_irq_cb irq_cb;
- struct cam_icp_a5_set_fw_buf_info fw_buf_info;
- struct cam_icp_hw_ctx_data *ctx_data = NULL;
- int i;
-
- mutex_lock(&hw_mgr->hw_mgr_mutex);
- if (hw_mgr->fw_download == false) {
- ICP_DBG("hw mgr is already closed\n");
- mutex_unlock(&hw_mgr->hw_mgr_mutex);
- return 0;
- }
a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
ipe0_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][0];
@@ -718,84 +704,78 @@ static int cam_icp_mgr_hw_close(void *hw_priv, void *hw_close_args)
if ((!a5_dev_intf) || (!ipe0_dev_intf) || (!bps_dev_intf)) {
pr_err("dev intfs are wrong, failed to close\n");
+ return;
+ }
+
+ if (ipe1_dev_intf)
+ ipe1_dev_intf->hw_ops.deinit(ipe1_dev_intf->hw_priv, NULL, 0);
+ ipe0_dev_intf->hw_ops.deinit(ipe0_dev_intf->hw_priv, NULL, 0);
+ bps_dev_intf->hw_ops.deinit(bps_dev_intf->hw_priv, NULL, 0);
+ a5_dev_intf->hw_ops.deinit(a5_dev_intf->hw_priv, NULL, 0);
+}
+
+static int cam_icp_mgr_hw_close(void *hw_priv, void *hw_close_args)
+{
+ struct cam_icp_hw_mgr *hw_mgr = hw_priv;
+ struct cam_hw_intf *a5_dev_intf = NULL;
+ struct cam_icp_a5_set_irq_cb irq_cb;
+ struct cam_icp_a5_set_fw_buf_info fw_buf_info;
+ int i, rc = 0;
+
+ mutex_lock(&hw_mgr->hw_mgr_mutex);
+ if ((hw_mgr->fw_download == false) && (!hw_mgr->ctxt_cnt)) {
+ ICP_DBG("hw mgr is already closed\n");
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ return 0;
+ }
+
+ a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+ if (!a5_dev_intf) {
+ pr_err("a5_dev_intf is NULL\n");
mutex_unlock(&hw_mgr->hw_mgr_mutex);
return -EINVAL;
}
irq_cb.icp_hw_mgr_cb = NULL;
irq_cb.data = NULL;
- a5_dev_intf->hw_ops.process_cmd(
+ rc = a5_dev_intf->hw_ops.process_cmd(
a5_dev_intf->hw_priv,
CAM_ICP_A5_SET_IRQ_CB,
&irq_cb, sizeof(irq_cb));
+ if (rc)
+ pr_err("deregister irq call back failed\n");
fw_buf_info.kva = 0;
fw_buf_info.iova = 0;
fw_buf_info.len = 0;
- a5_dev_intf->hw_ops.process_cmd(
+ rc = a5_dev_intf->hw_ops.process_cmd(
a5_dev_intf->hw_priv,
CAM_ICP_A5_CMD_SET_FW_BUF,
&fw_buf_info,
sizeof(fw_buf_info));
+ if (rc)
+ pr_err("nullify the fw buf failed\n");
mutex_unlock(&hw_mgr->hw_mgr_mutex);
-
- for (i = 0; i < CAM_ICP_CTX_MAX; i++) {
- ctx_data = &hw_mgr->ctx_data[i];
+ for (i = 0; i < CAM_ICP_CTX_MAX; i++)
cam_icp_mgr_release_ctx(hw_mgr, i);
- }
mutex_lock(&hw_mgr->hw_mgr_mutex);
- ipe1_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][1];
- if (ipe1_dev_intf)
- ipe1_dev_intf->hw_ops.deinit(ipe1_dev_intf->hw_priv,
- NULL, 0);
-
- ipe0_dev_intf->hw_ops.deinit(ipe0_dev_intf->hw_priv, NULL, 0);
- bps_dev_intf->hw_ops.deinit(bps_dev_intf->hw_priv, NULL, 0);
- a5_dev_intf->hw_ops.deinit(a5_dev_intf->hw_priv, NULL, 0);
cam_hfi_deinit();
+ cam_icp_mgr_device_deinit(hw_mgr);
cam_icp_free_hfi_mem();
hw_mgr->fw_download = false;
mutex_unlock(&hw_mgr->hw_mgr_mutex);
-
return 0;
}
-static int cam_icp_mgr_download_fw(void *hw_mgr_priv, void *download_fw_args)
+static int cam_icp_mgr_device_init(struct cam_icp_hw_mgr *hw_mgr)
{
+ int rc = 0;
struct cam_hw_intf *a5_dev_intf = NULL;
struct cam_hw_intf *ipe0_dev_intf = NULL;
struct cam_hw_intf *ipe1_dev_intf = NULL;
struct cam_hw_intf *bps_dev_intf = NULL;
- struct cam_hw_info *a5_dev = NULL;
- struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
- struct cam_icp_a5_set_irq_cb irq_cb;
- struct cam_icp_a5_set_fw_buf_info fw_buf_info;
- struct hfi_mem_info hfi_mem;
- unsigned long rem_jiffies;
- int timeout = 5000;
- int rc = 0;
-
- if (!hw_mgr) {
- pr_err("hw_mgr is NULL\n");
- return -EINVAL;
- }
-
- mutex_lock(&hw_mgr->hw_mgr_mutex);
- if (hw_mgr->fw_download) {
- ICP_DBG("FW already downloaded\n");
- mutex_unlock(&hw_mgr->hw_mgr_mutex);
- return rc;
- }
-
- /* Allocate memory for FW and shared memory */
- rc = cam_icp_allocate_hfi_mem();
- if (rc < 0) {
- pr_err("hfi mem alloc failed\n");
- mutex_unlock(&hw_mgr->hw_mgr_mutex);
- return rc;
- }
a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
ipe0_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][0];
@@ -804,47 +784,58 @@ static int cam_icp_mgr_download_fw(void *hw_mgr_priv, void *download_fw_args)
if ((!a5_dev_intf) || (!ipe0_dev_intf) || (!bps_dev_intf)) {
pr_err("dev intfs are wrong\n");
- goto dev_intf_fail;
+ return -EINVAL;
}
- a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
-
rc = a5_dev_intf->hw_ops.init(a5_dev_intf->hw_priv, NULL, 0);
- if (rc < 0) {
- pr_err("a5 dev init failed\n");
+ if (rc)
goto a5_dev_init_failed;
- }
+
rc = bps_dev_intf->hw_ops.init(bps_dev_intf->hw_priv, NULL, 0);
- if (rc < 0) {
- pr_err("bps dev init failed\n");
+ if (rc)
goto bps_dev_init_failed;
- }
+
rc = ipe0_dev_intf->hw_ops.init(ipe0_dev_intf->hw_priv, NULL, 0);
- if (rc < 0) {
- pr_err("ipe0 dev init failed\n");
+ if (rc)
goto ipe0_dev_init_failed;
- }
if (ipe1_dev_intf) {
rc = ipe1_dev_intf->hw_ops.init(ipe1_dev_intf->hw_priv,
NULL, 0);
- if (rc < 0) {
- pr_err("ipe1 dev init failed\n");
+ if (rc)
goto ipe1_dev_init_failed;
- }
}
- /* Set IRQ callback */
+
+ return rc;
+ipe1_dev_init_failed:
+ ipe0_dev_intf->hw_ops.deinit(ipe0_dev_intf->hw_priv, NULL, 0);
+ipe0_dev_init_failed:
+ bps_dev_intf->hw_ops.deinit(bps_dev_intf->hw_priv, NULL, 0);
+bps_dev_init_failed:
+ a5_dev_intf->hw_ops.deinit(a5_dev_intf->hw_priv, NULL, 0);
+a5_dev_init_failed:
+ return rc;
+}
+
+static int cam_icp_mgr_fw_download(struct cam_icp_hw_mgr *hw_mgr)
+{
+ int rc;
+ struct cam_hw_intf *a5_dev_intf = NULL;
+ struct cam_hw_info *a5_dev = NULL;
+ struct cam_icp_a5_set_irq_cb irq_cb;
+ struct cam_icp_a5_set_fw_buf_info fw_buf_info;
+
+ a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+ a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
+
irq_cb.icp_hw_mgr_cb = cam_icp_hw_mgr_cb;
- irq_cb.data = hw_mgr_priv;
+ irq_cb.data = hw_mgr;
rc = a5_dev_intf->hw_ops.process_cmd(
a5_dev_intf->hw_priv,
CAM_ICP_A5_SET_IRQ_CB,
&irq_cb, sizeof(irq_cb));
- if (rc < 0) {
- pr_err("CAM_ICP_A5_SET_IRQ_CB failed\n");
- rc = -EINVAL;
+ if (rc)
goto set_irq_failed;
- }
fw_buf_info.kva = icp_hw_mgr.hfi_mem.fw_buf.kva;
fw_buf_info.iova = icp_hw_mgr.hfi_mem.fw_buf.iova;
@@ -853,12 +844,9 @@ static int cam_icp_mgr_download_fw(void *hw_mgr_priv, void *download_fw_args)
rc = a5_dev_intf->hw_ops.process_cmd(
a5_dev_intf->hw_priv,
CAM_ICP_A5_CMD_SET_FW_BUF,
- &fw_buf_info,
- sizeof(fw_buf_info));
- if (rc < 0) {
- pr_err("CAM_ICP_A5_CMD_SET_FW_BUF failed\n");
+ &fw_buf_info, sizeof(fw_buf_info));
+ if (rc)
goto set_irq_failed;
- }
cam_hfi_enable_cpu(a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
@@ -866,10 +854,24 @@ static int cam_icp_mgr_download_fw(void *hw_mgr_priv, void *download_fw_args)
a5_dev_intf->hw_priv,
CAM_ICP_A5_CMD_FW_DOWNLOAD,
NULL, 0);
- if (rc < 0) {
- pr_err("FW download is failed\n");
- goto set_irq_failed;
- }
+ if (rc)
+ goto fw_download_failed;
+
+ return rc;
+fw_download_failed:
+ cam_hfi_disable_cpu(a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
+set_irq_failed:
+ return rc;
+}
+
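+/* Map the allocated HFI queues/buffers into hfi_mem and init the HFI layer */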
+static int cam_icp_mgr_hfi_init(struct cam_icp_hw_mgr *hw_mgr)
+{
+ struct cam_hw_intf *a5_dev_intf = NULL;
+ struct cam_hw_info *a5_dev = NULL;
+ struct hfi_mem_info hfi_mem;
+
+ a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+ a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
hfi_mem.qtbl.kva = icp_hw_mgr.hfi_mem.qtbl.kva;
hfi_mem.qtbl.iova = icp_hw_mgr.hfi_mem.qtbl.iova;
@@ -905,65 +907,157 @@ static int cam_icp_mgr_download_fw(void *hw_mgr_priv, void *download_fw_args)
hfi_mem.shmem.iova = icp_hw_mgr.hfi_mem.shmem.iova_start;
hfi_mem.shmem.len = icp_hw_mgr.hfi_mem.shmem.iova_len;
-
- rc = cam_hfi_init(0, &hfi_mem,
+ return cam_hfi_init(0, &hfi_mem,
a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base,
hw_mgr->a5_debug);
- if (rc < 0) {
- pr_err("hfi_init is failed\n");
- goto set_irq_failed;
- }
+}
- mutex_unlock(&hw_mgr->hw_mgr_mutex);
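+/* Send the HFI init command to the A5 and wait up to 5s for INIT DONE */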
+static int cam_icp_mgr_send_fw_init(struct cam_icp_hw_mgr *hw_mgr)
+{
+ int rc;
+ struct cam_hw_intf *a5_dev_intf = NULL;
+ unsigned long rem_jiffies;
+ int timeout = 5000;
- ICP_DBG("Sending HFI init command\n");
+ a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
reinit_completion(&hw_mgr->a5_complete);
-
+ ICP_DBG("Sending HFI init command\n");
rc = a5_dev_intf->hw_ops.process_cmd(
a5_dev_intf->hw_priv,
CAM_ICP_A5_SEND_INIT,
NULL, 0);
+ if (rc)
+ return rc;
ICP_DBG("Wait for INIT DONE Message\n");
rem_jiffies = wait_for_completion_timeout(&icp_hw_mgr.a5_complete,
- msecs_to_jiffies((timeout)));
+ msecs_to_jiffies((timeout)));
if (!rem_jiffies) {
rc = -ETIMEDOUT;
- pr_err("FW response timed out %d\n", rc);
- goto set_irq_failed;
+ ICP_DBG("FW response timed out %d\n", rc);
+ }
+ ICP_DBG("Done Waiting for INIT DONE Message\n");
+
+ return rc;
+}
+
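+/* FW bring-up: alloc HFI memory, init devices, download FW, init HFI/FW */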
+static int cam_icp_mgr_download_fw(void *hw_mgr_priv, void *download_fw_args)
+{
+ struct cam_hw_intf *a5_dev_intf = NULL;
+ struct cam_hw_info *a5_dev = NULL;
+ struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
+ int rc = 0;
+
+ if (!hw_mgr) {
+ pr_err("hw_mgr is NULL\n");
+ return -EINVAL;
}
- ICP_DBG("Done Waiting for INIT DONE Message\n");
+ mutex_lock(&hw_mgr->hw_mgr_mutex);
+ if (hw_mgr->fw_download) {
+ ICP_DBG("FW already downloaded\n");
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ return rc;
+ }
+
+ a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+ a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
+ rc = cam_icp_allocate_hfi_mem();
+ if (rc) {
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ goto alloc_hfi_mem_failed;
+ }
+
+ rc = cam_icp_mgr_device_init(hw_mgr);
+ if (rc) {
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ goto dev_init_fail;
+ }
+
+ rc = cam_icp_mgr_fw_download(hw_mgr);
+ if (rc) {
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ goto fw_download_failed;
+ }
+
+ rc = cam_icp_mgr_hfi_init(hw_mgr);
+ if (rc) {
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ goto hfi_init_failed;
+ }
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+ rc = cam_icp_mgr_send_fw_init(hw_mgr);
+ if (rc)
+ goto fw_init_failed;
rc = a5_dev_intf->hw_ops.process_cmd(
a5_dev_intf->hw_priv,
CAM_ICP_A5_CMD_POWER_COLLAPSE,
NULL, 0);
- if (rc) {
- pr_err("icp power collapse failed\n");
- goto set_irq_failed;
- }
-
hw_mgr->fw_download = true;
hw_mgr->ctxt_cnt = 0;
ICP_DBG("FW download done successfully\n");
-
+ if (!download_fw_args)
+ cam_icp_mgr_hw_close(hw_mgr, NULL);
return rc;
-set_irq_failed:
- if (ipe1_dev_intf)
- rc = ipe1_dev_intf->hw_ops.deinit(ipe1_dev_intf->hw_priv,
- NULL, 0);
-ipe1_dev_init_failed:
- rc = ipe0_dev_intf->hw_ops.deinit(ipe0_dev_intf->hw_priv, NULL, 0);
-ipe0_dev_init_failed:
- rc = bps_dev_intf->hw_ops.deinit(bps_dev_intf->hw_priv, NULL, 0);
-bps_dev_init_failed:
- rc = a5_dev_intf->hw_ops.deinit(a5_dev_intf->hw_priv, NULL, 0);
-a5_dev_init_failed:
-dev_intf_fail:
+fw_init_failed:
+ cam_hfi_deinit();
+hfi_init_failed:
+ cam_hfi_disable_cpu(a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
+fw_download_failed:
+ cam_icp_mgr_device_deinit(hw_mgr);
+dev_init_fail:
cam_icp_free_hfi_mem();
- mutex_unlock(&hw_mgr->hw_mgr_mutex);
+alloc_hfi_mem_failed:
+ return rc;
+}
+
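+/* Report a failed config request to the context via its event callback */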
+static int cam_icp_mgr_handle_config_err(
+ struct cam_hw_config_args *config_args,
+ struct cam_icp_hw_ctx_data *ctx_data)
+{
+ struct cam_hw_done_event_data buf_data;
+
+ buf_data.num_handles = config_args->num_out_map_entries;
+ buf_data.request_id = *(uint64_t *)config_args->priv;
+ ctx_data->ctxt_event_cb(ctx_data->context_priv, true, &buf_data);
+
+ return 0;
+}
+
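+/* Queue the prepared HFI command on the ICP command workq at priority 0 */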
+static int cam_icp_mgr_enqueue_config(struct cam_icp_hw_mgr *hw_mgr,
+ struct cam_hw_config_args *config_args)
+{
+ int rc = 0;
+ uint64_t request_id = 0;
+ struct crm_workq_task *task;
+ struct hfi_cmd_work_data *task_data;
+ struct hfi_cmd_ipebps_async *hfi_cmd;
+ struct cam_hw_update_entry *hw_update_entries;
+
+ request_id = *(uint64_t *)config_args->priv;
+ hw_update_entries = config_args->hw_update_entries;
+ ICP_DBG("req_id = %lld %pK\n", request_id, config_args->priv);
+
+ task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+ if (!task) {
+ pr_err("no empty task\n");
+ return -ENOMEM;
+ }
+
+ task_data = (struct hfi_cmd_work_data *)task->payload;
+ task_data->data = (void *)hw_update_entries->addr;
+ hfi_cmd = (struct hfi_cmd_ipebps_async *)hw_update_entries->addr;
+ ICP_DBG("request from hfi_cmd :%llu, hfi_cmd: %pK\n",
+ hfi_cmd->user_data2, hfi_cmd);
+ task_data->request_id = request_id;
+ task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
+ task->process_cb = cam_icp_mgr_process_cmd;
+ rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+ CRM_TASK_PRIORITY_0);
+
return rc;
}
@@ -972,18 +1066,10 @@ static int cam_icp_mgr_config_hw(void *hw_mgr_priv, void *config_hw_args)
int rc = 0;
struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
struct cam_hw_config_args *config_args = config_hw_args;
- uint32_t fw_handle;
- int ctx_id = 0;
struct cam_icp_hw_ctx_data *ctx_data = NULL;
- int32_t request_id = 0;
- struct cam_hw_update_entry *hw_update_entries;
- struct crm_workq_task *task;
- struct hfi_cmd_work_data *task_data;
- struct hfi_cmd_ipebps_async *hfi_cmd;
if (!hw_mgr || !config_args) {
- pr_err("Invalid arguments %pK %pK\n",
- hw_mgr, config_args);
+ pr_err("Invalid arguments %pK %pK\n", hw_mgr, config_args);
return -EINVAL;
}
@@ -993,62 +1079,34 @@ static int cam_icp_mgr_config_hw(void *hw_mgr_priv, void *config_hw_args)
}
mutex_lock(&hw_mgr->hw_mgr_mutex);
- fw_handle = *(uint32_t *)config_args->ctxt_to_hw_map;
- ctx_id = cam_icp_mgr_get_ctx_from_fw_handle(hw_mgr, fw_handle);
- if (ctx_id < 0) {
- pr_err("Fw handle to ctx mapping is failed\n");
- mutex_unlock(&hw_mgr->hw_mgr_mutex);
- return -EINVAL;
- }
-
- ctx_data = &hw_mgr->ctx_data[ctx_id];
+ ctx_data = config_args->ctxt_to_hw_map;
if (!ctx_data->in_use) {
pr_err("ctx is not in use\n");
mutex_unlock(&hw_mgr->hw_mgr_mutex);
- return -EINVAL;
+ rc = -EINVAL;
+ goto config_err;
}
-
- request_id = *(uint32_t *)config_args->priv;
- hw_update_entries = config_args->hw_update_entries;
- ICP_DBG("req_id = %d\n", request_id);
- ICP_DBG("fw_handle = %x req_id = %d %pK\n",
- fw_handle, request_id, config_args->priv);
- task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
- if (!task) {
- pr_err("no empty task\n");
- mutex_unlock(&hw_mgr->hw_mgr_mutex);
- return -ENOMEM;
- }
-
mutex_unlock(&hw_mgr->hw_mgr_mutex);
- task_data = (struct hfi_cmd_work_data *)task->payload;
- if (!task_data) {
- pr_err("task_data is NULL\n");
- return -EINVAL;
- }
+ rc = cam_icp_mgr_enqueue_config(hw_mgr, config_args);
+ if (rc)
+ goto config_err;
- task_data->data = (void *)hw_update_entries->addr;
- hfi_cmd = (struct hfi_cmd_ipebps_async *)hw_update_entries->addr;
- ICP_DBG("request from hfi_cmd :%llu, hfi_cmd: %pK\n",
- hfi_cmd->user_data2, hfi_cmd);
- task_data->request_id = request_id;
- task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
- task->process_cb = cam_icp_mgr_process_cmd;
- rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
- CRM_TASK_PRIORITY_0);
+ return 0;
+config_err:
+ cam_icp_mgr_handle_config_err(config_args, ctx_data);
return rc;
}
static int cam_icp_mgr_prepare_frame_process_cmd(
- struct cam_icp_hw_ctx_data *ctx_data,
- struct hfi_cmd_ipebps_async *hfi_cmd,
- uint32_t request_id,
- uint32_t fw_cmd_buf_iova_addr)
+ struct cam_icp_hw_ctx_data *ctx_data,
+ struct hfi_cmd_ipebps_async *hfi_cmd,
+ uint64_t request_id,
+ uint32_t fw_cmd_buf_iova_addr)
{
hfi_cmd->size = sizeof(struct hfi_cmd_ipebps_async);
hfi_cmd->pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT;
- if (ctx_data->icp_dev_acquire_info.dev_type == CAM_ICP_RES_TYPE_BPS)
+ if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
hfi_cmd->opcode = HFI_IPEBPS_CMD_OPCODE_BPS_FRAME_PROCESS;
else
hfi_cmd->opcode = HFI_IPEBPS_CMD_OPCODE_IPE_FRAME_PROCESS;
@@ -1058,80 +1116,28 @@ static int cam_icp_mgr_prepare_frame_process_cmd(
hfi_cmd->user_data1 = (uint64_t)ctx_data;
hfi_cmd->user_data2 = request_id;
- ICP_DBG("ctx_data : %pK, request_id :%d cmd_buf %x\n",
- (void *)ctx_data->context_priv,
- request_id, fw_cmd_buf_iova_addr);
+ ICP_DBG("ctx_data : %pK, request_id :%lld cmd_buf %x\n",
+ (void *)ctx_data->context_priv, request_id,
+ fw_cmd_buf_iova_addr);
return 0;
}
-static int cam_icp_mgr_prepare_hw_update(void *hw_mgr_priv,
- void *prepare_hw_update_args)
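+/* Validate packet opcode and cmd/patch/io-config counts */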
+static int cam_icp_mgr_pkt_validation(struct cam_packet *packet)
{
- int rc = 0, i, j;
- int ctx_id = 0;
- uint32_t fw_handle;
- int32_t idx;
- uint64_t iova_addr;
- uint32_t fw_cmd_buf_iova_addr;
- size_t fw_cmd_buf_len;
- int32_t sync_in_obj[CAM_ICP_IPE_IMAGE_MAX];
- int32_t merged_sync_in_obj;
-
-
- struct cam_hw_prepare_update_args *prepare_args =
- prepare_hw_update_args;
- struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
- struct cam_icp_hw_ctx_data *ctx_data = NULL;
- struct cam_packet *packet = NULL;
- struct cam_cmd_buf_desc *cmd_desc = NULL;
- struct cam_buf_io_cfg *io_cfg_ptr = NULL;
- struct hfi_cmd_ipebps_async *hfi_cmd = NULL;
-
- if ((!prepare_args) || (!hw_mgr)) {
- pr_err("Invalid args\n");
- return -EINVAL;
- }
-
- mutex_lock(&hw_mgr->hw_mgr_mutex);
- fw_handle = *(uint32_t *)prepare_args->ctxt_to_hw_map;
- ctx_id = cam_icp_mgr_get_ctx_from_fw_handle(hw_mgr, fw_handle);
- if (ctx_id < 0) {
- pr_err("Fw handle to ctx mapping is failed\n");
- mutex_unlock(&hw_mgr->hw_mgr_mutex);
- return -EINVAL;
- }
- mutex_unlock(&hw_mgr->hw_mgr_mutex);
-
- ctx_data = &hw_mgr->ctx_data[ctx_id];
- if (!ctx_data->in_use) {
- pr_err("ctx is not in use\n");
- return -EINVAL;
- }
-
- packet = prepare_args->packet;
- if (!packet) {
- pr_err("received packet is NULL\n");
- return -EINVAL;
- }
-
ICP_DBG("packet header : opcode = %x size = %x",
- packet->header.op_code,
- packet->header.size);
+ packet->header.op_code, packet->header.size);
ICP_DBG(" req_id = %x flags = %x\n",
- (uint32_t)packet->header.request_id,
- packet->header.flags);
+ (uint32_t)packet->header.request_id, packet->header.flags);
ICP_DBG("packet data : c_off = %x c_num = %x\n",
- packet->cmd_buf_offset,
- packet->num_cmd_buf);
+ packet->cmd_buf_offset, packet->num_cmd_buf);
ICP_DBG("io_off = %x io_num = %x p_off = %x p_num = %x %x %x\n",
- packet->io_configs_offset,
- packet->num_io_configs, packet->patch_offset,
- packet->num_patches, packet->kmd_cmd_buf_index,
- packet->kmd_cmd_buf_offset);
+ packet->io_configs_offset, packet->num_io_configs,
+ packet->patch_offset, packet->num_patches,
+ packet->kmd_cmd_buf_index, packet->kmd_cmd_buf_offset);
if (((packet->header.op_code & 0xff) !=
CAM_ICP_OPCODE_IPE_UPDATE) &&
@@ -1143,66 +1149,78 @@ static int cam_icp_mgr_prepare_hw_update(void *hw_mgr_priv,
}
if ((packet->num_cmd_buf > 1) || (!packet->num_patches) ||
- (!packet->num_io_configs)) {
+ (!packet->num_io_configs)) {
pr_err("wrong number of cmd/patch info: %u %u\n",
- packet->num_cmd_buf,
- packet->num_patches);
+ packet->num_cmd_buf, packet->num_patches);
return -EINVAL;
}
- /* process command buffer descriptors */
+ return 0;
+}
+
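+/* Convert the command buffer handle to the iova used by firmware */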
+static int cam_icp_mgr_process_cmd_desc(struct cam_icp_hw_mgr *hw_mgr,
+ struct cam_packet *packet,
+ uint32_t *fw_cmd_buf_iova_addr)
+{
+ int rc = 0;
+ uint64_t iova_addr;
+ size_t fw_cmd_buf_len;
+ struct cam_cmd_buf_desc *cmd_desc = NULL;
+
cmd_desc = (struct cam_cmd_buf_desc *)
- ((uint32_t *) &packet->payload +
- packet->cmd_buf_offset/4);
+ ((uint32_t *) &packet->payload + packet->cmd_buf_offset/4);
ICP_DBG("packet = %pK cmd_desc = %pK size = %lu\n",
- (void *)packet, (void *)cmd_desc,
- sizeof(struct cam_cmd_buf_desc));
+ (void *)packet, (void *)cmd_desc,
+ sizeof(struct cam_cmd_buf_desc));
rc = cam_mem_get_io_buf(cmd_desc->mem_handle,
hw_mgr->iommu_hdl, &iova_addr, &fw_cmd_buf_len);
- if (rc < 0) {
+ if (rc) {
pr_err("unable to get src buf info for cmd buf: %x\n",
- hw_mgr->iommu_hdl);
+ hw_mgr->iommu_hdl);
return rc;
}
ICP_DBG("cmd_buf desc cpu and iova address: %pK %zu\n",
- (void *)iova_addr, fw_cmd_buf_len);
- fw_cmd_buf_iova_addr = iova_addr;
- fw_cmd_buf_iova_addr = (fw_cmd_buf_iova_addr + cmd_desc->offset);
+ (void *)iova_addr, fw_cmd_buf_len);
- /* Update Buffer Address from handles and patch information */
- rc = cam_packet_util_process_patches(packet, hw_mgr->iommu_hdl);
- if (rc) {
- pr_err("Patch processing failed\n");
- return rc;
- }
+ *fw_cmd_buf_iova_addr = iova_addr;
+ *fw_cmd_buf_iova_addr = (*fw_cmd_buf_iova_addr + cmd_desc->offset);
- /* process io config out descriptors */
+ return rc;
+}
+
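+/* Map output fences to out entries and merge input fences into one object */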
+static int cam_icp_mgr_process_io_cfg(struct cam_icp_hw_mgr *hw_mgr,
+ struct cam_icp_hw_ctx_data *ctx_data,
+ struct cam_packet *packet,
+ struct cam_hw_prepare_update_args *prepare_args)
+{
+ int rc = 0, i, j;
+ int32_t sync_in_obj[CAM_ICP_IPE_IMAGE_MAX];
+ int32_t merged_sync_in_obj;
+ struct cam_buf_io_cfg *io_cfg_ptr = NULL;
+
io_cfg_ptr = (struct cam_buf_io_cfg *) ((uint32_t *) &packet->payload +
packet->io_configs_offset/4);
ICP_DBG("packet = %pK io_cfg_ptr = %pK size = %lu\n",
- (void *)packet, (void *)io_cfg_ptr,
- sizeof(struct cam_buf_io_cfg));
+ (void *)packet, (void *)io_cfg_ptr,
+ sizeof(struct cam_buf_io_cfg));
prepare_args->num_out_map_entries = 0;
for (i = 0, j = 0; i < packet->num_io_configs; i++) {
if (io_cfg_ptr[i].direction == CAM_BUF_INPUT) {
ICP_DBG("direction is i : %d :%u\n",
- i, io_cfg_ptr[i].direction);
+ i, io_cfg_ptr[i].direction);
ICP_DBG("fence is i : %d :%d\n",
- i, io_cfg_ptr[i].fence);
+ i, io_cfg_ptr[i].fence);
continue;
}
- prepare_args->out_map_entries[j].sync_id = io_cfg_ptr[i].fence;
- prepare_args->out_map_entries[j++].resource_handle =
- io_cfg_ptr[i].fence;
+ prepare_args->out_map_entries[j++].sync_id =
+ io_cfg_ptr[i].fence;
prepare_args->num_out_map_entries++;
ICP_DBG(" out fence = %x index = %d\n", io_cfg_ptr[i].fence, i);
}
- ICP_DBG("out buf entries processing is done\n");
- /* process io config in descriptors */
for (i = 0, j = 0; i < packet->num_io_configs; i++) {
if (io_cfg_ptr[i].direction == CAM_BUF_INPUT) {
sync_in_obj[j++] = io_cfg_ptr[i].fence;
@@ -1211,13 +1229,12 @@ static int cam_icp_mgr_prepare_hw_update(void *hw_mgr_priv,
}
}
- if (j == 1)
+ if (j == 1) {
merged_sync_in_obj = sync_in_obj[j - 1];
- else if (j > 1) {
+ } else if (j > 1) {
rc = cam_sync_merge(&sync_in_obj[0], j, &merged_sync_in_obj);
- if (rc < 0) {
- pr_err("unable to create in merged object: %d\n",
- rc);
+ if (rc) {
+ pr_err("unable to create in merged object: %d\n", rc);
return rc;
}
} else {
@@ -1227,100 +1244,186 @@ static int cam_icp_mgr_prepare_hw_update(void *hw_mgr_priv,
prepare_args->in_map_entries[0].sync_id = merged_sync_in_obj;
prepare_args->in_map_entries[0].resource_handle =
- ctx_data->icp_dev_acquire_info.dev_type;
+ ctx_data->icp_dev_acquire_info->dev_type;
prepare_args->num_in_map_entries = 1;
- ICP_DBG("out buf entries processing is done\n");
+
+ return rc;
+}
+
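+/* Reserve a free frame process slot and record the request id in it */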
+static int cam_icp_mgr_update_hfi_frame_process(
+ struct cam_icp_hw_ctx_data *ctx_data,
+ struct cam_packet *packet,
+ struct cam_hw_prepare_update_args *prepare_args,
+ int32_t *idx)
+{
+ int32_t index;
mutex_lock(&ctx_data->hfi_frame_process.lock);
- idx = find_first_zero_bit(ctx_data->hfi_frame_process.bitmap,
- ctx_data->hfi_frame_process.bits);
- if (idx < 0 || idx >= CAM_FRAME_CMD_MAX) {
- pr_err("request idx is wrong: %d\n", idx);
+ index = find_first_zero_bit(ctx_data->hfi_frame_process.bitmap,
+ ctx_data->hfi_frame_process.bits);
+ if (index < 0 || index >= CAM_FRAME_CMD_MAX) {
+ pr_err("request idx is wrong: %d\n", index);
mutex_unlock(&ctx_data->hfi_frame_process.lock);
return -EINVAL;
}
- set_bit(idx, ctx_data->hfi_frame_process.bitmap);
+ set_bit(index, ctx_data->hfi_frame_process.bitmap);
mutex_unlock(&ctx_data->hfi_frame_process.lock);
- ctx_data->hfi_frame_process.request_id[idx] = packet->header.request_id;
- ICP_DBG("slot[%d]: %d\n", idx,
- ctx_data->hfi_frame_process.request_id[idx]);
- ctx_data->hfi_frame_process.num_out_resources[idx] =
- prepare_args->num_out_map_entries;
- for (i = 0; i < prepare_args->num_out_map_entries; i++)
- ctx_data->hfi_frame_process.out_resource[idx][i] =
- prepare_args->out_map_entries[i].resource_handle;
+ ctx_data->hfi_frame_process.request_id[index] =
+ packet->header.request_id;
+ ICP_DBG("slot[%d]: %lld\n", index,
+ ctx_data->hfi_frame_process.request_id[index]);
+ *idx = index;
+
+ return 0;
+}
+
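+/* Validate, patch and translate the packet into an async HFI frame command */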
+static int cam_icp_mgr_prepare_hw_update(void *hw_mgr_priv,
+ void *prepare_hw_update_args)
+{
+ int rc = 0;
+ int32_t idx;
+ uint32_t fw_cmd_buf_iova_addr;
+ struct cam_icp_hw_ctx_data *ctx_data = NULL;
+ struct cam_packet *packet = NULL;
+ struct hfi_cmd_ipebps_async *hfi_cmd = NULL;
+ struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
+ struct cam_hw_prepare_update_args *prepare_args =
+ prepare_hw_update_args;
+
+ if ((!prepare_args) || (!hw_mgr) || (!prepare_args->packet)) {
+ pr_err("Invalid args\n");
+ return -EINVAL;
+ }
+
+ ctx_data = prepare_args->ctxt_to_hw_map;
+ mutex_lock(&hw_mgr->hw_mgr_mutex);
+ if (!ctx_data->in_use) {
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ pr_err("ctx is not in use\n");
+ return -EINVAL;
+ }
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+ packet = prepare_args->packet;
+
+ rc = cam_icp_mgr_pkt_validation(packet);
+ if (rc)
+ return rc;
+
+ rc = cam_icp_mgr_process_cmd_desc(hw_mgr, packet,
+ &fw_cmd_buf_iova_addr);
+ if (rc)
+ return rc;
+
+ /* Update Buffer Address from handles and patch information */
+ rc = cam_packet_util_process_patches(packet, hw_mgr->iommu_hdl);
+ if (rc)
+ return rc;
+
+ rc = cam_icp_mgr_process_io_cfg(hw_mgr, ctx_data,
+ packet, prepare_args);
+ if (rc)
+ return rc;
+
+ rc = cam_icp_mgr_update_hfi_frame_process(ctx_data, packet,
+ prepare_args, &idx);
+ if (rc) {
+ if (prepare_args->in_map_entries[0].sync_id > 0)
+ cam_sync_destroy(
+ prepare_args->in_map_entries[0].sync_id);
+ return rc;
+ }
hfi_cmd = (struct hfi_cmd_ipebps_async *)
&ctx_data->hfi_frame_process.hfi_frame_cmd[idx];
cam_icp_mgr_prepare_frame_process_cmd(
- ctx_data, hfi_cmd, packet->header.request_id,
- fw_cmd_buf_iova_addr);
+ ctx_data, hfi_cmd, packet->header.request_id,
+ fw_cmd_buf_iova_addr);
prepare_args->num_hw_update_entries = 1;
prepare_args->hw_update_entries[0].addr = (uint64_t)hfi_cmd;
-
prepare_args->priv = &ctx_data->hfi_frame_process.request_id[idx];
-
- ICP_DBG("slot : %d, hfi_cmd : %pK, request : %d\n", idx,
+ ICP_DBG("slot : %d, hfi_cmd : %pK, request : %lld\n", idx,
(void *)hfi_cmd,
ctx_data->hfi_frame_process.request_id[idx]);
return rc;
}
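+/* Signal abort status for every pending request and free its process slot */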
+static int cam_icp_mgr_send_abort_status(struct cam_icp_hw_ctx_data *ctx_data)
+{
+ struct hfi_frame_process_info *hfi_frame_process;
+ int idx;
+
+ mutex_lock(&ctx_data->hfi_frame_process.lock);
+ hfi_frame_process = &ctx_data->hfi_frame_process;
+ for (idx = 0; idx < CAM_FRAME_CMD_MAX; idx++) {
+ if (!hfi_frame_process->request_id[idx])
+ continue;
+
+ ctx_data->ctxt_event_cb(ctx_data->context_priv, true,
+ &hfi_frame_process->request_id[idx]);
+
+ /* now release memory for hfi frame process command */
+ hfi_frame_process->request_id[idx] = 0;
+ clear_bit(idx, ctx_data->hfi_frame_process.bitmap);
+ }
+ mutex_unlock(&ctx_data->hfi_frame_process.lock);
+
+ return 0;
+}
+
static int cam_icp_mgr_release_hw(void *hw_mgr_priv, void *release_hw_args)
{
int rc = 0;
int ctx_id = 0;
- int i;
- uint32_t fw_handle;
struct cam_hw_release_args *release_hw = release_hw_args;
struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
struct cam_icp_hw_ctx_data *ctx_data = NULL;
if (!release_hw || !hw_mgr) {
- pr_err("Invalid args\n");
+ pr_err("Invalid args: %pK %pK\n", release_hw, hw_mgr);
return -EINVAL;
}
- for (i = 0; i < CAM_ICP_CTX_MAX; i++) {
- ctx_data = &hw_mgr->ctx_data[i];
- ICP_DBG("i = %d in_use = %u fw_handle = %u\n", i,
- ctx_data->in_use, ctx_data->fw_handle);
- }
-
- mutex_lock(&hw_mgr->hw_mgr_mutex);
- fw_handle = *(uint32_t *)release_hw->ctxt_to_hw_map;
- ctx_id = cam_icp_mgr_get_ctx_from_fw_handle(hw_mgr, fw_handle);
- if (ctx_id < 0) {
- pr_err("Invalid ctx id\n");
- mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ ctx_data = release_hw->ctxt_to_hw_map;
+ ctx_id = ctx_data->ctx_id;
+ if (ctx_id < 0 || ctx_id >= CAM_ICP_CTX_MAX) {
+ pr_err("Invalid ctx id: %d\n", ctx_id);
return -EINVAL;
}
- mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+ if (release_hw->active_req) {
+ cam_icp_mgr_abort_handle(ctx_data);
+ cam_icp_mgr_send_abort_status(ctx_data);
+ }
rc = cam_icp_mgr_release_ctx(hw_mgr, ctx_id);
- if (rc)
- return -EINVAL;
+ if (!hw_mgr->ctxt_cnt)
+ cam_icp_mgr_hw_close(hw_mgr, NULL);
- ICP_DBG("fw handle %d\n", fw_handle);
return rc;
}
static int cam_icp_mgr_send_config_io(struct cam_icp_hw_ctx_data *ctx_data,
- struct crm_workq_task *task, uint32_t io_buf_addr)
+ uint32_t io_buf_addr)
{
int rc = 0;
struct hfi_cmd_work_data *task_data;
struct hfi_cmd_ipebps_async ioconfig_cmd;
unsigned long rem_jiffies;
int timeout = 5000;
+ struct crm_workq_task *task;
+
+ task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+ if (!task)
+ return -ENOMEM;
ioconfig_cmd.size = sizeof(struct hfi_cmd_ipebps_async);
ioconfig_cmd.pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT;
- if (ctx_data->icp_dev_acquire_info.dev_type == CAM_ICP_RES_TYPE_BPS)
+ if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
ioconfig_cmd.opcode = HFI_IPEBPS_CMD_OPCODE_BPS_CONFIG_IO;
else
ioconfig_cmd.opcode = HFI_IPEBPS_CMD_OPCODE_IPE_CONFIG_IO;
@@ -1338,29 +1441,37 @@ static int cam_icp_mgr_send_config_io(struct cam_icp_hw_ctx_data *ctx_data,
task_data->request_id = 0;
task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
task->process_cb = cam_icp_mgr_process_cmd;
- cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr, CRM_TASK_PRIORITY_0);
+ rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+ CRM_TASK_PRIORITY_0);
+ if (rc)
+ return rc;
+
ICP_DBG("fw_hdl = %x ctx_data = %pK\n", ctx_data->fw_handle, ctx_data);
rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
msecs_to_jiffies((timeout)));
if (!rem_jiffies) {
rc = -ETIMEDOUT;
- pr_err("FW response timed out %d\n", rc);
+ ICP_DBG("FW response timed out %d\n", rc);
}
return rc;
}
static int cam_icp_mgr_create_handle(uint32_t dev_type,
- struct cam_icp_hw_ctx_data *ctx_data,
- struct crm_workq_task *task)
+ struct cam_icp_hw_ctx_data *ctx_data)
{
struct hfi_cmd_create_handle create_handle;
struct hfi_cmd_work_data *task_data;
unsigned long rem_jiffies;
int timeout = 5000;
+ struct crm_workq_task *task;
int rc = 0;
+ task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+ if (!task)
+ return -ENOMEM;
+
create_handle.size = sizeof(struct hfi_cmd_create_handle);
create_handle.pkt_type = HFI_CMD_IPEBPS_CREATE_HANDLE;
create_handle.handle_type = dev_type;
@@ -1375,27 +1486,36 @@ static int cam_icp_mgr_create_handle(uint32_t dev_type,
task_data->request_id = 0;
task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
task->process_cb = cam_icp_mgr_process_cmd;
- cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr, CRM_TASK_PRIORITY_0);
+ rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+ CRM_TASK_PRIORITY_0);
+ if (rc)
+ return rc;
rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
msecs_to_jiffies((timeout)));
if (!rem_jiffies) {
rc = -ETIMEDOUT;
- pr_err("FW response timed out %d\n", rc);
+ ICP_DBG("FW response timed out %d\n", rc);
}
return rc;
}
-static int cam_icp_mgr_send_ping(struct cam_icp_hw_ctx_data *ctx_data,
- struct crm_workq_task *task)
+static int cam_icp_mgr_send_ping(struct cam_icp_hw_ctx_data *ctx_data)
{
struct hfi_cmd_ping_pkt ping_pkt;
struct hfi_cmd_work_data *task_data;
unsigned long rem_jiffies;
int timeout = 5000;
+ struct crm_workq_task *task;
int rc = 0;
+ task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+ if (!task) {
+ pr_err("No free task to send ping command\n");
+ return -ENOMEM;
+ }
+
ping_pkt.size = sizeof(struct hfi_cmd_ping_pkt);
ping_pkt.pkt_type = HFI_CMD_SYS_PING;
ping_pkt.user_data = (uint64_t)ctx_data;
@@ -1409,32 +1529,86 @@ static int cam_icp_mgr_send_ping(struct cam_icp_hw_ctx_data *ctx_data,
task_data->request_id = 0;
task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
task->process_cb = cam_icp_mgr_process_cmd;
- cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr, CRM_TASK_PRIORITY_0);
+
+ rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+ CRM_TASK_PRIORITY_0);
+ if (rc)
+ return rc;
rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
msecs_to_jiffies((timeout)));
if (!rem_jiffies) {
rc = -ETIMEDOUT;
- pr_err("FW response timed out %d\n", rc);
+ ICP_DBG("FW response timed out %d\n", rc);
}
-
return rc;
}
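+/* Copy acquire info and its out resource entries from user space into ctx */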
+static int cam_icp_get_acquire_info(struct cam_icp_hw_mgr *hw_mgr,
+ struct cam_hw_acquire_args *args,
+ struct cam_icp_hw_ctx_data *ctx_data)
+{
+ int i;
+ int acquire_size;
+ struct cam_icp_acquire_dev_info icp_dev_acquire_info;
+ struct cam_icp_res_info *p_icp_out = NULL;
+
+ if (copy_from_user(&icp_dev_acquire_info,
+ (void __user *)args->acquire_info,
+ sizeof(struct cam_icp_acquire_dev_info)))
+ return -EFAULT;
+
+ if (icp_dev_acquire_info.num_out_res > ICP_MAX_OUTPUT_SUPPORTED) {
+ pr_err("num of out resources exceeding : %u\n",
+ icp_dev_acquire_info.num_out_res);
+ return -EINVAL;
+ }
+
+ acquire_size = sizeof(struct cam_icp_acquire_dev_info) +
+ (icp_dev_acquire_info.num_out_res *
+ sizeof(struct cam_icp_res_info));
+ ctx_data->icp_dev_acquire_info = kzalloc(acquire_size, GFP_KERNEL);
+ if (!ctx_data->icp_dev_acquire_info)
+ return -ENOMEM;
+
+ if (copy_from_user(ctx_data->icp_dev_acquire_info,
+ (void __user *)args->acquire_info, acquire_size)) {
+ kfree(ctx_data->icp_dev_acquire_info);
+ ctx_data->icp_dev_acquire_info = NULL;
+ return -EFAULT;
+ }
+
+ ICP_DBG("%x %x %x %x %x %x %x\n",
+ ctx_data->icp_dev_acquire_info->dev_type,
+ ctx_data->icp_dev_acquire_info->in_res.format,
+ ctx_data->icp_dev_acquire_info->in_res.width,
+ ctx_data->icp_dev_acquire_info->in_res.height,
+ ctx_data->icp_dev_acquire_info->in_res.fps,
+ ctx_data->icp_dev_acquire_info->num_out_res,
+ ctx_data->icp_dev_acquire_info->scratch_mem_size);
+
+ p_icp_out = ctx_data->icp_dev_acquire_info->out_res;
+ for (i = 0; i < ctx_data->icp_dev_acquire_info->num_out_res; i++)
+ ICP_DBG("out[i] %x %x %x %x\n",
+ p_icp_out[i].format,
+ p_icp_out[i].width,
+ p_icp_out[i].height,
+ p_icp_out[i].fps);
+
+ return 0;
+}
+
static int cam_icp_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
{
- int rc = 0, i, bitmap_size = 0, tmp_size;
+ int rc = 0, bitmap_size = 0;
uint32_t ctx_id = 0;
uint64_t io_buf_addr;
size_t io_buf_size;
struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
struct cam_icp_hw_ctx_data *ctx_data = NULL;
struct cam_hw_acquire_args *args = acquire_hw_args;
- struct cam_icp_acquire_dev_info icp_dev_acquire_info;
- struct cam_icp_res_info *p_icp_out = NULL;
- struct crm_workq_task *task;
- uint8_t *tmp_acquire;
+ struct cam_icp_acquire_dev_info *icp_dev_acquire_info;
if ((!hw_mgr_priv) || (!acquire_hw_args)) {
pr_err("Invalid params: %pK %pK\n", hw_mgr_priv,
@@ -1447,140 +1621,71 @@ static int cam_icp_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
return -EINVAL;
}
- if (copy_from_user(&icp_dev_acquire_info,
- (void __user *)args->acquire_info,
- sizeof(icp_dev_acquire_info)))
- return -EFAULT;
-
- if (icp_dev_acquire_info.num_out_res > ICP_IPE_MAX_OUTPUT_SUPPORTED) {
- pr_err("num of out resources exceeding : %u\n",
- icp_dev_acquire_info.num_out_res);
- return -EINVAL;
- }
-
- ICP_DBG("%x %x %x %x %x %x %x\n",
- icp_dev_acquire_info.dev_type,
- icp_dev_acquire_info.in_res.format,
- icp_dev_acquire_info.in_res.width,
- icp_dev_acquire_info.in_res.height,
- icp_dev_acquire_info.in_res.fps,
- icp_dev_acquire_info.num_out_res,
- icp_dev_acquire_info.scratch_mem_size);
-
- tmp_size = sizeof(icp_dev_acquire_info) +
- icp_dev_acquire_info.num_out_res *
- sizeof(struct cam_icp_res_info);
-
- tmp_acquire = kzalloc(tmp_size, GFP_KERNEL);
- if (!tmp_acquire)
- return -EINVAL;
-
- if (copy_from_user(tmp_acquire,
- (void __user *)args->acquire_info,
- tmp_size)) {
- kfree(tmp_acquire);
- return -EFAULT;
- }
-
- p_icp_out =
- (struct cam_icp_res_info *)(tmp_acquire +
- sizeof(icp_dev_acquire_info)-
- sizeof(struct cam_icp_res_info));
- ICP_DBG("out[0] %x %x %x %x\n",
- p_icp_out[0].format,
- p_icp_out[0].width,
- p_icp_out[0].height,
- p_icp_out[0].fps);
-
- ICP_DBG("out[1] %x %x %x %x\n",
- p_icp_out[1].format,
- p_icp_out[1].width,
- p_icp_out[1].height,
- p_icp_out[1].fps);
-
mutex_lock(&hw_mgr->hw_mgr_mutex);
ctx_id = cam_icp_mgr_get_free_ctx(hw_mgr);
if (ctx_id >= CAM_ICP_CTX_MAX) {
pr_err("No free ctx space in hw_mgr\n");
- kfree(tmp_acquire);
mutex_unlock(&hw_mgr->hw_mgr_mutex);
- return -EFAULT;
+ return -ENOSPC;
}
-
- /* Fill ctx with acquire info */
ctx_data = &hw_mgr->ctx_data[ctx_id];
+ ctx_data->ctx_id = ctx_id;
mutex_unlock(&hw_mgr->hw_mgr_mutex);
- /* Fill ctx with acquire info */
mutex_lock(&ctx_data->ctx_mutex);
- ctx_data->icp_dev_acquire_info = icp_dev_acquire_info;
- for (i = 0; i < icp_dev_acquire_info.num_out_res; i++)
- ctx_data->icp_out_acquire_info[i] = p_icp_out[i];
+ rc = cam_icp_get_acquire_info(hw_mgr, args, ctx_data);
+ if (rc) {
+ mutex_unlock(&ctx_data->ctx_mutex);
+ goto acquire_info_failed;
+ }
+ icp_dev_acquire_info = ctx_data->icp_dev_acquire_info;
mutex_unlock(&ctx_data->ctx_mutex);
/* Get IOCONFIG command info */
- if (ctx_data->icp_dev_acquire_info.secure_mode)
+ if (icp_dev_acquire_info->secure_mode)
rc = cam_mem_get_io_buf(
- icp_dev_acquire_info.io_config_cmd_handle,
+ icp_dev_acquire_info->io_config_cmd_handle,
hw_mgr->iommu_sec_hdl,
&io_buf_addr, &io_buf_size);
else
rc = cam_mem_get_io_buf(
- icp_dev_acquire_info.io_config_cmd_handle,
+ icp_dev_acquire_info->io_config_cmd_handle,
hw_mgr->iommu_hdl,
&io_buf_addr, &io_buf_size);
+ if (rc) {
+ pr_err("unable to get src buf info from io desc\n");
+ goto get_io_buf_failed;
+ }
ICP_DBG("io_config_cmd_handle : %d\n",
- icp_dev_acquire_info.io_config_cmd_handle);
+ icp_dev_acquire_info->io_config_cmd_handle);
ICP_DBG("io_buf_addr : %pK\n", (void *)io_buf_addr);
ICP_DBG("io_buf_size : %zu\n", io_buf_size);
- if (rc < 0) {
- pr_err("unable to get src buf info from io desc\n");
- goto cmd_cpu_buf_failed;
- }
- mutex_lock(&icp_hw_mgr.hw_mgr_mutex);
- task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
- if (!task) {
- pr_err("no free task\n");
- mutex_unlock(&icp_hw_mgr.hw_mgr_mutex);
- goto get_create_task_failed;
+ mutex_lock(&hw_mgr->hw_mgr_mutex);
+ if (!hw_mgr->ctxt_cnt) {
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ rc = cam_icp_mgr_download_fw(hw_mgr, ctx_data);
+ if (rc)
+ goto get_io_buf_failed;
+ mutex_lock(&hw_mgr->hw_mgr_mutex);
}
- mutex_unlock(&icp_hw_mgr.hw_mgr_mutex);
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
- rc = cam_icp_mgr_send_ping(ctx_data, task);
+ rc = cam_icp_mgr_send_ping(ctx_data);
if (rc) {
pr_err("ping ack not received\n");
- goto create_handle_failed;
+ goto send_ping_failed;
}
- mutex_lock(&icp_hw_mgr.hw_mgr_mutex);
- task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
- if (!task) {
- pr_err("no free task\n");
- mutex_unlock(&icp_hw_mgr.hw_mgr_mutex);
- goto get_create_task_failed;
- }
- mutex_unlock(&icp_hw_mgr.hw_mgr_mutex);
- /* Send create fw handle command */
- rc = cam_icp_mgr_create_handle(icp_dev_acquire_info.dev_type,
- ctx_data, task);
+ rc = cam_icp_mgr_create_handle(icp_dev_acquire_info->dev_type,
+ ctx_data);
if (rc) {
pr_err("create handle failed\n");
goto create_handle_failed;
}
- /* Send IOCONFIG command */
- mutex_lock(&icp_hw_mgr.hw_mgr_mutex);
- task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
- if (!task) {
- pr_err("no empty task\n");
- mutex_unlock(&icp_hw_mgr.hw_mgr_mutex);
- goto get_ioconfig_task_failed;
- }
- mutex_unlock(&icp_hw_mgr.hw_mgr_mutex);
-
- rc = cam_icp_mgr_send_config_io(ctx_data, task, io_buf_addr);
+ rc = cam_icp_mgr_send_config_io(ctx_data, io_buf_addr);
if (rc) {
pr_err("IO Config command failed\n");
goto ioconfig_failed;
@@ -1588,42 +1693,46 @@ static int cam_icp_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
mutex_lock(&ctx_data->ctx_mutex);
ctx_data->context_priv = args->context_data;
- args->ctxt_to_hw_map = &ctx_data->fw_handle;
+ args->ctxt_to_hw_map = ctx_data;
bitmap_size = BITS_TO_LONGS(CAM_FRAME_CMD_MAX) * sizeof(long);
ctx_data->hfi_frame_process.bitmap =
- kzalloc(sizeof(bitmap_size), GFP_KERNEL);
+ kzalloc(bitmap_size, GFP_KERNEL);
+	if (!ctx_data->hfi_frame_process.bitmap) {
+		rc = -ENOMEM;
+		goto ioconfig_failed;
+	}
ctx_data->hfi_frame_process.bits = bitmap_size * BITS_PER_BYTE;
mutex_init(&ctx_data->hfi_frame_process.lock);
+ hw_mgr->ctx_data[ctx_id].ctxt_event_cb = args->event_cb;
+ icp_dev_acquire_info->scratch_mem_size = ctx_data->scratch_mem_size;
mutex_unlock(&ctx_data->ctx_mutex);
- hw_mgr->ctx_data[ctx_id].ctxt_event_cb = args->event_cb;
-
- icp_dev_acquire_info.scratch_mem_size = ctx_data->scratch_mem_size;
if (copy_to_user((void __user *)args->acquire_info,
- &icp_dev_acquire_info,
- sizeof(icp_dev_acquire_info)))
+ icp_dev_acquire_info, sizeof(struct cam_icp_acquire_dev_info)))
goto copy_to_user_failed;
ICP_DBG("scratch mem size = %x fw_handle = %x\n",
- (unsigned int)icp_dev_acquire_info.scratch_mem_size,
+ (unsigned int)icp_dev_acquire_info->scratch_mem_size,
(unsigned int)ctx_data->fw_handle);
- kfree(tmp_acquire);
+ mutex_lock(&hw_mgr->hw_mgr_mutex);
+ hw_mgr->ctxt_cnt++;
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
return 0;
copy_to_user_failed:
+ kfree(ctx_data->hfi_frame_process.bitmap);
+ ctx_data->hfi_frame_process.bitmap = NULL;
ioconfig_failed:
-get_ioconfig_task_failed:
- mutex_lock(&icp_hw_mgr.hw_mgr_mutex);
- task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
- mutex_unlock(&icp_hw_mgr.hw_mgr_mutex);
- if (task)
- cam_icp_mgr_destroy_handle(ctx_data, task);
+ cam_icp_mgr_destroy_handle(ctx_data);
+send_ping_failed:
create_handle_failed:
-get_create_task_failed:
-cmd_cpu_buf_failed:
- cam_icp_mgr_release_ctx(hw_mgr, ctx_id);
- kfree(tmp_acquire);
+ if (!hw_mgr->ctxt_cnt)
+ cam_icp_mgr_hw_close(hw_mgr, NULL);
+get_io_buf_failed:
+ kfree(hw_mgr->ctx_data[ctx_id].icp_dev_acquire_info);
+ hw_mgr->ctx_data[ctx_id].icp_dev_acquire_info = NULL;
+acquire_info_failed:
+ cam_icp_mgr_put_ctx(ctx_data);
return rc;
}
@@ -1639,25 +1748,22 @@ static int cam_icp_mgr_get_hw_caps(void *hw_mgr_priv, void *hw_caps_args)
}
if (copy_from_user(&icp_hw_mgr.icp_caps,
- (void __user *)query_cap->caps_handle,
- sizeof(struct cam_icp_query_cap_cmd))) {
+ (void __user *)query_cap->caps_handle,
+ sizeof(struct cam_icp_query_cap_cmd))) {
pr_err("copy_from_user failed\n");
return -EFAULT;
}
mutex_lock(&hw_mgr->hw_mgr_mutex);
rc = hfi_get_hw_caps(&icp_hw_mgr.icp_caps);
- if (rc < 0) {
- pr_err("Unable to get caps from HFI: %d\n", rc);
+ if (rc)
goto hfi_get_caps_fail;
- }
icp_hw_mgr.icp_caps.dev_iommu_handle.non_secure = hw_mgr->iommu_hdl;
icp_hw_mgr.icp_caps.dev_iommu_handle.secure = hw_mgr->iommu_sec_hdl;
if (copy_to_user((void __user *)query_cap->caps_handle,
- &icp_hw_mgr.icp_caps,
- sizeof(struct cam_icp_query_cap_cmd))) {
+ &icp_hw_mgr.icp_caps, sizeof(struct cam_icp_query_cap_cmd))) {
pr_err("copy_to_user failed\n");
rc = -EFAULT;
goto hfi_get_caps_fail;
@@ -1672,7 +1778,6 @@ int cam_icp_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl)
{
int count, i, rc = 0;
uint32_t num_dev;
- uint32_t num_ipe_dev;
const char *name = NULL;
struct device_node *child_node = NULL;
struct platform_device *child_pdev = NULL;
@@ -1712,7 +1817,7 @@ int cam_icp_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl)
/* Get number of a5 device nodes and a5 mem allocation */
rc = of_property_read_u32(of_node, "num-a5", &num_dev);
- if (rc < 0) {
+ if (rc) {
pr_err("getting num of a5 failed\n");
goto num_dev_failed;
}
@@ -1725,14 +1830,14 @@ int cam_icp_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl)
}
/* Get number of ipe device nodes and ipe mem allocation */
- rc = of_property_read_u32(of_node, "num-ipe", &num_ipe_dev);
- if (rc < 0) {
+ rc = of_property_read_u32(of_node, "num-ipe", &num_dev);
+ if (rc) {
pr_err("getting number of ipe dev nodes failed\n");
goto num_ipe_failed;
}
icp_hw_mgr.devices[CAM_ICP_DEV_IPE] = kzalloc(
- sizeof(struct cam_hw_intf *) * num_ipe_dev, GFP_KERNEL);
+ sizeof(struct cam_hw_intf *) * num_dev, GFP_KERNEL);
if (!icp_hw_mgr.devices[CAM_ICP_DEV_IPE]) {
rc = -ENOMEM;
goto num_ipe_failed;
@@ -1740,7 +1845,7 @@ int cam_icp_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl)
/* Get number of bps device nodes and bps mem allocation */
rc = of_property_read_u32(of_node, "num-bps", &num_dev);
- if (rc < 0) {
+ if (rc) {
pr_err("read num bps devices failed\n");
goto num_bps_failed;
}
@@ -1753,8 +1858,8 @@ int cam_icp_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl)
for (i = 0; i < count; i++) {
rc = of_property_read_string_index(of_node, "compat-hw-name",
- i, &name);
- if (rc < 0) {
+ i, &name);
+ if (rc) {
pr_err("getting dev object name failed\n");
goto compat_hw_name_failed;
}
@@ -1776,7 +1881,7 @@ int cam_icp_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl)
}
child_dev_intf = (struct cam_hw_intf *)platform_get_drvdata(
- child_pdev);
+ child_pdev);
if (!child_dev_intf) {
pr_err("no child device\n");
of_node_put(child_node);
@@ -1793,28 +1898,27 @@ int cam_icp_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl)
}
rc = cam_smmu_get_handle("icp", &icp_hw_mgr.iommu_hdl);
- if (rc < 0) {
- pr_err("icp get iommu handle failed\n");
+ if (rc) {
+ pr_err("icp get iommu handle failed: %d\n", rc);
goto compat_hw_name_failed;
}
- pr_err("mmu handle :%d\n", icp_hw_mgr.iommu_hdl);
rc = cam_smmu_ops(icp_hw_mgr.iommu_hdl, CAM_SMMU_ATTACH);
- if (rc < 0) {
+ if (rc) {
pr_err("icp attach failed: %d\n", rc);
goto icp_attach_failed;
}
rc = cam_req_mgr_workq_create("icp_command_queue", ICP_WORKQ_NUM_TASK,
&icp_hw_mgr.cmd_work, CRM_WORKQ_USAGE_NON_IRQ);
- if (rc < 0) {
+ if (rc) {
pr_err("unable to create a worker\n");
goto cmd_work_failed;
}
rc = cam_req_mgr_workq_create("icp_message_queue", ICP_WORKQ_NUM_TASK,
&icp_hw_mgr.msg_work, CRM_WORKQ_USAGE_IRQ);
- if (rc < 0) {
+ if (rc) {
pr_err("unable to create a worker\n");
goto msg_work_failed;
}
@@ -1856,6 +1960,7 @@ int cam_icp_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl)
cmd_work_failed:
cam_smmu_ops(icp_hw_mgr.iommu_hdl, CAM_SMMU_DETACH);
icp_attach_failed:
+ cam_smmu_destroy_handle(icp_hw_mgr.iommu_hdl);
icp_hw_mgr.iommu_hdl = 0;
compat_hw_name_failed:
kfree(icp_hw_mgr.devices[CAM_ICP_DEV_BPS]);
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
similarity index 93%
rename from drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
index 32d796a..6fa32fa 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
@@ -39,8 +39,8 @@
#define ICP_PACKET_SIZE 0
#define ICP_PACKET_TYPE 1
-#define ICP_PACKET_IPCODE 2
-#define ICP_IPE_MAX_OUTPUT_SUPPORTED 6
+#define ICP_PACKET_OPCODE 2
+#define ICP_MAX_OUTPUT_SUPPORTED 6
/**
* struct icp_hfi_mem_info
@@ -100,7 +100,7 @@ struct hfi_frame_process_info {
void *bitmap;
size_t bits;
struct mutex lock;
- int32_t request_id[CAM_FRAME_CMD_MAX];
+ uint64_t request_id[CAM_FRAME_CMD_MAX];
uint32_t num_out_resources[CAM_FRAME_CMD_MAX];
uint32_t out_resource[CAM_FRAME_CMD_MAX][CAM_MAX_OUT_RES];
};
@@ -113,13 +113,13 @@ struct hfi_frame_process_info {
* @scratch_mem_size: Scratch memory size
* @acquire_dev_cmd: Acquire command
* @icp_dev_acquire_info: Acquire device info
- * @icp_out_acquire_info: Acquire out resource info
* @ctxt_event_cb: Context callback function
* @in_use: Flag for context usage
* @role: Role of a context in case of chaining
* @chain_ctx: Peer context
* @hfi_frame_process: Frame process command
* @wait_complete: Completion info
+ * @ctx_id: Context Id
* @temp_payload: Payload for destroy handle data
*/
struct cam_icp_hw_ctx_data {
@@ -128,15 +128,15 @@ struct cam_icp_hw_ctx_data {
uint32_t fw_handle;
uint32_t scratch_mem_size;
struct cam_acquire_dev_cmd acquire_dev_cmd;
- struct cam_icp_acquire_dev_info icp_dev_acquire_info;
- struct cam_icp_res_info icp_out_acquire_info[CAM_MAX_OUT_RES];
+ struct cam_icp_acquire_dev_info *icp_dev_acquire_info;
cam_hw_event_cb_func ctxt_event_cb;
- uint32_t in_use;
+ bool in_use;
uint32_t role;
struct cam_icp_hw_ctx_data *chain_ctx;
struct hfi_frame_process_info hfi_frame_process;
struct completion wait_complete;
struct ipe_bps_destroy temp_payload;
+ uint32_t ctx_id;
};
/**
@@ -183,4 +183,6 @@ struct cam_icp_hw_mgr {
bool a5_debug;
};
+static int cam_icp_mgr_hw_close(void *hw_priv, void *hw_close_args);
+static int cam_icp_mgr_download_fw(void *hw_mgr_priv, void *download_fw_args);
#endif /* CAM_ICP_HW_MGR_H */
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include/cam_a5_hw_intf.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_a5_hw_intf.h
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include/cam_a5_hw_intf.h
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_a5_hw_intf.h
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include/cam_bps_hw_intf.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_bps_hw_intf.h
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include/cam_bps_hw_intf.h
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_bps_hw_intf.h
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include/cam_icp_hw_intf.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_icp_hw_intf.h
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include/cam_icp_hw_intf.h
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_icp_hw_intf.h
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include/cam_ipe_hw_intf.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_ipe_hw_intf.h
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include/cam_ipe_hw_intf.h
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_ipe_hw_intf.h
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/include/cam_icp_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/icp_hw/include/cam_icp_hw_mgr_intf.h
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/Makefile b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/Makefile
new file mode 100644
index 0000000..f904ea4
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/Makefile
@@ -0,0 +1,11 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/fw_inc
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += ipe_dev.o ipe_core.o ipe_soc.o
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c
similarity index 98%
rename from drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_core.c
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c
index 07f63d2..99b45aa 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_core.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c
@@ -72,7 +72,7 @@ int cam_ipe_init_hw(void *device_priv,
}
cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE;
- cpas_vote.ahb_vote.vote.level = CAM_TURBO_VOTE;
+ cpas_vote.ahb_vote.vote.level = CAM_SVS_VOTE;
cpas_vote.axi_vote.compressed_bw = ICP_TURBO_VOTE;
cpas_vote.axi_vote.uncompressed_bw = ICP_TURBO_VOTE;
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_core.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.h
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_core.h
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.h
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_dev.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_dev.c
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_dev.c
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_dev.c
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_soc.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c
similarity index 95%
rename from drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_soc.c
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c
index 527e716..e691dad 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_soc.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c
@@ -66,7 +66,8 @@ int cam_ipe_enable_soc_resources(struct cam_hw_soc_info *soc_info)
{
int rc = 0;
- rc = cam_soc_util_enable_platform_resource(soc_info, true, false);
+ rc = cam_soc_util_enable_platform_resource(soc_info, true,
+ CAM_TURBO_VOTE, false);
if (rc) {
pr_err("%s: enable platform failed\n", __func__);
return rc;
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_soc.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.h
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_soc.h
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.h
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
index a9064fa..83009d2 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
@@ -14,11 +14,13 @@
#include <linux/videodev2.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
+#include <linux/ratelimit.h>
#include "cam_isp_context.h"
#include "cam_isp_log.h"
#include "cam_mem_mgr.h"
#include "cam_sync_api.h"
+#include "cam_req_mgr_dev.h"
#undef CDBG
#define CDBG(fmt, args...) pr_debug(fmt, ##args)
@@ -537,7 +539,7 @@ static int __cam_isp_ctx_apply_req_in_activated_state(
*/
list_for_each_entry(req, &ctx->active_req_list, list) {
if (++cnt > 2) {
- pr_err("%s: Apply failed due to pipeline congestion\n",
+ pr_err_ratelimited("%s: Apply failed due to congest\n",
__func__);
rc = -EFAULT;
goto end;
@@ -627,6 +629,94 @@ static int __cam_isp_ctx_apply_req_in_bubble(
return rc;
}
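+/* Flush matching requests: signal their fences as error, move to free list */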
+static int __cam_isp_ctx_flush_req(struct cam_context *ctx,
+ struct list_head *req_list, struct cam_req_mgr_flush_request *flush_req)
+{
+ int i, rc;
+ uint32_t cancel_req_id_found = 0;
+ struct cam_ctx_request *req;
+ struct cam_ctx_request *req_temp;
+ struct cam_isp_ctx_req *req_isp;
+
+ spin_lock(&ctx->lock);
+ if (list_empty(req_list)) {
+ spin_unlock(&ctx->lock);
+ CDBG("%s: request list is empty\n", __func__);
+ return 0;
+ }
+
+ list_for_each_entry_safe(req, req_temp, req_list, list) {
+ if ((flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ)
+ && (req->request_id != flush_req->req_id))
+ continue;
+
+ list_del_init(&req->list);
+ req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+ for (i = 0; i < req_isp->num_fence_map_out; i++) {
+ if (req_isp->fence_map_out[i].sync_id != -1) {
+ CDBG("%s: Flush req 0x%llx, fence %d\n",
+ __func__, req->request_id,
+ req_isp->fence_map_out[i].sync_id);
+ rc = cam_sync_signal(
+ req_isp->fence_map_out[i].sync_id,
+ CAM_SYNC_STATE_SIGNALED_ERROR);
+ if (rc)
+ pr_err_ratelimited("%s: signal fence failed\n",
+ __func__);
+ req_isp->fence_map_out[i].sync_id = -1;
+ }
+ }
+ list_add_tail(&req->list, &ctx->free_req_list);
+
+ /* If flush request id found, exit the loop */
+ if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
+ cancel_req_id_found = 1;
+ break;
+ }
+ }
+ spin_unlock(&ctx->lock);
+
+ if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ &&
+ !cancel_req_id_found)
+ CDBG("%s:Flush request id:%lld is not found in the list\n",
+ __func__, flush_req->req_id);
+
+ return 0;
+}
+
+static int __cam_isp_ctx_flush_req_in_top_state(
+ struct cam_context *ctx,
+ struct cam_req_mgr_flush_request *flush_req)
+{
+ int rc = 0;
+
+ CDBG("%s: try to flush pending list\n", __func__);
+ rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
+ CDBG("%s: Flush request in top state %d\n",
+ __func__, ctx->state);
+ return rc;
+}
+
+static int __cam_isp_ctx_flush_req_in_ready(
+ struct cam_context *ctx,
+ struct cam_req_mgr_flush_request *flush_req)
+{
+ int rc = 0;
+
+ CDBG("%s: try to flush pending list\n", __func__);
+ rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
+
+	/* If nothing is in the pending req list, change state to acquired */
+ spin_lock(&ctx->lock);
+ if (list_empty(&ctx->pending_req_list))
+ ctx->state = CAM_CTX_ACQUIRED;
+ spin_unlock(&ctx->lock);
+
+ CDBG("%s: Flush request in ready state. next state %d\n",
+ __func__, ctx->state);
+ return rc;
+}
+
static struct cam_ctx_ops
cam_isp_ctx_activated_state_machine[CAM_ISP_CTX_ACTIVATED_MAX] = {
/* SOF */
@@ -679,12 +769,10 @@ static int __cam_isp_ctx_release_dev_in_top_state(struct cam_context *ctx,
struct cam_release_dev_cmd *cmd)
{
int rc = 0;
- int i;
struct cam_hw_release_args rel_arg;
- struct cam_ctx_request *req;
- struct cam_isp_ctx_req *req_isp;
struct cam_isp_context *ctx_isp =
(struct cam_isp_context *) ctx->ctx_priv;
+ struct cam_req_mgr_flush_request flush_req;
if (ctx_isp->hw_ctx) {
rel_arg.ctxt_to_hw_map = ctx_isp->hw_ctx;
@@ -704,25 +792,16 @@ static int __cam_isp_ctx_release_dev_in_top_state(struct cam_context *ctx,
* But we still add some sanity check code here to help the debug
*/
if (!list_empty(&ctx->active_req_list))
- pr_err("%s: Active list is empty.\n", __func__);
+ pr_err("%s: Active list is not empty\n", __func__);
- /* flush the pending list */
- while (!list_empty(&ctx->pending_req_list)) {
- req = list_first_entry(&ctx->pending_req_list,
- struct cam_ctx_request, list);
- list_del_init(&req->list);
- req_isp = (struct cam_isp_ctx_req *) req->req_priv;
- pr_err("%s: signal fence in pending list. fence num %d\n",
- __func__, req_isp->num_fence_map_out);
- for (i = 0; i < req_isp->num_fence_map_out; i++) {
- if (req_isp->fence_map_out[i].sync_id != -1) {
- cam_sync_signal(
- req_isp->fence_map_out[i].sync_id,
- CAM_SYNC_STATE_SIGNALED_ERROR);
- }
- }
- list_add_tail(&req->list, &ctx->free_req_list);
- }
+	/* Flush all requests in the pending list */
+ flush_req.type = CAM_REQ_MGR_FLUSH_TYPE_ALL;
+ flush_req.link_hdl = ctx->link_hdl;
+ flush_req.dev_hdl = ctx->dev_hdl;
+
+ CDBG("%s: try to flush pending list\n", __func__);
+ rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, &flush_req);
+
ctx->state = CAM_CTX_AVAILABLE;
CDBG("%s: next state %d\n", __func__, ctx->state);
return rc;
@@ -1252,6 +1331,7 @@ static struct cam_ctx_ops
.link = __cam_isp_ctx_link_in_acquired,
.unlink = __cam_isp_ctx_unlink_in_acquired,
.get_dev_info = __cam_isp_ctx_get_dev_info_in_acquired,
+ .flush_req = __cam_isp_ctx_flush_req_in_top_state,
},
.irq_ops = NULL,
},
@@ -1264,6 +1344,7 @@ static struct cam_ctx_ops
},
.crm_ops = {
.unlink = __cam_isp_ctx_unlink_in_ready,
+ .flush_req = __cam_isp_ctx_flush_req_in_ready,
},
.irq_ops = NULL,
},
@@ -1276,6 +1357,7 @@ static struct cam_ctx_ops
},
.crm_ops = {
.apply_req = __cam_isp_ctx_apply_req,
+ .flush_req = __cam_isp_ctx_flush_req_in_top_state,
},
.irq_ops = __cam_isp_ctx_handle_irq_in_activated,
},
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
index f07c45e..c718bba 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
@@ -72,7 +72,8 @@ int cam_ife_csid_enable_soc_resources(struct cam_hw_soc_info *soc_info)
{
int rc = 0;
- rc = cam_soc_util_enable_platform_resource(soc_info, true, true);
+ rc = cam_soc_util_enable_platform_resource(soc_info, true,
+ CAM_TURBO_VOTE, true);
if (rc) {
pr_err("%s: enable platform failed\n", __func__);
return rc;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
index 3670ca9..9f8f8c5 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
@@ -112,8 +112,8 @@ int cam_vfe_enable_soc_resources(struct cam_hw_soc_info *soc_info)
ahb_vote.type = CAM_VOTE_ABSOLUTE;
ahb_vote.vote.level = CAM_SVS_VOTE;
- axi_vote.compressed_bw = 640000000;
- axi_vote.uncompressed_bw = 640000000;
+ axi_vote.compressed_bw = 10640000000L;
+ axi_vote.uncompressed_bw = 10640000000L;
rc = cam_cpas_start(soc_private->cpas_handle, &ahb_vote, &axi_vote);
if (rc) {
@@ -122,7 +122,8 @@ int cam_vfe_enable_soc_resources(struct cam_hw_soc_info *soc_info)
goto end;
}
- rc = cam_soc_util_enable_platform_resource(soc_info, true, true);
+ rc = cam_soc_util_enable_platform_resource(soc_info, true,
+ CAM_TURBO_VOTE, true);
if (rc) {
pr_err("Error! enable platform failed\n");
goto stop_cpas;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
index 92a17d8..c4fae99 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
@@ -28,7 +28,8 @@
#define FRAME_BASED_EN 0
-#define MAX_BUF_UPDATE_REG_NUM 20
+#define MAX_BUF_UPDATE_REG_NUM \
+ (sizeof(struct cam_vfe_bus_ver2_reg_offset_bus_client)/4)
#define MAX_REG_VAL_PAIR_SIZE \
(MAX_BUF_UPDATE_REG_NUM * 2 * CAM_PACKET_MAX_PLANES)
@@ -370,6 +371,7 @@ static int cam_vfe_bus_get_num_wm(
case CAM_VFE_BUS_VER2_VFE_OUT_STATS_CS:
switch (format) {
case CAM_FORMAT_PLAIN64:
+ case CAM_FORMAT_PLAIN128:
return 1;
default:
break;
@@ -628,7 +630,8 @@ static int cam_vfe_bus_acquire_wm(
rsrc_data->height = 1;
rsrc_data->pack_fmt = 0x0;
rsrc_data->en_cfg = 0x3;
- } else if (rsrc_data->index < 5) {
+ } else if (rsrc_data->index < 5 ||
+ rsrc_data->index == 7 || rsrc_data->index == 8) {
switch (plane) {
case PLANE_Y:
switch (rsrc_data->format) {
@@ -663,6 +666,12 @@ static int cam_vfe_bus_acquire_wm(
}
rsrc_data->pack_fmt = 0xE;
rsrc_data->en_cfg = 0x1;
+ } else if (rsrc_data->index >= 11) {
+ rsrc_data->width = 0;
+ rsrc_data->height = 0;
+ rsrc_data->pack_fmt = 0x0;
+ rsrc_data->stride = 1;
+ rsrc_data->en_cfg = 0x3;
} else {
rsrc_data->width = rsrc_data->width * 4;
rsrc_data->height = rsrc_data->height / 2;
@@ -1652,6 +1661,7 @@ static int cam_vfe_bus_update_buf(void *priv, void *cmd_args,
struct cam_vfe_bus_ver2_wm_resource_data *wm_data = NULL;
uint32_t *reg_val_pair;
uint32_t i, j, size = 0;
+ uint32_t frame_inc = 0;
/*
* Need the entire buf io config so we can get the stride info
@@ -1672,13 +1682,19 @@ static int cam_vfe_bus_update_buf(void *priv, void *cmd_args,
if (update_buf->num_buf != vfe_out_data->num_wm) {
pr_err("Failed! Invalid number buffers:%d required:%d\n",
update_buf->num_buf, vfe_out_data->num_wm);
- return -ENOMEM;
+ return -EINVAL;
}
reg_val_pair = &vfe_out_data->common_data->io_buf_update[0];
io_cfg = update_buf->io_cfg;
for (i = 0, j = 0; i < vfe_out_data->num_wm; i++) {
+ if (j >= (MAX_REG_VAL_PAIR_SIZE - MAX_BUF_UPDATE_REG_NUM * 2)) {
+ pr_err("reg_val_pair %d exceeds the array limit %lu\n",
+ j, MAX_REG_VAL_PAIR_SIZE);
+ return -ENOMEM;
+ }
+
wm_data = vfe_out_data->wm_res[i]->res_priv;
/* For initial configuration program all bus registers */
@@ -1833,6 +1849,11 @@ static int cam_vfe_bus_update_buf(void *priv, void *cmd_args,
CDBG("image address 0x%x\n", reg_val_pair[j-1]);
+ frame_inc = io_cfg->planes[i].plane_stride *
+ io_cfg->planes[i].slice_height;
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+ wm_data->hw_regs->frame_inc, frame_inc);
+
/* enable the WM */
CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
wm_data->hw_regs->cfg,
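
The bus driver above now derives MAX_BUF_UPDATE_REG_NUM from the size of the per-client register block and guards the reg/value scratch array before each write master is programmed. The following standalone sketch (all sizes are made up) models just that guard: bail out while a full write master's worth of pairs still fits, rather than after the array has already been overrun.

/*
 * Standalone model of the bounds check added above; sizes are illustrative,
 * not the real register layout.
 */
#include <stdio.h>

#define MAX_BUF_UPDATE_REG_NUM  20      /* per-WM reg/value pairs, made up */
#define MAX_PLANES              3
#define MAX_REG_VAL_PAIR_SIZE   (MAX_BUF_UPDATE_REG_NUM * 2 * MAX_PLANES)

static void add_pair(unsigned int *arr, unsigned int *j,
		     unsigned int reg, unsigned int val)
{
	/* Each logical pair occupies two slots: register offset, then value. */
	arr[(*j)++] = reg;
	arr[(*j)++] = val;
}

int main(void)
{
	unsigned int pairs[MAX_REG_VAL_PAIR_SIZE];
	unsigned int j = 0, wm, r;

	for (wm = 0; wm < 4; wm++) {
		/* Guard mirrors the patch: stop while a full WM still fits. */
		if (j >= MAX_REG_VAL_PAIR_SIZE - MAX_BUF_UPDATE_REG_NUM * 2) {
			printf("would overflow at wm %u (j=%u)\n", wm, j);
			return 1;
		}
		for (r = 0; r < MAX_BUF_UPDATE_REG_NUM; r++)
			add_pair(pairs, &j, r * 4, 0xdeadbeef);
	}
	printf("filled %u of %u slots\n", j, MAX_REG_VAL_PAIR_SIZE);
	return 0;
}
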
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
index 3f3c2a3..6dd67df 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
@@ -119,10 +119,6 @@ static int cam_vfe_camif_resource_start(
rsrc_data->camif_reg->line_skip_pattern);
cam_io_w_mb(0x1, rsrc_data->mem_base +
rsrc_data->camif_reg->pixel_skip_pattern);
- cam_io_w_mb(0x0, rsrc_data->mem_base +
- rsrc_data->camif_reg->skip_period);
- cam_io_w_mb(0x1, rsrc_data->mem_base +
- rsrc_data->camif_reg->irq_subsample_pattern);
/* epoch config with 20 line */
cam_io_w_mb(0x00140014,
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
index f47b1dc..edfc245 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
@@ -722,7 +722,7 @@ static int cam_mem_util_unmap_hw_va(int32_t idx,
static int cam_mem_util_unmap(int32_t idx)
{
int rc = 0;
- enum cam_smmu_region_id region;
+ enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;
if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
pr_err("Incorrect index\n");
@@ -735,14 +735,17 @@ static int cam_mem_util_unmap(int32_t idx)
if (tbl.bufq[idx].i_hdl && tbl.bufq[idx].kmdvaddr)
ion_unmap_kernel(tbl.client, tbl.bufq[idx].i_hdl);
- if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE)
- region = CAM_SMMU_REGION_IO;
+ if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE ||
+ tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
- if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS)
- region = CAM_SMMU_REGION_SHARED;
+ if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE)
+ region = CAM_SMMU_REGION_IO;
- rc = cam_mem_util_unmap_hw_va(idx,
- region);
+ if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS)
+ region = CAM_SMMU_REGION_SHARED;
+
+ rc = cam_mem_util_unmap_hw_va(idx, region);
+ }
mutex_lock(&tbl.bufq[idx].q_lock);
tbl.bufq[idx].flags = 0;
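
In cam_mem_util_unmap() the region variable now has a defined default and the HW unmap is attempted only when one of the two HW mapping flags is actually set. A minimal model of that control flow, with placeholder flag and enum values, looks like this:

/* Standalone model of the flag-to-region selection above; values are made up. */
#include <stdio.h>

#define FLAG_HW_READ_WRITE     (1u << 0)
#define FLAG_HW_SHARED_ACCESS  (1u << 1)

enum region { REGION_SHARED, REGION_IO };

static int unmap_hw_va(unsigned int flags)
{
	enum region region = REGION_SHARED;    /* defined default */

	if (!(flags & (FLAG_HW_READ_WRITE | FLAG_HW_SHARED_ACCESS)))
		return 0;                      /* nothing mapped to HW */

	if (flags & FLAG_HW_READ_WRITE)
		region = REGION_IO;
	if (flags & FLAG_HW_SHARED_ACCESS)
		region = REGION_SHARED;

	printf("unmapping from region %d\n", region);
	return 0;
}

int main(void)
{
	unmap_hw_va(0);                        /* skipped entirely */
	unmap_hw_va(FLAG_HW_READ_WRITE);       /* IO region */
	unmap_hw_va(FLAG_HW_SHARED_ACCESS);    /* shared region */
	return 0;
}
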
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c
index 7bc26ec..38048d5 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c
@@ -58,7 +58,7 @@ int cam_req_mgr_util_init(void)
spin_unlock_bh(&hdl_tbl_lock);
bitmap_size = BITS_TO_LONGS(CAM_REQ_MGR_MAX_HANDLES) * sizeof(long);
- hdl_tbl->bitmap = kzalloc(sizeof(bitmap_size), GFP_KERNEL);
+ hdl_tbl->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
if (!hdl_tbl->bitmap) {
rc = -ENOMEM;
goto bitmap_alloc_fail;
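
The one-line change above fixes a classic sizing bug: kzalloc(sizeof(bitmap_size), ...) allocates sizeof(size_t) bytes (typically 8) instead of the bitmap_size bytes that BITS_TO_LONGS() computed. A small user-space illustration of the difference:

/* Standalone illustration of the allocation bug fixed above. */
#include <stdio.h>
#include <stdlib.h>

#define MAX_HANDLES   1024
#define BITS_PER_LONG (8 * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	size_t bitmap_size = BITS_TO_LONGS(MAX_HANDLES) * sizeof(long);

	printf("needed: %zu bytes, sizeof(bitmap_size): %zu bytes\n",
	       bitmap_size, sizeof(bitmap_size));

	/* Correct form, matching the fix: allocate bitmap_size zeroed bytes. */
	unsigned long *bitmap = calloc(1, bitmap_size);
	if (!bitmap)
		return 1;
	free(bitmap);
	return 0;
}
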
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
index 91b68cf..0a96f18 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
@@ -330,40 +330,31 @@ int32_t cam_actuator_i2c_pkt_parse(struct cam_actuator_ctrl_t *a_ctrl,
return rc;
}
-static int32_t cam_actuator_vreg_control(struct cam_actuator_ctrl_t *a_ctrl,
+static int32_t cam_actuator_vreg_control(
+ struct cam_actuator_ctrl_t *a_ctrl,
int config)
{
- int rc = 0, i, cnt;
- struct cam_actuator_vreg *vreg_cfg;
+ int rc = 0, cnt;
+ struct cam_hw_soc_info *soc_info;
- vreg_cfg = &a_ctrl->vreg_cfg;
- cnt = vreg_cfg->num_vreg;
+ soc_info = &a_ctrl->soc_info;
+ cnt = soc_info->num_rgltr;
+
if (!cnt)
return 0;
- if (cnt >= MSM_ACTUATOR_MAX_VREGS) {
+ if (cnt >= CAM_SOC_MAX_REGULATOR) {
pr_err("%s:%d Regulators more than supported %d\n",
__func__, __LINE__, cnt);
return -EINVAL;
}
- for (i = 0; i < cnt; i++) {
- if (a_ctrl->io_master_info.master_type ==
- CCI_MASTER) {
- rc = msm_camera_config_single_vreg(
- &(a_ctrl->v4l2_dev_str.pdev->dev),
- &vreg_cfg->cam_vreg[i],
- (struct regulator **)&vreg_cfg->data[i],
- config);
- } else if (a_ctrl->io_master_info.master_type ==
- I2C_MASTER) {
- rc = msm_camera_config_single_vreg(
- &(a_ctrl->io_master_info.client->dev),
- &vreg_cfg->cam_vreg[i],
- (struct regulator **)&vreg_cfg->data[i],
- config);
- }
- }
+ if (config)
+ rc = cam_soc_util_enable_platform_resource(soc_info, false, 0,
+ false);
+ else
+ rc = cam_soc_util_disable_platform_resource(soc_info, false,
+ false);
return rc;
}
@@ -371,6 +362,9 @@ static int32_t cam_actuator_vreg_control(struct cam_actuator_ctrl_t *a_ctrl,
static int32_t cam_actuator_power_up(struct cam_actuator_ctrl_t *a_ctrl)
{
int rc = 0;
+ struct cam_hw_soc_info *soc_info =
+ &a_ctrl->soc_info;
+ struct msm_camera_gpio_num_info *gpio_num_info = NULL;
rc = cam_actuator_vreg_control(a_ctrl, 1);
if (rc < 0) {
@@ -379,28 +373,23 @@ static int32_t cam_actuator_power_up(struct cam_actuator_ctrl_t *a_ctrl)
return rc;
}
- if (a_ctrl->gconf &&
- a_ctrl->gconf->gpio_num_info &&
- a_ctrl->gconf->gpio_num_info->valid[SENSOR_VAF] == 1) {
- rc = msm_camera_request_gpio_table(
- a_ctrl->gconf->cam_gpio_req_tbl,
- a_ctrl->gconf->cam_gpio_req_tbl_size, 1);
+ gpio_num_info = a_ctrl->gpio_num_info;
+
+ if (soc_info->gpio_data &&
+ gpio_num_info &&
+ gpio_num_info->valid[SENSOR_VAF] == 1) {
+ rc = cam_soc_util_request_platform_resource(&a_ctrl->soc_info,
+ NULL, NULL);
+ rc = cam_soc_util_enable_platform_resource(&a_ctrl->soc_info,
+ false, 0, false);
if (rc < 0) {
pr_err("%s:%d :Error: Failed in req gpio: %d\n",
__func__, __LINE__, rc);
return rc;
}
- if (a_ctrl->cam_pinctrl_status) {
- rc = pinctrl_select_state(
- a_ctrl->pinctrl_info.pinctrl,
- a_ctrl->pinctrl_info.gpio_state_active);
- if (rc < 0)
- pr_err("%s:%d :Error: cannot set pin to active state: %d",
- __func__, __LINE__, rc);
- }
gpio_set_value_cansleep(
- a_ctrl->gconf->gpio_num_info->gpio_num[SENSOR_VAF],
+ gpio_num_info->gpio_num[SENSOR_VAF],
1);
}
@@ -413,6 +402,9 @@ static int32_t cam_actuator_power_up(struct cam_actuator_ctrl_t *a_ctrl)
static int32_t cam_actuator_power_down(struct cam_actuator_ctrl_t *a_ctrl)
{
int32_t rc = 0;
+ struct cam_hw_soc_info *soc_info =
+ &a_ctrl->soc_info;
+ struct msm_camera_gpio_num_info *gpio_num_info = NULL;
rc = cam_actuator_vreg_control(a_ctrl, 0);
if (rc < 0) {
@@ -420,35 +412,21 @@ static int32_t cam_actuator_power_down(struct cam_actuator_ctrl_t *a_ctrl)
return rc;
}
- if (a_ctrl->gconf &&
- a_ctrl->gconf->gpio_num_info &&
- a_ctrl->gconf->gpio_num_info->
- valid[SENSOR_VAF] == 1) {
+ gpio_num_info = a_ctrl->gpio_num_info;
+
+ if (soc_info->gpio_data &&
+ gpio_num_info &&
+ gpio_num_info->valid[SENSOR_VAF] == 1) {
gpio_set_value_cansleep(
- a_ctrl->gconf->gpio_num_info->
- gpio_num[SENSOR_VAF],
+ gpio_num_info->gpio_num[SENSOR_VAF],
GPIOF_OUT_INIT_LOW);
- if (a_ctrl->cam_pinctrl_status) {
- rc = pinctrl_select_state(
- a_ctrl->pinctrl_info.pinctrl,
- a_ctrl->pinctrl_info.
- gpio_state_suspend);
- if (rc < 0)
- pr_err("%s:%d cannot set pin to suspend state: %d",
- __func__, __LINE__, rc);
-
- devm_pinctrl_put(
- a_ctrl->pinctrl_info.pinctrl);
- }
- a_ctrl->cam_pinctrl_status = 0;
- rc = msm_camera_request_gpio_table(
- a_ctrl->gconf->cam_gpio_req_tbl,
- a_ctrl->gconf->cam_gpio_req_tbl_size,
- 0);
+ rc = cam_soc_util_release_platform_resource(&a_ctrl->soc_info);
+ rc |= cam_soc_util_disable_platform_resource(&a_ctrl->soc_info,
+ 0, 0);
if (rc < 0)
- pr_err("%s:%d Failed in selecting state: %d\n",
+ pr_err("%s:%d Failed to disable platform resources: %d\n",
__func__, __LINE__, rc);
}
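
The actuator power-up/power-down paths above drop the per-regulator loop and the local pinctrl handling in favour of the shared soc-util enable/disable pair. The sketch below models only the resulting control flow; enable_resources()/disable_resources() are stand-ins, not the real cam_soc_util calls.

/* Control-flow model of the rewritten cam_actuator_vreg_control(). */
#include <stdio.h>

struct soc_resources { int num_rgltr; int enabled; };

static int enable_resources(struct soc_resources *soc)  { soc->enabled = 1; return 0; }
static int disable_resources(struct soc_resources *soc) { soc->enabled = 0; return 0; }

static int vreg_control(struct soc_resources *soc, int config)
{
	if (!soc->num_rgltr)
		return 0;      /* nothing to switch */
	return config ? enable_resources(soc) : disable_resources(soc);
}

int main(void)
{
	struct soc_resources soc = { .num_rgltr = 2, .enabled = 0 };

	vreg_control(&soc, 1);
	printf("after power-up: %d\n", soc.enabled);
	vreg_control(&soc, 0);
	printf("after power-down: %d\n", soc.enabled);
	return 0;
}
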
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
index 3835680..48e3c2e 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
@@ -76,7 +76,6 @@ static int32_t cam_actuator_driver_i2c_probe(struct i2c_client *client,
INIT_LIST_HEAD(&(a_ctrl->i2c_data.per_frame[i].list_head));
/* Initialize sensor device type */
- a_ctrl->of_node = client->dev.of_node;
a_ctrl->io_master_info.master_type = I2C_MASTER;
rc = cam_actuator_parse_dt(a_ctrl, &client->dev);
@@ -203,11 +202,9 @@ static int32_t cam_actuator_driver_platform_probe(
if (!a_ctrl)
return -ENOMEM;
- /* Initialize actuator device type */
- a_ctrl->of_node = pdev->dev.of_node;
-
/*fill in platform device*/
a_ctrl->v4l2_dev_str.pdev = pdev;
+ a_ctrl->soc_info.pdev = pdev;
a_ctrl->io_master_info.master_type = CCI_MASTER;
@@ -259,6 +256,14 @@ static int32_t cam_actuator_driver_platform_probe(
goto free_mem;
}
+ rc = cam_soc_util_request_platform_resource(&a_ctrl->soc_info,
+ NULL, NULL);
+ if (rc < 0) {
+ pr_err("%s:%d :Error: Requesting Platform Resources failed rc %d",
+ __func__, __LINE__, rc);
+ goto free_ctrl;
+ }
+
a_ctrl->bridge_intf.device_hdl = -1;
a_ctrl->bridge_intf.ops.get_dev_info =
cam_actuator_publish_dev_info;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h
index 22ef29e..19fe4af 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h
@@ -32,7 +32,7 @@
#include <cam_sensor_cmn_header.h>
#include <cam_subdev.h>
#include "cam_sensor_util.h"
-#include "cam_sensor_soc_api.h"
+#include "cam_soc_util.h"
#define NUM_MASTERS 2
#define NUM_QUEUES 2
@@ -60,18 +60,6 @@ enum msm_actuator_state_t {
};
/**
- * struct cam_actuator_vreg
- * @cam_vreg: Regulator structure
- * @data: Regulator data
- * @num_vreg: Number of regulators
- */
-struct cam_actuator_vreg {
- struct camera_vreg_t *cam_vreg;
- void *data[MSM_ACTUATOR_MAX_VREGS];
- int num_vreg;
-};
-
-/**
* struct intf_params
* @device_hdl: Device Handle
* @session_hdl: Session Handle
@@ -107,18 +95,16 @@ struct cam_actuator_ctrl_t {
struct i2c_driver *i2c_driver;
enum cci_i2c_master_t cci_i2c_master;
struct camera_io_master io_master_info;
+ struct cam_hw_soc_info soc_info;
struct mutex actuator_mutex;
uint32_t id;
enum msm_actuator_state_t act_apply_state;
- struct cam_actuator_vreg vreg_cfg;
- struct msm_camera_gpio_conf *gconf;
- struct msm_pinctrl_info pinctrl_info;
+ struct msm_camera_gpio_num_info *gpio_num_info;
uint8_t cam_pinctrl_status;
struct cam_subdev v4l2_dev_str;
struct i2c_data_settings i2c_data;
struct cam_actuator_query_cap act_info;
struct intf_params bridge_intf;
- struct device_node *of_node;
char device_name[20];
};
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.c
index 767f3b0..584e4d2 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.c
@@ -10,68 +10,68 @@
* GNU General Public License for more details.
*/
-#include "cam_actuator_soc.h"
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <cam_sensor_cmn_header.h>
#include <cam_sensor_util.h>
#include <cam_sensor_io.h>
#include <cam_req_mgr_util.h>
+#include "cam_actuator_soc.h"
+#include "cam_soc_util.h"
int32_t cam_actuator_parse_dt(struct cam_actuator_ctrl_t *a_ctrl,
struct device *dev)
{
int32_t rc = 0;
- struct cam_actuator_vreg *vreg_cfg;
+ struct cam_hw_soc_info *soc_info = &a_ctrl->soc_info;
+ struct device_node *of_node = NULL;
+ struct platform_device *pdev = NULL;
+
+ if (!soc_info->pdev) {
+ pr_err("%s:%d :Error:soc_info is not initialized\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ pdev = soc_info->pdev;
+ of_node = pdev->dev.of_node;
/* Initialize mutex */
mutex_init(&(a_ctrl->actuator_mutex));
- rc = of_property_read_u32(a_ctrl->of_node, "cell-index",
- &(a_ctrl->id));
- CDBG("cell-index %d, rc %d\n", a_ctrl->id, rc);
+ rc = cam_soc_util_get_dt_properties(soc_info);
if (rc < 0) {
- pr_err("%s:%d :Error: parsing dt for cellindex rc %d\n",
+ pr_err("%s:%d :Error: parsing common soc dt(rc %d)\n",
__func__, __LINE__, rc);
return rc;
}
-
- rc = of_property_read_u32(a_ctrl->of_node, "qcom,cci-master",
+ rc = of_property_read_u32(of_node, "cci-master",
&(a_ctrl->cci_i2c_master));
- CDBG("qcom,cci-master %d, rc %d\n", a_ctrl->cci_i2c_master, rc);
+ CDBG("cci-master %d, rc %d\n", a_ctrl->cci_i2c_master, rc);
if (rc < 0 || a_ctrl->cci_i2c_master >= MASTER_MAX) {
pr_err("%s:%d :Error: Wrong info from dt CCI master as : %d\n",
__func__, __LINE__, a_ctrl->cci_i2c_master);
return rc;
}
- if (of_find_property(a_ctrl->of_node,
- "qcom,cam-vreg-name", NULL)) {
- vreg_cfg = &(a_ctrl->vreg_cfg);
- rc = cam_sensor_get_dt_vreg_data(dev->of_node,
- &vreg_cfg->cam_vreg, &vreg_cfg->num_vreg);
- if (rc < 0) {
- pr_err("%s:%d :Error: parsing regulator dt: %d\n",
- __func__, __LINE__, rc);
- return rc;
- }
- }
- rc = msm_sensor_driver_get_gpio_data(&(a_ctrl->gconf),
- a_ctrl->of_node);
- if (rc < 0) {
- pr_err("%s:%d No/Error Actuator GPIOs\n",
- __func__, __LINE__);
- } else {
- a_ctrl->cam_pinctrl_status = 1;
- rc = msm_camera_pinctrl_init(
- &(a_ctrl->pinctrl_info), dev);
- if (rc < 0) {
- pr_err("ERR:%s: Error in reading actuator pinctrl\n",
- __func__);
- a_ctrl->cam_pinctrl_status = 0;
- rc = 0;
- }
+ if (!soc_info->gpio_data) {
+ pr_info("%s:%d No GPIO found\n", __func__, __LINE__);
+ rc = 0;
+ return rc;
}
+ if (!soc_info->gpio_data->cam_gpio_common_tbl_size) {
+ pr_info("%s:%d No GPIO found\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ rc = cam_sensor_util_init_gpio_pin_tbl(soc_info,
+ &a_ctrl->gpio_num_info);
+
+ if ((rc < 0) || (!a_ctrl->gpio_num_info)) {
+ pr_err("%s:%d No/Error Actuator GPIOs\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
return rc;
}
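
cam_actuator_parse_dt() now lets a shared helper pick up the common SoC properties and treats a missing GPIO node as optional while rejecting a present-but-empty GPIO table. A compact model of that policy, using made-up types:

/* Minimal model of the DT policy above; types and helpers are invented. */
#include <stdio.h>

struct gpio_data { int common_tbl_size; };

static int parse_dt(const struct gpio_data *gpio_data)
{
	/* Shared properties (clocks, regulators, reg ranges) parsed first. */

	if (!gpio_data) {
		printf("no GPIO node: optional, continue\n");
		return 0;
	}
	if (!gpio_data->common_tbl_size) {
		printf("GPIO node present but empty: reject\n");
		return -1;
	}
	printf("init pin table with %d entries\n", gpio_data->common_tbl_size);
	return 0;
}

int main(void)
{
	struct gpio_data empty = { 0 }, ok = { 2 };

	parse_dt(NULL);
	parse_dt(&empty);
	parse_dt(&ok);
	return 0;
}
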
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c
index 746b786..83e0c19 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c
@@ -44,8 +44,11 @@ static void cam_cci_flush_queue(struct cci_device *cci_dev,
enum cci_i2c_master_t master)
{
int32_t rc = 0;
+ struct cam_hw_soc_info *soc_info =
+ &cci_dev->soc_info;
+ void __iomem *base = soc_info->reg_map[0].mem_base;
- cam_io_w_mb(1 << master, cci_dev->base + CCI_HALT_REQ_ADDR);
+ cam_io_w_mb(1 << master, base + CCI_HALT_REQ_ADDR);
rc = wait_for_completion_timeout(
&cci_dev->cci_master_info[master].reset_complete, CCI_TIMEOUT);
if (rc < 0) {
@@ -59,10 +62,10 @@ static void cam_cci_flush_queue(struct cci_device *cci_dev,
/* Set proper mask to RESET CMD address based on MASTER */
if (master == MASTER_0)
cam_io_w_mb(CCI_M0_RESET_RMSK,
- cci_dev->base + CCI_RESET_CMD_ADDR);
+ base + CCI_RESET_CMD_ADDR);
else
cam_io_w_mb(CCI_M1_RESET_RMSK,
- cci_dev->base + CCI_RESET_CMD_ADDR);
+ base + CCI_RESET_CMD_ADDR);
/* wait for reset done irq */
rc = wait_for_completion_timeout(
@@ -82,8 +85,11 @@ static int32_t cam_cci_validate_queue(struct cci_device *cci_dev,
int32_t rc = 0;
uint32_t read_val = 0;
uint32_t reg_offset = master * 0x200 + queue * 0x100;
+ struct cam_hw_soc_info *soc_info =
+ &cci_dev->soc_info;
+ void __iomem *base = soc_info->reg_map[0].mem_base;
- read_val = cam_io_r_mb(cci_dev->base +
+ read_val = cam_io_r_mb(base +
CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
CDBG("%s line %d CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR %d len %d max %d\n",
__func__, __LINE__, read_val, len,
@@ -95,18 +101,18 @@ static int32_t cam_cci_validate_queue(struct cci_device *cci_dev,
CDBG("%s:%d CCI_I2C_REPORT_CMD\n", __func__, __LINE__);
cam_io_w_mb(report_val,
- cci_dev->base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+ base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
reg_offset);
read_val++;
CDBG("%s:%d CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR %d, queue: %d\n",
__func__, __LINE__, read_val, queue);
- cam_io_w_mb(read_val, cci_dev->base +
+ cam_io_w_mb(read_val, base +
CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
reg_val = 1 << ((master * 2) + queue);
CDBG("%s:%d CCI_QUEUE_START_ADDR\n", __func__, __LINE__);
atomic_set(&cci_dev->cci_master_info[master].
done_pending[queue], 1);
- cam_io_w_mb(reg_val, cci_dev->base +
+ cam_io_w_mb(reg_val, base +
CCI_QUEUE_START_ADDR);
CDBG("%s line %d wait_for_completion_timeout\n",
__func__, __LINE__);
@@ -136,6 +142,9 @@ static int32_t cam_cci_write_i2c_queue(struct cci_device *cci_dev,
{
int32_t rc = 0;
uint32_t reg_offset = master * 0x200 + queue * 0x100;
+ struct cam_hw_soc_info *soc_info =
+ &cci_dev->soc_info;
+ void __iomem *base = soc_info->reg_map[0].mem_base;
if (!cci_dev) {
pr_err("%s: failed %d", __func__, __LINE__);
@@ -150,7 +159,7 @@ static int32_t cam_cci_write_i2c_queue(struct cci_device *cci_dev,
CDBG("%s CCI_I2C_M0_Q0_LOAD_DATA_ADDR:val 0x%x:0x%x\n",
__func__, CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
reg_offset, val);
- cam_io_w_mb(val, cci_dev->base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+ cam_io_w_mb(val, base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
reg_offset);
return rc;
}
@@ -260,21 +269,25 @@ static void cam_cci_load_report_cmd(struct cci_device *cci_dev,
enum cci_i2c_master_t master,
enum cci_i2c_queue_t queue)
{
+ struct cam_hw_soc_info *soc_info =
+ &cci_dev->soc_info;
+ void __iomem *base = soc_info->reg_map[0].mem_base;
+
uint32_t reg_offset = master * 0x200 + queue * 0x100;
- uint32_t read_val = cam_io_r_mb(cci_dev->base +
+ uint32_t read_val = cam_io_r_mb(base +
CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
uint32_t report_val = CCI_I2C_REPORT_CMD | (1 << 8);
CDBG("%s:%d CCI_I2C_REPORT_CMD curr_w_cnt: %d\n",
__func__, __LINE__, read_val);
cam_io_w_mb(report_val,
- cci_dev->base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+ base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
reg_offset);
read_val++;
CDBG("%s:%d CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR %d\n",
__func__, __LINE__, read_val);
- cam_io_w_mb(read_val, cci_dev->base +
+ cam_io_w_mb(read_val, base +
CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
}
@@ -282,12 +295,16 @@ static int32_t cam_cci_wait_report_cmd(struct cci_device *cci_dev,
enum cci_i2c_master_t master,
enum cci_i2c_queue_t queue)
{
+ struct cam_hw_soc_info *soc_info =
+ &cci_dev->soc_info;
+ void __iomem *base = soc_info->reg_map[0].mem_base;
+
uint32_t reg_val = 1 << ((master * 2) + queue);
cam_cci_load_report_cmd(cci_dev, master, queue);
atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 1);
atomic_set(&cci_dev->cci_master_info[master].done_pending[queue], 1);
- cam_io_w_mb(reg_val, cci_dev->base +
+ cam_io_w_mb(reg_val, base +
CCI_QUEUE_START_ADDR);
return cam_cci_wait(cci_dev, master, queue);
@@ -339,8 +356,11 @@ static int32_t cam_cci_get_queue_free_size(struct cci_device *cci_dev,
{
uint32_t read_val = 0;
uint32_t reg_offset = master * 0x200 + queue * 0x100;
+ struct cam_hw_soc_info *soc_info =
+ &cci_dev->soc_info;
+ void __iomem *base = soc_info->reg_map[0].mem_base;
- read_val = cam_io_r_mb(cci_dev->base +
+ read_val = cam_io_r_mb(base +
CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
CDBG("%s line %d CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR %d max %d\n",
__func__, __LINE__, read_val,
@@ -354,12 +374,15 @@ static void cam_cci_process_half_q(struct cci_device *cci_dev,
enum cci_i2c_master_t master,
enum cci_i2c_queue_t queue)
{
+ struct cam_hw_soc_info *soc_info =
+ &cci_dev->soc_info;
+ void __iomem *base = soc_info->reg_map[0].mem_base;
uint32_t reg_val = 1 << ((master * 2) + queue);
if (atomic_read(&cci_dev->cci_master_info[master].q_free[queue]) == 0) {
cam_cci_load_report_cmd(cci_dev, master, queue);
atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 1);
- cam_io_w_mb(reg_val, cci_dev->base +
+ cam_io_w_mb(reg_val, base +
CCI_QUEUE_START_ADDR);
}
}
@@ -461,52 +484,53 @@ static uint32_t cam_cci_cycles_per_ms(unsigned long clk)
return cycles_per_us;
}
-uint32_t *cam_cci_get_clk_rates(struct cci_device *cci_dev,
+void cam_cci_get_clk_rates(struct cci_device *cci_dev,
struct cam_cci_ctrl *c_ctrl)
+
{
- uint32_t j;
- int32_t idx;
+ int32_t src_clk_idx, j;
uint32_t cci_clk_src;
unsigned long clk;
struct cam_cci_clk_params_t *clk_params = NULL;
- struct device_node *of_node = cci_dev->v4l2_dev_str.pdev->dev.of_node;
+
enum i2c_freq_mode i2c_freq_mode = c_ctrl->cci_info->i2c_freq_mode;
+ struct cam_hw_soc_info *soc_info = &cci_dev->soc_info;
if (i2c_freq_mode >= I2C_MAX_MODES ||
i2c_freq_mode < I2C_STANDARD_MODE) {
pr_err("%s:%d Invalid frequency mode: %d\n",
__func__, __LINE__, (int32_t)i2c_freq_mode);
- return NULL;
+ cci_dev->clk_level_index = -1;
+ return;
}
clk_params = &cci_dev->cci_clk_params[i2c_freq_mode];
cci_clk_src = clk_params->cci_clk_src;
- idx = of_property_match_string(of_node,
- "clock-names", CCI_CLK_SRC_NAME);
- if (idx < 0) {
+ src_clk_idx = soc_info->src_clk_idx;
+
+ if (src_clk_idx < 0) {
cci_dev->cycles_per_us = CYCLES_PER_MICRO_SEC_DEFAULT;
- return cci_dev->cci_clk_rates[0];
+ cci_dev->clk_level_index = 0;
+ return;
}
if (cci_clk_src == 0) {
- clk = cci_dev->cci_clk_rates[0][idx];
+ clk = soc_info->clk_rate[0][src_clk_idx];
cci_dev->cycles_per_us = cam_cci_cycles_per_ms(clk);
- return cci_dev->cci_clk_rates[0];
+ cci_dev->clk_level_index = 0;
+ return;
}
- CDBG("%s:%d CCI: 3 cases:%d idx: %d\n", __func__,
- __LINE__, (int32_t)cci_dev->num_clk_cases, idx);
- for (j = 0; j < cci_dev->num_clk_cases; j++) {
- clk = cci_dev->cci_clk_rates[j][idx];
+ for (j = 0; j < CAM_MAX_VOTE; j++) {
+ clk = soc_info->clk_rate[j][src_clk_idx];
if (clk == cci_clk_src) {
cci_dev->cycles_per_us = cam_cci_cycles_per_ms(clk);
- cci_dev->cci_clk_src = cci_clk_src;
- return cci_dev->cci_clk_rates[j];
+ cci_dev->clk_level_index = j;
+ return;
}
}
-
- return NULL;
+ return;
}
static int32_t cam_cci_set_clk_param(struct cci_device *cci_dev,
@@ -515,6 +539,9 @@ static int32_t cam_cci_set_clk_param(struct cci_device *cci_dev,
struct cam_cci_clk_params_t *clk_params = NULL;
enum cci_i2c_master_t master = c_ctrl->cci_info->cci_i2c_master;
enum i2c_freq_mode i2c_freq_mode = c_ctrl->cci_info->i2c_freq_mode;
+ struct cam_hw_soc_info *soc_info =
+ &cci_dev->soc_info;
+ void __iomem *base = soc_info->reg_map[0].mem_base;
if ((i2c_freq_mode >= I2C_MAX_MODES) || (i2c_freq_mode < 0)) {
pr_err("%s:%d invalid i2c_freq_mode = %d",
@@ -529,33 +556,33 @@ static int32_t cam_cci_set_clk_param(struct cci_device *cci_dev,
if (master == MASTER_0) {
cam_io_w_mb(clk_params->hw_thigh << 16 |
clk_params->hw_tlow,
- cci_dev->base + CCI_I2C_M0_SCL_CTL_ADDR);
+ base + CCI_I2C_M0_SCL_CTL_ADDR);
cam_io_w_mb(clk_params->hw_tsu_sto << 16 |
clk_params->hw_tsu_sta,
- cci_dev->base + CCI_I2C_M0_SDA_CTL_0_ADDR);
+ base + CCI_I2C_M0_SDA_CTL_0_ADDR);
cam_io_w_mb(clk_params->hw_thd_dat << 16 |
clk_params->hw_thd_sta,
- cci_dev->base + CCI_I2C_M0_SDA_CTL_1_ADDR);
+ base + CCI_I2C_M0_SDA_CTL_1_ADDR);
cam_io_w_mb(clk_params->hw_tbuf,
- cci_dev->base + CCI_I2C_M0_SDA_CTL_2_ADDR);
+ base + CCI_I2C_M0_SDA_CTL_2_ADDR);
cam_io_w_mb(clk_params->hw_scl_stretch_en << 8 |
clk_params->hw_trdhld << 4 | clk_params->hw_tsp,
- cci_dev->base + CCI_I2C_M0_MISC_CTL_ADDR);
+ base + CCI_I2C_M0_MISC_CTL_ADDR);
} else if (master == MASTER_1) {
cam_io_w_mb(clk_params->hw_thigh << 16 |
clk_params->hw_tlow,
- cci_dev->base + CCI_I2C_M1_SCL_CTL_ADDR);
+ base + CCI_I2C_M1_SCL_CTL_ADDR);
cam_io_w_mb(clk_params->hw_tsu_sto << 16 |
clk_params->hw_tsu_sta,
- cci_dev->base + CCI_I2C_M1_SDA_CTL_0_ADDR);
+ base + CCI_I2C_M1_SDA_CTL_0_ADDR);
cam_io_w_mb(clk_params->hw_thd_dat << 16 |
clk_params->hw_thd_sta,
- cci_dev->base + CCI_I2C_M1_SDA_CTL_1_ADDR);
+ base + CCI_I2C_M1_SDA_CTL_1_ADDR);
cam_io_w_mb(clk_params->hw_tbuf,
- cci_dev->base + CCI_I2C_M1_SDA_CTL_2_ADDR);
+ base + CCI_I2C_M1_SDA_CTL_2_ADDR);
cam_io_w_mb(clk_params->hw_scl_stretch_en << 8 |
clk_params->hw_trdhld << 4 | clk_params->hw_tsp,
- cci_dev->base + CCI_I2C_M1_MISC_CTL_ADDR);
+ base + CCI_I2C_M1_MISC_CTL_ADDR);
}
cci_dev->i2c_freq_mode[master] = i2c_freq_mode;
@@ -576,6 +603,9 @@ static int32_t cam_cci_data_queue(struct cci_device *cci_dev,
uint16_t reg_addr = 0, cmd_size = i2c_msg->size;
uint32_t read_val = 0, reg_offset, val, delay = 0;
uint32_t max_queue_size, queue_size = 0, cmd = 0;
+ struct cam_hw_soc_info *soc_info =
+ &cci_dev->soc_info;
+ void __iomem *base = soc_info->reg_map[0].mem_base;
if (i2c_cmd == NULL) {
pr_err("%s:%d Failed line\n", __func__,
@@ -605,7 +635,7 @@ static int32_t cam_cci_data_queue(struct cci_device *cci_dev,
reg_offset = master * 0x200 + queue * 0x100;
cam_io_w_mb(cci_dev->cci_wait_sync_cfg.cid,
- cci_dev->base + CCI_SET_CID_SYNC_TIMER_ADDR +
+ base + CCI_SET_CID_SYNC_TIMER_ADDR +
cci_dev->cci_wait_sync_cfg.csid *
CCI_SET_CID_SYNC_TIMER_OFFSET);
@@ -616,7 +646,7 @@ static int32_t cam_cci_data_queue(struct cci_device *cci_dev,
CDBG("%s CCI_I2C_M0_Q0_LOAD_DATA_ADDR:val 0x%x:0x%x\n",
__func__, CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
reg_offset, val);
- cam_io_w_mb(val, cci_dev->base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+ cam_io_w_mb(val, base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
reg_offset);
atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 0);
@@ -635,7 +665,7 @@ static int32_t cam_cci_data_queue(struct cci_device *cci_dev,
val = CCI_I2C_WAIT_SYNC_CMD |
((cci_dev->cci_wait_sync_cfg.line) << 4);
cam_io_w_mb(val,
- cci_dev->base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+ base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
reg_offset);
}
@@ -655,7 +685,7 @@ static int32_t cam_cci_data_queue(struct cci_device *cci_dev,
return -EINVAL;
}
- read_val = cam_io_r_mb(cci_dev->base +
+ read_val = cam_io_r_mb(base +
CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
CDBG("%s line %d CUR_WORD_CNT_ADDR %d len %d max %d\n",
__func__, __LINE__, read_val, len, max_queue_size);
@@ -736,7 +766,7 @@ static int32_t cam_cci_data_queue(struct cci_device *cci_dev,
}
len = ((i-1)/4) + 1;
- read_val = cam_io_r_mb(cci_dev->base +
+ read_val = cam_io_r_mb(base +
CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
for (h = 0, k = 0; h < len; h++) {
cmd = 0;
@@ -744,12 +774,12 @@ static int32_t cam_cci_data_queue(struct cci_device *cci_dev,
cmd |= (data[k++] << (j * 8));
CDBG("%s LOAD_DATA_ADDR 0x%x, q: %d, len:%d, cnt: %d\n",
__func__, cmd, queue, len, read_val);
- cam_io_w_mb(cmd, cci_dev->base +
+ cam_io_w_mb(cmd, base +
CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
master * 0x200 + queue * 0x100);
read_val += 1;
- cam_io_w_mb(read_val, cci_dev->base +
+ cam_io_w_mb(read_val, base +
CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
}
@@ -761,11 +791,11 @@ static int32_t cam_cci_data_queue(struct cci_device *cci_dev,
cmd |= CCI_I2C_WAIT_CMD;
CDBG("%s CCI_I2C_M0_Q0_LOAD_DATA_ADDR 0x%x\n",
__func__, cmd);
- cam_io_w_mb(cmd, cci_dev->base +
+ cam_io_w_mb(cmd, base +
CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
master * 0x200 + queue * 0x100);
read_val += 1;
- cam_io_w_mb(read_val, cci_dev->base +
+ cam_io_w_mb(read_val, base +
CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
}
}
@@ -791,6 +821,8 @@ static int32_t cam_cci_read(struct v4l2_subdev *sd,
enum cci_i2c_queue_t queue = QUEUE_1;
struct cci_device *cci_dev = NULL;
struct cam_cci_read_cfg *read_cfg = NULL;
+ struct cam_hw_soc_info *soc_info = NULL;
+ void __iomem *base = NULL;
cci_dev = v4l2_get_subdevdata(sd);
master = c_ctrl->cci_info->cci_i2c_master;
@@ -801,6 +833,10 @@ static int32_t cam_cci_read(struct v4l2_subdev *sd,
pr_err("%s:%d Invalid I2C master addr\n", __func__, __LINE__);
return -EINVAL;
}
+
+ soc_info = &cci_dev->soc_info;
+ base = soc_info->reg_map[0].mem_base;
+
mutex_lock(&cci_dev->cci_master_info[master].mutex_q[queue]);
/*
@@ -894,14 +930,14 @@ static int32_t cam_cci_read(struct v4l2_subdev *sd,
goto rel_mutex;
}
- val = cam_io_r_mb(cci_dev->base + CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR
+ val = cam_io_r_mb(base + CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR
+ master * 0x200 + queue * 0x100);
CDBG("%s cur word cnt 0x%x\n", __func__, val);
- cam_io_w_mb(val, cci_dev->base + CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR
+ cam_io_w_mb(val, base + CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR
+ master * 0x200 + queue * 0x100);
val = 1 << ((master * 2) + queue);
- cam_io_w_mb(val, cci_dev->base + CCI_QUEUE_START_ADDR);
+ cam_io_w_mb(val, base + CCI_QUEUE_START_ADDR);
CDBG("%s:%d E wait_for_completion_timeout\n", __func__,
__LINE__);
@@ -921,7 +957,7 @@ static int32_t cam_cci_read(struct v4l2_subdev *sd,
rc = 0;
}
- read_words = cam_io_r_mb(cci_dev->base +
+ read_words = cam_io_r_mb(base +
CCI_I2C_M0_READ_BUF_LEVEL_ADDR + master * 0x100);
exp_words = ((read_cfg->num_byte / 4) + 1);
if (read_words != exp_words) {
@@ -936,7 +972,7 @@ static int32_t cam_cci_read(struct v4l2_subdev *sd,
read_cfg->num_byte);
first_byte = 0;
do {
- val = cam_io_r_mb(cci_dev->base +
+ val = cam_io_r_mb(base +
CCI_I2C_M0_READ_DATA_ADDR + master * 0x100);
CDBG("%s read val 0x%x\n", __func__, val);
for (i = 0; (i < 4) && (index < read_cfg->num_byte); i++) {
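
cam_cci_get_clk_rates() no longer returns a pointer into a clock-rate table; it records which vote level (row) holds the requested source-clock rate in clk_level_index, with level 0 as the fallback. The standalone lookup below mirrors that idea with an invented rate table:

/* Model of the clock-level lookup above; the rate table is made up. */
#include <stdio.h>

#define NUM_LEVELS 3
#define NUM_CLKS   4

static const unsigned long clk_rate[NUM_LEVELS][NUM_CLKS] = {
	{ 19200000, 100000000, 37500000, 0 },
	{ 19200000, 200000000, 37500000, 0 },
	{ 19200000, 400000000, 37500000, 0 },
};

/* Returns the matching level, or 0 as a safe default. */
static int pick_clk_level(int src_clk_idx, unsigned long wanted_src_rate)
{
	int level;

	if (src_clk_idx < 0 || wanted_src_rate == 0)
		return 0;

	for (level = 0; level < NUM_LEVELS; level++)
		if (clk_rate[level][src_clk_idx] == wanted_src_rate)
			return level;
	return 0;
}

int main(void)
{
	printf("level for 200 MHz on clk 1: %d\n", pick_clk_level(1, 200000000));
	printf("level for unknown rate:     %d\n", pick_clk_level(1, 123));
	return 0;
}
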
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.h
index f6e82dc..a28d5d8 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.h
@@ -23,7 +23,7 @@
*
* This API gets CCI clk rates
*/
-uint32_t *cam_cci_get_clk_rates(struct cci_device *cci_dev,
+void cam_cci_get_clk_rates(struct cci_device *cci_dev,
struct cam_cci_ctrl *c_ctrl);
/**
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c
index 6764b8a..63655a4 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c
@@ -49,10 +49,13 @@ irqreturn_t cam_cci_irq(int irq_num, void *data)
{
uint32_t irq;
struct cci_device *cci_dev = data;
+ struct cam_hw_soc_info *soc_info =
+ &cci_dev->soc_info;
+ void __iomem *base = soc_info->reg_map[0].mem_base;
- irq = cam_io_r_mb(cci_dev->base + CCI_IRQ_STATUS_0_ADDR);
- cam_io_w_mb(irq, cci_dev->base + CCI_IRQ_CLEAR_0_ADDR);
- cam_io_w_mb(0x1, cci_dev->base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+ irq = cam_io_r_mb(base + CCI_IRQ_STATUS_0_ADDR);
+ cam_io_w_mb(irq, base + CCI_IRQ_CLEAR_0_ADDR);
+ cam_io_w_mb(0x1, base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
if (irq & CCI_IRQ_STATUS_0_RST_DONE_ACK_BMSK) {
if (cci_dev->cci_master_info[MASTER_0].reset_pending == TRUE) {
@@ -123,24 +126,24 @@ irqreturn_t cam_cci_irq(int irq_num, void *data)
if (irq & CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK_BMSK) {
cci_dev->cci_master_info[MASTER_0].reset_pending = TRUE;
cam_io_w_mb(CCI_M0_RESET_RMSK,
- cci_dev->base + CCI_RESET_CMD_ADDR);
+ base + CCI_RESET_CMD_ADDR);
}
if (irq & CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK_BMSK) {
cci_dev->cci_master_info[MASTER_1].reset_pending = TRUE;
cam_io_w_mb(CCI_M1_RESET_RMSK,
- cci_dev->base + CCI_RESET_CMD_ADDR);
+ base + CCI_RESET_CMD_ADDR);
}
if (irq & CCI_IRQ_STATUS_0_I2C_M0_ERROR_BMSK) {
pr_err("%s:%d MASTER_0 error 0x%x\n", __func__, __LINE__, irq);
cci_dev->cci_master_info[MASTER_0].status = -EINVAL;
cam_io_w_mb(CCI_M0_HALT_REQ_RMSK,
- cci_dev->base + CCI_HALT_REQ_ADDR);
+ base + CCI_HALT_REQ_ADDR);
}
if (irq & CCI_IRQ_STATUS_0_I2C_M1_ERROR_BMSK) {
pr_err("%s:%d MASTER_1 error 0x%x\n", __func__, __LINE__, irq);
cci_dev->cci_master_info[MASTER_1].status = -EINVAL;
cam_io_w_mb(CCI_M1_HALT_REQ_RMSK,
- cci_dev->base + CCI_HALT_REQ_ADDR);
+ base + CCI_HALT_REQ_ADDR);
}
return IRQ_HANDLED;
}
@@ -150,8 +153,10 @@ static int cam_cci_irq_routine(struct v4l2_subdev *sd, u32 status,
{
struct cci_device *cci_dev = v4l2_get_subdevdata(sd);
irqreturn_t ret;
+ struct cam_hw_soc_info *soc_info =
+ &cci_dev->soc_info;
- ret = cam_cci_irq(cci_dev->irq->start, cci_dev);
+ ret = cam_cci_irq(soc_info->irq_line->start, cci_dev);
*handled = TRUE;
return 0;
}
@@ -171,6 +176,7 @@ static int cam_cci_platform_probe(struct platform_device *pdev)
{
struct cam_cpas_register_params cpas_parms;
struct cci_device *new_cci_dev;
+ struct cam_hw_soc_info *soc_info = NULL;
int rc = 0;
new_cci_dev = kzalloc(sizeof(struct cci_device),
@@ -178,8 +184,12 @@ static int cam_cci_platform_probe(struct platform_device *pdev)
if (!new_cci_dev)
return -ENOMEM;
+ soc_info = &new_cci_dev->soc_info;
+
new_cci_dev->v4l2_dev_str.pdev = pdev;
+ soc_info->pdev = pdev;
+
rc = cam_cci_parse_dt_info(pdev, new_cci_dev);
if (rc < 0) {
pr_err("%s: %d Resource get Failed: %d\n",
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h
index 996fc62..6268a1b 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h
@@ -31,12 +31,12 @@
#include <media/v4l2-ioctl.h>
#include <media/v4l2-subdev.h>
#include <cam_sensor_cmn_header.h>
-#include <cam_sensor_soc_api.h>
#include <cam_io_util.h>
#include <cam_sensor_util.h>
#include <cam_subdev.h>
#include <cam_cpas_api.h>
#include "cam_cci_hwreg.h"
+#include "cam_soc_util.h"
#define V4L2_IDENT_CCI 50005
#define CCI_I2C_QUEUE_0_SIZE 128
@@ -206,36 +206,25 @@ enum cam_cci_state_t {
*/
struct cci_device {
struct v4l2_subdev subdev;
- struct resource *irq;
- void __iomem *base;
+ struct cam_hw_soc_info soc_info;
uint32_t hw_version;
uint8_t ref_count;
enum cam_cci_state_t cci_state;
- size_t num_clk;
- struct clk **cci_clk;
- struct msm_cam_clk_info *cci_clk_info;
struct cam_cci_i2c_queue_info
cci_i2c_queue_info[NUM_MASTERS][NUM_QUEUES];
struct cam_cci_master_info cci_master_info[NUM_MASTERS];
enum i2c_freq_mode i2c_freq_mode[NUM_MASTERS];
struct cam_cci_clk_params_t cci_clk_params[I2C_MAX_MODES];
- struct gpio *cci_gpio_tbl;
- uint8_t cci_gpio_tbl_size;
struct msm_pinctrl_info cci_pinctrl;
uint8_t cci_pinctrl_status;
- uint32_t cci_clk_src;
- struct camera_vreg_t *cci_vreg;
- struct regulator *cci_reg_ptr[MAX_REGULATOR];
- int32_t regulator_count;
uint8_t support_seq_write;
struct workqueue_struct *write_wq[MASTER_MAX];
struct cam_cci_wait_sync_cfg cci_wait_sync_cfg;
uint8_t valid_sync;
struct cam_subdev v4l2_dev_str;
uint32_t cycles_per_us;
+ int32_t clk_level_index;
uint8_t payload_size;
- size_t num_clk_cases;
- uint32_t **cci_clk_rates;
char device_name[20];
uint32_t cpas_handle;
};
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c
index 59cdfaa..d976788 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c
@@ -13,46 +13,17 @@
#include "cam_cci_dev.h"
#include "cam_cci_core.h"
-static int32_t cam_cci_pinctrl_init(struct cci_device *cci_dev)
-{
- struct msm_pinctrl_info *cci_pctrl = NULL;
-
- cci_pctrl = &cci_dev->cci_pinctrl;
- cci_pctrl->pinctrl = devm_pinctrl_get(&cci_dev->v4l2_dev_str.pdev->dev);
- if (IS_ERR_OR_NULL(cci_pctrl->pinctrl)) {
- pr_err("%s:%d devm_pinctrl_get cci_pinctrl failed\n",
- __func__, __LINE__);
- return -EINVAL;
- }
- cci_pctrl->gpio_state_active = pinctrl_lookup_state(
- cci_pctrl->pinctrl,
- CCI_PINCTRL_STATE_DEFAULT);
- if (IS_ERR_OR_NULL(cci_pctrl->gpio_state_active)) {
- pr_err("%s:%d look up state for active state failed\n",
- __func__, __LINE__);
- return -EINVAL;
- }
- cci_pctrl->gpio_state_suspend = pinctrl_lookup_state(
- cci_pctrl->pinctrl,
- CCI_PINCTRL_STATE_SLEEP);
- if (IS_ERR_OR_NULL(cci_pctrl->gpio_state_suspend)) {
- pr_err("%s:%d look up state for suspend state failed\n",
- __func__, __LINE__);
- return -EINVAL;
- }
- return 0;
-}
-
int cam_cci_init(struct v4l2_subdev *sd,
struct cam_cci_ctrl *c_ctrl)
{
uint8_t i = 0, j = 0;
- int32_t rc = 0, ret = 0;
+ int32_t rc = 0;
struct cci_device *cci_dev;
enum cci_i2c_master_t master = MASTER_0;
- uint32_t *clk_rates = NULL;
struct cam_ahb_vote ahb_vote;
struct cam_axi_vote axi_vote;
+ struct cam_hw_soc_info *soc_info = NULL;
+ void __iomem *base = NULL;
cci_dev = v4l2_get_subdevdata(sd);
if (!cci_dev || !c_ctrl) {
@@ -62,8 +33,21 @@ int cam_cci_init(struct v4l2_subdev *sd,
return rc;
}
+ soc_info = &cci_dev->soc_info;
+ base = soc_info->reg_map[0].mem_base;
+
+ if (!soc_info || !base) {
+ pr_err("%s:%d failed: invalid params %pK %pK\n", __func__,
+ __LINE__, soc_info, base);
+ rc = -EINVAL;
+ return rc;
+ }
+
+ CDBG("%s:%d Base address %pK\n", __func__, __LINE__, base);
+
if (cci_dev->ref_count++) {
- CDBG("%s ref_count %d\n", __func__, cci_dev->ref_count);
+ CDBG("%s:%d ref_count %d\n", __func__, __LINE__,
+ cci_dev->ref_count);
master = c_ctrl->cci_info->cci_i2c_master;
CDBG("%s:%d master %d\n", __func__, __LINE__, master);
if (master < MASTER_MAX && master >= 0) {
@@ -80,10 +64,10 @@ int cam_cci_init(struct v4l2_subdev *sd,
/* Set proper mask to RESET CMD address */
if (master == MASTER_0)
cam_io_w_mb(CCI_M0_RESET_RMSK,
- cci_dev->base + CCI_RESET_CMD_ADDR);
+ base + CCI_RESET_CMD_ADDR);
else
cam_io_w_mb(CCI_M1_RESET_RMSK,
- cci_dev->base + CCI_RESET_CMD_ADDR);
+ base + CCI_RESET_CMD_ADDR);
/* wait for reset done irq */
rc = wait_for_completion_timeout(
&cci_dev->cci_master_info[master].
@@ -108,74 +92,24 @@ int cam_cci_init(struct v4l2_subdev *sd,
pr_err("%s:%d CPAS start failed\n",
__func__, __LINE__);
}
-
- ret = cam_cci_pinctrl_init(cci_dev);
- if (ret < 0) {
- pr_err("%s:%d Initialization of pinctrl failed\n",
- __func__, __LINE__);
- cci_dev->cci_pinctrl_status = 0;
- } else {
- cci_dev->cci_pinctrl_status = 1;
- }
- rc = msm_camera_request_gpio_table(cci_dev->cci_gpio_tbl,
- cci_dev->cci_gpio_tbl_size, 1);
- if (cci_dev->cci_pinctrl_status) {
- ret = pinctrl_select_state(cci_dev->cci_pinctrl.pinctrl,
- cci_dev->cci_pinctrl.gpio_state_active);
- if (ret)
- pr_err("%s:%d cannot set pin to active state\n",
- __func__, __LINE__);
- }
- if (rc < 0) {
- CDBG("%s: request gpio failed\n", __func__);
- goto request_gpio_failed;
- }
-
- rc = msm_camera_config_vreg(&cci_dev->v4l2_dev_str.pdev->dev,
- cci_dev->cci_vreg, cci_dev->regulator_count, NULL, 0,
- &cci_dev->cci_reg_ptr[0], 1);
- if (rc < 0) {
- pr_err("%s:%d cci config_vreg failed\n", __func__, __LINE__);
- goto clk_enable_failed;
- }
-
- rc = msm_camera_enable_vreg(&cci_dev->v4l2_dev_str.pdev->dev,
- cci_dev->cci_vreg, cci_dev->regulator_count, NULL, 0,
- &cci_dev->cci_reg_ptr[0], 1);
- if (rc < 0) {
- pr_err("%s:%d cci enable_vreg failed\n", __func__, __LINE__);
- goto reg_enable_failed;
- }
-
- clk_rates = cam_cci_get_clk_rates(cci_dev, c_ctrl);
- if (!clk_rates) {
- pr_err("%s: clk enable failed\n", __func__);
- goto reg_enable_failed;
- }
-
- for (i = 0; i < cci_dev->num_clk; i++) {
- cci_dev->cci_clk_info[i].clk_rate =
- clk_rates[i];
- }
- rc = msm_camera_clk_enable(&cci_dev->v4l2_dev_str.pdev->dev,
- cci_dev->cci_clk_info, cci_dev->cci_clk,
- cci_dev->num_clk, true);
- if (rc < 0) {
- pr_err("%s: clk enable failed\n", __func__);
- goto reg_enable_failed;
- }
+ cam_cci_get_clk_rates(cci_dev, c_ctrl);
/* Re-initialize the completion */
reinit_completion(&cci_dev->cci_master_info[master].reset_complete);
for (i = 0; i < NUM_QUEUES; i++)
reinit_completion(&cci_dev->cci_master_info[master].
report_q[i]);
- rc = msm_camera_enable_irq(cci_dev->irq, true);
+
+ /* Enable Regulators and IRQ*/
+ rc = cam_soc_util_enable_platform_resource(soc_info, true,
+ CAM_TURBO_VOTE, true);
if (rc < 0) {
- pr_err("%s: irq enable failed\n", __func__);
- return -EINVAL;
+ CDBG("%s:%d request platform resources failed\n", __func__,
+ __LINE__);
+ goto platform_enable_failed;
}
- cci_dev->hw_version = cam_io_r_mb(cci_dev->base +
+
+ cci_dev->hw_version = cam_io_r_mb(base +
CCI_HW_VERSION_ADDR);
CDBG("%s:%d: hw_version = 0x%x\n", __func__, __LINE__,
cci_dev->hw_version);
@@ -195,7 +129,8 @@ int cam_cci_init(struct v4l2_subdev *sd,
max_queue_size =
CCI_I2C_QUEUE_1_SIZE;
- CDBG("CCI Master[%d] :: Q0 size: %d Q1 size: %d\n", i,
+ CDBG("%s:%d : CCI Master[%d] :: Q0 : %d Q1 : %d\n",
+ __func__, __LINE__, i,
cci_dev->cci_i2c_queue_info[i][j].
max_queue_size,
cci_dev->cci_i2c_queue_info[i][j].
@@ -204,14 +139,14 @@ int cam_cci_init(struct v4l2_subdev *sd,
}
cci_dev->cci_master_info[MASTER_0].reset_pending = TRUE;
- cam_io_w_mb(CCI_RESET_CMD_RMSK, cci_dev->base +
+ cam_io_w_mb(CCI_RESET_CMD_RMSK, base +
CCI_RESET_CMD_ADDR);
- cam_io_w_mb(0x1, cci_dev->base + CCI_RESET_CMD_ADDR);
+ cam_io_w_mb(0x1, base + CCI_RESET_CMD_ADDR);
rc = wait_for_completion_timeout(
&cci_dev->cci_master_info[MASTER_0].reset_complete,
CCI_TIMEOUT);
if (rc <= 0) {
- pr_err("%s: wait_for_completion_timeout %d\n",
+ pr_err("%s:%d wait_for_completion_timeout\n",
__func__, __LINE__);
if (rc == 0)
rc = -ETIMEDOUT;
@@ -220,14 +155,15 @@ int cam_cci_init(struct v4l2_subdev *sd,
for (i = 0; i < MASTER_MAX; i++)
cci_dev->i2c_freq_mode[i] = I2C_MAX_MODES;
cam_io_w_mb(CCI_IRQ_MASK_0_RMSK,
- cci_dev->base + CCI_IRQ_MASK_0_ADDR);
+ base + CCI_IRQ_MASK_0_ADDR);
cam_io_w_mb(CCI_IRQ_MASK_0_RMSK,
- cci_dev->base + CCI_IRQ_CLEAR_0_ADDR);
- cam_io_w_mb(0x1, cci_dev->base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+ base + CCI_IRQ_CLEAR_0_ADDR);
+ cam_io_w_mb(0x1, base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
for (i = 0; i < MASTER_MAX; i++) {
if (!cci_dev->write_wq[i]) {
- pr_err("Failed to flush write wq\n");
+ pr_err("%s:%d Failed to flush write wq\n",
+ __func__, __LINE__);
rc = -ENOMEM;
goto reset_complete_failed;
} else {
@@ -239,25 +175,9 @@ int cam_cci_init(struct v4l2_subdev *sd,
return 0;
reset_complete_failed:
- msm_camera_enable_irq(cci_dev->irq, false);
- msm_camera_clk_enable(&cci_dev->v4l2_dev_str.pdev->dev,
- cci_dev->cci_clk_info, cci_dev->cci_clk,
- cci_dev->num_clk, false);
-reg_enable_failed:
- msm_camera_config_vreg(&cci_dev->v4l2_dev_str.pdev->dev,
- cci_dev->cci_vreg, cci_dev->regulator_count, NULL, 0,
- &cci_dev->cci_reg_ptr[0], 0);
-clk_enable_failed:
- if (cci_dev->cci_pinctrl_status) {
- ret = pinctrl_select_state(cci_dev->cci_pinctrl.pinctrl,
- cci_dev->cci_pinctrl.gpio_state_suspend);
- if (ret)
- pr_err("%s:%d cannot set pin to suspend state\n",
- __func__, __LINE__);
- }
- msm_camera_request_gpio_table(cci_dev->cci_gpio_tbl,
- cci_dev->cci_gpio_tbl_size, 0);
-request_gpio_failed:
+ cam_soc_util_disable_platform_resource(soc_info, 1, 1);
+
+platform_enable_failed:
cci_dev->ref_count--;
cam_cpas_stop(cci_dev->cpas_handle);
@@ -267,12 +187,9 @@ int cam_cci_init(struct v4l2_subdev *sd,
void cam_cci_soc_remove(struct platform_device *pdev,
struct cci_device *cci_dev)
{
- msm_camera_put_clk_info_and_rates(pdev,
- &cci_dev->cci_clk_info, &cci_dev->cci_clk,
- &cci_dev->cci_clk_rates, cci_dev->num_clk_cases,
- cci_dev->num_clk);
+ struct cam_hw_soc_info *soc_info = &cci_dev->soc_info;
- msm_camera_put_reg_base(pdev, cci_dev->base, "cci", true);
+ cam_soc_util_release_platform_resource(soc_info);
}
static void cam_cci_init_cci_params(struct cci_device *new_cci_dev)
@@ -293,76 +210,6 @@ static void cam_cci_init_cci_params(struct cci_device *new_cci_dev)
}
}
-static int32_t cam_cci_init_gpio_params(struct cci_device *cci_dev)
-{
- int32_t rc = 0, i = 0;
- uint32_t *val_array = NULL;
- uint8_t tbl_size = 0;
- struct device_node *of_node = cci_dev->v4l2_dev_str.pdev->dev.of_node;
- struct gpio *gpio_tbl = NULL;
-
- cci_dev->cci_gpio_tbl_size = tbl_size = of_gpio_count(of_node);
- CDBG("%s gpio count %d\n", __func__, tbl_size);
- if (!tbl_size) {
- pr_err("%s:%d gpio count 0\n", __func__, __LINE__);
- return -EINVAL;
- }
-
- gpio_tbl = cci_dev->cci_gpio_tbl =
- kzalloc(sizeof(struct gpio) * tbl_size, GFP_KERNEL);
- if (!gpio_tbl) {
- pr_err("%s failed %d\n", __func__, __LINE__);
- return -EINVAL;
- }
-
- for (i = 0; i < tbl_size; i++) {
- gpio_tbl[i].gpio = of_get_gpio(of_node, i);
- CDBG("%s gpio_tbl[%d].gpio = %d\n", __func__, i,
- gpio_tbl[i].gpio);
- }
-
- val_array = kcalloc(tbl_size, sizeof(uint32_t),
- GFP_KERNEL);
- if (!val_array) {
- rc = -ENOMEM;
- goto free_gpio_tbl;
- }
-
- rc = of_property_read_u32_array(of_node, "qcom,gpio-tbl-flags",
- val_array, tbl_size);
- if (rc < 0) {
- pr_err("%s failed %d\n", __func__, __LINE__);
- goto free_val_array;
- }
- for (i = 0; i < tbl_size; i++) {
- gpio_tbl[i].flags = val_array[i];
- CDBG("%s gpio_tbl[%d].flags = %ld\n", __func__, i,
- gpio_tbl[i].flags);
- }
-
- for (i = 0; i < tbl_size; i++) {
- rc = of_property_read_string_index(of_node,
- "qcom,gpio-tbl-label", i, &gpio_tbl[i].label);
- CDBG("%s gpio_tbl[%d].label = %s\n", __func__, i,
- gpio_tbl[i].label);
- if (rc < 0) {
- pr_err("%s failed %d\n", __func__, __LINE__);
- goto free_val_array;
- }
- }
-
- kfree(val_array);
- return rc;
-
-free_val_array:
- kfree(val_array);
-free_gpio_tbl:
- kfree(cci_dev->cci_gpio_tbl);
- cci_dev->cci_gpio_tbl = NULL;
- cci_dev->cci_gpio_tbl_size = 0;
- return rc;
-}
-
static void cam_cci_init_default_clk_params(struct cci_device *cci_dev,
uint8_t index)
{
@@ -403,75 +250,78 @@ static void cam_cci_init_clk_params(struct cci_device *cci_dev)
src_node = of_find_node_by_name(of_node,
"qcom,i2c_custom_mode");
- rc = of_property_read_u32(src_node, "qcom,hw-thigh", &val);
- CDBG("%s qcom,hw-thigh %d, rc %d\n", __func__, val, rc);
+ rc = of_property_read_u32(src_node, "hw-thigh", &val);
+ CDBG("%s:%d hw-thigh %d, rc %d\n", __func__, __LINE__, val, rc);
if (!rc) {
cci_dev->cci_clk_params[count].hw_thigh = val;
- rc = of_property_read_u32(src_node, "qcom,hw-tlow",
+ rc = of_property_read_u32(src_node, "hw-tlow",
&val);
- CDBG("%s qcom,hw-tlow %d, rc %d\n", __func__, val, rc);
+ CDBG("%s:%d hw-tlow %d, rc %d\n", __func__, __LINE__,
+ val, rc);
}
if (!rc) {
cci_dev->cci_clk_params[count].hw_tlow = val;
- rc = of_property_read_u32(src_node, "qcom,hw-tsu-sto",
+ rc = of_property_read_u32(src_node, "hw-tsu-sto",
&val);
- CDBG("%s qcom,hw-tsu-sto %d, rc %d\n",
- __func__, val, rc);
+ CDBG("%s:%d hw-tsu-sto %d, rc %d\n",
+ __func__, __LINE__, val, rc);
}
if (!rc) {
cci_dev->cci_clk_params[count].hw_tsu_sto = val;
- rc = of_property_read_u32(src_node, "qcom,hw-tsu-sta",
+ rc = of_property_read_u32(src_node, "hw-tsu-sta",
&val);
- CDBG("%s qcom,hw-tsu-sta %d, rc %d\n",
- __func__, val, rc);
+ CDBG("%s:%d hw-tsu-sta %d, rc %d\n",
+ __func__, __LINE__, val, rc);
}
if (!rc) {
cci_dev->cci_clk_params[count].hw_tsu_sta = val;
- rc = of_property_read_u32(src_node, "qcom,hw-thd-dat",
+ rc = of_property_read_u32(src_node, "hw-thd-dat",
&val);
- CDBG("%s qcom,hw-thd-dat %d, rc %d\n",
- __func__, val, rc);
+ CDBG("%s:%d hw-thd-dat %d, rc %d\n",
+ __func__, __LINE__, val, rc);
}
if (!rc) {
cci_dev->cci_clk_params[count].hw_thd_dat = val;
- rc = of_property_read_u32(src_node, "qcom,hw-thd-sta",
+ rc = of_property_read_u32(src_node, "hw-thd-sta",
&val);
- CDBG("%s qcom,hw-thd-sta %d, rc %d\n", __func__,
+ CDBG("%s:%d hw-thd-sta %d, rc %d\n", __func__, __LINE__,
val, rc);
}
if (!rc) {
cci_dev->cci_clk_params[count].hw_thd_sta = val;
- rc = of_property_read_u32(src_node, "qcom,hw-tbuf",
+ rc = of_property_read_u32(src_node, "hw-tbuf",
&val);
- CDBG("%s qcom,hw-tbuf %d, rc %d\n", __func__, val, rc);
+ CDBG("%s:%d hw-tbuf %d, rc %d\n", __func__, __LINE__,
+ val, rc);
}
if (!rc) {
cci_dev->cci_clk_params[count].hw_tbuf = val;
rc = of_property_read_u32(src_node,
- "qcom,hw-scl-stretch-en", &val);
- CDBG("%s qcom,hw-scl-stretch-en %d, rc %d\n",
- __func__, val, rc);
+ "hw-scl-stretch-en", &val);
+ CDBG("%s:%d hw-scl-stretch-en %d, rc %d\n",
+ __func__, __LINE__, val, rc);
}
if (!rc) {
cci_dev->cci_clk_params[count].hw_scl_stretch_en = val;
- rc = of_property_read_u32(src_node, "qcom,hw-trdhld",
+ rc = of_property_read_u32(src_node, "hw-trdhld",
&val);
- CDBG("%s qcom,hw-trdhld %d, rc %d\n",
- __func__, val, rc);
+ CDBG("%s:%d hw-trdhld %d, rc %d\n",
+ __func__, __LINE__, val, rc);
}
if (!rc) {
cci_dev->cci_clk_params[count].hw_trdhld = val;
- rc = of_property_read_u32(src_node, "qcom,hw-tsp",
+ rc = of_property_read_u32(src_node, "hw-tsp",
&val);
- CDBG("%s qcom,hw-tsp %d, rc %d\n", __func__, val, rc);
+ CDBG("%s:%d hw-tsp %d, rc %d\n", __func__, __LINE__,
+ val, rc);
}
if (!rc) {
cci_dev->cci_clk_params[count].hw_tsp = val;
val = 0;
- rc = of_property_read_u32(src_node, "qcom,cci-clk-src",
+ rc = of_property_read_u32(src_node, "cci-clk-src",
&val);
- CDBG("%s qcom,cci-clk-src %d, rc %d\n",
- __func__, val, rc);
+ CDBG("%s:%d cci-clk-src %d, rc %d\n",
+ __func__, __LINE__, val, rc);
cci_dev->cci_clk_params[count].cci_clk_src = val;
} else
cam_cci_init_default_clk_params(cci_dev, count);
@@ -484,141 +334,78 @@ int cam_cci_parse_dt_info(struct platform_device *pdev,
struct cci_device *new_cci_dev)
{
int rc = 0, i = 0;
+ struct cam_hw_soc_info *soc_info =
+ &new_cci_dev->soc_info;
- /* Get Clock Info*/
- rc = msm_camera_get_clk_info_and_rates(pdev,
- &new_cci_dev->cci_clk_info, &new_cci_dev->cci_clk,
- &new_cci_dev->cci_clk_rates, &new_cci_dev->num_clk_cases,
- &new_cci_dev->num_clk);
+ rc = cam_soc_util_get_dt_properties(soc_info);
if (rc < 0) {
- pr_err("%s: cam_cci_get_clk_info() failed", __func__);
- kfree(new_cci_dev);
- new_cci_dev = NULL;
- return -EFAULT;
+ pr_err("%s:%d :Error: Parsing DT data failed:%d\n",
+ __func__, __LINE__, rc);
+ return -EINVAL;
}
new_cci_dev->ref_count = 0;
- new_cci_dev->base = msm_camera_get_reg_base(pdev, "cci", true);
- if (!new_cci_dev->base) {
- pr_err("%s: no mem resource?\n", __func__);
- return -ENODEV;
- }
- new_cci_dev->irq = msm_camera_get_irq(pdev, "cci");
- if (!new_cci_dev->irq) {
- pr_err("%s: no irq resource?\n", __func__);
- return -ENODEV;
- }
- CDBG("%s line %d cci irq start %d end %d\n", __func__,
- __LINE__,
- (int) new_cci_dev->irq->start,
- (int) new_cci_dev->irq->end);
- rc = msm_camera_register_irq(pdev, new_cci_dev->irq,
- cam_cci_irq, IRQF_TRIGGER_RISING, "cci", new_cci_dev);
- if (rc < 0) {
- pr_err("%s: irq request fail\n", __func__);
- rc = -EBUSY;
- goto cci_release_mem;
- }
- msm_camera_enable_irq(new_cci_dev->irq, false);
+ rc = cam_soc_util_request_platform_resource(soc_info,
+ cam_cci_irq, new_cci_dev);
+ if (rc < 0) {
+ pr_err("%s:%d :Error: requesting platform resources failed:%d\n",
+ __func__, __LINE__, rc);
+ return -EINVAL;
+ }
new_cci_dev->v4l2_dev_str.pdev = pdev;
cam_cci_init_cci_params(new_cci_dev);
cam_cci_init_clk_params(new_cci_dev);
- rc = cam_cci_init_gpio_params(new_cci_dev);
- if (rc < 0) {
- pr_err("%s:%d :Error: In Initializing GPIO params:%d\n",
- __func__, __LINE__, rc);
- goto cci_release_mem;
- }
-
- rc = cam_sensor_get_dt_vreg_data(new_cci_dev->
- v4l2_dev_str.pdev->dev.of_node,
- &(new_cci_dev->cci_vreg), &(new_cci_dev->regulator_count));
- if (rc < 0) {
- pr_err("%s: cam_sensor_get_dt_vreg_data fail\n", __func__);
- rc = -EFAULT;
- goto cci_release_mem;
- }
-
- /* Parse VREG data */
- if ((new_cci_dev->regulator_count < 0) ||
- (new_cci_dev->regulator_count > MAX_REGULATOR)) {
- pr_err("%s: invalid reg count = %d, max is %d\n", __func__,
- new_cci_dev->regulator_count, MAX_REGULATOR);
- rc = -EFAULT;
- goto cci_invalid_vreg_data;
- }
rc = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
if (rc)
- pr_err("%s: failed to add child nodes, rc=%d\n", __func__, rc);
+ pr_err("%s:%d failed to add child nodes, rc=%d\n",
+ __func__, __LINE__, rc);
+
for (i = 0; i < MASTER_MAX; i++) {
new_cci_dev->write_wq[i] = create_singlethread_workqueue(
"cam_cci_wq");
if (!new_cci_dev->write_wq[i])
- pr_err("Failed to create write wq\n");
+ pr_err("%s:%d Failed to create write wq\n",
+ __func__, __LINE__);
}
CDBG("%s line %d\n", __func__, __LINE__);
return 0;
-
-cci_invalid_vreg_data:
- kfree(new_cci_dev->cci_vreg);
- new_cci_dev->cci_vreg = NULL;
-cci_release_mem:
- msm_camera_put_reg_base(pdev, new_cci_dev->base, "cci", true);
-
- return rc;
}
int cam_cci_soc_release(struct cci_device *cci_dev)
{
uint8_t i = 0, rc = 0;
+ struct cam_hw_soc_info *soc_info =
+ &cci_dev->soc_info;
if (!cci_dev->ref_count || cci_dev->cci_state != CCI_STATE_ENABLED) {
- pr_err("%s invalid ref count %d / cci state %d\n",
- __func__, cci_dev->ref_count, cci_dev->cci_state);
+ pr_err("%s:%d invalid ref count %d / cci state %d\n", __func__,
+ __LINE__, cci_dev->ref_count, cci_dev->cci_state);
return -EINVAL;
}
if (--cci_dev->ref_count) {
- CDBG("%s ref_count Exit %d\n", __func__, cci_dev->ref_count);
+ CDBG("%s:%d ref_count Exit %d\n", __func__, __LINE__,
+ cci_dev->ref_count);
return 0;
}
for (i = 0; i < MASTER_MAX; i++)
if (cci_dev->write_wq[i])
flush_workqueue(cci_dev->write_wq[i]);
- msm_camera_enable_irq(cci_dev->irq, false);
- msm_camera_clk_enable(&cci_dev->v4l2_dev_str.pdev->dev,
- cci_dev->cci_clk_info, cci_dev->cci_clk,
- cci_dev->num_clk, false);
-
- rc = msm_camera_enable_vreg(&cci_dev->v4l2_dev_str.pdev->dev,
- cci_dev->cci_vreg, cci_dev->regulator_count, NULL, 0,
- &cci_dev->cci_reg_ptr[0], 0);
- if (rc < 0)
- pr_err("%s:%d cci disable_vreg failed\n", __func__, __LINE__);
-
- rc = msm_camera_config_vreg(&cci_dev->v4l2_dev_str.pdev->dev,
- cci_dev->cci_vreg, cci_dev->regulator_count, NULL, 0,
- &cci_dev->cci_reg_ptr[0], 0);
- if (rc < 0)
- pr_err("%s:%d cci unconfig_vreg failed\n", __func__, __LINE__);
-
- if (cci_dev->cci_pinctrl_status) {
- rc = pinctrl_select_state(cci_dev->cci_pinctrl.pinctrl,
- cci_dev->cci_pinctrl.gpio_state_suspend);
- if (rc)
- pr_err("%s:%d cannot set pin to active state\n",
- __func__, __LINE__);
- }
- cci_dev->cci_pinctrl_status = 0;
- msm_camera_request_gpio_table(cci_dev->cci_gpio_tbl,
- cci_dev->cci_gpio_tbl_size, 0);
for (i = 0; i < MASTER_MAX; i++)
cci_dev->i2c_freq_mode[i] = I2C_MAX_MODES;
+
+ rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+ if (rc) {
+ pr_err("%s:%d: platform resources disable failed, rc=%d\n",
+ __func__, __LINE__, rc);
+ return rc;
+ }
+
cci_dev->cci_state = CCI_STATE_DISABLED;
cci_dev->cycles_per_us = 0;
- cci_dev->cci_clk_src = 0;
+ soc_info->src_clk_idx = 0;
return rc;
}
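
With regulators, clocks and the IRQ owned by cam_soc_util, the error unwinding in cam_cci_init() collapses from a ladder of per-resource labels to a single teardown call. The toy example below shows the shape of that simplification; acquire_all()/release_all() are placeholders for the soc-util requests, not real kernel APIs.

/* Sketch of the consolidated error path; helpers are stand-ins. */
#include <stdio.h>

static int acquire_all(void)  { return 0; }    /* pretend success */
static void release_all(void) { printf("released platform resources\n"); }
static int late_step(void)    { return -1; }   /* pretend this fails */

static int init(void)
{
	int rc = acquire_all();

	if (rc)
		return rc;

	rc = late_step();
	if (rc)
		goto teardown;          /* single label, single undo */

	return 0;

teardown:
	release_all();
	return rc;
}

int main(void)
{
	printf("init rc = %d\n", init());
	return 0;
}
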
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.h
index ca4bbe0..331227b 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.h
@@ -14,6 +14,7 @@
#define _CAM_CCI_SOC_H_
#include "cam_cci_core.h"
+#include "cam_soc_util.h"
/**
* @sd: V4L2 sub device
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
index 6751fdd..71a88bf 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
@@ -19,7 +19,9 @@
void cam_csiphy_query_cap(struct csiphy_device *csiphy_dev,
struct cam_csiphy_query_cap *csiphy_cap)
{
- csiphy_cap->slot_info = csiphy_dev->v4l2_dev_str.pdev->id;
+ struct cam_hw_soc_info *soc_info = &csiphy_dev->soc_info;
+
+ csiphy_cap->slot_info = soc_info->index;
csiphy_cap->version = csiphy_dev->hw_version;
csiphy_cap->clk_lane = csiphy_dev->clk_lane;
}
@@ -27,14 +29,18 @@ void cam_csiphy_query_cap(struct csiphy_device *csiphy_dev,
void cam_csiphy_reset(struct csiphy_device *csiphy_dev)
{
int32_t i;
+ void __iomem *base = NULL;
uint32_t size =
csiphy_dev->ctrl_reg->csiphy_reg.csiphy_reset_array_size;
+ struct cam_hw_soc_info *soc_info = &csiphy_dev->soc_info;
+
+ base = soc_info->reg_map[0].mem_base;
for (i = 0; i < size; i++) {
cam_io_w(
csiphy_dev->ctrl_reg->
csiphy_reset_reg[i].reg_data,
- csiphy_dev->base +
+ base +
csiphy_dev->ctrl_reg->
csiphy_reset_reg[i].reg_addr);
@@ -118,11 +124,13 @@ int32_t cam_cmd_buf_parser(struct csiphy_device *csiphy_dev,
void cam_csiphy_cphy_irq_config(struct csiphy_device *csiphy_dev)
{
int32_t i;
+ void __iomem *csiphybase =
+ csiphy_dev->soc_info.reg_map[0].mem_base;
for (i = 0; i < csiphy_dev->num_irq_registers; i++)
cam_io_w(csiphy_dev->ctrl_reg->
csiphy_irq_reg[i].reg_data,
- csiphy_dev->base +
+ csiphybase +
csiphy_dev->ctrl_reg->
csiphy_irq_reg[i].reg_addr);
}
@@ -130,10 +138,12 @@ void cam_csiphy_cphy_irq_config(struct csiphy_device *csiphy_dev)
void cam_csiphy_cphy_irq_disable(struct csiphy_device *csiphy_dev)
{
int32_t i;
+ void __iomem *csiphybase =
+ csiphy_dev->soc_info.reg_map[0].mem_base;
for (i = 0; i < csiphy_dev->num_irq_registers; i++)
cam_io_w(0x0,
- csiphy_dev->base +
+ csiphybase +
csiphy_dev->ctrl_reg->
csiphy_irq_reg[i].reg_addr);
}
@@ -144,6 +154,8 @@ irqreturn_t cam_csiphy_irq(int irq_num, void *data)
uint8_t i;
struct csiphy_device *csiphy_dev =
(struct csiphy_device *)data;
+ struct cam_hw_soc_info *soc_info = NULL;
+ void __iomem *base = NULL;
if (!csiphy_dev) {
pr_err("%s:%d Invalid Args\n",
@@ -151,27 +163,30 @@ irqreturn_t cam_csiphy_irq(int irq_num, void *data)
return -EINVAL;
}
+ soc_info = &csiphy_dev->soc_info;
+ base = csiphy_dev->soc_info.reg_map[0].mem_base;
+
for (i = 0; i < csiphy_dev->num_irq_registers; i++) {
irq = cam_io_r(
- csiphy_dev->base +
+ base +
csiphy_dev->ctrl_reg->csiphy_reg.
mipi_csiphy_interrupt_status0_addr + 0x4*i);
cam_io_w(irq,
- csiphy_dev->base +
+ base +
csiphy_dev->ctrl_reg->csiphy_reg.
mipi_csiphy_interrupt_clear0_addr + 0x4*i);
pr_err_ratelimited(
"%s CSIPHY%d_IRQ_STATUS_ADDR%d = 0x%x\n",
- __func__, csiphy_dev->v4l2_dev_str.pdev->id, i, irq);
+ __func__, soc_info->index, i, irq);
cam_io_w(0x0,
- csiphy_dev->base +
+ base +
csiphy_dev->ctrl_reg->csiphy_reg.
mipi_csiphy_interrupt_clear0_addr + 0x4*i);
}
- cam_io_w(0x1, csiphy_dev->base +
+ cam_io_w(0x1, base +
csiphy_dev->ctrl_reg->
csiphy_reg.mipi_csiphy_glbl_irq_cmd_addr);
- cam_io_w(0x0, csiphy_dev->base +
+ cam_io_w(0x0, base +
csiphy_dev->ctrl_reg->
csiphy_reg.mipi_csiphy_glbl_irq_cmd_addr);
@@ -196,7 +211,7 @@ int32_t cam_csiphy_config_dev(struct csiphy_device *csiphy_dev)
lane_cnt = csiphy_dev->csiphy_info->lane_cnt;
lane_mask = csiphy_dev->csiphy_info->lane_mask & 0x1f;
settle_cnt = (csiphy_dev->csiphy_info->settle_time / 200000000);
- csiphybase = csiphy_dev->base;
+ csiphybase = csiphy_dev->soc_info.reg_map[0].mem_base;
if (!csiphybase) {
pr_err("%s: csiphybase NULL\n", __func__);
@@ -243,14 +258,14 @@ int32_t cam_csiphy_config_dev(struct csiphy_device *csiphy_dev)
csiphy_common_reg[i].csiphy_param_type) {
case CSIPHY_LANE_ENABLE:
cam_io_w(lane_enable,
- csiphy_dev->base +
+ csiphybase +
csiphy_dev->ctrl_reg->
csiphy_common_reg[i].reg_addr);
break;
case CSIPHY_DEFAULT_PARAMS:
cam_io_w(csiphy_dev->ctrl_reg->
csiphy_common_reg[i].reg_data,
- csiphy_dev->base +
+ csiphybase +
csiphy_dev->ctrl_reg->
csiphy_common_reg[i].reg_addr);
break;
@@ -270,22 +285,22 @@ int32_t cam_csiphy_config_dev(struct csiphy_device *csiphy_dev)
switch (reg_array[lane_pos][i].csiphy_param_type) {
case CSIPHY_LANE_ENABLE:
cam_io_w(lane_enable,
- csiphy_dev->base +
+ csiphybase +
reg_array[lane_pos][i].reg_addr);
break;
case CSIPHY_DEFAULT_PARAMS:
cam_io_w(reg_array[lane_pos][i].reg_data,
- csiphy_dev->base +
+ csiphybase +
reg_array[lane_pos][i].reg_addr);
break;
case CSIPHY_SETTLE_CNT_LOWER_BYTE:
cam_io_w(settle_cnt & 0xFF,
- csiphy_dev->base +
+ csiphybase +
reg_array[lane_pos][i].reg_addr);
break;
case CSIPHY_SETTLE_CNT_HIGHER_BYTE:
cam_io_w((settle_cnt >> 8) & 0xFF,
- csiphy_dev->base +
+ csiphybase +
reg_array[lane_pos][i].reg_addr);
break;
default:
@@ -388,7 +403,7 @@ int32_t cam_csiphy_core_cfg(void *phy_dev,
}
break;
case CAM_STOP_DEV: {
- rc = cam_csiphy_soc_release(csiphy_dev);
+ rc = cam_csiphy_disable_hw(csiphy_dev);
if (rc < 0) {
pr_err("%s:%d Failed in csiphy release\n",
__func__, __LINE__);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c
index f2ece9d..7783b2e 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c
@@ -120,11 +120,13 @@ static int32_t cam_csiphy_platform_probe(struct platform_device *pdev)
mutex_init(&new_csiphy_dev->mutex);
new_csiphy_dev->v4l2_dev_str.pdev = pdev;
+ new_csiphy_dev->soc_info.pdev = pdev;
+
new_csiphy_dev->ref_count = 0;
rc = cam_csiphy_parse_dt_info(pdev, new_csiphy_dev);
if (rc < 0) {
- pr_err("%s:%d :ERROR: dt paring failed: %d\n",
+ pr_err("%s:%d :ERROR: dt parsing failed: %d\n",
__func__, __LINE__, rc);
goto csiphy_no_resource;
}
@@ -167,7 +169,7 @@ static int32_t cam_csiphy_platform_probe(struct platform_device *pdev)
new_csiphy_dev->is_acquired_dev_combo_mode = 0;
cpas_parms.cam_cpas_client_cb = NULL;
- cpas_parms.cell_index = pdev->id;
+ cpas_parms.cell_index = new_csiphy_dev->soc_info.index;
cpas_parms.dev = &pdev->dev;
cpas_parms.userdata = new_csiphy_dev;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h
index 9049e4e..c4258bd 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h
@@ -31,9 +31,9 @@
#include <cam_sensor_cmn_header.h>
#include <cam_req_mgr_interface.h>
#include <cam_subdev.h>
-#include <cam_sensor_soc_api.h>
#include <cam_io_util.h>
#include <cam_cpas_api.h>
+#include "cam_soc_util.h"
#define MAX_CSIPHY 3
#define MAX_DPHY_DATA_LN 4
@@ -175,19 +175,11 @@ struct csiphy_ctrl_t {
* device is for combo mode
*/
struct csiphy_device {
- struct resource *irq;
- void __iomem *base;
struct mutex mutex;
uint32_t hw_version;
uint32_t csiphy_state;
struct csiphy_ctrl_t *ctrl_reg;
- size_t num_clk;
uint32_t csiphy_max_clk;
- int32_t num_vreg;
- struct clk **csiphy_clk;
- struct msm_cam_clk_info *csiphy_clk_info;
- struct camera_vreg_t *csiphy_vreg;
- struct regulator *csiphy_reg_ptr[MAX_REGULATOR];
struct msm_cam_clk_info csiphy_3p_clk_info[2];
struct clk *csiphy_3p_clk[2];
uint32_t csiphy_clk_index;
@@ -203,6 +195,7 @@ struct csiphy_device {
uint32_t acquire_count;
char device_name[20];
uint32_t is_acquired_dev_combo_mode;
+ struct cam_hw_soc_info soc_info;
uint32_t cpas_handle;
};
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c
index 540ec76..6b5aba9 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c
@@ -13,12 +13,13 @@
#include "cam_csiphy_soc.h"
#include "cam_csiphy_core.h"
#include "include/cam_csiphy_1_0_hwreg.h"
-#include "cam_sensor_util.h"
int32_t cam_csiphy_enable_hw(struct csiphy_device *csiphy_dev)
{
int32_t rc = 0;
- long clk_rate = 0;
+ struct cam_hw_soc_info *soc_info;
+
+ soc_info = &csiphy_dev->soc_info;
if (csiphy_dev->ref_count++) {
pr_err("%s:%d csiphy refcount = %d\n", __func__,
@@ -26,92 +27,58 @@ int32_t cam_csiphy_enable_hw(struct csiphy_device *csiphy_dev)
return rc;
}
- rc = msm_camera_config_vreg(&csiphy_dev->v4l2_dev_str.pdev->dev,
- csiphy_dev->csiphy_vreg,
- csiphy_dev->num_vreg, NULL, 0,
- &csiphy_dev->csiphy_reg_ptr[0], 1);
+ rc = cam_soc_util_enable_platform_resource(soc_info, true,
+ CAM_TURBO_VOTE, ENABLE_IRQ);
if (rc < 0) {
- pr_err("%s:%d failed regulator get\n", __func__, __LINE__);
- goto csiphy_config_regulator_fail;
+ pr_err("%s:%d failed to enable platform resources %d\n",
+ __func__, __LINE__, rc);
+ return rc;
}
- rc = msm_camera_enable_vreg(&csiphy_dev->v4l2_dev_str.pdev->dev,
- csiphy_dev->csiphy_vreg,
- csiphy_dev->num_vreg, NULL, 0,
- &csiphy_dev->csiphy_reg_ptr[0], 1);
- if (rc < 0) {
- pr_err("%s:%d failed to enable regulators\n", __func__, rc);
- goto csiphy_regulator_fail;
- }
+ rc = cam_soc_util_set_clk_rate(
+ soc_info->clk[csiphy_dev->csiphy_clk_index],
+ soc_info->clk_name[csiphy_dev->csiphy_clk_index],
+ soc_info->clk_rate[0][csiphy_dev->csiphy_clk_index]);
- /*Enable clocks*/
- rc = msm_camera_clk_enable(&csiphy_dev->v4l2_dev_str.pdev->dev,
- csiphy_dev->csiphy_clk_info, csiphy_dev->csiphy_clk,
- csiphy_dev->num_clk, true);
if (rc < 0) {
- pr_err("%s: csiphy clk enable failed\n", __func__);
- csiphy_dev->ref_count--;
- goto csiphy_regulator_fail;
- }
-
- clk_rate = msm_camera_clk_set_rate(&csiphy_dev->v4l2_dev_str.pdev->dev,
- csiphy_dev->csiphy_clk[csiphy_dev->csiphy_clk_index],
- clk_rate);
- if (clk_rate < 0) {
- pr_err("csiphy_clk_set_rate failed\n");
- goto csiphy_clk_enable_fail;
- }
-
- rc = msm_camera_enable_irq(csiphy_dev->irq, ENABLE_IRQ);
- if (rc < 0) {
- pr_err("%s:%d :ERROR: irq enable failed\n",
+ pr_err("%s:%d csiphy_clk_set_rate failed\n",
__func__, __LINE__);
- goto csiphy_clk_enable_fail;
- return -EINVAL;
+ goto csiphy_disable_platform_resource;
}
cam_csiphy_reset(csiphy_dev);
return rc;
-csiphy_clk_enable_fail:
- msm_camera_clk_enable(&csiphy_dev->v4l2_dev_str.pdev->dev,
- csiphy_dev->csiphy_clk_info, csiphy_dev->csiphy_clk,
- csiphy_dev->num_clk, false);
-csiphy_regulator_fail:
- msm_camera_enable_vreg(&csiphy_dev->v4l2_dev_str.pdev->dev,
- csiphy_dev->csiphy_vreg,
- csiphy_dev->num_vreg, NULL, 0,
- &csiphy_dev->csiphy_reg_ptr[0], 0);
-csiphy_config_regulator_fail:
- msm_camera_config_vreg(&csiphy_dev->v4l2_dev_str.pdev->dev,
- csiphy_dev->csiphy_vreg,
- csiphy_dev->num_vreg, NULL, 0,
- &csiphy_dev->csiphy_reg_ptr[0], 0);
+
+csiphy_disable_platform_resource:
+ cam_soc_util_disable_platform_resource(soc_info, true, true);
return rc;
}
-int32_t cam_csiphy_disable_hw(struct platform_device *pdev)
+int32_t cam_csiphy_disable_hw(struct csiphy_device *csiphy_dev)
{
- struct csiphy_device *csiphy_dev =
- platform_get_drvdata(pdev);
+ struct cam_hw_soc_info *soc_info;
- /*Disable regulators*/
- msm_camera_enable_vreg(&csiphy_dev->v4l2_dev_str.pdev->dev,
- csiphy_dev->csiphy_vreg,
- csiphy_dev->num_vreg, NULL, 0,
- &csiphy_dev->csiphy_reg_ptr[0], 0);
+ if (!csiphy_dev || !csiphy_dev->ref_count) {
+ pr_err("%s:%d csiphy dev NULL / ref_count ZERO\n", __func__,
+ __LINE__);
+ return 0;
+ }
+ soc_info = &csiphy_dev->soc_info;
- /*Disable clocks*/
- msm_camera_clk_enable(&csiphy_dev->v4l2_dev_str.pdev->dev,
- csiphy_dev->csiphy_clk_info, csiphy_dev->csiphy_clk,
- csiphy_dev->num_clk, false);
+ if (--csiphy_dev->ref_count) {
+ pr_err("%s:%d csiphy refcount = %d\n", __func__,
+ __LINE__, csiphy_dev->ref_count);
+ return 0;
+ }
- /*Disable IRQ*/
- msm_camera_enable_irq(csiphy_dev->irq, false);
+ cam_csiphy_reset(csiphy_dev);
+
+ cam_soc_util_disable_platform_resource(soc_info, true, true);
return 0;
-
}
int32_t cam_csiphy_parse_dt_info(struct platform_device *pdev,
@@ -121,11 +88,16 @@ int32_t cam_csiphy_parse_dt_info(struct platform_device *pdev,
uint32_t clk_cnt = 0;
char *csi_3p_clk_name = "csi_phy_3p_clk";
char *csi_3p_clk_src_name = "csiphy_3p_clk_src";
+ struct cam_hw_soc_info *soc_info;
- if (pdev->dev.of_node) {
- of_property_read_u32((&pdev->dev)->of_node,
- "cell-index", &pdev->id);
- CDBG("%s: device id = %d\n", __func__, pdev->id);
+ csiphy_dev->is_csiphy_3phase_hw = 0;
+ soc_info = &csiphy_dev->soc_info;
+
+ rc = cam_soc_util_get_dt_properties(soc_info);
+ if (rc < 0) {
+ pr_err("%s:%d :Error: parsing common soc dt(rc %d)\n",
+ __func__, __LINE__, rc);
+ return rc;
}
csiphy_dev->is_csiphy_3phase_hw = 0;
@@ -151,124 +123,56 @@ int32_t cam_csiphy_parse_dt_info(struct platform_device *pdev,
return rc;
}
- rc = msm_camera_get_clk_info(csiphy_dev->v4l2_dev_str.pdev,
- &csiphy_dev->csiphy_clk_info,
- &csiphy_dev->csiphy_clk,
- &csiphy_dev->num_clk);
- if (rc < 0) {
- pr_err("%s:%d failed clock get\n", __func__, __LINE__);
- return rc;
+ if (soc_info->num_clk > CSIPHY_NUM_CLK_MAX) {
+ pr_err("%s:%d invalid clk count=%d, max is %d\n", __func__,
+ __LINE__, soc_info->num_clk, CSIPHY_NUM_CLK_MAX);
+ return -EINVAL;
}
-
- if (csiphy_dev->num_clk > CSIPHY_NUM_CLK_MAX) {
- pr_err("%s: invalid clk count=%zu, max is %d\n", __func__,
- csiphy_dev->num_clk, CSIPHY_NUM_CLK_MAX);
- goto clk_mem_ovf_err;
- }
-
- for (i = 0; i < csiphy_dev->num_clk; i++) {
- if (!strcmp(csiphy_dev->csiphy_clk_info[i].clk_name,
+ for (i = 0; i < soc_info->num_clk; i++) {
+ if (!strcmp(soc_info->clk_name[i],
csi_3p_clk_src_name)) {
csiphy_dev->csiphy_3p_clk_info[0].clk_name =
- csiphy_dev->csiphy_clk_info[i].clk_name;
+ soc_info->clk_name[i];
csiphy_dev->csiphy_3p_clk_info[0].clk_rate =
- csiphy_dev->csiphy_clk_info[i].clk_rate;
+ soc_info->clk_rate[0][i];
csiphy_dev->csiphy_3p_clk[0] =
- csiphy_dev->csiphy_clk[i];
+ soc_info->clk[i];
continue;
- } else if (!strcmp(csiphy_dev->csiphy_clk_info[i].clk_name,
- csi_3p_clk_name)) {
+ } else if (!strcmp(soc_info->clk_name[i],
+ csi_3p_clk_name)) {
csiphy_dev->csiphy_3p_clk_info[1].clk_name =
- csiphy_dev->csiphy_clk_info[i].clk_name;
+ soc_info->clk_name[i];
csiphy_dev->csiphy_3p_clk_info[1].clk_rate =
- csiphy_dev->csiphy_clk_info[i].clk_rate;
+ soc_info->clk_rate[0][i];
csiphy_dev->csiphy_3p_clk[1] =
- csiphy_dev->csiphy_clk[i];
+ soc_info->clk[i];
continue;
}
- if (!strcmp(csiphy_dev->csiphy_clk_info[clk_cnt].clk_name,
+ if (!strcmp(soc_info->clk_name[i],
"csiphy_timer_src_clk")) {
csiphy_dev->csiphy_max_clk =
- csiphy_dev->csiphy_clk_info[clk_cnt].clk_rate;
+ soc_info->clk_rate[0][clk_cnt];
csiphy_dev->csiphy_clk_index = clk_cnt;
}
- CDBG("%s: clk_rate[%d] = %ld\n", __func__, clk_cnt,
- csiphy_dev->csiphy_clk_info[clk_cnt].clk_rate);
+ CDBG("%s:%d clk_rate[%d] = %d\n", __func__, __LINE__, clk_cnt,
+ soc_info->clk_rate[0][clk_cnt]);
clk_cnt++;
}
+ rc = cam_soc_util_request_platform_resource(&csiphy_dev->soc_info,
+ cam_csiphy_irq, csiphy_dev);
- rc = cam_sensor_get_dt_vreg_data(pdev->dev.of_node,
- &(csiphy_dev->csiphy_vreg), &(csiphy_dev->num_vreg));
- if (rc < 0) {
- pr_err("%s:%d Reg get failed\n", __func__, __LINE__);
- csiphy_dev->num_vreg = 0;
- }
-
- csiphy_dev->base = msm_camera_get_reg_base(pdev, "csiphy", true);
- if (!csiphy_dev->base) {
- pr_err("%s: no mem resource?\n", __func__);
- rc = -ENODEV;
- goto csiphy_no_resource;
- }
-
- csiphy_dev->irq = msm_camera_get_irq(pdev, "csiphy");
- if (!csiphy_dev->irq) {
- pr_err("%s: no irq resource?\n", __func__);
- rc = -ENODEV;
- goto csiphy_no_resource;
- }
-
- rc = msm_camera_register_irq(pdev, csiphy_dev->irq,
- cam_csiphy_irq, IRQF_TRIGGER_RISING, "csiphy", csiphy_dev);
- if (rc < 0) {
- pr_err("%s: irq request fail\n", __func__);
- rc = -EBUSY;
- goto csiphy_no_resource;
- }
- msm_camera_enable_irq(csiphy_dev->irq, false);
- return rc;
-
-csiphy_no_resource:
- msm_camera_put_reg_base(pdev, csiphy_dev->base, "csiphy", true);
-clk_mem_ovf_err:
- msm_camera_put_clk_info(csiphy_dev->v4l2_dev_str.pdev,
- &csiphy_dev->csiphy_clk_info,
- &csiphy_dev->csiphy_clk,
- csiphy_dev->num_clk);
return rc;
}
int32_t cam_csiphy_soc_release(struct csiphy_device *csiphy_dev)
{
-
- if (!csiphy_dev || !csiphy_dev->ref_count) {
- pr_err("%s csiphy dev NULL / ref_count ZERO\n", __func__);
+ if (!csiphy_dev) {
+ pr_err("%s:%d csiphy dev NULL\n", __func__, __LINE__);
return 0;
}
- if (--csiphy_dev->ref_count) {
- pr_err("%s:%d csiphy refcount = %d\n", __func__,
- __LINE__, csiphy_dev->ref_count);
- return 0;
- }
-
- cam_csiphy_reset(csiphy_dev);
-
- msm_camera_enable_irq(csiphy_dev->irq, false);
-
- msm_camera_clk_enable(&csiphy_dev->v4l2_dev_str.pdev->dev,
- csiphy_dev->csiphy_clk_info, csiphy_dev->csiphy_clk,
- csiphy_dev->num_clk, false);
-
- msm_camera_enable_vreg(&csiphy_dev->v4l2_dev_str.pdev->dev,
- csiphy_dev->csiphy_vreg, csiphy_dev->num_vreg,
- NULL, 0, &csiphy_dev->csiphy_reg_ptr[0], 0);
-
- msm_camera_config_vreg(&csiphy_dev->v4l2_dev_str.pdev->dev,
- csiphy_dev->csiphy_vreg, csiphy_dev->num_vreg,
- NULL, 0, &csiphy_dev->csiphy_reg_ptr[0], 0);
-
+ cam_soc_util_release_platform_resource(&csiphy_dev->soc_info);
return 0;
}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.h
index 27de3fc..94ec79f 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.h
@@ -60,4 +60,11 @@ int cam_csiphy_parse_dt_info(struct platform_device *pdev,
*/
int cam_csiphy_enable_hw(struct csiphy_device *csiphy_dev);
+/**
+ * @csiphy_dev: CSIPhy device structure
+ *
+ * This API disables SOC related parameters
+ */
+int cam_csiphy_disable_hw(struct csiphy_device *csiphy_dev);
+
#endif /* _CAM_CSIPHY_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
index 4fc3aa1..031c340 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
@@ -14,6 +14,7 @@
#include <cam_sensor_cmn_header.h>
#include "cam_sensor_core.h"
#include <cam_sensor_util.h>
+#include "cam_soc_util.h"
static int32_t cam_sensor_i2c_pkt_parse(struct cam_sensor_ctrl_t *s_ctrl,
void *arg)
@@ -492,7 +493,7 @@ void cam_sensor_query_cap(struct cam_sensor_ctrl_t *s_ctrl,
query_cap->ois_slot_id =
s_ctrl->sensordata->subdev_id[SUB_MODULE_OIS];
query_cap->slot_info =
- s_ctrl->id;
+ s_ctrl->soc_info.index;
}
static uint16_t cam_sensor_id_by_mask(struct cam_sensor_ctrl_t *s_ctrl,
@@ -603,8 +604,7 @@ int32_t cam_sensor_driver_cmd(struct cam_sensor_ctrl_t *s_ctrl,
/* Parse and fill vreg params for powerup settings */
rc = msm_camera_fill_vreg_params(
- s_ctrl->sensordata->power_info.cam_vreg,
- s_ctrl->sensordata->power_info.num_vreg,
+ &s_ctrl->soc_info,
s_ctrl->sensordata->power_info.power_setting,
s_ctrl->sensordata->power_info.power_setting_size);
if (rc < 0) {
@@ -617,8 +617,7 @@ int32_t cam_sensor_driver_cmd(struct cam_sensor_ctrl_t *s_ctrl,
/* Parse and fill vreg params for powerdown settings*/
rc = msm_camera_fill_vreg_params(
- s_ctrl->sensordata->power_info.cam_vreg,
- s_ctrl->sensordata->power_info.num_vreg,
+ &s_ctrl->soc_info,
s_ctrl->sensordata->power_info.power_down_setting,
s_ctrl->sensordata->power_info.power_down_setting_size);
if (rc < 0) {
@@ -650,7 +649,8 @@ int32_t cam_sensor_driver_cmd(struct cam_sensor_ctrl_t *s_ctrl,
}
CDBG("%s:%d Probe Succeeded on the slot: %d\n",
- __func__, __LINE__, s_ctrl->id);
+ __func__, __LINE__,
+ s_ctrl->soc_info.index);
rc = cam_sensor_power_down(s_ctrl);
if (rc < 0) {
pr_err("%s:%d :Error: fail in Sensor Power Down\n",
@@ -844,6 +844,8 @@ int cam_sensor_power_up(struct cam_sensor_ctrl_t *s_ctrl)
int rc;
struct cam_sensor_power_ctrl_t *power_info;
struct cam_camera_slave_info *slave_info;
+ struct cam_hw_soc_info *soc_info =
+ &s_ctrl->soc_info;
if (!s_ctrl) {
pr_err("%s:%d failed: %pK\n",
@@ -861,7 +863,7 @@ int cam_sensor_power_up(struct cam_sensor_ctrl_t *s_ctrl)
return -EINVAL;
}
- rc = cam_sensor_core_power_up(power_info);
+ rc = cam_sensor_core_power_up(power_info, soc_info);
if (rc < 0) {
pr_err("%s:%d power up the core is failed:%d\n",
__func__, __LINE__, rc);
@@ -884,6 +886,7 @@ int cam_sensor_power_up(struct cam_sensor_ctrl_t *s_ctrl)
int cam_sensor_power_down(struct cam_sensor_ctrl_t *s_ctrl)
{
struct cam_sensor_power_ctrl_t *power_info;
+ struct cam_hw_soc_info *soc_info;
int rc = 0;
if (!s_ctrl) {
@@ -893,13 +896,14 @@ int cam_sensor_power_down(struct cam_sensor_ctrl_t *s_ctrl)
}
power_info = &s_ctrl->sensordata->power_info;
+ soc_info = &s_ctrl->soc_info;
if (!power_info) {
pr_err("%s:%d failed: power_info %pK\n",
__func__, __LINE__, power_info);
return -EINVAL;
}
- rc = msm_camera_power_down(power_info);
+ rc = msm_camera_power_down(power_info, soc_info);
if (rc < 0) {
pr_err("%s:%d power down the core is failed:%d\n",
__func__, __LINE__, rc);
@@ -1003,3 +1007,49 @@ int32_t cam_sensor_apply_request(struct cam_req_mgr_apply_request *apply)
rc = cam_sensor_apply_settings(s_ctrl, apply->request_id);
return rc;
}
+
+int32_t cam_sensor_flush_request(struct cam_req_mgr_flush_request *flush_req)
+{
+ int32_t rc = 0, i;
+ uint32_t cancel_req_id_found = 0;
+ struct cam_sensor_ctrl_t *s_ctrl = NULL;
+ struct i2c_settings_array *i2c_set = NULL;
+
+ if (!flush_req)
+ return -EINVAL;
+
+ s_ctrl = (struct cam_sensor_ctrl_t *)
+ cam_get_device_priv(flush_req->dev_hdl);
+ if (!s_ctrl) {
+ pr_err("%s: Device data is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
+ i2c_set = &(s_ctrl->i2c_data.per_frame[i]);
+
+ if ((flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ)
+ && (i2c_set->request_id != flush_req->req_id))
+ continue;
+
+ if (i2c_set->is_settings_valid == 1) {
+ rc = delete_request(i2c_set);
+ if (rc < 0)
+ pr_err("%s:%d :Error: delete request: %lld rc: %d\n",
+ __func__, __LINE__,
+ i2c_set->request_id, rc);
+
+ if (flush_req->type ==
+ CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
+ cancel_req_id_found = 1;
+ break;
+ }
+ }
+ }
+
+ if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ &&
+ !cancel_req_id_found)
+ CDBG("%s:Flush request id:%lld not found in the pending list\n",
+ __func__, flush_req->req_id);
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.h
index b23edce..c8158fa 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.h
@@ -53,6 +53,13 @@ int cam_sensor_apply_settings(struct cam_sensor_ctrl_t *s_ctrl, int64_t req_id);
int cam_sensor_apply_request(struct cam_req_mgr_apply_request *apply);
/**
+ * @flush: Req mgr structure for flushing request
+ *
+ * This API flushes the specified request
+ */
+int cam_sensor_flush_request(struct cam_req_mgr_flush_request *flush);
+
+/**
* @info: Sub device info to req mgr
*
* Publish the subdevice info
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.c
index 448ce51..c06a1b3 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.c
@@ -166,6 +166,7 @@ static int32_t cam_sensor_driver_platform_probe(
{
int32_t rc = 0, i = 0;
struct cam_sensor_ctrl_t *s_ctrl = NULL;
+ struct cam_hw_soc_info *soc_info = NULL;
/* Create sensor control structure */
s_ctrl = devm_kzalloc(&pdev->dev,
@@ -173,6 +174,9 @@ static int32_t cam_sensor_driver_platform_probe(
if (!s_ctrl)
return -ENOMEM;
+ soc_info = &s_ctrl->soc_info;
+ soc_info->pdev = pdev;
+
/* Initialize sensor device type */
s_ctrl->of_node = pdev->dev.of_node;
s_ctrl->is_probe_succeed = 0;
@@ -189,7 +193,7 @@ static int32_t cam_sensor_driver_platform_probe(
}
/* Fill platform device id*/
- pdev->id = s_ctrl->id;
+ pdev->id = soc_info->index;
s_ctrl->v4l2_dev_str.internal_ops =
&cam_sensor_internal_ops;
@@ -230,6 +234,7 @@ static int32_t cam_sensor_driver_platform_probe(
s_ctrl->bridge_intf.ops.get_dev_info = cam_sensor_publish_dev_info;
s_ctrl->bridge_intf.ops.link_setup = cam_sensor_establish_link;
s_ctrl->bridge_intf.ops.apply_req = cam_sensor_apply_request;
+ s_ctrl->bridge_intf.ops.flush_req = cam_sensor_flush_request;
s_ctrl->sensordata->power_info.dev = &pdev->dev;
platform_set_drvdata(pdev, s_ctrl);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.h
index f597c36..ae14c9d 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.h
@@ -30,7 +30,6 @@
#include <cam_cci_dev.h>
#include <cam_sensor_cmn_header.h>
#include <cam_subdev.h>
-#include <cam_sensor_soc_api.h>
#include <cam_sensor_io.h>
#define NUM_MASTERS 2
@@ -91,6 +90,7 @@ struct intf_params {
*/
struct cam_sensor_ctrl_t {
struct platform_device *pdev;
+ struct cam_hw_soc_info soc_info;
struct mutex cam_sensor_mutex;
struct cam_sensor_board_info *sensordata;
enum cci_i2c_master_t cci_i2c_master;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_soc.c
index 8cb1078..78edec1 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_soc.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_soc.c
@@ -17,6 +17,7 @@
#include <cam_sensor_io.h>
#include <cam_req_mgr_util.h>
#include "cam_sensor_soc.h"
+#include "cam_soc_util.h"
int32_t cam_sensor_get_sub_module_index(struct device_node *of_node,
struct cam_sensor_board_info *s_info)
@@ -31,15 +32,15 @@ int32_t cam_sensor_get_sub_module_index(struct device_node *of_node,
for (i = 0; i < SUB_MODULE_MAX; i++)
sensor_info->subdev_id[i] = -1;
- src_node = of_parse_phandle(of_node, "qcom,actuator-src", 0);
+ src_node = of_parse_phandle(of_node, "actuator-src", 0);
if (!src_node) {
CDBG("%s:%d src_node NULL\n", __func__, __LINE__);
} else {
rc = of_property_read_u32(src_node, "cell-index", &val);
- CDBG("%s qcom,actuator cell index %d, rc %d\n", __func__,
- val, rc);
+ CDBG("%s:%d actuator cell index %d, rc %d\n", __func__,
+ __LINE__, val, rc);
if (rc < 0) {
- pr_err("%s failed %d\n", __func__, __LINE__);
+ pr_err("%s:%d failed %d\n", __func__, __LINE__, rc);
of_node_put(src_node);
return rc;
}
@@ -47,15 +48,15 @@ int32_t cam_sensor_get_sub_module_index(struct device_node *of_node,
of_node_put(src_node);
}
- src_node = of_parse_phandle(of_node, "qcom,ois-src", 0);
+ src_node = of_parse_phandle(of_node, "ois-src", 0);
if (!src_node) {
CDBG("%s:%d src_node NULL\n", __func__, __LINE__);
} else {
rc = of_property_read_u32(src_node, "cell-index", &val);
- CDBG("%s qcom,ois cell index %d, rc %d\n", __func__,
+ CDBG("%s:%d ois cell index %d, rc %d\n", __func__, __LINE__,
val, rc);
if (rc < 0) {
- pr_err("%s failed %d\n", __func__, __LINE__);
+ pr_err("%s:%d failed %d\n", __func__, __LINE__, rc);
of_node_put(src_node);
return rc;
}
@@ -63,15 +64,15 @@ int32_t cam_sensor_get_sub_module_index(struct device_node *of_node,
of_node_put(src_node);
}
- src_node = of_parse_phandle(of_node, "qcom,eeprom-src", 0);
+ src_node = of_parse_phandle(of_node, "eeprom-src", 0);
if (!src_node) {
CDBG("%s:%d eeprom src_node NULL\n", __func__, __LINE__);
} else {
rc = of_property_read_u32(src_node, "cell-index", &val);
- CDBG("%s qcom,eeprom cell index %d, rc %d\n", __func__,
+ CDBG("%s:%d eeprom cell index %d, rc %d\n", __func__, __LINE__,
val, rc);
if (rc < 0) {
- pr_err("%s failed %d\n", __func__, __LINE__);
+ pr_err("%s:%d failed %d\n", __func__, __LINE__, rc);
of_node_put(src_node);
return rc;
}
@@ -79,13 +80,13 @@ int32_t cam_sensor_get_sub_module_index(struct device_node *of_node,
of_node_put(src_node);
}
- src_node = of_parse_phandle(of_node, "qcom,led-flash-src", 0);
+ src_node = of_parse_phandle(of_node, "led-flash-src", 0);
if (!src_node) {
CDBG("%s:%d src_node NULL\n", __func__, __LINE__);
} else {
rc = of_property_read_u32(src_node, "cell-index", &val);
- CDBG("%s qcom,led flash cell index %d, rc %d\n", __func__,
- val, rc);
+ CDBG("%s:%d led flash cell index %d, rc %d\n", __func__,
+ __LINE__, val, rc);
if (rc < 0) {
pr_err("%s:%d failed %d\n", __func__, __LINE__, rc);
of_node_put(src_node);
@@ -95,7 +96,7 @@ int32_t cam_sensor_get_sub_module_index(struct device_node *of_node,
of_node_put(src_node);
}
- rc = of_property_read_u32(of_node, "qcom,csiphy-sd-index", &val);
+ rc = of_property_read_u32(of_node, "csiphy-sd-index", &val);
if (rc < 0)
pr_err("%s:%d :Error: paring the dt node for csiphy rc %d\n",
__func__, __LINE__, rc);
@@ -110,27 +111,33 @@ static int32_t cam_sensor_driver_get_dt_data(struct cam_sensor_ctrl_t *s_ctrl)
int32_t rc = 0;
struct cam_sensor_board_info *sensordata = NULL;
struct device_node *of_node = s_ctrl->of_node;
- uint32_t cell_id;
-
+ struct cam_hw_soc_info *soc_info = &s_ctrl->soc_info;
s_ctrl->sensordata = kzalloc(sizeof(*sensordata), GFP_KERNEL);
if (!s_ctrl->sensordata)
return -ENOMEM;
sensordata = s_ctrl->sensordata;
- /*
- * Read cell index - this cell index will be the camera slot where
- * this camera will be mounted
- */
- rc = of_property_read_u32(of_node, "cell-index", &cell_id);
+
+ rc = cam_soc_util_get_dt_properties(soc_info);
if (rc < 0) {
- pr_err("failed: cell-index rc %d", rc);
+ pr_err("%s:%d Failed to read DT properties rc %d",
+ __func__, __LINE__, rc);
goto FREE_SENSOR_DATA;
}
- s_ctrl->id = cell_id;
+
+ rc = cam_sensor_util_init_gpio_pin_tbl(soc_info,
+ &sensordata->power_info.gpio_num_info);
+ if (rc < 0) {
+ pr_err("%s:%d Failed to read gpios %d", __func__, __LINE__, rc);
+ goto FREE_SENSOR_DATA;
+ }
+
+ s_ctrl->id = soc_info->index;
/* Validate cell_id */
- if (cell_id >= MAX_CAMERAS) {
- pr_err("failed: invalid cell_id %d", cell_id);
+ if (s_ctrl->id >= MAX_CAMERAS) {
+ pr_err("%s:%d Failed invalid cell_id %d", __func__, __LINE__,
+ s_ctrl->id);
rc = -EINVAL;
goto FREE_SENSOR_DATA;
}
@@ -138,48 +145,33 @@ static int32_t cam_sensor_driver_get_dt_data(struct cam_sensor_ctrl_t *s_ctrl)
/* Read subdev info */
rc = cam_sensor_get_sub_module_index(of_node, sensordata);
if (rc < 0) {
- pr_err("failed");
+ pr_err("%s:%d failed to get sub module index, rc=%d\n",
+ __func__, __LINE__, rc);
goto FREE_SENSOR_DATA;
}
- /* Read vreg information */
- rc = cam_sensor_get_dt_vreg_data(of_node,
- &sensordata->power_info.cam_vreg,
- &sensordata->power_info.num_vreg);
- if (rc < 0) {
- pr_err("failed: cam_sensor_get_dt_vreg_data rc %d", rc);
- goto FREE_SENSOR_DATA;
- }
-
- /* Read gpio information */
- rc = msm_sensor_driver_get_gpio_data
- (&(sensordata->power_info.gpio_conf), of_node);
- if (rc < 0) {
- pr_err("failed: msm_sensor_driver_get_gpio_data rc %d", rc);
- goto FREE_VREG_DATA;
- }
-
/* Get CCI master */
- rc = of_property_read_u32(of_node, "qcom,cci-master",
+ rc = of_property_read_u32(of_node, "cci-master",
&s_ctrl->cci_i2c_master);
- CDBG("qcom,cci-master %d, rc %d", s_ctrl->cci_i2c_master, rc);
+ CDBG("%s:%d cci-master %d, rc %d", __func__, __LINE__,
+ s_ctrl->cci_i2c_master, rc);
if (rc < 0) {
/* Set default master 0 */
s_ctrl->cci_i2c_master = MASTER_0;
rc = 0;
}
- if (of_property_read_u32(of_node, "qcom,sensor-position-pitch",
+ if (of_property_read_u32(of_node, "sensor-position-pitch",
&sensordata->pos_pitch) < 0) {
CDBG("%s:%d Invalid sensor position\n", __func__, __LINE__);
sensordata->pos_pitch = 360;
}
- if (of_property_read_u32(of_node, "qcom,sensor-position-roll",
+ if (of_property_read_u32(of_node, "sensor-position-roll",
&sensordata->pos_roll) < 0) {
CDBG("%s:%d Invalid sensor position\n", __func__, __LINE__);
sensordata->pos_roll = 360;
}
- if (of_property_read_u32(of_node, "qcom,sensor-position-yaw",
+ if (of_property_read_u32(of_node, "sensor-position-yaw",
&sensordata->pos_yaw) < 0) {
CDBG("%s:%d Invalid sensor position\n", __func__, __LINE__);
sensordata->pos_yaw = 360;
@@ -187,8 +179,6 @@ static int32_t cam_sensor_driver_get_dt_data(struct cam_sensor_ctrl_t *s_ctrl)
return rc;
-FREE_VREG_DATA:
- kfree(sensordata->power_info.cam_vreg);
FREE_SENSOR_DATA:
kfree(sensordata);
return rc;
@@ -223,43 +213,42 @@ int32_t msm_sensor_init_default_params(struct cam_sensor_ctrl_t *s_ctrl)
int32_t cam_sensor_parse_dt(struct cam_sensor_ctrl_t *s_ctrl)
{
- int32_t rc = 0;
+ int32_t i, rc = 0;
+ struct cam_hw_soc_info *soc_info = &s_ctrl->soc_info;
/* Parse dt information and store in sensor control structure */
rc = cam_sensor_driver_get_dt_data(s_ctrl);
if (rc < 0) {
- pr_err("failed: rc %d", rc);
+ pr_err("%s:%d Failed to get dt data rc %d", __func__, __LINE__,
+ rc);
return rc;
}
/* Initialize mutex */
mutex_init(&(s_ctrl->cam_sensor_mutex));
- pr_err("%s: %d\n", __func__, __LINE__);
+ CDBG("%s: %d\n", __func__, __LINE__);
/* Initialize default parameters */
+ for (i = 0; i < soc_info->num_clk; i++) {
+ soc_info->clk[i] = devm_clk_get(&soc_info->pdev->dev,
+ soc_info->clk_name[i]);
+		if (IS_ERR(soc_info->clk[i])) {
+ pr_err("%s:%d get failed for %s\n",
+ __func__, __LINE__, soc_info->clk_name[i]);
+ rc = -ENOENT;
+ return rc;
+ }
+ }
rc = msm_sensor_init_default_params(s_ctrl);
if (rc < 0) {
- pr_err("failed: msm_sensor_init_default_params rc %d", rc);
- goto FREE_DT_DATA;
- }
-
- /* Get clocks information */
- rc = msm_camera_get_clk_info(s_ctrl->pdev,
- &s_ctrl->sensordata->power_info.clk_info,
- &s_ctrl->sensordata->power_info.clk_ptr,
- &s_ctrl->sensordata->power_info.clk_info_size);
- if (rc < 0) {
- pr_err("failed: msm_camera_get_clk_info rc %d", rc);
+ pr_err("%s;%d failed: msm_sensor_init_default_params rc %d",
+ __func__, __LINE__, rc);
goto FREE_DT_DATA;
}
return rc;
FREE_DT_DATA:
- kfree(s_ctrl->sensordata->power_info.gpio_conf->gpio_num_info);
- kfree(s_ctrl->sensordata->power_info.gpio_conf->cam_gpio_req_tbl);
- kfree(s_ctrl->sensordata->power_info.gpio_conf);
- kfree(s_ctrl->sensordata->power_info.cam_vreg);
kfree(s_ctrl->sensordata);
s_ctrl->sensordata = NULL;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/Makefile
index 766828e..770391c 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/Makefile
@@ -3,4 +3,4 @@
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
-obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_util.o cam_sensor_soc_api.o
\ No newline at end of file
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_util.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h
index e5e4872..d12ff2b 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h
@@ -263,14 +263,9 @@ struct cam_sensor_power_ctrl_t {
uint16_t power_setting_size;
struct cam_sensor_power_setting *power_down_setting;
uint16_t power_down_setting_size;
- struct msm_camera_gpio_conf *gpio_conf;
- struct camera_vreg_t *cam_vreg;
- int num_vreg;
- struct clk **clk_ptr;
- struct msm_cam_clk_info *clk_info;
+ struct msm_camera_gpio_num_info *gpio_num_info;
struct msm_pinctrl_info pinctrl_info;
uint8_t cam_pinctrl_status;
- size_t clk_info_size;
};
struct cam_camera_slave_info {
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_soc_api.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_soc_api.c
deleted file mode 100644
index 2eed9ce..0000000
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_soc_api.c
+++ /dev/null
@@ -1,1331 +0,0 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/of_platform.h>
-#include <linux/msm-bus.h>
-#include "cam_sensor_soc_api.h"
-
-#define NO_SET_RATE -1
-#define INIT_RATE -2
-
-#ifdef CONFIG_CAM_SOC_API_DBG
-#define CDBG(fmt, args...) pr_err(fmt, ##args)
-#else
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
-#endif
-
-int msm_cam_clk_sel_src(struct device *dev, struct msm_cam_clk_info *clk_info,
- struct msm_cam_clk_info *clk_src_info, int num_clk)
-{
- int i;
- int rc = 0;
- struct clk *mux_clk = NULL;
- struct clk *src_clk = NULL;
-
- for (i = 0; i < num_clk; i++) {
- if (clk_src_info[i].clk_name) {
- mux_clk = clk_get(dev, clk_info[i].clk_name);
- if (IS_ERR(mux_clk)) {
- pr_err("%s get failed\n",
- clk_info[i].clk_name);
- continue;
- }
- src_clk = clk_get(dev, clk_src_info[i].clk_name);
- if (IS_ERR(src_clk)) {
- pr_err("%s get failed\n",
- clk_src_info[i].clk_name);
- continue;
- }
- clk_set_parent(mux_clk, src_clk);
- }
- }
- return rc;
-}
-
-int msm_cam_clk_enable(struct device *dev, struct msm_cam_clk_info *clk_info,
- struct clk **clk_ptr, int num_clk, int enable)
-{
- int i;
- int rc = 0;
- long clk_rate;
-
- if (enable) {
- for (i = 0; i < num_clk; i++) {
- CDBG("%s enable %s\n", __func__, clk_info[i].clk_name);
- clk_ptr[i] = clk_get(dev, clk_info[i].clk_name);
- if (IS_ERR(clk_ptr[i])) {
- pr_err("%s get failed\n", clk_info[i].clk_name);
- rc = PTR_ERR(clk_ptr[i]);
- goto cam_clk_get_err;
- }
- if (clk_info[i].clk_rate > 0) {
- clk_rate = clk_round_rate(clk_ptr[i],
- clk_info[i].clk_rate);
- if (clk_rate < 0) {
- pr_err("%s round failed\n",
- clk_info[i].clk_name);
- goto cam_clk_set_err;
- }
- rc = clk_set_rate(clk_ptr[i],
- clk_rate);
- if (rc < 0) {
- pr_err("%s set failed\n",
- clk_info[i].clk_name);
- goto cam_clk_set_err;
- }
-
- } else if (clk_info[i].clk_rate == INIT_RATE) {
- clk_rate = clk_get_rate(clk_ptr[i]);
- if (clk_rate == 0) {
- clk_rate =
- clk_round_rate(clk_ptr[i], 0);
- if (clk_rate < 0) {
- pr_err("%s round rate failed\n",
- clk_info[i].clk_name);
- goto cam_clk_set_err;
- }
- rc = clk_set_rate(clk_ptr[i],
- clk_rate);
- if (rc < 0) {
- pr_err("%s set rate failed\n",
- clk_info[i].clk_name);
- goto cam_clk_set_err;
- }
- }
- }
- rc = clk_prepare(clk_ptr[i]);
- if (rc < 0) {
- pr_err("%s prepare failed\n",
- clk_info[i].clk_name);
- goto cam_clk_prepare_err;
- }
-
- rc = clk_enable(clk_ptr[i]);
- if (rc < 0) {
- pr_err("%s enable failed\n",
- clk_info[i].clk_name);
- goto cam_clk_enable_err;
- }
- if (clk_info[i].delay > 20)
- msleep(clk_info[i].delay);
- else if (clk_info[i].delay)
- usleep_range(clk_info[i].delay * 1000,
- (clk_info[i].delay * 1000) + 1000);
- }
- } else {
- for (i = num_clk - 1; i >= 0; i--) {
- if (clk_ptr[i] != NULL) {
- CDBG("%s disable %s\n", __func__,
- clk_info[i].clk_name);
- clk_disable(clk_ptr[i]);
- clk_unprepare(clk_ptr[i]);
- clk_put(clk_ptr[i]);
- }
- }
- }
-
- return rc;
-
-cam_clk_enable_err:
- clk_unprepare(clk_ptr[i]);
-cam_clk_prepare_err:
-cam_clk_set_err:
- clk_put(clk_ptr[i]);
-cam_clk_get_err:
- for (i--; i >= 0; i--) {
- if (clk_ptr[i] != NULL) {
- clk_disable(clk_ptr[i]);
- clk_unprepare(clk_ptr[i]);
- clk_put(clk_ptr[i]);
- }
- }
-
- return rc;
-}
-
-int msm_camera_config_vreg(struct device *dev, struct camera_vreg_t *cam_vreg,
- int num_vreg, enum msm_camera_vreg_name_t *vreg_seq,
- int num_vreg_seq, struct regulator **reg_ptr, int config)
-{
- int i = 0, j = 0;
- int rc = 0;
- struct camera_vreg_t *curr_vreg;
-
- if (num_vreg_seq > num_vreg) {
- pr_err("%s:%d vreg sequence invalid\n", __func__, __LINE__);
- return -EINVAL;
- }
- if (!num_vreg_seq)
- num_vreg_seq = num_vreg;
-
- if (config) {
- for (i = 0; i < num_vreg_seq; i++) {
- if (vreg_seq) {
- j = vreg_seq[i];
- if (j >= num_vreg)
- continue;
- } else {
- j = i;
- }
- curr_vreg = &cam_vreg[j];
- reg_ptr[j] = regulator_get(dev,
- curr_vreg->reg_name);
- if (IS_ERR(reg_ptr[j])) {
- pr_err("%s: %s get failed\n",
- __func__,
- curr_vreg->reg_name);
- reg_ptr[j] = NULL;
- goto vreg_get_fail;
- }
- if (regulator_count_voltages(reg_ptr[j]) > 0) {
- rc = regulator_set_voltage(
- reg_ptr[j],
- curr_vreg->min_voltage,
- curr_vreg->max_voltage);
- if (rc < 0) {
- pr_err("%s: %s set voltage failed\n",
- __func__,
- curr_vreg->reg_name);
- goto vreg_set_voltage_fail;
- }
- if (curr_vreg->op_mode >= 0) {
- rc = regulator_set_load(
- reg_ptr[j],
- curr_vreg->op_mode);
- if (rc < 0) {
- pr_err(
- "%s:%s set optimum mode fail\n",
- __func__,
- curr_vreg->reg_name);
- goto vreg_set_opt_mode_fail;
- }
- }
- }
- }
- } else {
- for (i = num_vreg_seq-1; i >= 0; i--) {
- if (vreg_seq) {
- j = vreg_seq[i];
- if (j >= num_vreg)
- continue;
- } else {
- j = i;
- }
- curr_vreg = &cam_vreg[j];
- if (reg_ptr[j]) {
- if (regulator_count_voltages(reg_ptr[j]) > 0) {
- if (curr_vreg->op_mode >= 0) {
- regulator_set_load(
- reg_ptr[j], 0);
- }
- regulator_set_voltage(
- reg_ptr[j], 0, curr_vreg->
- max_voltage);
- }
- regulator_put(reg_ptr[j]);
- reg_ptr[j] = NULL;
- }
- }
- }
-
- return 0;
-
-vreg_unconfig:
- if (regulator_count_voltages(reg_ptr[j]) > 0)
- regulator_set_load(reg_ptr[j], 0);
-
-vreg_set_opt_mode_fail:
- if (regulator_count_voltages(reg_ptr[j]) > 0)
- regulator_set_voltage(reg_ptr[j], 0,
- curr_vreg->max_voltage);
-
-vreg_set_voltage_fail:
- regulator_put(reg_ptr[j]);
- reg_ptr[j] = NULL;
-
-vreg_get_fail:
- for (i--; i >= 0; i--) {
- if (vreg_seq) {
- j = vreg_seq[i];
- if (j >= num_vreg)
- continue;
- } else {
- j = i;
- }
- curr_vreg = &cam_vreg[j];
- goto vreg_unconfig;
- }
-
- return -ENODEV;
-}
-
-int msm_camera_enable_vreg(struct device *dev, struct camera_vreg_t *cam_vreg,
- int num_vreg, enum msm_camera_vreg_name_t *vreg_seq,
- int num_vreg_seq, struct regulator **reg_ptr, int enable)
-{
- int i = 0, j = 0, rc = 0;
-
- if (num_vreg_seq > num_vreg) {
- pr_err("%s:%d vreg sequence invalid\n", __func__, __LINE__);
- return -EINVAL;
- }
- if (!num_vreg_seq)
- num_vreg_seq = num_vreg;
-
- if (enable) {
- for (i = 0; i < num_vreg_seq; i++) {
- if (vreg_seq) {
- j = vreg_seq[i];
- if (j >= num_vreg)
- continue;
- } else
- j = i;
- if (IS_ERR(reg_ptr[j])) {
- pr_err("%s: %s null regulator\n",
- __func__, cam_vreg[j].reg_name);
- goto disable_vreg;
- }
- rc = regulator_enable(reg_ptr[j]);
- if (rc < 0) {
- pr_err("%s: %s enable failed\n",
- __func__, cam_vreg[j].reg_name);
- goto disable_vreg;
- }
- if (cam_vreg[j].delay > 20)
- msleep(cam_vreg[j].delay);
- else if (cam_vreg[j].delay)
- usleep_range(cam_vreg[j].delay * 1000,
- (cam_vreg[j].delay * 1000) + 1000);
- }
- } else {
- for (i = num_vreg_seq-1; i >= 0; i--) {
- if (vreg_seq) {
- j = vreg_seq[i];
- if (j >= num_vreg)
- continue;
- } else
- j = i;
- regulator_disable(reg_ptr[j]);
- if (cam_vreg[j].delay > 20)
- msleep(cam_vreg[j].delay);
- else if (cam_vreg[j].delay)
- usleep_range(cam_vreg[j].delay * 1000,
- (cam_vreg[j].delay * 1000) + 1000);
- }
- }
-
- return rc;
-disable_vreg:
- for (i--; i >= 0; i--) {
- if (vreg_seq) {
- j = vreg_seq[i];
- if (j >= num_vreg)
- continue;
- } else
- j = i;
- regulator_disable(reg_ptr[j]);
- if (cam_vreg[j].delay > 20)
- msleep(cam_vreg[j].delay);
- else if (cam_vreg[j].delay)
- usleep_range(cam_vreg[j].delay * 1000,
- (cam_vreg[j].delay * 1000) + 1000);
- }
-
- return rc;
-}
-
-int msm_camera_set_gpio_table(struct msm_gpio_set_tbl *gpio_tbl,
- uint8_t gpio_tbl_size, int gpio_en)
-{
- int rc = 0, i;
-
- if (gpio_en) {
- for (i = 0; i < gpio_tbl_size; i++) {
- gpio_set_value_cansleep(gpio_tbl[i].gpio,
- gpio_tbl[i].flags);
- usleep_range(gpio_tbl[i].delay,
- gpio_tbl[i].delay + 1000);
- }
- } else {
- for (i = gpio_tbl_size - 1; i >= 0; i--) {
- if (gpio_tbl[i].flags)
- gpio_set_value_cansleep(gpio_tbl[i].gpio,
- GPIOF_OUT_INIT_LOW);
- }
- }
-
- return rc;
-}
-
-int msm_camera_config_single_vreg(struct device *dev,
- struct camera_vreg_t *cam_vreg, struct regulator **reg_ptr, int config)
-{
- int rc = 0;
- const char *vreg_name = NULL;
-
- if (!dev || !cam_vreg || !reg_ptr) {
- pr_err("%s: get failed NULL parameter\n", __func__);
- goto vreg_get_fail;
- }
- if (cam_vreg->type == VREG_TYPE_CUSTOM) {
- if (cam_vreg->custom_vreg_name == NULL) {
- pr_err("%s : can't find sub reg name",
- __func__);
- goto vreg_get_fail;
- }
- vreg_name = cam_vreg->custom_vreg_name;
- } else {
- if (cam_vreg->reg_name == NULL) {
- pr_err("%s : can't find reg name", __func__);
- goto vreg_get_fail;
- }
- vreg_name = cam_vreg->reg_name;
- }
-
- if (config) {
- CDBG("%s enable %s\n", __func__, vreg_name);
- *reg_ptr = regulator_get(dev, vreg_name);
- if (IS_ERR(*reg_ptr)) {
- pr_err("%s: %s get failed\n", __func__, vreg_name);
- *reg_ptr = NULL;
- goto vreg_get_fail;
- }
- if (regulator_count_voltages(*reg_ptr) > 0) {
- CDBG("%s: voltage min=%d, max=%d\n",
- __func__, cam_vreg->min_voltage,
- cam_vreg->max_voltage);
- rc = regulator_set_voltage(
- *reg_ptr, cam_vreg->min_voltage,
- cam_vreg->max_voltage);
- if (rc < 0) {
- pr_err("%s: %s set voltage failed\n",
- __func__, vreg_name);
- goto vreg_set_voltage_fail;
- }
- if (cam_vreg->op_mode >= 0) {
- rc = regulator_set_load(*reg_ptr,
- cam_vreg->op_mode);
- if (rc < 0) {
- pr_err(
- "%s: %s set optimum mode failed\n",
- __func__, vreg_name);
- goto vreg_set_opt_mode_fail;
- }
- }
- }
- rc = regulator_enable(*reg_ptr);
- if (rc < 0) {
- pr_err("%s: %s regulator_enable failed\n", __func__,
- vreg_name);
- goto vreg_unconfig;
- }
- } else {
- CDBG("%s disable %s\n", __func__, vreg_name);
- if (*reg_ptr) {
- CDBG("%s disable %s\n", __func__, vreg_name);
- regulator_disable(*reg_ptr);
- if (regulator_count_voltages(*reg_ptr) > 0) {
- if (cam_vreg->op_mode >= 0)
- regulator_set_load(*reg_ptr, 0);
- regulator_set_voltage(
- *reg_ptr, 0, cam_vreg->max_voltage);
- }
- regulator_put(*reg_ptr);
- *reg_ptr = NULL;
- } else {
- pr_err("%s can't disable %s\n", __func__, vreg_name);
- }
- }
-
- return 0;
-
-vreg_unconfig:
- if (regulator_count_voltages(*reg_ptr) > 0)
- regulator_set_load(*reg_ptr, 0);
-
-vreg_set_opt_mode_fail:
- if (regulator_count_voltages(*reg_ptr) > 0)
- regulator_set_voltage(*reg_ptr, 0,
- cam_vreg->max_voltage);
-
-vreg_set_voltage_fail:
- regulator_put(*reg_ptr);
- *reg_ptr = NULL;
-
-vreg_get_fail:
- return -EINVAL;
-}
-
-int msm_camera_request_gpio_table(struct gpio *gpio_tbl, uint8_t size,
- int gpio_en)
-{
- int rc = 0, i = 0, err = 0;
-
- if (!gpio_tbl || !size) {
- pr_err("%s:%d invalid gpio_tbl %pK / size %d\n", __func__,
- __LINE__, gpio_tbl, size);
- return -EINVAL;
- }
- for (i = 0; i < size; i++) {
- CDBG("%s:%d i %d, gpio %d dir %ld\n", __func__, __LINE__, i,
- gpio_tbl[i].gpio, gpio_tbl[i].flags);
- }
- if (gpio_en) {
- for (i = 0; i < size; i++) {
- err = gpio_request_one(gpio_tbl[i].gpio,
- gpio_tbl[i].flags, gpio_tbl[i].label);
- if (err) {
- /*
- * After GPIO request fails, contine to
- * apply new gpios, outout a error message
- * for driver bringup debug
- */
- pr_err("%s:%d gpio %d:%s request fails\n",
- __func__, __LINE__,
- gpio_tbl[i].gpio, gpio_tbl[i].label);
- }
- }
- } else {
- gpio_free_array(gpio_tbl, size);
- }
-
- return rc;
-}
-
-/* Get all clocks from DT */
-static int msm_camera_get_clk_info_internal(struct device *dev,
- struct msm_cam_clk_info **clk_info,
- struct clk ***clk_ptr,
- size_t *num_clk)
-{
- int rc = 0;
- size_t cnt, tmp;
- uint32_t *rates, i = 0;
- const char *clk_ctl = NULL;
- bool clock_cntl_support = false;
- struct device_node *of_node;
-
- of_node = dev->of_node;
-
- cnt = of_property_count_strings(of_node, "clock-names");
- if (cnt <= 0) {
- pr_err("err: No clocks found in DT=%zu\n", cnt);
- return -EINVAL;
- }
-
- tmp = of_property_count_u32_elems(of_node, "qcom,clock-rates");
- if (tmp <= 0) {
- pr_err("err: No clk rates device tree, count=%zu", tmp);
- return -EINVAL;
- }
-
- if (cnt != tmp) {
- pr_err("err: clk name/rates mismatch, strings=%zu, rates=%zu\n",
- cnt, tmp);
- return -EINVAL;
- }
-
- if (of_property_read_bool(of_node, "qcom,clock-cntl-support")) {
- tmp = of_property_count_strings(of_node,
- "qcom,clock-control");
- if (tmp <= 0) {
- pr_err("err: control strings not found in DT count=%zu",
- tmp);
- return -EINVAL;
- }
- if (cnt != tmp) {
- pr_err("err: controls mismatch, strings=%zu, ctl=%zu\n",
- cnt, tmp);
- return -EINVAL;
- }
- clock_cntl_support = true;
- }
-
- *num_clk = cnt;
-
- *clk_info = devm_kcalloc(dev, cnt,
- sizeof(struct msm_cam_clk_info), GFP_KERNEL);
- if (!*clk_info)
- return -ENOMEM;
-
- *clk_ptr = devm_kcalloc(dev, cnt, sizeof(struct clk *),
- GFP_KERNEL);
- if (!*clk_ptr) {
- rc = -ENOMEM;
- goto free_clk_info;
- }
-
- rates = devm_kcalloc(dev, cnt, sizeof(long), GFP_KERNEL);
- if (!rates) {
- rc = -ENOMEM;
- goto free_clk_ptr;
- }
-
- rc = of_property_read_u32_array(of_node, "qcom,clock-rates",
- rates, cnt);
- if (rc < 0) {
- pr_err("err: failed reading clock rates\n");
- rc = -EINVAL;
- goto free_rates;
- }
-
- for (i = 0; i < cnt; i++) {
- rc = of_property_read_string_index(of_node, "clock-names",
- i, &((*clk_info)[i].clk_name));
- if (rc < 0) {
- pr_err("%s reading clock-name failed index %d\n",
- __func__, i);
- rc = -EINVAL;
- goto free_rates;
- }
-
- CDBG("dbg: clk-name[%d] = %s\n", i, (*clk_info)[i].clk_name);
- if (clock_cntl_support) {
- rc = of_property_read_string_index(of_node,
- "qcom,clock-control", i, &clk_ctl);
- if (rc < 0) {
- pr_err("%s reading clock-control failed index %d\n",
- __func__, i);
- rc = -EINVAL;
- goto free_rates;
- }
-
- if (!strcmp(clk_ctl, "NO_SET_RATE")) {
- (*clk_info)[i].clk_rate = NO_SET_RATE;
- } else if (!strcmp(clk_ctl, "INIT_RATE")) {
- (*clk_info)[i].clk_rate = INIT_RATE;
- } else if (!strcmp(clk_ctl, "SET_RATE")) {
- (*clk_info)[i].clk_rate = rates[i];
- } else {
- pr_err("%s: error: clock control has invalid value\n",
- __func__);
- rc = -EINVAL;
- goto free_rates;
- }
- } else {
- (*clk_info)[i].clk_rate =
- (rates[i] == 0) ? (long)-1 : rates[i];
- }
-
- CDBG("dbg: clk-rate[%d] = rate: %ld\n",
- i, (*clk_info)[i].clk_rate);
-
- (*clk_ptr)[i] =
- devm_clk_get(dev, (*clk_info)[i].clk_name);
- if (IS_ERR((*clk_ptr)[i])) {
- rc = PTR_ERR((*clk_ptr)[i]);
- goto release_clk;
- }
- CDBG("clk ptr[%d] :%pK\n", i, (*clk_ptr)[i]);
- }
-
- devm_kfree(dev, rates);
-
- return rc;
-
-release_clk:
- for (--i; i >= 0; i--)
- devm_clk_put(dev, (*clk_ptr)[i]);
-free_rates:
- devm_kfree(dev, rates);
-free_clk_ptr:
- devm_kfree(dev, *clk_ptr);
-free_clk_info:
- devm_kfree(dev, *clk_info);
- return rc;
-}
-
-/* Get all clocks from DT for I2C devices */
-int msm_camera_i2c_dev_get_clk_info(struct device *dev,
- struct msm_cam_clk_info **clk_info,
- struct clk ***clk_ptr,
- size_t *num_clk)
-{
- int rc = 0;
-
- if (!dev || !clk_info || !clk_ptr || !num_clk)
- return -EINVAL;
-
- rc = msm_camera_get_clk_info_internal(dev, clk_info, clk_ptr, num_clk);
- return rc;
-}
-EXPORT_SYMBOL(msm_camera_i2c_dev_get_clk_info);
-
-/* Get all clocks from DT for platform devices */
-int msm_camera_get_clk_info(struct platform_device *pdev,
- struct msm_cam_clk_info **clk_info,
- struct clk ***clk_ptr,
- size_t *num_clk)
-{
- int rc = 0;
-
- if (!pdev || !&pdev->dev || !clk_info || !clk_ptr || !num_clk)
- return -EINVAL;
-
- rc = msm_camera_get_clk_info_internal(&pdev->dev,
- clk_info, clk_ptr, num_clk);
- return rc;
-}
-EXPORT_SYMBOL(msm_camera_get_clk_info);
-
-/* Get all clocks and multiple rates from DT */
-int msm_camera_get_clk_info_and_rates(
- struct platform_device *pdev,
- struct msm_cam_clk_info **pclk_info,
- struct clk ***pclks,
- uint32_t ***pclk_rates,
- size_t *num_set,
- size_t *num_clk)
-{
- int rc = 0, tmp_var, cnt, tmp;
- uint32_t i = 0, j = 0;
- struct device_node *of_node;
- uint32_t **rates;
- struct clk **clks;
- struct msm_cam_clk_info *clk_info;
-
- if (!pdev || !pclk_info || !num_clk
- || !pclk_rates || !pclks || !num_set)
- return -EINVAL;
-
- of_node = pdev->dev.of_node;
-
- cnt = of_property_count_strings(of_node, "clock-names");
- if (cnt <= 0) {
- pr_err("err: No clocks found in DT=%d\n", cnt);
- return -EINVAL;
- }
-
- tmp = of_property_count_u32_elems(of_node, "qcom,clock-rates");
- if (tmp <= 0) {
- pr_err("err: No clk rates device tree, count=%d\n", tmp);
- return -EINVAL;
- }
-
- if ((tmp % cnt) != 0) {
- pr_err("err: clk name/rates mismatch, strings=%d, rates=%d\n",
- cnt, tmp);
- return -EINVAL;
- }
-
- *num_clk = cnt;
- *num_set = (tmp / cnt);
-
- clk_info = devm_kcalloc(&pdev->dev, cnt,
- sizeof(struct msm_cam_clk_info), GFP_KERNEL);
- if (!clk_info)
- return -ENOMEM;
-
- clks = devm_kcalloc(&pdev->dev, cnt, sizeof(struct clk *),
- GFP_KERNEL);
- if (!clks) {
- rc = -ENOMEM;
- goto free_clk_info;
- }
-
- rates = devm_kcalloc(&pdev->dev, *num_set,
- sizeof(uint32_t *), GFP_KERNEL);
- if (!rates) {
- rc = -ENOMEM;
- goto free_clk;
- }
-
- for (i = 0; i < *num_set; i++) {
- rates[i] = devm_kcalloc(&pdev->dev, *num_clk,
- sizeof(uint32_t), GFP_KERNEL);
- if (!rates[i]) {
- rc = -ENOMEM;
- for (--i; i >= 0; i--)
- devm_kfree(&pdev->dev, rates[i]);
- goto free_rate;
- }
- }
-
- tmp_var = 0;
- for (i = 0; i < *num_set; i++) {
- for (j = 0; j < *num_clk; j++) {
- rc = of_property_read_u32_index(of_node,
- "qcom,clock-rates", tmp_var++, &rates[i][j]);
- if (rc < 0) {
- pr_err("err: failed reading clock rates\n");
- rc = -EINVAL;
- goto free_rate_array;
- }
- CDBG("Clock rate idx %d idx %d value %d\n",
- i, j, rates[i][j]);
- }
- }
- for (i = 0; i < *num_clk; i++) {
- rc = of_property_read_string_index(of_node, "clock-names",
- i, &clk_info[i].clk_name);
- if (rc < 0) {
- pr_err("%s reading clock-name failed index %d\n",
- __func__, i);
- rc = -EINVAL;
- goto free_rate_array;
- }
-
- CDBG("dbg: clk-name[%d] = %s\n", i, clk_info[i].clk_name);
-
- clks[i] =
- devm_clk_get(&pdev->dev, clk_info[i].clk_name);
- if (IS_ERR(clks[i])) {
- rc = PTR_ERR(clks[i]);
- goto release_clk;
- }
- CDBG("clk ptr[%d] :%pK\n", i, clks[i]);
- }
- *pclk_info = clk_info;
- *pclks = clks;
- *pclk_rates = rates;
-
- return rc;
-
-release_clk:
- for (--i; i >= 0; i--)
- devm_clk_put(&pdev->dev, clks[i]);
-free_rate_array:
- for (i = 0; i < *num_set; i++)
- devm_kfree(&pdev->dev, rates[i]);
-free_rate:
- devm_kfree(&pdev->dev, rates);
-free_clk:
- devm_kfree(&pdev->dev, clks);
-free_clk_info:
- devm_kfree(&pdev->dev, clk_info);
- return rc;
-}
-EXPORT_SYMBOL(msm_camera_get_clk_info_and_rates);
-
-/* Enable/Disable all clocks */
-int msm_camera_clk_enable(struct device *dev,
- struct msm_cam_clk_info *clk_info,
- struct clk **clk_ptr, int num_clk, int enable)
-{
- int i;
- int rc = 0;
- long clk_rate;
-
- if (enable) {
- for (i = 0; i < num_clk; i++) {
- pr_err("enable %s\n", clk_info[i].clk_name);
- if (clk_info[i].clk_rate > 0) {
- clk_rate = clk_round_rate(clk_ptr[i],
- clk_info[i].clk_rate);
- if (clk_rate < 0) {
- pr_err("%s round failed\n",
- clk_info[i].clk_name);
- goto cam_clk_set_err;
- }
- rc = clk_set_rate(clk_ptr[i],
- clk_rate);
- if (rc < 0) {
- pr_err("%s set failed\n",
- clk_info[i].clk_name);
- goto cam_clk_set_err;
- }
-
- } else if (clk_info[i].clk_rate == INIT_RATE) {
- clk_rate = clk_get_rate(clk_ptr[i]);
- if (clk_rate == 0) {
- clk_rate =
- clk_round_rate(clk_ptr[i], 0);
- if (clk_rate < 0) {
- pr_err("%s round rate failed\n",
- clk_info[i].clk_name);
- goto cam_clk_set_err;
- }
- rc = clk_set_rate(clk_ptr[i],
- clk_rate);
- if (rc < 0) {
- pr_err("%s set rate failed\n",
- clk_info[i].clk_name);
- goto cam_clk_set_err;
- }
- }
- }
- rc = clk_prepare_enable(clk_ptr[i]);
- if (rc < 0) {
- pr_err("%s enable failed\n",
- clk_info[i].clk_name);
- goto cam_clk_enable_err;
- }
- if (clk_info[i].delay > 20) {
- msleep(clk_info[i].delay);
- } else if (clk_info[i].delay) {
- usleep_range(clk_info[i].delay * 1000,
- (clk_info[i].delay * 1000) + 1000);
- }
- }
- } else {
- for (i = num_clk - 1; i >= 0; i--) {
- if (clk_ptr[i] != NULL) {
- pr_err("%s disable %s\n", __func__,
- clk_info[i].clk_name);
- clk_disable_unprepare(clk_ptr[i]);
- }
- }
- }
- return rc;
-
-cam_clk_enable_err:
-cam_clk_set_err:
- for (i--; i >= 0; i--) {
- if (clk_ptr[i] != NULL)
- clk_disable_unprepare(clk_ptr[i]);
- }
- return rc;
-}
-EXPORT_SYMBOL(msm_camera_clk_enable);
-
-/* Set rate on a specific clock */
-long msm_camera_clk_set_rate(struct device *dev,
- struct clk *clk,
- long clk_rate)
-{
- int rc = 0;
- long rate = 0;
-
- if (!dev || !clk || (clk_rate < 0))
- return -EINVAL;
-
- CDBG("clk : %pK, enable : %ld\n", clk, clk_rate);
-
- if (clk_rate > 0) {
- rate = clk_round_rate(clk, clk_rate);
- if (rate < 0) {
- pr_err("round rate failed\n");
- return -EINVAL;
- }
-
- rc = clk_set_rate(clk, rate);
- if (rc < 0) {
- pr_err("set rate failed\n");
- return -EINVAL;
- }
- }
-
- return rate;
-}
-EXPORT_SYMBOL(msm_camera_clk_set_rate);
-
-/* release memory allocated for clocks */
-static int msm_camera_put_clk_info_internal(struct device *dev,
- struct msm_cam_clk_info **clk_info,
- struct clk ***clk_ptr, int cnt)
-{
- int i;
-
- for (i = cnt - 1; i >= 0; i--) {
- if (clk_ptr[i] != NULL)
- devm_clk_put(dev, (*clk_ptr)[i]);
-
- CDBG("clk ptr[%d] :%pK\n", i, (*clk_ptr)[i]);
- }
- devm_kfree(dev, *clk_info);
- devm_kfree(dev, *clk_ptr);
- *clk_info = NULL;
- *clk_ptr = NULL;
- return 0;
-}
-
-/* release memory allocated for clocks for i2c devices */
-int msm_camera_i2c_dev_put_clk_info(struct device *dev,
- struct msm_cam_clk_info **clk_info,
- struct clk ***clk_ptr, int cnt)
-{
- int rc = 0;
-
- if (!dev || !clk_info || !clk_ptr)
- return -EINVAL;
-
- rc = msm_camera_put_clk_info_internal(dev, clk_info, clk_ptr, cnt);
- return rc;
-}
-EXPORT_SYMBOL(msm_camera_i2c_dev_put_clk_info);
-
-/* release memory allocated for clocks for platform devices */
-int msm_camera_put_clk_info(struct platform_device *pdev,
- struct msm_cam_clk_info **clk_info,
- struct clk ***clk_ptr, int cnt)
-{
- int rc = 0;
-
- if (!pdev || !&pdev->dev || !clk_info || !clk_ptr)
- return -EINVAL;
-
- rc = msm_camera_put_clk_info_internal(&pdev->dev,
- clk_info, clk_ptr, cnt);
- return rc;
-}
-EXPORT_SYMBOL(msm_camera_put_clk_info);
-
-int msm_camera_put_clk_info_and_rates(struct platform_device *pdev,
- struct msm_cam_clk_info **clk_info,
- struct clk ***clk_ptr, uint32_t ***clk_rates,
- size_t set, size_t cnt)
-{
- int i;
-
- for (i = set - 1; i >= 0; i--)
- devm_kfree(&pdev->dev, (*clk_rates)[i]);
-
- devm_kfree(&pdev->dev, *clk_rates);
- for (i = cnt - 1; i >= 0; i--) {
- if (clk_ptr[i] != NULL)
- devm_clk_put(&pdev->dev, (*clk_ptr)[i]);
- CDBG("clk ptr[%d] :%pK\n", i, (*clk_ptr)[i]);
- }
- devm_kfree(&pdev->dev, *clk_info);
- devm_kfree(&pdev->dev, *clk_ptr);
- *clk_info = NULL;
- *clk_ptr = NULL;
- *clk_rates = NULL;
-
- return 0;
-}
-EXPORT_SYMBOL(msm_camera_put_clk_info_and_rates);
-
-/* Get regulators from DT */
-int msm_camera_get_regulator_info(struct platform_device *pdev,
- struct msm_cam_regulator **vdd_info,
- int *num_reg)
-{
- uint32_t cnt;
- int i, rc;
- struct device_node *of_node;
- char prop_name[32];
- struct msm_cam_regulator *tmp_reg;
-
- if (!pdev || !vdd_info || !num_reg)
- return -EINVAL;
-
- of_node = pdev->dev.of_node;
-
- if (!of_get_property(of_node, "qcom,vdd-names", NULL)) {
- pr_err("err: Regulators property not found\n");
- return -EINVAL;
- }
-
- cnt = of_property_count_strings(of_node, "qcom,vdd-names");
- if (cnt <= 0) {
- pr_err("err: no regulators found in device tree, count=%d",
- cnt);
- return -EINVAL;
- }
-
- tmp_reg = devm_kcalloc(&pdev->dev, cnt,
- sizeof(struct msm_cam_regulator), GFP_KERNEL);
- if (!tmp_reg)
- return -ENOMEM;
-
- for (i = 0; i < cnt; i++) {
- rc = of_property_read_string_index(of_node,
- "qcom,vdd-names", i, &tmp_reg[i].name);
- if (rc < 0) {
- pr_err("Fail to fetch regulators: %d\n", i);
- rc = -EINVAL;
- goto err1;
- }
-
- CDBG("regulator-names[%d] = %s\n", i, tmp_reg[i].name);
-
- snprintf(prop_name, 32, "%s-supply", tmp_reg[i].name);
-
- if (of_get_property(of_node, prop_name, NULL)) {
- tmp_reg[i].vdd =
- devm_regulator_get(&pdev->dev, tmp_reg[i].name);
- if (IS_ERR(tmp_reg[i].vdd)) {
- rc = -EINVAL;
- pr_err("Fail to get regulator :%d\n", i);
- goto err1;
- }
- } else {
- pr_err("Regulator phandle not found :%s\n",
- tmp_reg[i].name);
- rc = -EINVAL;
- goto err1;
- }
- CDBG("vdd ptr[%d] :%pK\n", i, tmp_reg[i].vdd);
- }
-
- *num_reg = cnt;
- *vdd_info = tmp_reg;
-
- return 0;
-
-err1:
- for (--i; i >= 0; i--)
- devm_regulator_put(tmp_reg[i].vdd);
- devm_kfree(&pdev->dev, tmp_reg);
- return rc;
-}
-EXPORT_SYMBOL(msm_camera_get_regulator_info);
-
-
-/* Enable/Disable regulators */
-int msm_camera_regulator_enable(struct msm_cam_regulator *vdd_info,
- int cnt, int enable)
-{
- int i;
- int rc;
- struct msm_cam_regulator *tmp = vdd_info;
-
- if (!tmp) {
- pr_err("Invalid params");
- return -EINVAL;
- }
- CDBG("cnt : %d\n", cnt);
-
- for (i = 0; i < cnt; i++) {
- if (tmp && !IS_ERR_OR_NULL(tmp->vdd)) {
- CDBG("name : %s, enable : %d\n", tmp->name, enable);
- if (enable) {
- rc = regulator_enable(tmp->vdd);
- if (rc < 0) {
- pr_err("regulator enable failed %d\n",
- i);
- goto disable_reg;
- }
- } else {
- rc = regulator_disable(tmp->vdd);
- if (rc < 0)
- pr_err("regulator disable failed %d\n",
- i);
- }
- }
- tmp++;
- }
-
- return 0;
-disable_reg:
- for (--i; i > 0; i--) {
- --tmp;
- if (!IS_ERR_OR_NULL(tmp->vdd))
- regulator_disable(tmp->vdd);
- }
- return rc;
-}
-EXPORT_SYMBOL(msm_camera_regulator_enable);
-
-/* Put regulators regulators */
-void msm_camera_put_regulators(struct platform_device *pdev,
- struct msm_cam_regulator **vdd_info, int cnt)
-{
- int i;
-
- if (!vdd_info || !*vdd_info) {
- pr_err("Invalid params\n");
- return;
- }
-
- for (i = cnt - 1; i >= 0; i--) {
- if (vdd_info[i] && !IS_ERR_OR_NULL(vdd_info[i]->vdd))
- devm_regulator_put(vdd_info[i]->vdd);
- CDBG("vdd ptr[%d] :%pK\n", i, vdd_info[i]->vdd);
- }
-
- devm_kfree(&pdev->dev, *vdd_info);
- *vdd_info = NULL;
-}
-EXPORT_SYMBOL(msm_camera_put_regulators);
-
-struct resource *msm_camera_get_irq(struct platform_device *pdev,
- char *irq_name)
-{
- if (!pdev || !irq_name) {
- pr_err("Invalid params\n");
- return NULL;
- }
-
- CDBG("Get irq for %s\n", irq_name);
- return platform_get_resource_byname(pdev, IORESOURCE_IRQ, irq_name);
-}
-EXPORT_SYMBOL(msm_camera_get_irq);
-
-int msm_camera_register_irq(struct platform_device *pdev,
- struct resource *irq, irq_handler_t handler,
- unsigned long irqflags, char *irq_name, void *dev_id)
-{
- int rc = 0;
-
- if (!pdev || !irq || !handler || !irq_name || !dev_id) {
- pr_err("Invalid params\n");
- return -EINVAL;
- }
-
- rc = devm_request_irq(&pdev->dev, irq->start, handler,
- irqflags, irq_name, dev_id);
- if (rc < 0) {
- pr_err("irq request fail\n");
- rc = -EINVAL;
- }
-
- CDBG("Registered irq for %s[resource - %pK]\n", irq_name, irq);
-
- return rc;
-}
-EXPORT_SYMBOL(msm_camera_register_irq);
-
-int msm_camera_register_threaded_irq(struct platform_device *pdev,
- struct resource *irq, irq_handler_t handler_fn,
- irq_handler_t thread_fn, unsigned long irqflags,
- const char *irq_name, void *dev_id)
-{
- int rc = 0;
-
- if (!pdev || !irq || !irq_name || !dev_id) {
- pr_err("Invalid params\n");
- return -EINVAL;
- }
-
- rc = devm_request_threaded_irq(&pdev->dev, irq->start, handler_fn,
- thread_fn, irqflags, irq_name, dev_id);
- if (rc < 0) {
- pr_err("irq request fail\n");
- rc = -EINVAL;
- }
-
- CDBG("Registered irq for %s[resource - %pK]\n", irq_name, irq);
-
- return rc;
-}
-EXPORT_SYMBOL(msm_camera_register_threaded_irq);
-
-int msm_camera_enable_irq(struct resource *irq, int enable)
-{
- if (!irq) {
- pr_err("Invalid params\n");
- return -EINVAL;
- }
-
- CDBG("irq Enable %d\n", enable);
- if (enable)
- enable_irq(irq->start);
- else
- disable_irq(irq->start);
-
- return 0;
-}
-EXPORT_SYMBOL(msm_camera_enable_irq);
-
-int msm_camera_unregister_irq(struct platform_device *pdev,
- struct resource *irq, void *dev_id)
-{
-
- if (!pdev || !irq || !dev_id) {
- pr_err("Invalid params\n");
- return -EINVAL;
- }
-
- CDBG("Un Registering irq for [resource - %pK]\n", irq);
- devm_free_irq(&pdev->dev, irq->start, dev_id);
-
- return 0;
-}
-EXPORT_SYMBOL(msm_camera_unregister_irq);
-
-void __iomem *msm_camera_get_reg_base(struct platform_device *pdev,
- char *device_name, int reserve_mem)
-{
- struct resource *mem;
- void *base;
-
- if (!pdev || !device_name) {
- pr_err("Invalid params\n");
- return NULL;
- }
-
- CDBG("device name :%s\n", device_name);
- mem = platform_get_resource_byname(pdev,
- IORESOURCE_MEM, device_name);
- if (!mem) {
- pr_err("err: mem resource %s not found\n", device_name);
- return NULL;
- }
-
- if (reserve_mem) {
- CDBG("device:%pK, mem : %pK, size : %d\n",
- &pdev->dev, mem, (int)resource_size(mem));
- if (!devm_request_mem_region(&pdev->dev, mem->start,
- resource_size(mem),
- device_name)) {
- pr_err("err: no valid mem region for device:%s\n",
- device_name);
- return NULL;
- }
- }
-
- base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
- if (!base) {
- devm_release_mem_region(&pdev->dev, mem->start,
- resource_size(mem));
- pr_err("err: ioremap failed: %s\n", device_name);
- return NULL;
- }
-
- CDBG("base : %pK\n", base);
- return base;
-}
-EXPORT_SYMBOL(msm_camera_get_reg_base);
-
-uint32_t msm_camera_get_res_size(struct platform_device *pdev,
- char *device_name)
-{
- struct resource *mem;
-
- if (!pdev || !device_name) {
- pr_err("Invalid params\n");
- return 0;
- }
-
- CDBG("device name :%s\n", device_name);
- mem = platform_get_resource_byname(pdev,
- IORESOURCE_MEM, device_name);
- if (!mem) {
- pr_err("err: mem resource %s not found\n", device_name);
- return 0;
- }
- return resource_size(mem);
-}
-EXPORT_SYMBOL(msm_camera_get_res_size);
-
-
-int msm_camera_put_reg_base(struct platform_device *pdev,
- void __iomem *base, char *device_name, int reserve_mem)
-{
- struct resource *mem;
-
- if (!pdev || !base || !device_name) {
- pr_err("Invalid params\n");
- return -EINVAL;
- }
-
- CDBG("device name :%s\n", device_name);
- mem = platform_get_resource_byname(pdev,
- IORESOURCE_MEM, device_name);
- if (!mem) {
- pr_err("err: mem resource %s not found\n", device_name);
- return -EINVAL;
- }
- CDBG("mem : %pK, size : %d\n", mem, (int)resource_size(mem));
-
- devm_iounmap(&pdev->dev, base);
- if (reserve_mem)
- devm_release_mem_region(&pdev->dev,
- mem->start, resource_size(mem));
-
- return 0;
-}
-EXPORT_SYMBOL(msm_camera_put_reg_base);
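For reference, the helpers deleted above were typically composed at probe time roughly as in the sketch below; the device name, IRQ handler and probe function are hypothetical and only illustrate the call sequence of msm_camera_get_reg_base(), msm_camera_get_irq(), msm_camera_register_irq() and msm_camera_put_reg_base().

#include <linux/interrupt.h>
#include <linux/platform_device.h>

/* Hypothetical IRQ handler used only for this illustration. */
static irqreturn_t example_hw_irq(int irq_num, void *data)
{
	return IRQ_HANDLED;
}

/* Sketch of a probe path built on the removed msm_camera_* helpers. */
static int example_hw_probe(struct platform_device *pdev)
{
	void __iomem *base;
	struct resource *irq;
	int rc;

	/* Map the "example_hw" register block and reserve its region. */
	base = msm_camera_get_reg_base(pdev, "example_hw", 1);
	if (!base)
		return -ENOMEM;

	/* Look up the named IRQ resource and hook the handler. */
	irq = msm_camera_get_irq(pdev, "example_hw");
	if (!irq) {
		rc = -EINVAL;
		goto put_base;
	}

	rc = msm_camera_register_irq(pdev, irq, example_hw_irq,
		IRQF_TRIGGER_RISING, "example_hw", pdev);
	if (rc < 0)
		goto put_base;

	return 0;

put_base:
	/* Unmap and release the reserved region on failure. */
	msm_camera_put_reg_base(pdev, base, "example_hw", 1);
	return rc;
}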
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_soc_api.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_soc_api.h
deleted file mode 100644
index c316090..0000000
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_soc_api.h
+++ /dev/null
@@ -1,473 +0,0 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _CAM_SENSOR_SOC_API_H_
-#define _CAM_SENSOR_SOC_API_H_
-
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/of_platform.h>
-#include <linux/spinlock_types.h>
-#include <linux/mutex.h>
-#include <linux/clk.h>
-#include <linux/regulator/consumer.h>
-#include <linux/gpio.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include "cam_sensor_cmn_header.h"
-
-struct msm_cam_regulator {
- const char *name;
- struct regulator *vdd;
-};
-
-struct msm_gpio_set_tbl {
- unsigned int gpio;
- unsigned long flags;
- uint32_t delay;
-};
-
-/**
- * @brief : Gets clock information from dtsi
- *
- * This function extracts the clocks information for a specific
- * platform device
- *
- * @param pdev : Platform device to get clocks information
- * @param clk_info : Pointer to populate clock information array
- * @param clk_ptr : Pointer to populate clock resource pointers
- * @param num_clk: Pointer to populate the number of clocks
- * extracted from dtsi
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int msm_camera_get_clk_info(struct platform_device *pdev,
- struct msm_cam_clk_info **clk_info,
- struct clk ***clk_ptr,
- size_t *num_clk);
-
-/**
- * @brief : Gets clock information from dtsi
- *
- * This function extracts the clocks information for a specific
- * i2c device
- *
- * @param dev : i2c device to get clocks information
- * @param clk_info : Pointer to populate clock information array
- * @param clk_ptr : Pointer to populate clock resource pointers
- * @param num_clk: Pointer to populate the number of clocks
- * extracted from dtsi
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int msm_camera_i2c_dev_get_clk_info(struct device *dev,
- struct msm_cam_clk_info **clk_info,
- struct clk ***clk_ptr,
- size_t *num_clk);
-
-/**
- * @brief : Gets clock information and rates from dtsi
- *
- * This function extracts the clocks information for a specific
- * platform device
- *
- * @param pdev : Platform device to get clocks information
- * @param clk_info : Pointer to populate clock information array
- * @param clk_ptr : Pointer to populate clock resource pointers
- * @param clk_rates : Pointer to populate clock rates
- * @param num_set: Pointer to populate the number of sets of rates
- * @param num_clk: Pointer to populate the number of clocks
- * extracted from dtsi
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int msm_camera_get_clk_info_and_rates(
- struct platform_device *pdev,
- struct msm_cam_clk_info **clk_info,
- struct clk ***clk_ptr,
- uint32_t ***clk_rates,
- size_t *num_set,
- size_t *num_clk);
-
-/**
- * @brief : Puts clock information
- *
- * This function releases the memory allocated for the clocks
- *
- * @param pdev : Pointer to platform device
- * @param clk_info : Pointer to release the allocated memory
- * @param clk_ptr : Pointer to release the clock resources
- * @param cnt : Number of clk resources
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int msm_camera_put_clk_info(struct platform_device *pdev,
- struct msm_cam_clk_info **clk_info,
- struct clk ***clk_ptr, int cnt);
-
-/**
- * @brief : Puts clock information
- *
- * This function releases the memory allocated for the clocks
- *
- * @param dev : Pointer to i2c device
- * @param clk_info : Pointer to release the allocated memory
- * @param clk_ptr : Pointer to release the clock resources
- * @param cnt : Number of clk resources
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int msm_camera_i2c_dev_put_clk_info(struct device *dev,
- struct msm_cam_clk_info **clk_info,
- struct clk ***clk_ptr, int cnt);
-
-/**
- * @brief : Puts clock information
- *
- * This function releases the memory allocated for the clocks
- *
- * @param pdev : Pointer to platform device
- * @param clk_info : Pointer to release the allocated memory
- * @param clk_ptr : Pointer to release the clock resources
- * @param clk_ptr : Pointer to release the clock rates
- * @param set : Number of sets of clock rates
- * @param cnt : Number of clk resources
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int msm_camera_put_clk_info_and_rates(struct platform_device *pdev,
- struct msm_cam_clk_info **clk_info,
- struct clk ***clk_ptr, uint32_t ***clk_rates,
- size_t set, size_t cnt);
-/**
- * @brief : Enable clocks
- *
- * This function enables the clocks for a specified device
- *
- * @param dev : Device to get clocks information
- * @param clk_info : Pointer to populate clock information
- * @param clk_ptr : Pointer to populate clock information
- * @param num_clk: Pointer to populate the number of clocks
- * extracted from dtsi
- * @param enable : Flag to specify enable/disable
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-
-int msm_camera_clk_enable(struct device *dev,
- struct msm_cam_clk_info *clk_info,
- struct clk **clk_ptr,
- int num_clk,
- int enable);
-/**
- * @brief : Set clock rate
- *
- * This function sets the rate for a specified clock and
- * returns the rounded value
- *
- * @param dev : Device to get clocks information
- * @param clk : Pointer to clock to set rate
- * @param clk_rate : Rate to be set
- *
- * @return Status of operation. Negative in case of error. clk rate otherwise.
- */
-
-long msm_camera_clk_set_rate(struct device *dev,
- struct clk *clk,
- long clk_rate);
-/**
- * @brief : Gets regulator info
- *
- * This function extracts the regulator information for a specific
- * platform device
- *
- * @param pdev : platform device to get regulator information
- * @param vdd_info: Pointer to populate the regulator names
- * @param num_reg: Pointer to populate the number of regulators
- * extracted from dtsi
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int msm_camera_get_regulator_info(struct platform_device *pdev,
- struct msm_cam_regulator **vdd_info, int *num_reg);
-/**
- * @brief : Enable/Disable the regultors
- *
- * This function enables/disables the regulators for a specific
- * platform device
- *
- * @param vdd_info: Pointer to list of regulators
- * @param cnt: Number of regulators to enable/disable
- * @param enable: Flags specifies either enable/disable
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-
-int msm_camera_regulator_enable(struct msm_cam_regulator *vdd_info,
- int cnt, int enable);
-
-/**
- * @brief : Release the regulators
- *
- * This function releases the regulator resources.
- *
- * @param pdev: Pointer to platform device
- * @param vdd_info: Pointer to list of regulators
- * @param cnt: Number of regulators to release
- */
-
-void msm_camera_put_regulators(struct platform_device *pdev,
- struct msm_cam_regulator **vdd_info, int cnt);
-/**
- * @brief : Get the IRQ resource
- *
- * This function gets the irq resource from dtsi for a specific
- * platform device
- *
- * @param pdev : Platform device to get IRQ
- * @param irq_name: Name of the IRQ resource to get from DTSI
- *
- * @return Pointer to resource if success else null
- */
-
-struct resource *msm_camera_get_irq(struct platform_device *pdev,
- char *irq_name);
-/**
- * @brief : Register the IRQ
- *
- * This function registers the irq resource for specified hardware
- *
- * @param pdev : Platform device to register IRQ resource
- * @param irq : IRQ resource
- * @param handler : IRQ handler
- * @param irqflags : IRQ flags
- * @param irq_name: Name of the IRQ
- * @param dev : Token of the device
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-
-int msm_camera_register_irq(struct platform_device *pdev,
- struct resource *irq,
- irq_handler_t handler,
- unsigned long irqflags,
- char *irq_name,
- void *dev);
-
-/**
- * @brief : Register the threaded IRQ
- *
- * This function registers the irq resource for specified hardware
- *
- * @param pdev : Platform device to register IRQ resource
- * @param irq : IRQ resource
- * @param handler_fn : IRQ handler function
- * @param thread_fn : thread handler function
- * @param irqflags : IRQ flags
- * @param irq_name: Name of the IRQ
- * @param dev : Token of the device
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-
-int msm_camera_register_threaded_irq(struct platform_device *pdev,
- struct resource *irq,
- irq_handler_t handler_fn,
- irq_handler_t thread_fn,
- unsigned long irqflags,
- const char *irq_name,
- void *dev);
-
-/**
- * @brief : Enable/Disable the IRQ
- *
- * This function enables or disables a specific IRQ
- *
- * @param irq : IRQ resource
- * @param flag : flag to enable/disable
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-
-int msm_camera_enable_irq(struct resource *irq, int flag);
-
-/**
- * @brief : UnRegister the IRQ
- *
- * This function Unregisters/Frees the irq resource
- *
- * @param pdev : Pointer to platform device
- * @param irq : IRQ resource
- * @param dev : Token of the device
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-
-int msm_camera_unregister_irq(struct platform_device *pdev,
- struct resource *irq, void *dev_id);
-
-/**
- * @brief : Gets device register base
- *
- * This function extracts the device's register base from the dtsi
- * for the specified platform device
- *
- * @param pdev : Platform device to get regulator infor
- * @param device_name : Name of the device to fetch the register base
- * @param reserve_mem : Flag to decide whether to reserve memory
- * region or not.
- *
- * @return Pointer to resource if success else null
- */
-
-void __iomem *msm_camera_get_reg_base(struct platform_device *pdev,
- char *device_name, int reserve_mem);
-
-/**
- * @brief : Puts device register base
- *
- * This function releases the memory region for the specified
- * resource
- *
- * @param pdev : Pointer to platform device
- * @param base : Pointer to base to unmap
- * @param device_name : Device name
- * @param reserve_mem : Flag to decide whether to release memory
- * region or not.
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-
-int msm_camera_put_reg_base(struct platform_device *pdev, void __iomem *base,
- char *device_name, int reserve_mem);
-
-/**
- * @brief : Gets resource size
- *
- * This function returns the size of the resource for the
- * specified platform device
- *
- * @param pdev : Platform device to get regulator infor
- * @param device_name : Name of the device to fetch the register base
- *
- * @return size of the resource
- */
-
-uint32_t msm_camera_get_res_size(struct platform_device *pdev,
- char *device_name);
-
-/**
- * @brief : Selects clock source
- *
- *
- * @param dev : Token of the device
- * @param clk_info : Clock Info structure
- * @param clk_src_info : Clock Info structure
- * @param num_clk : Number of clocks
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int msm_cam_clk_sel_src(struct device *dev, struct msm_cam_clk_info *clk_info,
- struct msm_cam_clk_info *clk_src_info, int num_clk);
-
-/**
- * @brief : Enables the clock
- *
- *
- * @param dev : Token of the device
- * @param clk_info : Clock Info structure
- * @param clk_tr : Pointer to lock strucure
- * @param num_clk : Number of clocks
- * @param enable : Enable/disable the clock
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int msm_cam_clk_enable(struct device *dev, struct msm_cam_clk_info *clk_info,
- struct clk **clk_ptr, int num_clk, int enable);
-
-/**
- * @brief : Configures voltage regulator
- *
- *
- * @param dev : Token of the device
- * @param cam_vreg : Regulator dt structure
- * @param num_vreg : Number of regulators
- * @param vreg_seq : Regulator sequence type
- * @param num_clk : Number of clocks
- * @param reg_ptr : Regulator pointer
- * @param config : Enable/disable configuring the regulator
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int msm_camera_config_vreg(struct device *dev, struct camera_vreg_t *cam_vreg,
- int num_vreg, enum msm_camera_vreg_name_t *vreg_seq,
- int num_vreg_seq, struct regulator **reg_ptr, int config);
-
-/**
- * @brief : Enables voltage regulator
- *
- *
- * @param dev : Token of the device
- * @param cam_vreg : Regulator dt structure
- * @param num_vreg : Number of regulators
- * @param vreg_seq : Regulator sequence type
- * @param num_clk : Number of clocks
- * @param reg_ptr : Regulator pointer
- * @param config : Enable/disable configuring the regulator
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int msm_camera_enable_vreg(struct device *dev, struct camera_vreg_t *cam_vreg,
- int num_vreg, enum msm_camera_vreg_name_t *vreg_seq,
- int num_vreg_seq, struct regulator **reg_ptr, int enable);
-
-/**
- * @brief : Sets table of GPIOs
- *
- * @param gpio_tbl : GPIO table parsed from dt
- * @param gpio_tbl_size : Size of GPIO table
- * @param gpio_en : Enable/disable the GPIO
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int msm_camera_set_gpio_table(struct msm_gpio_set_tbl *gpio_tbl,
- uint8_t gpio_tbl_size, int gpio_en);
-
-/**
- * @brief : Configures single voltage regulator
- *
- *
- * @param dev : Token of the device
- * @param cam_vreg : Regulator dt structure
- * @param num_vreg : Number of regulators
- * @param reg_ptr : Regulator pointer
- * @param config : Enable/disable configuring the regulator
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int msm_camera_config_single_vreg(struct device *dev,
- struct camera_vreg_t *cam_vreg, struct regulator **reg_ptr, int config);
-
-/**
- * @brief : Request table of gpios
- *
- *
- * @param gpio_tbl : Table of GPIOs
- * @param size : Size of table
- * @param gpio_en : Enable/disable the gpio
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int msm_camera_request_gpio_table(struct gpio *gpio_tbl, uint8_t size,
- int gpio_en);
-
-#endif /* _CAM_SENSOR_SOC_API_H_ */
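The header removed above also declared the old regulator helpers built around struct msm_cam_regulator. For contrast with the soc_info-based handling that the hunks below adopt, here is a minimal sketch of how those helpers were typically chained; the wrapper function is hypothetical and error handling is trimmed.

/* Hypothetical wrapper showing the removed regulator helper sequence. */
static int example_old_vreg_up(struct platform_device *pdev)
{
	struct msm_cam_regulator *vdd_info = NULL;
	int num_reg = 0;
	int rc;

	/* Parse "qcom,vdd-names" and take handles for each regulator. */
	rc = msm_camera_get_regulator_info(pdev, &vdd_info, &num_reg);
	if (rc < 0)
		return rc;

	/* Enable all parsed regulators; pass 0 to disable them again. */
	rc = msm_camera_regulator_enable(vdd_info, num_reg, 1);
	if (rc < 0)
		msm_camera_put_regulators(pdev, &vdd_info, num_reg);

	return rc;
}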
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
index 44294e8..9f16e93 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
@@ -12,7 +12,6 @@
#include <linux/kernel.h>
#include "cam_sensor_util.h"
-#include "cam_sensor_soc_api.h"
#define CAM_SENSOR_PINCTRL_STATE_SLEEP "cam_suspend"
#define CAM_SENSOR_PINCTRL_STATE_DEFAULT "cam_default"
@@ -324,39 +323,47 @@ int cam_sensor_i2c_pkt_parser(struct i2c_settings_array *i2c_reg_settings,
return rc;
}
-int32_t msm_camera_fill_vreg_params(struct camera_vreg_t *cam_vreg,
- int num_vreg, struct cam_sensor_power_setting *power_setting,
+int32_t msm_camera_fill_vreg_params(
+ struct cam_hw_soc_info *soc_info,
+ struct cam_sensor_power_setting *power_setting,
uint16_t power_setting_size)
{
int32_t rc = 0, j = 0, i = 0;
+ uint32_t num_vreg;
/* Validate input parameters */
- if (!cam_vreg || !power_setting) {
- pr_err("%s:%d failed: cam_vreg %pK power_setting %pK", __func__,
- __LINE__, cam_vreg, power_setting);
+ if (!soc_info || !power_setting) {
+ pr_err("%s:%d failed: soc_info %pK power_setting %pK", __func__,
+ __LINE__, soc_info, power_setting);
return -EINVAL;
}
- /* Validate size of num_vreg */
+ num_vreg = soc_info->num_rgltr;
+
if (num_vreg <= 0) {
- pr_err("failed: num_vreg %d", num_vreg);
+ pr_err("%s:%d failed: num_vreg %d", __func__, __LINE__,
+ num_vreg);
return -EINVAL;
}
+
for (i = 0; i < power_setting_size; i++) {
switch (power_setting[i].seq_type) {
case SENSOR_VDIG:
for (j = 0; j < num_vreg; j++) {
- if (!strcmp(cam_vreg[j].reg_name, "cam_vdig")) {
+ if (!strcmp(soc_info->rgltr_name[j],
+ "cam_vdig")) {
+
CDBG("%s:%d i %d j %d cam_vdig\n",
__func__, __LINE__, i, j);
power_setting[i].seq_val = j;
+
if (VALIDATE_VOLTAGE(
- cam_vreg[j].min_voltage,
- cam_vreg[j].max_voltage,
+ soc_info->rgltr_min_volt[j],
+ soc_info->rgltr_max_volt[j],
power_setting[i].config_val)) {
- cam_vreg[j].min_voltage =
- cam_vreg[j].max_voltage =
+ soc_info->rgltr_min_volt[j] =
+ soc_info->rgltr_max_volt[j] =
power_setting[i].config_val;
}
break;
@@ -368,20 +375,24 @@ int32_t msm_camera_fill_vreg_params(struct camera_vreg_t *cam_vreg,
case SENSOR_VIO:
for (j = 0; j < num_vreg; j++) {
- if (!strcmp(cam_vreg[j].reg_name, "cam_vio")) {
+
+ if (!strcmp(soc_info->rgltr_name[j],
+ "cam_vio")) {
CDBG("%s:%d i %d j %d cam_vio\n",
__func__, __LINE__, i, j);
power_setting[i].seq_val = j;
+
if (VALIDATE_VOLTAGE(
- cam_vreg[j].min_voltage,
- cam_vreg[j].max_voltage,
+ soc_info->rgltr_min_volt[j],
+ soc_info->rgltr_max_volt[j],
power_setting[i].config_val)) {
- cam_vreg[j].min_voltage =
- cam_vreg[j].max_voltage =
+ soc_info->rgltr_min_volt[j] =
+ soc_info->rgltr_max_volt[j] =
power_setting[i].config_val;
}
break;
}
+
}
if (j == num_vreg)
power_setting[i].seq_val = INVALID_VREG;
@@ -389,20 +400,24 @@ int32_t msm_camera_fill_vreg_params(struct camera_vreg_t *cam_vreg,
case SENSOR_VANA:
for (j = 0; j < num_vreg; j++) {
- if (!strcmp(cam_vreg[j].reg_name, "cam_vana")) {
+
+ if (!strcmp(soc_info->rgltr_name[j],
+ "cam_vana")) {
CDBG("%s:%d i %d j %d cam_vana\n",
__func__, __LINE__, i, j);
power_setting[i].seq_val = j;
+
if (VALIDATE_VOLTAGE(
- cam_vreg[j].min_voltage,
- cam_vreg[j].max_voltage,
+ soc_info->rgltr_min_volt[j],
+ soc_info->rgltr_max_volt[j],
power_setting[i].config_val)) {
- cam_vreg[j].min_voltage =
- cam_vreg[j].max_voltage =
+ soc_info->rgltr_min_volt[j] =
+ soc_info->rgltr_max_volt[j] =
power_setting[i].config_val;
}
break;
}
+
}
if (j == num_vreg)
power_setting[i].seq_val = INVALID_VREG;
@@ -410,20 +425,25 @@ int32_t msm_camera_fill_vreg_params(struct camera_vreg_t *cam_vreg,
case SENSOR_VAF:
for (j = 0; j < num_vreg; j++) {
- if (!strcmp(cam_vreg[j].reg_name, "cam_vaf")) {
+
+ if (!strcmp(soc_info->rgltr_name[j],
+ "cam_vaf")) {
CDBG("%s:%d i %d j %d cam_vaf\n",
__func__, __LINE__, i, j);
power_setting[i].seq_val = j;
+
if (VALIDATE_VOLTAGE(
- cam_vreg[j].min_voltage,
- cam_vreg[j].max_voltage,
+ soc_info->rgltr_min_volt[j],
+ soc_info->rgltr_max_volt[j],
power_setting[i].config_val)) {
- cam_vreg[j].min_voltage =
- cam_vreg[j].max_voltage =
+ soc_info->rgltr_min_volt[j] =
+ soc_info->rgltr_max_volt[j] =
power_setting[i].config_val;
}
+
break;
}
+
}
if (j == num_vreg)
power_setting[i].seq_val = INVALID_VREG;
@@ -431,38 +451,43 @@ int32_t msm_camera_fill_vreg_params(struct camera_vreg_t *cam_vreg,
case SENSOR_CUSTOM_REG1:
for (j = 0; j < num_vreg; j++) {
- if (!strcmp(cam_vreg[j].reg_name,
+
+ if (!strcmp(soc_info->rgltr_name[j],
"cam_v_custom1")) {
CDBG("%s:%d i %d j %d cam_vcustom1\n",
__func__, __LINE__, i, j);
power_setting[i].seq_val = j;
+
if (VALIDATE_VOLTAGE(
- cam_vreg[j].min_voltage,
- cam_vreg[j].max_voltage,
+ soc_info->rgltr_min_volt[j],
+ soc_info->rgltr_max_volt[j],
power_setting[i].config_val)) {
- cam_vreg[j].min_voltage =
- cam_vreg[j].max_voltage =
+ soc_info->rgltr_min_volt[j] =
+ soc_info->rgltr_max_volt[j] =
power_setting[i].config_val;
}
break;
}
+
}
if (j == num_vreg)
power_setting[i].seq_val = INVALID_VREG;
break;
case SENSOR_CUSTOM_REG2:
for (j = 0; j < num_vreg; j++) {
- if (!strcmp(cam_vreg[j].reg_name,
+
+ if (!strcmp(soc_info->rgltr_name[j],
"cam_v_custom2")) {
CDBG("%s:%d i %d j %d cam_vcustom2\n",
__func__, __LINE__, i, j);
power_setting[i].seq_val = j;
+
if (VALIDATE_VOLTAGE(
- cam_vreg[j].min_voltage,
- cam_vreg[j].max_voltage,
+ soc_info->rgltr_min_volt[j],
+ soc_info->rgltr_max_volt[j],
power_setting[i].config_val)) {
- cam_vreg[j].min_voltage =
- cam_vreg[j].max_voltage =
+ soc_info->rgltr_min_volt[j] =
+ soc_info->rgltr_max_volt[j] =
power_setting[i].config_val;
}
break;
@@ -483,269 +508,274 @@ int32_t msm_camera_fill_vreg_params(struct camera_vreg_t *cam_vreg,
return rc;
}
-int32_t msm_camera_get_dt_gpio_req_tbl(struct device_node *of_node,
- struct msm_camera_gpio_conf *gconf, uint16_t *gpio_array,
- uint16_t gpio_array_size)
+int cam_sensor_util_request_gpio_table(
+ struct cam_hw_soc_info *soc_info, int gpio_en)
{
- int32_t rc = 0, i = 0;
- uint32_t count = 0, *val_array = NULL;
+ int rc = 0, i = 0;
+ uint8_t size = 0;
+ struct cam_soc_gpio_data *gpio_conf =
+ soc_info->gpio_data;
+ struct gpio *gpio_tbl = gpio_conf->cam_gpio_req_tbl;
- if (!of_get_property(of_node, "qcom,gpio-req-tbl-num", &count))
- return 0;
+ size = gpio_conf->cam_gpio_req_tbl_size;
- count /= sizeof(uint32_t);
- if (!count) {
- pr_err("%s qcom,gpio-req-tbl-num 0\n", __func__);
+ if (gpio_conf->cam_gpio_common_tbl_size <= 0) {
+ pr_info("%s:%d No GPIO entry\n", __func__, __LINE__);
return 0;
}
- val_array = kcalloc(count, sizeof(uint32_t), GFP_KERNEL);
- if (!val_array)
- return -ENOMEM;
-
- gconf->cam_gpio_req_tbl = kcalloc(count, sizeof(struct gpio),
- GFP_KERNEL);
- if (!gconf->cam_gpio_req_tbl) {
- rc = -ENOMEM;
- goto free_val_array;
- }
- gconf->cam_gpio_req_tbl_size = count;
-
- rc = of_property_read_u32_array(of_node, "qcom,gpio-req-tbl-num",
- val_array, count);
- if (rc < 0) {
- pr_err("%s failed %d\n", __func__, __LINE__);
- goto free_gpio_req_tbl;
+ if (!gpio_tbl || !size) {
+ pr_err("%s:%d invalid gpio_tbl %pK / size %d\n", __func__,
+ __LINE__, gpio_tbl, size);
+ return -EINVAL;
}
- for (i = 0; i < count; i++) {
- if (val_array[i] >= gpio_array_size) {
- pr_err("%s gpio req tbl index %d invalid\n",
- __func__, val_array[i]);
- return -EINVAL;
+ for (i = 0; i < size; i++) {
+ CDBG("%s:%d i %d, gpio %d dir %ld\n", __func__, __LINE__, i,
+ gpio_tbl[i].gpio, gpio_tbl[i].flags);
+ }
+
+ if (gpio_en) {
+ for (i = 0; i < size; i++) {
+ rc = gpio_request_one(gpio_tbl[i].gpio,
+ gpio_tbl[i].flags, gpio_tbl[i].label);
+ if (rc) {
+ /*
+				 * If a GPIO request fails, continue to
+				 * apply the remaining gpios and output an
+				 * error message for driver bringup debug
+ */
+ pr_err("%s:%d gpio %d:%s request fails\n",
+ __func__, __LINE__,
+ gpio_tbl[i].gpio, gpio_tbl[i].label);
+ }
}
- gconf->cam_gpio_req_tbl[i].gpio = gpio_array[val_array[i]];
- CDBG("%s cam_gpio_req_tbl[%d].gpio = %d\n", __func__, i,
- gconf->cam_gpio_req_tbl[i].gpio);
+ } else {
+ gpio_free_array(gpio_tbl, size);
}
- rc = of_property_read_u32_array(of_node, "qcom,gpio-req-tbl-flags",
- val_array, count);
- if (rc < 0) {
- pr_err("%s failed %d\n", __func__, __LINE__);
- goto free_gpio_req_tbl;
- }
-
- for (i = 0; i < count; i++) {
- gconf->cam_gpio_req_tbl[i].flags = val_array[i];
- CDBG("%s cam_gpio_req_tbl[%d].flags = %ld\n", __func__, i,
- gconf->cam_gpio_req_tbl[i].flags);
- }
-
- for (i = 0; i < count; i++) {
- rc = of_property_read_string_index(of_node,
- "qcom,gpio-req-tbl-label", i,
- &gconf->cam_gpio_req_tbl[i].label);
- CDBG("%s cam_gpio_req_tbl[%d].label = %s\n", __func__, i,
- gconf->cam_gpio_req_tbl[i].label);
- if (rc < 0) {
- pr_err("%s failed %d\n", __func__, __LINE__);
- goto free_gpio_req_tbl;
- }
- }
-
- kfree(val_array);
-
- return rc;
-
-free_gpio_req_tbl:
- kfree(gconf->cam_gpio_req_tbl);
-free_val_array:
- kfree(val_array);
- gconf->cam_gpio_req_tbl_size = 0;
-
return rc;
}
-int msm_camera_init_gpio_pin_tbl(struct device_node *of_node,
- struct msm_camera_gpio_conf *gconf, uint16_t *gpio_array,
- uint16_t gpio_array_size)
+
+int cam_sensor_util_init_gpio_pin_tbl(
+ struct cam_hw_soc_info *soc_info,
+ struct msm_camera_gpio_num_info **pgpio_num_info)
{
int rc = 0, val = 0;
+ uint32_t gpio_array_size;
+ struct platform_device *pdev = NULL;
+ struct device_node *of_node = NULL;
+ struct cam_soc_gpio_data *gconf = NULL;
+ struct msm_camera_gpio_num_info *gpio_num_info = NULL;
- gconf->gpio_num_info = kzalloc(sizeof(struct msm_camera_gpio_num_info),
+ pdev = soc_info->pdev;
+ of_node = pdev->dev.of_node;
+
+ gconf = soc_info->gpio_data;
+ if (!gconf) {
+ pr_err("%s:%d No gpio_common_table is found\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ if (!gconf->cam_gpio_common_tbl) {
+ pr_err("%s:%d gpio_common_table is not initialized\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ gpio_array_size = gconf->cam_gpio_common_tbl_size;
+
+ if (!gpio_array_size) {
+ pr_err("%s:%d invalid size of gpio table\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ *pgpio_num_info = kzalloc(sizeof(struct msm_camera_gpio_num_info),
GFP_KERNEL);
- if (!gconf->gpio_num_info)
+ if (!*pgpio_num_info)
return -ENOMEM;
+ gpio_num_info = *pgpio_num_info;
- rc = of_property_read_u32(of_node, "qcom,gpio-vana", &val);
+ rc = of_property_read_u32(of_node, "gpio-vana", &val);
if (rc != -EINVAL) {
if (rc < 0) {
- pr_err("%s:%d read qcom,gpio-vana failed rc %d\n",
+ pr_err("%s:%d read gpio-vana failed rc %d\n",
__func__, __LINE__, rc);
goto free_gpio_info;
} else if (val >= gpio_array_size) {
- pr_err("%s:%d qcom,gpio-vana invalid %d\n",
+ pr_err("%s:%d gpio-vana invalid %d\n",
__func__, __LINE__, val);
rc = -EINVAL;
goto free_gpio_info;
}
- gconf->gpio_num_info->gpio_num[SENSOR_VANA] =
- gpio_array[val];
- gconf->gpio_num_info->valid[SENSOR_VANA] = 1;
- CDBG("%s qcom,gpio-vana %d\n", __func__,
- gconf->gpio_num_info->gpio_num[SENSOR_VANA]);
+ gpio_num_info->gpio_num[SENSOR_VANA] =
+ gconf->cam_gpio_common_tbl[val].gpio;
+ gpio_num_info->valid[SENSOR_VANA] = 1;
+
+ CDBG("%s:%d gpio-vana %d\n", __func__, __LINE__,
+ gpio_num_info->gpio_num[SENSOR_VANA]);
}
- rc = of_property_read_u32(of_node, "qcom,gpio-vio", &val);
+ rc = of_property_read_u32(of_node, "gpio-vio", &val);
if (rc != -EINVAL) {
if (rc < 0) {
- pr_err("%s:%d read qcom,gpio-vio failed rc %d\n",
+ pr_err("%s:%d read gpio-vio failed rc %d\n",
__func__, __LINE__, rc);
goto free_gpio_info;
} else if (val >= gpio_array_size) {
- pr_err("%s:%d qcom,gpio-vio invalid %d\n",
+ pr_err("%s:%d gpio-vio invalid %d\n",
__func__, __LINE__, val);
goto free_gpio_info;
}
- gconf->gpio_num_info->gpio_num[SENSOR_VIO] =
- gpio_array[val];
- gconf->gpio_num_info->valid[SENSOR_VIO] = 1;
- CDBG("%s qcom,gpio-vio %d\n", __func__,
- gconf->gpio_num_info->gpio_num[SENSOR_VIO]);
+ gpio_num_info->gpio_num[SENSOR_VIO] =
+ gconf->cam_gpio_common_tbl[val].gpio;
+ gpio_num_info->valid[SENSOR_VIO] = 1;
+
+ CDBG("%s:%d gpio-vio %d\n", __func__, __LINE__,
+ gpio_num_info->gpio_num[SENSOR_VIO]);
}
- rc = of_property_read_u32(of_node, "qcom,gpio-vaf", &val);
+ rc = of_property_read_u32(of_node, "gpio-vaf", &val);
if (rc != -EINVAL) {
if (rc < 0) {
- pr_err("%s:%d read qcom,gpio-vaf failed rc %d\n",
+ pr_err("%s:%d read gpio-vaf failed rc %d\n",
__func__, __LINE__, rc);
goto free_gpio_info;
} else if (val >= gpio_array_size) {
- pr_err("%s:%d qcom,gpio-vaf invalid %d\n",
+ pr_err("%s:%d gpio-vaf invalid %d\n",
__func__, __LINE__, val);
rc = -EINVAL;
goto free_gpio_info;
}
- gconf->gpio_num_info->gpio_num[SENSOR_VAF] =
- gpio_array[val];
- gconf->gpio_num_info->valid[SENSOR_VAF] = 1;
- CDBG("%s qcom,gpio-vaf %d\n", __func__,
- gconf->gpio_num_info->gpio_num[SENSOR_VAF]);
+ gpio_num_info->gpio_num[SENSOR_VAF] =
+ gconf->cam_gpio_common_tbl[val].gpio;
+ gpio_num_info->valid[SENSOR_VAF] = 1;
+
+ CDBG("%s:%d gpio-vaf %d\n", __func__, __LINE__,
+ gpio_num_info->gpio_num[SENSOR_VAF]);
}
- rc = of_property_read_u32(of_node, "qcom,gpio-vdig", &val);
+ rc = of_property_read_u32(of_node, "gpio-vdig", &val);
if (rc != -EINVAL) {
if (rc < 0) {
- pr_err("%s:%d read qcom,gpio-vdig failed rc %d\n",
+ pr_err("%s:%d read gpio-vdig failed rc %d\n",
__func__, __LINE__, rc);
goto free_gpio_info;
} else if (val >= gpio_array_size) {
- pr_err("%s:%d qcom,gpio-vdig invalid %d\n",
+ pr_err("%s:%d gpio-vdig invalid %d\n",
__func__, __LINE__, val);
rc = -EINVAL;
goto free_gpio_info;
}
- gconf->gpio_num_info->gpio_num[SENSOR_VDIG] =
- gpio_array[val];
- gconf->gpio_num_info->valid[SENSOR_VDIG] = 1;
- CDBG("%s qcom,gpio-vdig %d\n", __func__,
- gconf->gpio_num_info->gpio_num[SENSOR_VDIG]);
+ gpio_num_info->gpio_num[SENSOR_VDIG] =
+ gconf->cam_gpio_common_tbl[val].gpio;
+ gpio_num_info->valid[SENSOR_VDIG] = 1;
+
+ CDBG("%s:%d gpio-vdig %d\n", __func__, __LINE__,
+ gpio_num_info->gpio_num[SENSOR_VDIG]);
}
- rc = of_property_read_u32(of_node, "qcom,gpio-reset", &val);
+ rc = of_property_read_u32(of_node, "gpio-reset", &val);
if (rc != -EINVAL) {
if (rc < 0) {
- pr_err("%s:%d read qcom,gpio-reset failed rc %d\n",
+ pr_err("%s:%d read gpio-reset failed rc %d\n",
__func__, __LINE__, rc);
goto free_gpio_info;
} else if (val >= gpio_array_size) {
- pr_err("%s:%d qcom,gpio-reset invalid %d\n",
+ pr_err("%s:%d gpio-reset invalid %d\n",
__func__, __LINE__, val);
rc = -EINVAL;
goto free_gpio_info;
}
- gconf->gpio_num_info->gpio_num[SENSOR_RESET] =
- gpio_array[val];
- gconf->gpio_num_info->valid[SENSOR_RESET] = 1;
- CDBG("%s qcom,gpio-reset %d\n", __func__,
- gconf->gpio_num_info->gpio_num[SENSOR_RESET]);
+ gpio_num_info->gpio_num[SENSOR_RESET] =
+ gconf->cam_gpio_common_tbl[val].gpio;
+ gpio_num_info->valid[SENSOR_RESET] = 1;
+
+ CDBG("%s:%d gpio-reset %d\n", __func__, __LINE__,
+ gpio_num_info->gpio_num[SENSOR_RESET]);
}
- rc = of_property_read_u32(of_node, "qcom,gpio-standby", &val);
+ rc = of_property_read_u32(of_node, "gpio-standby", &val);
if (rc != -EINVAL) {
if (rc < 0) {
- pr_err("%s:%d read qcom,gpio-standby failed rc %d\n",
+ pr_err("%s:%d read gpio-standby failed rc %d\n",
__func__, __LINE__, rc);
goto free_gpio_info;
} else if (val >= gpio_array_size) {
- pr_err("%s:%d qcom,gpio-standby invalid %d\n",
+ pr_err("%s:%d gpio-standby invalid %d\n",
__func__, __LINE__, val);
rc = -EINVAL;
goto free_gpio_info;
}
- gconf->gpio_num_info->gpio_num[SENSOR_STANDBY] =
- gpio_array[val];
- gconf->gpio_num_info->valid[SENSOR_STANDBY] = 1;
- CDBG("%s qcom,gpio-standby %d\n", __func__,
- gconf->gpio_num_info->gpio_num[SENSOR_STANDBY]);
+ gpio_num_info->gpio_num[SENSOR_STANDBY] =
+ gconf->cam_gpio_common_tbl[val].gpio;
+ gpio_num_info->valid[SENSOR_STANDBY] = 1;
+
+ CDBG("%s:%d gpio-standby %d\n", __func__, __LINE__,
+ gpio_num_info->gpio_num[SENSOR_STANDBY]);
}
- rc = of_property_read_u32(of_node, "qcom,gpio-af-pwdm", &val);
+ rc = of_property_read_u32(of_node, "gpio-af-pwdm", &val);
if (rc != -EINVAL) {
if (rc < 0) {
- pr_err("%s:%d read qcom,gpio-af-pwdm failed rc %d\n",
+ pr_err("%s:%d read gpio-af-pwdm failed rc %d\n",
__func__, __LINE__, rc);
goto free_gpio_info;
} else if (val >= gpio_array_size) {
- pr_err("%s:%d qcom,gpio-af-pwdm invalid %d\n",
+ pr_err("%s:%d gpio-af-pwdm invalid %d\n",
__func__, __LINE__, val);
rc = -EINVAL;
goto free_gpio_info;
}
- gconf->gpio_num_info->gpio_num[SENSOR_VAF_PWDM] =
- gpio_array[val];
- gconf->gpio_num_info->valid[SENSOR_VAF_PWDM] = 1;
- CDBG("%s qcom,gpio-af-pwdm %d\n", __func__,
- gconf->gpio_num_info->gpio_num[SENSOR_VAF_PWDM]);
+ gpio_num_info->gpio_num[SENSOR_VAF_PWDM] =
+ gconf->cam_gpio_common_tbl[val].gpio;
+ gpio_num_info->valid[SENSOR_VAF_PWDM] = 1;
+
+ CDBG("%s:%d gpio-af-pwdm %d\n", __func__, __LINE__,
+ gpio_num_info->gpio_num[SENSOR_VAF_PWDM]);
}
- rc = of_property_read_u32(of_node, "qcom,gpio-custom1", &val);
+ rc = of_property_read_u32(of_node, "gpio-custom1", &val);
if (rc != -EINVAL) {
if (rc < 0) {
- pr_err("%s:%d read qcom,gpio-custom1 failed rc %d\n",
+ pr_err("%s:%d read gpio-custom1 failed rc %d\n",
__func__, __LINE__, rc);
goto free_gpio_info;
} else if (val >= gpio_array_size) {
- pr_err("%s:%d qcom,gpio-custom1 invalid %d\n",
+ pr_err("%s:%d gpio-custom1 invalid %d\n",
__func__, __LINE__, val);
rc = -EINVAL;
goto free_gpio_info;
}
- gconf->gpio_num_info->gpio_num[SENSOR_CUSTOM_GPIO1] =
- gpio_array[val];
- gconf->gpio_num_info->valid[SENSOR_CUSTOM_GPIO1] = 1;
- CDBG("%s qcom,gpio-custom1 %d\n", __func__,
- gconf->gpio_num_info->gpio_num[SENSOR_CUSTOM_GPIO1]);
+ gpio_num_info->gpio_num[SENSOR_CUSTOM_GPIO1] =
+ gconf->cam_gpio_common_tbl[val].gpio;
+ gpio_num_info->valid[SENSOR_CUSTOM_GPIO1] = 1;
+
+ CDBG("%s:%d gpio-custom1 %d\n", __func__, __LINE__,
+ gpio_num_info->gpio_num[SENSOR_CUSTOM_GPIO1]);
}
- rc = of_property_read_u32(of_node, "qcom,gpio-custom2", &val);
+ rc = of_property_read_u32(of_node, "gpio-custom2", &val);
if (rc != -EINVAL) {
if (rc < 0) {
- pr_err("%s:%d read qcom,gpio-custom2 failed rc %d\n",
+ pr_err("%s:%d read gpio-custom2 failed rc %d\n",
__func__, __LINE__, rc);
goto free_gpio_info;
} else if (val >= gpio_array_size) {
- pr_err("%s:%d qcom,gpio-custom2 invalid %d\n",
+ pr_err("%s:%d gpio-custom2 invalid %d\n",
__func__, __LINE__, val);
rc = -EINVAL;
goto free_gpio_info;
}
- gconf->gpio_num_info->gpio_num[SENSOR_CUSTOM_GPIO2] =
- gpio_array[val];
- gconf->gpio_num_info->valid[SENSOR_CUSTOM_GPIO2] = 1;
- CDBG("%s qcom,gpio-custom2 %d\n", __func__,
- gconf->gpio_num_info->gpio_num[SENSOR_CUSTOM_GPIO2]);
+ gpio_num_info->gpio_num[SENSOR_CUSTOM_GPIO2] =
+ gconf->cam_gpio_common_tbl[val].gpio;
+ gpio_num_info->valid[SENSOR_CUSTOM_GPIO2] = 1;
+
+ CDBG("%s:%d gpio-custom2 %d\n", __func__, __LINE__,
+ gpio_num_info->gpio_num[SENSOR_CUSTOM_GPIO2]);
} else {
rc = 0;
}
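The gpio_num_info table built above is consumed later in this file by msm_cam_sensor_handle_reg_gpio(); the sketch below restates that lookup in isolation to show how the valid[] and gpio_num[] arrays cooperate (the helper name is hypothetical).

/* Drive the GPIO mapped to one power-sequence step, if it was parsed. */
static void example_set_seq_gpio(struct msm_camera_gpio_num_info *info,
	int seq_type, int val)
{
	if (info && info->valid[seq_type])
		gpio_set_value_cansleep(info->gpio_num[seq_type], val);
}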
@@ -753,142 +783,8 @@ int msm_camera_init_gpio_pin_tbl(struct device_node *of_node,
return rc;
free_gpio_info:
- kfree(gconf->gpio_num_info);
- gconf->gpio_num_info = NULL;
- return rc;
-}
-
-int cam_sensor_get_dt_vreg_data(struct device_node *of_node,
- struct camera_vreg_t **cam_vreg, int *num_vreg)
-{
- int rc = 0, i = 0;
- int32_t count = 0;
- uint32_t *vreg_array = NULL;
- struct camera_vreg_t *vreg = NULL;
-
- count = of_property_count_strings(of_node, "qcom,cam-vreg-name");
- CDBG("%s qcom,cam-vreg-name count %d\n", __func__, count);
-
- if (!count || (count == -EINVAL)) {
- pr_err("%s:%d number of entries is 0 or not present in dts\n",
- __func__, __LINE__);
- *num_vreg = 0;
- return 0;
- }
-
- vreg = kcalloc(count, sizeof(*vreg), GFP_KERNEL);
- if (!vreg)
- return -ENOMEM;
-
- *cam_vreg = vreg;
- *num_vreg = count;
- for (i = 0; i < count; i++) {
- rc = of_property_read_string_index(of_node,
- "qcom,cam-vreg-name", i,
- &vreg[i].reg_name);
- CDBG("%s reg_name[%d] = %s\n", __func__, i,
- vreg[i].reg_name);
- if (rc < 0) {
- pr_err("%s failed %d\n", __func__, __LINE__);
- goto free_vreg;
- }
- }
-
- vreg_array = kcalloc(count, sizeof(uint32_t), GFP_KERNEL);
- if (!vreg_array) {
- rc = -ENOMEM;
- goto free_vreg;
- }
-
- for (i = 0; i < count; i++)
- vreg[i].type = VREG_TYPE_DEFAULT;
-
- rc = of_property_read_u32_array(of_node, "qcom,cam-vreg-type",
- vreg_array, count);
- if (rc != -EINVAL) {
- if (rc < 0) {
- pr_err("%s failed %d\n", __func__, __LINE__);
- goto free_vreg_array;
- } else {
- for (i = 0; i < count; i++) {
- vreg[i].type = vreg_array[i];
- CDBG("%s cam_vreg[%d].type = %d\n",
- __func__, i, vreg[i].type);
- }
- }
- } else {
- CDBG("%s:%d no qcom,cam-vreg-type entries in dts\n",
- __func__, __LINE__);
- rc = 0;
- }
-
- rc = of_property_read_u32_array(of_node, "qcom,cam-vreg-min-voltage",
- vreg_array, count);
- if (rc != -EINVAL) {
- if (rc < 0) {
- pr_err("%s failed %d\n", __func__, __LINE__);
- goto free_vreg_array;
- } else {
- for (i = 0; i < count; i++) {
- vreg[i].min_voltage = vreg_array[i];
- CDBG("%s cam_vreg[%d].min_voltage = %d\n",
- __func__, i, vreg[i].min_voltage);
- }
- }
- } else {
- CDBG("%s:%d no qcom,cam-vreg-min-voltage entries in dts\n",
- __func__, __LINE__);
- rc = 0;
- }
-
- rc = of_property_read_u32_array(of_node, "qcom,cam-vreg-max-voltage",
- vreg_array, count);
- if (rc != -EINVAL) {
- if (rc < 0) {
- pr_err("%s failed %d\n", __func__, __LINE__);
- goto free_vreg_array;
- } else {
- for (i = 0; i < count; i++) {
- vreg[i].max_voltage = vreg_array[i];
- CDBG("%s cam_vreg[%d].max_voltage = %d\n",
- __func__, i, vreg[i].max_voltage);
- }
- }
- } else {
- CDBG("%s:%d no qcom,cam-vreg-max-voltage entries in dts\n",
- __func__, __LINE__);
- rc = 0;
- }
-
- rc = of_property_read_u32_array(of_node, "qcom,cam-vreg-op-mode",
- vreg_array, count);
- if (rc != -EINVAL) {
- if (rc < 0) {
- pr_err("%s failed %d\n", __func__, __LINE__);
- goto free_vreg_array;
- } else {
- for (i = 0; i < count; i++) {
- vreg[i].op_mode = vreg_array[i];
- CDBG("%s cam_vreg[%d].op_mode = %d\n",
- __func__, i, vreg[i].op_mode);
- }
- }
- } else {
- CDBG("%s:%d no qcom,cam-vreg-op-mode entries in dts\n",
- __func__, __LINE__);
- rc = 0;
- }
-
- kfree(vreg_array);
-
- return rc;
-
-free_vreg_array:
- kfree(vreg_array);
-free_vreg:
- kfree(vreg);
- *num_vreg = 0;
-
+ kfree(gpio_num_info);
+ gpio_num_info = NULL;
return rc;
}
@@ -919,112 +815,55 @@ int msm_camera_pinctrl_init(
}
return 0;
}
-
int msm_cam_sensor_handle_reg_gpio(int seq_type,
- struct msm_camera_gpio_conf *gconf, int val)
+ struct msm_camera_gpio_num_info *gpio_num_info, int val)
{
-
int gpio_offset = -1;
- if (!gconf) {
- pr_err("ERR:%s: Input Parameters are not proper\n", __func__);
+ if (!gpio_num_info) {
+ pr_err("%s:%d Input Parameters are not proper\n",
+ __func__, __LINE__);
return -EINVAL;
}
+
CDBG("%s: %d Seq type: %d, config: %d", __func__, __LINE__,
seq_type, val);
gpio_offset = seq_type;
- if ((gconf->gpio_num_info->valid[gpio_offset] == 1)) {
+ if (gpio_num_info->valid[gpio_offset] == 1) {
CDBG("%s: %d VALID GPIO offset: %d, seqtype: %d\n",
- __func__, __LINE__, gpio_offset, seq_type);
+ __func__, __LINE__, gpio_offset, seq_type);
gpio_set_value_cansleep(
- gconf->gpio_num_info->gpio_num
+ gpio_num_info->gpio_num
[gpio_offset], val);
}
return 0;
}
-int32_t msm_sensor_driver_get_gpio_data(
- struct msm_camera_gpio_conf **gpio_conf,
- struct device_node *of_node)
-{
- int32_t rc = 0, i = 0;
- uint16_t *gpio_array = NULL;
- int16_t gpio_array_size = 0;
- struct msm_camera_gpio_conf *gconf = NULL;
-
- /* Validate input parameters */
- if (!of_node) {
- pr_err("failed: invalid param of_node %pK", of_node);
- return -EINVAL;
- }
-
- gpio_array_size = of_gpio_count(of_node);
- CDBG("gpio count %d\n", gpio_array_size);
- if (gpio_array_size <= 0)
- return 0;
-
- gconf = kzalloc(sizeof(*gconf), GFP_KERNEL);
- if (!gconf)
- return -ENOMEM;
-
- *gpio_conf = gconf;
-
- gpio_array = kcalloc(gpio_array_size, sizeof(uint16_t), GFP_KERNEL);
- if (!gpio_array)
- goto free_gpio_conf;
-
- for (i = 0; i < gpio_array_size; i++) {
- gpio_array[i] = of_get_gpio(of_node, i);
- CDBG("gpio_array[%d] = %d", i, gpio_array[i]);
- }
- rc = msm_camera_get_dt_gpio_req_tbl(of_node, gconf, gpio_array,
- gpio_array_size);
- if (rc < 0) {
- pr_err("failed in msm_camera_get_dt_gpio_req_tbl\n");
- goto free_gpio_array;
- }
-
- rc = msm_camera_init_gpio_pin_tbl(of_node, gconf, gpio_array,
- gpio_array_size);
- if (rc < 0) {
- pr_err("failed in msm_camera_init_gpio_pin_tbl\n");
- goto free_gpio_req_tbl;
- }
- kfree(gpio_array);
-
- return rc;
-
-free_gpio_req_tbl:
- kfree(gconf->cam_gpio_req_tbl);
-free_gpio_array:
- kfree(gpio_array);
-free_gpio_conf:
- kfree(gconf);
- *gpio_conf = NULL;
-
- return rc;
-}
-
-int cam_sensor_core_power_up(struct cam_sensor_power_ctrl_t *ctrl)
+int cam_sensor_core_power_up(struct cam_sensor_power_ctrl_t *ctrl,
+ struct cam_hw_soc_info *soc_info)
{
int rc = 0, index = 0, no_gpio = 0, ret = 0, num_vreg, j = 0;
+ int32_t vreg_idx = -1;
struct cam_sensor_power_setting *power_setting = NULL;
- struct camera_vreg_t *cam_vreg;
+ struct msm_camera_gpio_num_info *gpio_num_info = NULL;
CDBG("%s:%d\n", __func__, __LINE__);
if (!ctrl) {
- pr_err("failed ctrl %pK\n", ctrl);
+ pr_err("%s:%d Invalid ctrl handle\n", __func__, __LINE__);
return -EINVAL;
}
- cam_vreg = ctrl->cam_vreg;
- num_vreg = ctrl->num_vreg;
+ gpio_num_info = ctrl->gpio_num_info;
+ num_vreg = soc_info->num_rgltr;
- if (ctrl->gpio_conf->cam_gpiomux_conf_tbl != NULL)
- CDBG("%s:%d mux install\n", __func__, __LINE__);
+ if ((num_vreg == 0) || (num_vreg > CAM_SOC_MAX_REGULATOR)) {
+ pr_err("%s:%d Regulators are not initialized\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
ret = msm_camera_pinctrl_init(&(ctrl->pinctrl_info), ctrl->dev);
if (ret < 0) {
@@ -1034,11 +873,11 @@ int cam_sensor_core_power_up(struct cam_sensor_power_ctrl_t *ctrl)
} else {
ctrl->cam_pinctrl_status = 1;
}
- rc = msm_camera_request_gpio_table(
- ctrl->gpio_conf->cam_gpio_req_tbl,
- ctrl->gpio_conf->cam_gpio_req_tbl_size, 1);
+
+ rc = cam_sensor_util_request_gpio_table(soc_info, 1);
if (rc < 0)
no_gpio = rc;
+
if (ctrl->cam_pinctrl_status) {
ret = pinctrl_select_state(ctrl->pinctrl_info.pinctrl,
ctrl->pinctrl_info.gpio_state_active);
@@ -1048,38 +887,68 @@ int cam_sensor_core_power_up(struct cam_sensor_power_ctrl_t *ctrl)
}
for (index = 0; index < ctrl->power_setting_size; index++) {
- CDBG("%s index %d\n", __func__, index);
+ CDBG("%s:%d index %d\n", __func__, __LINE__, index);
power_setting = &ctrl->power_setting[index];
switch (power_setting->seq_type) {
case SENSOR_MCLK:
- if (power_setting->seq_val >= ctrl->clk_info_size) {
- pr_err("%s:%d :Error: clk index %d >= max %zu\n",
+ if (power_setting->seq_val >= soc_info->num_clk) {
+ pr_err("%s:%d :Error: clk index %d >= max %u\n",
__func__, __LINE__,
power_setting->seq_val,
- ctrl->clk_info_size);
+ soc_info->num_clk);
goto power_up_failed;
}
for (j = 0; j < num_vreg; j++) {
- if (!strcmp(cam_vreg[j].reg_name,
+ if (!strcmp(soc_info->rgltr_name[j],
"cam_clk")) {
CDBG("%s:%d Enable cam_clk: %d\n",
__func__, __LINE__, j);
- msm_camera_config_single_vreg(ctrl->dev,
- &cam_vreg[j],
- (struct regulator **)
- &power_setting->data[0],
- 1);
+
+ soc_info->rgltr[j] =
+ regulator_get(
+ &soc_info->pdev->dev,
+ soc_info->rgltr_name[j]);
+
+ if (IS_ERR_OR_NULL(
+ soc_info->rgltr[j])) {
+ rc = PTR_ERR(
+ soc_info->rgltr[j]);
+ rc = rc ? rc : -EINVAL;
+ pr_err("%s:%d :vreg %s %d\n",
+ __func__, __LINE__,
+ soc_info->rgltr_name[j],
+ rc);
+ soc_info->rgltr[j] = NULL;
+ }
+
+ rc = cam_soc_util_regulator_enable(
+ soc_info->rgltr[j],
+ soc_info->rgltr_name[j],
+ soc_info->rgltr_min_volt[j],
+ soc_info->rgltr_max_volt[j],
+ soc_info->rgltr_op_mode[j],
+ soc_info->rgltr_delay[j]);
+
+ power_setting->data[0] =
+ soc_info->rgltr[j];
}
}
if (power_setting->config_val)
- ctrl->clk_info[power_setting->seq_val].
- clk_rate = power_setting->config_val;
- rc = msm_camera_clk_enable(ctrl->dev,
- ctrl->clk_info, ctrl->clk_ptr,
- ctrl->clk_info_size, true);
+ soc_info->clk_rate[0][power_setting->seq_val] =
+ power_setting->config_val;
+
+ for (j = 0; j < soc_info->num_clk; j++) {
+ rc = cam_soc_util_clk_enable(soc_info->clk[j],
+ soc_info->clk_name[j],
+ soc_info->clk_rate[0][j]);
+ if (rc)
+ break;
+ }
+
if (rc < 0) {
- pr_err("%s: clk enable failed\n", __func__);
+ pr_err("%s:%d clk enable failed\n", __func__,
+ __LINE__);
goto power_up_failed;
}
break;
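The SENSOR_MCLK branch above pairs regulator_get() with cam_soc_util_regulator_enable() over the parallel rgltr_* arrays in cam_hw_soc_info. Below is a minimal sketch of that per-regulator pattern, factored into a helper for readability; the helper name is hypothetical and the soc_info arrays are assumed to be populated by DT parsing.

/* Take and enable regulator j from a populated cam_hw_soc_info. */
static int example_enable_rgltr(struct cam_hw_soc_info *soc_info, int j)
{
	int rc;

	soc_info->rgltr[j] = regulator_get(&soc_info->pdev->dev,
		soc_info->rgltr_name[j]);
	if (IS_ERR_OR_NULL(soc_info->rgltr[j])) {
		rc = PTR_ERR(soc_info->rgltr[j]);
		soc_info->rgltr[j] = NULL;
		return rc ? rc : -EINVAL;
	}

	return cam_soc_util_regulator_enable(soc_info->rgltr[j],
		soc_info->rgltr_name[j],
		soc_info->rgltr_min_volt[j],
		soc_info->rgltr_max_volt[j],
		soc_info->rgltr_op_mode[j],
		soc_info->rgltr_delay[j]);
}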
@@ -1088,27 +957,29 @@ int cam_sensor_core_power_up(struct cam_sensor_power_ctrl_t *ctrl)
case SENSOR_CUSTOM_GPIO1:
case SENSOR_CUSTOM_GPIO2:
if (no_gpio) {
- pr_err("%s: request gpio failed\n", __func__);
+ pr_err("%s:%d request gpio failed\n", __func__,
+ __LINE__);
return no_gpio;
}
if (power_setting->seq_val >= CAM_VREG_MAX ||
- !ctrl->gpio_conf->gpio_num_info) {
- pr_err("%s gpio index %d >= max %d\n", __func__,
+ !gpio_num_info) {
+ pr_err("%s:%d gpio index %d >= max %d\n",
+ __func__, __LINE__,
power_setting->seq_val,
CAM_VREG_MAX);
goto power_up_failed;
}
CDBG("%s:%d gpio set val %d\n",
__func__, __LINE__,
- ctrl->gpio_conf->gpio_num_info->gpio_num
+ gpio_num_info->gpio_num
[power_setting->seq_val]);
rc = msm_cam_sensor_handle_reg_gpio(
power_setting->seq_type,
- ctrl->gpio_conf, 1);
+ gpio_num_info, 1);
if (rc < 0) {
- pr_err("ERR:%s Error in handling VREG GPIO\n",
- __func__);
+ pr_err("%s:%d Error in handling VREG GPIO\n",
+ __func__, __LINE__);
goto power_up_failed;
}
break;
@@ -1123,35 +994,61 @@ int cam_sensor_core_power_up(struct cam_sensor_power_ctrl_t *ctrl)
break;
if (power_setting->seq_val >= CAM_VREG_MAX) {
- pr_err("%s vreg index %d >= max %d\n", __func__,
+ pr_err("%s:%d vreg index %d >= max %d\n",
+ __func__, __LINE__,
power_setting->seq_val,
CAM_VREG_MAX);
goto power_up_failed;
}
- if (power_setting->seq_val < ctrl->num_vreg)
- msm_camera_config_single_vreg(ctrl->dev,
- &ctrl->cam_vreg
- [power_setting->seq_val],
- (struct regulator **)
- &power_setting->data[0],
- 1);
+ if (power_setting->seq_val < num_vreg) {
+ CDBG("%s:%d Enable Regulator\n",
+ __func__, __LINE__);
+ vreg_idx = power_setting->seq_val;
+
+ soc_info->rgltr[vreg_idx] =
+ regulator_get(&soc_info->pdev->dev,
+ soc_info->rgltr_name[vreg_idx]);
+ if (IS_ERR_OR_NULL(
+ soc_info->rgltr[vreg_idx])) {
+ rc = PTR_ERR(soc_info->rgltr[vreg_idx]);
+ rc = rc ? rc : -EINVAL;
+
+ pr_err("%s:%d, %s get failed %d\n",
+ __func__, __LINE__,
+ soc_info->rgltr_name[vreg_idx],
+ rc);
+
+ soc_info->rgltr[vreg_idx] = NULL;
+ }
+
+ rc = cam_soc_util_regulator_enable(
+ soc_info->rgltr[vreg_idx],
+ soc_info->rgltr_name[vreg_idx],
+ soc_info->rgltr_min_volt[vreg_idx],
+ soc_info->rgltr_max_volt[vreg_idx],
+ soc_info->rgltr_op_mode[vreg_idx],
+ soc_info->rgltr_delay[vreg_idx]);
+
+ power_setting->data[0] =
+ soc_info->rgltr[vreg_idx];
+ }
else
pr_err("%s: %d usr_idx:%d dts_idx:%d\n",
__func__, __LINE__,
- power_setting->seq_val, ctrl->num_vreg);
+ power_setting->seq_val, num_vreg);
rc = msm_cam_sensor_handle_reg_gpio(
power_setting->seq_type,
- ctrl->gpio_conf, 1);
+ gpio_num_info, 1);
if (rc < 0) {
- pr_err("ERR:%s Error in handling VREG GPIO\n",
- __func__);
+ pr_err("%s:%d Error in handling VREG GPIO\n",
+ __func__, __LINE__);
goto power_up_failed;
}
break;
default:
- pr_err("%s error power seq type %d\n", __func__,
- power_setting->seq_type);
+ pr_err("%s:%d error power seq type %d\n", __func__,
+ __LINE__, power_setting->seq_type);
break;
}
if (power_setting->delay > 20)
@@ -1165,21 +1062,22 @@ int cam_sensor_core_power_up(struct cam_sensor_power_ctrl_t *ctrl)
power_up_failed:
pr_err("%s:%d failed\n", __func__, __LINE__);
for (index--; index >= 0; index--) {
- CDBG("%s index %d\n", __func__, index);
+ CDBG("%s:%d index %d\n", __func__, __LINE__, index);
power_setting = &ctrl->power_setting[index];
- CDBG("%s type %d\n", __func__, power_setting->seq_type);
+ CDBG("%s:%d type %d\n", __func__, __LINE__,
+ power_setting->seq_type);
switch (power_setting->seq_type) {
case SENSOR_RESET:
case SENSOR_STANDBY:
case SENSOR_CUSTOM_GPIO1:
case SENSOR_CUSTOM_GPIO2:
- if (!ctrl->gpio_conf->gpio_num_info)
+ if (!gpio_num_info)
continue;
- if (!ctrl->gpio_conf->gpio_num_info->valid
+ if (!gpio_num_info->valid
[power_setting->seq_val])
continue;
gpio_set_value_cansleep(
- ctrl->gpio_conf->gpio_num_info->gpio_num
+ gpio_num_info->gpio_num
[power_setting->seq_val], GPIOF_OUT_INIT_LOW);
break;
case SENSOR_VANA:
@@ -1189,24 +1087,35 @@ int cam_sensor_core_power_up(struct cam_sensor_power_ctrl_t *ctrl)
case SENSOR_VAF_PWDM:
case SENSOR_CUSTOM_REG1:
case SENSOR_CUSTOM_REG2:
- if (power_setting->seq_val < ctrl->num_vreg)
- msm_camera_config_single_vreg(ctrl->dev,
- &ctrl->cam_vreg
- [power_setting->seq_val],
- (struct regulator **)
- &power_setting->data[0],
- 0);
+ if (power_setting->seq_val < num_vreg) {
+ CDBG("%s:%d Disable Regulator\n",
+ __func__, __LINE__);
+ vreg_idx = power_setting->seq_val;
+
+ rc = cam_soc_util_regulator_disable(
+ soc_info->rgltr[vreg_idx],
+ soc_info->rgltr_name[vreg_idx],
+ soc_info->rgltr_min_volt[vreg_idx],
+ soc_info->rgltr_max_volt[vreg_idx],
+ soc_info->rgltr_op_mode[vreg_idx],
+ soc_info->rgltr_delay[vreg_idx]);
+
+ power_setting->data[0] =
+ soc_info->rgltr[vreg_idx];
+
+ }
else
pr_err("%s:%d:seq_val: %d > num_vreg: %d\n",
__func__, __LINE__,
- power_setting->seq_val, ctrl->num_vreg);
+ power_setting->seq_val, num_vreg);
msm_cam_sensor_handle_reg_gpio(power_setting->seq_type,
- ctrl->gpio_conf, GPIOF_OUT_INIT_LOW);
+ gpio_num_info, GPIOF_OUT_INIT_LOW);
+
break;
default:
- pr_err("%s error power seq type %d\n", __func__,
- power_setting->seq_type);
+ pr_err("%s:%d error power seq type %d\n", __func__,
+ __LINE__, power_setting->seq_type);
break;
}
if (power_setting->delay > 20) {
@@ -1225,9 +1134,8 @@ int cam_sensor_core_power_up(struct cam_sensor_power_ctrl_t *ctrl)
devm_pinctrl_put(ctrl->pinctrl_info.pinctrl);
}
ctrl->cam_pinctrl_status = 0;
- msm_camera_request_gpio_table(
- ctrl->gpio_conf->cam_gpio_req_tbl,
- ctrl->gpio_conf->cam_gpio_req_tbl_size, 0);
+
+ cam_sensor_util_request_gpio_table(soc_info, 0);
return rc;
}
@@ -1254,19 +1162,18 @@ msm_camera_get_power_settings(struct cam_sensor_power_ctrl_t *ctrl,
}
static int cam_config_mclk_reg(struct cam_sensor_power_ctrl_t *ctrl,
- int32_t index)
+ struct cam_hw_soc_info *soc_info, int32_t index)
{
- struct camera_vreg_t *cam_vreg;
int32_t num_vreg = 0, j = 0, rc = 0, idx = 0;
struct cam_sensor_power_setting *ps = NULL;
struct cam_sensor_power_setting *pd = NULL;
- cam_vreg = ctrl->cam_vreg;
- num_vreg = ctrl->num_vreg;
+ num_vreg = soc_info->num_rgltr;
+
pd = &ctrl->power_down_setting[index];
for (j = 0; j < num_vreg; j++) {
- if (!strcmp(cam_vreg[j].reg_name, "cam_clk")) {
+ if (!strcmp(soc_info->rgltr_name[j], "cam_clk")) {
ps = NULL;
for (idx = 0; idx <
@@ -1278,62 +1185,77 @@ static int cam_config_mclk_reg(struct cam_sensor_power_ctrl_t *ctrl,
}
}
- if (ps != NULL)
- msm_camera_config_single_vreg(
- ctrl->dev,
- &cam_vreg[j],
- (struct regulator **)
- &ps->data[0], 0);
+ if (ps != NULL) {
+ CDBG("%s:%d Disable Regulator\n",
+ __func__, __LINE__);
+
+ rc = cam_soc_util_regulator_disable(
+ soc_info->rgltr[j],
+ soc_info->rgltr_name[j],
+ soc_info->rgltr_min_volt[j],
+ soc_info->rgltr_max_volt[j],
+ soc_info->rgltr_op_mode[j],
+ soc_info->rgltr_delay[j]);
+
+ ps->data[0] =
+ soc_info->rgltr[j];
+ }
}
}
return rc;
}
-int msm_camera_power_down(struct cam_sensor_power_ctrl_t *ctrl)
+int msm_camera_power_down(struct cam_sensor_power_ctrl_t *ctrl,
+ struct cam_hw_soc_info *soc_info)
{
- int index = 0, ret = 0, num_vreg = 0;
+ int index = 0, ret = 0, num_vreg = 0, i;
struct cam_sensor_power_setting *pd = NULL;
struct cam_sensor_power_setting *ps;
- struct camera_vreg_t *cam_vreg;
+ struct msm_camera_gpio_num_info *gpio_num_info = NULL;
CDBG("%s:%d\n", __func__, __LINE__);
- if (!ctrl) {
- pr_err("failed ctrl %pK\n", ctrl);
+ if (!ctrl || !soc_info) {
+ pr_err("%s:%d failed ctrl %pK\n", __func__, __LINE__, ctrl);
return -EINVAL;
}
- cam_vreg = ctrl->cam_vreg;
- num_vreg = ctrl->num_vreg;
+ gpio_num_info = ctrl->gpio_num_info;
+ num_vreg = soc_info->num_rgltr;
for (index = 0; index < ctrl->power_down_setting_size; index++) {
- CDBG("%s index %d\n", __func__, index);
+ CDBG("%s:%d index %d\n", __func__, __LINE__, index);
pd = &ctrl->power_down_setting[index];
ps = NULL;
- CDBG("%s type %d\n", __func__, pd->seq_type);
+ CDBG("%s:%d type %d\n", __func__, __LINE__, pd->seq_type);
switch (pd->seq_type) {
case SENSOR_MCLK:
- ret = cam_config_mclk_reg(ctrl, index);
+ ret = cam_config_mclk_reg(ctrl, soc_info, index);
if (ret < 0) {
pr_err("%s:%d :Error: in config clk reg\n",
__func__, __LINE__);
return ret;
}
- msm_camera_clk_enable(ctrl->dev,
- ctrl->clk_info, ctrl->clk_ptr,
- ctrl->clk_info_size, false);
+ //cam_soc_util_clk_disable_default(soc_info);
+ for (i = soc_info->num_clk - 1; i >= 0; i--) {
+ cam_soc_util_clk_disable(soc_info->clk[i],
+ soc_info->clk_name[i]);
+ }
+
break;
case SENSOR_RESET:
case SENSOR_STANDBY:
case SENSOR_CUSTOM_GPIO1:
case SENSOR_CUSTOM_GPIO2:
- if (!ctrl->gpio_conf->gpio_num_info->valid
- [pd->seq_val])
+
+ if (!gpio_num_info->valid[pd->seq_val])
continue;
+
gpio_set_value_cansleep(
- ctrl->gpio_conf->gpio_num_info->gpio_num
+ gpio_num_info->gpio_num
[pd->seq_val],
(int) pd->config_val);
+
break;
case SENSOR_VANA:
case SENSOR_VDIG:
@@ -1344,33 +1266,43 @@ int msm_camera_power_down(struct cam_sensor_power_ctrl_t *ctrl)
case SENSOR_CUSTOM_REG2:
if (pd->seq_val == INVALID_VREG)
break;
+
ps = msm_camera_get_power_settings(
ctrl, pd->seq_type,
pd->seq_val);
if (ps) {
- if (pd->seq_val < ctrl->num_vreg)
- msm_camera_config_single_vreg(ctrl->dev,
- &ctrl->cam_vreg
- [pd->seq_val],
- (struct regulator **)
- &ps->data[0],
- 0);
+ if (pd->seq_val < num_vreg) {
+ CDBG("%s:%d Disable Regulator\n",
+ __func__, __LINE__);
+ ret = cam_soc_util_regulator_disable(
+ soc_info->rgltr[ps->seq_val],
+ soc_info->rgltr_name[ps->seq_val],
+ soc_info->rgltr_min_volt[ps->seq_val],
+ soc_info->rgltr_max_volt[ps->seq_val],
+ soc_info->rgltr_op_mode[ps->seq_val],
+ soc_info->rgltr_delay[ps->seq_val]);
+
+ ps->data[0] =
+ soc_info->rgltr[ps->seq_val];
+ }
else
pr_err("%s:%d:seq_val:%d > num_vreg: %d\n",
__func__, __LINE__, pd->seq_val,
- ctrl->num_vreg);
+ num_vreg);
} else
- pr_err("%s error in power up/down seq data\n",
- __func__);
+ pr_err("%s:%d error in power up/down seq\n",
+ __func__, __LINE__);
+
ret = msm_cam_sensor_handle_reg_gpio(pd->seq_type,
- ctrl->gpio_conf, GPIOF_OUT_INIT_LOW);
+ gpio_num_info, GPIOF_OUT_INIT_LOW);
+
if (ret < 0)
- pr_err("ERR:%s Error while disabling VREG GPIO\n",
- __func__);
+ pr_err("%s:%d Error disabling VREG GPIO\n",
+ __func__, __LINE__);
break;
default:
- pr_err("%s error power seq type %d\n", __func__,
- pd->seq_type);
+ pr_err("%s:%d error power seq type %d\n", __func__,
+ __LINE__, pd->seq_type);
break;
}
if (pd->delay > 20)
@@ -1390,9 +1322,8 @@ int msm_camera_power_down(struct cam_sensor_power_ctrl_t *ctrl)
}
ctrl->cam_pinctrl_status = 0;
- msm_camera_request_gpio_table(
- ctrl->gpio_conf->cam_gpio_req_tbl,
- ctrl->gpio_conf->cam_gpio_req_tbl_size, 0);
+
+ cam_sensor_util_request_gpio_table(soc_info, 0);
return 0;
}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h
index 7e7fc35..912f06b 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h
@@ -22,6 +22,7 @@
#include <cam_req_mgr_util.h>
#include <cam_req_mgr_interface.h>
#include <cam_mem_mgr.h>
+#include "cam_soc_util.h"
#define INVALID_VREG 100
@@ -29,34 +30,26 @@ int msm_camera_get_dt_power_setting_data(struct device_node *of_node,
struct camera_vreg_t *cam_vreg, int num_vreg,
struct cam_sensor_power_ctrl_t *power_info);
-int msm_camera_get_dt_gpio_req_tbl(struct device_node *of_node,
- struct msm_camera_gpio_conf *gconf, uint16_t *gpio_array,
- uint16_t gpio_array_size);
-
-int msm_camera_init_gpio_pin_tbl(struct device_node *of_node,
- struct msm_camera_gpio_conf *gconf, uint16_t *gpio_array,
- uint16_t gpio_array_size);
-
-int cam_sensor_get_dt_vreg_data(struct device_node *of_node,
- struct camera_vreg_t **cam_vreg, int *num_vreg);
-
-int cam_sensor_core_power_up(struct cam_sensor_power_ctrl_t *ctrl);
-
-int msm_camera_power_down(struct cam_sensor_power_ctrl_t *ctrl);
-
-int msm_camera_fill_vreg_params(struct camera_vreg_t *cam_vreg,
- int num_vreg, struct cam_sensor_power_setting *power_setting,
- uint16_t power_setting_size);
-
int msm_camera_pinctrl_init
(struct msm_pinctrl_info *sensor_pctrl, struct device *dev);
-int32_t msm_sensor_driver_get_gpio_data(
- struct msm_camera_gpio_conf **gpio_conf,
- struct device_node *of_node);
-
int cam_sensor_i2c_pkt_parser(struct i2c_settings_array *i2c_reg_settings,
struct cam_cmd_buf_desc *cmd_desc, int32_t num_cmd_buffers);
int32_t delete_request(struct i2c_settings_array *i2c_array);
+int cam_sensor_util_request_gpio_table(
+ struct cam_hw_soc_info *soc_info, int gpio_en);
+
+int cam_sensor_util_init_gpio_pin_tbl(
+ struct cam_hw_soc_info *soc_info,
+ struct msm_camera_gpio_num_info **pgpio_num_info);
+int cam_sensor_core_power_up(struct cam_sensor_power_ctrl_t *ctrl,
+ struct cam_hw_soc_info *soc_info);
+
+int msm_camera_power_down(struct cam_sensor_power_ctrl_t *ctrl,
+ struct cam_hw_soc_info *soc_info);
+
+int msm_camera_fill_vreg_params(struct cam_hw_soc_info *soc_info,
+ struct cam_sensor_power_setting *power_setting,
+ uint16_t power_setting_size);
#endif /* _CAM_SENSOR_UTIL_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.h
new file mode 100644
index 0000000..5989f1a
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.h
@@ -0,0 +1,57 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_DEBUG_UTIL_H_
+#define _CAM_DEBUG_UTIL_H_
+
+#define DEFAULT 0xFFFF
+#define CAM_CDM (1 << 0)
+#define CAM_CORE (1 << 1)
+#define CAM_CPAS (1 << 2)
+#define CAM_ISP (1 << 3)
+#define CAM_CRM (1 << 4)
+#define CAM_SENSOR (1 << 5)
+#define CAM_SMMU (1 << 6)
+#define CAM_SYNC (1 << 7)
+#define CAM_ICP (1 << 8)
+#define CAM_JPEG (1 << 9)
+#define CAM_FD (1 << 10)
+#define CAM_LRME (1 << 11)
+
+#define GROUP DEFAULT
+#define TRACE_ON 0
+
+#define CAM_ERR(__module, fmt, args...) \
+ do { if (GROUP & __module) { \
+ if (TRACE_ON) \
+ trace_printk(fmt, ##args); \
+ else \
+ pr_err(fmt, ##args); \
+ } } while (0)
+
+#define CAM_WARN(__module, fmt, args...) \
+ do { if (GROUP & __module) { \
+ if (TRACE_ON) \
+ trace_printk(fmt, ##args); \
+ else \
+ pr_warn(fmt, ##args); \
+ } } while (0)
+
+#define CAM_INFO(__module, fmt, args...) \
+ do { if (GROUP & __module) { \
+ if (TRACE_ON) \
+ trace_printk(fmt, ##args); \
+ else \
+ pr_info(fmt, ##args); \
+ } } while (0)
+
+#endif /* _CAM_DEBUG_UTIL_H_ */
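
The macros above gate log output per hardware module and can be redirected wholesale to the trace buffer by flipping TRACE_ON. A minimal usage sketch, not part of the patch (the calling function and message are hypothetical; CAM_ERR and the CAM_SENSOR bit come from the header above):

    #include "cam_debug_util.h"

    /* With GROUP left at DEFAULT the CAM_SENSOR bit is set, so the message
     * below reaches pr_err(); with TRACE_ON set to 1 it would go to
     * trace_printk() instead.
     */
    static int cam_example_apply_setting(int rc)
    {
    	if (rc < 0)
    		CAM_ERR(CAM_SENSOR, "apply setting failed rc=%d\n", rc);
    	return rc;
    }
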
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
index 2dfb90a..0be2aaa 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
@@ -14,11 +14,87 @@
#include <linux/of.h>
#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
#include "cam_soc_util.h"
#undef CDBG
#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+int cam_soc_util_get_level_from_string(const char *string,
+ enum cam_vote_level *level)
+{
+ if (!level)
+ return -EINVAL;
+
+ if (!strcmp(string, "suspend")) {
+ *level = CAM_SUSPEND_VOTE;
+ } else if (!strcmp(string, "minsvs")) {
+ *level = CAM_MINSVS_VOTE;
+ } else if (!strcmp(string, "lowsvs")) {
+ *level = CAM_LOWSVS_VOTE;
+ } else if (!strcmp(string, "svs")) {
+ *level = CAM_SVS_VOTE;
+ } else if (!strcmp(string, "svs_l1")) {
+ *level = CAM_SVSL1_VOTE;
+ } else if (!strcmp(string, "nominal")) {
+ *level = CAM_NOMINAL_VOTE;
+ } else if (!strcmp(string, "turbo")) {
+ *level = CAM_TURBO_VOTE;
+ } else {
+ pr_err("Invalid string %s\n", string);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * cam_soc_util_get_clk_level_to_apply()
+ *
+ * @brief: Get the clock level to apply. If the requested level
+ * is not valid, bump the level to the next available valid
+ * level. If no higher level is found, return failure.
+ *
+ * @soc_info: Device soc struct to be populated
+ * @req_level: Requested level
+ * @apply_level: Level to apply
+ *
+ * @return: success or failure
+ */
+static int cam_soc_util_get_clk_level_to_apply(
+ struct cam_hw_soc_info *soc_info, enum cam_vote_level req_level,
+ enum cam_vote_level *apply_level)
+{
+ if (req_level >= CAM_MAX_VOTE) {
+ pr_err("Invalid clock level parameter %d\n", req_level);
+ return -EINVAL;
+ }
+
+ if (soc_info->clk_level_valid[req_level] == true) {
+ *apply_level = req_level;
+ } else {
+ int i;
+
+ for (i = (req_level + 1); i < CAM_MAX_VOTE; i++)
+ if (soc_info->clk_level_valid[i] == true) {
+ *apply_level = i;
+ break;
+ }
+
+ if (i == CAM_MAX_VOTE) {
+ pr_err("No valid clock level found to apply, req=%d\n",
+ req_level);
+ return -EINVAL;
+ }
+ }
+
+ CDBG("Req level %d, Applying %d\n", req_level, *apply_level);
+
+ return 0;
+}
+
int cam_soc_util_irq_enable(struct cam_hw_soc_info *soc_info)
{
if (!soc_info) {
@@ -53,7 +129,7 @@ int cam_soc_util_irq_disable(struct cam_hw_soc_info *soc_info)
return 0;
}
-int cam_soc_util_clk_enable(struct clk *clk, const char *clk_name,
+int cam_soc_util_set_clk_rate(struct clk *clk, const char *clk_name,
int32_t clk_rate)
{
int rc = 0;
@@ -62,14 +138,13 @@ int cam_soc_util_clk_enable(struct clk *clk, const char *clk_name,
if (!clk || !clk_name)
return -EINVAL;
- CDBG("enable %s, clk %pK rate %d\n",
- clk_name, clk, clk_rate);
+ CDBG("set %s, rate %d\n", clk_name, clk_rate);
if (clk_rate > 0) {
clk_rate_round = clk_round_rate(clk, clk_rate);
CDBG("new_rate %ld\n", clk_rate_round);
if (clk_rate_round < 0) {
- pr_err("%s: round failed for clock %s rc = %ld\n",
- __func__, clk_name, clk_rate_round);
+ pr_err("round failed for clock %s rc = %ld\n",
+ clk_name, clk_rate_round);
return clk_rate_round;
}
rc = clk_set_rate(clk, clk_rate_round);
@@ -93,9 +168,25 @@ int cam_soc_util_clk_enable(struct clk *clk, const char *clk_name,
return rc;
}
}
+
+ return rc;
+}
+
+int cam_soc_util_clk_enable(struct clk *clk, const char *clk_name,
+ int32_t clk_rate)
+{
+ int rc = 0;
+
+ if (!clk || !clk_name)
+ return -EINVAL;
+
+ rc = cam_soc_util_set_clk_rate(clk, clk_name, clk_rate);
+ if (rc)
+ return rc;
+
rc = clk_prepare_enable(clk);
if (rc) {
- pr_err("enable failed for %s\n", clk_name);
+ pr_err("enable failed for %s: rc(%d)\n", clk_name, rc);
return rc;
}
@@ -119,20 +210,32 @@ int cam_soc_util_clk_disable(struct clk *clk, const char *clk_name)
* @brief: This function enables the default clocks present
* in soc_info
*
- * @soc_info: device soc struct to be populated
+ * @soc_info: Device soc struct to be populated
+ * @clk_level: Clk level to apply while enabling
*
* @return: success or failure
*/
-static int cam_soc_util_clk_enable_default(struct cam_hw_soc_info *soc_info)
+static int cam_soc_util_clk_enable_default(struct cam_hw_soc_info *soc_info,
+ enum cam_vote_level clk_level)
{
int i, rc = 0;
+ enum cam_vote_level apply_level;
- if (soc_info->num_clk == 0)
+ if ((soc_info->num_clk == 0) ||
+ (soc_info->num_clk >= CAM_SOC_MAX_CLK)) {
+		pr_err("Invalid number of clocks %d\n", soc_info->num_clk);
+ return -EINVAL;
+ }
+
+ rc = cam_soc_util_get_clk_level_to_apply(soc_info, clk_level,
+ &apply_level);
+ if (rc)
return rc;
for (i = 0; i < soc_info->num_clk; i++) {
rc = cam_soc_util_clk_enable(soc_info->clk[i],
- soc_info->clk_name[i], soc_info->clk_rate[i]);
+ soc_info->clk_name[i],
+ soc_info->clk_rate[apply_level][i]);
if (rc)
goto clk_disable;
}
@@ -165,11 +268,9 @@ static void cam_soc_util_clk_disable_default(struct cam_hw_soc_info *soc_info)
if (soc_info->num_clk == 0)
return;
- for (i = soc_info->num_clk - 1; i >= 0; i--) {
- CDBG("disable %s\n", soc_info->clk_name[i]);
+ for (i = soc_info->num_clk - 1; i >= 0; i--)
cam_soc_util_clk_disable(soc_info->clk[i],
soc_info->clk_name[i]);
- }
}
/**
@@ -186,9 +287,13 @@ static int cam_soc_util_get_dt_clk_info(struct cam_hw_soc_info *soc_info)
{
struct device_node *of_node = NULL;
int count;
- int i, rc;
+ int num_clk_rates, num_clk_levels;
+ int i, j, rc;
+ int32_t num_clk_level_strings;
struct platform_device *pdev = NULL;
const char *src_clk_str = NULL;
+ const char *clk_cntl_lvl_string = NULL;
+ enum cam_vote_level level;
if (!soc_info || !soc_info->pdev)
return -EINVAL;
@@ -224,31 +329,388 @@ static int cam_soc_util_get_dt_clk_info(struct cam_hw_soc_info *soc_info)
}
}
- rc = of_property_read_u32_array(of_node, "clock-rates",
- soc_info->clk_rate, count);
- if (rc) {
- pr_err("reading clock-rates failed");
- return rc;
+ num_clk_rates = of_property_count_u32_elems(of_node, "clock-rates");
+ if (num_clk_rates <= 0) {
+ pr_err("reading clock-rates count failed\n");
+ return -EINVAL;
}
+ if ((num_clk_rates % soc_info->num_clk) != 0) {
+ pr_err("mismatch clk/rates, No of clocks=%d, No of rates=%d\n",
+ soc_info->num_clk, num_clk_rates);
+ return -EINVAL;
+ }
+
+ num_clk_levels = (num_clk_rates / soc_info->num_clk);
+
+ num_clk_level_strings = of_property_count_strings(of_node,
+ "clock-cntl-level");
+ if (num_clk_level_strings != num_clk_levels) {
+ pr_err("Mismatch No of levels=%d, No of level string=%d\n",
+ num_clk_levels, num_clk_level_strings);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_clk_levels; i++) {
+ rc = of_property_read_string_index(of_node,
+ "clock-cntl-level", i, &clk_cntl_lvl_string);
+ if (rc) {
+ pr_err("Error reading clock-cntl-level, rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = cam_soc_util_get_level_from_string(clk_cntl_lvl_string,
+ &level);
+ if (rc)
+ return rc;
+
+ CDBG("[%d] : %s %d\n", i, clk_cntl_lvl_string, level);
+ soc_info->clk_level_valid[level] = true;
+ for (j = 0; j < soc_info->num_clk; j++) {
+ rc = of_property_read_u32_index(of_node, "clock-rates",
+ ((i * soc_info->num_clk) + j),
+ &soc_info->clk_rate[level][j]);
+ if (rc) {
+ pr_err("Error reading clock-rates, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ soc_info->clk_rate[level][j] =
+ (soc_info->clk_rate[level][j] == 0) ?
+ (long)NO_SET_RATE :
+ soc_info->clk_rate[level][j];
+
+ CDBG("soc_info->clk_rate[%d][%d] = %d\n", level, j,
+ soc_info->clk_rate[level][j]);
+ }
+ }
+
+ soc_info->src_clk_idx = -1;
rc = of_property_read_string_index(of_node, "src-clock-name", 0,
&src_clk_str);
- if (rc) {
+ if (rc || !src_clk_str) {
CDBG("No src_clk_str found\n");
- soc_info->src_clk_idx = -1;
rc = 0;
/* Bottom loop is dependent on src_clk_str. So return here */
return rc;
}
for (i = 0; i < soc_info->num_clk; i++) {
- soc_info->clk_rate[i] = (soc_info->clk_rate[i] == 0) ?
- (long)-1 : soc_info->clk_rate[i];
- if (src_clk_str &&
- (strcmp(soc_info->clk_name[i], src_clk_str) == 0)) {
+ if (strcmp(soc_info->clk_name[i], src_clk_str) == 0) {
soc_info->src_clk_idx = i;
+ CDBG("src clock = %s, index = %d\n", src_clk_str, i);
+ break;
}
- CDBG("clk_rate[%d] = %d\n", i, soc_info->clk_rate[i]);
+ }
+
+ return rc;
+}
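
The loops above convert the flat "clock-rates" list into a per-level table: each "clock-cntl-level" string selects a row of clk_rate[][], each row takes soc_info->num_clk entries from "clock-rates", and a zero rate is stored as NO_SET_RATE. A worked sketch, with hypothetical clock names and rates that are not taken from this patch:

    /*
     * Hypothetical DT fragment with three clocks and two levels:
     *   clock-names      = "cam_ahb_clk", "cam_cphy_clk", "cam_csi_clk";
     *   clock-rates      = <0 269333333 384000000>,
     *                      <0 320000000 480000000>;
     *   clock-cntl-level = "svs", "turbo";
     *
     * After cam_soc_util_get_dt_clk_info():
     *   clk_rate[CAM_SVS_VOTE]   = { NO_SET_RATE, 269333333, 384000000 }
     *   clk_rate[CAM_TURBO_VOTE] = { NO_SET_RATE, 320000000, 480000000 }
     * Only CAM_SVS_VOTE and CAM_TURBO_VOTE are marked in clk_level_valid[],
     * so a later request for CAM_NOMINAL_VOTE is bumped up to CAM_TURBO_VOTE
     * by cam_soc_util_get_clk_level_to_apply().
     */
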
+
+int cam_soc_util_set_clk_rate_level(struct cam_hw_soc_info *soc_info,
+ enum cam_vote_level clk_level)
+{
+ int i, rc = 0;
+ enum cam_vote_level apply_level;
+
+ if ((soc_info->num_clk == 0) ||
+ (soc_info->num_clk >= CAM_SOC_MAX_CLK)) {
+		pr_err("Invalid number of clocks %d\n", soc_info->num_clk);
+ return -EINVAL;
+ }
+
+ rc = cam_soc_util_get_clk_level_to_apply(soc_info, clk_level,
+ &apply_level);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < soc_info->num_clk; i++) {
+ rc = cam_soc_util_set_clk_rate(soc_info->clk[i],
+ soc_info->clk_name[i],
+ soc_info->clk_rate[apply_level][i]);
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
+
+static int cam_soc_util_get_dt_gpio_req_tbl(struct device_node *of_node,
+ struct cam_soc_gpio_data *gconf, uint16_t *gpio_array,
+ uint16_t gpio_array_size)
+{
+ int32_t rc = 0, i = 0;
+ uint32_t count = 0;
+ uint32_t *val_array = NULL;
+
+ if (!of_get_property(of_node, "gpio-req-tbl-num", &count))
+ return 0;
+
+ count /= sizeof(uint32_t);
+ if (!count) {
+ pr_err("gpio-req-tbl-num 0\n");
+ return 0;
+ }
+
+ val_array = kcalloc(count, sizeof(uint32_t), GFP_KERNEL);
+ if (!val_array)
+ return -ENOMEM;
+
+ gconf->cam_gpio_req_tbl = kcalloc(count, sizeof(struct gpio),
+ GFP_KERNEL);
+ if (!gconf->cam_gpio_req_tbl) {
+ rc = -ENOMEM;
+ goto free_val_array;
+ }
+ gconf->cam_gpio_req_tbl_size = count;
+
+ rc = of_property_read_u32_array(of_node, "gpio-req-tbl-num",
+ val_array, count);
+ if (rc) {
+ pr_err("failed in reading gpio-req-tbl-num, rc = %d\n", rc);
+ goto free_gpio_req_tbl;
+ }
+
+ for (i = 0; i < count; i++) {
+ if (val_array[i] >= gpio_array_size) {
+			pr_err("gpio req tbl index %d invalid\n", val_array[i]);
+			rc = -EINVAL;
+			goto free_gpio_req_tbl;
+ }
+ gconf->cam_gpio_req_tbl[i].gpio = gpio_array[val_array[i]];
+ CDBG("cam_gpio_req_tbl[%d].gpio = %d\n", i,
+ gconf->cam_gpio_req_tbl[i].gpio);
+ }
+
+ rc = of_property_read_u32_array(of_node, "gpio-req-tbl-flags",
+ val_array, count);
+ if (rc) {
+ pr_err("Failed in gpio-req-tbl-flags, rc %d\n", rc);
+ goto free_gpio_req_tbl;
+ }
+
+ for (i = 0; i < count; i++) {
+ gconf->cam_gpio_req_tbl[i].flags = val_array[i];
+ CDBG("cam_gpio_req_tbl[%d].flags = %ld\n", i,
+ gconf->cam_gpio_req_tbl[i].flags);
+ }
+
+ for (i = 0; i < count; i++) {
+ rc = of_property_read_string_index(of_node,
+ "gpio-req-tbl-label", i,
+ &gconf->cam_gpio_req_tbl[i].label);
+ if (rc) {
+ pr_err("Failed rc %d\n", rc);
+ goto free_gpio_req_tbl;
+ }
+ CDBG("cam_gpio_req_tbl[%d].label = %s\n", i,
+ gconf->cam_gpio_req_tbl[i].label);
+ }
+
+ kfree(val_array);
+
+ return rc;
+
+free_gpio_req_tbl:
+ kfree(gconf->cam_gpio_req_tbl);
+free_val_array:
+ kfree(val_array);
+ gconf->cam_gpio_req_tbl_size = 0;
+
+ return rc;
+}
+
+static int cam_soc_util_get_gpio_info(struct cam_hw_soc_info *soc_info)
+{
+ int32_t rc = 0, i = 0;
+ uint16_t *gpio_array = NULL;
+ int16_t gpio_array_size = 0;
+ struct cam_soc_gpio_data *gconf = NULL;
+ struct device_node *of_node = NULL;
+ struct platform_device *pdev = NULL;
+
+ if (!soc_info || !soc_info->pdev)
+ return -EINVAL;
+
+ pdev = soc_info->pdev;
+ of_node = pdev->dev.of_node;
+
+ /* Validate input parameters */
+ if (!of_node) {
+ pr_err("Invalid param of_node\n");
+ return -EINVAL;
+ }
+
+ gpio_array_size = of_gpio_count(of_node);
+
+ if (gpio_array_size <= 0)
+ return 0;
+
+ CDBG("gpio count %d\n", gpio_array_size);
+
+ gpio_array = kcalloc(gpio_array_size, sizeof(uint16_t), GFP_KERNEL);
+	if (!gpio_array) {
+		rc = -ENOMEM;
+		goto free_gpio_conf;
+	}
+
+ for (i = 0; i < gpio_array_size; i++) {
+ gpio_array[i] = of_get_gpio(of_node, i);
+ CDBG("gpio_array[%d] = %d", i, gpio_array[i]);
+ }
+
+ gconf = kzalloc(sizeof(*gconf), GFP_KERNEL);
+ if (!gconf)
+ return -ENOMEM;
+
+ rc = cam_soc_util_get_dt_gpio_req_tbl(of_node, gconf, gpio_array,
+ gpio_array_size);
+ if (rc) {
+		pr_err("failed in cam_soc_util_get_dt_gpio_req_tbl\n");
+ goto free_gpio_array;
+ }
+
+ gconf->cam_gpio_common_tbl = kcalloc(gpio_array_size,
+ sizeof(struct gpio), GFP_KERNEL);
+ if (!gconf->cam_gpio_common_tbl) {
+ rc = -ENOMEM;
+ goto free_gpio_array;
+ }
+
+	for (i = 0; i < gpio_array_size; i++)
+ gconf->cam_gpio_common_tbl[i].gpio = gpio_array[i];
+
+ gconf->cam_gpio_common_tbl_size = gpio_array_size;
+ soc_info->gpio_data = gconf;
+ kfree(gpio_array);
+
+ return rc;
+
+free_gpio_array:
+ kfree(gpio_array);
+free_gpio_conf:
+ kfree(gconf);
+ soc_info->gpio_data = NULL;
+
+ return rc;
+}
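
cam_soc_util_get_gpio_info() resolves the node's "gpios" list into Linux GPIO numbers and cam_soc_util_get_dt_gpio_req_tbl() then picks the requested subset by index. A sketch of the mapping, with hypothetical property values and labels:

    /*
     * gpios              = <&tlmm 29 0>, <&tlmm 30 0>;
     * gpio-req-tbl-num   = <0 1>;
     * gpio-req-tbl-flags = <1 0>;
     * gpio-req-tbl-label = "CAM_RESET", "CAM_STANDBY";
     *
     * gpio_array[] holds the numbers returned by of_get_gpio() for the two
     * entries, cam_gpio_common_tbl[] mirrors that full list, and
     * cam_gpio_req_tbl[] becomes:
     *   { .gpio = gpio_array[0], .flags = 1, .label = "CAM_RESET"   }
     *   { .gpio = gpio_array[1], .flags = 0, .label = "CAM_STANDBY" }
     * which is what cam_soc_util_request_gpio_table() later passes to
     * gpio_request_one() when gpio_en is true.
     */
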
+
+static int cam_soc_util_request_gpio_table(
+ struct cam_hw_soc_info *soc_info, bool gpio_en)
+{
+ int rc = 0, i = 0;
+ uint8_t size = 0;
+ struct cam_soc_gpio_data *gpio_conf =
+ soc_info->gpio_data;
+ struct gpio *gpio_tbl = NULL;
+
+
+ if (!gpio_conf) {
+ CDBG("No GPIO entry\n");
+ return 0;
+ }
+ if (gpio_conf->cam_gpio_common_tbl_size <= 0) {
+ pr_err("GPIO table size is invalid\n");
+ return -EINVAL;
+ }
+ size = gpio_conf->cam_gpio_req_tbl_size;
+ gpio_tbl = gpio_conf->cam_gpio_req_tbl;
+
+ if (!gpio_tbl || !size) {
+ pr_err("Invalid gpio_tbl %pK / size %d\n",
+ gpio_tbl, size);
+ return -EINVAL;
+ }
+ for (i = 0; i < size; i++) {
+ CDBG("i=%d, gpio=%d dir=%ld\n", i,
+ gpio_tbl[i].gpio, gpio_tbl[i].flags);
+ }
+ if (gpio_en) {
+ for (i = 0; i < size; i++) {
+ rc = gpio_request_one(gpio_tbl[i].gpio,
+ gpio_tbl[i].flags, gpio_tbl[i].label);
+ if (rc) {
+ /*
+				 * If a GPIO request fails, continue to
+				 * request the remaining gpios and output an
+				 * error message for driver bring-up debug.
+ */
+ pr_err("gpio %d:%s request fails\n",
+ gpio_tbl[i].gpio, gpio_tbl[i].label);
+ }
+ }
+ } else {
+ gpio_free_array(gpio_tbl, size);
+ }
+
+ return rc;
+}
+
+static int cam_soc_util_get_dt_regulator_info
+ (struct cam_hw_soc_info *soc_info)
+{
+ int rc = 0, count = 0, i = 0;
+ struct device_node *of_node = NULL;
+ struct platform_device *pdev = NULL;
+
+ if (!soc_info || !soc_info->pdev) {
+ pr_err("Invalid parameters\n");
+ return -EINVAL;
+ }
+
+ pdev = soc_info->pdev;
+ of_node = pdev->dev.of_node;
+
+ soc_info->num_rgltr = 0;
+ count = of_property_count_strings(of_node, "regulator-names");
+ if (count != -EINVAL) {
+ if (count <= 0) {
+ pr_err("no regulators found\n");
+ count = 0;
+ return -EINVAL;
+ }
+
+ soc_info->num_rgltr = count;
+
+ } else {
+ CDBG("No regulators node found\n");
+ return 0;
+ }
+
+ for (i = 0; i < soc_info->num_rgltr; i++) {
+ rc = of_property_read_string_index(of_node,
+ "regulator-names", i, &soc_info->rgltr_name[i]);
+ CDBG("rgltr_name[%d] = %s\n", i, soc_info->rgltr_name[i]);
+ if (rc) {
+ pr_err("no regulator resource at cnt=%d\n", i);
+ return -ENODEV;
+ }
+ }
+
+ if (!of_property_read_bool(of_node, "rgltr-cntrl-support")) {
+ CDBG("No regulator control parameter defined\n");
+ soc_info->rgltr_ctrl_support = false;
+ return 0;
+ }
+
+ soc_info->rgltr_ctrl_support = true;
+
+ rc = of_property_read_u32_array(of_node, "rgltr-min-voltage",
+ soc_info->rgltr_min_volt, soc_info->num_rgltr);
+ if (rc) {
+		pr_err("No minimum voltage value found, rc=%d\n", rc);
+ return -EINVAL;
+ }
+
+ rc = of_property_read_u32_array(of_node, "rgltr-max-voltage",
+ soc_info->rgltr_max_volt, soc_info->num_rgltr);
+ if (rc) {
+		pr_err("No maximum voltage value found, rc=%d\n", rc);
+ return -EINVAL;
+ }
+
+ rc = of_property_read_u32_array(of_node, "rgltr-load-current",
+ soc_info->rgltr_op_mode, soc_info->num_rgltr);
+ if (rc) {
+		pr_err("No load current found, rc=%d\n", rc);
+ return -EINVAL;
}
return rc;
@@ -264,7 +726,6 @@ int cam_soc_util_get_dt_properties(struct cam_hw_soc_info *soc_info)
return -EINVAL;
pdev = soc_info->pdev;
-
of_node = pdev->dev.of_node;
rc = of_property_read_u32(of_node, "cell-index", &soc_info->index);
@@ -272,25 +733,6 @@ int cam_soc_util_get_dt_properties(struct cam_hw_soc_info *soc_info)
pr_err("device %s failed to read cell-index\n", pdev->name);
return rc;
}
-
- count = of_property_count_strings(of_node, "regulator-names");
- if (count <= 0) {
- pr_err("no regulators found\n");
- count = 0;
- }
- soc_info->num_rgltr = count;
-
- for (i = 0; i < soc_info->num_rgltr; i++) {
- rc = of_property_read_string_index(of_node,
- "regulator-names", i, &soc_info->rgltr_name[i]);
- CDBG("rgltr_name[%d] = %s\n", i, soc_info->rgltr_name[i]);
- if (rc) {
- pr_err("no regulator resource at cnt=%d\n", i);
- rc = -ENODEV;
- return rc;
- }
- }
-
count = of_property_count_strings(of_node, "reg-names");
if (count <= 0) {
pr_err("no reg-names found\n");
@@ -330,6 +772,7 @@ int cam_soc_util_get_dt_properties(struct cam_hw_soc_info *soc_info)
&soc_info->irq_name);
if (rc) {
pr_warn("No interrupt line present\n");
+ rc = 0;
} else {
soc_info->irq_line = platform_get_resource_byname(pdev,
IORESOURCE_IRQ, soc_info->irq_name);
@@ -340,7 +783,17 @@ int cam_soc_util_get_dt_properties(struct cam_hw_soc_info *soc_info)
}
}
+ rc = cam_soc_util_get_dt_regulator_info(soc_info);
+ if (rc)
+ return rc;
+
rc = cam_soc_util_get_dt_clk_info(soc_info);
+ if (rc)
+ return rc;
+
+ rc = cam_soc_util_get_gpio_info(soc_info);
+ if (rc)
+ return rc;
return rc;
}
@@ -370,18 +823,207 @@ static int cam_soc_util_get_regulator(struct platform_device *pdev,
return rc;
}
-int cam_soc_util_request_platform_resource(struct cam_hw_soc_info *soc_info,
+int cam_soc_util_regulator_disable(struct regulator *rgltr,
+ const char *rgltr_name, uint32_t rgltr_min_volt,
+ uint32_t rgltr_max_volt, uint32_t rgltr_op_mode,
+ uint32_t rgltr_delay_ms)
+{
+ int32_t rc = 0;
+
+ if (!rgltr) {
+ pr_err("Invalid NULL parameter\n");
+ return -EINVAL;
+ }
+
+ rc = regulator_disable(rgltr);
+ if (rc) {
+ pr_err("%s regulator disable failed\n", rgltr_name);
+ return rc;
+ }
+
+ if (rgltr_delay_ms > 20)
+ msleep(rgltr_delay_ms);
+ else if (rgltr_delay_ms)
+ usleep_range(rgltr_delay_ms * 1000,
+ (rgltr_delay_ms * 1000) + 1000);
+
+ if (regulator_count_voltages(rgltr) > 0) {
+ regulator_set_load(rgltr, 0);
+ regulator_set_voltage(rgltr, 0, rgltr_max_volt);
+ }
+
+ return rc;
+}
+
+
+int cam_soc_util_regulator_enable(struct regulator *rgltr,
+ const char *rgltr_name,
+ uint32_t rgltr_min_volt, uint32_t rgltr_max_volt,
+ uint32_t rgltr_op_mode, uint32_t rgltr_delay)
+{
+ int32_t rc = 0;
+
+ if (!rgltr) {
+ pr_err("Invalid NULL parameter\n");
+ return -EINVAL;
+ }
+
+ if (regulator_count_voltages(rgltr) > 0) {
+ CDBG("voltage min=%d, max=%d\n",
+ rgltr_min_volt, rgltr_max_volt);
+
+ rc = regulator_set_voltage(
+ rgltr, rgltr_min_volt, rgltr_max_volt);
+ if (rc) {
+ pr_err("%s set voltage failed\n", rgltr_name);
+ return rc;
+ }
+
+ rc = regulator_set_load(rgltr, rgltr_op_mode);
+ if (rc) {
+ pr_err("%s set optimum mode failed\n", rgltr_name);
+ return rc;
+ }
+ }
+
+ rc = regulator_enable(rgltr);
+ if (rc) {
+ pr_err("%s regulator_enable failed\n", rgltr_name);
+ return rc;
+ }
+
+ if (rgltr_delay > 20)
+ msleep(rgltr_delay);
+ else if (rgltr_delay)
+ usleep_range(rgltr_delay * 1000,
+ (rgltr_delay * 1000) + 1000);
+
+ return rc;
+}
+
+static int cam_soc_util_request_pinctrl(
+ struct cam_hw_soc_info *soc_info) {
+
+ struct cam_soc_pinctrl_info *device_pctrl = &soc_info->pinctrl_info;
+ struct device *dev = &soc_info->pdev->dev;
+
+ device_pctrl->pinctrl = devm_pinctrl_get(dev);
+ if (IS_ERR_OR_NULL(device_pctrl->pinctrl)) {
+ CDBG("Pinctrl not available\n");
+ device_pctrl->pinctrl = NULL;
+ return 0;
+ }
+ device_pctrl->gpio_state_active =
+ pinctrl_lookup_state(device_pctrl->pinctrl,
+ CAM_SOC_PINCTRL_STATE_DEFAULT);
+ if (IS_ERR_OR_NULL(device_pctrl->gpio_state_active)) {
+ pr_err("Failed to get the active state pinctrl handle\n");
+ device_pctrl->gpio_state_active = NULL;
+ return -EINVAL;
+ }
+ device_pctrl->gpio_state_suspend
+ = pinctrl_lookup_state(device_pctrl->pinctrl,
+ CAM_SOC_PINCTRL_STATE_SLEEP);
+ if (IS_ERR_OR_NULL(device_pctrl->gpio_state_suspend)) {
+ pr_err("Failed to get the suspend state pinctrl handle\n");
+ device_pctrl->gpio_state_suspend = NULL;
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void cam_soc_util_regulator_disable_default(
+ struct cam_hw_soc_info *soc_info)
+{
+ int j = 0;
+ uint32_t num_rgltr = soc_info->num_rgltr;
+
+ for (j = num_rgltr-1; j >= 0; j--) {
+ if (soc_info->rgltr_ctrl_support == true) {
+ cam_soc_util_regulator_disable(soc_info->rgltr[j],
+ soc_info->rgltr_name[j],
+ soc_info->rgltr_min_volt[j],
+ soc_info->rgltr_max_volt[j],
+ soc_info->rgltr_op_mode[j],
+ soc_info->rgltr_delay[j]);
+ } else {
+ if (soc_info->rgltr[j])
+ regulator_disable(soc_info->rgltr[j]);
+ }
+ }
+}
+
+static int cam_soc_util_regulator_enable_default(
+ struct cam_hw_soc_info *soc_info)
+{
+ int j = 0, rc = 0;
+ uint32_t num_rgltr = soc_info->num_rgltr;
+
+ for (j = 0; j < num_rgltr; j++) {
+ if (soc_info->rgltr_ctrl_support == true) {
+ rc = cam_soc_util_regulator_enable(soc_info->rgltr[j],
+ soc_info->rgltr_name[j],
+ soc_info->rgltr_min_volt[j],
+ soc_info->rgltr_max_volt[j],
+ soc_info->rgltr_op_mode[j],
+ soc_info->rgltr_delay[j]);
+ } else {
+ if (soc_info->rgltr[j])
+ rc = regulator_enable(soc_info->rgltr[j]);
+ }
+
+ if (rc) {
+ pr_err("%s enable failed\n", soc_info->rgltr_name[j]);
+ goto disable_rgltr;
+ }
+ }
+
+ return rc;
+disable_rgltr:
+
+ for (j--; j >= 0; j--) {
+ if (soc_info->rgltr_ctrl_support == true) {
+ cam_soc_util_regulator_disable(soc_info->rgltr[j],
+ soc_info->rgltr_name[j],
+ soc_info->rgltr_min_volt[j],
+ soc_info->rgltr_max_volt[j],
+ soc_info->rgltr_op_mode[j],
+ soc_info->rgltr_delay[j]);
+ } else {
+ if (soc_info->rgltr[j])
+ regulator_disable(soc_info->rgltr[j]);
+ }
+ }
+
+ return rc;
+}
+
+int cam_soc_util_request_platform_resource(
+ struct cam_hw_soc_info *soc_info,
irq_handler_t handler, void *irq_data)
{
int i = 0, rc = 0;
struct platform_device *pdev = NULL;
- if (!soc_info || !soc_info->pdev)
+
+ if (!soc_info || !soc_info->pdev) {
+ pr_err("Invalid parameters\n");
return -EINVAL;
+ }
pdev = soc_info->pdev;
for (i = 0; i < soc_info->num_mem_block; i++) {
+ if (soc_info->reserve_mem) {
+ if (!request_mem_region(soc_info->mem_block[i]->start,
+ resource_size(soc_info->mem_block[i]),
+ soc_info->mem_block_name[i])){
+ pr_err("Error Mem Region request Failed:%s\n",
+ soc_info->mem_block_name[i]);
+ rc = -ENOMEM;
+ goto unmap_base;
+ }
+ }
soc_info->reg_map[i].mem_base = ioremap(
soc_info->mem_block[i]->start,
resource_size(soc_info->mem_block[i]));
@@ -398,6 +1040,11 @@ int cam_soc_util_request_platform_resource(struct cam_hw_soc_info *soc_info,
}
for (i = 0; i < soc_info->num_rgltr; i++) {
+ if (soc_info->rgltr_name[i] == NULL) {
+ pr_err("can't find regulator name\n");
+ goto put_regulator;
+ }
+
rc = cam_soc_util_get_regulator(pdev, &soc_info->rgltr[i],
soc_info->rgltr_name[i]);
if (rc)
@@ -408,7 +1055,7 @@ int cam_soc_util_request_platform_resource(struct cam_hw_soc_info *soc_info,
rc = devm_request_irq(&pdev->dev, soc_info->irq_line->start,
handler, IRQF_TRIGGER_RISING,
soc_info->irq_name, irq_data);
- if (rc < 0) {
+ if (rc) {
pr_err("irq request fail\n");
rc = -EBUSY;
goto put_regulator;
@@ -428,6 +1075,16 @@ int cam_soc_util_request_platform_resource(struct cam_hw_soc_info *soc_info,
}
}
+ rc = cam_soc_util_request_pinctrl(soc_info);
+ if (rc)
+ CDBG("Failed in request pinctrl, rc=%d\n", rc);
+
+ rc = cam_soc_util_request_gpio_table(soc_info, true);
+ if (rc) {
+ pr_err("Failed in request gpio table, rc=%d\n", rc);
+ goto put_clk;
+ }
+
return rc;
put_clk:
@@ -461,6 +1118,9 @@ int cam_soc_util_request_platform_resource(struct cam_hw_soc_info *soc_info,
if (i == -1)
i = soc_info->num_reg_map;
for (i = i - 1; i >= 0; i--) {
+ if (soc_info->reserve_mem)
+ release_mem_region(soc_info->mem_block[i]->start,
+ resource_size(soc_info->mem_block[i]));
iounmap(soc_info->reg_map[i].mem_base);
soc_info->reg_map[i].mem_base = NULL;
soc_info->reg_map[i].size = 0;
@@ -474,8 +1134,11 @@ int cam_soc_util_release_platform_resource(struct cam_hw_soc_info *soc_info)
int i;
struct platform_device *pdev = NULL;
- if (!soc_info || !soc_info->pdev)
+ if (!soc_info || !soc_info->pdev) {
+ pr_err("Invalid parameter\n");
return -EINVAL;
+ }
+
pdev = soc_info->pdev;
@@ -503,28 +1166,32 @@ int cam_soc_util_release_platform_resource(struct cam_hw_soc_info *soc_info)
soc_info->irq_line->start, soc_info->irq_data);
}
+ if (soc_info->pinctrl_info.pinctrl)
+ devm_pinctrl_put(soc_info->pinctrl_info.pinctrl);
+
+
+ /* release for gpio */
+ cam_soc_util_request_gpio_table(soc_info, false);
+
return 0;
}
int cam_soc_util_enable_platform_resource(struct cam_hw_soc_info *soc_info,
- bool enable_clocks, bool enable_irq)
+ bool enable_clocks, enum cam_vote_level clk_level, bool enable_irq)
{
- int i, rc = 0;
+ int rc = 0;
if (!soc_info)
return -EINVAL;
- for (i = 0; i < soc_info->num_rgltr; i++) {
- rc = regulator_enable(soc_info->rgltr[i]);
- if (rc) {
- pr_err("Regulator enable %s failed\n",
- soc_info->rgltr_name[i]);
- goto disable_regulator;
- }
+ rc = cam_soc_util_regulator_enable_default(soc_info);
+ if (rc) {
+ pr_err("Regulators enable failed\n");
+ return rc;
}
if (enable_clocks) {
- rc = cam_soc_util_clk_enable_default(soc_info);
+ rc = cam_soc_util_clk_enable_default(soc_info, clk_level);
if (rc)
goto disable_regulator;
}
@@ -535,19 +1202,28 @@ int cam_soc_util_enable_platform_resource(struct cam_hw_soc_info *soc_info,
goto disable_clk;
}
+ if (soc_info->pinctrl_info.pinctrl &&
+ soc_info->pinctrl_info.gpio_state_active) {
+ rc = pinctrl_select_state(soc_info->pinctrl_info.pinctrl,
+ soc_info->pinctrl_info.gpio_state_active);
+
+ if (rc)
+ goto disable_irq;
+ }
+
return rc;
+disable_irq:
+ if (enable_irq)
+ cam_soc_util_irq_disable(soc_info);
+
disable_clk:
if (enable_clocks)
cam_soc_util_clk_disable_default(soc_info);
disable_regulator:
- if (i == -1)
- i = soc_info->num_rgltr;
- for (i = i - 1; i >= 0; i--) {
- if (soc_info->rgltr[i])
- regulator_disable(soc_info->rgltr[i]);
- }
+ cam_soc_util_regulator_disable_default(soc_info);
+
return rc;
}
@@ -555,7 +1231,7 @@ int cam_soc_util_enable_platform_resource(struct cam_hw_soc_info *soc_info,
int cam_soc_util_disable_platform_resource(struct cam_hw_soc_info *soc_info,
bool disable_clocks, bool disble_irq)
{
- int i, rc = 0;
+ int rc = 0;
if (!soc_info)
return -EINVAL;
@@ -563,18 +1239,16 @@ int cam_soc_util_disable_platform_resource(struct cam_hw_soc_info *soc_info,
if (disable_clocks)
cam_soc_util_clk_disable_default(soc_info);
- for (i = soc_info->num_rgltr - 1; i >= 0; i--) {
- rc |= regulator_disable(soc_info->rgltr[i]);
- if (rc) {
- pr_err("Regulator disble %s failed\n",
- soc_info->rgltr_name[i]);
- continue;
- }
- }
+ cam_soc_util_regulator_disable_default(soc_info);
if (disble_irq)
rc |= cam_soc_util_irq_disable(soc_info);
+ if (soc_info->pinctrl_info.pinctrl &&
+ soc_info->pinctrl_info.gpio_state_suspend)
+ rc = pinctrl_select_state(soc_info->pinctrl_info.pinctrl,
+ soc_info->pinctrl_info.gpio_state_suspend);
+
return rc;
}
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
index e556bba..7eb7578 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
@@ -16,6 +16,7 @@
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
@@ -37,6 +38,33 @@
#define CAM_SOC_MAX_CLK 32
/**
+ * enum cam_vote_level - Enum for voting level
+ *
+ * @CAM_SUSPEND_VOTE : Suspend vote
+ * @CAM_MINSVS_VOTE : Min SVS vote
+ * @CAM_LOWSVS_VOTE : Low SVS vote
+ * @CAM_SVS_VOTE : SVS vote
+ * @CAM_SVSL1_VOTE : SVS Plus vote
+ * @CAM_NOMINAL_VOTE : Nominal vote
+ * @CAM_TURBO_VOTE : Turbo vote
+ * @CAM_MAX_VOTE : Max voting level; this is an invalid level.
+ */
+enum cam_vote_level {
+ CAM_SUSPEND_VOTE,
+ CAM_MINSVS_VOTE,
+ CAM_LOWSVS_VOTE,
+ CAM_SVS_VOTE,
+ CAM_SVSL1_VOTE,
+ CAM_NOMINAL_VOTE,
+ CAM_TURBO_VOTE,
+ CAM_MAX_VOTE,
+};
+
+/* pinctrl states */
+#define CAM_SOC_PINCTRL_STATE_SLEEP "cam_suspend"
+#define CAM_SOC_PINCTRL_STATE_DEFAULT "cam_default"
+
+/**
* struct cam_soc_reg_map: Information about the mapped register space
*
* @mem_base: Starting location of MAPPED register space
@@ -51,6 +79,35 @@ struct cam_soc_reg_map {
};
/**
+ * struct cam_soc_pinctrl_info: Information about pinctrl data
+ *
+ * @pinctrl: pinctrl object
+ * @gpio_state_active: default pinctrl state
+ * @gpio_state_suspend: suspend state of pinctrl
+ **/
+struct cam_soc_pinctrl_info {
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *gpio_state_active;
+ struct pinctrl_state *gpio_state_suspend;
+};
+
+/**
+ * struct cam_soc_gpio_data: Information about the gpio pins
+ *
+ * @cam_gpio_common_tbl: List of all the gpios present in the gpios node
+ * @cam_gpio_common_tbl_size: Number of gpios present in the gpios node
+ * in DTSI
+ * @cam_gpio_req_tbl: List of all the requested gpios
+ * @cam_gpio_req_tbl_size: Number of requested gpios
+ **/
+struct cam_soc_gpio_data {
+ struct gpio *cam_gpio_common_tbl;
+ uint8_t cam_gpio_common_tbl_size;
+ struct gpio *cam_gpio_req_tbl;
+ uint8_t cam_gpio_req_tbl_size;
+};
+
+/**
* struct cam_hw_soc_info: Soc information pertaining to specific instance of
* Camera hardware driver module
*
@@ -69,14 +126,25 @@ struct cam_soc_reg_map {
* @num_reg_map: Number of mapped register space associated
* with mem_block. num_reg_map = num_mem_block in
* most cases
+ * @reserve_mem: Whether to reserve memory for Mem blocks
* @num_rgltr: Number of regulators
* @rgltr_name: Array of regulator names
+ * @rgltr_ctrl_support: Whether regulator control is supported
+ * @rgltr_min_volt: Array of minimum regulator voltage
+ * @rgltr_max_volt: Array of maximum regulator voltage
+ * @rgltr_op_mode: Array of regulator operation mode
+ * @rgltr_type: Array of regulator types
* @rgltr: Array of associated regulator resources
+ * @rgltr_delay: Array of regulator delay values
* @num_clk: Number of clocks
* @clk_name: Array of clock names
* @clk: Array of associated clock resources
- * @clk_rate: Array of default clock rates
+ * @clk_rate: 2D array of clock rates representing clock rate
+ * values at different vote levels
* @src_clk_idx: Source clock index that is rate-controllable
+ * @clk_level_valid: Indicates whether corresponding level is valid
+ * @gpio_data: Pointer to gpio info
+ * @pinctrl_info: Pinctrl info
* @soc_private: Soc private data
*
*/
@@ -84,7 +152,6 @@ struct cam_hw_soc_info {
struct platform_device *pdev;
uint32_t hw_version;
uint32_t index;
-
const char *irq_name;
struct resource *irq_line;
void *irq_data;
@@ -95,16 +162,27 @@ struct cam_hw_soc_info {
struct resource *mem_block[CAM_SOC_MAX_BLOCK];
struct cam_soc_reg_map reg_map[CAM_SOC_MAX_BASE];
uint32_t num_reg_map;
+ uint32_t reserve_mem;
uint32_t num_rgltr;
const char *rgltr_name[CAM_SOC_MAX_REGULATOR];
+ uint32_t rgltr_ctrl_support;
+ uint32_t rgltr_min_volt[CAM_SOC_MAX_REGULATOR];
+ uint32_t rgltr_max_volt[CAM_SOC_MAX_REGULATOR];
+ uint32_t rgltr_op_mode[CAM_SOC_MAX_REGULATOR];
+ uint32_t rgltr_type[CAM_SOC_MAX_REGULATOR];
struct regulator *rgltr[CAM_SOC_MAX_REGULATOR];
+ uint32_t rgltr_delay[CAM_SOC_MAX_REGULATOR];
uint32_t num_clk;
const char *clk_name[CAM_SOC_MAX_CLK];
struct clk *clk[CAM_SOC_MAX_CLK];
- int32_t clk_rate[CAM_SOC_MAX_CLK];
+ int32_t clk_rate[CAM_MAX_VOTE][CAM_SOC_MAX_CLK];
int32_t src_clk_idx;
+ bool clk_level_valid[CAM_MAX_VOTE];
+
+ struct cam_soc_gpio_data *gpio_data;
+ struct cam_soc_pinctrl_info pinctrl_info;
void *soc_private;
};
@@ -159,6 +237,18 @@ struct cam_hw_soc_info {
((!__soc_info || __base_index >= __soc_info->num_reg_map) ? \
0 : __soc_info->reg_map[__base_index].size)
+/**
+ * cam_soc_util_get_level_from_string()
+ *
+ * @brief: Get the associated vote level for the input string
+ *
+ * @string: Input string to compare with.
+ * @level: Vote level corresponds to input string.
+ *
+ * @return: Success or failure
+ */
+int cam_soc_util_get_level_from_string(const char *string,
+ enum cam_vote_level *level);
/**
* cam_soc_util_get_dt_properties()
@@ -208,6 +298,9 @@ int cam_soc_util_release_platform_resource(struct cam_hw_soc_info *soc_info);
* TRUE: Enable all clocks in soc_info Now.
* False: Don't enable clocks Now. Driver will
* enable independently.
+ * @clk_level: Clock level to be applied.
+ * Applicable only if enable_clocks is true
+ * Valid range : 0 to (CAM_MAX_VOTE - 1)
* @enable_irq: Boolean flag:
* TRUE: Enable IRQ in soc_info Now.
* False: Don't enable IRQ Now. Driver will
@@ -216,7 +309,7 @@ int cam_soc_util_release_platform_resource(struct cam_hw_soc_info *soc_info);
* @return: Success or failure
*/
int cam_soc_util_enable_platform_resource(struct cam_hw_soc_info *soc_info,
- bool enable_clocks, bool enable_irq);
+ bool enable_clocks, enum cam_vote_level clk_level, bool enable_irq);
/**
* cam_soc_util_disable_platform_resource()
@@ -235,6 +328,20 @@ int cam_soc_util_disable_platform_resource(struct cam_hw_soc_info *soc_info,
bool disable_clocks, bool disable_irq);
/**
+ * cam_soc_util_set_clk_rate()
+ *
+ * @brief: Set the rate on a given clock.
+ *
+ * @clk: Clock that needs to be set
+ * @clk_name: Clock name associated with clk
+ * @clk_rate: Clock rate associated with clk
+ *
+ * @return: success or failure
+ */
+int cam_soc_util_set_clk_rate(struct clk *clk, const char *clk_name,
+ int32_t clk_rate);
+
+/**
* cam_soc_util_clk_enable()
*
* @brief: Enable clock specified in params
@@ -249,6 +356,21 @@ int cam_soc_util_clk_enable(struct clk *clk, const char *clk_name,
int32_t clk_rate);
/**
+ * cam_soc_util_set_clk_rate_level()
+ *
+ * @brief: Apply clock rates for the requested level.
+ * This applies the new requested level for all
+ * the clocks listed in DT based on their values.
+ *
+ * @soc_info: Device soc information
+ * @clk_level: Clock level number to set
+ *
+ * @return: Success or failure
+ */
+int cam_soc_util_set_clk_rate_level(struct cam_hw_soc_info *soc_info,
+ enum cam_vote_level clk_level);
+
+/**
* cam_soc_util_clk_disable()
*
* @brief: Disable clock specified in params
@@ -283,6 +405,45 @@ int cam_soc_util_irq_enable(struct cam_hw_soc_info *soc_info);
int cam_soc_util_irq_disable(struct cam_hw_soc_info *soc_info);
/**
+ * cam_soc_util_regulator_enable()
+ *
+ * @brief: Enable single regulator
+ *
+ * @rgltr: Regulator that needs to be turned ON
+ * @rgltr_name: Associated regulator name
+ * @rgltr_min_volt: Requested minimum voltage
+ * @rgltr_max_volt: Requested maximum voltage
+ * @rgltr_op_mode: Requested load
+ * @rgltr_delay: Requested delay needed after enabling the regulator
+ *
+ * @return: Success or failure
+ */
+int cam_soc_util_regulator_enable(struct regulator *rgltr,
+ const char *rgltr_name,
+ uint32_t rgltr_min_volt, uint32_t rgltr_max_volt,
+ uint32_t rgltr_op_mode, uint32_t rgltr_delay);
+
+/**
+ * cam_soc_util_regulator_disable()
+ *
+ * @brief: Disable single regulator
+ *
+ * @rgltr: Regulator that needs to be turned OFF
+ * @rgltr_name: Associated regulator name
+ * @rgltr_min_volt: Requested minimum voltage
+ * @rgltr_max_volt: Requested maximum voltage
+ * @rgltr_op_mode: Requested load
+ * @rgltr_delay: Requested delay needed after disabling the regulator
+ *
+ * @return: Success or failure
+ */
+int cam_soc_util_regulator_disable(struct regulator *rgltr,
+ const char *rgltr_name,
+ uint32_t rgltr_min_volt, uint32_t rgltr_max_volt,
+ uint32_t rgltr_op_mode, uint32_t rgltr_delay);
+
+
+/**
* cam_soc_util_w()
*
* @brief: Camera SOC util for register write
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/Makefile b/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/Makefile
deleted file mode 100644
index a4df0b8..0000000
--- a/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
-ccflags-y += -Idrivers/media/platform/msm/camera/icp
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/include
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/a5_hw
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/fw_inc
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
-
-obj-$(CONFIG_SPECTRA_CAMERA) += a5_dev.o a5_core.o a5_soc.o
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/Makefile b/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/Makefile
deleted file mode 100644
index 6aeb5f1..0000000
--- a/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
-ccflags-y += -Idrivers/media/platform/msm/camera/icp
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/include
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/bps_hw
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/fw_inc
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
-
-obj-$(CONFIG_SPECTRA_CAMERA) += bps_dev.o bps_core.o bps_soc.o
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/Makefile b/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/Makefile
deleted file mode 100644
index 8af20ae..0000000
--- a/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
-ccflags-y += -Idrivers/media/platform/msm/camera/icp
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/include
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/ipe_hw
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/fw_inc
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
-
-obj-$(CONFIG_SPECTRA_CAMERA) += ipe_dev.o ipe_core.o ipe_soc.o
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
index cb3c526..86dc973 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
@@ -16,8 +16,8 @@
#include "msm_vidc_debug.h"
#include "msm_vidc_clocks.h"
-#define MSM_VIDC_MIN_UBWC_COMPLEXITY_FACTOR 1
-#define MSM_VIDC_MAX_UBWC_COMPLEXITY_FACTOR 4
+#define MSM_VIDC_MIN_UBWC_COMPLEXITY_FACTOR (1 << 16)
+#define MSM_VIDC_MAX_UBWC_COMPLEXITY_FACTOR (4 << 16)
static inline unsigned long int get_ubwc_compression_ratio(
struct ubwc_cr_stats_info_type ubwc_stats_info)
@@ -104,8 +104,12 @@ static int fill_recon_stats(struct msm_vidc_inst *inst,
max_cf = max(max_cf, binfo->CF);
}
mutex_unlock(&inst->reconbufs.lock);
- vote_data->compression_ratio = CR;
+	/* Sanitize CF values from HW. */
+ max_cf = min_t(u32, max_cf, MSM_VIDC_MAX_UBWC_COMPLEXITY_FACTOR);
+ min_cf = max_t(u32, min_cf, MSM_VIDC_MIN_UBWC_COMPLEXITY_FACTOR);
+
+ vote_data->compression_ratio = CR;
vote_data->complexity_factor = max_cf;
vote_data->use_dpb_read = false;
if (inst->clk_data.load <= inst->clk_data.load_norm) {
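+
The new bounds are shifted by 16, which suggests the complexity factor is carried in Q16.16 fixed point, so the clamp operates on raw fixed-point values. A short illustration with made-up numbers:

    /*
     * In Q16.16, 1.0 is 1 << 16 = 65536 and 4.0 is 4 << 16 = 262144.
     * A hardware-reported CF of 1.5 arrives as 98304 and passes through
     * untouched, while a corrupt reading of 5.0 (327680) is clamped down
     * to MSM_VIDC_MAX_UBWC_COMPLEXITY_FACTOR before the bus vote.
     */
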
@@ -114,7 +118,7 @@ static int fill_recon_stats(struct msm_vidc_inst *inst,
}
dprintk(VIDC_DBG,
- "Complression Ratio = %d Complexity Factor = %d\n",
+ "Compression Ratio = %d Complexity Factor = %d\n",
vote_data->compression_ratio,
vote_data->complexity_factor);
@@ -160,7 +164,8 @@ int msm_comm_vote_bus(struct msm_vidc_core *core)
list_for_each_entry_safe(temp, next,
&inst->registeredbufs.list, list) {
if (temp->vvb.vb2_buf.type ==
- V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
+ temp->deferred) {
filled_len = max(filled_len,
temp->vvb.vb2_buf.planes[0].bytesused);
device_addr = temp->smem[0].device_addr;
@@ -260,6 +265,7 @@ static int msm_dcvs_scale_clocks(struct msm_vidc_inst *inst)
int rc = 0;
int fw_pending_bufs = 0;
int total_output_buf = 0;
+ int min_output_buf = 0;
int buffers_outside_fw = 0;
struct msm_vidc_core *core;
struct hal_buffer_requirements *output_buf_req;
@@ -294,16 +300,37 @@ static int msm_dcvs_scale_clocks(struct msm_vidc_inst *inst)
/* Total number of output buffers */
total_output_buf = output_buf_req->buffer_count_actual;
+ min_output_buf = output_buf_req->buffer_count_min;
+
/* Buffers outside FW are with display */
buffers_outside_fw = total_output_buf - fw_pending_bufs;
dprintk(VIDC_PROF,
- "Counts : total_output_buf = %d fw_pending_bufs = %d buffers_outside_fw = %d\n",
- total_output_buf, fw_pending_bufs, buffers_outside_fw);
+ "Counts : total_output_buf = %d Min buffers = %d fw_pending_bufs = %d buffers_outside_fw = %d\n",
+ total_output_buf, min_output_buf, fw_pending_bufs,
+ buffers_outside_fw);
- if (buffers_outside_fw >= dcvs->min_threshold)
- dcvs->load = dcvs->load_low;
- else if (buffers_outside_fw <= dcvs->max_threshold)
+	/*
+	 * PMS decides the clock level based on the algorithm below.
+	 *
+	 * Limits:
+	 * max_threshold : client's extra allocated buffers; the client
+	 *    reserves these buffers for its smooth flow.
+	 * min_output_buf : HW requested buffers for its smooth
+	 *    flow of buffers.
+	 * min_threshold : driver requested extra buffers for PMS.
+	 *
+	 * 1) When buffers outside FW are reaching the client's extra buffers,
+	 *    FW is slow and will impact the pipeline; increase clocks.
+	 * 2) When pending buffers with FW are the same as FW requested,
+	 *    the pipeline has cushion to absorb FW slowness; decrease clocks.
+	 * 3) When neither 1) nor 2) applies, FW is just fast enough to
+	 *    maintain the pipeline; request the right clocks.
+	 */
+
+ if (buffers_outside_fw <= dcvs->max_threshold)
dcvs->load = dcvs->load_high;
+ else if (fw_pending_bufs <= min_output_buf)
+ dcvs->load = dcvs->load_low;
else
dcvs->load = dcvs->load_norm;
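
Read together with the comment above, the new selection is easiest to follow with sample counts (all numbers hypothetical):

    /*
     * Say total_output_buf = 16, min_output_buf = 8, max_threshold = 4.
     *
     *  - fw_pending_bufs = 13: buffers_outside_fw = 3 <= 4, so few buffers
     *    have made it out of FW; FW is lagging, pick load_high.
     *  - fw_pending_bufs = 7:  buffers_outside_fw = 9 > 4 and 7 <= 8, so FW
     *    holds no more than it asked for; there is cushion, pick load_low.
     *  - fw_pending_bufs = 10: neither condition holds, keep load_norm.
     */
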
@@ -393,7 +420,7 @@ static unsigned long msm_vidc_max_freq(struct msm_vidc_core *core)
allowed_clks_tbl = core->resources.allowed_clks_tbl;
freq = allowed_clks_tbl[0].clock_rate;
- dprintk(VIDC_PROF, "Max rate = %lu", freq);
+ dprintk(VIDC_PROF, "Max rate = %lu\n", freq);
return freq;
}
@@ -571,7 +598,8 @@ int msm_comm_scale_clocks(struct msm_vidc_inst *inst)
mutex_lock(&inst->registeredbufs.lock);
list_for_each_entry_safe(temp, next, &inst->registeredbufs.list, list) {
if (temp->vvb.vb2_buf.type ==
- V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
+ temp->deferred) {
filled_len = max(filled_len,
temp->vvb.vb2_buf.planes[0].bytesused);
device_addr = temp->smem[0].device_addr;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index b103d73..ec9777b 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -5076,7 +5076,6 @@ int msm_vidc_check_session_supported(struct msm_vidc_inst *inst)
rc = msm_vidc_load_supported(inst);
if (rc) {
change_inst_state(inst, MSM_VIDC_CORE_INVALID);
- msm_comm_kill_session(inst);
dprintk(VIDC_WARN,
"%s: Hardware is overloaded\n", __func__);
return rc;
@@ -5126,7 +5125,6 @@ int msm_vidc_check_session_supported(struct msm_vidc_inst *inst)
}
if (rc) {
change_inst_state(inst, MSM_VIDC_CORE_INVALID);
- msm_comm_kill_session(inst);
dprintk(VIDC_ERR,
"%s: Resolution unsupported\n", __func__);
}
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c b/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c
index e1907cd0c..7613d1f 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c
@@ -123,15 +123,10 @@ int pvr2_eeprom_analyze(struct pvr2_hdw *hdw)
memset(&tvdata,0,sizeof(tvdata));
eeprom = pvr2_eeprom_fetch(hdw);
- if (!eeprom) return -EINVAL;
+ if (!eeprom)
+ return -EINVAL;
- {
- struct i2c_client fake_client;
- /* Newer version expects a useless client interface */
- fake_client.addr = hdw->eeprom_addr;
- fake_client.adapter = &hdw->i2c_adap;
- tveeprom_hauppauge_analog(&fake_client,&tvdata,eeprom);
- }
+ tveeprom_hauppauge_analog(NULL, &tvdata, eeprom);
trace_eeprom("eeprom assumed v4l tveeprom module");
trace_eeprom("eeprom direct call results:");
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 21900202..9ccf7f5 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -868,7 +868,7 @@ EXPORT_SYMBOL_GPL(vb2_core_create_bufs);
void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
{
- if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
+ if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
return NULL;
return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv);
diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c
index 1aa74c4..9d167c9 100644
--- a/drivers/mfd/omap-usb-tll.c
+++ b/drivers/mfd/omap-usb-tll.c
@@ -377,8 +377,8 @@ int omap_tll_init(struct usbhs_omap_platform_data *pdata)
* and use SDR Mode
*/
reg &= ~(OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE
- | OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF
| OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE);
+ reg |= OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF;
} else if (pdata->port_mode[i] ==
OMAP_EHCI_PORT_MODE_HSIC) {
/*
diff --git a/drivers/mfd/qcom-spmi-pmic.c b/drivers/mfd/qcom-spmi-pmic.c
index 2280770..0977563 100644
--- a/drivers/mfd/qcom-spmi-pmic.c
+++ b/drivers/mfd/qcom-spmi-pmic.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -118,12 +118,23 @@ static const struct regmap_config spmi_regmap_config = {
.fast_io = true,
};
+static const struct regmap_config spmi_regmap_can_sleep_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .max_register = 0xffff,
+ .fast_io = false,
+};
+
static int pmic_spmi_probe(struct spmi_device *sdev)
{
struct device_node *root = sdev->dev.of_node;
struct regmap *regmap;
- regmap = devm_regmap_init_spmi_ext(sdev, &spmi_regmap_config);
+ if (of_property_read_bool(root, "qcom,can-sleep"))
+ regmap = devm_regmap_init_spmi_ext(sdev,
+ &spmi_regmap_can_sleep_config);
+ else
+ regmap = devm_regmap_init_spmi_ext(sdev, &spmi_regmap_config);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
diff --git a/drivers/misc/c2port/c2port-duramar2150.c b/drivers/misc/c2port/c2port-duramar2150.c
index 5484301..3dc61ea 100644
--- a/drivers/misc/c2port/c2port-duramar2150.c
+++ b/drivers/misc/c2port/c2port-duramar2150.c
@@ -129,8 +129,8 @@ static int __init duramar2150_c2port_init(void)
duramar2150_c2port_dev = c2port_device_register("uc",
&duramar2150_c2port_ops, NULL);
- if (!duramar2150_c2port_dev) {
- ret = -ENODEV;
+ if (IS_ERR(duramar2150_c2port_dev)) {
+ ret = PTR_ERR(duramar2150_c2port_dev);
goto free_region;
}
diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c
index 88e4523..fed992e 100644
--- a/drivers/misc/mic/vop/vop_vringh.c
+++ b/drivers/misc/mic/vop/vop_vringh.c
@@ -292,7 +292,6 @@ static int vop_virtio_add_device(struct vop_vdev *vdev,
if (ret) {
dev_err(vop_dev(vdev), "%s %d err %d\n",
__func__, __LINE__, ret);
- kfree(vdev);
return ret;
}
diff --git a/drivers/misc/uid_sys_stats.c b/drivers/misc/uid_sys_stats.c
index 871040e..8bf4c57 100644
--- a/drivers/misc/uid_sys_stats.c
+++ b/drivers/misc/uid_sys_stats.c
@@ -95,7 +95,7 @@ static struct uid_entry *find_or_register_uid(uid_t uid)
static int uid_cputime_show(struct seq_file *m, void *v)
{
- struct uid_entry *uid_entry;
+ struct uid_entry *uid_entry = NULL;
struct task_struct *task, *temp;
struct user_namespace *user_ns = current_user_ns();
cputime_t utime;
@@ -113,7 +113,8 @@ static int uid_cputime_show(struct seq_file *m, void *v)
read_lock(&tasklist_lock);
do_each_thread(temp, task) {
uid = from_kuid_munged(user_ns, task_uid(task));
- uid_entry = find_or_register_uid(uid);
+ if (!uid_entry || uid_entry->uid != uid)
+ uid_entry = find_or_register_uid(uid);
if (!uid_entry) {
read_unlock(&tasklist_lock);
rt_mutex_unlock(&uid_lock);
@@ -252,7 +253,7 @@ static void compute_uid_io_bucket_stats(struct io_stats *io_bucket,
static void update_io_stats_all_locked(void)
{
- struct uid_entry *uid_entry;
+ struct uid_entry *uid_entry = NULL;
struct task_struct *task, *temp;
struct user_namespace *user_ns = current_user_ns();
unsigned long bkt;
@@ -265,7 +266,8 @@ static void update_io_stats_all_locked(void)
rcu_read_lock();
do_each_thread(temp, task) {
uid = from_kuid_munged(user_ns, task_uid(task));
- uid_entry = find_or_register_uid(uid);
+ if (!uid_entry || uid_entry->uid != uid)
+ uid_entry = find_or_register_uid(uid);
if (!uid_entry)
continue;
add_uid_io_stats(uid_entry, task, UID_STATE_TOTAL_CURR);
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index f300435..c6f3496 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1718,6 +1718,8 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
/* We couldn't get a response from the card. Give up. */
if (err) {
+ if (card->err_in_sdr104)
+ return ERR_RETRY;
/* Check if the card is removed */
if (mmc_detect_card_removed(card->host))
return ERR_NOMEDIUM;
@@ -2208,7 +2210,8 @@ static int mmc_blk_err_check(struct mmc_card *card,
brq->data.error == -ETIMEDOUT ||
brq->cmd.error == -EILSEQ ||
brq->cmd.error == -EIO ||
- brq->cmd.error == -ETIMEDOUT))
+ brq->cmd.error == -ETIMEDOUT ||
+ brq->sbc.error))
card->err_in_sdr104 = true;
/*
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index e3696c5..a531cb4 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -398,8 +398,6 @@ int mmc_add_card(struct mmc_card *card)
mmc_hostname(card->host), __func__, ret);
}
- device_enable_async_suspend(&card->dev);
-
ret = device_add(&card->dev);
if (ret)
return ret;
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index c19aa0c..093f28a 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -456,6 +456,22 @@ int mmc_clk_update_freq(struct mmc_host *host,
}
EXPORT_SYMBOL(mmc_clk_update_freq);
+void mmc_recovery_fallback_lower_speed(struct mmc_host *host)
+{
+ if (!host->card)
+ return;
+
+ if (host->sdr104_wa && mmc_card_sd(host->card) &&
+ (host->ios.timing == MMC_TIMING_UHS_SDR104) &&
+ !host->card->sdr104_blocked) {
+ pr_err("%s: %s: blocked SDR104, lower the bus-speed (SDR50 / DDR50)\n",
+ mmc_hostname(host), __func__);
+ mmc_host_clear_sdr104(host);
+ mmc_hw_reset(host);
+ host->card->sdr104_blocked = true;
+ }
+}
+
static int mmc_devfreq_set_target(struct device *dev,
unsigned long *freq, u32 devfreq_flags)
{
@@ -507,6 +523,9 @@ static int mmc_devfreq_set_target(struct device *dev,
if (abort)
goto out;
+ if (mmc_card_sd(host->card) && host->card->sdr104_blocked)
+ goto rel_host;
+
/*
* In case we were able to claim host there is no need to
* defer the frequency change. It will be done now
@@ -515,15 +534,18 @@ static int mmc_devfreq_set_target(struct device *dev,
mmc_host_clk_hold(host);
err = mmc_clk_update_freq(host, *freq, clk_scaling->state);
- if (err && err != -EAGAIN)
+ if (err && err != -EAGAIN) {
pr_err("%s: clock scale to %lu failed with error %d\n",
mmc_hostname(host), *freq, err);
- else
+ mmc_recovery_fallback_lower_speed(host);
+ } else {
pr_debug("%s: clock change to %lu finished successfully (%s)\n",
mmc_hostname(host), *freq, current->comm);
+ }
mmc_host_clk_release(host);
+rel_host:
mmc_release_host(host);
out:
return err;
@@ -544,6 +566,9 @@ void mmc_deferred_scaling(struct mmc_host *host)
if (!host->clk_scaling.enable)
return;
+ if (mmc_card_sd(host->card) && host->card->sdr104_blocked)
+ return;
+
spin_lock_bh(&host->clk_scaling.lock);
if (host->clk_scaling.clk_scaling_in_progress ||
@@ -564,13 +589,15 @@ void mmc_deferred_scaling(struct mmc_host *host)
err = mmc_clk_update_freq(host, target_freq,
host->clk_scaling.state);
- if (err && err != -EAGAIN)
+ if (err && err != -EAGAIN) {
pr_err("%s: failed on deferred scale clocks (%d)\n",
mmc_hostname(host), err);
- else
+ mmc_recovery_fallback_lower_speed(host);
+ } else {
pr_debug("%s: clocks were successfully scaled to %lu (%s)\n",
mmc_hostname(host),
target_freq, current->comm);
+ }
host->clk_scaling.clk_scaling_in_progress = false;
atomic_dec(&host->clk_scaling.devfreq_abort);
}
@@ -1571,8 +1598,13 @@ void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
}
}
if (!cmd->error || !cmd->retries ||
- mmc_card_removed(host->card))
+ mmc_card_removed(host->card)) {
+ if (cmd->error && !cmd->retries &&
+ cmd->opcode != MMC_SEND_STATUS &&
+ cmd->opcode != MMC_SEND_TUNING_BLOCK)
+ mmc_recovery_fallback_lower_speed(host);
break;
+ }
mmc_retune_recheck(host);
@@ -4264,12 +4296,18 @@ int _mmc_detect_card_removed(struct mmc_host *host)
}
if (ret) {
- mmc_card_set_removed(host->card);
- if (host->card->sdr104_blocked) {
- mmc_host_set_sdr104(host);
- host->card->sdr104_blocked = false;
+ if (host->ops->get_cd && host->ops->get_cd(host)) {
+ mmc_recovery_fallback_lower_speed(host);
+ ret = 0;
+ } else {
+ mmc_card_set_removed(host->card);
+ if (host->card->sdr104_blocked) {
+ mmc_host_set_sdr104(host);
+ host->card->sdr104_blocked = false;
+ }
+ pr_debug("%s: card remove detected\n",
+ mmc_hostname(host));
}
- pr_debug("%s: card remove detected\n", mmc_hostname(host));
}
return ret;
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index c92ea77..127ab0f 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -224,7 +224,7 @@ void mmc_host_clk_release(struct mmc_host *host)
host->clk_requests--;
if (mmc_host_may_gate_card(host->card) &&
!host->clk_requests)
- schedule_delayed_work(&host->clk_gate_work,
+ queue_delayed_work(host->clk_gate_wq, &host->clk_gate_work,
msecs_to_jiffies(host->clkgate_delay));
spin_unlock_irqrestore(&host->clk_lock, flags);
}
@@ -283,6 +283,8 @@ static inline void mmc_host_clk_exit(struct mmc_host *host)
mmc_host_clk_gate_delayed(host);
if (host->clk_gated)
mmc_host_clk_hold(host);
+ if (host->clk_gate_wq)
+ destroy_workqueue(host->clk_gate_wq);
/* There should be only one user now */
WARN_ON(host->clk_requests > 1);
}
@@ -298,6 +300,42 @@ static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
pr_err("%s: Failed to create clkgate_delay sysfs entry\n",
mmc_hostname(host));
}
+
+static inline bool mmc_host_clk_gate_wq_init(struct mmc_host *host)
+{
+ char *wq = NULL;
+ int wq_nl;
+ bool ret = true;
+
+ wq_nl = sizeof("mmc_clk_gate/") + sizeof(mmc_hostname(host)) + 1;
+
+ wq = kzalloc(wq_nl, GFP_KERNEL);
+ if (!wq) {
+ ret = false;
+ goto out;
+ }
+
+ snprintf(wq, wq_nl, "mmc_clk_gate/%s", mmc_hostname(host));
+
+ /*
+ * Create a work queue with the WQ_MEM_RECLAIM flag set for the
+ * mmc clock gate work. Because the mmc thread is created with
+ * the PF_MEMALLOC flag set, the kernel checks for the
+ * WQ_MEM_RECLAIM flag when flushing the work queue. If the
+ * flag is not set, a kernel warning is triggered.
+ */
+ host->clk_gate_wq = create_workqueue(wq);
+ if (!host->clk_gate_wq) {
+ ret = false;
+ dev_err(host->parent,
+ "failed to create clock gate work queue\n");
+ }
+
+ kfree(wq);
+out:
+ return ret;
+}
#else
static inline void mmc_host_clk_init(struct mmc_host *host)
@@ -316,6 +354,11 @@ bool mmc_host_may_gate_card(struct mmc_card *card)
{
return false;
}
+
+static inline bool mmc_host_clk_gate_wq_init(struct mmc_host *host)
+{
+ return true;
+}
#endif
void mmc_retune_enable(struct mmc_host *host)
@@ -644,6 +687,11 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
return NULL;
}
+ if (!mmc_host_clk_gate_wq_init(host)) {
+ kfree(host);
+ return NULL;
+ }
+
mmc_host_clk_init(host);
spin_lock_init(&host->lock);
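Note on the clock-gate workqueue added above: a minimal sketch, assuming the v4.x workqueue API, of why the WQ_MEM_RECLAIM requirement described in the comment is satisfied. create_workqueue() is a legacy wrapper that already requests WQ_MEM_RECLAIM with a max_active of 1, roughly:

	/* Illustrative only -- create_workqueue(wq) behaves approximately like: */
	host->clk_gate_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, wq);

so flushing clk_gate_work from the PF_MEMALLOC mmc thread sees a reclaim-capable queue and avoids the warning.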
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 1499d53..e32ed3d 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -333,7 +333,6 @@ int sdio_add_func(struct sdio_func *func)
sdio_set_of_node(func);
sdio_acpi_set_handle(func);
- device_enable_async_suspend(&func->dev);
ret = device_add(&func->dev);
if (ret == 0)
sdio_func_set_present(func);
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index b5c81e4..91ad946 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -77,6 +77,15 @@ EXPORT_SYMBOL(mmc_gpio_get_ro);
int mmc_gpio_get_cd(struct mmc_host *host)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
+ int ret;
+
+ if (host->extcon) {
+ ret = extcon_get_state(host->extcon, EXTCON_MECHANICAL);
+ if (ret < 0)
+ dev_err(mmc_dev(host), "%s: Extcon failed to check card state, ret=%d\n",
+ __func__, ret);
+ return ret;
+ }
if (!ctx || !ctx->cd_gpio)
return -ENOSYS;
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index d0fc165..21dde52 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -1255,6 +1255,13 @@ static int spansion_quad_enable(struct spi_nor *nor)
return -EINVAL;
}
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret) {
+ dev_err(nor->dev,
+ "timeout while writing configuration register\n");
+ return ret;
+ }
+
/* read back and check it */
ret = read_cr(nor);
if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 7ab24c5..05369dc 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -265,6 +265,8 @@ static int gs_cmd_reset(struct gs_usb *gsusb, struct gs_can *gsdev)
sizeof(*dm),
1000);
+ kfree(dm);
+
return rc;
}
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 8af2c88..45bb0fe 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -1153,6 +1153,12 @@ static void init_ring(struct net_device *dev)
if (skb == NULL)
break;
np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(np->pci_dev,
+ np->rx_info[i].mapping)) {
+ dev_kfree_skb(skb);
+ np->rx_info[i].skb = NULL;
+ break;
+ }
/* Grrr, we cannot offset to correctly align the IP header. */
np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
}
@@ -1183,8 +1189,9 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
unsigned int entry;
+ unsigned int prev_tx;
u32 status;
- int i;
+ int i, j;
/*
* be cautious here, wrapping the queue has weird semantics
@@ -1202,6 +1209,7 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
}
#endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
+ prev_tx = np->cur_tx;
entry = np->cur_tx % TX_RING_SIZE;
for (i = 0; i < skb_num_frags(skb); i++) {
int wrap_ring = 0;
@@ -1235,6 +1243,11 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
skb_frag_size(this_frag),
PCI_DMA_TODEVICE);
}
+ if (pci_dma_mapping_error(np->pci_dev,
+ np->tx_info[entry].mapping)) {
+ dev->stats.tx_dropped++;
+ goto err_out;
+ }
np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
np->tx_ring[entry].status = cpu_to_le32(status);
@@ -1269,8 +1282,30 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
return NETDEV_TX_OK;
-}
+err_out:
+ entry = prev_tx % TX_RING_SIZE;
+ np->tx_info[entry].skb = NULL;
+ if (i > 0) {
+ pci_unmap_single(np->pci_dev,
+ np->tx_info[entry].mapping,
+ skb_first_frag_len(skb),
+ PCI_DMA_TODEVICE);
+ np->tx_info[entry].mapping = 0;
+ entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
+ for (j = 1; j < i; j++) {
+ pci_unmap_single(np->pci_dev,
+ np->tx_info[entry].mapping,
+ skb_frag_size(
+ &skb_shinfo(skb)->frags[j-1]),
+ PCI_DMA_TODEVICE);
+ entry++;
+ }
+ }
+ dev_kfree_skb_any(skb);
+ np->cur_tx = prev_tx;
+ return NETDEV_TX_OK;
+}
/* The interrupt handler does all of the Rx thread work and cleans up
after the Tx thread. */
@@ -1570,6 +1605,12 @@ static void refill_rx_ring(struct net_device *dev)
break; /* Better luck next round. */
np->rx_info[entry].mapping =
pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(np->pci_dev,
+ np->rx_info[entry].mapping)) {
+ dev_kfree_skb(skb);
+ np->rx_info[entry].skb = NULL;
+ break;
+ }
np->rx_ring[entry].rxaddr =
cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 48ee411..5cc0f8c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1499,6 +1499,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
netdev_warn(bp->dev, "Link speed %d no longer supported\n",
speed);
}
+ set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
/* fall thru */
}
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
@@ -5110,6 +5111,7 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
struct hwrm_port_phy_qcfg_input req = {0};
struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
u8 link_up = link_info->link_up;
+ u16 diff;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
@@ -5197,6 +5199,18 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
link_info->link_up = 0;
}
mutex_unlock(&bp->hwrm_cmd_lock);
+
+ diff = link_info->support_auto_speeds ^ link_info->advertising;
+ if ((link_info->support_auto_speeds | diff) !=
+ link_info->support_auto_speeds) {
+ /* An advertised speed is no longer supported, so we need to
+ * update the advertisement settings. Caller holds RTNL
+ * so we can modify link settings.
+ */
+ link_info->advertising = link_info->support_auto_speeds;
+ if (link_info->autoneg & BNXT_AUTONEG_SPEED)
+ bnxt_hwrm_set_link_setting(bp, true, false);
+ }
return 0;
}
@@ -6080,29 +6094,37 @@ static void bnxt_timer(unsigned long data)
mod_timer(&bp->timer, jiffies + bp->current_interval);
}
-/* Only called from bnxt_sp_task() */
-static void bnxt_reset(struct bnxt *bp, bool silent)
+static void bnxt_rtnl_lock_sp(struct bnxt *bp)
{
- /* bnxt_reset_task() calls bnxt_close_nic() which waits
- * for BNXT_STATE_IN_SP_TASK to clear.
- * If there is a parallel dev_close(), bnxt_close() may be holding
+ /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
+ * set. If the device is being closed, bnxt_close() may be holding
* rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
* must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
*/
clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
rtnl_lock();
- if (test_bit(BNXT_STATE_OPEN, &bp->state))
- bnxt_reset_task(bp, silent);
+}
+
+static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
+{
set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
rtnl_unlock();
}
+/* Only called from bnxt_sp_task() */
+static void bnxt_reset(struct bnxt *bp, bool silent)
+{
+ bnxt_rtnl_lock_sp(bp);
+ if (test_bit(BNXT_STATE_OPEN, &bp->state))
+ bnxt_reset_task(bp, silent);
+ bnxt_rtnl_unlock_sp(bp);
+}
+
static void bnxt_cfg_ntp_filters(struct bnxt *);
static void bnxt_sp_task(struct work_struct *work)
{
struct bnxt *bp = container_of(work, struct bnxt, sp_task);
- int rc;
set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
smp_mb__after_atomic();
@@ -6116,12 +6138,6 @@ static void bnxt_sp_task(struct work_struct *work)
if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
bnxt_cfg_ntp_filters(bp);
- if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
- rc = bnxt_update_link(bp, true);
- if (rc)
- netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
- rc);
- }
if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
bnxt_hwrm_exec_fwd_req(bp);
if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
@@ -6142,18 +6158,39 @@ static void bnxt_sp_task(struct work_struct *work)
bnxt_hwrm_tunnel_dst_port_free(
bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
}
+ if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
+ bnxt_hwrm_port_qstats(bp);
+
+ /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
+ * must be the last functions to be called before exiting.
+ */
+ if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
+ int rc = 0;
+
+ if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
+ &bp->sp_event))
+ bnxt_hwrm_phy_qcaps(bp);
+
+ bnxt_rtnl_lock_sp(bp);
+ if (test_bit(BNXT_STATE_OPEN, &bp->state))
+ rc = bnxt_update_link(bp, true);
+ bnxt_rtnl_unlock_sp(bp);
+ if (rc)
+ netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
+ rc);
+ }
+ if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
+ bnxt_rtnl_lock_sp(bp);
+ if (test_bit(BNXT_STATE_OPEN, &bp->state))
+ bnxt_get_port_module_status(bp);
+ bnxt_rtnl_unlock_sp(bp);
+ }
if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
bnxt_reset(bp, false);
if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
bnxt_reset(bp, true);
- if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event))
- bnxt_get_port_module_status(bp);
-
- if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
- bnxt_hwrm_port_qstats(bp);
-
smp_mb__before_atomic();
clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
}
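The advertised-speed check added to bnxt_update_link() above is a bitmask test. A minimal sketch with hypothetical speed masks shows how it catches an advertised speed that is no longer supported:

	/* Hypothetical values for illustration only */
	u16 supported   = 0x0030;            /* e.g. 10G | 25G bits          */
	u16 advertising = 0x0070;            /* e.g. 10G | 25G | 40G bits    */
	u16 diff = supported ^ advertising;  /* 0x0040 -- the stale 40G bit  */
	/* (supported | diff) != supported, so advertising is reset to the
	 * supported mask and, when BNXT_AUTONEG_SPEED is set, the new link
	 * settings are pushed to firmware via bnxt_hwrm_set_link_setting(). */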
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 51b164a..666bc06 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1089,6 +1089,7 @@ struct bnxt {
#define BNXT_RESET_TASK_SILENT_SP_EVENT 11
#define BNXT_GENEVE_ADD_PORT_SP_EVENT 12
#define BNXT_GENEVE_DEL_PORT_SP_EVENT 13
+#define BNXT_LINK_SPEED_CHNG_SP_EVENT 14
struct bnxt_pf_info pf;
#ifdef CONFIG_BNXT_SRIOV
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 050e21f..679679a4 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -31,6 +31,7 @@ struct lmac {
u8 lmac_type;
u8 lane_to_sds;
bool use_training;
+ bool autoneg;
bool link_up;
int lmacid; /* ID within BGX */
int lmacid_bd; /* ID on board */
@@ -418,7 +419,17 @@ static int bgx_lmac_sgmii_init(struct bgx *bgx, struct lmac *lmac)
/* power down, reset autoneg, autoneg enable */
cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
cfg &= ~PCS_MRX_CTL_PWR_DN;
- cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN);
+ cfg |= PCS_MRX_CTL_RST_AN;
+ if (lmac->phydev) {
+ cfg |= PCS_MRX_CTL_AN_EN;
+ } else {
+ /* In scenarios where a PHY driver is not present or it is a
+ * non-standard PHY, FW sets AN_EN to tell the Linux driver
+ * whether to do auto-neg and link polling.
+ */
+ if (cfg & PCS_MRX_CTL_AN_EN)
+ lmac->autoneg = true;
+ }
bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);
if (lmac->lmac_type == BGX_MODE_QSGMII) {
@@ -429,7 +440,7 @@ static int bgx_lmac_sgmii_init(struct bgx *bgx, struct lmac *lmac)
return 0;
}
- if (lmac->lmac_type == BGX_MODE_SGMII) {
+ if ((lmac->lmac_type == BGX_MODE_SGMII) && lmac->phydev) {
if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
PCS_MRX_STATUS_AN_CPT, false)) {
dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n");
@@ -623,12 +634,71 @@ static int bgx_xaui_check_link(struct lmac *lmac)
return -1;
}
+static void bgx_poll_for_sgmii_link(struct lmac *lmac)
+{
+ u64 pcs_link, an_result;
+ u8 speed;
+
+ pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid,
+ BGX_GMP_PCS_MRX_STATUS);
+
+ /* Link state bit is sticky, read it again */
+ if (!(pcs_link & PCS_MRX_STATUS_LINK))
+ pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid,
+ BGX_GMP_PCS_MRX_STATUS);
+
+ if (bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_GMP_PCS_MRX_STATUS,
+ PCS_MRX_STATUS_AN_CPT, false)) {
+ lmac->link_up = false;
+ lmac->last_speed = SPEED_UNKNOWN;
+ lmac->last_duplex = DUPLEX_UNKNOWN;
+ goto next_poll;
+ }
+
+ lmac->link_up = ((pcs_link & PCS_MRX_STATUS_LINK) != 0) ? true : false;
+ an_result = bgx_reg_read(lmac->bgx, lmac->lmacid,
+ BGX_GMP_PCS_ANX_AN_RESULTS);
+
+ speed = (an_result >> 3) & 0x3;
+ lmac->last_duplex = (an_result >> 1) & 0x1;
+ switch (speed) {
+ case 0:
+ lmac->last_speed = 10;
+ break;
+ case 1:
+ lmac->last_speed = 100;
+ break;
+ case 2:
+ lmac->last_speed = 1000;
+ break;
+ default:
+ lmac->link_up = false;
+ lmac->last_speed = SPEED_UNKNOWN;
+ lmac->last_duplex = DUPLEX_UNKNOWN;
+ break;
+ }
+
+next_poll:
+
+ if (lmac->last_link != lmac->link_up) {
+ if (lmac->link_up)
+ bgx_sgmii_change_link_state(lmac);
+ lmac->last_link = lmac->link_up;
+ }
+
+ queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 3);
+}
+
static void bgx_poll_for_link(struct work_struct *work)
{
struct lmac *lmac;
u64 spu_link, smu_link;
lmac = container_of(work, struct lmac, dwork.work);
+ if (lmac->is_sgmii) {
+ bgx_poll_for_sgmii_link(lmac);
+ return;
+ }
/* Receive link is latching low. Force it high and verify it */
bgx_reg_modify(lmac->bgx, lmac->lmacid,
@@ -720,9 +790,21 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
(lmac->lmac_type != BGX_MODE_XLAUI) &&
(lmac->lmac_type != BGX_MODE_40G_KR) &&
(lmac->lmac_type != BGX_MODE_10G_KR)) {
- if (!lmac->phydev)
- return -ENODEV;
-
+ if (!lmac->phydev) {
+ if (lmac->autoneg) {
+ bgx_reg_write(bgx, lmacid,
+ BGX_GMP_PCS_LINKX_TIMER,
+ PCS_LINKX_TIMER_COUNT);
+ goto poll;
+ } else {
+ /* Default to below link speed and duplex */
+ lmac->link_up = true;
+ lmac->last_speed = 1000;
+ lmac->last_duplex = 1;
+ bgx_sgmii_change_link_state(lmac);
+ return 0;
+ }
+ }
lmac->phydev->dev_flags = 0;
if (phy_connect_direct(&lmac->netdev, lmac->phydev,
@@ -731,15 +813,17 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
return -ENODEV;
phy_start_aneg(lmac->phydev);
- } else {
- lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
- WQ_MEM_RECLAIM, 1);
- if (!lmac->check_link)
- return -ENOMEM;
- INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
- queue_delayed_work(lmac->check_link, &lmac->dwork, 0);
+ return 0;
}
+poll:
+ lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
+ WQ_MEM_RECLAIM, 1);
+ if (!lmac->check_link)
+ return -ENOMEM;
+ INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
+ queue_delayed_work(lmac->check_link, &lmac->dwork, 0);
+
return 0;
}
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index 01cc7c8..1143e957 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -144,10 +144,15 @@
#define PCS_MRX_CTL_LOOPBACK1 BIT_ULL(14)
#define PCS_MRX_CTL_RESET BIT_ULL(15)
#define BGX_GMP_PCS_MRX_STATUS 0x30008
+#define PCS_MRX_STATUS_LINK BIT_ULL(2)
#define PCS_MRX_STATUS_AN_CPT BIT_ULL(5)
+#define BGX_GMP_PCS_ANX_ADV 0x30010
#define BGX_GMP_PCS_ANX_AN_RESULTS 0x30020
+#define BGX_GMP_PCS_LINKX_TIMER 0x30040
+#define PCS_LINKX_TIMER_COUNT 0x1E84
#define BGX_GMP_PCS_SGM_AN_ADV 0x30068
#define BGX_GMP_PCS_MISCX_CTL 0x30078
+#define PCS_MISC_CTL_MODE BIT_ULL(8)
#define PCS_MISC_CTL_DISP_EN BIT_ULL(13)
#define PCS_MISC_CTL_GMX_ENO BIT_ULL(11)
#define PCS_MISC_CTL_SAMP_PT_MASK 0x7Full
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index f9c2feb..0c2a32a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2201,9 +2201,10 @@ static int cxgb_up(struct adapter *adap)
{
int err;
+ mutex_lock(&uld_mutex);
err = setup_sge_queues(adap);
if (err)
- goto out;
+ goto rel_lock;
err = setup_rss(adap);
if (err)
goto freeq;
@@ -2227,7 +2228,6 @@ static int cxgb_up(struct adapter *adap)
goto irq_err;
}
- mutex_lock(&uld_mutex);
enable_rx(adap);
t4_sge_start(adap);
t4_intr_enable(adap);
@@ -2240,13 +2240,15 @@ static int cxgb_up(struct adapter *adap)
#endif
/* Initialize hash mac addr list*/
INIT_LIST_HEAD(&adap->mac_hlist);
- out:
return err;
+
irq_err:
dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
freeq:
t4_free_sge_resources(adap);
- goto out;
+ rel_lock:
+ mutex_unlock(&uld_mutex);
+ return err;
}
static void cxgb_down(struct adapter *adapter)
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 9061c2f..d391bee 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2007,8 +2007,8 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
if (!rxb->page)
continue;
- dma_unmap_single(rx_queue->dev, rxb->dma,
- PAGE_SIZE, DMA_FROM_DEVICE);
+ dma_unmap_page(rx_queue->dev, rxb->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
__free_page(rxb->page);
rxb->page = NULL;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index dff7b60..c06845b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -304,8 +304,8 @@ int hns_nic_net_xmit_hw(struct net_device *ndev,
struct hns_nic_ring_data *ring_data)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
- struct device *dev = priv->dev;
struct hnae_ring *ring = ring_data->ring;
+ struct device *dev = ring_to_dev(ring);
struct netdev_queue *dev_queue;
struct skb_frag_struct *frag;
int buf_num;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 0fbf686..9f2184b 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -189,9 +189,10 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
}
ltb->map_id = adapter->map_id;
adapter->map_id++;
+
+ init_completion(&adapter->fw_done);
send_request_map(adapter, ltb->addr,
ltb->size, ltb->map_id);
- init_completion(&adapter->fw_done);
wait_for_completion(&adapter->fw_done);
return 0;
}
@@ -505,7 +506,7 @@ static int ibmvnic_open(struct net_device *netdev)
adapter->rx_pool = NULL;
rx_pool_arr_alloc_failed:
for (i = 0; i < adapter->req_rx_queues; i++)
- napi_enable(&adapter->napi[i]);
+ napi_disable(&adapter->napi[i]);
alloc_napi_failed:
return -ENOMEM;
}
@@ -1133,10 +1134,10 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
crq.request_statistics.len =
cpu_to_be32(sizeof(struct ibmvnic_statistics));
- ibmvnic_send_crq(adapter, &crq);
/* Wait for data to be written */
init_completion(&adapter->stats_done);
+ ibmvnic_send_crq(adapter, &crq);
wait_for_completion(&adapter->stats_done);
for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
@@ -2197,12 +2198,12 @@ static void handle_error_info_rsp(union ibmvnic_crq *crq,
if (!found) {
dev_err(dev, "Couldn't find error id %x\n",
- crq->request_error_rsp.error_id);
+ be32_to_cpu(crq->request_error_rsp.error_id));
return;
}
dev_err(dev, "Detailed info for error id %x:",
- crq->request_error_rsp.error_id);
+ be32_to_cpu(crq->request_error_rsp.error_id));
for (i = 0; i < error_buff->len; i++) {
pr_cont("%02x", (int)error_buff->buff[i]);
@@ -2281,8 +2282,8 @@ static void handle_error_indication(union ibmvnic_crq *crq,
dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
crq->error_indication.
flags & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
- crq->error_indication.error_id,
- crq->error_indication.error_cause);
+ be32_to_cpu(crq->error_indication.error_id),
+ be16_to_cpu(crq->error_indication.error_cause));
error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
if (!error_buff)
@@ -2400,10 +2401,10 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
case PARTIALSUCCESS:
dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
*req_value,
- (long int)be32_to_cpu(crq->request_capability_rsp.
+ (long int)be64_to_cpu(crq->request_capability_rsp.
number), name);
release_sub_crqs_no_irqs(adapter);
- *req_value = be32_to_cpu(crq->request_capability_rsp.number);
+ *req_value = be64_to_cpu(crq->request_capability_rsp.number);
init_sub_crqs(adapter, 1);
return;
default:
@@ -2809,9 +2810,9 @@ static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len,
crq.collect_fw_trace.correlator = adapter->ras_comps[num].correlator;
crq.collect_fw_trace.ioba = cpu_to_be32(trace_tok);
crq.collect_fw_trace.len = adapter->ras_comps[num].trace_buff_size;
- ibmvnic_send_crq(adapter, &crq);
init_completion(&adapter->fw_done);
+ ibmvnic_send_crq(adapter, &crq);
wait_for_completion(&adapter->fw_done);
if (*ppos + len > be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
@@ -3591,9 +3592,9 @@ static int ibmvnic_dump_show(struct seq_file *seq, void *v)
memset(&crq, 0, sizeof(crq));
crq.request_dump_size.first = IBMVNIC_CRQ_CMD;
crq.request_dump_size.cmd = REQUEST_DUMP_SIZE;
- ibmvnic_send_crq(adapter, &crq);
init_completion(&adapter->fw_done);
+ ibmvnic_send_crq(adapter, &crq);
wait_for_completion(&adapter->fw_done);
seq_write(seq, adapter->dump_data, adapter->dump_data_size);
@@ -3639,8 +3640,8 @@ static void handle_crq_init_rsp(struct work_struct *work)
}
}
- send_version_xchg(adapter);
reinit_completion(&adapter->init_done);
+ send_version_xchg(adapter);
if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
dev_err(dev, "Passive init timeout\n");
goto task_failed;
@@ -3650,9 +3651,9 @@ static void handle_crq_init_rsp(struct work_struct *work)
if (adapter->renegotiate) {
adapter->renegotiate = false;
release_sub_crqs_no_irqs(adapter);
- send_cap_queries(adapter);
reinit_completion(&adapter->init_done);
+ send_cap_queries(adapter);
if (!wait_for_completion_timeout(&adapter->init_done,
timeout)) {
dev_err(dev, "Passive init timeout\n");
@@ -3780,9 +3781,9 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
adapter->debugfs_dump = ent;
}
}
- ibmvnic_send_crq_init(adapter);
init_completion(&adapter->init_done);
+ ibmvnic_send_crq_init(adapter);
if (!wait_for_completion_timeout(&adapter->init_done, timeout))
return 0;
@@ -3790,9 +3791,9 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
if (adapter->renegotiate) {
adapter->renegotiate = false;
release_sub_crqs_no_irqs(adapter);
- send_cap_queries(adapter);
reinit_completion(&adapter->init_done);
+ send_cap_queries(adapter);
if (!wait_for_completion_timeout(&adapter->init_done,
timeout))
return 0;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 86a89cb..4832223 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -2518,7 +2518,7 @@ static int mtk_remove(struct platform_device *pdev)
}
const struct of_device_id of_mtk_match[] = {
- { .compatible = "mediatek,mt7623-eth" },
+ { .compatible = "mediatek,mt2701-eth" },
{},
};
MODULE_DEVICE_TABLE(of, of_mtk_match);
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index c7e9399..53daa6c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -158,7 +158,7 @@ static int mlx4_reset_slave(struct mlx4_dev *dev)
return -ETIMEDOUT;
}
-static int mlx4_comm_internal_err(u32 slave_read)
+int mlx4_comm_internal_err(u32 slave_read)
{
return (u32)COMM_CHAN_EVENT_INTERNAL_ERR ==
(slave_read & (u32)COMM_CHAN_EVENT_INTERNAL_ERR) ? 1 : 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index 0e8b7c4..8258d08 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -222,6 +222,18 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
return;
mlx4_stop_catas_poll(dev);
+ if (dev->persist->interface_state & MLX4_INTERFACE_STATE_DELETION &&
+ mlx4_is_slave(dev)) {
+ /* In mlx4_remove_one on a VF */
+ u32 slave_read =
+ swab32(readl(&mlx4_priv(dev)->mfunc.comm->slave_read));
+
+ if (mlx4_comm_internal_err(slave_read)) {
+ mlx4_dbg(dev, "%s: comm channel is down, entering error state.\n",
+ __func__);
+ mlx4_enter_error_state(dev->persist);
+ }
+ }
mutex_lock(&intf_mutex);
list_for_each_entry(intf, &intf_list, list)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 88ee7d8..086920b6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -1220,6 +1220,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type);
void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
void mlx4_enter_error_state(struct mlx4_dev_persistent *persist);
+int mlx4_comm_internal_err(u32 slave_read);
int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
enum mlx4_port_type *type);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 36fbc6b..8cd7227 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -1081,7 +1081,7 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
MLX5_FLOW_NAMESPACE_KERNEL);
if (!priv->fs.ns)
- return -EINVAL;
+ return -EOPNOTSUPP;
err = mlx5e_arfs_create_tables(priv);
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index c7011ef..a8966e6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -352,7 +352,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
if (!root_ns) {
esw_warn(dev, "Failed to get FDB flow namespace\n");
- return -ENOMEM;
+ return -EOPNOTSUPP;
}
flow_group_in = mlx5_vzalloc(inlen);
@@ -961,7 +961,7 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
if (!root_ns) {
esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
- return -EIO;
+ return -EOPNOTSUPP;
}
flow_group_in = mlx5_vzalloc(inlen);
@@ -1078,7 +1078,7 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
if (!root_ns) {
esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
- return -EIO;
+ return -EOPNOTSUPP;
}
flow_group_in = mlx5_vzalloc(inlen);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index d239f5d..b08b9e2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -414,6 +414,7 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
if (!root_ns) {
esw_warn(dev, "Failed to get FDB flow namespace\n");
+ err = -EOPNOTSUPP;
goto ns_err;
}
@@ -520,7 +521,7 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw)
ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
if (!ns) {
esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
- return -ENOMEM;
+ return -EOPNOTSUPP;
}
ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0);
@@ -639,7 +640,7 @@ static int esw_offloads_start(struct mlx5_eswitch *esw)
esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
if (err1)
- esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err);
+ esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
}
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 7e20e4b..4de3c28 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1678,7 +1678,7 @@ static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
struct mlx5_flow_table *ft;
ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
- if (!ns)
+ if (WARN_ON(!ns))
return -EINVAL;
ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL);
if (IS_ERR(ft)) {
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 862f18e..510ff62 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -179,6 +179,49 @@ static struct mdiobb_ops bb_ops = {
.get_mdio_data = ravb_get_mdio_data,
};
+/* Free TX skb function for AVB-IP */
+static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &priv->stats[q];
+ struct ravb_tx_desc *desc;
+ int free_num = 0;
+ int entry;
+ u32 size;
+
+ for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
+ bool txed;
+
+ entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
+ NUM_TX_DESC);
+ desc = &priv->tx_ring[q][entry];
+ txed = desc->die_dt == DT_FEMPTY;
+ if (free_txed_only && !txed)
+ break;
+ /* Descriptor type must be checked before all other reads */
+ dma_rmb();
+ size = le16_to_cpu(desc->ds_tagl) & TX_DS;
+ /* Free the original skb. */
+ if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
+ dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
+ size, DMA_TO_DEVICE);
+ /* Last packet descriptor? */
+ if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
+ entry /= NUM_TX_DESC;
+ dev_kfree_skb_any(priv->tx_skb[q][entry]);
+ priv->tx_skb[q][entry] = NULL;
+ if (txed)
+ stats->tx_packets++;
+ }
+ free_num++;
+ }
+ if (txed)
+ stats->tx_bytes += size;
+ desc->die_dt = DT_EEMPTY;
+ }
+ return free_num;
+}
+
/* Free skb's and DMA buffers for Ethernet AVB */
static void ravb_ring_free(struct net_device *ndev, int q)
{
@@ -194,19 +237,21 @@ static void ravb_ring_free(struct net_device *ndev, int q)
kfree(priv->rx_skb[q]);
priv->rx_skb[q] = NULL;
- /* Free TX skb ringbuffer */
- if (priv->tx_skb[q]) {
- for (i = 0; i < priv->num_tx_ring[q]; i++)
- dev_kfree_skb(priv->tx_skb[q][i]);
- }
- kfree(priv->tx_skb[q]);
- priv->tx_skb[q] = NULL;
-
/* Free aligned TX buffers */
kfree(priv->tx_align[q]);
priv->tx_align[q] = NULL;
if (priv->rx_ring[q]) {
+ for (i = 0; i < priv->num_rx_ring[q]; i++) {
+ struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
+
+ if (!dma_mapping_error(ndev->dev.parent,
+ le32_to_cpu(desc->dptr)))
+ dma_unmap_single(ndev->dev.parent,
+ le32_to_cpu(desc->dptr),
+ PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
+ }
ring_size = sizeof(struct ravb_ex_rx_desc) *
(priv->num_rx_ring[q] + 1);
dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
@@ -215,12 +260,20 @@ static void ravb_ring_free(struct net_device *ndev, int q)
}
if (priv->tx_ring[q]) {
+ ravb_tx_free(ndev, q, false);
+
ring_size = sizeof(struct ravb_tx_desc) *
(priv->num_tx_ring[q] * NUM_TX_DESC + 1);
dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
priv->tx_desc_dma[q]);
priv->tx_ring[q] = NULL;
}
+
+ /* Free TX skb ringbuffer.
+ * SKBs are freed by ravb_tx_free() call above.
+ */
+ kfree(priv->tx_skb[q]);
+ priv->tx_skb[q] = NULL;
}
/* Format skb and descriptor buffer for Ethernet AVB */
@@ -431,44 +484,6 @@ static int ravb_dmac_init(struct net_device *ndev)
return 0;
}
-/* Free TX skb function for AVB-IP */
-static int ravb_tx_free(struct net_device *ndev, int q)
-{
- struct ravb_private *priv = netdev_priv(ndev);
- struct net_device_stats *stats = &priv->stats[q];
- struct ravb_tx_desc *desc;
- int free_num = 0;
- int entry;
- u32 size;
-
- for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
- entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
- NUM_TX_DESC);
- desc = &priv->tx_ring[q][entry];
- if (desc->die_dt != DT_FEMPTY)
- break;
- /* Descriptor type must be checked before all other reads */
- dma_rmb();
- size = le16_to_cpu(desc->ds_tagl) & TX_DS;
- /* Free the original skb. */
- if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
- dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
- size, DMA_TO_DEVICE);
- /* Last packet descriptor? */
- if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
- entry /= NUM_TX_DESC;
- dev_kfree_skb_any(priv->tx_skb[q][entry]);
- priv->tx_skb[q][entry] = NULL;
- stats->tx_packets++;
- }
- free_num++;
- }
- stats->tx_bytes += size;
- desc->die_dt = DT_EEMPTY;
- }
- return free_num;
-}
-
static void ravb_get_tx_tstamp(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
@@ -902,7 +917,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
spin_lock_irqsave(&priv->lock, flags);
/* Clear TX interrupt */
ravb_write(ndev, ~mask, TIS);
- ravb_tx_free(ndev, q);
+ ravb_tx_free(ndev, q, true);
netif_wake_subqueue(ndev, q);
mmiowb();
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1571,7 +1586,8 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
priv->cur_tx[q] += NUM_TX_DESC;
if (priv->cur_tx[q] - priv->dirty_tx[q] >
- (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q))
+ (priv->num_tx_ring[q] - 1) * NUM_TX_DESC &&
+ !ravb_tx_free(ndev, q, true))
netif_stop_subqueue(ndev, q);
exit:
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 93dc10b..aa02a03 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -100,6 +100,14 @@
/* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */
#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32) adr)) % ALIGNMENT)
+#ifdef __BIG_ENDIAN
+#define xemaclite_readl ioread32be
+#define xemaclite_writel iowrite32be
+#else
+#define xemaclite_readl ioread32
+#define xemaclite_writel iowrite32
+#endif
+
/**
* struct net_local - Our private per device data
* @ndev: instance of the network device
@@ -156,15 +164,15 @@ static void xemaclite_enable_interrupts(struct net_local *drvdata)
u32 reg_data;
/* Enable the Tx interrupts for the first Buffer */
- reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET);
- __raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK,
- drvdata->base_addr + XEL_TSR_OFFSET);
+ reg_data = xemaclite_readl(drvdata->base_addr + XEL_TSR_OFFSET);
+ xemaclite_writel(reg_data | XEL_TSR_XMIT_IE_MASK,
+ drvdata->base_addr + XEL_TSR_OFFSET);
/* Enable the Rx interrupts for the first buffer */
- __raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET);
+ xemaclite_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET);
/* Enable the Global Interrupt Enable */
- __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
+ xemaclite_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
}
/**
@@ -179,17 +187,17 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata)
u32 reg_data;
/* Disable the Global Interrupt Enable */
- __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
+ xemaclite_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
/* Disable the Tx interrupts for the first buffer */
- reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET);
- __raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK),
- drvdata->base_addr + XEL_TSR_OFFSET);
+ reg_data = xemaclite_readl(drvdata->base_addr + XEL_TSR_OFFSET);
+ xemaclite_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK),
+ drvdata->base_addr + XEL_TSR_OFFSET);
/* Disable the Rx interrupts for the first buffer */
- reg_data = __raw_readl(drvdata->base_addr + XEL_RSR_OFFSET);
- __raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK),
- drvdata->base_addr + XEL_RSR_OFFSET);
+ reg_data = xemaclite_readl(drvdata->base_addr + XEL_RSR_OFFSET);
+ xemaclite_writel(reg_data & (~XEL_RSR_RECV_IE_MASK),
+ drvdata->base_addr + XEL_RSR_OFFSET);
}
/**
@@ -321,7 +329,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
byte_count = ETH_FRAME_LEN;
/* Check if the expected buffer is available */
- reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
+ reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
XEL_TSR_XMIT_ACTIVE_MASK)) == 0) {
@@ -334,7 +342,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
addr = (void __iomem __force *)((u32 __force)addr ^
XEL_BUFFER_OFFSET);
- reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
+ reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
XEL_TSR_XMIT_ACTIVE_MASK)) != 0)
@@ -345,16 +353,16 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
/* Write the frame to the buffer */
xemaclite_aligned_write(data, (u32 __force *) addr, byte_count);
- __raw_writel((byte_count & XEL_TPLR_LENGTH_MASK),
- addr + XEL_TPLR_OFFSET);
+ xemaclite_writel((byte_count & XEL_TPLR_LENGTH_MASK),
+ addr + XEL_TPLR_OFFSET);
/* Update the Tx Status Register to indicate that there is a
* frame to send. Set the XEL_TSR_XMIT_ACTIVE_MASK flag which
* is used by the interrupt handler to check whether a frame
* has been transmitted */
- reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
+ reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK);
- __raw_writel(reg_data, addr + XEL_TSR_OFFSET);
+ xemaclite_writel(reg_data, addr + XEL_TSR_OFFSET);
return 0;
}
@@ -369,7 +377,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
*
* Return: Total number of bytes received
*/
-static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
+static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
{
void __iomem *addr;
u16 length, proto_type;
@@ -379,7 +387,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
addr = (drvdata->base_addr + drvdata->next_rx_buf_to_use);
/* Verify which buffer has valid data */
- reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
+ reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
if ((reg_data & XEL_RSR_RECV_DONE_MASK) == XEL_RSR_RECV_DONE_MASK) {
if (drvdata->rx_ping_pong != 0)
@@ -396,27 +404,28 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
return 0; /* No data was available */
/* Verify that buffer has valid data */
- reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
+ reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
if ((reg_data & XEL_RSR_RECV_DONE_MASK) !=
XEL_RSR_RECV_DONE_MASK)
return 0; /* No data was available */
}
/* Get the protocol type of the ethernet frame that arrived */
- proto_type = ((ntohl(__raw_readl(addr + XEL_HEADER_OFFSET +
+ proto_type = ((ntohl(xemaclite_readl(addr + XEL_HEADER_OFFSET +
XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) &
XEL_RPLR_LENGTH_MASK);
/* Check if received ethernet frame is a raw ethernet frame
* or an IP packet or an ARP packet */
- if (proto_type > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
+ if (proto_type > ETH_DATA_LEN) {
if (proto_type == ETH_P_IP) {
- length = ((ntohl(__raw_readl(addr +
+ length = ((ntohl(xemaclite_readl(addr +
XEL_HEADER_IP_LENGTH_OFFSET +
XEL_RXBUFF_OFFSET)) >>
XEL_HEADER_SHIFT) &
XEL_RPLR_LENGTH_MASK);
+ length = min_t(u16, length, ETH_DATA_LEN);
length += ETH_HLEN + ETH_FCS_LEN;
} else if (proto_type == ETH_P_ARP)
@@ -429,14 +438,17 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
/* Use the length in the frame, plus the header and trailer */
length = proto_type + ETH_HLEN + ETH_FCS_LEN;
+ if (WARN_ON(length > maxlen))
+ length = maxlen;
+
/* Read from the EmacLite device */
xemaclite_aligned_read((u32 __force *) (addr + XEL_RXBUFF_OFFSET),
data, length);
/* Acknowledge the frame */
- reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
+ reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
reg_data &= ~XEL_RSR_RECV_DONE_MASK;
- __raw_writel(reg_data, addr + XEL_RSR_OFFSET);
+ xemaclite_writel(reg_data, addr + XEL_RSR_OFFSET);
return length;
}
@@ -463,14 +475,14 @@ static void xemaclite_update_address(struct net_local *drvdata,
xemaclite_aligned_write(address_ptr, (u32 __force *) addr, ETH_ALEN);
- __raw_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET);
+ xemaclite_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET);
/* Update the MAC address in the EmacLite */
- reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
- __raw_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET);
+ reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
+ xemaclite_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET);
/* Wait for EmacLite to finish with the MAC address update */
- while ((__raw_readl(addr + XEL_TSR_OFFSET) &
+ while ((xemaclite_readl(addr + XEL_TSR_OFFSET) &
XEL_TSR_PROG_MAC_ADDR) != 0)
;
}
@@ -603,7 +615,7 @@ static void xemaclite_rx_handler(struct net_device *dev)
skb_reserve(skb, 2);
- len = xemaclite_recv_data(lp, (u8 *) skb->data);
+ len = xemaclite_recv_data(lp, (u8 *) skb->data, len);
if (!len) {
dev->stats.rx_errors++;
@@ -640,32 +652,32 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
u32 tx_status;
/* Check if there is Rx Data available */
- if ((__raw_readl(base_addr + XEL_RSR_OFFSET) &
+ if ((xemaclite_readl(base_addr + XEL_RSR_OFFSET) &
XEL_RSR_RECV_DONE_MASK) ||
- (__raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET)
+ (xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET)
& XEL_RSR_RECV_DONE_MASK))
xemaclite_rx_handler(dev);
/* Check if the Transmission for the first buffer is completed */
- tx_status = __raw_readl(base_addr + XEL_TSR_OFFSET);
+ tx_status = xemaclite_readl(base_addr + XEL_TSR_OFFSET);
if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
(tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
- __raw_writel(tx_status, base_addr + XEL_TSR_OFFSET);
+ xemaclite_writel(tx_status, base_addr + XEL_TSR_OFFSET);
tx_complete = true;
}
/* Check if the Transmission for the second buffer is completed */
- tx_status = __raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
+ tx_status = xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
(tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
- __raw_writel(tx_status, base_addr + XEL_BUFFER_OFFSET +
- XEL_TSR_OFFSET);
+ xemaclite_writel(tx_status, base_addr + XEL_BUFFER_OFFSET +
+ XEL_TSR_OFFSET);
tx_complete = true;
}
@@ -698,7 +710,7 @@ static int xemaclite_mdio_wait(struct net_local *lp)
/* wait for the MDIO interface to not be busy or timeout
after some time.
*/
- while (__raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
+ while (xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
XEL_MDIOCTRL_MDIOSTS_MASK) {
if (time_before_eq(end, jiffies)) {
WARN_ON(1);
@@ -734,17 +746,17 @@ static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg)
* MDIO Address register. Set the Status bit in the MDIO Control
* register to start a MDIO read transaction.
*/
- ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
- __raw_writel(XEL_MDIOADDR_OP_MASK |
- ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
- lp->base_addr + XEL_MDIOADDR_OFFSET);
- __raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
- lp->base_addr + XEL_MDIOCTRL_OFFSET);
+ ctrl_reg = xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
+ xemaclite_writel(XEL_MDIOADDR_OP_MASK |
+ ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
+ lp->base_addr + XEL_MDIOADDR_OFFSET);
+ xemaclite_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
+ lp->base_addr + XEL_MDIOCTRL_OFFSET);
if (xemaclite_mdio_wait(lp))
return -ETIMEDOUT;
- rc = __raw_readl(lp->base_addr + XEL_MDIORD_OFFSET);
+ rc = xemaclite_readl(lp->base_addr + XEL_MDIORD_OFFSET);
dev_dbg(&lp->ndev->dev,
"xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n",
@@ -781,13 +793,13 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
* Data register. Finally, set the Status bit in the MDIO Control
* register to start a MDIO write transaction.
*/
- ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
- __raw_writel(~XEL_MDIOADDR_OP_MASK &
- ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
- lp->base_addr + XEL_MDIOADDR_OFFSET);
- __raw_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET);
- __raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
- lp->base_addr + XEL_MDIOCTRL_OFFSET);
+ ctrl_reg = xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
+ xemaclite_writel(~XEL_MDIOADDR_OP_MASK &
+ ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
+ lp->base_addr + XEL_MDIOADDR_OFFSET);
+ xemaclite_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET);
+ xemaclite_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
+ lp->base_addr + XEL_MDIOCTRL_OFFSET);
return 0;
}
@@ -834,8 +846,8 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
/* Enable the MDIO bus by asserting the enable bit in MDIO Control
* register.
*/
- __raw_writel(XEL_MDIOCTRL_MDIOEN_MASK,
- lp->base_addr + XEL_MDIOCTRL_OFFSET);
+ xemaclite_writel(XEL_MDIOCTRL_MDIOEN_MASK,
+ lp->base_addr + XEL_MDIOCTRL_OFFSET);
bus = mdiobus_alloc();
if (!bus) {
@@ -1140,8 +1152,8 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
}
/* Clear the Tx CSR's in case this is a restart */
- __raw_writel(0, lp->base_addr + XEL_TSR_OFFSET);
- __raw_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
+ xemaclite_writel(0, lp->base_addr + XEL_TSR_OFFSET);
+ xemaclite_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
/* Set the MAC address in the EmacLite device */
xemaclite_update_address(lp, ndev->dev_addr);
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 97e0cbc..cebde07 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -1372,3 +1372,4 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <hwelte@sysmocom.de>");
MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("gtp");
+MODULE_ALIAS_GENL_FAMILY("gtp");
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 1dfe230..e0a6b1a 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -648,8 +648,8 @@ static void ax_setup(struct net_device *dev)
{
/* Finish setting up the DEVICE info. */
dev->mtu = AX_MTU;
- dev->hard_header_len = 0;
- dev->addr_len = 0;
+ dev->hard_header_len = AX25_MAX_HEADER_LEN;
+ dev->addr_len = AX25_ADDR_LEN;
dev->type = ARPHRD_AX25;
dev->tx_queue_len = 10;
dev->header_ops = &ax25_header_ops;
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index d6a541b..2f70f80 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1114,8 +1114,6 @@ static int marvell_read_status_page(struct phy_device *phydev, int page)
if (adv < 0)
return adv;
- lpa &= adv;
-
if (status & MII_M1011_PHY_STATUS_FULLDUPLEX)
phydev->duplex = DUPLEX_FULL;
else
diff --git a/drivers/net/phy/mdio-bcm-iproc.c b/drivers/net/phy/mdio-bcm-iproc.c
index c0b4e65..46fe1ae 100644
--- a/drivers/net/phy/mdio-bcm-iproc.c
+++ b/drivers/net/phy/mdio-bcm-iproc.c
@@ -81,8 +81,6 @@ static int iproc_mdio_read(struct mii_bus *bus, int phy_id, int reg)
if (rc)
return rc;
- iproc_mdio_config_clk(priv->base);
-
/* Prepare the read operation */
cmd = (MII_DATA_TA_VAL << MII_DATA_TA_SHIFT) |
(reg << MII_DATA_RA_SHIFT) |
@@ -112,8 +110,6 @@ static int iproc_mdio_write(struct mii_bus *bus, int phy_id,
if (rc)
return rc;
- iproc_mdio_config_clk(priv->base);
-
/* Prepare the write operation */
cmd = (MII_DATA_TA_VAL << MII_DATA_TA_SHIFT) |
(reg << MII_DATA_RA_SHIFT) |
@@ -163,6 +159,8 @@ static int iproc_mdio_probe(struct platform_device *pdev)
bus->read = iproc_mdio_read;
bus->write = iproc_mdio_write;
+ iproc_mdio_config_clk(priv->base);
+
rc = of_mdiobus_register(bus, pdev->dev.of_node);
if (rc) {
dev_err(&pdev->dev, "MDIO bus registration failed\n");
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index ea92d52..fab56c9 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -1014,6 +1014,20 @@ static struct phy_driver ksphy_driver[] = {
.get_stats = kszphy_get_stats,
.suspend = genphy_suspend,
.resume = genphy_resume,
+}, {
+ .phy_id = PHY_ID_KSZ8795,
+ .phy_id_mask = MICREL_PHY_ID_MASK,
+ .name = "Micrel KSZ8795",
+ .features = (SUPPORTED_Pause | SUPPORTED_Asym_Pause),
+ .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .config_init = kszphy_config_init,
+ .config_aneg = ksz8873mll_config_aneg,
+ .read_status = ksz8873mll_read_status,
+ .get_sset_count = kszphy_get_sset_count,
+ .get_strings = kszphy_get_strings,
+ .get_stats = kszphy_get_stats,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
} };
module_phy_driver(ksphy_driver);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index c4ceb08..14d57d0 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -860,6 +860,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
struct module *ndev_owner = dev->dev.parent->driver->owner;
struct mii_bus *bus = phydev->mdio.bus;
struct device *d = &phydev->mdio.dev;
+ bool using_genphy = false;
int err;
/* For Ethernet device drivers that register their own MDIO bus, we
@@ -885,12 +886,22 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
d->driver =
&genphy_driver[GENPHY_DRV_1G].mdiodrv.driver;
+ using_genphy = true;
+ }
+
+ if (!try_module_get(d->driver->owner)) {
+ dev_err(&dev->dev, "failed to get the device driver module\n");
+ err = -EIO;
+ goto error_put_device;
+ }
+
+ if (using_genphy) {
err = d->driver->probe(d);
if (err >= 0)
err = device_bind_driver(d);
if (err)
- goto error;
+ goto error_module_put;
}
if (phydev->attached_dev) {
@@ -926,6 +937,13 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
return err;
error:
+ /* phy_detach() does all of the cleanup below */
+ phy_detach(phydev);
+ return err;
+
+error_module_put:
+ module_put(d->driver->owner);
+error_put_device:
put_device(d);
if (ndev_owner != bus->owner)
module_put(bus->owner);
@@ -987,6 +1005,8 @@ void phy_detach(struct phy_device *phydev)
phydev->attached_dev = NULL;
phy_suspend(phydev);
+ module_put(phydev->mdio.dev.driver->owner);
+
/* If the device had no specific driver before (i.e. - it
* was using the generic driver), we unbind the device
* from the generic driver so that there's a chance a
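A condensed sketch of the module reference pairing this hunk introduces, with assumed function names used purely for illustration:

#include <linux/module.h>
#include <linux/device.h>
#include <linux/errno.h>

/* Sketch: pin the PHY driver module while a device is attached and drop
 * the reference on detach, mirroring the try_module_get()/module_put()
 * calls added to phy_attach_direct() and phy_detach().
 */
static int example_attach(struct device *d)
{
	if (!try_module_get(d->driver->owner))
		return -EIO;	/* driver module is on its way out */
	/* ... probe/bind work ... */
	return 0;
}

static void example_detach(struct device *d)
{
	/* ... unbind work ... */
	module_put(d->driver->owner);
}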
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 90b426c..afb953a 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -32,7 +32,7 @@
#define NETNEXT_VERSION "08"
/* Information for net */
-#define NET_VERSION "7"
+#define NET_VERSION "8"
#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -1936,6 +1936,9 @@ static int r8152_poll(struct napi_struct *napi, int budget)
napi_complete(napi);
if (!list_empty(&tp->rx_done))
napi_schedule(napi);
+ else if (!skb_queue_empty(&tp->tx_queue) &&
+ !list_empty(&tp->tx_free))
+ napi_schedule(napi);
}
return work_done;
@@ -3155,10 +3158,13 @@ static void set_carrier(struct r8152 *tp)
if (!netif_carrier_ok(netdev)) {
tp->rtl_ops.enable(tp);
set_bit(RTL8152_SET_RX_MODE, &tp->flags);
+ netif_stop_queue(netdev);
napi_disable(&tp->napi);
netif_carrier_on(netdev);
rtl_start_rx(tp);
napi_enable(&tp->napi);
+ netif_wake_queue(netdev);
+ netif_info(tp, link, netdev, "carrier on\n");
}
} else {
if (netif_carrier_ok(netdev)) {
@@ -3166,6 +3172,7 @@ static void set_carrier(struct r8152 *tp)
napi_disable(&tp->napi);
tp->rtl_ops.disable(tp);
napi_enable(&tp->napi);
+ netif_info(tp, link, netdev, "carrier off\n");
}
}
}
@@ -3515,12 +3522,12 @@ static int rtl8152_pre_reset(struct usb_interface *intf)
if (!netif_running(netdev))
return 0;
+ netif_stop_queue(netdev);
napi_disable(&tp->napi);
clear_bit(WORK_ENABLE, &tp->flags);
usb_kill_urb(tp->intr_urb);
cancel_delayed_work_sync(&tp->schedule);
if (netif_carrier_ok(netdev)) {
- netif_stop_queue(netdev);
mutex_lock(&tp->control);
tp->rtl_ops.disable(tp);
mutex_unlock(&tp->control);
@@ -3545,12 +3552,17 @@ static int rtl8152_post_reset(struct usb_interface *intf)
if (netif_carrier_ok(netdev)) {
mutex_lock(&tp->control);
tp->rtl_ops.enable(tp);
+ rtl_start_rx(tp);
rtl8152_set_rx_mode(netdev);
mutex_unlock(&tp->control);
- netif_wake_queue(netdev);
}
napi_enable(&tp->napi);
+ netif_wake_queue(netdev);
+ usb_submit_urb(tp->intr_urb, GFP_KERNEL);
+
+ if (!list_empty(&tp->rx_done))
+ napi_schedule(&tp->napi);
return 0;
}
@@ -3583,10 +3595,15 @@ static int rtl8152_rumtime_suspend(struct r8152 *tp)
struct net_device *netdev = tp->netdev;
int ret = 0;
+ set_bit(SELECTIVE_SUSPEND, &tp->flags);
+ smp_mb__after_atomic();
+
if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
u32 rcr = 0;
if (delay_autosuspend(tp)) {
+ clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+ smp_mb__after_atomic();
ret = -EBUSY;
goto out1;
}
@@ -3603,6 +3620,8 @@ static int rtl8152_rumtime_suspend(struct r8152 *tp)
if (!(ocp_data & RXFIFO_EMPTY)) {
rxdy_gated_en(tp, false);
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr);
+ clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+ smp_mb__after_atomic();
ret = -EBUSY;
goto out1;
}
@@ -3622,8 +3641,6 @@ static int rtl8152_rumtime_suspend(struct r8152 *tp)
}
}
- set_bit(SELECTIVE_SUSPEND, &tp->flags);
-
out1:
return ret;
}
@@ -3679,12 +3696,15 @@ static int rtl8152_resume(struct usb_interface *intf)
if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) {
if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
tp->rtl_ops.autosuspend_en(tp, false);
- clear_bit(SELECTIVE_SUSPEND, &tp->flags);
napi_disable(&tp->napi);
set_bit(WORK_ENABLE, &tp->flags);
if (netif_carrier_ok(tp->netdev))
rtl_start_rx(tp);
napi_enable(&tp->napi);
+ clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+ smp_mb__after_atomic();
+ if (!list_empty(&tp->rx_done))
+ napi_schedule(&tp->napi);
} else {
tp->rtl_ops.up(tp);
netif_carrier_off(tp->netdev);
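The suspend hunks above move the SELECTIVE_SUSPEND flag update ahead of the bail-out checks and pair every set_bit()/clear_bit() with smp_mb__after_atomic(); a stripped-down sketch of that ordering pattern (illustrative only, not the full driver logic):

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Sketch: publish the flag first, then test the conditions that may force
 * a rollback, so concurrent readers see the flag ordered against the
 * hardware state examined afterwards.
 */
static int example_runtime_suspend(unsigned long *flags, bool busy)
{
	set_bit(0 /* SELECTIVE_SUSPEND */, flags);
	smp_mb__after_atomic();

	if (busy) {
		clear_bit(0 /* SELECTIVE_SUSPEND */, flags);
		smp_mb__after_atomic();
		return -EBUSY;
	}
	return 0;
}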
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index a2515887..0b5a84c 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -73,8 +73,6 @@ static atomic_t iface_counter = ATOMIC_INIT(0);
/* Private data structure */
struct sierra_net_data {
- u8 ethr_hdr_tmpl[ETH_HLEN]; /* ethernet header template for rx'd pkts */
-
u16 link_up; /* air link up or down */
u8 tx_hdr_template[4]; /* part of HIP hdr for tx'd packets */
@@ -122,6 +120,7 @@ struct param {
/* LSI Protocol types */
#define SIERRA_NET_PROTOCOL_UMTS 0x01
+#define SIERRA_NET_PROTOCOL_UMTS_DS 0x04
/* LSI Coverage */
#define SIERRA_NET_COVERAGE_NONE 0x00
#define SIERRA_NET_COVERAGE_NOPACKET 0x01
@@ -129,7 +128,8 @@ struct param {
/* LSI Session */
#define SIERRA_NET_SESSION_IDLE 0x00
/* LSI Link types */
-#define SIERRA_NET_AS_LINK_TYPE_IPv4 0x00
+#define SIERRA_NET_AS_LINK_TYPE_IPV4 0x00
+#define SIERRA_NET_AS_LINK_TYPE_IPV6 0x02
struct lsi_umts {
u8 protocol;
@@ -137,9 +137,14 @@ struct lsi_umts {
__be16 length;
/* eventually use a union for the rest - assume umts for now */
u8 coverage;
- u8 unused2[41];
+ u8 network_len; /* network name len */
+ u8 network[40]; /* network name (UCS2, bigendian) */
u8 session_state;
u8 unused3[33];
+} __packed;
+
+struct lsi_umts_single {
+ struct lsi_umts lsi;
u8 link_type;
u8 pdp_addr_len; /* NW-supplied PDP address len */
u8 pdp_addr[16]; /* NW-supplied PDP address (bigendian)) */
@@ -158,10 +163,31 @@ struct lsi_umts {
u8 reserved[8];
} __packed;
+struct lsi_umts_dual {
+ struct lsi_umts lsi;
+ u8 pdp_addr4_len; /* NW-supplied PDP IPv4 address len */
+ u8 pdp_addr4[4]; /* NW-supplied PDP IPv4 address (bigendian)) */
+ u8 pdp_addr6_len; /* NW-supplied PDP IPv6 address len */
+ u8 pdp_addr6[16]; /* NW-supplied PDP IPv6 address (bigendian)) */
+ u8 unused4[23];
+ u8 dns1_addr4_len; /* NW-supplied 1st DNS v4 address len (bigendian) */
+ u8 dns1_addr4[4]; /* NW-supplied 1st DNS v4 address */
+ u8 dns1_addr6_len; /* NW-supplied 1st DNS v6 address len */
+ u8 dns1_addr6[16]; /* NW-supplied 1st DNS v6 address (bigendian)*/
+ u8 dns2_addr4_len; /* NW-supplied 2nd DNS v4 address len (bigendian) */
+ u8 dns2_addr4[4]; /* NW-supplied 2nd DNS v4 address */
+ u8 dns2_addr6_len; /* NW-supplied 2nd DNS v6 address len */
+ u8 dns2_addr6[16]; /* NW-supplied 2nd DNS v6 address (bigendian)*/
+ u8 unused5[68];
+} __packed;
+
#define SIERRA_NET_LSI_COMMON_LEN 4
-#define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts))
+#define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts_single))
#define SIERRA_NET_LSI_UMTS_STATUS_LEN \
(SIERRA_NET_LSI_UMTS_LEN - SIERRA_NET_LSI_COMMON_LEN)
+#define SIERRA_NET_LSI_UMTS_DS_LEN (sizeof(struct lsi_umts_dual))
+#define SIERRA_NET_LSI_UMTS_DS_STATUS_LEN \
+ (SIERRA_NET_LSI_UMTS_DS_LEN - SIERRA_NET_LSI_COMMON_LEN)
/* Forward definitions */
static void sierra_sync_timer(unsigned long syncdata);
@@ -191,10 +217,11 @@ static inline void sierra_net_set_private(struct usbnet *dev,
dev->data[0] = (unsigned long)priv;
}
-/* is packet IPv4 */
+/* is packet IPv4/IPv6 */
static inline int is_ip(struct sk_buff *skb)
{
- return skb->protocol == cpu_to_be16(ETH_P_IP);
+ return skb->protocol == cpu_to_be16(ETH_P_IP) ||
+ skb->protocol == cpu_to_be16(ETH_P_IPV6);
}
/*
@@ -350,46 +377,51 @@ static inline int sierra_net_is_valid_addrlen(u8 len)
static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen)
{
struct lsi_umts *lsi = (struct lsi_umts *)data;
+ u32 expected_length;
- if (datalen < sizeof(struct lsi_umts)) {
- netdev_err(dev->net, "%s: Data length %d, exp %Zu\n",
- __func__, datalen,
- sizeof(struct lsi_umts));
+ if (datalen < sizeof(struct lsi_umts_single)) {
+ netdev_err(dev->net, "%s: Data length %d, exp >= %Zu\n",
+ __func__, datalen, sizeof(struct lsi_umts_single));
return -1;
}
- if (lsi->length != cpu_to_be16(SIERRA_NET_LSI_UMTS_STATUS_LEN)) {
- netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n",
- __func__, be16_to_cpu(lsi->length),
- (u32)SIERRA_NET_LSI_UMTS_STATUS_LEN);
- return -1;
- }
-
- /* Validate the protocol - only support UMTS for now */
- if (lsi->protocol != SIERRA_NET_PROTOCOL_UMTS) {
- netdev_err(dev->net, "Protocol unsupported, 0x%02x\n",
- lsi->protocol);
- return -1;
- }
-
- /* Validate the link type */
- if (lsi->link_type != SIERRA_NET_AS_LINK_TYPE_IPv4) {
- netdev_err(dev->net, "Link type unsupported: 0x%02x\n",
- lsi->link_type);
- return -1;
- }
-
- /* Validate the coverage */
- if (lsi->coverage == SIERRA_NET_COVERAGE_NONE
- || lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) {
- netdev_err(dev->net, "No coverage, 0x%02x\n", lsi->coverage);
- return 0;
- }
-
/* Validate the session state */
if (lsi->session_state == SIERRA_NET_SESSION_IDLE) {
netdev_err(dev->net, "Session idle, 0x%02x\n",
- lsi->session_state);
+ lsi->session_state);
+ return 0;
+ }
+
+ /* Validate the protocol - only support UMTS for now */
+ if (lsi->protocol == SIERRA_NET_PROTOCOL_UMTS) {
+ struct lsi_umts_single *single = (struct lsi_umts_single *)lsi;
+
+ /* Validate the link type */
+ if (single->link_type != SIERRA_NET_AS_LINK_TYPE_IPV4 &&
+ single->link_type != SIERRA_NET_AS_LINK_TYPE_IPV6) {
+ netdev_err(dev->net, "Link type unsupported: 0x%02x\n",
+ single->link_type);
+ return -1;
+ }
+ expected_length = SIERRA_NET_LSI_UMTS_STATUS_LEN;
+ } else if (lsi->protocol == SIERRA_NET_PROTOCOL_UMTS_DS) {
+ expected_length = SIERRA_NET_LSI_UMTS_DS_STATUS_LEN;
+ } else {
+ netdev_err(dev->net, "Protocol unsupported, 0x%02x\n",
+ lsi->protocol);
+ return -1;
+ }
+
+ if (be16_to_cpu(lsi->length) != expected_length) {
+ netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n",
+ __func__, be16_to_cpu(lsi->length), expected_length);
+ return -1;
+ }
+
+ /* Validate the coverage */
+ if (lsi->coverage == SIERRA_NET_COVERAGE_NONE ||
+ lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) {
+ netdev_err(dev->net, "No coverage, 0x%02x\n", lsi->coverage);
return 0;
}
@@ -662,7 +694,6 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
u8 numendpoints;
u16 fwattr = 0;
int status;
- struct ethhdr *eth;
struct sierra_net_data *priv;
static const u8 sync_tmplate[sizeof(priv->sync_msg)] = {
0x00, 0x00, SIERRA_NET_HIP_MSYNC_ID, 0x00};
@@ -700,11 +731,6 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
- /* we will have to manufacture ethernet headers, prepare template */
- eth = (struct ethhdr *)priv->ethr_hdr_tmpl;
- memcpy(&eth->h_dest, dev->net->dev_addr, ETH_ALEN);
- eth->h_proto = cpu_to_be16(ETH_P_IP);
-
/* prepare shutdown message template */
memcpy(priv->shdwn_msg, shdwn_tmplate, sizeof(priv->shdwn_msg));
/* set context index initially to 0 - prepares tx hdr template */
@@ -833,9 +859,14 @@ static int sierra_net_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb_pull(skb, hh.hdrlen);
- /* We are going to accept this packet, prepare it */
- memcpy(skb->data, sierra_net_get_private(dev)->ethr_hdr_tmpl,
- ETH_HLEN);
+ /* We are going to accept this packet, prepare it.
+ * In case protocol is IPv6, keep it, otherwise force IPv4.
+ */
+ skb_reset_mac_header(skb);
+ if (eth_hdr(skb)->h_proto != cpu_to_be16(ETH_P_IPV6))
+ eth_hdr(skb)->h_proto = cpu_to_be16(ETH_P_IP);
+ eth_zero_addr(eth_hdr(skb)->h_source);
+ memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN);
/* Last packet in batch handled by usbnet */
if (hh.payload_len.word == skb->len)
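Using only the structures and constants introduced above, the per-protocol length selection in sierra_net_parse_lsi() reduces to the following sketch (hypothetical helper name, for illustration):

/* Sketch: the expected LSI status length depends on whether the modem
 * reports a single-PDP (UMTS) or dual-stack (UMTS_DS) session.
 */
static u32 example_expected_lsi_len(const struct lsi_umts *lsi)
{
	if (lsi->protocol == SIERRA_NET_PROTOCOL_UMTS)
		return SIERRA_NET_LSI_UMTS_STATUS_LEN;
	if (lsi->protocol == SIERRA_NET_PROTOCOL_UMTS_DS)
		return SIERRA_NET_LSI_UMTS_DS_STATUS_LEN;
	return 0;	/* unsupported protocol */
}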
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 030d849..d092d34 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -121,4 +121,6 @@
Select Y to compile the driver in order to have WLAN functionality
support.
+source "drivers/net/wireless/cnss_utils/Kconfig"
+
endif # WLAN
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 91594de..005523c 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -27,3 +27,5 @@
obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o
obj-$(CONFIG_WCNSS_MEM_PRE_ALLOC) += cnss_prealloc/
+
+obj-$(CONFIG_CNSS_UTILS) += cnss_utils/
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 0457e31..6063cf4 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -1647,6 +1647,8 @@ static int ath10k_pci_hif_start(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
+ napi_enable(&ar->napi);
+
ath10k_pci_irq_enable(ar);
ath10k_pci_rx_post(ar);
@@ -2531,7 +2533,6 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar)
ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
goto err_ce;
}
- napi_enable(&ar->napi);
return 0;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
index c7c1e99..d231042 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -442,7 +442,7 @@ struct brcmf_fw {
const char *nvram_name;
u16 domain_nr;
u16 bus_nr;
- void (*done)(struct device *dev, const struct firmware *fw,
+ void (*done)(struct device *dev, int err, const struct firmware *fw,
void *nvram_image, u32 nvram_len);
};
@@ -477,52 +477,51 @@ static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
goto fail;
- fwctx->done(fwctx->dev, fwctx->code, nvram, nvram_length);
+ fwctx->done(fwctx->dev, 0, fwctx->code, nvram, nvram_length);
kfree(fwctx);
return;
fail:
brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
release_firmware(fwctx->code);
- device_release_driver(fwctx->dev);
+ fwctx->done(fwctx->dev, -ENOENT, NULL, NULL, 0);
kfree(fwctx);
}
static void brcmf_fw_request_code_done(const struct firmware *fw, void *ctx)
{
struct brcmf_fw *fwctx = ctx;
- int ret;
+ int ret = 0;
brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev));
- if (!fw)
+ if (!fw) {
+ ret = -ENOENT;
goto fail;
-
- /* only requested code so done here */
- if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM)) {
- fwctx->done(fwctx->dev, fw, NULL, 0);
- kfree(fwctx);
- return;
}
+ /* only requested code so done here */
+ if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM))
+ goto done;
+
fwctx->code = fw;
ret = request_firmware_nowait(THIS_MODULE, true, fwctx->nvram_name,
fwctx->dev, GFP_KERNEL, fwctx,
brcmf_fw_request_nvram_done);
- if (!ret)
- return;
-
- brcmf_fw_request_nvram_done(NULL, fwctx);
+ /* pass NULL to nvram callback for bcm47xx fallback */
+ if (ret)
+ brcmf_fw_request_nvram_done(NULL, fwctx);
return;
fail:
brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
- device_release_driver(fwctx->dev);
+done:
+ fwctx->done(fwctx->dev, ret, fw, NULL, 0);
kfree(fwctx);
}
int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
const char *code, const char *nvram,
- void (*fw_cb)(struct device *dev,
+ void (*fw_cb)(struct device *dev, int err,
const struct firmware *fw,
void *nvram_image, u32 nvram_len),
u16 domain_nr, u16 bus_nr)
@@ -555,7 +554,7 @@ int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
const char *code, const char *nvram,
- void (*fw_cb)(struct device *dev,
+ void (*fw_cb)(struct device *dev, int err,
const struct firmware *fw,
void *nvram_image, u32 nvram_len))
{
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
index d3c9f0d..8fa4b7e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
@@ -73,13 +73,13 @@ void brcmf_fw_nvram_free(void *nvram);
*/
int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
const char *code, const char *nvram,
- void (*fw_cb)(struct device *dev,
+ void (*fw_cb)(struct device *dev, int err,
const struct firmware *fw,
void *nvram_image, u32 nvram_len),
u16 domain_nr, u16 bus_nr);
int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
const char *code, const char *nvram,
- void (*fw_cb)(struct device *dev,
+ void (*fw_cb)(struct device *dev, int err,
const struct firmware *fw,
void *nvram_image, u32 nvram_len));
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 3deba90..d3d7921 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -1618,16 +1618,23 @@ static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = {
.write32 = brcmf_pcie_buscore_write32,
};
-static void brcmf_pcie_setup(struct device *dev, const struct firmware *fw,
+static void brcmf_pcie_setup(struct device *dev, int ret,
+ const struct firmware *fw,
void *nvram, u32 nvram_len)
{
- struct brcmf_bus *bus = dev_get_drvdata(dev);
- struct brcmf_pciedev *pcie_bus_dev = bus->bus_priv.pcie;
- struct brcmf_pciedev_info *devinfo = pcie_bus_dev->devinfo;
+ struct brcmf_bus *bus;
+ struct brcmf_pciedev *pcie_bus_dev;
+ struct brcmf_pciedev_info *devinfo;
struct brcmf_commonring **flowrings;
- int ret;
u32 i;
+ /* check firmware loading result */
+ if (ret)
+ goto fail;
+
+ bus = dev_get_drvdata(dev);
+ pcie_bus_dev = bus->bus_priv.pcie;
+ devinfo = pcie_bus_dev->devinfo;
brcmf_pcie_attach(devinfo);
/* Some of the firmwares have the size of the memory of the device
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 2458e6e..8744b9b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -3975,21 +3975,26 @@ static const struct brcmf_bus_ops brcmf_sdio_bus_ops = {
.get_memdump = brcmf_sdio_bus_get_memdump,
};
-static void brcmf_sdio_firmware_callback(struct device *dev,
+static void brcmf_sdio_firmware_callback(struct device *dev, int err,
const struct firmware *code,
void *nvram, u32 nvram_len)
{
- struct brcmf_bus *bus_if = dev_get_drvdata(dev);
- struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
- struct brcmf_sdio *bus = sdiodev->bus;
- int err = 0;
+ struct brcmf_bus *bus_if;
+ struct brcmf_sdio_dev *sdiodev;
+ struct brcmf_sdio *bus;
u8 saveclk;
- brcmf_dbg(TRACE, "Enter: dev=%s\n", dev_name(dev));
+ brcmf_dbg(TRACE, "Enter: dev=%s, err=%d\n", dev_name(dev), err);
+ bus_if = dev_get_drvdata(dev);
+ sdiodev = bus_if->bus_priv.sdio;
+ if (err)
+ goto fail;
if (!bus_if->drvr)
return;
+ bus = sdiodev->bus;
+
/* try to download image and nvram to the dongle */
bus->alp_only = true;
err = brcmf_sdio_download_firmware(bus, code, nvram, nvram_len);
@@ -4076,6 +4081,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev,
fail:
brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err);
device_release_driver(dev);
+ device_release_driver(&sdiodev->func[2]->dev);
}
struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index 2f978a3..053f3b5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -1158,17 +1158,18 @@ static int brcmf_usb_bus_setup(struct brcmf_usbdev_info *devinfo)
return ret;
}
-static void brcmf_usb_probe_phase2(struct device *dev,
+static void brcmf_usb_probe_phase2(struct device *dev, int ret,
const struct firmware *fw,
void *nvram, u32 nvlen)
{
struct brcmf_bus *bus = dev_get_drvdata(dev);
- struct brcmf_usbdev_info *devinfo;
- int ret;
+ struct brcmf_usbdev_info *devinfo = bus->bus_priv.usb->devinfo;
+
+ if (ret)
+ goto error;
brcmf_dbg(USB, "Start fw downloading\n");
- devinfo = bus->bus_priv.usb->devinfo;
ret = check_file(fw->data);
if (ret < 0) {
brcmf_err("invalid firmware\n");
diff --git a/drivers/net/wireless/cnss_utils/Kconfig b/drivers/net/wireless/cnss_utils/Kconfig
new file mode 100644
index 0000000..5f43e48
--- /dev/null
+++ b/drivers/net/wireless/cnss_utils/Kconfig
@@ -0,0 +1,6 @@
+config CNSS_UTILS
+ bool "CNSS utilities support"
+ ---help---
+ Add CNSS utilities support for the WLAN driver module.
+ This feature enables the WLAN driver to use CNSS utilities APIs to set
+ and get WLAN related information.
\ No newline at end of file
diff --git a/drivers/net/wireless/cnss_utils/Makefile b/drivers/net/wireless/cnss_utils/Makefile
new file mode 100644
index 0000000..0d1ed7a
--- /dev/null
+++ b/drivers/net/wireless/cnss_utils/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_CNSS_UTILS) += cnss_utils.o
diff --git a/drivers/net/wireless/cnss_utils/cnss_utils.c b/drivers/net/wireless/cnss_utils/cnss_utils.c
new file mode 100644
index 0000000..d73846e
--- /dev/null
+++ b/drivers/net/wireless/cnss_utils/cnss_utils.c
@@ -0,0 +1,310 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "cnss_utils: " fmt
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/etherdevice.h>
+#include <net/cnss_utils.h>
+
+#define CNSS_MAX_CH_NUM 45
+struct cnss_unsafe_channel_list {
+ u16 unsafe_ch_count;
+ u16 unsafe_ch_list[CNSS_MAX_CH_NUM];
+};
+
+struct cnss_dfs_nol_info {
+ void *dfs_nol_info;
+ u16 dfs_nol_info_len;
+};
+
+#define MAX_NO_OF_MAC_ADDR 4
+struct cnss_wlan_mac_addr {
+ u8 mac_addr[MAX_NO_OF_MAC_ADDR][ETH_ALEN];
+ u32 no_of_mac_addr_set;
+};
+
+static struct cnss_utils_priv {
+ struct cnss_unsafe_channel_list unsafe_channel_list;
+ struct cnss_dfs_nol_info dfs_nol_info;
+ /* generic mutex for unsafe channel */
+ struct mutex unsafe_channel_list_lock;
+ /* generic spin-lock for dfs_nol info */
+ spinlock_t dfs_nol_info_lock;
+ int driver_load_cnt;
+ bool is_wlan_mac_set;
+ struct cnss_wlan_mac_addr wlan_mac_addr;
+ enum cnss_utils_cc_src cc_source;
+} *cnss_utils_priv;
+
+int cnss_utils_set_wlan_unsafe_channel(struct device *dev,
+ u16 *unsafe_ch_list, u16 ch_count)
+{
+ struct cnss_utils_priv *priv = cnss_utils_priv;
+
+ if (!priv)
+ return -EINVAL;
+
+ mutex_lock(&priv->unsafe_channel_list_lock);
+ if ((!unsafe_ch_list) || (ch_count > CNSS_MAX_CH_NUM)) {
+ mutex_unlock(&priv->unsafe_channel_list_lock);
+ return -EINVAL;
+ }
+
+ priv->unsafe_channel_list.unsafe_ch_count = ch_count;
+
+ if (ch_count == 0)
+ goto end;
+
+ memcpy(priv->unsafe_channel_list.unsafe_ch_list,
+ unsafe_ch_list, ch_count * sizeof(u16));
+
+end:
+ mutex_unlock(&priv->unsafe_channel_list_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(cnss_utils_set_wlan_unsafe_channel);
+
+int cnss_utils_get_wlan_unsafe_channel(struct device *dev,
+ u16 *unsafe_ch_list,
+ u16 *ch_count, u16 buf_len)
+{
+ struct cnss_utils_priv *priv = cnss_utils_priv;
+
+ if (!priv)
+ return -EINVAL;
+
+ mutex_lock(&priv->unsafe_channel_list_lock);
+ if (!unsafe_ch_list || !ch_count) {
+ mutex_unlock(&priv->unsafe_channel_list_lock);
+ return -EINVAL;
+ }
+
+ if (buf_len <
+ (priv->unsafe_channel_list.unsafe_ch_count * sizeof(u16))) {
+ mutex_unlock(&priv->unsafe_channel_list_lock);
+ return -ENOMEM;
+ }
+
+ *ch_count = priv->unsafe_channel_list.unsafe_ch_count;
+ memcpy(unsafe_ch_list, priv->unsafe_channel_list.unsafe_ch_list,
+ priv->unsafe_channel_list.unsafe_ch_count * sizeof(u16));
+ mutex_unlock(&priv->unsafe_channel_list_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(cnss_utils_get_wlan_unsafe_channel);
+
+int cnss_utils_wlan_set_dfs_nol(struct device *dev,
+ const void *info, u16 info_len)
+{
+ void *temp;
+ void *old_nol_info;
+ struct cnss_dfs_nol_info *dfs_info;
+ struct cnss_utils_priv *priv = cnss_utils_priv;
+
+ if (!priv)
+ return -EINVAL;
+
+ if (!info || !info_len)
+ return -EINVAL;
+
+ temp = kmalloc(info_len, GFP_ATOMIC);
+ if (!temp)
+ return -ENOMEM;
+
+ memcpy(temp, info, info_len);
+ spin_lock_bh(&priv->dfs_nol_info_lock);
+ dfs_info = &priv->dfs_nol_info;
+ old_nol_info = dfs_info->dfs_nol_info;
+ dfs_info->dfs_nol_info = temp;
+ dfs_info->dfs_nol_info_len = info_len;
+ spin_unlock_bh(&priv->dfs_nol_info_lock);
+ kfree(old_nol_info);
+
+ return 0;
+}
+EXPORT_SYMBOL(cnss_utils_wlan_set_dfs_nol);
+
+int cnss_utils_wlan_get_dfs_nol(struct device *dev,
+ void *info, u16 info_len)
+{
+ int len;
+ struct cnss_dfs_nol_info *dfs_info;
+ struct cnss_utils_priv *priv = cnss_utils_priv;
+
+ if (!priv)
+ return -EINVAL;
+
+ if (!info || !info_len)
+ return -EINVAL;
+
+ spin_lock_bh(&priv->dfs_nol_info_lock);
+
+ dfs_info = &priv->dfs_nol_info;
+ if (!dfs_info->dfs_nol_info ||
+ dfs_info->dfs_nol_info_len == 0) {
+ spin_unlock_bh(&priv->dfs_nol_info_lock);
+ return -ENOENT;
+ }
+
+ len = min(info_len, dfs_info->dfs_nol_info_len);
+ memcpy(info, dfs_info->dfs_nol_info, len);
+ spin_unlock_bh(&priv->dfs_nol_info_lock);
+
+ return len;
+}
+EXPORT_SYMBOL(cnss_utils_wlan_get_dfs_nol);
+
+void cnss_utils_increment_driver_load_cnt(struct device *dev)
+{
+ struct cnss_utils_priv *priv = cnss_utils_priv;
+
+ if (!priv)
+ return;
+
+ ++(priv->driver_load_cnt);
+}
+EXPORT_SYMBOL(cnss_utils_increment_driver_load_cnt);
+
+int cnss_utils_get_driver_load_cnt(struct device *dev)
+{
+ struct cnss_utils_priv *priv = cnss_utils_priv;
+
+ if (!priv)
+ return -EINVAL;
+
+ return priv->driver_load_cnt;
+}
+EXPORT_SYMBOL(cnss_utils_get_driver_load_cnt);
+
+int cnss_utils_set_wlan_mac_address(const u8 *in, const uint32_t len)
+{
+ struct cnss_utils_priv *priv = cnss_utils_priv;
+ u32 no_of_mac_addr;
+ struct cnss_wlan_mac_addr *addr = NULL;
+ int iter;
+ u8 *temp = NULL;
+
+ if (!priv)
+ return -EINVAL;
+
+ if (priv->is_wlan_mac_set) {
+ pr_debug("WLAN MAC address is already set\n");
+ return 0;
+ }
+
+ if (len == 0 || (len % ETH_ALEN) != 0) {
+ pr_err("Invalid length %d\n", len);
+ return -EINVAL;
+ }
+
+ no_of_mac_addr = len / ETH_ALEN;
+ if (no_of_mac_addr > MAX_NO_OF_MAC_ADDR) {
+ pr_err("Exceed maximum supported MAC address %u %u\n",
+ MAX_NO_OF_MAC_ADDR, no_of_mac_addr);
+ return -EINVAL;
+ }
+
+ priv->is_wlan_mac_set = true;
+ addr = &priv->wlan_mac_addr;
+ addr->no_of_mac_addr_set = no_of_mac_addr;
+ temp = &addr->mac_addr[0][0];
+
+ for (iter = 0; iter < no_of_mac_addr;
+ ++iter, temp += ETH_ALEN, in += ETH_ALEN) {
+ ether_addr_copy(temp, in);
+ pr_debug("MAC_ADDR:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ temp[0], temp[1], temp[2],
+ temp[3], temp[4], temp[5]);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(cnss_utils_set_wlan_mac_address);
+
+u8 *cnss_utils_get_wlan_mac_address(struct device *dev, uint32_t *num)
+{
+ struct cnss_utils_priv *priv = cnss_utils_priv;
+ struct cnss_wlan_mac_addr *addr = NULL;
+
+ if (!priv)
+ goto out;
+
+ if (!priv->is_wlan_mac_set) {
+ pr_debug("WLAN MAC address is not set\n");
+ goto out;
+ }
+
+ addr = &priv->wlan_mac_addr;
+ *num = addr->no_of_mac_addr_set;
+ return &addr->mac_addr[0][0];
+out:
+ *num = 0;
+ return NULL;
+}
+EXPORT_SYMBOL(cnss_utils_get_wlan_mac_address);
+
+void cnss_utils_set_cc_source(struct device *dev,
+ enum cnss_utils_cc_src cc_source)
+{
+ struct cnss_utils_priv *priv = cnss_utils_priv;
+
+ if (!priv)
+ return;
+
+ priv->cc_source = cc_source;
+}
+EXPORT_SYMBOL(cnss_utils_set_cc_source);
+
+enum cnss_utils_cc_src cnss_utils_get_cc_source(struct device *dev)
+{
+ struct cnss_utils_priv *priv = cnss_utils_priv;
+
+ if (!priv)
+ return -EINVAL;
+
+ return priv->cc_source;
+}
+EXPORT_SYMBOL(cnss_utils_get_cc_source);
+
+static int __init cnss_utils_init(void)
+{
+ struct cnss_utils_priv *priv = NULL;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->cc_source = CNSS_UTILS_SOURCE_CORE;
+
+ mutex_init(&priv->unsafe_channel_list_lock);
+ spin_lock_init(&priv->dfs_nol_info_lock);
+
+ cnss_utils_priv = priv;
+
+ return 0;
+}
+
+static void __exit cnss_utils_exit(void)
+{
+ kfree(cnss_utils_priv);
+ cnss_utils_priv = NULL;
+}
+
+module_init(cnss_utils_init);
+module_exit(cnss_utils_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION(DEVICE "CNSS Utilities Driver");
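A hypothetical consumer of the helpers exported by this new file might look like the sketch below; the device pointer, channel numbers, and buffer size are placeholders, not values taken from this patch:

#include <linux/device.h>
#include <linux/kernel.h>
#include <net/cnss_utils.h>

/* Sketch: record a set of unsafe channels, read them back, and bump the
 * driver load counter.
 */
static int example_use_cnss_utils(struct device *dev)
{
	u16 unsafe[] = { 36, 40, 44 };
	u16 readback[16];
	u16 count = 0;
	int ret;

	ret = cnss_utils_set_wlan_unsafe_channel(dev, unsafe,
						 ARRAY_SIZE(unsafe));
	if (ret)
		return ret;

	ret = cnss_utils_get_wlan_unsafe_channel(dev, readback, &count,
						 sizeof(readback));
	if (ret)
		return ret;

	cnss_utils_increment_driver_load_cnt(dev);
	return 0;
}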
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
index b7273be..c8d9075 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
@@ -1422,21 +1422,6 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
cancel_work_sync(&rt2x00dev->intf_work);
cancel_delayed_work_sync(&rt2x00dev->autowakeup_work);
cancel_work_sync(&rt2x00dev->sleep_work);
-#if IS_ENABLED(CONFIG_RT2X00_LIB_USB)
- if (rt2x00_is_usb(rt2x00dev)) {
- usb_kill_anchored_urbs(rt2x00dev->anchor);
- hrtimer_cancel(&rt2x00dev->txstatus_timer);
- cancel_work_sync(&rt2x00dev->rxdone_work);
- cancel_work_sync(&rt2x00dev->txdone_work);
- }
-#endif
- if (rt2x00dev->workqueue)
- destroy_workqueue(rt2x00dev->workqueue);
-
- /*
- * Free the tx status fifo.
- */
- kfifo_free(&rt2x00dev->txstatus_fifo);
/*
* Kill the tx status tasklet.
@@ -1452,6 +1437,14 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
*/
rt2x00lib_uninitialize(rt2x00dev);
+ if (rt2x00dev->workqueue)
+ destroy_workqueue(rt2x00dev->workqueue);
+
+ /*
+ * Free the tx status fifo.
+ */
+ kfifo_free(&rt2x00dev->txstatus_fifo);
+
/*
* Free extra components
*/
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
index 662705e..631df69 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
@@ -740,6 +740,11 @@ void rt2x00usb_uninitialize(struct rt2x00_dev *rt2x00dev)
{
struct data_queue *queue;
+ usb_kill_anchored_urbs(rt2x00dev->anchor);
+ hrtimer_cancel(&rt2x00dev->txstatus_timer);
+ cancel_work_sync(&rt2x00dev->rxdone_work);
+ cancel_work_sync(&rt2x00dev->txdone_work);
+
queue_for_each(rt2x00dev, queue)
rt2x00usb_free_entries(queue);
}
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 005ef5d..ca8ddc3 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -438,6 +438,10 @@ static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
{
struct ib_recv_wr *bad_wr;
+ ib_dma_sync_single_for_device(ndev->device,
+ cmd->sge[0].addr, cmd->sge[0].length,
+ DMA_FROM_DEVICE);
+
if (ndev->srq)
return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr);
return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr);
@@ -538,6 +542,11 @@ static void nvmet_rdma_queue_response(struct nvmet_req *req)
first_wr = &rsp->send_wr;
nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);
+
+ ib_dma_sync_single_for_device(rsp->queue->dev->device,
+ rsp->send_sge.addr, rsp->send_sge.length,
+ DMA_TO_DEVICE);
+
if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) {
pr_err("sending cmd response failed\n");
nvmet_rdma_release_rsp(rsp);
@@ -698,6 +707,14 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
cmd->n_rdma = 0;
cmd->req.port = queue->port;
+
+ ib_dma_sync_single_for_cpu(queue->dev->device,
+ cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
+ DMA_FROM_DEVICE);
+ ib_dma_sync_single_for_cpu(queue->dev->device,
+ cmd->send_sge.addr, cmd->send_sge.length,
+ DMA_TO_DEVICE);
+
if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
&queue->nvme_sq, &nvmet_rdma_ops))
return;
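A condensed sketch of the DMA sync pairing added in the three hunks above (assumed helper name; the point is a CPU sync before parsing what the HCA wrote and a device sync before handing the buffer back for DMA):

#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>

/* Sketch: hand ownership of a receive buffer to the CPU for parsing, then
 * return it to the device before the recv WR is re-posted.
 */
static void example_sync_recv_buf(struct ib_device *ibdev, u64 addr, u32 len)
{
	ib_dma_sync_single_for_cpu(ibdev, addr, len, DMA_FROM_DEVICE);
	/* ... CPU reads/parses the received command here ... */
	ib_dma_sync_single_for_device(ibdev, addr, len, DMA_FROM_DEVICE);
}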
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 66af185..c0914fb 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -741,9 +741,12 @@ int __init of_scan_flat_dt(int (*it)(unsigned long node,
const char *pathp;
int offset, rc = 0, depth = -1;
- for (offset = fdt_next_node(blob, -1, &depth);
- offset >= 0 && depth >= 0 && !rc;
- offset = fdt_next_node(blob, offset, &depth)) {
+ if (!blob)
+ return 0;
+
+ for (offset = fdt_next_node(blob, -1, &depth);
+ offset >= 0 && depth >= 0 && !rc;
+ offset = fdt_next_node(blob, offset, &depth)) {
pathp = fdt_get_name(blob, offset, NULL);
if (*pathp == '/')
diff --git a/drivers/parport/parport_gsc.c b/drivers/parport/parport_gsc.c
index 6e3a60c..50f3bb0 100644
--- a/drivers/parport/parport_gsc.c
+++ b/drivers/parport/parport_gsc.c
@@ -293,7 +293,7 @@ struct parport *parport_gsc_probe_port(unsigned long base,
p->irq = PARPORT_IRQ_NONE;
}
if (p->irq != PARPORT_IRQ_NONE) {
- printk(", irq %d", p->irq);
+ pr_cont(", irq %d", p->irq);
if (p->dma == PARPORT_DMA_AUTO) {
p->dma = PARPORT_DMA_NONE;
@@ -303,8 +303,8 @@ struct parport *parport_gsc_probe_port(unsigned long base,
is mandatory (see above) */
p->dma = PARPORT_DMA_NONE;
- printk(" [");
-#define printmode(x) {if(p->modes&PARPORT_MODE_##x){printk("%s%s",f?",":"",#x);f++;}}
+ pr_cont(" [");
+#define printmode(x) {if(p->modes&PARPORT_MODE_##x){pr_cont("%s%s",f?",":"",#x);f++;}}
{
int f = 0;
printmode(PCSPP);
@@ -315,7 +315,7 @@ struct parport *parport_gsc_probe_port(unsigned long base,
// printmode(DMA);
}
#undef printmode
- printk("]\n");
+ pr_cont("]\n");
if (p->irq != PARPORT_IRQ_NONE) {
if (request_irq (p->irq, parport_irq_handler,
diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c
index 361d7dd0..0491a86 100644
--- a/drivers/pci/host/pci-msm.c
+++ b/drivers/pci/host/pci-msm.c
@@ -4926,9 +4926,8 @@ static int msm_pcie_map_qgic_addr(struct msm_pcie_dev_t *dev,
{
struct iommu_domain *domain = iommu_get_domain_for_dev(&pdev->dev);
struct iommu_domain_geometry geometry;
- int ret, fastmap_en = 0, bypass_en = 0;
- dma_addr_t iova;
- phys_addr_t gicm_db_offset;
+ int fastmap_en = 0, bypass_en = 0;
+ dma_addr_t iova, addr;
msg->address_hi = 0;
msg->address_lo = dev->msi_gicm_addr;
@@ -4970,18 +4969,15 @@ static int msm_pcie_map_qgic_addr(struct msm_pcie_dev_t *dev,
iova = rounddown(pcie_base_addr, PAGE_SIZE);
}
- ret = iommu_map(domain, iova, rounddown(dev->msi_gicm_addr, PAGE_SIZE),
- PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
- if (ret < 0) {
- PCIE_ERR(dev,
- "PCIe: RC%d: ret: %d: Could not do iommu map for QGIC address\n",
- dev->rc_idx, ret);
- return -ENOMEM;
+ addr = dma_map_resource(&pdev->dev, dev->msi_gicm_addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL, 0);
+ if (dma_mapping_error(&pdev->dev, addr)) {
+ PCIE_ERR(dev, "PCIe: RC%d: failed to map QGIC address",
+ dev->rc_idx);
+ return -EIO;
}
- gicm_db_offset = dev->msi_gicm_addr -
- rounddown(dev->msi_gicm_addr, PAGE_SIZE);
- msg->address_lo = iova + gicm_db_offset;
+ msg->address_lo = iova + addr;
return 0;
}
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 579c494..e7d4048 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -2142,7 +2142,8 @@ bool pci_dev_keep_suspended(struct pci_dev *pci_dev)
if (!pm_runtime_suspended(dev)
|| pci_target_state(pci_dev) != pci_dev->current_state
- || platform_pci_need_resume(pci_dev))
+ || platform_pci_need_resume(pci_dev)
+ || (pci_dev->dev_flags & PCI_DEV_FLAGS_NEEDS_RESUME))
return false;
/*
diff --git a/drivers/pinctrl/berlin/berlin-bg4ct.c b/drivers/pinctrl/berlin/berlin-bg4ct.c
index 0917204..c617ec4 100644
--- a/drivers/pinctrl/berlin/berlin-bg4ct.c
+++ b/drivers/pinctrl/berlin/berlin-bg4ct.c
@@ -217,7 +217,7 @@ static const struct berlin_desc_group berlin4ct_soc_pinctrl_groups[] = {
BERLIN_PINCTRL_GROUP("SCRD0_CRD_PRES", 0xc, 0x3, 0x15,
BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO20 */
BERLIN_PINCTRL_FUNCTION(0x1, "scrd0"), /* crd pres */
- BERLIN_PINCTRL_FUNCTION(0x1, "sd1a")), /* DAT3 */
+ BERLIN_PINCTRL_FUNCTION(0x3, "sd1a")), /* DAT3 */
BERLIN_PINCTRL_GROUP("SPI1_SS0n", 0xc, 0x3, 0x18,
BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS0n */
BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO37 */
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 583ae3f..5419de8 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -1250,10 +1250,12 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
debounce = readl(db_reg);
debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
+ if (arg)
+ conf |= BYT_DEBOUNCE_EN;
+ else
+ conf &= ~BYT_DEBOUNCE_EN;
+
switch (arg) {
- case 0:
- conf &= BYT_DEBOUNCE_EN;
- break;
case 375:
debounce |= BYT_DEBOUNCE_PULSE_375US;
break;
@@ -1276,7 +1278,9 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
debounce |= BYT_DEBOUNCE_PULSE_24MS;
break;
default:
- ret = -EINVAL;
+ if (arg)
+ ret = -EINVAL;
+ break;
}
if (!ret)
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
index ae06d54..2dd82c1 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
@@ -58,7 +58,6 @@ enum ipa_uc_offload_state {
IPA_UC_OFFLOAD_STATE_INVALID,
IPA_UC_OFFLOAD_STATE_INITIALIZED,
IPA_UC_OFFLOAD_STATE_UP,
- IPA_UC_OFFLOAD_STATE_DOWN,
};
struct ipa_uc_offload_ctx {
@@ -413,8 +412,7 @@ int ipa_uc_offload_conn_pipes(struct ipa_uc_offload_conn_in_params *inp,
return -EINVAL;
}
- if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED &&
- offload_ctx->state != IPA_UC_OFFLOAD_STATE_DOWN) {
+ if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED) {
IPA_UC_OFFLOAD_ERR("Invalid state %d\n", offload_ctx->state);
return -EPERM;
}
@@ -471,7 +469,7 @@ static int ipa_uc_ntn_disconn_pipes(struct ipa_uc_offload_ctx *ntn_ctx)
int ipa_ep_idx_ul, ipa_ep_idx_dl;
int ret = 0;
- ntn_ctx->state = IPA_UC_OFFLOAD_STATE_DOWN;
+ ntn_ctx->state = IPA_UC_OFFLOAD_STATE_INITIALIZED;
ret = ipa_rm_release_resource(IPA_RM_RESOURCE_ETHERNET_PROD);
if (ret) {
@@ -597,7 +595,7 @@ int ipa_uc_offload_cleanup(u32 clnt_hdl)
return -EINVAL;
}
- if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_DOWN) {
+ if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED) {
IPA_UC_OFFLOAD_ERR("Invalid State %d\n", offload_ctx->state);
return -EINVAL;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
index e8710a6..2b517a1 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
@@ -83,6 +83,10 @@ const char *ipa_event_name[] = {
__stringify(IPA_QUOTA_REACH),
__stringify(IPA_SSR_BEFORE_SHUTDOWN),
__stringify(IPA_SSR_AFTER_POWERUP),
+ __stringify(ADD_VLAN_IFACE),
+ __stringify(DEL_VLAN_IFACE),
+ __stringify(ADD_L2TP_VLAN_MAPPING),
+ __stringify(DEL_L2TP_VLAN_MAPPING)
};
const char *ipa_hdr_l2_type_name[] = {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 1c3995d..1e2b200 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -546,6 +546,90 @@ static int ipa3_send_wan_msg(unsigned long usr_param, uint8_t msg_type)
return 0;
}
+static void ipa3_vlan_l2tp_msg_free_cb(void *buff, u32 len, u32 type)
+{
+ if (!buff) {
+ IPAERR("Null buffer\n");
+ return;
+ }
+
+ if (type != ADD_VLAN_IFACE &&
+ type != DEL_VLAN_IFACE &&
+ type != ADD_L2TP_VLAN_MAPPING &&
+ type != DEL_L2TP_VLAN_MAPPING) {
+ IPAERR("Wrong type given. buff %pK type %d\n", buff, type);
+ return;
+ }
+
+ kfree(buff);
+}
+
+static int ipa3_send_vlan_l2tp_msg(unsigned long usr_param, uint8_t msg_type)
+{
+ int retval;
+ struct ipa_ioc_vlan_iface_info *vlan_info;
+ struct ipa_ioc_l2tp_vlan_mapping_info *mapping_info;
+ struct ipa_msg_meta msg_meta;
+
+ if (msg_type == ADD_VLAN_IFACE ||
+ msg_type == DEL_VLAN_IFACE) {
+ vlan_info = kzalloc(sizeof(struct ipa_ioc_vlan_iface_info),
+ GFP_KERNEL);
+ if (!vlan_info) {
+ IPAERR("no memory\n");
+ return -ENOMEM;
+ }
+
+ if (copy_from_user((u8 *)vlan_info, (void __user *)usr_param,
+ sizeof(struct ipa_ioc_vlan_iface_info))) {
+ kfree(vlan_info);
+ return -EFAULT;
+ }
+
+ memset(&msg_meta, 0, sizeof(msg_meta));
+ msg_meta.msg_type = msg_type;
+ msg_meta.msg_len = sizeof(struct ipa_ioc_vlan_iface_info);
+ retval = ipa3_send_msg(&msg_meta, vlan_info,
+ ipa3_vlan_l2tp_msg_free_cb);
+ if (retval) {
+ IPAERR("ipa3_send_msg failed: %d\n", retval);
+ kfree(vlan_info);
+ return retval;
+ }
+ } else if (msg_type == ADD_L2TP_VLAN_MAPPING ||
+ msg_type == DEL_L2TP_VLAN_MAPPING) {
+ mapping_info = kzalloc(sizeof(struct
+ ipa_ioc_l2tp_vlan_mapping_info), GFP_KERNEL);
+ if (!mapping_info) {
+ IPAERR("no memory\n");
+ return -ENOMEM;
+ }
+
+ if (copy_from_user((u8 *)mapping_info,
+ (void __user *)usr_param,
+ sizeof(struct ipa_ioc_l2tp_vlan_mapping_info))) {
+ kfree(mapping_info);
+ return -EFAULT;
+ }
+
+ memset(&msg_meta, 0, sizeof(msg_meta));
+ msg_meta.msg_type = msg_type;
+ msg_meta.msg_len = sizeof(struct
+ ipa_ioc_l2tp_vlan_mapping_info);
+ retval = ipa3_send_msg(&msg_meta, mapping_info,
+ ipa3_vlan_l2tp_msg_free_cb);
+ if (retval) {
+ IPAERR("ipa3_send_msg failed: %d\n", retval);
+ kfree(mapping_info);
+ return retval;
+ }
+ } else {
+ IPAERR("Unexpected event\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
@@ -1530,6 +1614,34 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
break;
+ case IPA_IOC_ADD_VLAN_IFACE:
+ if (ipa3_send_vlan_l2tp_msg(arg, ADD_VLAN_IFACE)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_DEL_VLAN_IFACE:
+ if (ipa3_send_vlan_l2tp_msg(arg, DEL_VLAN_IFACE)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_ADD_L2TP_VLAN_MAPPING:
+ if (ipa3_send_vlan_l2tp_msg(arg, ADD_L2TP_VLAN_MAPPING)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_DEL_L2TP_VLAN_MAPPING:
+ if (ipa3_send_vlan_l2tp_msg(arg, DEL_L2TP_VLAN_MAPPING)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
default: /* redundant, as cmd was checked against MAXNR */
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return -ENOTTY;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index fb44f96..5e789af 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -64,6 +64,10 @@ const char *ipa3_event_name[] = {
__stringify(IPA_QUOTA_REACH),
__stringify(IPA_SSR_BEFORE_SHUTDOWN),
__stringify(IPA_SSR_AFTER_POWERUP),
+ __stringify(ADD_VLAN_IFACE),
+ __stringify(DEL_VLAN_IFACE),
+ __stringify(ADD_L2TP_VLAN_MAPPING),
+ __stringify(DEL_L2TP_VLAN_MAPPING)
};
const char *ipa3_hdr_l2_type_name[] = {
@@ -526,7 +530,8 @@ static int ipa3_attrib_dump(struct ipa_rule_attrib *attrib,
}
if ((attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) ||
- (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3)) {
+ (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) ||
+ (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP)) {
pr_err("dst_mac_addr:%pM ", attrib->dst_mac_addr);
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 61889b6..edd5b54 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -2091,8 +2091,15 @@ int ipa3_init_hw(void)
ipahal_write_reg(IPA_BCR, val);
- if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
+ if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+ struct ipahal_reg_tx_cfg cfg;
+
ipahal_write_reg(IPA_CLKON_CFG, IPA_CLKON_CFG_v4_0);
+ ipahal_read_reg_fields(IPA_TX_CFG, &cfg);
+ /* disable PA_MASK_EN to allow holb drop */
+ cfg.pa_mask_en = 0;
+ ipahal_write_reg_fields(IPA_TX_CFG, &cfg);
+ }
ipa3_cfg_qsb();
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
index 4f20e0f..2253b4b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
@@ -797,6 +797,38 @@ static int ipa_fltrt_generate_hw_rule_bdy_ip4(u16 *en_rule,
ihl_ofst_meq32++;
}
+ if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+ ihl_ofst_meq32) || IPA_IS_RAN_OUT_OF_EQ(
+ ipa3_0_ihl_ofst_meq32, ihl_ofst_meq32 + 1)) {
+ IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32 + 1]);
+ /* populate first ihl meq eq */
+ extra = ipa_write_8(8, extra);
+ rest = ipa_write_8(attrib->dst_mac_addr_mask[3], rest);
+ rest = ipa_write_8(attrib->dst_mac_addr_mask[2], rest);
+ rest = ipa_write_8(attrib->dst_mac_addr_mask[1], rest);
+ rest = ipa_write_8(attrib->dst_mac_addr_mask[0], rest);
+ rest = ipa_write_8(attrib->dst_mac_addr[3], rest);
+ rest = ipa_write_8(attrib->dst_mac_addr[2], rest);
+ rest = ipa_write_8(attrib->dst_mac_addr[1], rest);
+ rest = ipa_write_8(attrib->dst_mac_addr[0], rest);
+ /* populate second ihl meq eq */
+ extra = ipa_write_8(12, extra);
+ rest = ipa_write_16(0, rest);
+ rest = ipa_write_8(attrib->dst_mac_addr_mask[5], rest);
+ rest = ipa_write_8(attrib->dst_mac_addr_mask[4], rest);
+ rest = ipa_write_16(0, rest);
+ rest = ipa_write_8(attrib->dst_mac_addr[5], rest);
+ rest = ipa_write_8(attrib->dst_mac_addr[4], rest);
+ ihl_ofst_meq32 += 2;
+ }
+
if (attrib->attrib_mask & IPA_FLT_META_DATA) {
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_METADATA_COMPARE);
rest = ipa_write_32(attrib->meta_data_mask, rest);
@@ -1103,6 +1135,38 @@ static int ipa_fltrt_generate_hw_rule_bdy_ip6(u16 *en_rule,
ihl_ofst_meq32++;
}
+ if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+ ihl_ofst_meq32) || IPA_IS_RAN_OUT_OF_EQ(
+ ipa3_0_ihl_ofst_meq32, ihl_ofst_meq32 + 1)) {
+ IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32 + 1]);
+ /* populate first ihl meq eq */
+ extra = ipa_write_8(8, extra);
+ rest = ipa_write_8(attrib->dst_mac_addr_mask[3], rest);
+ rest = ipa_write_8(attrib->dst_mac_addr_mask[2], rest);
+ rest = ipa_write_8(attrib->dst_mac_addr_mask[1], rest);
+ rest = ipa_write_8(attrib->dst_mac_addr_mask[0], rest);
+ rest = ipa_write_8(attrib->dst_mac_addr[3], rest);
+ rest = ipa_write_8(attrib->dst_mac_addr[2], rest);
+ rest = ipa_write_8(attrib->dst_mac_addr[1], rest);
+ rest = ipa_write_8(attrib->dst_mac_addr[0], rest);
+ /* populate second ihl meq eq */
+ extra = ipa_write_8(12, extra);
+ rest = ipa_write_16(0, rest);
+ rest = ipa_write_8(attrib->dst_mac_addr_mask[5], rest);
+ rest = ipa_write_8(attrib->dst_mac_addr_mask[4], rest);
+ rest = ipa_write_16(0, rest);
+ rest = ipa_write_8(attrib->dst_mac_addr[5], rest);
+ rest = ipa_write_8(attrib->dst_mac_addr[4], rest);
+ ihl_ofst_meq32 += 2;
+ }
+
if (attrib->attrib_mask & IPA_FLT_META_DATA) {
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_METADATA_COMPARE);
rest = ipa_write_32(attrib->meta_data_mask, rest);
@@ -1613,6 +1677,40 @@ static int ipa_flt_generate_eq_ip4(enum ipa_ip_type ip,
ofst_meq128++;
}
+ if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+ ihl_ofst_meq32) || IPA_IS_RAN_OUT_OF_EQ(
+ ipa3_0_ihl_ofst_meq32, ihl_ofst_meq32 + 1)) {
+ IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32 + 1]);
+ /* populate the first ihl meq 32 eq */
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 8;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+ (attrib->dst_mac_addr_mask[3] & 0xFF) |
+ ((attrib->dst_mac_addr_mask[2] << 8) & 0xFF00) |
+ ((attrib->dst_mac_addr_mask[1] << 16) & 0xFF0000) |
+ ((attrib->dst_mac_addr_mask[0] << 24) & 0xFF000000);
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+ (attrib->dst_mac_addr[3] & 0xFF) |
+ ((attrib->dst_mac_addr[2] << 8) & 0xFF00) |
+ ((attrib->dst_mac_addr[1] << 16) & 0xFF0000) |
+ ((attrib->dst_mac_addr[0] << 24) & 0xFF000000);
+ /* populate the second ihl meq 32 eq */
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32 + 1].offset = 12;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32 + 1].mask =
+ ((attrib->dst_mac_addr_mask[5] << 16) & 0xFF0000) |
+ ((attrib->dst_mac_addr_mask[4] << 24) & 0xFF000000);
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32 + 1].value =
+ ((attrib->dst_mac_addr[5] << 16) & 0xFF0000) |
+ ((attrib->dst_mac_addr[4] << 24) & 0xFF000000);
+ ihl_ofst_meq32 += 2;
+ }
+
if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
IPAHAL_ERR("ran out of meq32 eq\n");
@@ -1976,6 +2074,40 @@ static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip,
ofst_meq128++;
}
+ if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+ ihl_ofst_meq32) || IPA_IS_RAN_OUT_OF_EQ(
+ ipa3_0_ihl_ofst_meq32, ihl_ofst_meq32 + 1)) {
+ IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32 + 1]);
+ /* populate the first ihl meq 32 eq */
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 8;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+ (attrib->dst_mac_addr_mask[3] & 0xFF) |
+ ((attrib->dst_mac_addr_mask[2] << 8) & 0xFF00) |
+ ((attrib->dst_mac_addr_mask[1] << 16) & 0xFF0000) |
+ ((attrib->dst_mac_addr_mask[0] << 24) & 0xFF000000);
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+ (attrib->dst_mac_addr[3] & 0xFF) |
+ ((attrib->dst_mac_addr[2] << 8) & 0xFF00) |
+ ((attrib->dst_mac_addr[1] << 16) & 0xFF0000) |
+ ((attrib->dst_mac_addr[0] << 24) & 0xFF000000);
+ /* populate the second ihl meq 32 eq */
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32 + 1].offset = 12;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32 + 1].mask =
+ ((attrib->dst_mac_addr_mask[5] << 16) & 0xFF0000) |
+ ((attrib->dst_mac_addr_mask[4] << 24) & 0xFF000000);
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32 + 1].value =
+ ((attrib->dst_mac_addr[5] << 16) & 0xFF0000) |
+ ((attrib->dst_mac_addr[4] << 24) & 0xFF000000);
+ ihl_ofst_meq32 += 2;
+ }
+
if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
IPAHAL_ERR("ran out of meq32 eq\n");
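The byte packing repeated in the four IPA_FLT_MAC_DST_ADDR_L2TP hunks above can be summarised by this sketch (hypothetical helper; offsets 8 and 12 are the payload offsets used by the patch for the inner Ethernet destination MAC):

#include <linux/types.h>

/* Sketch: fold a 6-byte MAC into two 32-bit equation values, bytes 0-3 in
 * the word at offset 8 and bytes 4-5 in the upper half of the word at
 * offset 12, matching the mask/value layout built above.
 */
static void example_pack_mac_eq(const u8 *mac, u32 *val_at_ofst8, u32 *val_at_ofst12)
{
	*val_at_ofst8 = (mac[3] & 0xFF) |
			((mac[2] << 8) & 0xFF00) |
			((mac[1] << 16) & 0xFF0000) |
			((mac[0] << 24) & 0xFF000000);
	*val_at_ofst12 = ((mac[5] << 16) & 0xFF0000) |
			 ((mac[4] << 24) & 0xFF000000);
}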
diff --git a/drivers/scsi/ufs/ufs-debugfs.c b/drivers/scsi/ufs/ufs-debugfs.c
index bc2d2d4..5a578f1 100644
--- a/drivers/scsi/ufs/ufs-debugfs.c
+++ b/drivers/scsi/ufs/ufs-debugfs.c
@@ -850,8 +850,8 @@ static const struct file_operations ufsdbg_host_regs_fops = {
static int ufsdbg_dump_device_desc_show(struct seq_file *file, void *data)
{
int err = 0;
- int buff_len = QUERY_DESC_DEVICE_MAX_SIZE;
- u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE];
+ int buff_len = QUERY_DESC_DEVICE_DEF_SIZE;
+ u8 desc_buf[QUERY_DESC_DEVICE_DEF_SIZE];
struct ufs_hba *hba = (struct ufs_hba *)file->private;
struct desc_field_offset device_desc_field_name[] = {
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 3245fe1..f85a67d 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -65,6 +65,7 @@
#define UFS_MAX_LUNS (SCSI_W_LUN_BASE + UFS_UPIU_MAX_UNIT_NUM_ID)
#define UFS_UPIU_WLUN_ID (1 << 7)
#define UFS_UPIU_MAX_GENERAL_LUN 8
+#define QUERY_DESC_IDN_CONFIGURATION QUERY_DESC_IDN_CONFIGURAION
/* Well known logical unit id in LUN field of UPIU */
enum {
@@ -144,19 +145,13 @@ enum desc_header_offset {
QUERY_DESC_DESC_TYPE_OFFSET = 0x01,
};
-enum ufs_desc_max_size {
- QUERY_DESC_DEVICE_MAX_SIZE = 0x40,
- QUERY_DESC_CONFIGURAION_MAX_SIZE = 0x90,
- QUERY_DESC_UNIT_MAX_SIZE = 0x23,
- QUERY_DESC_INTERCONNECT_MAX_SIZE = 0x06,
- /*
- * Max. 126 UNICODE characters (2 bytes per character) plus 2 bytes
- * of descriptor header.
- */
- QUERY_DESC_STRING_MAX_SIZE = 0xFE,
- QUERY_DESC_GEOMETRY_MAZ_SIZE = 0x44,
- QUERY_DESC_POWER_MAX_SIZE = 0x62,
- QUERY_DESC_RFU_MAX_SIZE = 0x00,
+enum ufs_desc_def_size {
+ QUERY_DESC_DEVICE_DEF_SIZE = 0x40,
+ QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90,
+ QUERY_DESC_UNIT_DEF_SIZE = 0x23,
+ QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06,
+ QUERY_DESC_GEOMETRY_DEF_SIZE = 0x44,
+ QUERY_DESC_POWER_DEF_SIZE = 0x62,
};
/* Unit descriptor parameters offsets in bytes*/
diff --git a/drivers/scsi/ufs/ufs_quirks.c b/drivers/scsi/ufs/ufs_quirks.c
index 3210d60..da2bfd5 100644
--- a/drivers/scsi/ufs/ufs_quirks.c
+++ b/drivers/scsi/ufs/ufs_quirks.c
@@ -51,7 +51,7 @@ static struct ufs_card_fix ufs_fixups[] = {
void ufs_advertise_fixup_device(struct ufs_hba *hba)
{
int err;
- u8 str_desc_buf[QUERY_DESC_STRING_MAX_SIZE + 1];
+ u8 str_desc_buf[QUERY_DESC_MAX_SIZE + 1];
char *model;
struct ufs_card_fix *f;
@@ -59,13 +59,13 @@ void ufs_advertise_fixup_device(struct ufs_hba *hba)
if (!model)
goto out;
- memset(str_desc_buf, 0, QUERY_DESC_STRING_MAX_SIZE);
+ memset(str_desc_buf, 0, QUERY_DESC_MAX_SIZE);
err = ufshcd_read_string_desc(hba, hba->dev_info.i_product_name,
- str_desc_buf, QUERY_DESC_STRING_MAX_SIZE, ASCII_STD);
+ str_desc_buf, QUERY_DESC_MAX_SIZE, ASCII_STD);
if (err)
goto out;
- str_desc_buf[QUERY_DESC_STRING_MAX_SIZE] = '\0';
+ str_desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
strlcpy(model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
MAX_MODEL_LEN));
diff --git a/drivers/scsi/ufs/ufs_test.c b/drivers/scsi/ufs/ufs_test.c
index d41871a..2e3997d 100644
--- a/drivers/scsi/ufs/ufs_test.c
+++ b/drivers/scsi/ufs/ufs_test.c
@@ -603,8 +603,8 @@ static void ufs_test_random_async_query(void *data, async_cookie_t cookie)
struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
struct scsi_device *sdev;
struct ufs_hba *hba;
- int buff_len = QUERY_DESC_UNIT_MAX_SIZE;
- u8 desc_buf[QUERY_DESC_UNIT_MAX_SIZE];
+ int buff_len = QUERY_DESC_UNIT_DEF_SIZE;
+ u8 desc_buf[QUERY_DESC_UNIT_DEF_SIZE];
bool flag;
u32 att;
int ret = 0;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 59222ea..a2b5ea0 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -251,19 +251,6 @@ static void ufshcd_hex_dump(struct ufs_hba *hba, const char * const str,
16, 4, buf, len, false);
}
-static u32 ufs_query_desc_max_size[] = {
- QUERY_DESC_DEVICE_MAX_SIZE,
- QUERY_DESC_CONFIGURAION_MAX_SIZE,
- QUERY_DESC_UNIT_MAX_SIZE,
- QUERY_DESC_RFU_MAX_SIZE,
- QUERY_DESC_INTERCONNECT_MAX_SIZE,
- QUERY_DESC_STRING_MAX_SIZE,
- QUERY_DESC_RFU_MAX_SIZE,
- QUERY_DESC_GEOMETRY_MAZ_SIZE,
- QUERY_DESC_POWER_MAX_SIZE,
- QUERY_DESC_RFU_MAX_SIZE,
-};
-
enum {
UFSHCD_MAX_CHANNEL = 0,
UFSHCD_MAX_ID = 1,
@@ -3628,7 +3615,7 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
goto out;
}
- if (*buf_len <= QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
+ if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
__func__, *buf_len);
err = -EINVAL;
@@ -3708,6 +3695,92 @@ int ufshcd_query_descriptor(struct ufs_hba *hba,
EXPORT_SYMBOL(ufshcd_query_descriptor);
/**
+ * ufshcd_read_desc_length - read the specified descriptor length from header
+ * @hba: Pointer to adapter instance
+ * @desc_id: descriptor idn value
+ * @desc_index: descriptor index
+ * @desc_length: pointer to variable to read the length of descriptor
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+static int ufshcd_read_desc_length(struct ufs_hba *hba,
+ enum desc_idn desc_id,
+ int desc_index,
+ int *desc_length)
+{
+ int ret;
+ u8 header[QUERY_DESC_HDR_SIZE];
+ int header_len = QUERY_DESC_HDR_SIZE;
+
+ if (desc_id >= QUERY_DESC_IDN_MAX)
+ return -EINVAL;
+
+ ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
+ desc_id, desc_index, 0, header,
+ &header_len);
+
+ if (ret) {
+ dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
+ __func__, desc_id);
+ return ret;
+ } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
+ dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
+ __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
+ desc_id);
+ ret = -EINVAL;
+ }
+
+ *desc_length = header[QUERY_DESC_LENGTH_OFFSET];
+ return ret;
+
+}
+
+/**
+ * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
+ * @hba: Pointer to adapter instance
+ * @desc_id: descriptor idn value
+ * @desc_len: mapped desc length (out)
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+int ufshcd_map_desc_id_to_length(struct ufs_hba *hba,
+ enum desc_idn desc_id, int *desc_len)
+{
+ switch (desc_id) {
+ case QUERY_DESC_IDN_DEVICE:
+ *desc_len = hba->desc_size.dev_desc;
+ break;
+ case QUERY_DESC_IDN_POWER:
+ *desc_len = hba->desc_size.pwr_desc;
+ break;
+ case QUERY_DESC_IDN_GEOMETRY:
+ *desc_len = hba->desc_size.geom_desc;
+ break;
+ case QUERY_DESC_IDN_CONFIGURATION:
+ *desc_len = hba->desc_size.conf_desc;
+ break;
+ case QUERY_DESC_IDN_UNIT:
+ *desc_len = hba->desc_size.unit_desc;
+ break;
+ case QUERY_DESC_IDN_INTERCONNECT:
+ *desc_len = hba->desc_size.interc_desc;
+ break;
+ case QUERY_DESC_IDN_STRING:
+ *desc_len = QUERY_DESC_MAX_SIZE;
+ break;
+ case QUERY_DESC_IDN_RFU_0:
+ case QUERY_DESC_IDN_RFU_1:
+ *desc_len = 0;
+ break;
+ default:
+ *desc_len = 0;
+ return -EINVAL;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
+
+/**
* ufshcd_read_desc_param - read the specified descriptor parameter
* @hba: Pointer to adapter instance
* @desc_id: descriptor idn value
@@ -3721,37 +3794,45 @@ EXPORT_SYMBOL(ufshcd_query_descriptor);
static int ufshcd_read_desc_param(struct ufs_hba *hba,
enum desc_idn desc_id,
int desc_index,
- u32 param_offset,
+ u8 param_offset,
u8 *param_read_buf,
- u32 param_size)
+ u8 param_size)
{
int ret;
u8 *desc_buf;
- u32 buff_len;
+ int buff_len;
bool is_kmalloc = true;
- /* safety checks */
- if (desc_id >= QUERY_DESC_IDN_MAX)
+ /* Safety check */
+ if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
return -EINVAL;
- buff_len = ufs_query_desc_max_size[desc_id];
- if ((param_offset + param_size) > buff_len)
- return -EINVAL;
+ /* Get the max length of descriptor from structure filled up at probe
+ * time.
+ */
+ ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
- if (!param_offset && (param_size == buff_len)) {
- /* memory space already available to hold full descriptor */
- desc_buf = param_read_buf;
- is_kmalloc = false;
- } else {
- /* allocate memory to hold full descriptor */
+ /* Sanity checks */
+ if (ret || !buff_len) {
+ dev_err(hba->dev, "%s: Failed to get full descriptor length",
+ __func__);
+ return ret;
+ }
+
+ /* Check whether we need temp memory */
+ if (param_offset != 0 || param_size < buff_len) {
desc_buf = kmalloc(buff_len, GFP_KERNEL);
if (!desc_buf)
return -ENOMEM;
+ } else {
+ desc_buf = param_read_buf;
+ is_kmalloc = false;
}
+ /* Request for full descriptor */
ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
- desc_id, desc_index, 0, desc_buf,
- &buff_len);
+ desc_id, desc_index, 0,
+ desc_buf, &buff_len);
if (ret) {
dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
@@ -3768,25 +3849,9 @@ static int ufshcd_read_desc_param(struct ufs_hba *hba,
goto out;
}
- /*
- * While reading variable size descriptors (like string descriptor),
- * some UFS devices may report the "LENGTH" (field in "Transaction
- * Specific fields" of Query Response UPIU) same as what was requested
- * in Query Request UPIU instead of reporting the actual size of the
- * variable size descriptor.
- * Although it's safe to ignore the "LENGTH" field for variable size
- * descriptors as we can always derive the length of the descriptor from
- * the descriptor header fields. Hence this change impose the length
- * match check only for fixed size descriptors (for which we always
- * request the correct size as part of Query Request UPIU).
- */
- if ((desc_id != QUERY_DESC_IDN_STRING) &&
- (buff_len != desc_buf[QUERY_DESC_LENGTH_OFFSET])) {
- dev_err(hba->dev, "%s: desc_buf length mismatch: buff_len %d, buff_len(desc_header) %d",
- __func__, buff_len, desc_buf[QUERY_DESC_LENGTH_OFFSET]);
- ret = -EINVAL;
- goto out;
- }
+ /* Check whether we will not copy more data than available */
+ if (is_kmalloc && param_size > buff_len)
+ param_size = buff_len;
if (is_kmalloc)
memcpy(param_read_buf, &desc_buf[param_offset], param_size);
@@ -7170,10 +7235,19 @@ static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
{
int ret;
- int buff_len = QUERY_DESC_POWER_MAX_SIZE;
- u8 desc_buf[QUERY_DESC_POWER_MAX_SIZE];
+ int buff_len = hba->desc_size.pwr_desc;
+ u8 *desc_buf = NULL;
u32 icc_level;
+ if (buff_len) {
+ desc_buf = kmalloc(buff_len, GFP_KERNEL);
+ if (!desc_buf) {
+ dev_err(hba->dev,
+ "%s: Failed to allocate desc_buf\n", __func__);
+ return;
+ }
+ }
+
ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
if (ret) {
dev_err(hba->dev,
@@ -7554,9 +7628,18 @@ static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
static int ufs_read_device_desc_data(struct ufs_hba *hba)
{
int err;
- u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE];
+ u8 *desc_buf = NULL;
- err = ufshcd_read_device_desc(hba, desc_buf, sizeof(desc_buf));
+ if (hba->desc_size.dev_desc) {
+ desc_buf = kmalloc(hba->desc_size.dev_desc, GFP_KERNEL);
+ if (!desc_buf) {
+ err = -ENOMEM;
+ dev_err(hba->dev,
+ "%s: Failed to allocate desc_buf\n", __func__);
+ return err;
+ }
+ }
+ err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
if (err)
return err;
@@ -7574,6 +7657,51 @@ static int ufs_read_device_desc_data(struct ufs_hba *hba)
return 0;
}
+static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
+{
+ int err;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
+ &hba->desc_size.dev_desc);
+ if (err)
+ hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
+ &hba->desc_size.pwr_desc);
+ if (err)
+ hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
+ &hba->desc_size.interc_desc);
+ if (err)
+ hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
+ &hba->desc_size.conf_desc);
+ if (err)
+ hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
+ &hba->desc_size.unit_desc);
+ if (err)
+ hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
+ &hba->desc_size.geom_desc);
+ if (err)
+ hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
+}
+
+static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
+{
+ hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
+ hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
+ hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
+ hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
+ hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
+ hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
+}
+
/**
* ufshcd_probe_hba - probe hba to detect device and initialize
* @hba: per-adapter instance
@@ -7614,6 +7742,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
if (ret)
goto out;
+ /* Initialize device descriptor sizes */
+ ufshcd_init_desc_sizes(hba);
ufs_advertise_fixup_device(hba);
ufshcd_tune_unipro_params(hba);
@@ -10075,6 +10205,9 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
ufshcd_init_lanes_per_dir(hba);
+ /* Set descriptor lengths to specification defaults */
+ ufshcd_def_desc_sizes(hba);
+
err = ufshcd_hba_init(hba);
if (err)
goto out_error;
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index a485885..343f327 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -249,6 +249,15 @@ struct ufs_dev_cmd {
struct ufs_query query;
};
+struct ufs_desc_size {
+ int dev_desc;
+ int pwr_desc;
+ int geom_desc;
+ int interc_desc;
+ int unit_desc;
+ int conf_desc;
+};
+
/**
* struct ufs_clk_info - UFS clock related info
* @list: list headed by hba->clk_list_head
@@ -738,6 +747,7 @@ struct ufshcd_cmd_log {
* @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
* device is known or not.
* @scsi_block_reqs_cnt: reference counting for scsi block requests
+ * @desc_size: descriptor sizes reported by device
*/
struct ufs_hba {
void __iomem *mmio_base;
@@ -967,6 +977,7 @@ struct ufs_hba {
int latency_hist_enabled;
struct io_latency_state io_lat_s;
+ struct ufs_desc_size desc_size;
};
static inline void ufshcd_mark_shutdown_ongoing(struct ufs_hba *hba)
@@ -1208,6 +1219,10 @@ int ufshcd_change_power_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *pwr_mode);
void ufshcd_abort_outstanding_transfer_requests(struct ufs_hba *hba,
int result);
+
+int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
+ int *desc_length);
+
u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
void ufshcd_scsi_block_requests(struct ufs_hba *hba);
diff --git a/drivers/sensors/sensors_ssc.c b/drivers/sensors/sensors_ssc.c
index d738767..dfdbd8e 100644
--- a/drivers/sensors/sensors_ssc.c
+++ b/drivers/sensors/sensors_ssc.c
@@ -32,6 +32,7 @@
#define IMAGE_LOAD_CMD 1
#define IMAGE_UNLOAD_CMD 0
+#define SSR_RESET_CMD 1
#define CLASS_NAME "ssc"
#define DRV_NAME "sensors"
#define DRV_VERSION "2.00"
@@ -53,6 +54,10 @@ static ssize_t slpi_boot_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count);
+static ssize_t slpi_ssr_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count);
+
struct slpi_loader_private {
void *pil_h;
struct kobject *boot_slpi_obj;
@@ -62,8 +67,12 @@ struct slpi_loader_private {
static struct kobj_attribute slpi_boot_attribute =
__ATTR(boot, 0220, NULL, slpi_boot_store);
+static struct kobj_attribute slpi_ssr_attribute =
+ __ATTR(ssr, 0220, NULL, slpi_ssr_store);
+
static struct attribute *attrs[] = {
&slpi_boot_attribute.attr,
+ &slpi_ssr_attribute.attr,
NULL,
};
@@ -138,6 +147,44 @@ static void slpi_loader_unload(struct platform_device *pdev)
}
}
+static ssize_t slpi_ssr_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ int ssr_cmd = 0;
+ struct subsys_device *sns_dev = NULL;
+ struct platform_device *pdev = slpi_private;
+ struct slpi_loader_private *priv = NULL;
+
+ pr_debug("%s: going to call slpi_ssr\n", __func__);
+
+ if (kstrtoint(buf, 10, &ssr_cmd) < 0)
+ return -EINVAL;
+
+ if (ssr_cmd != SSR_RESET_CMD)
+ return -EINVAL;
+
+ priv = platform_get_drvdata(pdev);
+ if (!priv)
+ return -EINVAL;
+
+ sns_dev = (struct subsys_device *)priv->pil_h;
+ if (!sns_dev)
+ return -EINVAL;
+
+ dev_err(&pdev->dev, "Something went wrong with SLPI, restarting\n");
+
+ /* subsystem_restart_dev uses a workqueue to handle the restart */
+ if (subsystem_restart_dev(sns_dev) != 0) {
+ dev_err(&pdev->dev, "subsystem_restart_dev failed\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(&pdev->dev, "SLPI restarted\n");
+ return count;
+}
+
static ssize_t slpi_boot_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf,
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 121fa34..e6c2aa3 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -506,6 +506,7 @@
config ICNSS
tristate "Platform driver for Q6 integrated connectivity"
+ select CNSS_UTILS
---help---
This module adds support for Q6 integrated WLAN connectivity
subsystem. This module is responsible for communicating WLAN on/off
@@ -545,6 +546,38 @@
used by audio driver to configure QDSP6v2's
ASM, ADM and AFE.
+config MSM_QDSP6_SSR
+ bool "Audio QDSP6 SSR support"
+ depends on MSM_QDSP6_APRV2_GLINK || MSM_QDSP6_APRV3_GLINK
+ help
+ Enable Subsystem Restart. Reset audio
+ clients when the ADSP subsystem is
+ restarted. Subsystem Restart for audio
+ is only used for processes on the ADSP
+ and signals audio drivers through APR.
+
+
+config MSM_QDSP6_PDR
+ bool "Audio QDSP6 PDR support"
+ depends on MSM_QDSP6_APRV2_GLINK || MSM_QDSP6_APRV3_GLINK
+ help
+ Enable Protection Domain Restart. Reset
+ audio clients when a process on the ADSP
+ is restarted. PDR for audio is only used
+ for processes on the ADSP and signals
+ audio drivers through APR.
+
+config MSM_QDSP6_NOTIFIER
+ bool "Audio QDSP6 PDR support"
+ depends on MSM_QDSP6_SSR || MSM_QDSP6_PDR
+ help
+ Enable notifier which decides whether
+ to use SSR or PDR and notifies all
+ audio clients of the event. Both SSR
+ and PDR are recovery methods when
+ there is a crash on ADSP. Audio drivers
+ are contacted by ADSP through APR.
+
config MSM_ADSP_LOADER
tristate "ADSP loader support"
select SND_SOC_MSM_APRV2_INTF
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
index dcf6654..d31bf8d 100644
--- a/drivers/soc/qcom/glink.c
+++ b/drivers/soc/qcom/glink.c
@@ -1930,6 +1930,7 @@ static struct channel_ctx *ch_name_to_ch_ctx_create(
kfree(flcid);
}
+ ctx->transport_ptr = xprt_ctx;
list_add_tail(&ctx->port_list_node, &xprt_ctx->channels);
GLINK_INFO_PERF_CH_XPRT(ctx, xprt_ctx,
@@ -2616,7 +2617,6 @@ void *glink_open(const struct glink_open_config *cfg)
ctx->local_xprt_req = best_id;
ctx->no_migrate = cfg->transport &&
!(cfg->options & GLINK_OPT_INITIAL_XPORT);
- ctx->transport_ptr = transport_ptr;
ctx->local_open_state = GLINK_CHANNEL_OPENING;
GLINK_INFO_PERF_CH(ctx,
"%s: local:GLINK_CHANNEL_CLOSED->GLINK_CHANNEL_OPENING\n",
@@ -2862,7 +2862,7 @@ static int glink_tx_common(void *handle, void *pkt_priv,
struct channel_ctx *ctx = (struct channel_ctx *)handle;
uint32_t riid;
int ret = 0;
- struct glink_core_tx_pkt *tx_info;
+ struct glink_core_tx_pkt *tx_info = NULL;
size_t intent_size;
bool is_atomic =
tx_flags & (GLINK_TX_SINGLE_THREADED | GLINK_TX_ATOMIC);
@@ -2877,6 +2877,13 @@ static int glink_tx_common(void *handle, void *pkt_priv,
return ret;
rwref_read_get_atomic(&ctx->ch_state_lhb2, is_atomic);
+ tx_info = kzalloc(sizeof(struct glink_core_tx_pkt),
+ is_atomic ? GFP_ATOMIC : GFP_KERNEL);
+ if (!tx_info) {
+ GLINK_ERR_CH(ctx, "%s: No memory for allocation\n", __func__);
+ ret = -ENOMEM;
+ goto glink_tx_common_err;
+ }
if (!(vbuf_provider || pbuf_provider)) {
ret = -EINVAL;
goto glink_tx_common_err;
@@ -2996,14 +3003,7 @@ static int glink_tx_common(void *handle, void *pkt_priv,
GLINK_INFO_PERF_CH(ctx, "%s: R[%u]:%zu data[%p], size[%zu]. TID %u\n",
__func__, riid, intent_size,
data ? data : iovec, size, current->pid);
- tx_info = kzalloc(sizeof(struct glink_core_tx_pkt),
- is_atomic ? GFP_ATOMIC : GFP_KERNEL);
- if (!tx_info) {
- GLINK_ERR_CH(ctx, "%s: No memory for allocation\n", __func__);
- ch_push_remote_rx_intent(ctx, intent_size, riid, cookie);
- ret = -ENOMEM;
- goto glink_tx_common_err;
- }
+
rwref_lock_init(&tx_info->pkt_ref, glink_tx_pkt_release);
INIT_LIST_HEAD(&tx_info->list_done);
INIT_LIST_HEAD(&tx_info->list_node);
@@ -3028,10 +3028,15 @@ static int glink_tx_common(void *handle, void *pkt_priv,
else
xprt_schedule_tx(ctx->transport_ptr, ctx, tx_info);
+ rwref_read_put(&ctx->ch_state_lhb2);
+ glink_put_ch_ctx(ctx, false);
+ return ret;
+
glink_tx_common_err:
rwref_read_put(&ctx->ch_state_lhb2);
glink_tx_common_err_2:
glink_put_ch_ctx(ctx, false);
+ kfree(tx_info);
return ret;
}
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index b5bb719..28f89bf 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -168,6 +168,76 @@ enum icnss_driver_event_type {
ICNSS_DRIVER_EVENT_MAX,
};
+enum icnss_msa_perm {
+ ICNSS_MSA_PERM_HLOS_ALL = 0,
+ ICNSS_MSA_PERM_WLAN_HW_RW = 1,
+ ICNSS_MSA_PERM_DUMP_COLLECT = 2,
+ ICNSS_MSA_PERM_MAX,
+};
+
+#define ICNSS_MAX_VMIDS 4
+
+struct icnss_mem_region_info {
+ uint64_t reg_addr;
+ uint32_t size;
+ uint8_t secure_flag;
+ enum icnss_msa_perm perm;
+};
+
+struct icnss_msa_perm_list_t {
+ int vmids[ICNSS_MAX_VMIDS];
+ int perms[ICNSS_MAX_VMIDS];
+ int nelems;
+};
+
+struct icnss_msa_perm_list_t msa_perm_secure_list[ICNSS_MSA_PERM_MAX] = {
+ [ICNSS_MSA_PERM_HLOS_ALL] = {
+ .vmids = {VMID_HLOS},
+ .perms = {PERM_READ | PERM_WRITE | PERM_EXEC},
+ .nelems = 1,
+ },
+
+ [ICNSS_MSA_PERM_WLAN_HW_RW] = {
+ .vmids = {VMID_MSS_MSA, VMID_WLAN},
+ .perms = {PERM_READ | PERM_WRITE,
+ PERM_READ | PERM_WRITE},
+ .nelems = 2,
+ },
+
+ [ICNSS_MSA_PERM_DUMP_COLLECT] = {
+ .vmids = {VMID_MSS_MSA, VMID_WLAN, VMID_HLOS},
+ .perms = {PERM_READ | PERM_WRITE,
+ PERM_READ | PERM_WRITE,
+ PERM_READ},
+ .nelems = 3,
+ },
+};
+
+struct icnss_msa_perm_list_t msa_perm_list[ICNSS_MSA_PERM_MAX] = {
+ [ICNSS_MSA_PERM_HLOS_ALL] = {
+ .vmids = {VMID_HLOS},
+ .perms = {PERM_READ | PERM_WRITE | PERM_EXEC},
+ .nelems = 1,
+ },
+
+ [ICNSS_MSA_PERM_WLAN_HW_RW] = {
+ .vmids = {VMID_MSS_MSA, VMID_WLAN, VMID_WLAN_CE},
+ .perms = {PERM_READ | PERM_WRITE,
+ PERM_READ | PERM_WRITE,
+ PERM_READ | PERM_WRITE},
+ .nelems = 3,
+ },
+
+ [ICNSS_MSA_PERM_DUMP_COLLECT] = {
+ .vmids = {VMID_MSS_MSA, VMID_WLAN, VMID_WLAN_CE, VMID_HLOS},
+ .perms = {PERM_READ | PERM_WRITE,
+ PERM_READ | PERM_WRITE,
+ PERM_READ | PERM_WRITE,
+ PERM_READ},
+ .nelems = 4,
+ },
+};
+
struct icnss_event_pd_service_down_data {
bool crashed;
bool fw_rejuvenate;
@@ -375,6 +445,84 @@ static void icnss_ignore_qmi_timeout(bool ignore)
static void icnss_ignore_qmi_timeout(bool ignore) { }
#endif
+static int icnss_assign_msa_perm(struct icnss_mem_region_info
+ *mem_region, enum icnss_msa_perm new_perm)
+{
+ int ret = 0;
+ phys_addr_t addr;
+ u32 size;
+ u32 i = 0;
+ u32 source_vmids[ICNSS_MAX_VMIDS];
+ u32 source_nelems;
+ u32 dest_vmids[ICNSS_MAX_VMIDS];
+ u32 dest_perms[ICNSS_MAX_VMIDS];
+ u32 dest_nelems;
+ enum icnss_msa_perm cur_perm = mem_region->perm;
+ struct icnss_msa_perm_list_t *new_perm_list, *old_perm_list;
+
+ addr = mem_region->reg_addr;
+ size = mem_region->size;
+
+ if (mem_region->secure_flag) {
+ new_perm_list = &msa_perm_secure_list[new_perm];
+ old_perm_list = &msa_perm_secure_list[cur_perm];
+ } else {
+ new_perm_list = &msa_perm_list[new_perm];
+ old_perm_list = &msa_perm_list[cur_perm];
+ }
+
+ source_nelems = old_perm_list->nelems;
+ dest_nelems = new_perm_list->nelems;
+
+ for (i = 0; i < source_nelems; ++i)
+ source_vmids[i] = old_perm_list->vmids[i];
+
+ for (i = 0; i < dest_nelems; ++i) {
+ dest_vmids[i] = new_perm_list->vmids[i];
+ dest_perms[i] = new_perm_list->perms[i];
+ }
+
+ ret = hyp_assign_phys(addr, size, source_vmids, source_nelems,
+ dest_vmids, dest_perms, dest_nelems);
+ if (ret) {
+ icnss_pr_err("Hyperviser map failed for PA=%pa size=%u err=%d\n",
+ &addr, size, ret);
+ goto out;
+ }
+
+ icnss_pr_dbg("Hypervisor map for source_nelems=%d, source[0]=%x, source[1]=%x, source[2]=%x,"
+ "source[3]=%x, dest_nelems=%d, dest[0]=%x, dest[1]=%x, dest[2]=%x, dest[3]=%x\n",
+ source_nelems, source_vmids[0], source_vmids[1],
+ source_vmids[2], source_vmids[3], dest_nelems,
+ dest_vmids[0], dest_vmids[1], dest_vmids[2],
+ dest_vmids[3]);
+out:
+ return ret;
+}
+
+static int icnss_assign_msa_perm_all(struct icnss_priv *priv,
+ enum icnss_msa_perm new_perm)
+{
+ int ret;
+ int i;
+ enum icnss_msa_perm old_perm;
+
+ for (i = 0; i < priv->nr_mem_region; i++) {
+ old_perm = priv->mem_region[i].perm;
+ ret = icnss_assign_msa_perm(&priv->mem_region[i], new_perm);
+ if (ret)
+ goto err_unmap;
+ priv->mem_region[i].perm = new_perm;
+ }
+ return 0;
+
+err_unmap:
+ for (i--; i >= 0; i--) {
+ icnss_assign_msa_perm(&priv->mem_region[i], old_perm);
+ }
+ return ret;
+}
+
static void icnss_pm_stay_awake(struct icnss_priv *priv)
{
if (atomic_inc_return(&priv->pm_count) != 1)
@@ -980,119 +1128,6 @@ int icnss_power_off(struct device *dev)
}
EXPORT_SYMBOL(icnss_power_off);
-static int icnss_map_msa_permissions(struct icnss_mem_region_info *mem_region)
-{
- int ret = 0;
- phys_addr_t addr;
- u32 size;
- u32 source_vmlist[1] = {VMID_HLOS};
- int dest_vmids[3] = {VMID_MSS_MSA, VMID_WLAN, 0};
- int dest_perms[3] = {PERM_READ|PERM_WRITE,
- PERM_READ|PERM_WRITE,
- PERM_READ|PERM_WRITE};
- int source_nelems = sizeof(source_vmlist)/sizeof(u32);
- int dest_nelems = 0;
-
- addr = mem_region->reg_addr;
- size = mem_region->size;
-
- if (!mem_region->secure_flag) {
- dest_vmids[2] = VMID_WLAN_CE;
- dest_nelems = 3;
- } else {
- dest_vmids[2] = 0;
- dest_nelems = 2;
- }
- ret = hyp_assign_phys(addr, size, source_vmlist, source_nelems,
- dest_vmids, dest_perms, dest_nelems);
- if (ret) {
- icnss_pr_err("Hyperviser map failed for PA=%pa size=%u err=%d\n",
- &addr, size, ret);
- goto out;
- }
-
- icnss_pr_dbg("Hypervisor map for source=%x, dest_nelems=%d, dest[0]=%x, dest[1]=%x, dest[2]=%x\n",
- source_vmlist[0], dest_nelems, dest_vmids[0],
- dest_vmids[1], dest_vmids[2]);
-out:
- return ret;
-
-}
-
-static int icnss_unmap_msa_permissions(struct icnss_mem_region_info *mem_region)
-{
- int ret = 0;
- phys_addr_t addr;
- u32 size;
- u32 dest_vmids[1] = {VMID_HLOS};
- int source_vmlist[3] = {VMID_MSS_MSA, VMID_WLAN, 0};
- int dest_perms[1] = {PERM_READ|PERM_WRITE|PERM_EXEC};
- int source_nelems = 0;
- int dest_nelems = sizeof(dest_vmids)/sizeof(u32);
-
- addr = mem_region->reg_addr;
- size = mem_region->size;
-
- if (!mem_region->secure_flag) {
- source_vmlist[2] = VMID_WLAN_CE;
- source_nelems = 3;
- } else {
- source_vmlist[2] = 0;
- source_nelems = 2;
- }
-
- ret = hyp_assign_phys(addr, size, source_vmlist, source_nelems,
- dest_vmids, dest_perms, dest_nelems);
- if (ret) {
- icnss_pr_err("Hyperviser unmap failed for PA=%pa size=%u err=%d\n",
- &addr, size, ret);
- goto out;
- }
- icnss_pr_dbg("Hypervisor unmap for source_nelems=%d, source[0]=%x, source[1]=%x, source[2]=%x, dest=%x\n",
- source_nelems, source_vmlist[0], source_vmlist[1],
- source_vmlist[2], dest_vmids[0]);
-out:
- return ret;
-}
-
-static int icnss_setup_msa_permissions(struct icnss_priv *priv)
-{
- int ret;
- int i;
-
- if (test_bit(ICNSS_MSA0_ASSIGNED, &priv->state))
- return 0;
-
- for (i = 0; i < priv->nr_mem_region; i++) {
-
- ret = icnss_map_msa_permissions(&priv->mem_region[i]);
- if (ret)
- goto err_unmap;
- }
-
- set_bit(ICNSS_MSA0_ASSIGNED, &priv->state);
-
- return 0;
-
-err_unmap:
- for (i--; i >= 0; i--)
- icnss_unmap_msa_permissions(&priv->mem_region[i]);
- return ret;
-}
-
-static void icnss_remove_msa_permissions(struct icnss_priv *priv)
-{
- int i;
-
- if (!test_bit(ICNSS_MSA0_ASSIGNED, &priv->state))
- return;
-
- for (i = 0; i < priv->nr_mem_region; i++)
- icnss_unmap_msa_permissions(&priv->mem_region[i]);
-
- clear_bit(ICNSS_MSA0_ASSIGNED, &priv->state);
-}
-
static int wlfw_msa_mem_info_send_sync_msg(void)
{
int ret;
@@ -1898,9 +1933,12 @@ static int icnss_driver_event_server_arrive(void *data)
if (ret < 0)
goto err_power_on;
- ret = icnss_setup_msa_permissions(penv);
- if (ret < 0)
- goto err_power_on;
+ if (!test_bit(ICNSS_MSA0_ASSIGNED, &penv->state)) {
+ ret = icnss_assign_msa_perm_all(penv, ICNSS_MSA_PERM_WLAN_HW_RW);
+ if (ret < 0)
+ goto err_power_on;
+ set_bit(ICNSS_MSA0_ASSIGNED, &penv->state);
+ }
ret = wlfw_msa_ready_send_sync_msg();
if (ret < 0)
@@ -1918,7 +1956,7 @@ static int icnss_driver_event_server_arrive(void *data)
return ret;
err_setup_msa:
- icnss_remove_msa_permissions(penv);
+ icnss_assign_msa_perm_all(penv, ICNSS_MSA_PERM_HLOS_ALL);
err_power_on:
icnss_hw_power_off(penv);
fail:
@@ -2333,14 +2371,22 @@ static int icnss_modem_notifier_nb(struct notifier_block *nb,
struct icnss_priv *priv = container_of(nb, struct icnss_priv,
modem_ssr_nb);
struct icnss_uevent_fw_down_data fw_down_data;
+ int ret = 0;
icnss_pr_vdbg("Modem-Notify: event %lu\n", code);
- if (code == SUBSYS_AFTER_SHUTDOWN &&
- notif->crashed == CRASH_STATUS_ERR_FATAL) {
- icnss_remove_msa_permissions(priv);
- icnss_pr_info("Collecting msa0 segment dump\n");
- icnss_msa0_ramdump(priv);
+ if (code == SUBSYS_AFTER_SHUTDOWN) {
+ ret = icnss_assign_msa_perm_all(priv,
+ ICNSS_MSA_PERM_DUMP_COLLECT);
+ if (!ret) {
+ icnss_pr_info("Collecting msa0 segment dump\n");
+ icnss_msa0_ramdump(priv);
+ icnss_assign_msa_perm_all(priv,
+ ICNSS_MSA_PERM_WLAN_HW_RW);
+ } else {
+ icnss_pr_err("Not able to Collect msa0 segment dump"
+ "Apps permissions not assigned %d\n", ret);
+ }
return NOTIFY_OK;
}
@@ -4307,7 +4353,8 @@ static int icnss_remove(struct platform_device *pdev)
icnss_hw_power_off(penv);
- icnss_remove_msa_permissions(penv);
+ icnss_assign_msa_perm_all(penv, ICNSS_MSA_PERM_HLOS_ALL);
+ clear_bit(ICNSS_MSA0_ASSIGNED, &penv->state);
dev_set_drvdata(&pdev->dev, NULL);
diff --git a/drivers/soc/qcom/ramdump.c b/drivers/soc/qcom/ramdump.c
index d5b051e..dd77062 100644
--- a/drivers/soc/qcom/ramdump.c
+++ b/drivers/soc/qcom/ramdump.c
@@ -16,7 +16,6 @@
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/module.h>
-#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
@@ -24,10 +23,20 @@
#include <linux/uaccess.h>
#include <linux/elf.h>
#include <linux/wait.h>
+#include <linux/cdev.h>
#include <soc/qcom/ramdump.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
+
+#define RAMDUMP_NUM_DEVICES 256
+#define RAMDUMP_NAME "ramdump"
+
+static struct class *ramdump_class;
+static dev_t ramdump_dev;
+static DEFINE_MUTEX(rd_minor_mutex);
+static DEFINE_IDA(rd_minor_id);
+static bool ramdump_devnode_inited;
#define RAMDUMP_WAIT_MSECS 120000
struct ramdump_device {
@@ -38,7 +47,8 @@ struct ramdump_device {
int ramdump_status;
struct completion ramdump_complete;
- struct miscdevice device;
+ struct cdev cdev;
+ struct device *dev;
wait_queue_head_t dump_wait_q;
int nsegments;
@@ -51,17 +61,19 @@ struct ramdump_device {
static int ramdump_open(struct inode *inode, struct file *filep)
{
- struct ramdump_device *rd_dev = container_of(filep->private_data,
- struct ramdump_device, device);
+ struct ramdump_device *rd_dev = container_of(inode->i_cdev,
+ struct ramdump_device, cdev);
rd_dev->consumer_present = 1;
rd_dev->ramdump_status = 0;
+ filep->private_data = rd_dev;
return 0;
}
static int ramdump_release(struct inode *inode, struct file *filep)
{
- struct ramdump_device *rd_dev = container_of(filep->private_data,
- struct ramdump_device, device);
+
+ struct ramdump_device *rd_dev = container_of(inode->i_cdev,
+ struct ramdump_device, cdev);
rd_dev->consumer_present = 0;
rd_dev->data_ready = 0;
complete(&rd_dev->ramdump_complete);
@@ -105,8 +117,7 @@ static unsigned long offset_translate(loff_t user_offset,
static ssize_t ramdump_read(struct file *filep, char __user *buf, size_t count,
loff_t *pos)
{
- struct ramdump_device *rd_dev = container_of(filep->private_data,
- struct ramdump_device, device);
+ struct ramdump_device *rd_dev = filep->private_data;
void *device_mem = NULL, *origdevice_mem = NULL, *vaddr = NULL;
unsigned long data_left = 0, bytes_before, bytes_after;
unsigned long addr = 0;
@@ -154,7 +165,7 @@ static ssize_t ramdump_read(struct file *filep, char __user *buf, size_t count,
rd_dev->attrs = 0;
rd_dev->attrs |= DMA_ATTR_SKIP_ZEROING;
- device_mem = vaddr ?: dma_remap(rd_dev->device.parent, NULL, addr,
+ device_mem = vaddr ?: dma_remap(rd_dev->dev->parent, NULL, addr,
copy_size, rd_dev->attrs);
origdevice_mem = device_mem;
@@ -206,7 +217,7 @@ static ssize_t ramdump_read(struct file *filep, char __user *buf, size_t count,
kfree(finalbuf);
if (!vaddr && origdevice_mem)
- dma_unremap(rd_dev->device.parent, origdevice_mem, copy_size);
+ dma_unremap(rd_dev->dev->parent, origdevice_mem, copy_size);
*pos += copy_size;
@@ -217,7 +228,7 @@ static ssize_t ramdump_read(struct file *filep, char __user *buf, size_t count,
ramdump_done:
if (!vaddr && origdevice_mem)
- dma_unremap(rd_dev->device.parent, origdevice_mem, copy_size);
+ dma_unremap(rd_dev->dev->parent, origdevice_mem, copy_size);
kfree(finalbuf);
rd_dev->data_ready = 0;
@@ -229,8 +240,7 @@ static ssize_t ramdump_read(struct file *filep, char __user *buf, size_t count,
static unsigned int ramdump_poll(struct file *filep,
struct poll_table_struct *wait)
{
- struct ramdump_device *rd_dev = container_of(filep->private_data,
- struct ramdump_device, device);
+ struct ramdump_device *rd_dev = filep->private_data;
unsigned int mask = 0;
if (rd_dev->data_ready)
@@ -247,9 +257,26 @@ static const struct file_operations ramdump_file_ops = {
.poll = ramdump_poll
};
-void *create_ramdump_device(const char *dev_name, struct device *parent)
+static int ramdump_devnode_init(void)
{
int ret;
+
+ ramdump_class = class_create(THIS_MODULE, RAMDUMP_NAME);
+ ret = alloc_chrdev_region(&ramdump_dev, 0, RAMDUMP_NUM_DEVICES,
+ RAMDUMP_NAME);
+ if (ret < 0) {
+ pr_warn("%s: unable to allocate major\n", __func__);
+ return ret;
+ }
+
+ ramdump_devnode_inited = true;
+
+ return 0;
+}
+
+void *create_ramdump_device(const char *dev_name, struct device *parent)
+{
+ int ret, minor;
struct ramdump_device *rd_dev;
if (!dev_name) {
@@ -257,6 +284,14 @@ void *create_ramdump_device(const char *dev_name, struct device *parent)
return NULL;
}
+ mutex_lock(&rd_minor_mutex);
+ if (!ramdump_devnode_inited) {
+ ret = ramdump_devnode_init();
+ if (ret)
+ return ERR_PTR(ret);
+ }
+ mutex_unlock(&rd_minor_mutex);
+
rd_dev = kzalloc(sizeof(struct ramdump_device), GFP_KERNEL);
if (!rd_dev) {
@@ -265,15 +300,20 @@ void *create_ramdump_device(const char *dev_name, struct device *parent)
return NULL;
}
+ /* get a minor number */
+ minor = ida_simple_get(&rd_minor_id, 0, RAMDUMP_NUM_DEVICES,
+ GFP_KERNEL);
+ if (minor < 0) {
+ pr_err("%s: No more minor numbers left! rc:%d\n", __func__,
+ minor);
+ ret = -ENODEV;
+ goto fail_out_of_minors;
+ }
+
snprintf(rd_dev->name, ARRAY_SIZE(rd_dev->name), "ramdump_%s",
dev_name);
init_completion(&rd_dev->ramdump_complete);
-
- rd_dev->device.minor = MISC_DYNAMIC_MINOR;
- rd_dev->device.name = rd_dev->name;
- rd_dev->device.fops = &ramdump_file_ops;
- rd_dev->device.parent = parent;
if (parent) {
rd_dev->complete_ramdump = of_property_read_bool(
parent->of_node, "qcom,complete-ramdump");
@@ -284,27 +324,48 @@ void *create_ramdump_device(const char *dev_name, struct device *parent)
init_waitqueue_head(&rd_dev->dump_wait_q);
- ret = misc_register(&rd_dev->device);
-
- if (ret) {
- pr_err("%s: misc_register failed for %s (%d)", __func__,
+ rd_dev->dev = device_create(ramdump_class, parent,
+ MKDEV(MAJOR(ramdump_dev), minor),
+ rd_dev, rd_dev->name);
+ if (IS_ERR(rd_dev->dev)) {
+ ret = PTR_ERR(rd_dev->dev);
+ pr_err("%s: device_create failed for %s (%d)", __func__,
dev_name, ret);
- kfree(rd_dev);
- return NULL;
+ goto fail_return_minor;
+ }
+
+ cdev_init(&rd_dev->cdev, &ramdump_file_ops);
+
+ ret = cdev_add(&rd_dev->cdev, MKDEV(MAJOR(ramdump_dev), minor), 1);
+ if (ret < 0) {
+ pr_err("%s: cdev_add failed for %s (%d)", __func__,
+ dev_name, ret);
+ goto fail_cdev_add;
}
return (void *)rd_dev;
+
+fail_cdev_add:
+ device_unregister(rd_dev->dev);
+fail_return_minor:
+ ida_simple_remove(&rd_minor_id, minor);
+fail_out_of_minors:
+ kfree(rd_dev);
+ return ERR_PTR(ret);
}
EXPORT_SYMBOL(create_ramdump_device);
void destroy_ramdump_device(void *dev)
{
struct ramdump_device *rd_dev = dev;
+ int minor = MINOR(rd_dev->cdev.dev);
if (IS_ERR_OR_NULL(rd_dev))
return;
- misc_deregister(&rd_dev->device);
+ cdev_del(&rd_dev->cdev);
+ device_unregister(rd_dev->dev);
+ ida_simple_remove(&rd_minor_id, minor);
kfree(rd_dev);
}
EXPORT_SYMBOL(destroy_ramdump_device);
diff --git a/drivers/soc/qcom/wcd-dsp-glink.c b/drivers/soc/qcom/wcd-dsp-glink.c
index c8bb13d..870b9f7 100644
--- a/drivers/soc/qcom/wcd-dsp-glink.c
+++ b/drivers/soc/qcom/wcd-dsp-glink.c
@@ -58,6 +58,7 @@ struct wdsp_glink_rsp_que {
struct wdsp_glink_tx_buf {
struct work_struct tx_work;
+ struct work_struct free_tx_work;
/* Glink channel information */
struct wdsp_glink_ch *ch;
@@ -125,6 +126,46 @@ static int wdsp_glink_close_ch(struct wdsp_glink_ch *ch);
static int wdsp_glink_open_ch(struct wdsp_glink_ch *ch);
/*
+ * wdsp_glink_free_tx_buf_work - Work function to free tx pkt
+ * work: Work structure
+ */
+static void wdsp_glink_free_tx_buf_work(struct work_struct *work)
+{
+ struct wdsp_glink_tx_buf *tx_buf;
+
+ tx_buf = container_of(work, struct wdsp_glink_tx_buf,
+ free_tx_work);
+ vfree(tx_buf);
+}
+
+/*
+ * wdsp_glink_free_tx_buf - Function to free tx buffer
+ * priv: Pointer to the channel
+ * pkt_priv: Pointer to the tx buffer
+ */
+static void wdsp_glink_free_tx_buf(const void *priv, const void *pkt_priv)
+{
+ struct wdsp_glink_tx_buf *tx_buf = (struct wdsp_glink_tx_buf *)pkt_priv;
+ struct wdsp_glink_priv *wpriv;
+ struct wdsp_glink_ch *ch;
+
+ if (!priv) {
+ pr_err("%s: Invalid priv\n", __func__);
+ return;
+ }
+ if (!tx_buf) {
+ pr_err("%s: Invalid tx_buf\n", __func__);
+ return;
+ }
+
+ ch = (struct wdsp_glink_ch *)priv;
+ wpriv = ch->wpriv;
+ /* Queue work to free the tx pkt */
+ INIT_WORK(&tx_buf->free_tx_work, wdsp_glink_free_tx_buf_work);
+ queue_work(wpriv->work_queue, &tx_buf->free_tx_work);
+}
+
+/*
* wdsp_glink_notify_rx - Glink notify rx callback for responses
* handle: Opaque Channel handle returned by GLink
* priv: Private pointer to the channel
@@ -183,14 +224,8 @@ static void wdsp_glink_notify_rx(void *handle, const void *priv,
static void wdsp_glink_notify_tx_done(void *handle, const void *priv,
const void *pkt_priv, const void *ptr)
{
- if (!pkt_priv) {
- pr_err("%s: Invalid parameter\n", __func__);
- return;
- }
- /* Free tx pkt */
- vfree(pkt_priv);
+ wdsp_glink_free_tx_buf(priv, pkt_priv);
}
-
/*
* wdsp_glink_notify_tx_abort - Glink notify tx abort callback to
* free tx buffer
@@ -201,12 +236,7 @@ static void wdsp_glink_notify_tx_done(void *handle, const void *priv,
static void wdsp_glink_notify_tx_abort(void *handle, const void *priv,
const void *pkt_priv)
{
- if (!pkt_priv) {
- pr_err("%s: Invalid parameter\n", __func__);
- return;
- }
- /* Free tx pkt */
- vfree(pkt_priv);
+ wdsp_glink_free_tx_buf(priv, pkt_priv);
}
/*
@@ -555,7 +585,7 @@ static int wdsp_glink_ch_info_init(struct wdsp_glink_priv *wpriv,
goto done;
}
ch = kcalloc(no_of_channels, sizeof(struct wdsp_glink_ch *),
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!ch) {
ret = -ENOMEM;
goto done;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 838783c..24d4492 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1004,7 +1004,7 @@ static int spi_transfer_one_message(struct spi_master *master,
ret = 0;
ms = 8LL * 1000LL * xfer->len;
do_div(ms, xfer->speed_hz);
- ms += ms + 100; /* some tolerance */
+ ms += ms + 200; /* some tolerance */
if (ms > UINT_MAX)
ms = UINT_MAX;
diff --git a/drivers/spmi/spmi-pmic-arb-debug.c b/drivers/spmi/spmi-pmic-arb-debug.c
index c5a31a9..2c90bef 100644
--- a/drivers/spmi/spmi-pmic-arb-debug.c
+++ b/drivers/spmi/spmi-pmic-arb-debug.c
@@ -11,6 +11,7 @@
* GNU General Public License for more details.
*/
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
@@ -69,6 +70,7 @@ enum pmic_arb_cmd_op_code {
struct spmi_pmic_arb_debug {
void __iomem *addr;
raw_spinlock_t lock;
+ struct clk *clock;
};
static inline void pmic_arb_debug_write(struct spmi_pmic_arb_debug *pa,
@@ -181,6 +183,12 @@ static int pmic_arb_debug_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
else
return -EINVAL;
+ rc = clk_prepare_enable(pa->clock);
+ if (rc) {
+ pr_err("%s: failed to enable core clock, rc=%d\n",
+ __func__, rc);
+ return rc;
+ }
raw_spin_lock_irqsave(&pa->lock, flags);
rc = pmic_arb_debug_issue_command(ctrl, opc, sid, addr, len);
@@ -192,6 +200,7 @@ static int pmic_arb_debug_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
buf[i] = pmic_arb_debug_read(pa, PMIC_ARB_DEBUG_RDATA(i));
done:
raw_spin_unlock_irqrestore(&pa->lock, flags);
+ clk_disable_unprepare(pa->clock);
return rc;
}
@@ -221,6 +230,12 @@ static int pmic_arb_debug_write_cmd(struct spmi_controller *ctrl, u8 opc,
else
return -EINVAL;
+ rc = clk_prepare_enable(pa->clock);
+ if (rc) {
+ pr_err("%s: failed to enable core clock, rc=%d\n",
+ __func__, rc);
+ return rc;
+ }
raw_spin_lock_irqsave(&pa->lock, flags);
/* Write data to FIFO */
@@ -230,6 +245,7 @@ static int pmic_arb_debug_write_cmd(struct spmi_controller *ctrl, u8 opc,
rc = pmic_arb_debug_issue_command(ctrl, opc, sid, addr, len);
raw_spin_unlock_irqrestore(&pa->lock, flags);
+ clk_disable_unprepare(pa->clock);
return rc;
}
@@ -293,6 +309,17 @@ static int spmi_pmic_arb_debug_probe(struct platform_device *pdev)
goto err_put_ctrl;
}
+ if (of_find_property(pdev->dev.of_node, "clock-names", NULL)) {
+ pa->clock = devm_clk_get(&pdev->dev, "core_clk");
+ if (IS_ERR(pa->clock)) {
+ rc = PTR_ERR(pa->clock);
+ if (rc != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "unable to request core clock, rc=%d\n",
+ rc);
+ goto err_put_ctrl;
+ }
+ }
+
platform_set_drvdata(pdev, ctrl);
raw_spin_lock_init(&pa->lock);
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index ad6028f..c9028bb 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -2,7 +2,7 @@
* drivers/staging/android/ion/ion_system_heap.c
*
* Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -123,9 +123,11 @@ static struct page *alloc_buffer_page(struct ion_system_heap *heap,
if (order)
gfp_mask = high_order_gfp_flags;
+
page = alloc_pages(gfp_mask, order);
- ion_pages_sync_for_device(dev, page, PAGE_SIZE << order,
- DMA_BIDIRECTIONAL);
+ if (page)
+ ion_pages_sync_for_device(dev, page, PAGE_SIZE << order,
+ DMA_BIDIRECTIONAL);
}
if (!page)
return 0;
diff --git a/drivers/staging/android/uapi/msm_ion.h b/drivers/staging/android/uapi/msm_ion.h
index db4fc63..cc77674 100644
--- a/drivers/staging/android/uapi/msm_ion.h
+++ b/drivers/staging/android/uapi/msm_ion.h
@@ -84,7 +84,6 @@ enum cp_mem_usage {
#define ION_FLAG_CP_NON_PIXEL ION_BIT(20)
#define ION_FLAG_CP_CAMERA ION_BIT(21)
#define ION_FLAG_CP_HLOS ION_BIT(22)
-#define ION_FLAG_CP_HLOS_FREE ION_BIT(23)
#define ION_FLAG_CP_SEC_DISPLAY ION_BIT(25)
#define ION_FLAG_CP_APP ION_BIT(26)
#define ION_FLAG_CP_CAMERA_PREVIEW ION_BIT(27)
@@ -96,13 +95,6 @@ enum cp_mem_usage {
*/
#define ION_FLAG_SECURE ION_BIT(ION_HEAP_ID_RESERVED)
-/**
- * Flag for clients to force contiguous memort allocation
- *
- * Use of this flag is carefully monitored!
- */
-#define ION_FLAG_FORCE_CONTIGUOUS ION_BIT(30)
-
/*
* Used in conjunction with heap which pool memory to force an allocation
* to come from the page allocator directly instead of from the pool allocation
@@ -113,7 +105,6 @@ enum cp_mem_usage {
* Deprecated! Please use the corresponding ION_FLAG_*
*/
#define ION_SECURE ION_FLAG_SECURE
-#define ION_FORCE_CONTIGUOUS ION_FLAG_FORCE_CONTIGUOUS
/**
* Macro should be used with ion_heap_ids defined above.
diff --git a/drivers/staging/iio/light/tsl2x7x_core.c b/drivers/staging/iio/light/tsl2x7x_core.c
index ea15bc1..197201a 100644
--- a/drivers/staging/iio/light/tsl2x7x_core.c
+++ b/drivers/staging/iio/light/tsl2x7x_core.c
@@ -854,7 +854,7 @@ void tsl2x7x_prox_calculate(int *data, int length,
tmp = data[i] - statP->mean;
sample_sum += tmp * tmp;
}
- statP->stddev = int_sqrt((long)sample_sum) / length;
+ statP->stddev = int_sqrt((long)sample_sum / length);
}
/**
diff --git a/drivers/staging/media/cec/cec-adap.c b/drivers/staging/media/cec/cec-adap.c
index 611e07b..057c9b5 100644
--- a/drivers/staging/media/cec/cec-adap.c
+++ b/drivers/staging/media/cec/cec-adap.c
@@ -1017,7 +1017,7 @@ static int cec_config_thread_func(void *arg)
las->log_addr[i] = CEC_LOG_ADDR_INVALID;
if (last_la == CEC_LOG_ADDR_INVALID ||
last_la == CEC_LOG_ADDR_UNREGISTERED ||
- !(last_la & type2mask[type]))
+ !((1 << last_la) & type2mask[type]))
last_la = la_list[0];
err = cec_config_log_addr(adap, i, last_la);
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index 553e8d5..6513ace 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -890,7 +890,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
return _FAIL;
- if (len > MAX_IE_SZ)
+ if (len < 0 || len > MAX_IE_SZ)
return _FAIL;
pbss_network->IELength = len;
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 01ea228..155fe0e 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1287,6 +1287,18 @@ iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
*/
if (dump_payload)
goto after_immediate_data;
+ /*
+ * Check for underflow case where both EDTL and immediate data payload
+ * exceeds what is presented by CDB's TRANSFER LENGTH, and what has
+ * already been set in target_cmd_size_check() as se_cmd->data_length.
+ *
+ * For this special case, fail the command and dump the immediate data
+ * payload.
+ */
+ if (cmd->first_burst_len > cmd->se_cmd.data_length) {
+ cmd->sense_reason = TCM_INVALID_CDB_FIELD;
+ goto after_immediate_data;
+ }
immed_ret = iscsit_handle_immediate_data(cmd, hdr,
cmd->first_burst_len);
@@ -4431,8 +4443,11 @@ static void iscsit_logout_post_handler_closesession(
* always sleep waiting for RX/TX thread shutdown to complete
* within iscsit_close_connection().
*/
- if (!conn->conn_transport->rdma_shutdown)
+ if (!conn->conn_transport->rdma_shutdown) {
sleep = cmpxchg(&conn->tx_thread_active, true, false);
+ if (!sleep)
+ return;
+ }
atomic_set(&conn->conn_logout_remove, 0);
complete(&conn->conn_logout_comp);
@@ -4448,8 +4463,11 @@ static void iscsit_logout_post_handler_samecid(
{
int sleep = 1;
- if (!conn->conn_transport->rdma_shutdown)
+ if (!conn->conn_transport->rdma_shutdown) {
sleep = cmpxchg(&conn->tx_thread_active, true, false);
+ if (!sleep)
+ return;
+ }
atomic_set(&conn->conn_logout_remove, 0);
complete(&conn->conn_logout_comp);
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index e2c970a..be52838 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -131,7 +131,7 @@ int init_se_kmem_caches(void);
void release_se_kmem_caches(void);
u32 scsi_get_new_index(scsi_index_t);
void transport_subsystem_check_init(void);
-void transport_cmd_finish_abort(struct se_cmd *, int);
+int transport_cmd_finish_abort(struct se_cmd *, int);
unsigned char *transport_dump_cmd_direction(struct se_cmd *);
void transport_dump_dev_state(struct se_device *, char *, int *);
void transport_dump_dev_info(struct se_device *, struct se_lun *,
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 4f229e7..27dd1e1 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -75,7 +75,7 @@ void core_tmr_release_req(struct se_tmr_req *tmr)
kfree(tmr);
}
-static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
+static int core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
{
unsigned long flags;
bool remove = true, send_tas;
@@ -91,7 +91,7 @@ static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
transport_send_task_abort(cmd);
}
- transport_cmd_finish_abort(cmd, remove);
+ return transport_cmd_finish_abort(cmd, remove);
}
static int target_check_cdb_and_preempt(struct list_head *list,
@@ -185,8 +185,8 @@ void core_tmr_abort_task(
cancel_work_sync(&se_cmd->work);
transport_wait_for_tasks(se_cmd);
- transport_cmd_finish_abort(se_cmd, true);
- target_put_sess_cmd(se_cmd);
+ if (!transport_cmd_finish_abort(se_cmd, true))
+ target_put_sess_cmd(se_cmd);
printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
" ref_tag: %llu\n", ref_tag);
@@ -286,8 +286,8 @@ static void core_tmr_drain_tmr_list(
cancel_work_sync(&cmd->work);
transport_wait_for_tasks(cmd);
- transport_cmd_finish_abort(cmd, 1);
- target_put_sess_cmd(cmd);
+ if (!transport_cmd_finish_abort(cmd, 1))
+ target_put_sess_cmd(cmd);
}
}
@@ -385,8 +385,8 @@ static void core_tmr_drain_state_list(
cancel_work_sync(&cmd->work);
transport_wait_for_tasks(cmd);
- core_tmr_handle_tas_abort(cmd, tas);
- target_put_sess_cmd(cmd);
+ if (!core_tmr_handle_tas_abort(cmd, tas))
+ target_put_sess_cmd(cmd);
}
}
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 077344c..1f9bfa4 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -673,9 +673,10 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
percpu_ref_put(&lun->lun_ref);
}
-void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
+int transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
+ int ret = 0;
if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
transport_lun_remove_cmd(cmd);
@@ -687,9 +688,11 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
cmd->se_tfo->aborted_task(cmd);
if (transport_cmd_check_stop_to_fabric(cmd))
- return;
+ return 1;
if (remove && ack_kref)
- transport_put_cmd(cmd);
+ ret = transport_put_cmd(cmd);
+
+ return ret;
}
static void target_complete_failure_work(struct work_struct *work)
diff --git a/drivers/thermal/qcom/Kconfig b/drivers/thermal/qcom/Kconfig
index 38d5b93..be33725 100644
--- a/drivers/thermal/qcom/Kconfig
+++ b/drivers/thermal/qcom/Kconfig
@@ -49,3 +49,14 @@
will be used by QTI chipset to place a floor voltage restriction at
low temperatures. The regulator cooling device will message the AOP
using mail box to establish the floor voltage.
+
+config QTI_QMI_COOLING_DEVICE
+ bool "QTI QMI cooling devices"
+ depends on MSM_QMI_INTERFACE && THERMAL_OF
+ help
+ This enables the QTI remote subsystem cooling devices. These cooling
+ devices will be used by QTI chipset to place various remote
+ subsystem mitigations like remote processor passive mitigation,
+ remote subsystem voltage restriction at low temperatures, etc.
+ The QMI cooling device will interface with the remote subsystem
+ using the QTI QMI interface.
diff --git a/drivers/thermal/qcom/Makefile b/drivers/thermal/qcom/Makefile
index 2ba84c6..000c6e7 100644
--- a/drivers/thermal/qcom/Makefile
+++ b/drivers/thermal/qcom/Makefile
@@ -4,3 +4,4 @@
obj-$(CONFIG_QTI_THERMAL_LIMITS_DCVS) += msm_lmh_dcvs.o lmh_dbg.o
obj-$(CONFIG_QTI_VIRTUAL_SENSOR) += qti_virtual_sensor.o
obj-$(CONFIG_QTI_REG_COOLING_DEVICE) += regulator_cooling.o
+obj-$(CONFIG_QTI_QMI_COOLING_DEVICE) += thermal_mitigation_device_service_v01.o qmi_cooling.o
diff --git a/drivers/thermal/qcom/qmi_cooling.c b/drivers/thermal/qcom/qmi_cooling.c
new file mode 100644
index 0000000..af82030
--- /dev/null
+++ b/drivers/thermal/qcom/qmi_cooling.c
@@ -0,0 +1,681 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/thermal.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <soc/qcom/msm_qmi_interface.h>
+
+#include "thermal_mitigation_device_service_v01.h"
+
+#define QMI_CDEV_DRIVER "qmi-cooling-device"
+#define QMI_TMD_RESP_TOUT_MSEC 50
+#define QMI_CLIENT_NAME_LENGTH 40
+
+enum qmi_device_type {
+ QMI_CDEV_MAX_LIMIT_TYPE,
+ QMI_CDEV_MIN_LIMIT_TYPE,
+ QMI_CDEV_TYPE_NR,
+};
+
+struct qmi_cooling_device {
+ struct device_node *np;
+ char cdev_name[THERMAL_NAME_LENGTH];
+ char qmi_name[QMI_CLIENT_NAME_LENGTH];
+ bool connection_active;
+ enum qmi_device_type type;
+ struct list_head qmi_node;
+ struct thermal_cooling_device *cdev;
+ unsigned int mtgn_state;
+ unsigned int max_level;
+ struct qmi_tmd_instance *tmd;
+};
+
+struct qmi_tmd_instance {
+ struct device *dev;
+ struct qmi_handle *handle;
+ struct mutex mutex;
+ struct work_struct work_svc_arrive;
+ struct work_struct work_svc_exit;
+ struct work_struct work_rcv_msg;
+ struct notifier_block nb;
+ uint32_t inst_id;
+ struct list_head tmd_cdev_list;
+};
+
+struct qmi_dev_info {
+ char *dev_name;
+ enum qmi_device_type type;
+};
+
+static struct workqueue_struct *qmi_tmd_wq;
+static struct qmi_tmd_instance *tmd_instances;
+static int tmd_inst_cnt;
+
+static struct qmi_dev_info device_clients[] = {
+ {
+ .dev_name = "pa",
+ .type = QMI_CDEV_MAX_LIMIT_TYPE,
+ },
+ {
+ .dev_name = "cx_vdd_limit",
+ .type = QMI_CDEV_MAX_LIMIT_TYPE,
+ },
+ {
+ .dev_name = "modem",
+ .type = QMI_CDEV_MAX_LIMIT_TYPE,
+ },
+ {
+ .dev_name = "modem_current",
+ .type = QMI_CDEV_MAX_LIMIT_TYPE,
+ },
+ {
+ .dev_name = "modem_bw",
+ .type = QMI_CDEV_MAX_LIMIT_TYPE,
+ },
+ {
+ .dev_name = "cpuv_restriction_cold",
+ .type = QMI_CDEV_MIN_LIMIT_TYPE,
+ },
+ {
+ .dev_name = "cpr_cold",
+ .type = QMI_CDEV_MIN_LIMIT_TYPE,
+ }
+};
+
+static int qmi_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct qmi_cooling_device *qmi_cdev = cdev->devdata;
+
+ if (!qmi_cdev)
+ return -EINVAL;
+
+ *state = qmi_cdev->max_level;
+
+ return 0;
+}
+
+static int qmi_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct qmi_cooling_device *qmi_cdev = cdev->devdata;
+
+ if (!qmi_cdev)
+ return -EINVAL;
+
+ if (qmi_cdev->type == QMI_CDEV_MIN_LIMIT_TYPE) {
+ *state = 0;
+ return 0;
+ }
+ *state = qmi_cdev->mtgn_state;
+
+ return 0;
+}
+
+static int qmi_tmd_send_state_request(struct qmi_cooling_device *qmi_cdev,
+ uint8_t state)
+{
+ int ret = 0;
+ struct tmd_set_mitigation_level_req_msg_v01 req;
+ struct tmd_set_mitigation_level_resp_msg_v01 tmd_resp;
+ struct msg_desc req_desc, resp_desc;
+ struct qmi_tmd_instance *tmd = qmi_cdev->tmd;
+
+ memset(&req, 0, sizeof(req));
+ memset(&tmd_resp, 0, sizeof(tmd_resp));
+
+ strlcpy(req.mitigation_dev_id.mitigation_dev_id, qmi_cdev->qmi_name,
+ QMI_TMD_MITIGATION_DEV_ID_LENGTH_MAX_V01);
+ req.mitigation_level = state;
+
+ req_desc.max_msg_len = TMD_SET_MITIGATION_LEVEL_REQ_MSG_V01_MAX_MSG_LEN;
+ req_desc.msg_id = QMI_TMD_SET_MITIGATION_LEVEL_REQ_V01;
+ req_desc.ei_array = tmd_set_mitigation_level_req_msg_v01_ei;
+
+ resp_desc.max_msg_len =
+ TMD_SET_MITIGATION_LEVEL_RESP_MSG_V01_MAX_MSG_LEN;
+ resp_desc.msg_id = QMI_TMD_SET_MITIGATION_LEVEL_RESP_V01;
+ resp_desc.ei_array = tmd_set_mitigation_level_resp_msg_v01_ei;
+
+ mutex_lock(&tmd->mutex);
+ ret = qmi_send_req_wait(tmd->handle,
+ &req_desc, &req, sizeof(req),
+ &resp_desc, &tmd_resp, sizeof(tmd_resp),
+ QMI_TMD_RESP_TOUT_MSEC);
+ if (ret < 0) {
+ pr_err("qmi set state:%d failed for %s ret:%d\n",
+ state, qmi_cdev->cdev_name, ret);
+ goto qmi_send_exit;
+ }
+
+ if (tmd_resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ret = tmd_resp.resp.result;
+ pr_err("qmi set state:%d NOT success for %s ret:%d\n",
+ state, qmi_cdev->cdev_name, ret);
+ goto qmi_send_exit;
+ }
+ pr_debug("Requested qmi state:%d for %s\n", state, qmi_cdev->cdev_name);
+
+qmi_send_exit:
+ mutex_unlock(&tmd->mutex);
+ return ret;
+}
+
+static int qmi_set_cur_or_min_state(struct qmi_cooling_device *qmi_cdev,
+ unsigned long state)
+{
+ int ret = 0;
+ struct qmi_tmd_instance *tmd = qmi_cdev->tmd;
+
+ if (!tmd)
+ return -EINVAL;
+
+ if (qmi_cdev->mtgn_state == state)
+ return ret;
+
+ /* save it and return if the server has exited */
+ if (!qmi_cdev->connection_active) {
+ qmi_cdev->mtgn_state = state;
+ pr_debug("Pending request:%ld for %s\n", state,
+ qmi_cdev->cdev_name);
+ return ret;
+ }
+
+ /* It is best effort to save state even if QMI fails */
+ ret = qmi_tmd_send_state_request(qmi_cdev, (uint8_t)state);
+
+ qmi_cdev->mtgn_state = state;
+
+ return ret;
+}
+
+static int qmi_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+{
+ struct qmi_cooling_device *qmi_cdev = cdev->devdata;
+
+ if (!qmi_cdev)
+ return -EINVAL;
+
+ if (qmi_cdev->type == QMI_CDEV_MIN_LIMIT_TYPE)
+ return 0;
+
+ if (state > qmi_cdev->max_level)
+ state = qmi_cdev->max_level;
+
+ return qmi_set_cur_or_min_state(qmi_cdev, state);
+}
+
+static int qmi_set_min_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+{
+ struct qmi_cooling_device *qmi_cdev = cdev->devdata;
+
+ if (!qmi_cdev)
+ return -EINVAL;
+
+ if (qmi_cdev->type == QMI_CDEV_MAX_LIMIT_TYPE)
+ return 0;
+
+ if (state > qmi_cdev->max_level)
+ state = qmi_cdev->max_level;
+
+ /* Convert state into QMI client expects for min state */
+ state = qmi_cdev->max_level - state;
+
+ return qmi_set_cur_or_min_state(qmi_cdev, state);
+}
+
+static int qmi_get_min_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct qmi_cooling_device *qmi_cdev = cdev->devdata;
+
+ if (!qmi_cdev)
+ return -EINVAL;
+
+ if (qmi_cdev->type == QMI_CDEV_MAX_LIMIT_TYPE) {
+ *state = 0;
+ return 0;
+ }
+ *state = qmi_cdev->max_level - qmi_cdev->mtgn_state;
+
+ return 0;
+}
+
+static struct thermal_cooling_device_ops qmi_device_ops = {
+ .get_max_state = qmi_get_max_state,
+ .get_cur_state = qmi_get_cur_state,
+ .set_cur_state = qmi_set_cur_state,
+ .set_min_state = qmi_set_min_state,
+ .get_min_state = qmi_get_min_state,
+};
+
+static int qmi_register_cooling_device(struct qmi_cooling_device *qmi_cdev)
+{
+ qmi_cdev->cdev = thermal_of_cooling_device_register(
+ qmi_cdev->np,
+ qmi_cdev->cdev_name,
+ qmi_cdev,
+ &qmi_device_ops);
+ if (IS_ERR(qmi_cdev->cdev)) {
+ pr_err("Cooling register failed for %s, ret:%ld\n",
+ qmi_cdev->cdev_name, PTR_ERR(qmi_cdev->cdev));
+ return PTR_ERR(qmi_cdev->cdev);
+ }
+ pr_debug("Cooling register success for %s\n", qmi_cdev->cdev_name);
+
+ return 0;
+}
+
+static int verify_devices_and_register(struct qmi_tmd_instance *tmd)
+{
+ struct tmd_get_mitigation_device_list_req_msg_v01 req;
+ struct tmd_get_mitigation_device_list_resp_msg_v01 *tmd_resp;
+ struct msg_desc req_desc, resp_desc;
+ int ret = 0, i;
+
+ memset(&req, 0, sizeof(req));
+ /* tmd_resp is large, so use heap memory rather than the stack */
+ tmd_resp = kzalloc(sizeof(*tmd_resp), GFP_KERNEL);
+ if (!tmd_resp)
+ return -ENOMEM;
+
+ req_desc.max_msg_len =
+ TMD_GET_MITIGATION_DEVICE_LIST_REQ_MSG_V01_MAX_MSG_LEN;
+ req_desc.msg_id = QMI_TMD_GET_MITIGATION_DEVICE_LIST_REQ_V01;
+ req_desc.ei_array = tmd_get_mitigation_device_list_req_msg_v01_ei;
+
+ resp_desc.max_msg_len =
+ TMD_GET_MITIGATION_DEVICE_LIST_RESP_MSG_V01_MAX_MSG_LEN;
+ resp_desc.msg_id = QMI_TMD_GET_MITIGATION_DEVICE_LIST_RESP_V01;
+ resp_desc.ei_array = tmd_get_mitigation_device_list_resp_msg_v01_ei;
+
+ mutex_lock(&tmd->mutex);
+ ret = qmi_send_req_wait(tmd->handle,
+ &req_desc, &req, sizeof(req),
+ &resp_desc, tmd_resp, sizeof(*tmd_resp),
+ 0);
+ if (ret < 0) {
+ pr_err("qmi get device list failed for inst_id:0x%x ret:%d\n",
+ tmd->inst_id, ret);
+ goto reg_exit;
+ }
+
+ if (tmd_resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ ret = tmd_resp->resp.result;
+ pr_err("Get device list NOT success for inst_id:0x%x ret:%d\n",
+ tmd->inst_id, ret);
+ goto reg_exit;
+ }
+ mutex_unlock(&tmd->mutex);
+
+ for (i = 0; i < tmd_resp->mitigation_device_list_len; i++) {
+ struct qmi_cooling_device *qmi_cdev = NULL;
+
+ list_for_each_entry(qmi_cdev, &tmd->tmd_cdev_list,
+ qmi_node) {
+ struct tmd_mitigation_dev_list_type_v01 *device =
+ &tmd_resp->mitigation_device_list[i];
+
+ if ((strncasecmp(qmi_cdev->qmi_name,
+ device->mitigation_dev_id.mitigation_dev_id,
+ QMI_TMD_MITIGATION_DEV_ID_LENGTH_MAX_V01)))
+ continue;
+
+ qmi_cdev->connection_active = true;
+ qmi_cdev->max_level = device->max_mitigation_level;
+ /*
+ * It is better to set the current state
+ * initially or during a restart
+ */
+ qmi_tmd_send_state_request(qmi_cdev,
+ qmi_cdev->mtgn_state);
+ if (!qmi_cdev->cdev)
+ ret = qmi_register_cooling_device(qmi_cdev);
+ break;
+ }
+ }
+
+ kfree(tmd_resp);
+ return ret;
+
+reg_exit:
+ mutex_unlock(&tmd->mutex);
+ kfree(tmd_resp);
+
+ return ret;
+}
+
+static void qmi_tmd_rcv_msg(struct work_struct *work)
+{
+ int rc;
+ struct qmi_tmd_instance *tmd = container_of(work,
+ struct qmi_tmd_instance,
+ work_rcv_msg);
+
+ do {
+ pr_debug("Notified about a Receive Event\n");
+ } while ((rc = qmi_recv_msg(tmd->handle)) == 0);
+
+ if (rc != -ENOMSG)
+ pr_err("Error receiving message for SVC:0x%x, ret:%d\n",
+ tmd->inst_id, rc);
+}
+
+static void qmi_tmd_clnt_notify(struct qmi_handle *handle,
+ enum qmi_event_type event, void *priv_data)
+{
+ struct qmi_tmd_instance *tmd =
+ (struct qmi_tmd_instance *)priv_data;
+
+ if (!tmd) {
+ pr_debug("tmd is NULL\n");
+ return;
+ }
+
+ switch (event) {
+ case QMI_RECV_MSG:
+ queue_work(qmi_tmd_wq, &tmd->work_rcv_msg);
+ break;
+ default:
+ break;
+ }
+}
+
+static void qmi_tmd_svc_arrive(struct work_struct *work)
+{
+ int ret = 0;
+ struct qmi_tmd_instance *tmd = container_of(work,
+ struct qmi_tmd_instance,
+ work_svc_arrive);
+
+ mutex_lock(&tmd->mutex);
+ tmd->handle = qmi_handle_create(qmi_tmd_clnt_notify, tmd);
+ if (!tmd->handle) {
+ pr_err("QMI TMD client handle alloc failed for 0x%x\n",
+ tmd->inst_id);
+ goto arrive_exit;
+ }
+
+ ret = qmi_connect_to_service(tmd->handle, TMD_SERVICE_ID_V01,
+ TMD_SERVICE_VERS_V01,
+ tmd->inst_id);
+ if (ret < 0) {
+ pr_err("Could not connect handle to service for 0x%x, ret:%d\n",
+ tmd->inst_id, ret);
+ qmi_handle_destroy(tmd->handle);
+ tmd->handle = NULL;
+ goto arrive_exit;
+ }
+ mutex_unlock(&tmd->mutex);
+
+ verify_devices_and_register(tmd);
+
+ return;
+
+arrive_exit:
+ mutex_unlock(&tmd->mutex);
+}
+
+static void qmi_tmd_svc_exit(struct work_struct *work)
+{
+ struct qmi_tmd_instance *tmd = container_of(work,
+ struct qmi_tmd_instance,
+ work_svc_exit);
+ struct qmi_cooling_device *qmi_cdev;
+
+ mutex_lock(&tmd->mutex);
+ qmi_handle_destroy(tmd->handle);
+ tmd->handle = NULL;
+
+ list_for_each_entry(qmi_cdev, &tmd->tmd_cdev_list, qmi_node)
+ qmi_cdev->connection_active = false;
+
+ mutex_unlock(&tmd->mutex);
+}
+
+static int qmi_tmd_svc_event_notify(struct notifier_block *this,
+ unsigned long event,
+ void *data)
+{
+ struct qmi_tmd_instance *tmd = container_of(this,
+ struct qmi_tmd_instance,
+ nb);
+
+ if (!tmd) {
+ pr_debug("tmd is NULL\n");
+ return -EINVAL;
+ }
+
+ switch (event) {
+ case QMI_SERVER_ARRIVE:
+ schedule_work(&tmd->work_svc_arrive);
+ break;
+ case QMI_SERVER_EXIT:
+ schedule_work(&tmd->work_svc_exit);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static void qmi_tmd_cleanup(void)
+{
+ int idx = 0;
+ struct qmi_tmd_instance *tmd = tmd_instances;
+ struct qmi_cooling_device *qmi_cdev, *c_next;
+
+ for (; idx < tmd_inst_cnt; idx++) {
+ mutex_lock(&tmd[idx].mutex);
+ list_for_each_entry_safe(qmi_cdev, c_next,
+ &tmd[idx].tmd_cdev_list, qmi_node) {
+ if (qmi_cdev->cdev)
+ thermal_cooling_device_unregister(
+ qmi_cdev->cdev);
+
+ list_del(&qmi_cdev->qmi_node);
+ }
+ if (tmd[idx].handle)
+ qmi_handle_destroy(tmd[idx].handle);
+
+ if (tmd[idx].nb.notifier_call)
+ qmi_svc_event_notifier_unregister(TMD_SERVICE_ID_V01,
+ TMD_SERVICE_VERS_V01,
+ tmd[idx].inst_id,
+ &tmd[idx].nb);
+ mutex_unlock(&tmd[idx].mutex);
+ }
+
+ if (qmi_tmd_wq) {
+ destroy_workqueue(qmi_tmd_wq);
+ qmi_tmd_wq = NULL;
+ }
+}
+
+static int of_get_qmi_tmd_platform_data(struct device *dev)
+{
+ int ret = 0, idx = 0, i = 0, subsys_cnt = 0;
+ struct device_node *np = dev->of_node;
+ struct device_node *subsys_np, *cdev_np;
+ struct qmi_tmd_instance *tmd;
+ struct qmi_cooling_device *qmi_cdev;
+
+ subsys_cnt = of_get_available_child_count(np);
+ if (!subsys_cnt) {
+ dev_err(dev, "No child node to process\n");
+ return -EFAULT;
+ }
+
+ tmd = devm_kcalloc(dev, subsys_cnt, sizeof(*tmd), GFP_KERNEL);
+ if (!tmd)
+ return -ENOMEM;
+
+ for_each_available_child_of_node(np, subsys_np) {
+ if (idx >= subsys_cnt)
+ break;
+
+ ret = of_property_read_u32(subsys_np, "qcom,instance-id",
+ &tmd[idx].inst_id);
+ if (ret) {
+ dev_err(dev, "error reading qcom,insance-id. ret:%d\n",
+ ret);
+ return ret;
+ }
+
+ tmd[idx].dev = dev;
+ mutex_init(&tmd[idx].mutex);
+ INIT_LIST_HEAD(&tmd[idx].tmd_cdev_list);
+
+ for_each_available_child_of_node(subsys_np, cdev_np) {
+ const char *qmi_name;
+
+ qmi_cdev = devm_kzalloc(dev, sizeof(*qmi_cdev),
+ GFP_KERNEL);
+ if (!qmi_cdev) {
+ ret = -ENOMEM;
+ return ret;
+ }
+
+ strlcpy(qmi_cdev->cdev_name, cdev_np->name,
+ THERMAL_NAME_LENGTH);
+
+ if (!of_property_read_string(cdev_np,
+ "qcom,qmi-dev-name",
+ &qmi_name)) {
+ strlcpy(qmi_cdev->qmi_name, qmi_name,
+ QMI_CLIENT_NAME_LENGTH);
+ } else {
+ dev_err(dev, "Fail to parse dev name for %s\n",
+ cdev_np->name);
+ break;
+ }
+ /* Check for supported qmi dev */
+ for (i = 0; i < ARRAY_SIZE(device_clients); i++) {
+ if (strcmp(device_clients[i].dev_name,
+ qmi_cdev->qmi_name) == 0)
+ break;
+ }
+
+ if (i >= ARRAY_SIZE(device_clients)) {
+ dev_err(dev, "Not supported dev name for %s\n",
+ cdev_np->name);
+ break;
+ }
+ qmi_cdev->type = device_clients[i].type;
+ qmi_cdev->tmd = &tmd[idx];
+ qmi_cdev->np = cdev_np;
+ qmi_cdev->mtgn_state = 0;
+ list_add(&qmi_cdev->qmi_node, &tmd[idx].tmd_cdev_list);
+ }
+ idx++;
+ }
+ tmd_instances = tmd;
+ tmd_inst_cnt = subsys_cnt;
+
+ return 0;
+}
+
+static int qmi_device_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int ret = 0, idx = 0;
+
+ ret = of_get_qmi_tmd_platform_data(dev);
+ if (ret)
+ goto probe_err;
+
+ if (!tmd_instances || !tmd_inst_cnt) {
+ dev_err(dev, "Empty tmd instances\n");
+ return -EINVAL;
+ }
+
+ qmi_tmd_wq = create_singlethread_workqueue("qmi_tmd_wq");
+ if (!qmi_tmd_wq) {
+ dev_err(dev, "Failed to create single thread workqueue\n");
+ ret = -EFAULT;
+ goto probe_err;
+ }
+
+ for (; idx < tmd_inst_cnt; idx++) {
+ struct qmi_tmd_instance *tmd = &tmd_instances[idx];
+
+ if (list_empty(&tmd->tmd_cdev_list))
+ continue;
+
+ tmd->nb.notifier_call = qmi_tmd_svc_event_notify;
+ INIT_WORK(&tmd->work_svc_arrive, qmi_tmd_svc_arrive);
+ INIT_WORK(&tmd->work_svc_exit, qmi_tmd_svc_exit);
+ INIT_WORK(&tmd->work_rcv_msg, qmi_tmd_rcv_msg);
+
+ ret = qmi_svc_event_notifier_register(TMD_SERVICE_ID_V01,
+ TMD_SERVICE_VERS_V01,
+ tmd->inst_id,
+ &tmd->nb);
+ if (ret < 0) {
+ dev_err(dev, "QMI register failed for 0x%x, ret:%d\n",
+ tmd->inst_id, ret);
+ goto probe_err;
+ }
+ }
+
+ return 0;
+
+probe_err:
+ qmi_tmd_cleanup();
+ return ret;
+}
+
+static int qmi_device_remove(struct platform_device *pdev)
+{
+ qmi_tmd_cleanup();
+
+ return 0;
+}
+
+static const struct of_device_id qmi_device_match[] = {
+ {.compatible = "qcom,qmi_cooling_devices"},
+ {}
+};
+
+static struct platform_driver qmi_device_driver = {
+ .probe = qmi_device_probe,
+ .remove = qmi_device_remove,
+ .driver = {
+ .name = "QMI_CDEV_DRIVER",
+ .owner = THIS_MODULE,
+ .of_match_table = qmi_device_match,
+ },
+};
+
+static int __init qmi_device_init(void)
+{
+ return platform_driver_register(&qmi_device_driver);
+}
+module_init(qmi_device_init);
+
+static void __exit qmi_device_exit(void)
+{
+ platform_driver_unregister(&qmi_device_driver);
+}
+module_exit(qmi_device_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QTI QMI cooling device driver");
diff --git a/drivers/thermal/qcom/thermal_mitigation_device_service_v01.c b/drivers/thermal/qcom/thermal_mitigation_device_service_v01.c
new file mode 100644
index 0000000..af020eb
--- /dev/null
+++ b/drivers/thermal/qcom/thermal_mitigation_device_service_v01.c
@@ -0,0 +1,359 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/qmi_encdec.h>
+
+#include <soc/qcom/msm_qmi_interface.h>
+
+#include "thermal_mitigation_device_service_v01.h"
+
+static struct elem_info tmd_mitigation_dev_id_type_v01_ei[] = {
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_TMD_MITIGATION_DEV_ID_LENGTH_MAX_V01 + 1,
+ .elem_size = sizeof(char),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(
+ struct tmd_mitigation_dev_id_type_v01,
+ mitigation_dev_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info tmd_mitigation_dev_list_type_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct tmd_mitigation_dev_id_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(
+ struct tmd_mitigation_dev_list_type_v01,
+ mitigation_dev_id),
+ .ei_array = tmd_mitigation_dev_id_type_v01_ei,
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(
+ struct tmd_mitigation_dev_list_type_v01,
+ max_mitigation_level),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info tmd_get_mitigation_device_list_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info tmd_get_mitigation_device_list_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct tmd_get_mitigation_device_list_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct tmd_get_mitigation_device_list_resp_msg_v01,
+ mitigation_device_list_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct tmd_get_mitigation_device_list_resp_msg_v01,
+ mitigation_device_list_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_TMD_MITIGATION_DEV_LIST_MAX_V01,
+ .elem_size = sizeof(
+ struct tmd_mitigation_dev_list_type_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct tmd_get_mitigation_device_list_resp_msg_v01,
+ mitigation_device_list),
+ .ei_array = tmd_mitigation_dev_list_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info tmd_set_mitigation_level_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct tmd_mitigation_dev_id_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(
+ struct tmd_set_mitigation_level_req_msg_v01,
+ mitigation_dev_id),
+ .ei_array = tmd_mitigation_dev_id_type_v01_ei,
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct tmd_set_mitigation_level_req_msg_v01,
+ mitigation_level),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info tmd_set_mitigation_level_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct tmd_set_mitigation_level_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info tmd_get_mitigation_level_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct tmd_mitigation_dev_id_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(
+ struct tmd_get_mitigation_level_req_msg_v01,
+ mitigation_device),
+ .ei_array = tmd_mitigation_dev_id_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info tmd_get_mitigation_level_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct tmd_get_mitigation_level_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct tmd_get_mitigation_level_resp_msg_v01,
+ current_mitigation_level_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct tmd_get_mitigation_level_resp_msg_v01,
+ current_mitigation_level),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct tmd_get_mitigation_level_resp_msg_v01,
+ requested_mitigation_level_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct tmd_get_mitigation_level_resp_msg_v01,
+ requested_mitigation_level),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info tmd_register_notification_mitigation_level_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct tmd_mitigation_dev_id_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(
+ struct tmd_register_notification_mitigation_level_req_msg_v01,
+ mitigation_device),
+ .ei_array = tmd_mitigation_dev_id_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info tmd_register_notification_mitigation_level_resp_msg_v01_ei[]
+ = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct tmd_register_notification_mitigation_level_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info tmd_deregister_notification_mitigation_level_req_msg_v01_ei[]
+ = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct tmd_mitigation_dev_id_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ tmd_deregister_notification_mitigation_level_req_msg_v01,
+ mitigation_device),
+ .ei_array = tmd_mitigation_dev_id_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info tmd_deregister_notification_mitigation_level_resp_msg_v01_ei[]
+ = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ tmd_deregister_notification_mitigation_level_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info tmd_mitigation_level_report_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct tmd_mitigation_dev_id_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(
+ struct tmd_mitigation_level_report_ind_msg_v01,
+ mitigation_device),
+ .ei_array = tmd_mitigation_dev_id_type_v01_ei,
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct tmd_mitigation_level_report_ind_msg_v01,
+ current_mitigation_level),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
diff --git a/drivers/thermal/qcom/thermal_mitigation_device_service_v01.h b/drivers/thermal/qcom/thermal_mitigation_device_service_v01.h
new file mode 100644
index 0000000..c2d1201
--- /dev/null
+++ b/drivers/thermal/qcom/thermal_mitigation_device_service_v01.h
@@ -0,0 +1,128 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef THERMAL_MITIGATION_DEVICE_SERVICE_V01_H
+#define THERMAL_MITIGATION_DEVICE_SERVICE_V01_H
+
+#define TMD_SERVICE_ID_V01 0x18
+#define TMD_SERVICE_VERS_V01 0x01
+
+#define QMI_TMD_GET_MITIGATION_DEVICE_LIST_RESP_V01 0x0020
+#define QMI_TMD_GET_MITIGATION_LEVEL_REQ_V01 0x0022
+#define QMI_TMD_GET_SUPPORTED_MSGS_REQ_V01 0x001E
+#define QMI_TMD_SET_MITIGATION_LEVEL_REQ_V01 0x0021
+#define QMI_TMD_REGISTER_NOTIFICATION_MITIGATION_LEVEL_RESP_V01 0x0023
+#define QMI_TMD_GET_SUPPORTED_MSGS_RESP_V01 0x001E
+#define QMI_TMD_SET_MITIGATION_LEVEL_RESP_V01 0x0021
+#define QMI_TMD_DEREGISTER_NOTIFICATION_MITIGATION_LEVEL_RESP_V01 0x0024
+#define QMI_TMD_MITIGATION_LEVEL_REPORT_IND_V01 0x0025
+#define QMI_TMD_GET_MITIGATION_LEVEL_RESP_V01 0x0022
+#define QMI_TMD_GET_SUPPORTED_FIELDS_REQ_V01 0x001F
+#define QMI_TMD_GET_MITIGATION_DEVICE_LIST_REQ_V01 0x0020
+#define QMI_TMD_REGISTER_NOTIFICATION_MITIGATION_LEVEL_REQ_V01 0x0023
+#define QMI_TMD_DEREGISTER_NOTIFICATION_MITIGATION_LEVEL_REQ_V01 0x0024
+#define QMI_TMD_GET_SUPPORTED_FIELDS_RESP_V01 0x001F
+
+#define QMI_TMD_MITIGATION_DEV_ID_LENGTH_MAX_V01 32
+#define QMI_TMD_MITIGATION_DEV_LIST_MAX_V01 32
+
+struct tmd_mitigation_dev_id_type_v01 {
+ char mitigation_dev_id[QMI_TMD_MITIGATION_DEV_ID_LENGTH_MAX_V01 + 1];
+};
+
+struct tmd_mitigation_dev_list_type_v01 {
+ struct tmd_mitigation_dev_id_type_v01 mitigation_dev_id;
+ uint8_t max_mitigation_level;
+};
+
+struct tmd_get_mitigation_device_list_req_msg_v01 {
+ char placeholder;
+};
+#define TMD_GET_MITIGATION_DEVICE_LIST_REQ_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info tmd_get_mitigation_device_list_req_msg_v01_ei[];
+
+struct tmd_get_mitigation_device_list_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ uint8_t mitigation_device_list_valid;
+ uint32_t mitigation_device_list_len;
+ struct tmd_mitigation_dev_list_type_v01
+ mitigation_device_list[QMI_TMD_MITIGATION_DEV_LIST_MAX_V01];
+};
+#define TMD_GET_MITIGATION_DEVICE_LIST_RESP_MSG_V01_MAX_MSG_LEN 1099
+extern struct elem_info tmd_get_mitigation_device_list_resp_msg_v01_ei[];
+
+struct tmd_set_mitigation_level_req_msg_v01 {
+ struct tmd_mitigation_dev_id_type_v01 mitigation_dev_id;
+ uint8_t mitigation_level;
+};
+#define TMD_SET_MITIGATION_LEVEL_REQ_MSG_V01_MAX_MSG_LEN 40
+extern struct elem_info tmd_set_mitigation_level_req_msg_v01_ei[];
+
+struct tmd_set_mitigation_level_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+#define TMD_SET_MITIGATION_LEVEL_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info tmd_set_mitigation_level_resp_msg_v01_ei[];
+
+struct tmd_get_mitigation_level_req_msg_v01 {
+ struct tmd_mitigation_dev_id_type_v01 mitigation_device;
+};
+#define TMD_GET_MITIGATION_LEVEL_REQ_MSG_V01_MAX_MSG_LEN 36
+extern struct elem_info tmd_get_mitigation_level_req_msg_v01_ei[];
+
+struct tmd_get_mitigation_level_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ uint8_t current_mitigation_level_valid;
+ uint8_t current_mitigation_level;
+ uint8_t requested_mitigation_level_valid;
+ uint8_t requested_mitigation_level;
+};
+#define TMD_GET_MITIGATION_LEVEL_RESP_MSG_V01_MAX_MSG_LEN 15
+extern struct elem_info tmd_get_mitigation_level_resp_msg_v01_ei[];
+
+struct tmd_register_notification_mitigation_level_req_msg_v01 {
+ struct tmd_mitigation_dev_id_type_v01 mitigation_device;
+};
+#define TMD_REGISTER_NOTIFICATION_MITIGATION_LEVEL_REQ_MSG_V01_MAX_MSG_LEN 36
+extern struct elem_info
+ tmd_register_notification_mitigation_level_req_msg_v01_ei[];
+
+struct tmd_register_notification_mitigation_level_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+#define TMD_REGISTER_NOTIFICATION_MITIGATION_LEVEL_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info
+ tmd_register_notification_mitigation_level_resp_msg_v01_ei[];
+
+struct tmd_deregister_notification_mitigation_level_req_msg_v01 {
+ struct tmd_mitigation_dev_id_type_v01 mitigation_device;
+};
+#define TMD_DEREGISTER_NOTIFICATION_MITIGATION_LEVEL_REQ_MSG_V01_MAX_MSG_LEN 36
+extern struct elem_info
+ tmd_deregister_notification_mitigation_level_req_msg_v01_ei[];
+
+struct tmd_deregister_notification_mitigation_level_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+#define TMD_DEREGISTER_NOTIFICATION_MITIGATION_LEVEL_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info
+ tmd_deregister_notification_mitigation_level_resp_msg_v01_ei[];
+
+struct tmd_mitigation_level_report_ind_msg_v01 {
+ struct tmd_mitigation_dev_id_type_v01 mitigation_device;
+ uint8_t current_mitigation_level;
+};
+#define TMD_MITIGATION_LEVEL_REPORT_IND_MSG_V01_MAX_MSG_LEN 40
+extern struct elem_info tmd_mitigation_level_report_ind_msg_v01_ei[];
+
+#endif
diff --git a/drivers/tty/serial/efm32-uart.c b/drivers/tty/serial/efm32-uart.c
index 195acc8..5d47691 100644
--- a/drivers/tty/serial/efm32-uart.c
+++ b/drivers/tty/serial/efm32-uart.c
@@ -27,6 +27,7 @@
#define UARTn_FRAME 0x04
#define UARTn_FRAME_DATABITS__MASK 0x000f
#define UARTn_FRAME_DATABITS(n) ((n) - 3)
+#define UARTn_FRAME_PARITY__MASK 0x0300
#define UARTn_FRAME_PARITY_NONE 0x0000
#define UARTn_FRAME_PARITY_EVEN 0x0200
#define UARTn_FRAME_PARITY_ODD 0x0300
@@ -572,12 +573,16 @@ static void efm32_uart_console_get_options(struct efm32_uart_port *efm_port,
16 * (4 + (clkdiv >> 6)));
frame = efm32_uart_read32(efm_port, UARTn_FRAME);
- if (frame & UARTn_FRAME_PARITY_ODD)
+ switch (frame & UARTn_FRAME_PARITY__MASK) {
+ case UARTn_FRAME_PARITY_ODD:
*parity = 'o';
- else if (frame & UARTn_FRAME_PARITY_EVEN)
+ break;
+ case UARTn_FRAME_PARITY_EVEN:
*parity = 'e';
- else
+ break;
+ default:
*parity = 'n';
+ }
*bits = (frame & UARTn_FRAME_DATABITS__MASK) -
UARTn_FRAME_DATABITS(4) + 4;
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index ee84f89..7e97a1c 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -2366,6 +2366,10 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
*/
udelay(DIV_ROUND_UP(10 * 1000000, baud));
}
+ if (port->flags & UPF_HARD_FLOW) {
+ /* Refresh (Auto) RTS */
+ sci_set_mctrl(port, port->mctrl);
+ }
#ifdef CONFIG_SERIAL_SH_SCI_DMA
/*
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 32f99da..e07fa76 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2578,6 +2578,7 @@ struct usb_hcd *__usb_create_hcd(const struct hc_driver *driver,
hcd->bandwidth_mutex = kmalloc(sizeof(*hcd->bandwidth_mutex),
GFP_KERNEL);
if (!hcd->bandwidth_mutex) {
+ kfree(hcd->address0_mutex);
kfree(hcd);
dev_dbg(dev, "hcd bandwidth mutex alloc failed\n");
return NULL;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index fcbaa61..50679bc 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1345,7 +1345,13 @@ static int hub_configure(struct usb_hub *hub,
if (ret < 0) {
message = "can't read hub descriptor";
goto fail;
- } else if (hub->descriptor->bNbrPorts > USB_MAXCHILDREN) {
+ }
+
+ maxchild = USB_MAXCHILDREN;
+ if (hub_is_superspeed(hdev))
+ maxchild = min_t(unsigned, maxchild, USB_SS_MAXPORTS);
+
+ if (hub->descriptor->bNbrPorts > maxchild) {
message = "hub has too many ports!";
ret = -ENODEV;
goto fail;
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index 2f1fb7e..9eba51b 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -148,7 +148,8 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
exynos->axius_clk = devm_clk_get(dev, "usbdrd30_axius_clk");
if (IS_ERR(exynos->axius_clk)) {
dev_err(dev, "no AXI UpScaler clk specified\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto axius_clk_err;
}
clk_prepare_enable(exynos->axius_clk);
} else {
@@ -206,6 +207,7 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
regulator_disable(exynos->vdd33);
err2:
clk_disable_unprepare(exynos->axius_clk);
+axius_clk_err:
clk_disable_unprepare(exynos->susp_clk);
clk_disable_unprepare(exynos->clk);
return ret;
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index ec166f2..8a6ae0b 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -2291,7 +2291,7 @@ int composite_os_desc_req_prepare(struct usb_composite_dev *cdev,
cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL);
if (!cdev->os_desc_req->buf) {
ret = -ENOMEM;
- kfree(cdev->os_desc_req);
+ usb_ep_free_request(ep0, cdev->os_desc_req);
goto end;
}
cdev->os_desc_req->context = cdev;
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 1468d8f..f959c42 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1183,8 +1183,10 @@ dev_release (struct inode *inode, struct file *fd)
/* closing ep0 === shutdown all */
- if (dev->gadget_registered)
+ if (dev->gadget_registered) {
usb_gadget_unregister_driver (&gadgetfs_driver);
+ dev->gadget_registered = false;
+ }
/* at this point "good" hardware has disconnected the
* device from USB; the host won't see it any more.
@@ -1677,9 +1679,10 @@ static void
gadgetfs_suspend (struct usb_gadget *gadget)
{
struct dev_data *dev = get_gadget_data (gadget);
+ unsigned long flags;
INFO (dev, "suspended from state %d\n", dev->state);
- spin_lock (&dev->lock);
+ spin_lock_irqsave(&dev->lock, flags);
switch (dev->state) {
case STATE_DEV_SETUP: // VERY odd... host died??
case STATE_DEV_CONNECTED:
@@ -1690,7 +1693,7 @@ gadgetfs_suspend (struct usb_gadget *gadget)
default:
break;
}
- spin_unlock (&dev->lock);
+ spin_unlock_irqrestore(&dev->lock, flags);
}
static struct usb_gadget_driver gadgetfs_driver = {
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index 4fa5de2..94c8a9f 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -442,23 +442,16 @@ static void set_link_state(struct dummy_hcd *dum_hcd)
/* Report reset and disconnect events to the driver */
if (dum->driver && (disconnect || reset)) {
stop_activity(dum);
- spin_unlock(&dum->lock);
if (reset)
usb_gadget_udc_reset(&dum->gadget, dum->driver);
else
dum->driver->disconnect(&dum->gadget);
- spin_lock(&dum->lock);
}
} else if (dum_hcd->active != dum_hcd->old_active) {
- if (dum_hcd->old_active && dum->driver->suspend) {
- spin_unlock(&dum->lock);
+ if (dum_hcd->old_active && dum->driver->suspend)
dum->driver->suspend(&dum->gadget);
- spin_lock(&dum->lock);
- } else if (!dum_hcd->old_active && dum->driver->resume) {
- spin_unlock(&dum->lock);
+ else if (!dum_hcd->old_active && dum->driver->resume)
dum->driver->resume(&dum->gadget);
- spin_lock(&dum->lock);
- }
}
dum_hcd->old_status = dum_hcd->port_status;
@@ -983,7 +976,9 @@ static int dummy_udc_stop(struct usb_gadget *g)
struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g);
struct dummy *dum = dum_hcd->dum;
+ spin_lock_irq(&dum->lock);
dum->driver = NULL;
+ spin_unlock_irq(&dum->lock);
return 0;
}
@@ -2009,7 +2004,7 @@ ss_hub_descriptor(struct usb_hub_descriptor *desc)
HUB_CHAR_COMMON_OCPM);
desc->bNbrPorts = 1;
desc->u.ss.bHubHdrDecLat = 0x04; /* Worst case: 0.4 micro sec*/
- desc->u.ss.DeviceRemovable = 0xffff;
+ desc->u.ss.DeviceRemovable = 0;
}
static inline void hub_descriptor(struct usb_hub_descriptor *desc)
@@ -2021,8 +2016,8 @@ static inline void hub_descriptor(struct usb_hub_descriptor *desc)
HUB_CHAR_INDV_PORT_LPSM |
HUB_CHAR_COMMON_OCPM);
desc->bNbrPorts = 1;
- desc->u.hs.DeviceRemovable[0] = 0xff;
- desc->u.hs.DeviceRemovable[1] = 0xff;
+ desc->u.hs.DeviceRemovable[0] = 0;
+ desc->u.hs.DeviceRemovable[1] = 0xff; /* PortPwrCtrlMask */
}
static int dummy_hub_control(
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index 61c938c..33f3987 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -2469,11 +2469,8 @@ static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver)
nuke(&dev->ep[i]);
/* report disconnect; the driver is already quiesced */
- if (driver) {
- spin_unlock(&dev->lock);
+ if (driver)
driver->disconnect(&dev->gadget);
- spin_lock(&dev->lock);
- }
usb_reinit(dev);
}
@@ -3347,8 +3344,6 @@ static void handle_stat0_irqs(struct net2280 *dev, u32 stat)
BIT(PCI_RETRY_ABORT_INTERRUPT))
static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
-__releases(dev->lock)
-__acquires(dev->lock)
{
struct net2280_ep *ep;
u32 tmp, num, mask, scratch;
@@ -3389,14 +3384,12 @@ __acquires(dev->lock)
if (disconnect || reset) {
stop_activity(dev, dev->driver);
ep0_start(dev);
- spin_unlock(&dev->lock);
if (reset)
usb_gadget_udc_reset
(&dev->gadget, dev->driver);
else
(dev->driver->disconnect)
(&dev->gadget);
- spin_lock(&dev->lock);
return;
}
}
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index fb8fc34..ba78e3f 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -1401,7 +1401,13 @@ static void usb3_request_done_pipen(struct renesas_usb3 *usb3,
struct renesas_usb3_request *usb3_req,
int status)
{
- usb3_pn_stop(usb3);
+ unsigned long flags;
+
+ spin_lock_irqsave(&usb3->lock, flags);
+ if (usb3_pn_change(usb3, usb3_ep->num))
+ usb3_pn_stop(usb3);
+ spin_unlock_irqrestore(&usb3->lock, flags);
+
usb3_disable_pipe_irq(usb3, usb3_ep->num);
usb3_request_done(usb3_ep, usb3_req, status);
@@ -1430,30 +1436,46 @@ static void usb3_irq_epc_pipen_bfrdy(struct renesas_usb3 *usb3, int num)
{
struct renesas_usb3_ep *usb3_ep = usb3_get_ep(usb3, num);
struct renesas_usb3_request *usb3_req = usb3_get_request(usb3_ep);
+ bool done = false;
if (!usb3_req)
return;
+ spin_lock(&usb3->lock);
+ if (usb3_pn_change(usb3, num))
+ goto out;
+
if (usb3_ep->dir_in) {
/* Do not stop the IN pipe here to detect LSTTR interrupt */
if (!usb3_write_pipe(usb3_ep, usb3_req, USB3_PN_WRITE))
usb3_clear_bit(usb3, PN_INT_BFRDY, USB3_PN_INT_ENA);
} else {
if (!usb3_read_pipe(usb3_ep, usb3_req, USB3_PN_READ))
- usb3_request_done_pipen(usb3, usb3_ep, usb3_req, 0);
+ done = true;
}
+
+out:
+ /* need to unlock because usb3_request_done_pipen() locks it */
+ spin_unlock(&usb3->lock);
+
+ if (done)
+ usb3_request_done_pipen(usb3, usb3_ep, usb3_req, 0);
}
static void usb3_irq_epc_pipen(struct renesas_usb3 *usb3, int num)
{
u32 pn_int_sta;
- if (usb3_pn_change(usb3, num) < 0)
+ spin_lock(&usb3->lock);
+ if (usb3_pn_change(usb3, num) < 0) {
+ spin_unlock(&usb3->lock);
return;
+ }
pn_int_sta = usb3_read(usb3, USB3_PN_INT_STA);
pn_int_sta &= usb3_read(usb3, USB3_PN_INT_ENA);
usb3_write(usb3, pn_int_sta, USB3_PN_INT_STA);
+ spin_unlock(&usb3->lock);
if (pn_int_sta & PN_INT_LSTTR)
usb3_irq_epc_pipen_lsttr(usb3, num);
if (pn_int_sta & PN_INT_BFRDY)
@@ -1707,6 +1729,9 @@ static int renesas_usb3_start(struct usb_gadget *gadget,
/* hook up the driver */
usb3->driver = driver;
+ pm_runtime_enable(usb3_to_dev(usb3));
+ pm_runtime_get_sync(usb3_to_dev(usb3));
+
renesas_usb3_init_controller(usb3);
return 0;
@@ -1715,14 +1740,14 @@ static int renesas_usb3_start(struct usb_gadget *gadget,
static int renesas_usb3_stop(struct usb_gadget *gadget)
{
struct renesas_usb3 *usb3 = gadget_to_renesas_usb3(gadget);
- unsigned long flags;
- spin_lock_irqsave(&usb3->lock, flags);
usb3->softconnect = false;
usb3->gadget.speed = USB_SPEED_UNKNOWN;
usb3->driver = NULL;
renesas_usb3_stop_controller(usb3);
- spin_unlock_irqrestore(&usb3->lock, flags);
+
+ pm_runtime_put(usb3_to_dev(usb3));
+ pm_runtime_disable(usb3_to_dev(usb3));
return 0;
}
@@ -1761,9 +1786,6 @@ static int renesas_usb3_remove(struct platform_device *pdev)
{
struct renesas_usb3 *usb3 = platform_get_drvdata(pdev);
- pm_runtime_put(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
-
usb_del_gadget_udc(&usb3->gadget);
__renesas_usb3_ep_free_request(usb3->ep0_req);
@@ -1948,9 +1970,6 @@ static int renesas_usb3_probe(struct platform_device *pdev)
usb3->workaround_for_vbus = priv->workaround_for_vbus;
- pm_runtime_enable(&pdev->dev);
- pm_runtime_get_sync(&pdev->dev);
-
dev_info(&pdev->dev, "probed\n");
return 0;
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index bfa7fa3..7bf78be 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -1269,7 +1269,7 @@ static void set_td_timer(struct r8a66597 *r8a66597, struct r8a66597_td *td)
time = 30;
break;
default:
- time = 300;
+ time = 50;
break;
}
@@ -1785,6 +1785,7 @@ static void r8a66597_td_timer(unsigned long _r8a66597)
pipe = td->pipe;
pipe_stop(r8a66597, pipe);
+ /* Select a different address or endpoint */
new_td = td;
do {
list_move_tail(&new_td->queue,
@@ -1794,7 +1795,8 @@ static void r8a66597_td_timer(unsigned long _r8a66597)
new_td = td;
break;
}
- } while (td != new_td && td->address == new_td->address);
+ } while (td != new_td && td->address == new_td->address &&
+ td->pipe->info.epnum == new_td->pipe->info.epnum);
start_transfer(r8a66597, new_td);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index d680eb3..c99121a6 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -2230,11 +2230,12 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
{
u32 temp, port_offset, port_count;
int i;
- u8 major_revision;
+ u8 major_revision, minor_revision;
struct xhci_hub *rhub;
temp = readl(addr);
major_revision = XHCI_EXT_PORT_MAJOR(temp);
+ minor_revision = XHCI_EXT_PORT_MINOR(temp);
if (major_revision == 0x03) {
rhub = &xhci->usb3_rhub;
@@ -2248,7 +2249,9 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
return;
}
rhub->maj_rev = XHCI_EXT_PORT_MAJOR(temp);
- rhub->min_rev = XHCI_EXT_PORT_MINOR(temp);
+
+ if (rhub->min_rev < minor_revision)
+ rhub->min_rev = minor_revision;
/* Port offset and count in the third dword, see section 7.2 */
temp = readl(addr + 2);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index e7d6752..69864ba 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -201,6 +201,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
pdev->device == 0x1042)
xhci->quirks |= XHCI_BROKEN_STREAMS;
+ if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
+ pdev->device == 0x1142)
+ xhci->quirks |= XHCI_TRUST_TX_LENGTH;
if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241)
xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7;
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 358feca..261ed2c 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1909,6 +1909,14 @@ static void musb_pm_runtime_check_session(struct musb *musb)
static void musb_irq_work(struct work_struct *data)
{
struct musb *musb = container_of(data, struct musb, irq_work.work);
+ int error;
+
+ error = pm_runtime_get_sync(musb->controller);
+ if (error < 0) {
+ dev_err(musb->controller, "Could not enable: %i\n", error);
+
+ return;
+ }
musb_pm_runtime_check_session(musb);
@@ -1916,6 +1924,9 @@ static void musb_irq_work(struct work_struct *data)
musb->xceiv_old_state = musb->xceiv->otg->state;
sysfs_notify(&musb->controller->kobj, NULL, "mode");
}
+
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
}
static void musb_recover_from_babble(struct musb *musb)
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 9f125e1..39666fb 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -213,6 +213,12 @@ static int dsps_check_status(struct musb *musb, void *unused)
msecs_to_jiffies(wrp->poll_timeout));
break;
case OTG_STATE_A_WAIT_BCON:
+ /* keep VBUS on for host-only mode */
+ if (musb->port_mode == MUSB_PORT_MODE_HOST) {
+ mod_timer(&glue->timer, jiffies +
+ msecs_to_jiffies(wrp->poll_timeout));
+ break;
+ }
musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
skip_session = 1;
/* fall */
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index 5643613..2682d29 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -763,10 +763,7 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
case PE_SRC_STARTUP:
if (pd->current_dr == DR_NONE) {
pd->current_dr = DR_DFP;
- /*
- * Defer starting USB host mode until PE_SRC_READY or
- * when PE_SRC_SEND_CAPABILITIES fails
- */
+ start_usb_host(pd, true);
}
dual_role_instance_changed(pd->dual_role);
@@ -1302,14 +1299,6 @@ static void handle_vdm_rx(struct usbpd *pd, struct rx_msg *rx_msg)
if (svid == 0xFF01)
has_dp = true;
}
-
- /*
- * Finally start USB host now that we have determined
- * if DisplayPort mode is present or not and limit USB
- * to HS-only mode if so.
- */
- start_usb_host(pd, !has_dp);
-
break;
default:
@@ -1326,7 +1315,6 @@ static void handle_vdm_rx(struct usbpd *pd, struct rx_msg *rx_msg)
switch (cmd) {
case USBPD_SVDM_DISCOVER_IDENTITY:
case USBPD_SVDM_DISCOVER_SVIDS:
- start_usb_host(pd, true);
break;
default:
break;
@@ -1723,11 +1711,7 @@ static void usbpd_sm(struct work_struct *w)
ARRAY_SIZE(default_src_caps), SOP_MSG);
if (ret) {
pd->caps_count++;
-
- if (pd->caps_count == 10 && pd->current_dr == DR_DFP) {
- /* Likely not PD-capable, start host now */
- start_usb_host(pd, true);
- } else if (pd->caps_count >= PD_CAPS_COUNT) {
+ if (pd->caps_count >= PD_CAPS_COUNT) {
usbpd_dbg(&pd->dev, "Src CapsCounter exceeded, disabling PD\n");
usbpd_set_state(pd, PE_SRC_DISABLED);
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 03eccf2..d6dc165 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -235,14 +235,19 @@ static int vhci_hub_status(struct usb_hcd *hcd, char *buf)
static inline void hub_descriptor(struct usb_hub_descriptor *desc)
{
+ int width;
+
memset(desc, 0, sizeof(*desc));
desc->bDescriptorType = USB_DT_HUB;
- desc->bDescLength = 9;
desc->wHubCharacteristics = cpu_to_le16(
HUB_CHAR_INDV_PORT_LPSM | HUB_CHAR_COMMON_OCPM);
+
desc->bNbrPorts = VHCI_HC_PORTS;
- desc->u.hs.DeviceRemovable[0] = 0xff;
- desc->u.hs.DeviceRemovable[1] = 0xff;
+ BUILD_BUG_ON(VHCI_HC_PORTS > USB_MAXCHILDREN);
+ width = desc->bNbrPorts / 8 + 1;
+ desc->bDescLength = USB_DT_HUB_NONVAR_SIZE + 2 * width;
+ memset(&desc->u.hs.DeviceRemovable[0], 0, width);
+ memset(&desc->u.hs.DeviceRemovable[width], 0xff, width);
}
static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index c882357..79ddcb0 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -1246,6 +1246,8 @@ static void tce_iommu_release_ownership_ddw(struct tce_container *container,
static long tce_iommu_take_ownership_ddw(struct tce_container *container,
struct iommu_table_group *table_group)
{
+ long i, ret = 0;
+
if (!table_group->ops->create_table || !table_group->ops->set_window ||
!table_group->ops->release_ownership) {
WARN_ON_ONCE(1);
@@ -1254,7 +1256,27 @@ static long tce_iommu_take_ownership_ddw(struct tce_container *container,
table_group->ops->take_ownership(table_group);
+ /* Set all windows to the new group */
+ for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
+ struct iommu_table *tbl = container->tables[i];
+
+ if (!tbl)
+ continue;
+
+ ret = table_group->ops->set_window(table_group, i, tbl);
+ if (ret)
+ goto release_exit;
+ }
+
return 0;
+
+release_exit:
+ for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
+ table_group->ops->unset_window(table_group, i);
+
+ table_group->ops->release_ownership(table_group);
+
+ return ret;
}
static int tce_iommu_attach_group(void *iommu_data,
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index a504e2e0..e3fad30 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -368,6 +368,7 @@ static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
static int vhost_vsock_start(struct vhost_vsock *vsock)
{
+ struct vhost_virtqueue *vq;
size_t i;
int ret;
@@ -378,19 +379,20 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
goto err;
for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
- struct vhost_virtqueue *vq = &vsock->vqs[i];
+ vq = &vsock->vqs[i];
mutex_lock(&vq->mutex);
if (!vhost_vq_access_ok(vq)) {
ret = -EFAULT;
- mutex_unlock(&vq->mutex);
goto err_vq;
}
if (!vq->private_data) {
vq->private_data = vsock;
- vhost_vq_init_access(vq);
+ ret = vhost_vq_init_access(vq);
+ if (ret)
+ goto err_vq;
}
mutex_unlock(&vq->mutex);
@@ -400,8 +402,11 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
return 0;
err_vq:
+ vq->private_data = NULL;
+ mutex_unlock(&vq->mutex);
+
for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
- struct vhost_virtqueue *vq = &vsock->vqs[i];
+ vq = &vsock->vqs[i];
mutex_lock(&vq->mutex);
vq->private_data = NULL;
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index fc09eb7..ffc69dd 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -345,7 +345,7 @@ static int autofs_dev_ioctl_fail(struct file *fp,
int status;
token = (autofs_wqt_t) param->fail.token;
- status = param->fail.status ? param->fail.status : -ENOENT;
+ status = param->fail.status < 0 ? param->fail.status : -ENOENT;
return autofs4_wait_release(sbi, token, status);
}
diff --git a/fs/btrfs/hash.c b/fs/btrfs/hash.c
index a97fdc1..baacc18 100644
--- a/fs/btrfs/hash.c
+++ b/fs/btrfs/hash.c
@@ -38,6 +38,7 @@ u32 btrfs_crc32c(u32 crc, const void *address, unsigned int length)
{
SHASH_DESC_ON_STACK(shash, tfm);
u32 *ctx = (u32 *)shash_desc_ctx(shash);
+ u32 retval;
int err;
shash->tfm = tfm;
@@ -47,5 +48,7 @@ u32 btrfs_crc32c(u32 crc, const void *address, unsigned int length)
err = crypto_shash_update(shash, address, length);
BUG_ON(err);
- return *ctx;
+ retval = *ctx;
+ barrier_data(ctx);
+ return retval;
}
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index 87b87e0..efd72e1 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -849,8 +849,13 @@ cifs_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_fid *fid, __u16 search_flags,
struct cifs_search_info *srch_inf)
{
- return CIFSFindFirst(xid, tcon, path, cifs_sb,
- &fid->netfid, search_flags, srch_inf, true);
+ int rc;
+
+ rc = CIFSFindFirst(xid, tcon, path, cifs_sb,
+ &fid->netfid, search_flags, srch_inf, true);
+ if (rc)
+ cifs_dbg(FYI, "find first failed=%d\n", rc);
+ return rc;
}
static int
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 36334fe..b696824 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -964,7 +964,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL);
kfree(utf16_path);
if (rc) {
- cifs_dbg(VFS, "open dir failed\n");
+ cifs_dbg(FYI, "open dir failed rc=%d\n", rc);
return rc;
}
@@ -974,7 +974,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
fid->volatile_fid, 0, srch_inf);
if (rc) {
- cifs_dbg(VFS, "query directory failed\n");
+ cifs_dbg(FYI, "query directory failed rc=%d\n", rc);
SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
}
return rc;
diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c
index db6d692..314b4ed 100644
--- a/fs/configfs/symlink.c
+++ b/fs/configfs/symlink.c
@@ -83,14 +83,13 @@ static int create_link(struct config_item *parent_item,
ret = -ENOMEM;
sl = kmalloc(sizeof(struct configfs_symlink), GFP_KERNEL);
if (sl) {
- sl->sl_target = config_item_get(item);
spin_lock(&configfs_dirent_lock);
if (target_sd->s_type & CONFIGFS_USET_DROPPING) {
spin_unlock(&configfs_dirent_lock);
- config_item_put(item);
kfree(sl);
return -ENOENT;
}
+ sl->sl_target = config_item_get(item);
list_add(&sl->sl_list, &target_sd->s_links);
spin_unlock(&configfs_dirent_lock);
ret = configfs_create_link(sl, parent_item->ci_dentry,
diff --git a/fs/exec.c b/fs/exec.c
index c8ca064..26ab263 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -215,8 +215,26 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
if (write) {
unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
+ unsigned long ptr_size;
struct rlimit *rlim;
+ /*
+ * Since the stack will hold pointers to the strings, we
+ * must account for them as well.
+ *
+ * The size calculation is the entire vma while each arg page is
+ * built, so each time we get here it's calculating how far it
+ * is currently (rather than each call being just the newly
+ * added size from the arg page). As a result, we need to
+ * always add the entire size of the pointers, so that on the
+ * last call to get_arg_page() we'll actually have the entire
+ * correct size.
+ */
+ ptr_size = (bprm->argc + bprm->envc) * sizeof(void *);
+ if (ptr_size > ULONG_MAX - size)
+ goto fail;
+ size += ptr_size;
+
acct_arg_size(bprm, size / PAGE_SIZE);
/*
@@ -234,13 +252,15 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
* to work from.
*/
rlim = current->signal->rlim;
- if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) {
- put_page(page);
- return NULL;
- }
+ if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4)
+ goto fail;
}
return page;
+
+fail:
+ put_page(page);
+ return NULL;
}
static void put_arg_page(struct page *page)
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index c12f695..88e111a 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -948,6 +948,7 @@ static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
{
SHASH_DESC_ON_STACK(shash, sbi->s_chksum_driver);
u32 *ctx = (u32 *)shash_desc_ctx(shash);
+ u32 retval;
int err;
shash->tfm = sbi->s_chksum_driver;
@@ -957,7 +958,9 @@ static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
err = crypto_shash_update(shash, address, length);
BUG_ON(err);
- return *ctx;
+ retval = *ctx;
+ barrier_data(ctx);
+ return retval;
}
static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
index 43040721..40d6107 100644
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
@@ -542,6 +542,7 @@ void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) {
if (invalidate)
set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
+ clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL);
}
} else {
@@ -560,6 +561,10 @@ void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
wait_on_atomic_t(&cookie->n_active, fscache_wait_atomic_t,
TASK_UNINTERRUPTIBLE);
+ /* Make sure any pending writes are cancelled. */
+ if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX)
+ fscache_invalidate_writes(cookie);
+
/* Reset the cookie state if it wasn't relinquished */
if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags)) {
atomic_inc(&cookie->n_active);
diff --git a/fs/fscache/netfs.c b/fs/fscache/netfs.c
index 9b28649..a8aa00b 100644
--- a/fs/fscache/netfs.c
+++ b/fs/fscache/netfs.c
@@ -48,6 +48,7 @@ int __fscache_register_netfs(struct fscache_netfs *netfs)
cookie->flags = 1 << FSCACHE_COOKIE_ENABLED;
spin_lock_init(&cookie->lock);
+ spin_lock_init(&cookie->stores_lock);
INIT_HLIST_HEAD(&cookie->backing_objects);
/* check the netfs type is not already present */
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
index 9e792e3..7a182c8 100644
--- a/fs/fscache/object.c
+++ b/fs/fscache/object.c
@@ -30,6 +30,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
static const struct fscache_state *fscache_object_available(struct fscache_object *, int);
static const struct fscache_state *fscache_parent_ready(struct fscache_object *, int);
static const struct fscache_state *fscache_update_object(struct fscache_object *, int);
+static const struct fscache_state *fscache_object_dead(struct fscache_object *, int);
#define __STATE_NAME(n) fscache_osm_##n
#define STATE(n) (&__STATE_NAME(n))
@@ -91,7 +92,7 @@ static WORK_STATE(LOOKUP_FAILURE, "LCFL", fscache_lookup_failure);
static WORK_STATE(KILL_OBJECT, "KILL", fscache_kill_object);
static WORK_STATE(KILL_DEPENDENTS, "KDEP", fscache_kill_dependents);
static WORK_STATE(DROP_OBJECT, "DROP", fscache_drop_object);
-static WORK_STATE(OBJECT_DEAD, "DEAD", (void*)2UL);
+static WORK_STATE(OBJECT_DEAD, "DEAD", fscache_object_dead);
static WAIT_STATE(WAIT_FOR_INIT, "?INI",
TRANSIT_TO(INIT_OBJECT, 1 << FSCACHE_OBJECT_EV_NEW_CHILD));
@@ -229,6 +230,10 @@ static void fscache_object_sm_dispatcher(struct fscache_object *object)
event = -1;
if (new_state == NO_TRANSIT) {
_debug("{OBJ%x} %s notrans", object->debug_id, state->name);
+ if (unlikely(state == STATE(OBJECT_DEAD))) {
+ _leave(" [dead]");
+ return;
+ }
fscache_enqueue_object(object);
event_mask = object->oob_event_mask;
goto unmask_events;
@@ -239,7 +244,7 @@ static void fscache_object_sm_dispatcher(struct fscache_object *object)
object->state = state = new_state;
if (state->work) {
- if (unlikely(state->work == ((void *)2UL))) {
+ if (unlikely(state == STATE(OBJECT_DEAD))) {
_leave(" [dead]");
return;
}
@@ -645,6 +650,12 @@ static const struct fscache_state *fscache_kill_object(struct fscache_object *ob
fscache_mark_object_dead(object);
object->oob_event_mask = 0;
+ if (test_bit(FSCACHE_OBJECT_RETIRED, &object->flags)) {
+ /* Reject any new read/write ops and abort any that are pending. */
+ clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
+ fscache_cancel_all_ops(object);
+ }
+
if (list_empty(&object->dependents) &&
object->n_ops == 0 &&
object->n_children == 0)
@@ -1077,3 +1088,20 @@ void fscache_object_mark_killed(struct fscache_object *object,
}
}
EXPORT_SYMBOL(fscache_object_mark_killed);
+
+/*
+ * The object is dead. We can get here if an object gets queued by an event
+ * that would lead to its death (such as EV_KILL) when the dispatcher is
+ * already running (and so can be requeued) but hasn't yet cleared the event
+ * mask.
+ */
+static const struct fscache_state *fscache_object_dead(struct fscache_object *object,
+ int event)
+{
+ if (!test_and_set_bit(FSCACHE_OBJECT_RUN_AFTER_DEAD,
+ &object->flags))
+ return NO_TRANSIT;
+
+ WARN(true, "FS-Cache object redispatched after death");
+ return NO_TRANSIT;
+}
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 133f322..6528724 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1425,26 +1425,32 @@ static struct shrinker glock_shrinker = {
* @sdp: the filesystem
* @bucket: the bucket
*
+ * Note that the function can be called multiple times on the same
+ * object. So the user must ensure that the function can cope with
+ * that.
*/
static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
{
struct gfs2_glock *gl;
- struct rhash_head *pos;
- const struct bucket_table *tbl;
- int i;
+ struct rhashtable_iter iter;
- rcu_read_lock();
- tbl = rht_dereference_rcu(gl_hash_table.tbl, &gl_hash_table);
- for (i = 0; i < tbl->size; i++) {
- rht_for_each_entry_rcu(gl, pos, tbl, i, gl_node) {
+ rhashtable_walk_enter(&gl_hash_table, &iter);
+
+ do {
+ gl = ERR_PTR(rhashtable_walk_start(&iter));
+ if (gl)
+ continue;
+
+ while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl))
if ((gl->gl_name.ln_sbd == sdp) &&
lockref_get_not_dead(&gl->gl_lockref))
examiner(gl);
- }
- }
- rcu_read_unlock();
- cond_resched();
+
+ rhashtable_walk_stop(&iter);
+ } while (cond_resched(), gl == ERR_PTR(-EAGAIN));
+
+ rhashtable_walk_exit(&iter);
}
/**
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 4fb7b10..704fa0b 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -191,7 +191,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 0959c96..9267191 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1079,6 +1079,7 @@ static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
case -NFS4ERR_BADXDR:
case -NFS4ERR_RESOURCE:
case -NFS4ERR_NOFILEHANDLE:
+ case -NFS4ERR_MOVED:
/* Non-seqid mutating errors */
return;
};
diff --git a/fs/proc/base.c b/fs/proc/base.c
index c573113..45f75c4 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -3386,6 +3386,8 @@ int proc_pid_readdir(struct file *file, struct dir_context *ctx)
iter.tgid += 1, iter = next_tgid(ns, iter)) {
char name[PROC_NUMBUF];
int len;
+
+ cond_resched();
if (!has_pid_permissions(ns, iter.task, 2))
continue;
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index f998332..9182f84 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -349,11 +349,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
/* We don't show the stack guard page in /proc/maps */
start = vma->vm_start;
- if (stack_guard_page_start(vma, start))
- start += PAGE_SIZE;
end = vma->vm_end;
- if (stack_guard_page_end(vma, end))
- end -= PAGE_SIZE;
seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
diff --git a/fs/read_write.c b/fs/read_write.c
index 190e0d36..e479e24 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -1232,7 +1232,7 @@ static size_t compat_writev(struct file *file,
if (!(file->f_mode & FMODE_CAN_WRITE))
goto out;
- ret = compat_do_readv_writev(WRITE, file, vec, vlen, pos, 0);
+ ret = compat_do_readv_writev(WRITE, file, vec, vlen, pos, flags);
out:
if (ret > 0)
diff --git a/fs/romfs/super.c b/fs/romfs/super.c
index d0f8a38..0186fe6 100644
--- a/fs/romfs/super.c
+++ b/fs/romfs/super.c
@@ -74,6 +74,7 @@
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>
+#include <linux/major.h>
#include "internal.h"
static struct kmem_cache *romfs_inode_cachep;
@@ -416,7 +417,22 @@ static void romfs_destroy_inode(struct inode *inode)
static int romfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
- u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
+ u64 id = 0;
+
+ /* When calling huge_encode_dev(),
+ * use sb->s_bdev->bd_dev when,
+ * - CONFIG_ROMFS_ON_BLOCK defined
+ * use sb->s_dev when,
+ * - CONFIG_ROMFS_ON_BLOCK undefined and
+ * - CONFIG_ROMFS_ON_MTD defined
+ * leave id as 0 when,
+ * - CONFIG_ROMFS_ON_BLOCK undefined and
+ * - CONFIG_ROMFS_ON_MTD undefined
+ */
+ if (sb->s_bdev)
+ id = huge_encode_dev(sb->s_bdev->bd_dev);
+ else if (sb->s_dev)
+ id = huge_encode_dev(sb->s_dev);
buf->f_type = ROMFS_MAGIC;
buf->f_namelen = ROMFS_MAXFN;
@@ -489,6 +505,11 @@ static int romfs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_flags |= MS_RDONLY | MS_NOATIME;
sb->s_op = &romfs_super_ops;
+#ifdef CONFIG_ROMFS_ON_MTD
+ /* Use same dev ID from the underlying mtdblock device */
+ if (sb->s_mtd)
+ sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, sb->s_mtd->index);
+#endif
/* read the image superblock and check it */
rsb = kmalloc(512, GFP_KERNEL);
if (!rsb)
diff --git a/fs/sdcardfs/packagelist.c b/fs/sdcardfs/packagelist.c
index 00a0f65..6da0c21 100644
--- a/fs/sdcardfs/packagelist.c
+++ b/fs/sdcardfs/packagelist.c
@@ -174,19 +174,6 @@ int check_caller_access_to_name(struct inode *parent_node, const struct qstr *na
return 1;
}
-/* This function is used when file opening. The open flags must be
- * checked before calling check_caller_access_to_name()
- */
-int open_flags_to_access_mode(int open_flags)
-{
- if ((open_flags & O_ACCMODE) == O_RDONLY)
- return 0; /* R_OK */
- if ((open_flags & O_ACCMODE) == O_WRONLY)
- return 1; /* W_OK */
- /* Probably O_RDRW, but treat as default to be safe */
- return 1; /* R_OK | W_OK */
-}
-
static struct hashtable_entry *alloc_hashtable_entry(const struct qstr *key,
appid_t value)
{
diff --git a/fs/sdcardfs/sdcardfs.h b/fs/sdcardfs/sdcardfs.h
index 3687b22..4e0ce49 100644
--- a/fs/sdcardfs/sdcardfs.h
+++ b/fs/sdcardfs/sdcardfs.h
@@ -499,7 +499,6 @@ extern appid_t get_appid(const char *app_name);
extern appid_t get_ext_gid(const char *app_name);
extern appid_t is_excluded(const char *app_name, userid_t userid);
extern int check_caller_access_to_name(struct inode *parent_node, const struct qstr *name);
-extern int open_flags_to_access_mode(int open_flags);
extern int packagelist_init(void);
extern void packagelist_exit(void);
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 69c867c0..2cde073 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -63,6 +63,7 @@ struct userfaultfd_wait_queue {
struct uffd_msg msg;
wait_queue_t wq;
struct userfaultfd_ctx *ctx;
+ bool waken;
};
struct userfaultfd_wake_range {
@@ -86,6 +87,12 @@ static int userfaultfd_wake_function(wait_queue_t *wq, unsigned mode,
if (len && (start > uwq->msg.arg.pagefault.address ||
start + len <= uwq->msg.arg.pagefault.address))
goto out;
+ WRITE_ONCE(uwq->waken, true);
+ /*
+ * The implicit smp_mb__before_spinlock in try_to_wake_up()
+ * renders uwq->waken visible to other CPUs before the task is
+	 * woken.
+ */
ret = wake_up_state(wq->private, mode);
if (ret)
/*
@@ -264,6 +271,7 @@ int handle_userfault(struct fault_env *fe, unsigned long reason)
struct userfaultfd_wait_queue uwq;
int ret;
bool must_wait, return_to_userland;
+ long blocking_state;
BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
@@ -333,10 +341,13 @@ int handle_userfault(struct fault_env *fe, unsigned long reason)
uwq.wq.private = current;
uwq.msg = userfault_msg(fe->address, fe->flags, reason);
uwq.ctx = ctx;
+ uwq.waken = false;
return_to_userland =
(fe->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) ==
(FAULT_FLAG_USER|FAULT_FLAG_KILLABLE);
+ blocking_state = return_to_userland ? TASK_INTERRUPTIBLE :
+ TASK_KILLABLE;
spin_lock(&ctx->fault_pending_wqh.lock);
/*
@@ -349,8 +360,7 @@ int handle_userfault(struct fault_env *fe, unsigned long reason)
* following the spin_unlock to happen before the list_add in
* __add_wait_queue.
*/
- set_current_state(return_to_userland ? TASK_INTERRUPTIBLE :
- TASK_KILLABLE);
+ set_current_state(blocking_state);
spin_unlock(&ctx->fault_pending_wqh.lock);
must_wait = userfaultfd_must_wait(ctx, fe->address, fe->flags, reason);
@@ -362,6 +372,29 @@ int handle_userfault(struct fault_env *fe, unsigned long reason)
wake_up_poll(&ctx->fd_wqh, POLLIN);
schedule();
ret |= VM_FAULT_MAJOR;
+
+ /*
+		 * False wakeups can originate even from rwsem before
+ * up_read() however userfaults will wait either for a
+ * targeted wakeup on the specific uwq waitqueue from
+ * wake_userfault() or for signals or for uffd
+ * release.
+ */
+ while (!READ_ONCE(uwq.waken)) {
+ /*
+ * This needs the full smp_store_mb()
+ * guarantee as the state write must be
+ * visible to other CPUs before reading
+ * uwq.waken from other CPUs.
+ */
+ set_current_state(blocking_state);
+ if (READ_ONCE(uwq.waken) ||
+ READ_ONCE(ctx->released) ||
+ (return_to_userland ? signal_pending(current) :
+ fatal_signal_pending(current)))
+ break;
+ schedule();
+ }
}
__set_current_state(TASK_RUNNING);
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index e9fb2e8..0c4f9c67 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -776,6 +776,7 @@ struct drm_device {
struct drm_minor *control; /**< Control node */
struct drm_minor *primary; /**< Primary node */
struct drm_minor *render; /**< Render node */
+ bool registered;
/* currently active master for this device. Protected by master_mutex */
struct drm_master *master;
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 1c12875..b28c4a3 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -345,6 +345,8 @@ struct drm_connector_funcs {
* core drm connector interfaces. Everything added from this callback
* should be unregistered in the early_unregister callback.
*
+ * This is called while holding drm_connector->mutex.
+ *
* Returns:
*
* 0 on success, or a negative error code on failure.
@@ -359,6 +361,8 @@ struct drm_connector_funcs {
* late_register(). It is called from drm_connector_unregister(),
* early in the driver unload sequence to disable userspace access
 * before data structures are torn down.
+ *
+ * This is called while holding drm_connector->mutex.
*/
void (*early_unregister)(struct drm_connector *connector);
@@ -511,7 +515,6 @@ struct drm_cmdline_mode {
* @interlace_allowed: can this connector handle interlaced modes?
* @doublescan_allowed: can this connector handle doublescan?
* @stereo_allowed: can this connector handle stereo modes?
- * @registered: is this connector exposed (registered) with userspace?
* @modes: modes available on this connector (from fill_modes() + user)
* @status: one of the drm_connector_status enums (connected, not, or unknown)
* @probed_modes: list of modes derived directly from the display
@@ -574,6 +577,13 @@ struct drm_connector {
char *name;
/**
+ * @mutex: Lock for general connector state, but currently only protects
+ * @registered. Most of the connector state is still protected by the
+ * mutex in &drm_mode_config.
+ */
+ struct mutex mutex;
+
+ /**
* @index: Compacted connector index, which matches the position inside
* the mode_config.list for drivers not supporting hot-add/removing. Can
* be used as an array index. It is invariant over the lifetime of the
@@ -586,6 +596,10 @@ struct drm_connector {
bool interlace_allowed;
bool doublescan_allowed;
bool stereo_allowed;
+ /**
+ * @registered: Is this connector exposed (registered) with userspace?
+ * Protected by @mutex.
+ */
bool registered;
struct list_head modes; /* list of modes on this connector */
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index b8eb25b..4fbc62e 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -103,6 +103,13 @@ extern struct cpumask __cpu_isolated_mask;
#define num_present_cpus() cpumask_weight(cpu_present_mask)
#define num_active_cpus() cpumask_weight(cpu_active_mask)
#define num_isolated_cpus() cpumask_weight(cpu_isolated_mask)
+#define num_online_uniso_cpus() \
+({ \
+ cpumask_t mask; \
+ \
+ cpumask_andnot(&mask, cpu_online_mask, cpu_isolated_mask); \
+ cpumask_weight(&mask); \
+})
#define cpu_online(cpu) cpumask_test_cpu((cpu), cpu_online_mask)
#define cpu_possible(cpu) cpumask_test_cpu((cpu), cpu_possible_mask)
#define cpu_present(cpu) cpumask_test_cpu((cpu), cpu_present_mask)
@@ -114,6 +121,7 @@ extern struct cpumask __cpu_isolated_mask;
#define num_present_cpus() 1U
#define num_active_cpus() 1U
#define num_isolated_cpus() 0U
+#define num_online_uniso_cpus() 1U
#define cpu_online(cpu) ((cpu) == 0)
#define cpu_possible(cpu) ((cpu) == 0)
#define cpu_present(cpu) ((cpu) == 0)
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index 13ba552..4c467ef 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -360,6 +360,7 @@ struct fscache_object {
#define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */
#define FSCACHE_OBJECT_RETIRED 6 /* T if object was retired on relinquishment */
#define FSCACHE_OBJECT_KILLED_BY_CACHE 7 /* T if object was killed by the cache */
+#define FSCACHE_OBJECT_RUN_AFTER_DEAD 8 /* T if object has been dispatched after death */
struct list_head cache_link; /* link in cache->object_list */
struct hlist_node cookie_link; /* link in cookie->backing_objects */
diff --git a/include/linux/log2.h b/include/linux/log2.h
index f38fae2..c373295f 100644
--- a/include/linux/log2.h
+++ b/include/linux/log2.h
@@ -194,6 +194,17 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
* ... and so on.
*/
-#define order_base_2(n) ilog2(roundup_pow_of_two(n))
+static inline __attribute_const__
+int __order_base_2(unsigned long n)
+{
+ return n > 1 ? ilog2(n - 1) + 1 : 0;
+}
+#define order_base_2(n) \
+( \
+ __builtin_constant_p(n) ? ( \
+ ((n) == 0 || (n) == 1) ? 0 : \
+ ilog2((n) - 1) + 1) : \
+ __order_base_2(n) \
+)
#endif /* _LINUX_LOG2_H */
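
For illustration only (not part of this patch): the new order_base_2() rule — 0 for n <= 1, otherwise ilog2(n - 1) + 1 — can be spot-checked from userspace, with __builtin_clzl() standing in for ilog2() and a 64-bit unsigned long assumed:

    #include <assert.h>

    /* illustrative re-implementation of the constant-folded rule above */
    static int ob2(unsigned long n)
    {
        return n > 1 ? (63 - __builtin_clzl(n - 1)) + 1 : 0;
    }

    int main(void)
    {
        assert(ob2(0) == 0 && ob2(1) == 0);  /* now well-defined for 0 and 1 */
        assert(ob2(2) == 1 && ob2(3) == 2);
        assert(ob2(4) == 2 && ob2(5) == 3);  /* same as ilog2(roundup_pow_of_two(n)) */
        return 0;
    }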
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 257173e..f541da6 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -35,6 +35,8 @@
#define PHY_ID_KSZ886X 0x00221430
#define PHY_ID_KSZ8863 0x00221435
+#define PHY_ID_KSZ8795 0x00221550
+
/* struct phy_device dev_flags definitions */
#define MICREL_PHY_50MHZ_CLK 0x00000001
#define MICREL_PHY_FXEN 0x00000002
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f7b0dab..6a14034 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1357,39 +1357,11 @@ int clear_page_dirty_for_io(struct page *page);
int get_cmdline(struct task_struct *task, char *buffer, int buflen);
-/* Is the vma a continuation of the stack vma above it? */
-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
-{
- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
-}
-
static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
return !vma->vm_ops;
}
-static inline int stack_guard_page_start(struct vm_area_struct *vma,
- unsigned long addr)
-{
- return (vma->vm_flags & VM_GROWSDOWN) &&
- (vma->vm_start == addr) &&
- !vma_growsdown(vma->vm_prev, addr);
-}
-
-/* Is the vma a continuation of the stack vma below it? */
-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
-{
- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
-}
-
-static inline int stack_guard_page_end(struct vm_area_struct *vma,
- unsigned long addr)
-{
- return (vma->vm_flags & VM_GROWSUP) &&
- (vma->vm_end == addr) &&
- !vma_growsup(vma->vm_next, addr);
-}
-
int vma_is_stack_for_current(struct vm_area_struct *vma);
extern unsigned long move_page_tables(struct vm_area_struct *vma,
@@ -2128,6 +2100,7 @@ void page_cache_async_readahead(struct address_space *mapping,
pgoff_t offset,
unsigned long size);
+extern unsigned long stack_guard_gap;
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
@@ -2156,6 +2129,30 @@ static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * m
return vma;
}
+static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
+{
+ unsigned long vm_start = vma->vm_start;
+
+ if (vma->vm_flags & VM_GROWSDOWN) {
+ vm_start -= stack_guard_gap;
+ if (vm_start > vma->vm_start)
+ vm_start = 0;
+ }
+ return vm_start;
+}
+
+static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
+{
+ unsigned long vm_end = vma->vm_end;
+
+ if (vma->vm_flags & VM_GROWSUP) {
+ vm_end += stack_guard_gap;
+ if (vm_end < vma->vm_end)
+ vm_end = -PAGE_SIZE;
+ }
+ return vm_end;
+}
+
static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
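
To illustrate the new helpers (assumptions: 4 KiB pages and the 256-page default for stack_guard_gap set elsewhere in this series, neither shown in this hunk): for a VM_GROWSDOWN vma, vm_start_gap() lowers the reported start by the gap, so placement checks such as the find_vma() test earlier in this patch reject mappings that would end inside the guard region.

    /* userspace arithmetic sketch only; values are hypothetical */
    unsigned long page = 4096;
    unsigned long gap = 256 * page;                 /* assumed 1 MiB guard gap */
    unsigned long vm_start = 0x7ffffff00000UL;      /* hypothetical stack vma start */
    unsigned long eff_start = vm_start - gap;       /* what vm_start_gap() reports */
    /* a new mapping is accepted only if addr + len <= eff_start */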
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 0f2e651..b718105 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -233,6 +233,7 @@ extern void mmc_cmdq_clk_scaling_start_busy(struct mmc_host *host,
bool lock_needed);
extern void mmc_cmdq_clk_scaling_stop_busy(struct mmc_host *host,
bool lock_needed, bool is_cmdq_dcmd);
+extern void mmc_recovery_fallback_lower_speed(struct mmc_host *host);
/**
* mmc_claim_host - exclusively claim a host
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 9200069..f214b0c 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -480,6 +480,7 @@ struct mmc_host {
int clk_requests; /* internal reference counter */
unsigned int clk_delay; /* number of MCI clk hold cycles */
bool clk_gated; /* clock gated */
+ struct workqueue_struct *clk_gate_wq; /* clock gate work queue */
struct delayed_work clk_gate_work; /* delayed clock gate */
unsigned int clk_old; /* old clock value cache */
spinlock_t clk_lock; /* lock for clk fields */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index a9dcd27..d92d9a6 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -865,11 +865,15 @@ struct netdev_xdp {
* of useless work if you return NETDEV_TX_BUSY.
* Required; cannot be NULL.
*
- * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
- * netdev_features_t features);
- * Adjusts the requested feature flags according to device-specific
- * constraints, and returns the resulting flags. Must not modify
- * the device state.
+ * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
+ *	struct net_device *dev,
+ * netdev_features_t features);
+ * Called by core transmit path to determine if device is capable of
+ * performing offload operations on a given packet. This is to give
+ * the device an opportunity to implement any restrictions that cannot
+ * be otherwise expressed by feature flags. The check is called with
+ * the set of features that the stack has calculated and it returns
+ * those the driver believes to be appropriate.
*
* u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
* void *accel_priv, select_queue_fallback_t fallback);
@@ -1027,6 +1031,12 @@ struct netdev_xdp {
* Called to release previously enslaved netdev.
*
* Feature/offload setting functions.
+ * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
+ * netdev_features_t features);
+ * Adjusts the requested feature flags according to device-specific
+ * constraints, and returns the resulting flags. Must not modify
+ * the device state.
+ *
* int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
* Called to update device configuration to new features. Passed
* feature set might be less than what was returned by ndo_fix_features()).
@@ -1099,15 +1109,6 @@ struct netdev_xdp {
* Callback to use for xmit over the accelerated station. This
* is used in place of ndo_start_xmit on accelerated net
* devices.
- * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
- * struct net_device *dev
- * netdev_features_t features);
- * Called by core transmit path to determine if device is capable of
- * performing offload operations on a given packet. This is to give
- * the device an opportunity to implement any restrictions that cannot
- * be otherwise expressed by feature flags. The check is called with
- * the set of features that the stack has calculated and it returns
- * those the driver believes to be appropriate.
* int (*ndo_set_tx_maxrate)(struct net_device *dev,
* int queue_index, u32 maxrate);
* Called when a user wants to set a max-rate limitation of specific
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index dde3b13..3ca2526 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -7,6 +7,23 @@
#include <linux/sched.h>
#include <asm/irq.h>
+/*
+ * The run state of the lockup detectors is controlled by the content of the
+ * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
+ * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
+ *
+ * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled'
+ * are variables that are only used as an 'interface' between the parameters
+ * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The
+ * 'watchdog_thresh' variable is handled differently because its value is not
+ * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh'
+ * is equal to zero.
+ */
+#define NMI_WATCHDOG_ENABLED_BIT 0
+#define SOFT_WATCHDOG_ENABLED_BIT 1
+#define NMI_WATCHDOG_ENABLED (1 << NMI_WATCHDOG_ENABLED_BIT)
+#define SOFT_WATCHDOG_ENABLED (1 << SOFT_WATCHDOG_ENABLED_BIT)
+
/**
* touch_nmi_watchdog - restart NMI watchdog timeout.
*
@@ -14,11 +31,8 @@
* may be used to reset the timeout - for code which intentionally
* disables interrupts for a long time. This call is stateless.
*/
-#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR_NMI)
-#include <asm/nmi.h>
-#endif
-
#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
+#include <asm/nmi.h>
extern void touch_nmi_watchdog(void);
#else
static inline void touch_nmi_watchdog(void)
@@ -106,9 +120,17 @@ extern int nmi_watchdog_enabled;
extern int soft_watchdog_enabled;
extern int watchdog_user_enabled;
extern int watchdog_thresh;
+extern unsigned long watchdog_enabled;
extern unsigned long *watchdog_cpumask_bits;
+extern atomic_t watchdog_park_in_progress;
+#ifdef CONFIG_SMP
extern int sysctl_softlockup_all_cpu_backtrace;
extern int sysctl_hardlockup_all_cpu_backtrace;
+#else
+#define sysctl_softlockup_all_cpu_backtrace 0
+#define sysctl_hardlockup_all_cpu_backtrace 0
+#endif
+extern bool is_hardlockup(void);
struct ctl_table;
extern int proc_watchdog(struct ctl_table *, int ,
void __user *, size_t *, loff_t *);
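
A minimal sketch of the relation described in the comment block above — how the proc-facing flags correspond to the state bits; the real assignment sites live in kernel/watchdog.c and are not shown in this patch:

    /* sketch only */
    nmi_watchdog_enabled  = !!(watchdog_enabled & NMI_WATCHDOG_ENABLED);
    soft_watchdog_enabled = !!(watchdog_enabled & SOFT_WATCHDOG_ENABLED);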
diff --git a/include/linux/pci.h b/include/linux/pci.h
index a38772a..1b71179 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -178,6 +178,11 @@ enum pci_dev_flags {
PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
/* Get VPD from function 0 VPD */
PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
+ /*
+ * Resume before calling the driver's system suspend hooks, disabling
+ * the direct_complete optimization.
+ */
+ PCI_DEV_FLAGS_NEEDS_RESUME = (__force pci_dev_flags_t) (1 << 11),
};
enum pci_irq_reroute_variant {
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9e7ab05..864c7d7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -177,11 +177,26 @@ extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
extern u64 nr_running_integral(unsigned int cpu);
#endif
+#ifdef CONFIG_SMP
extern void sched_update_nr_prod(int cpu, long delta, bool inc);
extern void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg,
unsigned int *max_nr,
unsigned int *big_max_nr);
extern unsigned int sched_get_cpu_util(int cpu);
+#else
+static inline void sched_update_nr_prod(int cpu, long delta, bool inc)
+{
+}
+static inline void sched_get_nr_running_avg(int *avg, int *iowait_avg,
+ int *big_avg, unsigned int *max_nr,
+ unsigned int *big_max_nr)
+{
+}
+static inline unsigned int sched_get_cpu_util(int cpu)
+{
+ return 0;
+}
+#endif
extern void calc_global_load(unsigned long ticks);
@@ -3905,6 +3920,7 @@ static inline unsigned long rlimit_max(unsigned int limit)
#define SCHED_CPUFREQ_DL (1U << 1)
#define SCHED_CPUFREQ_IOWAIT (1U << 2)
#define SCHED_CPUFREQ_INTERCLUSTER_MIG (1U << 3)
+#define SCHED_CPUFREQ_WALT (1U << 4)
#define SCHED_CPUFREQ_RT_DL (SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL)
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 190bf3b..f0ba8e6 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -22,6 +22,8 @@ extern unsigned int sysctl_sched_is_big_little;
extern unsigned int sysctl_sched_sync_hint_enable;
extern unsigned int sysctl_sched_initial_task_util;
extern unsigned int sysctl_sched_cstate_aware;
+extern unsigned int sysctl_sched_capacity_margin;
+extern unsigned int sysctl_sched_capacity_margin_down;
#ifdef CONFIG_SCHED_WALT
extern unsigned int sysctl_sched_use_walt_cpu_util;
extern unsigned int sysctl_sched_use_walt_task_util;
@@ -33,6 +35,8 @@ extern unsigned int sysctl_sched_cpu_high_irqload;
extern unsigned int sysctl_sched_use_walt_cpu_util;
extern unsigned int sysctl_sched_use_walt_task_util;
extern unsigned int sysctl_sched_boost;
+extern unsigned int sysctl_sched_group_upmigrate_pct;
+extern unsigned int sysctl_sched_group_downmigrate_pct;
#endif
#ifdef CONFIG_SCHED_HMP
@@ -53,8 +57,6 @@ extern unsigned int sysctl_sched_spill_nr_run;
extern unsigned int sysctl_sched_spill_load_pct;
extern unsigned int sysctl_sched_upmigrate_pct;
extern unsigned int sysctl_sched_downmigrate_pct;
-extern unsigned int sysctl_sched_group_upmigrate_pct;
-extern unsigned int sysctl_sched_group_downmigrate_pct;
extern unsigned int sysctl_early_detection_duration;
extern unsigned int sysctl_sched_small_wakee_task_load_pct;
extern unsigned int sysctl_sched_big_waker_task_load_pct;
@@ -67,6 +69,14 @@ extern unsigned int sysctl_sched_freq_aggregate_threshold_pct;
extern unsigned int sysctl_sched_prefer_sync_wakee_to_waker;
extern unsigned int sysctl_sched_short_burst;
extern unsigned int sysctl_sched_short_sleep;
+
+#elif defined(CONFIG_SCHED_WALT)
+
+extern int
+walt_proc_update_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+
#endif /* CONFIG_SCHED_HMP */
enum sched_tunable_scaling {
@@ -148,6 +158,10 @@ extern int sched_rt_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos);
+extern int sched_updown_migrate_handler(struct ctl_table *table,
+ int write, void __user *buffer,
+ size_t *lenp, loff_t *ppos);
+
extern int sysctl_numa_balancing(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos);
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index adf4e51..8f84c84 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -59,6 +59,9 @@ extern int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int,
void __user *, size_t *, loff_t *);
extern int proc_do_large_bitmap(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
+extern int proc_douintvec_capacity(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
/*
* Register a set of sysctl names by calling register_sysctl_table
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index e880054..2c225d4 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -29,7 +29,6 @@
*/
struct tk_read_base {
struct clocksource *clock;
- cycle_t (*read)(struct clocksource *cs);
cycle_t mask;
cycle_t cycle_last;
u32 mult;
@@ -58,7 +57,7 @@ struct tk_read_base {
* interval.
* @xtime_remainder: Shifted nano seconds left over when rounding
* @cycle_interval
- * @raw_interval: Raw nano seconds accumulated per NTP interval.
+ * @raw_interval: Shifted raw nano seconds accumulated per NTP interval.
* @ntp_error: Difference between accumulated time and NTP time in ntp
* shifted nano seconds.
* @ntp_error_shift: Shift conversion between clock shifted nano seconds and
@@ -100,7 +99,7 @@ struct timekeeper {
cycle_t cycle_interval;
u64 xtime_interval;
s64 xtime_remainder;
- u32 raw_interval;
+ u64 raw_interval;
/* The ntp_tick_length() value currently being used.
* This cached copy ensures we consistently apply the tick
* length for an entire tick, as ntp_tick_length may change
diff --git a/include/net/cnss_utils.h b/include/net/cnss_utils.h
new file mode 100644
index 0000000..6ff0fd0
--- /dev/null
+++ b/include/net/cnss_utils.h
@@ -0,0 +1,40 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CNSS_UTILS_H_
+#define _CNSS_UTILS_H_
+
+enum cnss_utils_cc_src {
+ CNSS_UTILS_SOURCE_CORE,
+ CNSS_UTILS_SOURCE_11D,
+ CNSS_UTILS_SOURCE_USER
+};
+
+extern int cnss_utils_set_wlan_unsafe_channel(struct device *dev,
+ u16 *unsafe_ch_list,
+ u16 ch_count);
+extern int cnss_utils_get_wlan_unsafe_channel(struct device *dev,
+ u16 *unsafe_ch_list,
+ u16 *ch_count, u16 buf_len);
+extern int cnss_utils_wlan_set_dfs_nol(struct device *dev,
+ const void *info, u16 info_len);
+extern int cnss_utils_wlan_get_dfs_nol(struct device *dev,
+ void *info, u16 info_len);
+extern int cnss_utils_get_driver_load_cnt(struct device *dev);
+extern void cnss_utils_increment_driver_load_cnt(struct device *dev);
+extern int cnss_utils_set_wlan_mac_address(const u8 *in, uint32_t len);
+extern u8 *cnss_utils_get_wlan_mac_address(struct device *dev, uint32_t *num);
+extern void cnss_utils_set_cc_source(struct device *dev,
+ enum cnss_utils_cc_src cc_source);
+extern enum cnss_utils_cc_src cnss_utils_get_cc_source(struct device *dev);
+
+#endif
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 91afb4a..615ce0a 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -776,6 +776,11 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
{
u32 hash;
+	/* @flowlabel may include more than a flow label, e.g., the traffic class.
+ * Here we want only the flow label value.
+ */
+ flowlabel &= IPV6_FLOWLABEL_MASK;
+
if (flowlabel ||
net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF ||
(!autolabel &&
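
To make the masking concrete (illustration only; this assumes the usual definition IPV6_FLOWLABEL_MASK == htonl(0x000FFFFF)): flowinfo carries 8 traffic-class bits above the 20-bit flow label, and the new mask strips them before the label is used:

    /* fragment, not a complete program */
    __be32 flowinfo = htonl(0x0FF12345);             /* tclass 0xff, label 0x12345 */
    __be32 label    = flowinfo & htonl(0x000FFFFF);  /* == htonl(0x00012345) */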
diff --git a/include/soc/qcom/icnss.h b/include/soc/qcom/icnss.h
index 549cb84..3527c35 100644
--- a/include/soc/qcom/icnss.h
+++ b/include/soc/qcom/icnss.h
@@ -77,13 +77,6 @@ struct icnss_wlan_enable_cfg {
struct icnss_shadow_reg_cfg *shadow_reg_cfg;
};
-/* MSA Memory Regions Information */
-struct icnss_mem_region_info {
- uint64_t reg_addr;
- uint32_t size;
- uint8_t secure_flag;
-};
-
/* driver modes */
enum icnss_driver_mode {
ICNSS_MISSION,
diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h
index 0c68ae22..33dfa76 100644
--- a/include/trace/events/skb.h
+++ b/include/trace/events/skb.h
@@ -50,6 +50,33 @@ TRACE_EVENT(consume_skb,
TP_printk("skbaddr=%p", __entry->skbaddr)
);
+TRACE_EVENT(print_skb_gso,
+
+ TP_PROTO(struct sk_buff *skb, __be16 src, __be16 dest),
+
+ TP_ARGS(skb, src, dest),
+
+ TP_STRUCT__entry(
+ __field(void *, skbaddr)
+ __field(int, len)
+ __field(int, data_len)
+ __field(__be16, src)
+ __field(__be16, dest)
+ ),
+
+ TP_fast_assign(
+ __entry->skbaddr = skb;
+ __entry->len = skb->len;
+ __entry->data_len = skb->data_len;
+ __entry->src = src;
+ __entry->dest = dest;
+ ),
+
+ TP_printk("GSO: skbaddr=%pK, len=%d, data_len=%d, src=%u, dest=%u",
+ __entry->skbaddr, __entry->len, __entry->data_len,
+ be16_to_cpu(__entry->src), be16_to_cpu(__entry->dest))
+);
+
TRACE_EVENT(skb_copy_datagram_iovec,
TP_PROTO(const struct sk_buff *skb, int len),
diff --git a/include/uapi/drm/sde_drm.h b/include/uapi/drm/sde_drm.h
index 44b42a6..439a925 100644
--- a/include/uapi/drm/sde_drm.h
+++ b/include/uapi/drm/sde_drm.h
@@ -67,6 +67,48 @@
#define SDE_DRM_BITMASK_COUNT 64
/**
+ * Framebuffer modes for "fb_translation_mode" PLANE property
+ *
+ * @SDE_DRM_FB_NON_SEC: IOMMU configuration for this framebuffer mode
+ * is non-secure domain and requires
+ * both stage I and stage II translations when
+ * this buffer is accessed by the display HW.
+ *                      This is the default mode of all framebuffers.
+ * @SDE_DRM_FB_SEC: IOMMU configuration for this framebuffer mode
+ * is secure domain and requires
+ * both stage I and stage II translations when
+ * this buffer is accessed by the display HW.
+ * @SDE_DRM_FB_NON_SEC_DIR_TRANS: IOMMU configuration for this framebuffer mode
+ * is non-secure domain and requires
+ * only stage II translation when
+ * this buffer is accessed by the display HW.
+ * @SDE_DRM_FB_SEC_DIR_TRANS: IOMMU configuration for this framebuffer mode
+ * is secure domain and requires
+ * only stage II translation when
+ * this buffer is accessed by the display HW.
+ */
+
+#define SDE_DRM_FB_NON_SEC 0
+#define SDE_DRM_FB_SEC 1
+#define SDE_DRM_FB_NON_SEC_DIR_TRANS 2
+#define SDE_DRM_FB_SEC_DIR_TRANS 3
+
+/**
+ * Secure levels for "security_level" CRTC property.
+ * CRTC property which specifies what plane types
+ * can be attached to this CRTC. Plane component
+ * derives the plane type based on the FB_MODE.
+ * @ SDE_DRM_SEC_NON_SEC: Both Secure and non-secure plane types can be
+ * attached to this CRTC. This is the default state of
+ * the CRTC.
+ * @ SDE_DRM_SEC_ONLY: Only secure planes can be added to this CRTC. If a
+ * CRTC is instructed to be in this mode it follows the
+ * platform dependent restrictions.
+ */
+#define SDE_DRM_SEC_NON_SEC 0
+#define SDE_DRM_SEC_ONLY 1
+
+/**
* struct sde_drm_pix_ext_v1 - version 1 of pixel ext structure
* @num_ext_pxls_lr: Number of total horizontal pixels
* @num_ext_pxls_tb: Number of total vertical lines
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
index 5410ec9..939ad08 100644
--- a/include/uapi/linux/msm_ipa.h
+++ b/include/uapi/linux/msm_ipa.h
@@ -69,8 +69,12 @@
#define IPA_IOCTL_ADD_FLT_RULE_AFTER 44
#define IPA_IOCTL_GET_HW_VERSION 45
#define IPA_IOCTL_ADD_RT_RULE_EXT 46
-#define IPA_IOCTL_NAT_MODIFY_PDN 47
-#define IPA_IOCTL_MAX 48
+#define IPA_IOCTL_ADD_VLAN_IFACE 47
+#define IPA_IOCTL_DEL_VLAN_IFACE 48
+#define IPA_IOCTL_ADD_L2TP_VLAN_MAPPING 49
+#define IPA_IOCTL_DEL_L2TP_VLAN_MAPPING 50
+#define IPA_IOCTL_NAT_MODIFY_PDN 51
+#define IPA_IOCTL_MAX 52
/**
* max size of the header to be inserted
@@ -127,6 +131,7 @@
#define IPA_FLT_MAC_SRC_ADDR_802_3 (1ul << 19)
#define IPA_FLT_MAC_DST_ADDR_802_3 (1ul << 20)
#define IPA_FLT_MAC_ETHER_TYPE (1ul << 21)
+#define IPA_FLT_MAC_DST_ADDR_L2TP (1ul << 22)
/**
* maximal number of NAT PDNs in the PDN config table
@@ -454,7 +459,16 @@ enum ipa_ssr_event {
IPA_SSR_EVENT_MAX
};
-#define IPA_EVENT_MAX_NUM ((int)IPA_SSR_EVENT_MAX)
+enum ipa_vlan_l2tp_event {
+ ADD_VLAN_IFACE = IPA_SSR_EVENT_MAX,
+ DEL_VLAN_IFACE,
+ ADD_L2TP_VLAN_MAPPING,
+ DEL_L2TP_VLAN_MAPPING,
+ IPA_VLAN_L2TP_EVENT_MAX,
+};
+
+#define IPA_EVENT_MAX_NUM (IPA_VLAN_L2TP_EVENT_MAX)
+#define IPA_EVENT_MAX ((int)IPA_EVENT_MAX_NUM)
/**
* enum ipa_rm_resource_name - IPA RM clients identification names
@@ -1488,6 +1502,30 @@ struct ipa_ioc_nat_pdn_entry {
};
/**
+ * struct ipa_ioc_vlan_iface_info - add vlan interface
+ * @name: interface name
+ * @vlan_id: VLAN ID
+ */
+struct ipa_ioc_vlan_iface_info {
+ char name[IPA_RESOURCE_NAME_MAX];
+ uint8_t vlan_id;
+};
+
+/**
+ * struct ipa_ioc_l2tp_vlan_mapping_info - l2tp->vlan mapping info
+ * @iptype: l2tp tunnel IP type
+ * @l2tp_iface_name: l2tp interface name
+ * @l2tp_session_id: l2tp session id
+ * @vlan_iface_name: vlan interface name
+ */
+struct ipa_ioc_l2tp_vlan_mapping_info {
+ enum ipa_ip_type iptype;
+ char l2tp_iface_name[IPA_RESOURCE_NAME_MAX];
+ uint8_t l2tp_session_id;
+ char vlan_iface_name[IPA_RESOURCE_NAME_MAX];
+};
+
+/**
* struct ipa_msg_meta - Format of the message meta-data.
* @msg_type: the type of the message
* @rsvd: reserved bits for future use.
@@ -1764,6 +1802,21 @@ enum ipacm_client_enum {
IPA_IOCTL_GET_HW_VERSION, \
enum ipa_hw_type *)
+#define IPA_IOC_ADD_VLAN_IFACE _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ADD_VLAN_IFACE, \
+ struct ipa_ioc_vlan_iface_info *)
+
+#define IPA_IOC_DEL_VLAN_IFACE _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_DEL_VLAN_IFACE, \
+ struct ipa_ioc_vlan_iface_info *)
+
+#define IPA_IOC_ADD_L2TP_VLAN_MAPPING _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ADD_L2TP_VLAN_MAPPING, \
+ struct ipa_ioc_l2tp_vlan_mapping_info *)
+
+#define IPA_IOC_DEL_L2TP_VLAN_MAPPING _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_DEL_L2TP_VLAN_MAPPING, \
+ struct ipa_ioc_l2tp_vlan_mapping_info *)
/*
* unique magic number of the Tethering bridge ioctls
*/
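
A hedged userspace sketch of how the new VLAN-interface ioctl might be invoked; the uapi header is assumed to be installed as <linux/msm_ipa.h>, and /dev/ipa is an assumed device node, not something this patch confirms:

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/msm_ipa.h>

    int add_vlan_iface(const char *name, unsigned char vlan_id)
    {
        struct ipa_ioc_vlan_iface_info info;
        int fd, ret;

        fd = open("/dev/ipa", O_RDWR);      /* assumed device node */
        if (fd < 0)
            return -1;

        memset(&info, 0, sizeof(info));
        strncpy(info.name, name, sizeof(info.name) - 1);
        info.vlan_id = vlan_id;

        ret = ioctl(fd, IPA_IOC_ADD_VLAN_IFACE, &info);
        close(fd);
        return ret;
    }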
diff --git a/include/uapi/linux/netfilter/nf_log.h b/include/uapi/linux/netfilter/nf_log.h
index 8be21e0..d0b5fa9 100644
--- a/include/uapi/linux/netfilter/nf_log.h
+++ b/include/uapi/linux/netfilter/nf_log.h
@@ -9,4 +9,6 @@
#define NF_LOG_MACDECODE 0x20 /* Decode MAC header */
#define NF_LOG_MASK 0x2f
+#define NF_LOG_PREFIXLEN 128
+
#endif /* _NETFILTER_NF_LOG_H */
diff --git a/include/uapi/linux/usb/ch11.h b/include/uapi/linux/usb/ch11.h
index 361297e..576c704e 100644
--- a/include/uapi/linux/usb/ch11.h
+++ b/include/uapi/linux/usb/ch11.h
@@ -22,6 +22,9 @@
*/
#define USB_MAXCHILDREN 31
+/* See USB 3.1 spec Table 10-5 */
+#define USB_SS_MAXPORTS 15
+
/*
* Hub request types
*/
diff --git a/kernel/Makefile b/kernel/Makefile
index eb26e12c..314e7d6 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -84,6 +84,7 @@
obj-$(CONFIG_KGDB) += debug/
obj-$(CONFIG_DETECT_HUNG_TASK) += hung_task.o
obj-$(CONFIG_LOCKUP_DETECTOR) += watchdog.o
+obj-$(CONFIG_HARDLOCKUP_DETECTOR) += watchdog_hld.o
obj-$(CONFIG_SECCOMP) += seccomp.o
obj-$(CONFIG_RELAY) += relay.o
obj-$(CONFIG_SYSCTL) += utsname_sysctl.o
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index fe158bd..44c17f4 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2164,7 +2164,6 @@ static bool may_access_skb(enum bpf_prog_type type)
case BPF_PROG_TYPE_SOCKET_FILTER:
case BPF_PROG_TYPE_SCHED_CLS:
case BPF_PROG_TYPE_SCHED_ACT:
- case BPF_PROG_TYPE_CGROUP_SKB:
return true;
default:
return false;
diff --git a/kernel/configs/android-base-arm64.cfg b/kernel/configs/android-base-arm64.cfg
new file mode 100644
index 0000000..43f23d6
--- /dev/null
+++ b/kernel/configs/android-base-arm64.cfg
@@ -0,0 +1,5 @@
+# KEEP ALPHABETICALLY SORTED
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+CONFIG_SWP_EMULATION=y
diff --git a/kernel/configs/android-base.config b/kernel/configs/android-base.config
index fb6017e..301e1a6 100644
--- a/kernel/configs/android-base.config
+++ b/kernel/configs/android-base.config
@@ -12,7 +12,6 @@
CONFIG_ANDROID_BINDER_DEVICES=binder,hwbinder,vndbinder
CONFIG_ANDROID_BINDER_IPC=y
CONFIG_ANDROID_LOW_MEMORY_KILLER=y
-CONFIG_ARMV8_DEPRECATED=y
CONFIG_ASHMEM=y
CONFIG_AUDIT=y
CONFIG_BLK_DEV_INITRD=y
@@ -22,7 +21,6 @@
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_SCHED=y
CONFIG_CGROUP_BPF=y
-CONFIG_CP15_BARRIER_EMULATION=y
CONFIG_DEFAULT_SECURITY_SELINUX=y
CONFIG_EMBEDDED=y
CONFIG_FB=y
@@ -156,9 +154,7 @@
CONFIG_SECURITY_NETWORK=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY_SELINUX=y
-CONFIG_SETEND_EMULATION=y
CONFIG_STAGING=y
-CONFIG_SWP_EMULATION=y
CONFIG_SYNC=y
CONFIG_TUN=y
CONFIG_UID_SYS_STATS=y
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 78b72d5..3577ec6a 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -886,6 +886,9 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
if (!cpu_present(cpu))
return -EINVAL;
+ if (!tasks_frozen && !cpu_isolated(cpu) && num_online_uniso_cpus() == 1)
+ return -EBUSY;
+
cpu_hotplug_begin();
cpuhp_tasks_frozen = tasks_frozen;
diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c
index dac3724..007482b 100644
--- a/kernel/irq/cpuhotplug.c
+++ b/kernel/irq/cpuhotplug.c
@@ -36,10 +36,32 @@ static bool migrate_one_irq(struct irq_desc *desc)
affinity = &available_cpus;
if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+ /*
+ * The order of preference for selecting a fallback CPU is
+ *
+ * (1) online and un-isolated CPU from default affinity
+ * (2) online and un-isolated CPU
+ * (3) online CPU
+ */
cpumask_andnot(&available_cpus, cpu_online_mask,
cpu_isolated_mask);
- if (cpumask_empty(affinity))
+ if (cpumask_intersects(&available_cpus, irq_default_affinity))
+ cpumask_and(&available_cpus, &available_cpus,
+ irq_default_affinity);
+ else if (cpumask_empty(&available_cpus))
affinity = cpu_online_mask;
+
+ /*
+ * We are overriding the affinity with all online and
+		 * un-isolated CPUs. The irq_set_affinity_locked() call
+		 * below notifies this mask to the PM QOS affinity listener.
+ * That results in applying the CPU_DMA_LATENCY QOS
+ * to all the CPUs specified in the mask. But the low
+ * level irqchip driver sets the affinity of an irq
+ * to only one CPU. So pick only one CPU from the
+ * prepared mask while overriding the user affinity.
+ */
+ affinity = cpumask_of(cpumask_any(affinity));
ret = true;
}
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 4f64490..c1195eb 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1311,8 +1311,10 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
ret = __irq_set_trigger(desc,
new->flags & IRQF_TRIGGER_MASK);
- if (ret)
+ if (ret) {
+ irq_release_resources(desc);
goto out_mask;
+ }
}
desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index feaa813..88a02e3 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -114,6 +114,11 @@ static ssize_t write_irq_affinity(int type, struct file *file,
goto free_cpumask;
}
+ if (cpumask_subset(new_value, cpu_isolated_mask)) {
+ err = -EINVAL;
+ goto free_cpumask;
+ }
+
/*
* Do not allow disabling IRQs completely - it's a too easy
* way to make the system unusable accidentally :-) At least
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 0854263..12fe782 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -284,6 +284,9 @@ static inline void pm_qos_set_value_for_cpus(struct pm_qos_constraints *c)
if (req->node.prio > qos_val[cpu])
qos_val[cpu] = req->node.prio;
break;
+ case PM_QOS_SUM:
+ qos_val[cpu] += req->node.prio;
+ break;
default:
BUG();
break;
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index 27a7574..3d12ce8 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -17,9 +17,9 @@
obj-y += core.o loadavg.o clock.o cputime.o
obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
-obj-y += wait.o swait.o completion.o idle.o sched_avg.o
+obj-y += wait.o swait.o completion.o idle.o
obj-$(CONFIG_SCHED_HMP) += hmp.o boost.o
-obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o energy.o
+obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o energy.o sched_avg.o
obj-$(CONFIG_SCHED_WALT) += walt.o boost.o
obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
obj-$(CONFIG_SCHEDSTATS) += stats.o
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index de1b3b7..c2433b3 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5787,7 +5787,7 @@ void idle_task_exit(void)
BUG_ON(cpu_online(smp_processor_id()));
if (mm != &init_mm) {
- switch_mm_irqs_off(mm, &init_mm, current);
+ switch_mm(mm, &init_mm, current);
finish_arch_post_lock_switch();
}
mmdrop(mm);
@@ -9137,6 +9137,32 @@ int sched_rr_handler(struct ctl_table *table, int write,
return ret;
}
+#ifdef CONFIG_PROC_SYSCTL
+int sched_updown_migrate_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ int ret;
+ unsigned int *data = (unsigned int *)table->data;
+ unsigned int old_val;
+ static DEFINE_MUTEX(mutex);
+
+ mutex_lock(&mutex);
+ old_val = *data;
+
+ ret = proc_douintvec_capacity(table, write, buffer, lenp, ppos);
+
+ if (!ret && write &&
+ sysctl_sched_capacity_margin > sysctl_sched_capacity_margin_down) {
+ ret = -EINVAL;
+ *data = old_val;
+ }
+ mutex_unlock(&mutex);
+
+ return ret;
+}
+#endif
+
#ifdef CONFIG_CGROUP_SCHED
inline struct task_group *css_tg(struct cgroup_subsys_state *css)
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 0a0e9aa..e756b83 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -36,6 +36,10 @@ struct sugov_policy {
raw_spinlock_t update_lock; /* For shared policies */
u64 last_freq_update_time;
s64 freq_update_delay_ns;
+ u64 last_ws;
+ u64 curr_cycles;
+ u64 last_cyc_update_time;
+ unsigned long avg_cap;
unsigned int next_freq;
unsigned int cached_raw_freq;
unsigned long hispeed_util;
@@ -199,19 +203,63 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
sg_cpu->iowait_boost >>= 1;
}
+static unsigned long freq_to_util(struct sugov_policy *sg_policy,
+ unsigned int freq)
+{
+ return mult_frac(sg_policy->max, freq,
+ sg_policy->policy->cpuinfo.max_freq);
+}
+
+#define KHZ 1000
+static void sugov_track_cycles(struct sugov_policy *sg_policy,
+ unsigned int prev_freq,
+ u64 upto)
+{
+ u64 delta_ns, cycles;
+ /* Track cycles in current window */
+ delta_ns = upto - sg_policy->last_cyc_update_time;
+ cycles = (prev_freq * delta_ns) / (NSEC_PER_SEC / KHZ);
+ sg_policy->curr_cycles += cycles;
+ sg_policy->last_cyc_update_time = upto;
+}
+
+static void sugov_calc_avg_cap(struct sugov_policy *sg_policy, u64 curr_ws,
+ unsigned int prev_freq)
+{
+ u64 last_ws = sg_policy->last_ws;
+ unsigned int avg_freq;
+
+ WARN_ON(curr_ws < last_ws);
+ if (curr_ws <= last_ws)
+ return;
+
+ /* If we skipped some windows */
+ if (curr_ws > (last_ws + sched_ravg_window)) {
+ avg_freq = prev_freq;
+ /* Reset tracking history */
+ sg_policy->last_cyc_update_time = curr_ws;
+ } else {
+ sugov_track_cycles(sg_policy, prev_freq, curr_ws);
+ avg_freq = sg_policy->curr_cycles;
+ avg_freq /= sched_ravg_window / (NSEC_PER_SEC / KHZ);
+ }
+ sg_policy->avg_cap = freq_to_util(sg_policy, avg_freq);
+ sg_policy->curr_cycles = 0;
+ sg_policy->last_ws = curr_ws;
+}
+
#define NL_RATIO 75
#define HISPEED_LOAD 90
static void sugov_walt_adjust(struct sugov_cpu *sg_cpu, unsigned long *util,
unsigned long *max)
{
struct sugov_policy *sg_policy = sg_cpu->sg_policy;
- unsigned long cap_cur = capacity_curr_of(sg_cpu->cpu);
bool is_migration = sg_cpu->flags & SCHED_CPUFREQ_INTERCLUSTER_MIG;
unsigned long nl = sg_cpu->walt_load.nl;
unsigned long cpu_util = sg_cpu->util;
bool is_hiload;
- is_hiload = (cpu_util >= mult_frac(cap_cur,
+ is_hiload = (cpu_util >= mult_frac(sg_policy->avg_cap,
HISPEED_LOAD,
100));
@@ -247,6 +295,8 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
} else {
sugov_get_util(&util, &max, sg_cpu->cpu);
sugov_iowait_boost(sg_cpu, &util, &max);
+ sugov_calc_avg_cap(sg_policy, sg_cpu->walt_load.ws,
+ sg_policy->policy->cur);
sugov_walt_adjust(sg_cpu, &util, &max);
next_f = get_next_freq(sg_policy, util, max);
}
@@ -322,12 +372,11 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
raw_spin_lock(&sg_policy->update_lock);
if (sg_policy->max != max) {
- hs_util = mult_frac(max,
- sg_policy->tunables->hispeed_freq,
- sg_policy->policy->cpuinfo.max_freq);
+ sg_policy->max = max;
+ hs_util = freq_to_util(sg_policy,
+ sg_policy->tunables->hispeed_freq);
hs_util = mult_frac(hs_util, TARGET_LOAD, 100);
sg_policy->hispeed_util = hs_util;
- sg_policy->max = max;
}
sg_cpu->util = util;
@@ -337,6 +386,9 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
sugov_set_iowait_boost(sg_cpu, time, flags);
sg_cpu->last_update = time;
+ sugov_calc_avg_cap(sg_policy, sg_cpu->walt_load.ws,
+ sg_policy->policy->cur);
+
trace_sugov_util_update(sg_cpu->cpu, sg_cpu->util, max,
sg_cpu->walt_load.nl,
sg_cpu->walt_load.pl, flags);
@@ -354,6 +406,10 @@ static void sugov_work(struct kthread_work *work)
struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
mutex_lock(&sg_policy->work_lock);
+ raw_spin_lock(&sg_policy->update_lock);
+ sugov_track_cycles(sg_policy, sg_policy->policy->cur,
+ sched_ktime_clock());
+ raw_spin_unlock(&sg_policy->update_lock);
__cpufreq_driver_target(sg_policy->policy, sg_policy->next_freq,
CPUFREQ_RELATION_L);
mutex_unlock(&sg_policy->work_lock);
@@ -438,11 +494,12 @@ static ssize_t hispeed_freq_store(struct gov_attr_set *attr_set,
tunables->hispeed_freq = val;
list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) {
- hs_util = mult_frac(sg_policy->max,
- sg_policy->tunables->hispeed_freq,
- sg_policy->policy->cpuinfo.max_freq);
+ raw_spin_lock(&sg_policy->update_lock);
+ hs_util = freq_to_util(sg_policy,
+ sg_policy->tunables->hispeed_freq);
hs_util = mult_frac(hs_util, TARGET_LOAD, 100);
sg_policy->hispeed_util = hs_util;
+ raw_spin_unlock(&sg_policy->update_lock);
}
return count;
@@ -725,6 +782,10 @@ static void sugov_limits(struct cpufreq_policy *policy)
if (!policy->fast_switch_enabled) {
mutex_lock(&sg_policy->work_lock);
+ raw_spin_lock(&sg_policy->update_lock);
+ sugov_track_cycles(sg_policy, sg_policy->policy->cur,
+ sched_ktime_clock());
+ raw_spin_unlock(&sg_policy->update_lock);
cpufreq_policy_apply_limits(policy);
mutex_unlock(&sg_policy->work_lock);
}
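
A worked example of the cycle accounting added above (numbers are illustrative; the 20 ms value for sched_ravg_window is an assumption): if a CPU spends 10 ms at 1 GHz and 10 ms at 2 GHz within one window, sugov_track_cycles() accumulates 10,000,000 + 20,000,000 = 30,000,000, and sugov_calc_avg_cap() divides by the 20 ms window to get an average of 1,500,000 kHz, which freq_to_util() then scales to 75% of the maximum capacity for a 2 GHz CPU.

    /* userspace arithmetic mirroring the helpers above; window size assumed */
    unsigned long long nsec_per_khz = 1000000ULL;        /* NSEC_PER_SEC / KHZ */
    unsigned long long window_ns    = 20000000ULL;       /* assumed 20 ms window */
    unsigned long long cycles = 0;

    cycles += 1000000ULL * 10000000ULL / nsec_per_khz;   /* 10 ms at 1 GHz */
    cycles += 2000000ULL * 10000000ULL / nsec_per_khz;   /* 10 ms at 2 GHz */

    unsigned long long avg_khz = cycles / (window_ns / nsec_per_khz);  /* 1500000 */
    /* avg_cap = max * avg_khz / cpuinfo.max_freq, i.e. 0.75 * max here */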
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 4d7c054..d06ac7d 100755
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -221,8 +221,8 @@ unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
* The margin used when comparing utilization with CPU capacity:
* util * 1024 < capacity * margin
*/
-unsigned int capacity_margin = 1078; /* ~5% margin */
-unsigned int capacity_margin_down = 1205; /* ~15% margin */
+unsigned int sysctl_sched_capacity_margin = 1078; /* ~5% margin */
+unsigned int sysctl_sched_capacity_margin_down = 1205; /* ~15% margin */
static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
@@ -5918,9 +5918,9 @@ static inline bool __task_fits(struct task_struct *p, int cpu, int util)
util += boosted_task_util(p);
if (capacity_orig_of(task_cpu(p)) > capacity_orig_of(cpu))
- margin = capacity_margin_down;
+ margin = sysctl_sched_capacity_margin_down;
else
- margin = capacity_margin;
+ margin = sysctl_sched_capacity_margin;
return (capacity_orig_of(cpu) * 1024) > (util * margin);
}
@@ -5948,7 +5948,7 @@ static inline bool task_fits_spare(struct task_struct *p, int cpu)
static bool __cpu_overutilized(int cpu, int delta)
{
return (capacity_orig_of(cpu) * 1024) <
- ((cpu_util(cpu) + delta) * capacity_margin);
+ ((cpu_util(cpu) + delta) * sysctl_sched_capacity_margin);
}
bool cpu_overutilized(int cpu)
@@ -6085,10 +6085,14 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
struct sched_group *fit_group = NULL, *spare_group = NULL;
unsigned long min_load = ULONG_MAX, this_load = 0;
unsigned long fit_capacity = ULONG_MAX;
- unsigned long max_spare_capacity = capacity_margin - SCHED_CAPACITY_SCALE;
+ unsigned long max_spare_capacity;
+
int load_idx = sd->forkexec_idx;
int imbalance = 100 + (sd->imbalance_pct-100)/2;
+ max_spare_capacity = sysctl_sched_capacity_margin -
+ SCHED_CAPACITY_SCALE;
+
if (sd_flag & SD_BALANCE_WAKE)
load_idx = sd->wake_idx;
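
In concrete terms, the renamed margins keep the existing fit rule util * 1024 < capacity * margin: with sysctl_sched_capacity_margin = 1078 a task fits while its utilization stays below roughly 95% of the CPU's capacity, and with sysctl_sched_capacity_margin_down = 1205 below roughly 85%. A sketch of the comparison:

    /* sketch of the fit test with the default margins from this hunk */
    int fits_up   = (cap * 1024) > (util * 1078);   /* util under ~95% of cap */
    int fits_down = (cap * 1024) > (util * 1205);   /* util under ~85% of cap */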
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 566e103..2524954 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1779,6 +1779,7 @@ struct sched_walt_cpu_load {
unsigned long prev_window_util;
unsigned long nl;
unsigned long pl;
+ u64 ws;
};
static inline unsigned long cpu_util_cum(int cpu, int delta)
@@ -1828,6 +1829,7 @@ cpu_util_freq(int cpu, struct sched_walt_cpu_load *walt_load)
walt_load->prev_window_util = util;
walt_load->nl = nl;
walt_load->pl = 0;
+ walt_load->ws = rq->window_start;
}
}
#endif
@@ -2207,6 +2209,15 @@ static inline u64 irq_time_read(int cpu)
}
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
+#ifdef CONFIG_SCHED_WALT
+u64 sched_ktime_clock(void);
+#else /* CONFIG_SCHED_WALT */
+static inline u64 sched_ktime_clock(void)
+{
+ return 0;
+}
+#endif /* CONFIG_SCHED_WALT */
+
#ifdef CONFIG_CPU_FREQ
DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
@@ -2239,8 +2250,10 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
#ifdef CONFIG_SCHED_WALT
/*
* Skip if we've already reported, but not if this is an inter-cluster
- * migration
+ * migration. Also only allow WALT update sites.
*/
+ if (!(flags & SCHED_CPUFREQ_WALT))
+ return;
if (!sched_disable_window_stats &&
(rq->load_reported_window == rq->window_start) &&
!(flags & SCHED_CPUFREQ_INTERCLUSTER_MIG))
@@ -2251,7 +2264,7 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
cpu_of(rq)));
if (data)
- data->func(data, sched_clock(), flags);
+ data->func(data, sched_ktime_clock(), flags);
}
static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags)
@@ -2336,7 +2349,6 @@ extern unsigned int __read_mostly sched_downmigrate;
extern unsigned int __read_mostly sysctl_sched_spill_nr_run;
extern unsigned int __read_mostly sched_load_granule;
-extern u64 sched_ktime_clock(void);
extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
extern void reset_cpu_hmp_stats(int cpu, int reset_cra);
extern int update_preferred_cluster(struct related_thread_group *grp,
diff --git a/kernel/sched/sched_avg.c b/kernel/sched/sched_avg.c
index 7f86c0b..4238924 100644
--- a/kernel/sched/sched_avg.c
+++ b/kernel/sched/sched_avg.c
@@ -162,16 +162,14 @@ EXPORT_SYMBOL(sched_update_nr_prod);
unsigned int sched_get_cpu_util(int cpu)
{
struct rq *rq = cpu_rq(cpu);
- u64 util = 0;
- unsigned long capacity = SCHED_CAPACITY_SCALE, flags;
+ u64 util;
+ unsigned long capacity, flags;
unsigned int busy;
raw_spin_lock_irqsave(&rq->lock, flags);
-#ifdef CONFIG_SMP
util = rq->cfs.avg.util_avg;
capacity = capacity_orig_of(cpu);
-#endif
#ifdef CONFIG_SCHED_WALT
if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index 50f889b..ae45283 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -871,8 +871,10 @@ void fixup_busy_time(struct task_struct *p, int new_cpu)
migrate_top_tasks(p, src_rq, dest_rq);
if (!same_freq_domain(new_cpu, task_cpu(p))) {
- cpufreq_update_util(dest_rq, SCHED_CPUFREQ_INTERCLUSTER_MIG);
- cpufreq_update_util(src_rq, SCHED_CPUFREQ_INTERCLUSTER_MIG);
+ cpufreq_update_util(dest_rq, SCHED_CPUFREQ_INTERCLUSTER_MIG |
+ SCHED_CPUFREQ_WALT);
+ cpufreq_update_util(src_rq, SCHED_CPUFREQ_INTERCLUSTER_MIG |
+ SCHED_CPUFREQ_WALT);
}
if (p == src_rq->ed_task) {
@@ -3040,10 +3042,40 @@ void walt_irq_work(struct irq_work *irq_work)
for_each_sched_cluster(cluster)
for_each_cpu(cpu, &cluster->cpus)
- cpufreq_update_util(cpu_rq(cpu), 0);
+ cpufreq_update_util(cpu_rq(cpu), SCHED_CPUFREQ_WALT);
for_each_cpu(cpu, cpu_possible_mask)
raw_spin_unlock(&cpu_rq(cpu)->lock);
core_ctl_check(this_rq()->window_start);
}
+
+#ifndef CONFIG_SCHED_HMP
+int walt_proc_update_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ int ret;
+ unsigned int *data = (unsigned int *)table->data;
+ static DEFINE_MUTEX(mutex);
+
+ mutex_lock(&mutex);
+ ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+ if (ret || !write) {
+ mutex_unlock(&mutex);
+ return ret;
+ }
+
+ if (data == &sysctl_sched_group_upmigrate_pct)
+ sched_group_upmigrate =
+ pct_to_real(sysctl_sched_group_upmigrate_pct);
+ else if (data == &sysctl_sched_group_downmigrate_pct)
+ sched_group_downmigrate =
+ pct_to_real(sysctl_sched_group_downmigrate_pct);
+ else
+ ret = -EINVAL;
+ mutex_unlock(&mutex);
+
+ return ret;
+}
+#endif
diff --git a/kernel/sched/walt.h b/kernel/sched/walt.h
index f153332..887933f 100644
--- a/kernel/sched/walt.h
+++ b/kernel/sched/walt.h
@@ -194,8 +194,6 @@ static inline int exiting_task(struct task_struct *p)
return (p->ravg.sum_history[0] == EXITING_TASK_MARKER);
}
-extern u64 sched_ktime_clock(void);
-
static inline struct sched_cluster *cpu_cluster(int cpu)
{
return cpu_rq(cpu)->cluster;
@@ -335,11 +333,6 @@ static inline void mark_task_starting(struct task_struct *p) { }
static inline void set_window_start(struct rq *rq) { }
static inline int sched_cpu_high_irqload(int cpu) { return 0; }
-static inline u64 sched_ktime_clock(void)
-{
- return 0;
-}
-
static inline void sched_account_irqstart(int cpu, struct task_struct *curr,
u64 wallclock)
{
diff --git a/kernel/signal.c b/kernel/signal.c
index 0b14157..deb04d5 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -503,7 +503,8 @@ int unhandled_signal(struct task_struct *tsk, int sig)
return !tsk->ptrace;
}
-static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
+static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
+ bool *resched_timer)
{
struct sigqueue *q, *first = NULL;
@@ -525,6 +526,12 @@ static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
still_pending:
list_del_init(&first->list);
copy_siginfo(info, &first->info);
+
+ *resched_timer =
+ (first->flags & SIGQUEUE_PREALLOC) &&
+ (info->si_code == SI_TIMER) &&
+ (info->si_sys_private);
+
__sigqueue_free(first);
} else {
/*
@@ -541,12 +548,12 @@ static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
- siginfo_t *info)
+ siginfo_t *info, bool *resched_timer)
{
int sig = next_signal(pending, mask);
if (sig)
- collect_signal(sig, pending, info);
+ collect_signal(sig, pending, info, resched_timer);
return sig;
}
@@ -558,15 +565,16 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
*/
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
+ bool resched_timer = false;
int signr;
/* We only dequeue private signals from ourselves, we don't let
* signalfd steal them
*/
- signr = __dequeue_signal(&tsk->pending, mask, info);
+ signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
if (!signr) {
signr = __dequeue_signal(&tsk->signal->shared_pending,
- mask, info);
+ mask, info, &resched_timer);
/*
* itimer signal ?
*
@@ -611,7 +619,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
*/
current->jobctl |= JOBCTL_STOP_DEQUEUED;
}
- if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
+ if (resched_timer) {
/*
* Release the siglock to ensure proper locking order
* of timer locks outside of siglocks. Note, we leave
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 534431a..b076cba 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -300,6 +300,31 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
+ {
+ .procname = "sched_group_upmigrate",
+ .data = &sysctl_sched_group_upmigrate_pct,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+#ifdef CONFIG_SCHED_HMP
+ .proc_handler = sched_hmp_proc_update_handler,
+#else
+ .proc_handler = walt_proc_update_handler,
+#endif
+ .extra1 = &sysctl_sched_group_downmigrate_pct,
+ },
+ {
+ .procname = "sched_group_downmigrate",
+ .data = &sysctl_sched_group_downmigrate_pct,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+#ifdef CONFIG_SCHED_HMP
+ .proc_handler = sched_hmp_proc_update_handler,
+#else
+ .proc_handler = walt_proc_update_handler,
+#endif
+ .extra1 = &zero,
+ .extra2 = &sysctl_sched_group_upmigrate_pct,
+ },
#endif
#ifdef CONFIG_SCHED_HMP
{
@@ -377,22 +402,6 @@ static struct ctl_table kern_table[] = {
.extra2 = &one_hundred,
},
{
- .procname = "sched_group_upmigrate",
- .data = &sysctl_sched_group_upmigrate_pct,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = sched_hmp_proc_update_handler,
- .extra1 = &zero,
- },
- {
- .procname = "sched_group_downmigrate",
- .data = &sysctl_sched_group_downmigrate_pct,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = sched_hmp_proc_update_handler,
- .extra1 = &zero,
- },
- {
.procname = "sched_init_task_load",
.data = &sysctl_sched_init_task_load_pct,
.maxlen = sizeof(unsigned int),
@@ -577,6 +586,20 @@ static struct ctl_table kern_table[] = {
.extra1 = &min_wakeup_granularity_ns,
.extra2 = &max_wakeup_granularity_ns,
},
+ {
+ .procname = "sched_upmigrate",
+ .data = &sysctl_sched_capacity_margin,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = sched_updown_migrate_handler,
+ },
+ {
+ .procname = "sched_downmigrate",
+ .data = &sysctl_sched_capacity_margin_down,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = sched_updown_migrate_handler,
+ },
#ifdef CONFIG_SMP
{
.procname = "sched_tunable_scaling",
@@ -3172,6 +3195,39 @@ int proc_do_large_bitmap(struct ctl_table *table, int write,
}
}
+static int do_proc_douintvec_capacity_conv(bool *negp, unsigned long *lvalp,
+ int *valp, int write, void *data)
+{
+ if (write) {
+ if (*negp)
+ return -EINVAL;
+ *valp = SCHED_FIXEDPOINT_SCALE * 100 / *lvalp;
+ } else {
+ *negp = false;
+ *lvalp = SCHED_FIXEDPOINT_SCALE * 100 / *valp;
+ }
+
+ return 0;
+}
+
+/**
+ * proc_douintvec_capacity - read a vector of integer percentages and convert
+ *                           them into sched capacity values
+ * @table: the sysctl table
+ * @write: %TRUE if this is a write to the sysctl file
+ * @buffer: the user buffer
+ * @lenp: the size of the user buffer
+ * @ppos: file position
+ *
+ * Returns 0 on success.
+ */
+int proc_douintvec_capacity(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ return do_proc_dointvec(table, write, buffer, lenp, ppos,
+ do_proc_douintvec_capacity_conv, NULL);
+}
+
#else /* CONFIG_PROC_SYSCTL */
int proc_dostring(struct ctl_table *table, int write,
@@ -3229,6 +3285,11 @@ int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write,
return -ENOSYS;
}
+int proc_douintvec_capacity(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ return -ENOSYS;
+}
#endif /* CONFIG_PROC_SYSCTL */
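
A worked round trip of the conversion helper above (userspace arithmetic only; SCHED_FIXEDPOINT_SCALE is assumed to be 1024): writing 95 to sched_upmigrate stores a margin of 102400 / 95 = 1077, writing 85 to sched_downmigrate stores 102400 / 85 = 1204, and reading converts back to 95 and 85. sched_updown_migrate_handler() then rejects any write that would leave the upmigrate margin larger than the downmigrate margin, i.e. an upmigrate percentage below the downmigrate percentage.

    /* illustration of do_proc_douintvec_capacity_conv(); scale of 1024 assumed */
    unsigned int pct_to_margin(unsigned int pct) { return 1024 * 100 / pct; }
    unsigned int margin_to_pct(unsigned int m)   { return 1024 * 100 / m; }
    /* pct_to_margin(95) == 1077, pct_to_margin(85) == 1204 */
    /* margin_to_pct(1077) == 95, margin_to_pct(1204) == 85 */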
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index fa80192..5dfdf39 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -568,7 +568,7 @@ void alarm_start_relative(struct alarm *alarm, ktime_t start)
{
struct alarm_base *base = &alarm_bases[alarm->type];
- start = ktime_add(start, base->gettime());
+ start = ktime_add_safe(start, base->gettime());
alarm_start(alarm, start);
}
EXPORT_SYMBOL_GPL(alarm_start_relative);
@@ -654,7 +654,7 @@ u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval)
overrun++;
}
- alarm->node.expires = ktime_add(alarm->node.expires, interval);
+ alarm->node.expires = ktime_add_safe(alarm->node.expires, interval);
return overrun;
}
EXPORT_SYMBOL_GPL(alarm_forward);
@@ -840,13 +840,21 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
/* start the timer */
timr->it.alarm.interval = timespec_to_ktime(new_setting->it_interval);
+
+ /*
+ * Rate limit to the tick as a hot fix to prevent DOS. Will be
+ * mopped up later.
+ */
+ if (ktime_to_ns(timr->it.alarm.interval) < TICK_NSEC)
+ timr->it.alarm.interval = ktime_set(0, TICK_NSEC);
+
exp = timespec_to_ktime(new_setting->it_value);
/* Convert (if necessary) to absolute time */
if (flags != TIMER_ABSTIME) {
ktime_t now;
now = alarm_bases[timr->it.alarm.alarmtimer.type].gettime();
- exp = ktime_add(now, exp);
+ exp = ktime_add_safe(now, exp);
}
alarm_start(&timr->it.alarm.alarmtimer, exp);
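
The switch from ktime_add() to ktime_add_safe() matters because the expiry arithmetic is fed by user-controlled values: a plain addition can wrap negative and fire the alarm immediately, while the safe variant clamps on overflow (the kernel clamps to KTIME_SEC_MAX seconds; INT64_MAX below is purely illustrative). A self-contained sketch of that saturating-add behaviour:

#include <stdint.h>
#include <stdio.h>

#define KTIME_MAX INT64_MAX	/* illustrative clamp value, not the kernel's */

/* minimal model of a saturating ktime addition: clamp instead of wrapping */
static int64_t ktime_add_safe_model(int64_t lhs, int64_t rhs)
{
	uint64_t res = (uint64_t)lhs + (uint64_t)rhs;

	/* detect signed overflow of the sum and saturate */
	if ((int64_t)res < 0 || (int64_t)res < lhs)
		return KTIME_MAX;
	return (int64_t)res;
}

int main(void)
{
	int64_t now = 1000000000;		/* 1s in ns */
	int64_t huge = INT64_MAX - 10;		/* attacker-supplied relative expiry */

	/* a plain add would wrap negative; the safe variant saturates */
	printf("%lld\n", (long long)ktime_add_safe_model(now, huge));
	return 0;
}
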
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index bfe589e..234d3e4 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -116,6 +116,26 @@ static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
tk->offs_boot = ktime_add(tk->offs_boot, delta);
}
+/*
+ * tk_clock_read - atomic clocksource read() helper
+ *
+ * This helper is necessary to use in the read paths because, while the
+ * seqlock ensures we don't return a bad value while structures are updated,
+ * it doesn't protect from potential crashes. There is the possibility that
+ * the tkr's clocksource may change between the read reference, and the
+ * clock reference passed to the read function. This can cause crashes if
+ * the wrong clocksource is passed to the wrong read function.
+ * This isn't necessary to use when holding the timekeeper_lock or doing
+ * a read of the fast-timekeeper tkrs (which is protected by its own locking
+ * and update logic).
+ */
+static inline u64 tk_clock_read(struct tk_read_base *tkr)
+{
+ struct clocksource *clock = READ_ONCE(tkr->clock);
+
+ return clock->read(clock);
+}
+
#ifdef CONFIG_DEBUG_TIMEKEEPING
#define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */
@@ -173,7 +193,7 @@ static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
*/
do {
seq = read_seqcount_begin(&tk_core.seq);
- now = tkr->read(tkr->clock);
+ now = tk_clock_read(tkr);
last = tkr->cycle_last;
mask = tkr->mask;
max = tkr->clock->max_cycles;
@@ -207,7 +227,7 @@ static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
cycle_t cycle_now, delta;
/* read clocksource */
- cycle_now = tkr->read(tkr->clock);
+ cycle_now = tk_clock_read(tkr);
/* calculate the delta since the last update_wall_time */
delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
@@ -236,12 +256,10 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
++tk->cs_was_changed_seq;
old_clock = tk->tkr_mono.clock;
tk->tkr_mono.clock = clock;
- tk->tkr_mono.read = clock->read;
tk->tkr_mono.mask = clock->mask;
- tk->tkr_mono.cycle_last = tk->tkr_mono.read(clock);
+ tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono);
tk->tkr_raw.clock = clock;
- tk->tkr_raw.read = clock->read;
tk->tkr_raw.mask = clock->mask;
tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;
@@ -260,8 +278,7 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
/* Go back from cycles -> shifted ns */
tk->xtime_interval = (u64) interval * clock->mult;
tk->xtime_remainder = ntpinterval - tk->xtime_interval;
- tk->raw_interval =
- ((u64) interval * clock->mult) >> clock->shift;
+ tk->raw_interval = interval * clock->mult;
/* if changing clocks, convert xtime_nsec shift units */
if (old_clock) {
@@ -405,7 +422,7 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
now += timekeeping_delta_to_ns(tkr,
clocksource_delta(
- tkr->read(tkr->clock),
+ tk_clock_read(tkr),
tkr->cycle_last,
tkr->mask));
} while (read_seqcount_retry(&tkf->seq, seq));
@@ -462,6 +479,10 @@ static cycle_t dummy_clock_read(struct clocksource *cs)
return cycles_at_suspend;
}
+static struct clocksource dummy_clock = {
+ .read = dummy_clock_read,
+};
+
/**
* halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
* @tk: Timekeeper to snapshot.
@@ -478,13 +499,13 @@ static void halt_fast_timekeeper(struct timekeeper *tk)
struct tk_read_base *tkr = &tk->tkr_mono;
memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
- cycles_at_suspend = tkr->read(tkr->clock);
- tkr_dummy.read = dummy_clock_read;
+ cycles_at_suspend = tk_clock_read(tkr);
+ tkr_dummy.clock = &dummy_clock;
update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);
tkr = &tk->tkr_raw;
memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
- tkr_dummy.read = dummy_clock_read;
+ tkr_dummy.clock = &dummy_clock;
update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
}
@@ -650,11 +671,10 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
*/
static void timekeeping_forward_now(struct timekeeper *tk)
{
- struct clocksource *clock = tk->tkr_mono.clock;
cycle_t cycle_now, delta;
s64 nsec;
- cycle_now = tk->tkr_mono.read(clock);
+ cycle_now = tk_clock_read(&tk->tkr_mono);
delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
tk->tkr_mono.cycle_last = cycle_now;
tk->tkr_raw.cycle_last = cycle_now;
@@ -930,8 +950,7 @@ void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot)
do {
seq = read_seqcount_begin(&tk_core.seq);
-
- now = tk->tkr_mono.read(tk->tkr_mono.clock);
+ now = tk_clock_read(&tk->tkr_mono);
systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq;
systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq;
base_real = ktime_add(tk->tkr_mono.base,
@@ -1110,7 +1129,7 @@ int get_device_system_crosststamp(int (*get_time_fn)
* Check whether the system counter value provided by the
* device driver is on the current timekeeping interval.
*/
- now = tk->tkr_mono.read(tk->tkr_mono.clock);
+ now = tk_clock_read(&tk->tkr_mono);
interval_start = tk->tkr_mono.cycle_last;
if (!cycle_between(interval_start, cycles, now)) {
clock_was_set_seq = tk->clock_was_set_seq;
@@ -1668,7 +1687,7 @@ void timekeeping_resume(void)
* The less preferred source will only be tried if there is no better
* usable source. The rtc part is handled separately in rtc core code.
*/
- cycle_now = tk->tkr_mono.read(clock);
+ cycle_now = tk_clock_read(&tk->tkr_mono);
if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
cycle_now > tk->tkr_mono.cycle_last) {
u64 num, max = ULLONG_MAX;
@@ -2032,7 +2051,7 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
unsigned int *clock_set)
{
cycle_t interval = tk->cycle_interval << shift;
- u64 raw_nsecs;
+ u64 snsec_per_sec;
/* If the offset is smaller than a shifted interval, do nothing */
if (offset < interval)
@@ -2047,14 +2066,15 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
*clock_set |= accumulate_nsecs_to_secs(tk);
/* Accumulate raw time */
- raw_nsecs = (u64)tk->raw_interval << shift;
- raw_nsecs += tk->raw_time.tv_nsec;
- if (raw_nsecs >= NSEC_PER_SEC) {
- u64 raw_secs = raw_nsecs;
- raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
- tk->raw_time.tv_sec += raw_secs;
+ tk->tkr_raw.xtime_nsec += (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift;
+ tk->tkr_raw.xtime_nsec += tk->raw_interval << shift;
+ snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
+ while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) {
+ tk->tkr_raw.xtime_nsec -= snsec_per_sec;
+ tk->raw_time.tv_sec++;
}
- tk->raw_time.tv_nsec = raw_nsecs;
+ tk->raw_time.tv_nsec = tk->tkr_raw.xtime_nsec >> tk->tkr_raw.shift;
+ tk->tkr_raw.xtime_nsec -= (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift;
/* Accumulate error between NTP and clock interval */
tk->ntp_error += tk->ntp_tick << shift;
@@ -2086,7 +2106,7 @@ void update_wall_time(void)
#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
offset = real_tk->cycle_interval;
#else
- offset = clocksource_delta(tk->tkr_mono.read(tk->tkr_mono.clock),
+ offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
#endif
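
The rewritten raw-time accumulation keeps tkr_raw.xtime_nsec in "shifted" nanoseconds (nanoseconds << shift) so the sub-nanosecond remainder is carried between intervals instead of being truncated each time. A standalone sketch of that carry loop; the shift value and per-tick interval here are illustrative, not the clocksource's real parameters:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	unsigned int shift = 8;				/* illustrative shift */
	uint64_t raw_interval = 999999999ULL << shift;	/* shifted ns accumulated per tick (example) */
	uint64_t xtime_nsec = 0;			/* shifted-ns remainder carried between ticks */
	uint64_t snsec_per_sec = NSEC_PER_SEC << shift;
	uint64_t secs = 0;

	for (int tick = 0; tick < 3; tick++) {
		xtime_nsec += raw_interval;
		while (xtime_nsec >= snsec_per_sec) {	/* carry whole seconds out */
			xtime_nsec -= snsec_per_sec;
			secs++;
		}
	}
	/* three ~0.999999999s ticks: 2 whole seconds, remainder kept at full precision */
	printf("secs=%llu rem_ns=%llu\n",
	       (unsigned long long)secs,
	       (unsigned long long)(xtime_nsec >> shift));
	return 0;
}
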
diff --git a/kernel/ucount.c b/kernel/ucount.c
index f4ac185..c761cdb 100644
--- a/kernel/ucount.c
+++ b/kernel/ucount.c
@@ -231,11 +231,10 @@ static __init int user_namespace_sysctl_init(void)
* properly.
*/
user_header = register_sysctl("user", empty);
+ kmemleak_ignore(user_header);
BUG_ON(!user_header);
BUG_ON(!setup_userns_sysctls(&init_user_ns));
#endif
return 0;
}
subsys_initcall(user_namespace_sysctl_init);
-
-
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 44ae68a..cffb5f2 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -25,33 +25,15 @@
#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
-#include <linux/perf_event.h>
#include <linux/kthread.h>
#include <soc/qcom/watchdog.h>
-/*
- * The run state of the lockup detectors is controlled by the content of the
- * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
- * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
- *
- * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled'
- * are variables that are only used as an 'interface' between the parameters
- * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The
- * 'watchdog_thresh' variable is handled differently because its value is not
- * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh'
- * is equal zero.
- */
-#define NMI_WATCHDOG_ENABLED_BIT 0
-#define SOFT_WATCHDOG_ENABLED_BIT 1
-#define NMI_WATCHDOG_ENABLED (1 << NMI_WATCHDOG_ENABLED_BIT)
-#define SOFT_WATCHDOG_ENABLED (1 << SOFT_WATCHDOG_ENABLED_BIT)
-
static DEFINE_MUTEX(watchdog_proc_mutex);
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
-static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED|NMI_WATCHDOG_ENABLED;
+#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
+unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED|NMI_WATCHDOG_ENABLED;
#else
-static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED;
+unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED;
#endif
int __read_mostly nmi_watchdog_enabled;
int __read_mostly soft_watchdog_enabled;
@@ -61,9 +43,6 @@ int __read_mostly watchdog_thresh = 10;
#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
-#else
-#define sysctl_softlockup_all_cpu_backtrace 0
-#define sysctl_hardlockup_all_cpu_backtrace 0
#endif
static struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
@@ -72,6 +51,8 @@ unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
#define for_each_watchdog_cpu(cpu) \
for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)
+atomic_t watchdog_park_in_progress = ATOMIC_INIT(0);
+
/*
* The 'watchdog_running' variable is set to 1 when the watchdog threads
* are registered/started and is set to 0 when the watchdog threads are
@@ -103,55 +84,9 @@ static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
-static DEFINE_PER_CPU(bool, hard_watchdog_warn);
-static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
-#endif
-#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
-static cpumask_t __read_mostly watchdog_cpus;
-#endif
-#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
-static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
-#endif
static unsigned long soft_lockup_nmi_warn;
-/* boot commands */
-/*
- * Should we panic when a soft-lockup or hard-lockup occurs:
- */
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
-unsigned int __read_mostly hardlockup_panic =
- CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
-static unsigned long __maybe_unused hardlockup_allcpu_dumped;
-/*
- * We may not want to enable hard lockup detection by default in all cases,
- * for example when running the kernel as a guest on a hypervisor. In these
- * cases this function can be called to disable hard lockup detection. This
- * function should only be executed once by the boot processor before the
- * kernel command line parameters are parsed, because otherwise it is not
- * possible to override this in hardlockup_panic_setup().
- */
-void hardlockup_detector_disable(void)
-{
- watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
-}
-
-static int __init hardlockup_panic_setup(char *str)
-{
- if (!strncmp(str, "panic", 5))
- hardlockup_panic = 1;
- else if (!strncmp(str, "nopanic", 7))
- hardlockup_panic = 0;
- else if (!strncmp(str, "0", 1))
- watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
- else if (!strncmp(str, "1", 1))
- watchdog_enabled |= NMI_WATCHDOG_ENABLED;
- return 1;
-}
-__setup("nmi_watchdog=", hardlockup_panic_setup);
-#endif
-
unsigned int __read_mostly softlockup_panic =
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;
@@ -272,32 +207,14 @@ void touch_all_softlockup_watchdogs(void)
wq_watchdog_touch(-1);
}
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
-void touch_nmi_watchdog(void)
-{
- /*
- * Using __raw here because some code paths have
- * preemption enabled. If preemption is enabled
- * then interrupts should be enabled too, in which
- * case we shouldn't have to worry about the watchdog
- * going off.
- */
- raw_cpu_write(watchdog_nmi_touch, true);
- touch_softlockup_watchdog();
-}
-EXPORT_SYMBOL(touch_nmi_watchdog);
-
-#endif
-
void touch_softlockup_watchdog_sync(void)
{
__this_cpu_write(softlockup_touch_sync, true);
__this_cpu_write(watchdog_touch_ts, 0);
}
-#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
/* watchdog detector functions */
-static bool is_hardlockup(void)
+bool is_hardlockup(void)
{
unsigned long hrint = __this_cpu_read(hrtimer_interrupts);
@@ -307,80 +224,6 @@ static bool is_hardlockup(void)
__this_cpu_write(hrtimer_interrupts_saved, hrint);
return false;
}
-#endif
-
-#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
-static unsigned int watchdog_next_cpu(unsigned int cpu)
-{
- cpumask_t cpus = watchdog_cpus;
- unsigned int next_cpu;
-
- next_cpu = cpumask_next(cpu, &cpus);
- if (next_cpu >= nr_cpu_ids)
- next_cpu = cpumask_first(&cpus);
-
- if (next_cpu == cpu)
- return nr_cpu_ids;
-
- return next_cpu;
-}
-
-static int is_hardlockup_other_cpu(unsigned int cpu)
-{
- unsigned long hrint = per_cpu(hrtimer_interrupts, cpu);
-
- if (per_cpu(hrtimer_interrupts_saved, cpu) == hrint)
- return 1;
-
- per_cpu(hrtimer_interrupts_saved, cpu) = hrint;
- return 0;
-}
-
-static void watchdog_check_hardlockup_other_cpu(void)
-{
- unsigned int next_cpu;
-
- /*
- * Test for hardlockups every 3 samples. The sample period is
- * watchdog_thresh * 2 / 5, so 3 samples gets us back to slightly over
- * watchdog_thresh (over by 20%).
- */
- if (__this_cpu_read(hrtimer_interrupts) % 3 != 0)
- return;
-
- /* check for a hardlockup on the next cpu */
- next_cpu = watchdog_next_cpu(smp_processor_id());
- if (next_cpu >= nr_cpu_ids)
- return;
-
- smp_rmb();
-
- if (per_cpu(watchdog_nmi_touch, next_cpu) == true) {
- per_cpu(watchdog_nmi_touch, next_cpu) = false;
- return;
- }
-
- if (is_hardlockup_other_cpu(next_cpu)) {
- /* only warn once */
- if (per_cpu(hard_watchdog_warn, next_cpu) == true)
- return;
-
- if (hardlockup_panic) {
- pr_err("Watchdog detected hard LOCKUP on cpu %u",
- next_cpu);
- msm_trigger_wdog_bite();
- }
- else
- WARN(1, "Watchdog detected hard LOCKUP on cpu %u", next_cpu);
-
- per_cpu(hard_watchdog_warn, next_cpu) = true;
- } else {
- per_cpu(hard_watchdog_warn, next_cpu) = false;
- }
-}
-#else
-static inline void watchdog_check_hardlockup_other_cpu(void) { return; }
-#endif
static int is_softlockup(unsigned long touch_ts)
{
@@ -394,80 +237,22 @@ static int is_softlockup(unsigned long touch_ts)
return 0;
}
-#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
-
-static struct perf_event_attr wd_hw_attr = {
- .type = PERF_TYPE_HARDWARE,
- .config = PERF_COUNT_HW_CPU_CYCLES,
- .size = sizeof(struct perf_event_attr),
- .pinned = 1,
- .disabled = 1,
-};
-
-/* Callback function for perf event subsystem */
-static void watchdog_overflow_callback(struct perf_event *event,
- struct perf_sample_data *data,
- struct pt_regs *regs)
-{
- /* Ensure the watchdog never gets throttled */
- event->hw.interrupts = 0;
-
- if (__this_cpu_read(watchdog_nmi_touch) == true) {
- __this_cpu_write(watchdog_nmi_touch, false);
- return;
- }
-
- /* check for a hardlockup
- * This is done by making sure our timer interrupt
- * is incrementing. The timer interrupt should have
- * fired multiple times before we overflow'd. If it hasn't
- * then this is a good indication the cpu is stuck
- */
- if (is_hardlockup()) {
- int this_cpu = smp_processor_id();
-
- /* only print hardlockups once */
- if (__this_cpu_read(hard_watchdog_warn) == true)
- return;
-
- pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
- if (hardlockup_panic)
- msm_trigger_wdog_bite();
-
- print_modules();
- print_irqtrace_events(current);
- if (regs)
- show_regs(regs);
- else
- dump_stack();
-
- /*
- * Perform all-CPU dump only once to avoid multiple hardlockups
- * generating interleaving traces
- */
- if (sysctl_hardlockup_all_cpu_backtrace &&
- !test_and_set_bit(0, &hardlockup_allcpu_dumped))
- trigger_allbutself_cpu_backtrace();
-
- if (hardlockup_panic)
- nmi_panic(regs, "Hard LOCKUP");
-
- __this_cpu_write(hard_watchdog_warn, true);
- return;
- }
-
- __this_cpu_write(hard_watchdog_warn, false);
- return;
-}
-#endif /* CONFIG_HARDLOCKUP_DETECTOR_NMI */
-
static void watchdog_interrupt_count(void)
{
__this_cpu_inc(hrtimer_interrupts);
}
-static int watchdog_nmi_enable(unsigned int cpu);
-static void watchdog_nmi_disable(unsigned int cpu);
+/*
+ * These two functions are mostly architecture specific; define them
+ * as weak symbols here so an architecture can override them.
+ */
+int __weak watchdog_nmi_enable(unsigned int cpu)
+{
+ return 0;
+}
+void __weak watchdog_nmi_disable(unsigned int cpu)
+{
+}
static int watchdog_enable_all_cpus(void);
static void watchdog_disable_all_cpus(void);
@@ -480,12 +265,12 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
int duration;
int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
+ if (atomic_read(&watchdog_park_in_progress) != 0)
+ return HRTIMER_NORESTART;
+
/* kick the hardlockup detector */
watchdog_interrupt_count();
- /* test for hardlockups on the next cpu */
- watchdog_check_hardlockup_other_cpu();
-
/* kick the softlockup detector */
wake_up_process(__this_cpu_read(softlockup_watchdog));
@@ -694,144 +479,6 @@ static void watchdog(unsigned int cpu)
watchdog_nmi_disable(cpu);
}
-#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
-/*
- * People like the simple clean cpu node info on boot.
- * Reduce the watchdog noise by only printing messages
- * that are different from what cpu0 displayed.
- */
-static unsigned long cpu0_err;
-
-static int watchdog_nmi_enable(unsigned int cpu)
-{
- struct perf_event_attr *wd_attr;
- struct perf_event *event = per_cpu(watchdog_ev, cpu);
-
- /* nothing to do if the hard lockup detector is disabled */
- if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
- goto out;
-
- /* is it already setup and enabled? */
- if (event && event->state > PERF_EVENT_STATE_OFF)
- goto out;
-
- /* it is setup but not enabled */
- if (event != NULL)
- goto out_enable;
-
- wd_attr = &wd_hw_attr;
- wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);
-
- /* Try to register using hardware perf events */
- event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);
-
- /* save cpu0 error for future comparision */
- if (cpu == 0 && IS_ERR(event))
- cpu0_err = PTR_ERR(event);
-
- if (!IS_ERR(event)) {
- /* only print for cpu0 or different than cpu0 */
- if (cpu == 0 || cpu0_err)
- pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
- goto out_save;
- }
-
- /*
- * Disable the hard lockup detector if _any_ CPU fails to set up
- * set up the hardware perf event. The watchdog() function checks
- * the NMI_WATCHDOG_ENABLED bit periodically.
- *
- * The barriers are for syncing up watchdog_enabled across all the
- * cpus, as clear_bit() does not use barriers.
- */
- smp_mb__before_atomic();
- clear_bit(NMI_WATCHDOG_ENABLED_BIT, &watchdog_enabled);
- smp_mb__after_atomic();
-
- /* skip displaying the same error again */
- if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
- return PTR_ERR(event);
-
- /* vary the KERN level based on the returned errno */
- if (PTR_ERR(event) == -EOPNOTSUPP)
- pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
- else if (PTR_ERR(event) == -ENOENT)
- pr_warn("disabled (cpu%i): hardware events not enabled\n",
- cpu);
- else
- pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
- cpu, PTR_ERR(event));
-
- pr_info("Shutting down hard lockup detector on all cpus\n");
-
- return PTR_ERR(event);
-
- /* success path */
-out_save:
- per_cpu(watchdog_ev, cpu) = event;
-out_enable:
- perf_event_enable(per_cpu(watchdog_ev, cpu));
-out:
- return 0;
-}
-
-static void watchdog_nmi_disable(unsigned int cpu)
-{
- struct perf_event *event = per_cpu(watchdog_ev, cpu);
-
- if (event) {
- perf_event_disable(event);
- per_cpu(watchdog_ev, cpu) = NULL;
-
- /* should be in cleanup, but blocks oprofile */
- perf_event_release_kernel(event);
- }
- if (cpu == 0) {
- /* watchdog_nmi_enable() expects this to be zero initially. */
- cpu0_err = 0;
- }
-}
-
-#else
-#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
-static int watchdog_nmi_enable(unsigned int cpu)
-{
- /*
- * The new cpu will be marked online before the first hrtimer interrupt
- * runs on it. If another cpu tests for a hardlockup on the new cpu
- * before it has run its first hrtimer, it will get a false positive.
- * Touch the watchdog on the new cpu to delay the first check for at
- * least 3 sampling periods to guarantee one hrtimer has run on the new
- * cpu.
- */
- per_cpu(watchdog_nmi_touch, cpu) = true;
- smp_wmb();
- cpumask_set_cpu(cpu, &watchdog_cpus);
- return 0;
-}
-
-static void watchdog_nmi_disable(unsigned int cpu)
-{
- unsigned int next_cpu = watchdog_next_cpu(cpu);
-
- /*
- * Offlining this cpu will cause the cpu before this one to start
- * checking the one after this one. If this cpu just finished checking
- * the next cpu and updating hrtimer_interrupts_saved, and then the
- * previous cpu checks it within one sample period, it will trigger a
- * false positive. Touch the watchdog on the next cpu to prevent it.
- */
- if (next_cpu < nr_cpu_ids)
- per_cpu(watchdog_nmi_touch, next_cpu) = true;
- smp_wmb();
- cpumask_clear_cpu(cpu, &watchdog_cpus);
-}
-#else
-static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
-static void watchdog_nmi_disable(unsigned int cpu) { return; }
-#endif /* CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU */
-#endif /* CONFIG_HARDLOCKUP_DETECTOR_NMI */
-
static struct smp_hotplug_thread watchdog_threads = {
.store = &softlockup_watchdog,
.thread_should_run = watchdog_should_run,
@@ -859,12 +506,16 @@ static int watchdog_park_threads(void)
{
int cpu, ret = 0;
+ atomic_set(&watchdog_park_in_progress, 1);
+
for_each_watchdog_cpu(cpu) {
ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
if (ret)
break;
}
+ atomic_set(&watchdog_park_in_progress, 0);
+
return ret;
}
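
The watchdog_nmi_enable()/watchdog_nmi_disable() defaults above rely on weak symbols: the generic file provides a no-op marked __weak, and any stronger definition elsewhere (here, kernel/watchdog_hld.c) overrides it at link time without any registration code. A minimal, runnable sketch of the pattern; hld_enable() is a hypothetical stand-in name, not a kernel symbol:

#include <stdio.h>

/*
 * Weak default: used only when no other translation unit provides a
 * non-weak definition of the same symbol. Linking another object file
 * that defines hld_enable() replaces this body silently.
 */
int __attribute__((weak)) hld_enable(unsigned int cpu)
{
	printf("cpu%u: no hard lockup backend, generic no-op\n", cpu);
	return 0;
}

int main(void)
{
	return hld_enable(0);
}
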
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
new file mode 100644
index 0000000..12b8dd6
--- /dev/null
+++ b/kernel/watchdog_hld.c
@@ -0,0 +1,230 @@
+/*
+ * Detect hard lockups on a system
+ *
+ * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
+ *
+ * Note: Most of this code is borrowed heavily from the original softlockup
+ * detector, so thanks to Ingo for the initial implementation.
+ * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
+ * to those contributors as well.
+ */
+
+#define pr_fmt(fmt) "NMI watchdog: " fmt
+
+#include <linux/nmi.h>
+#include <linux/module.h>
+#include <asm/irq_regs.h>
+#include <linux/perf_event.h>
+
+static DEFINE_PER_CPU(bool, hard_watchdog_warn);
+static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
+static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
+
+/* boot commands */
+/*
+ * Should we panic when a soft-lockup or hard-lockup occurs:
+ */
+unsigned int __read_mostly hardlockup_panic =
+ CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
+static unsigned long hardlockup_allcpu_dumped;
+/*
+ * We may not want to enable hard lockup detection by default in all cases,
+ * for example when running the kernel as a guest on a hypervisor. In these
+ * cases this function can be called to disable hard lockup detection. This
+ * function should only be executed once by the boot processor before the
+ * kernel command line parameters are parsed, because otherwise it is not
+ * possible to override this in hardlockup_panic_setup().
+ */
+void hardlockup_detector_disable(void)
+{
+ watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
+}
+
+static int __init hardlockup_panic_setup(char *str)
+{
+ if (!strncmp(str, "panic", 5))
+ hardlockup_panic = 1;
+ else if (!strncmp(str, "nopanic", 7))
+ hardlockup_panic = 0;
+ else if (!strncmp(str, "0", 1))
+ watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
+ else if (!strncmp(str, "1", 1))
+ watchdog_enabled |= NMI_WATCHDOG_ENABLED;
+ return 1;
+}
+__setup("nmi_watchdog=", hardlockup_panic_setup);
+
+void touch_nmi_watchdog(void)
+{
+ /*
+ * Using __raw here because some code paths have
+ * preemption enabled. If preemption is enabled
+ * then interrupts should be enabled too, in which
+ * case we shouldn't have to worry about the watchdog
+ * going off.
+ */
+ raw_cpu_write(watchdog_nmi_touch, true);
+ touch_softlockup_watchdog();
+}
+EXPORT_SYMBOL(touch_nmi_watchdog);
+
+static struct perf_event_attr wd_hw_attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+ .size = sizeof(struct perf_event_attr),
+ .pinned = 1,
+ .disabled = 1,
+};
+
+/* Callback function for perf event subsystem */
+static void watchdog_overflow_callback(struct perf_event *event,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
+{
+ /* Ensure the watchdog never gets throttled */
+ event->hw.interrupts = 0;
+
+ if (atomic_read(&watchdog_park_in_progress) != 0)
+ return;
+
+ if (__this_cpu_read(watchdog_nmi_touch) == true) {
+ __this_cpu_write(watchdog_nmi_touch, false);
+ return;
+ }
+
+ /* check for a hardlockup
+ * This is done by making sure our timer interrupt
+ * is incrementing. The timer interrupt should have
+ * fired multiple times before we overflow'd. If it hasn't
+ * then this is a good indication the cpu is stuck
+ */
+ if (is_hardlockup()) {
+ int this_cpu = smp_processor_id();
+
+ /* only print hardlockups once */
+ if (__this_cpu_read(hard_watchdog_warn) == true)
+ return;
+
+ pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
+ print_modules();
+ print_irqtrace_events(current);
+ if (regs)
+ show_regs(regs);
+ else
+ dump_stack();
+
+ /*
+ * Perform all-CPU dump only once to avoid multiple hardlockups
+ * generating interleaving traces
+ */
+ if (sysctl_hardlockup_all_cpu_backtrace &&
+ !test_and_set_bit(0, &hardlockup_allcpu_dumped))
+ trigger_allbutself_cpu_backtrace();
+
+ if (hardlockup_panic)
+ nmi_panic(regs, "Hard LOCKUP");
+
+ __this_cpu_write(hard_watchdog_warn, true);
+ return;
+ }
+
+ __this_cpu_write(hard_watchdog_warn, false);
+ return;
+}
+
+/*
+ * People like the simple clean cpu node info on boot.
+ * Reduce the watchdog noise by only printing messages
+ * that are different from what cpu0 displayed.
+ */
+static unsigned long cpu0_err;
+
+int watchdog_nmi_enable(unsigned int cpu)
+{
+ struct perf_event_attr *wd_attr;
+ struct perf_event *event = per_cpu(watchdog_ev, cpu);
+
+ /* nothing to do if the hard lockup detector is disabled */
+ if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
+ goto out;
+
+ /* is it already setup and enabled? */
+ if (event && event->state > PERF_EVENT_STATE_OFF)
+ goto out;
+
+ /* it is setup but not enabled */
+ if (event != NULL)
+ goto out_enable;
+
+ wd_attr = &wd_hw_attr;
+ wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);
+
+ /* Try to register using hardware perf events */
+ event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);
+
+	/* save cpu0 error for future comparison */
+ if (cpu == 0 && IS_ERR(event))
+ cpu0_err = PTR_ERR(event);
+
+ if (!IS_ERR(event)) {
+ /* only print for cpu0 or different than cpu0 */
+ if (cpu == 0 || cpu0_err)
+ pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
+ goto out_save;
+ }
+
+ /*
+	 * Disable the hard lockup detector if _any_ CPU fails to set up
+	 * the hardware perf event. The watchdog() function checks
+ * the NMI_WATCHDOG_ENABLED bit periodically.
+ *
+ * The barriers are for syncing up watchdog_enabled across all the
+ * cpus, as clear_bit() does not use barriers.
+ */
+ smp_mb__before_atomic();
+ clear_bit(NMI_WATCHDOG_ENABLED_BIT, &watchdog_enabled);
+ smp_mb__after_atomic();
+
+ /* skip displaying the same error again */
+ if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
+ return PTR_ERR(event);
+
+ /* vary the KERN level based on the returned errno */
+ if (PTR_ERR(event) == -EOPNOTSUPP)
+ pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
+ else if (PTR_ERR(event) == -ENOENT)
+ pr_warn("disabled (cpu%i): hardware events not enabled\n",
+ cpu);
+ else
+ pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
+ cpu, PTR_ERR(event));
+
+ pr_info("Shutting down hard lockup detector on all cpus\n");
+
+ return PTR_ERR(event);
+
+ /* success path */
+out_save:
+ per_cpu(watchdog_ev, cpu) = event;
+out_enable:
+ perf_event_enable(per_cpu(watchdog_ev, cpu));
+out:
+ return 0;
+}
+
+void watchdog_nmi_disable(unsigned int cpu)
+{
+ struct perf_event *event = per_cpu(watchdog_ev, cpu);
+
+ if (event) {
+ perf_event_disable(event);
+ per_cpu(watchdog_ev, cpu) = NULL;
+
+ /* should be in cleanup, but blocks oprofile */
+ perf_event_release_kernel(event);
+ }
+ if (cpu == 0) {
+ /* watchdog_nmi_enable() expects this to be zero initially. */
+ cpu0_err = 0;
+ }
+}
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 6878aa8..2f9f7aa 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -775,27 +775,15 @@
The overhead should be minimal. A periodic hrtimer runs to
generate interrupts and kick the watchdog task every 4 seconds.
An NMI is generated every 10 seconds or so to check for hardlockups.
- If NMIs are not available on the platform, every 12 seconds the
- hrtimer interrupt on one cpu will be used to check for hardlockups
- on the next cpu.
The frequency of hrtimer and NMI events and the soft and hard lockup
thresholds can be controlled through the sysctl watchdog_thresh.
-config HARDLOCKUP_DETECTOR_NMI
+config HARDLOCKUP_DETECTOR
def_bool y
depends on LOCKUP_DETECTOR && !HAVE_NMI_WATCHDOG
depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI
-config HARDLOCKUP_DETECTOR_OTHER_CPU
- def_bool y
- depends on LOCKUP_DETECTOR && SMP
- depends on !HARDLOCKUP_DETECTOR_NMI && !HAVE_NMI_WATCHDOG
-
-config HARDLOCKUP_DETECTOR
- def_bool y
- depends on HARDLOCKUP_DETECTOR_NMI || HARDLOCKUP_DETECTOR_OTHER_CPU
-
config BOOTPARAM_HARDLOCKUP_PANIC
bool "Panic (Reboot) On Hard Lockups"
depends on HARDLOCKUP_DETECTOR
diff --git a/lib/cmdline.c b/lib/cmdline.c
index 8f13cf7..79069d7 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -22,14 +22,14 @@
* the values[M, M+1, ..., N] into the ints array in get_options.
*/
-static int get_range(char **str, int *pint)
+static int get_range(char **str, int *pint, int n)
{
int x, inc_counter, upper_range;
(*str)++;
upper_range = simple_strtol((*str), NULL, 0);
inc_counter = upper_range - *pint;
- for (x = *pint; x < upper_range; x++)
+ for (x = *pint; n && x < upper_range; x++, n--)
*pint++ = x;
return inc_counter;
}
@@ -96,7 +96,7 @@ char *get_options(const char *str, int nints, int *ints)
break;
if (res == 3) {
int range_nums;
- range_nums = get_range((char **)&str, ints + i);
+ range_nums = get_range((char **)&str, ints + i, nints - i);
if (range_nums < 0)
break;
/*
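
The extra length argument to get_range() stops a range such as "1-1000000" from writing past the caller's ints[] array: the loop now exits when either the range or the remaining slots run out. A small sketch of the clamped expansion (array size and range values are illustrative):

#include <stdio.h>

/* expand [start, upper) into out[], but never write more than n slots */
static int fill_range(int *out, int n, int start, int upper)
{
	int x, written = 0;

	for (x = start; n && x < upper; x++, n--)
		out[written++] = x;
	return written;
}

int main(void)
{
	int ints[8];
	/* a hostile "0-1000000" range only fills the 8 available slots */
	int got = fill_range(ints, 8, 0, 1000000);

	printf("stored %d values, last=%d\n", got, ints[got - 1]);
	return 0;
}
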
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
index 74a54b7..9f79547 100644
--- a/lib/libcrc32c.c
+++ b/lib/libcrc32c.c
@@ -43,7 +43,7 @@ static struct crypto_shash *tfm;
u32 crc32c(u32 crc, const void *address, unsigned int length)
{
SHASH_DESC_ON_STACK(shash, tfm);
- u32 *ctx = (u32 *)shash_desc_ctx(shash);
+ u32 ret, *ctx = (u32 *)shash_desc_ctx(shash);
int err;
shash->tfm = tfm;
@@ -53,7 +53,9 @@ u32 crc32c(u32 crc, const void *address, unsigned int length)
err = crypto_shash_update(shash, address, length);
BUG_ON(err);
- return *ctx;
+ ret = *ctx;
+ barrier_data(ctx);
+ return ret;
}
EXPORT_SYMBOL(crc32c);
diff --git a/mm/gup.c b/mm/gup.c
index ec4f827..c63a034 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -370,11 +370,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
/* mlock all present pages, but do not fault in new pages */
if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
return -ENOENT;
- /* For mm_populate(), just skip the stack guard page. */
- if ((*flags & FOLL_POPULATE) &&
- (stack_guard_page_start(vma, address) ||
- stack_guard_page_end(vma, address + PAGE_SIZE)))
- return -ENOENT;
if (*flags & FOLL_WRITE)
fault_flags |= FAULT_FLAG_WRITE;
if (*flags & FOLL_REMOTE)
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 35d2db8..4df20e1 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -13,6 +13,7 @@
*
*/
+#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/printk.h>
@@ -359,6 +360,8 @@ void kasan_report(unsigned long addr, size_t size,
if (likely(!kasan_report_enabled()))
return;
+ disable_trace_on_warning();
+
info.access_addr = (void *)addr;
info.access_size = size;
info.is_write = is_write;
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 4bd4480..ce7d416 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1176,7 +1176,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
* page_remove_rmap() in try_to_unmap_one(). So to determine page status
* correctly, we save a copy of the page flags at this time.
*/
- page_flags = p->flags;
+ if (PageHuge(p))
+ page_flags = hpage->flags;
+ else
+ page_flags = p->flags;
/*
* unpoison always clear PG_hwpoison inside page lock
diff --git a/mm/memory.c b/mm/memory.c
index 91e1653..49d9b42 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2699,40 +2699,6 @@ int do_swap_page(struct fault_env *fe, pte_t orig_pte)
}
/*
- * This is like a special single-page "expand_{down|up}wards()",
- * except we must first make sure that 'address{-|+}PAGE_SIZE'
- * doesn't hit another vma.
- */
-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
-{
- address &= PAGE_MASK;
- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
- struct vm_area_struct *prev = vma->vm_prev;
-
- /*
- * Is there a mapping abutting this one below?
- *
- * That's only ok if it's the same stack mapping
- * that has gotten split..
- */
- if (prev && prev->vm_end == address)
- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
-
- return expand_downwards(vma, address - PAGE_SIZE);
- }
- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
- struct vm_area_struct *next = vma->vm_next;
-
- /* As VM_GROWSDOWN but s/below/above/ */
- if (next && next->vm_start == address + PAGE_SIZE)
- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
-
- return expand_upwards(vma, address + PAGE_SIZE);
- }
- return 0;
-}
-
-/*
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
* We return with mmap_sem still held, but pte unmapped and unlocked.
@@ -2748,10 +2714,6 @@ static int do_anonymous_page(struct fault_env *fe)
if (vma->vm_flags & VM_SHARED)
return VM_FAULT_SIGBUS;
- /* Check if we need to add a guard page to the stack */
- if (check_stack_guard_page(vma, fe->address) < 0)
- return VM_FAULT_SIGSEGV;
-
/*
* Use pte_alloc() instead of pte_alloc_map(). We can't run
* pte_offset_map() on pmds where a huge pmd might be created
diff --git a/mm/mmap.c b/mm/mmap.c
index 143d62f..b8f91e0e 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -183,6 +183,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
unsigned long retval;
unsigned long newbrk, oldbrk;
struct mm_struct *mm = current->mm;
+ struct vm_area_struct *next;
unsigned long min_brk;
bool populate;
@@ -228,7 +229,8 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
}
/* Check against existing mmap mappings. */
- if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
+ next = find_vma(mm, oldbrk);
+ if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
goto out;
/* Ok, looks good - let it rip. */
@@ -251,10 +253,22 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
static long vma_compute_subtree_gap(struct vm_area_struct *vma)
{
- unsigned long max, subtree_gap;
- max = vma->vm_start;
- if (vma->vm_prev)
- max -= vma->vm_prev->vm_end;
+ unsigned long max, prev_end, subtree_gap;
+
+ /*
+ * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we
+ * allow two stack_guard_gaps between them here, and when choosing
+ * an unmapped area; whereas when expanding we only require one.
+ * That's a little inconsistent, but keeps the code here simpler.
+ */
+ max = vm_start_gap(vma);
+ if (vma->vm_prev) {
+ prev_end = vm_end_gap(vma->vm_prev);
+ if (max > prev_end)
+ max -= prev_end;
+ else
+ max = 0;
+ }
if (vma->vm_rb.rb_left) {
subtree_gap = rb_entry(vma->vm_rb.rb_left,
struct vm_area_struct, vm_rb)->rb_subtree_gap;
@@ -350,7 +364,7 @@ static void validate_mm(struct mm_struct *mm)
anon_vma_unlock_read(anon_vma);
}
- highest_address = vma->vm_end;
+ highest_address = vm_end_gap(vma);
vma = vma->vm_next;
i++;
}
@@ -539,7 +553,7 @@ void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
if (vma->vm_next)
vma_gap_update(vma->vm_next);
else
- mm->highest_vm_end = vma->vm_end;
+ mm->highest_vm_end = vm_end_gap(vma);
/*
* vma->vm_prev wasn't known when we followed the rbtree to find the
@@ -854,7 +868,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
vma_gap_update(vma);
if (end_changed) {
if (!next)
- mm->highest_vm_end = end;
+ mm->highest_vm_end = vm_end_gap(vma);
else if (!adjust_next)
vma_gap_update(next);
}
@@ -939,7 +953,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
* mm->highest_vm_end doesn't need any update
* in remove_next == 1 case.
*/
- VM_WARN_ON(mm->highest_vm_end != end);
+ VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma));
}
}
if (insert && file)
@@ -1792,7 +1806,7 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
while (true) {
/* Visit left subtree if it looks promising */
- gap_end = vma->vm_start;
+ gap_end = vm_start_gap(vma);
if (gap_end >= low_limit && vma->vm_rb.rb_left) {
struct vm_area_struct *left =
rb_entry(vma->vm_rb.rb_left,
@@ -1803,12 +1817,13 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
}
}
- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
+ gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
check_current:
/* Check if current node has a suitable gap */
if (gap_start > high_limit)
return -ENOMEM;
- if (gap_end >= low_limit && gap_end - gap_start >= length)
+ if (gap_end >= low_limit &&
+ gap_end > gap_start && gap_end - gap_start >= length)
goto found;
/* Visit right subtree if it looks promising */
@@ -1830,8 +1845,8 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
vma = rb_entry(rb_parent(prev),
struct vm_area_struct, vm_rb);
if (prev == vma->vm_rb.rb_left) {
- gap_start = vma->vm_prev->vm_end;
- gap_end = vma->vm_start;
+ gap_start = vm_end_gap(vma->vm_prev);
+ gap_end = vm_start_gap(vma);
goto check_current;
}
}
@@ -1895,7 +1910,7 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
while (true) {
/* Visit right subtree if it looks promising */
- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
+ gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
if (gap_start <= high_limit && vma->vm_rb.rb_right) {
struct vm_area_struct *right =
rb_entry(vma->vm_rb.rb_right,
@@ -1908,10 +1923,11 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
check_current:
/* Check if current node has a suitable gap */
- gap_end = vma->vm_start;
+ gap_end = vm_start_gap(vma);
if (gap_end < low_limit)
return -ENOMEM;
- if (gap_start <= high_limit && gap_end - gap_start >= length)
+ if (gap_start <= high_limit &&
+ gap_end > gap_start && gap_end - gap_start >= length)
goto found;
/* Visit left subtree if it looks promising */
@@ -1934,7 +1950,7 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
struct vm_area_struct, vm_rb);
if (prev == vma->vm_rb.rb_right) {
gap_start = vma->vm_prev ?
- vma->vm_prev->vm_end : 0;
+ vm_end_gap(vma->vm_prev) : 0;
goto check_current;
}
}
@@ -1972,7 +1988,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
+ struct vm_area_struct *vma, *prev;
struct vm_unmapped_area_info info;
if (len > TASK_SIZE - mmap_min_addr)
@@ -1983,9 +1999,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
if (addr) {
addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
+ vma = find_vma_prev(mm, addr, &prev);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)) &&
+ (!prev || addr >= vm_end_gap(prev)))
return addr;
}
@@ -2008,7 +2025,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
const unsigned long len, const unsigned long pgoff,
const unsigned long flags)
{
- struct vm_area_struct *vma;
+ struct vm_area_struct *vma, *prev;
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
struct vm_unmapped_area_info info;
@@ -2023,9 +2040,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
/* requesting a specific address */
if (addr) {
addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
+ vma = find_vma_prev(mm, addr, &prev);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)) &&
+ (!prev || addr >= vm_end_gap(prev)))
return addr;
}
@@ -2160,21 +2178,19 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
* update accounting. This is shared with both the
* grow-up and grow-down cases.
*/
-static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
+static int acct_stack_growth(struct vm_area_struct *vma,
+ unsigned long size, unsigned long grow)
{
struct mm_struct *mm = vma->vm_mm;
struct rlimit *rlim = current->signal->rlim;
- unsigned long new_start, actual_size;
+ unsigned long new_start;
/* address space limit tests */
if (!may_expand_vm(mm, vma->vm_flags, grow))
return -ENOMEM;
/* Stack limit test */
- actual_size = size;
- if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
- actual_size -= PAGE_SIZE;
- if (actual_size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+ if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
return -ENOMEM;
/* mlock limit tests */
@@ -2212,16 +2228,32 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
int expand_upwards(struct vm_area_struct *vma, unsigned long address)
{
struct mm_struct *mm = vma->vm_mm;
+ struct vm_area_struct *next;
+ unsigned long gap_addr;
int error = 0;
if (!(vma->vm_flags & VM_GROWSUP))
return -EFAULT;
- /* Guard against wrapping around to address 0. */
- if (address < PAGE_ALIGN(address+4))
- address = PAGE_ALIGN(address+4);
- else
+ /* Guard against exceeding limits of the address space. */
+ address &= PAGE_MASK;
+ if (address >= TASK_SIZE)
return -ENOMEM;
+ address += PAGE_SIZE;
+
+ /* Enforce stack_guard_gap */
+ gap_addr = address + stack_guard_gap;
+
+ /* Guard against overflow */
+ if (gap_addr < address || gap_addr > TASK_SIZE)
+ gap_addr = TASK_SIZE;
+
+ next = vma->vm_next;
+ if (next && next->vm_start < gap_addr) {
+ if (!(next->vm_flags & VM_GROWSUP))
+ return -ENOMEM;
+ /* Check that both stack segments have the same anon_vma? */
+ }
/* We must make sure the anon_vma is allocated. */
if (unlikely(anon_vma_prepare(vma)))
@@ -2266,7 +2298,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
if (vma->vm_next)
vma_gap_update(vma->vm_next);
else
- mm->highest_vm_end = address;
+ mm->highest_vm_end = vm_end_gap(vma);
spin_unlock(&mm->page_table_lock);
perf_event_mmap(vma);
@@ -2287,6 +2319,8 @@ int expand_downwards(struct vm_area_struct *vma,
unsigned long address)
{
struct mm_struct *mm = vma->vm_mm;
+ struct vm_area_struct *prev;
+ unsigned long gap_addr;
int error;
address &= PAGE_MASK;
@@ -2294,6 +2328,17 @@ int expand_downwards(struct vm_area_struct *vma,
if (error)
return error;
+ /* Enforce stack_guard_gap */
+ gap_addr = address - stack_guard_gap;
+ if (gap_addr > address)
+ return -ENOMEM;
+ prev = vma->vm_prev;
+ if (prev && prev->vm_end > gap_addr) {
+ if (!(prev->vm_flags & VM_GROWSDOWN))
+ return -ENOMEM;
+ /* Check that both stack segments have the same anon_vma? */
+ }
+
/* We must make sure the anon_vma is allocated. */
if (unlikely(anon_vma_prepare(vma)))
return -ENOMEM;
@@ -2348,28 +2393,25 @@ int expand_downwards(struct vm_area_struct *vma,
return error;
}
-/*
- * Note how expand_stack() refuses to expand the stack all the way to
- * abut the next virtual mapping, *unless* that mapping itself is also
- * a stack mapping. We want to leave room for a guard page, after all
- * (the guard page itself is not added here, that is done by the
- * actual page faulting logic)
- *
- * This matches the behavior of the guard page logic (see mm/memory.c:
- * check_stack_guard_page()), which only allows the guard page to be
- * removed under these circumstances.
- */
+/* enforced gap between the expanding stack and other mappings. */
+unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
+
+static int __init cmdline_parse_stack_guard_gap(char *p)
+{
+ unsigned long val;
+ char *endptr;
+
+ val = simple_strtoul(p, &endptr, 10);
+ if (!*endptr)
+ stack_guard_gap = val << PAGE_SHIFT;
+
+ return 0;
+}
+__setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
+
#ifdef CONFIG_STACK_GROWSUP
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
- struct vm_area_struct *next;
-
- address &= PAGE_MASK;
- next = vma->vm_next;
- if (next && next->vm_start == address + PAGE_SIZE) {
- if (!(next->vm_flags & VM_GROWSUP))
- return -ENOMEM;
- }
return expand_upwards(vma, address);
}
@@ -2391,14 +2433,6 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
#else
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
- struct vm_area_struct *prev;
-
- address &= PAGE_MASK;
- prev = vma->vm_prev;
- if (prev && prev->vm_end == address) {
- if (!(prev->vm_flags & VM_GROWSDOWN))
- return -ENOMEM;
- }
return expand_downwards(vma, address);
}
@@ -2496,7 +2530,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
vma->vm_prev = prev;
vma_gap_update(vma);
} else
- mm->highest_vm_end = prev ? prev->vm_end : 0;
+ mm->highest_vm_end = prev ? vm_end_gap(prev) : 0;
tail_vma->vm_next = NULL;
/* Kill the cache */
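
All of the vm_start/vm_end comparisons above now go through vm_start_gap()/vm_end_gap(), which fold stack_guard_gap into a VMA's apparent extent so gap searches and neighbour checks keep the guard region free. A rough, illustrative model of what those helpers compute; the real definitions live in include/linux/mm.h and the flag values and struct layout here are stand-ins:

struct vma {
	unsigned long vm_start, vm_end, vm_flags;
};

#define VM_GROWSDOWN	0x0100UL	/* illustrative flag values */
#define VM_GROWSUP	0x0200UL

static unsigned long stack_guard_gap = 256UL << 12;	/* 256 pages, 4K pages assumed */

/* a grow-down stack "starts" stack_guard_gap below its first mapped page */
static unsigned long vm_start_gap(struct vma *vma)
{
	unsigned long start = vma->vm_start;

	if (vma->vm_flags & VM_GROWSDOWN) {
		start -= stack_guard_gap;
		if (start > vma->vm_start)	/* underflow: clamp to 0 */
			start = 0;
	}
	return start;
}

/* a grow-up stack "ends" stack_guard_gap above its last mapped page */
static unsigned long vm_end_gap(struct vma *vma)
{
	unsigned long end = vma->vm_end;

	if (vma->vm_flags & VM_GROWSUP) {
		end += stack_guard_gap;
		if (end < vma->vm_end)		/* overflow: clamp to max */
			end = ~0UL;
	}
	return end;
}

int main(void)
{
	struct vma stack = { 0x70000000UL, 0x70021000UL, VM_GROWSDOWN };

	/* a new mapping must end at or below this address to respect the gap */
	return vm_start_gap(&stack) < stack.vm_start ? 0 : 1;
}
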
diff --git a/mm/shmem.c b/mm/shmem.c
index e9c2b6e..142887f 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -412,6 +412,7 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
struct shrink_control *sc, unsigned long nr_to_split)
{
LIST_HEAD(list), *pos, *next;
+ LIST_HEAD(to_remove);
struct inode *inode;
struct shmem_inode_info *info;
struct page *page;
@@ -438,9 +439,8 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
/* Check if there's anything to gain */
if (round_up(inode->i_size, PAGE_SIZE) ==
round_up(inode->i_size, HPAGE_PMD_SIZE)) {
- list_del_init(&info->shrinklist);
+ list_move(&info->shrinklist, &to_remove);
removed++;
- iput(inode);
goto next;
}
@@ -451,6 +451,13 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
}
spin_unlock(&sbinfo->shrinklist_lock);
+ list_for_each_safe(pos, next, &to_remove) {
+ info = list_entry(pos, struct shmem_inode_info, shrinklist);
+ inode = &info->vfs_inode;
+ list_del_init(&info->shrinklist);
+ iput(inode);
+ }
+
list_for_each_safe(pos, next, &list) {
int ret;
diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c
index 310ac0b..454d6d7 100644
--- a/mm/swap_cgroup.c
+++ b/mm/swap_cgroup.c
@@ -48,6 +48,9 @@ static int swap_cgroup_prepare(int type)
if (!page)
goto not_enough_page;
ctrl->map[idx] = page;
+
+ if (!(idx % SWAP_CLUSTER_MAX))
+ cond_resched();
}
return 0;
not_enough_page:
diff --git a/net/core/dev.c b/net/core/dev.c
index c0d0b49..a143dbd 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -141,6 +141,8 @@
#include <linux/netfilter_ingress.h>
#include <linux/sctp.h>
#include <linux/crash_dump.h>
+#include <linux/tcp.h>
+#include <net/tcp.h>
#include "net-sysfs.h"
@@ -2988,6 +2990,10 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
if (netif_needs_gso(skb, features)) {
struct sk_buff *segs;
+ __be16 src_port = tcp_hdr(skb)->source;
+ __be16 dest_port = tcp_hdr(skb)->dest;
+
+ trace_print_skb_gso(skb, src_port, dest_port);
segs = skb_gso_segment(skb, features);
if (IS_ERR(segs)) {
goto out_kfree_skb;
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 047a175..072c1f4 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1394,9 +1394,12 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
if (regs.len > reglen)
regs.len = reglen;
- regbuf = vzalloc(reglen);
- if (reglen && !regbuf)
- return -ENOMEM;
+ regbuf = NULL;
+ if (reglen) {
+ regbuf = vzalloc(reglen);
+ if (!regbuf)
+ return -ENOMEM;
+ }
	ops->get_regs(dev, &regs, regbuf);
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 89a8cac4..51b27ae 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1263,7 +1263,7 @@ void __init arp_init(void)
/*
* ax25 -> ASCII conversion
*/
-static char *ax2asc2(ax25_address *a, char *buf)
+static void ax2asc2(ax25_address *a, char *buf)
{
char c, *s;
int n;
@@ -1285,10 +1285,10 @@ static char *ax2asc2(ax25_address *a, char *buf)
*s++ = n + '0';
*s++ = '\0';
- if (*buf == '\0' || *buf == '-')
- return "*";
-
- return buf;
+ if (*buf == '\0' || *buf == '-') {
+ buf[0] = '*';
+ buf[1] = '\0';
+ }
}
#endif /* CONFIG_AX25 */
@@ -1322,7 +1322,7 @@ static void arp_format_neigh_entry(struct seq_file *seq,
}
#endif
sprintf(tbuf, "%pI4", n->primary_key);
- seq_printf(seq, "%-16s 0x%-10x0x%-10x%s * %s\n",
+ seq_printf(seq, "%-16s 0x%-10x0x%-10x%-17s * %s\n",
tbuf, hatype, arp_state_to_flags(n), hbuffer, dev->name);
read_unlock(&n->lock);
}
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index f6c50af..3d063eb 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -117,7 +117,7 @@ static void jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
(fwmark > 0 && skb->mark == fwmark)) &&
(full || tp->snd_cwnd != tcp_probe.lastcwnd)) {
- spin_lock(&tcp_probe.lock);
+ spin_lock_bh(&tcp_probe.lock);
/* If log fills, just silently drop */
if (tcp_probe_avail() > 1) {
struct tcp_log *p = tcp_probe.log + tcp_probe.head;
@@ -157,7 +157,7 @@ static void jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
tcp_probe.head = (tcp_probe.head + 1) & (bufsize - 1);
}
tcp_probe.lastcwnd = tp->snd_cwnd;
- spin_unlock(&tcp_probe.lock);
+ spin_unlock_bh(&tcp_probe.lock);
wake_up(&tcp_probe.wait);
}
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 58d7c1d..d600735 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3427,9 +3427,15 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
}
if (idev) {
- if (idev->if_flags & IF_READY)
- /* device is already configured. */
+ if (idev->if_flags & IF_READY) {
+ /* device is already configured -
+ * but resend MLD reports, we might
+ * have roamed and need to update
+ * multicast snooping switches
+ */
+ ipv6_mc_up(idev);
break;
+ }
idev->if_flags |= IF_READY;
}
@@ -4044,6 +4050,12 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id)
if (bump_id)
rt_genid_bump_ipv6(dev_net(dev));
+
+ /* Make sure that a new temporary address will be created
+ * before this temporary address becomes deprecated.
+ */
+ if (ifp->flags & IFA_F_TEMPORARY)
+ addrconf_verify_rtnl();
}
static void addrconf_dad_run(struct inet6_dev *idev)
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 9217390..edf15f0 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -167,18 +167,22 @@ int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr,
if (np->sndflow)
fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
- addr_type = ipv6_addr_type(&usin->sin6_addr);
-
- if (addr_type == IPV6_ADDR_ANY) {
+ if (ipv6_addr_any(&usin->sin6_addr)) {
/*
* connect to self
*/
- usin->sin6_addr.s6_addr[15] = 0x01;
+ if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
+ ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
+ &usin->sin6_addr);
+ else
+ usin->sin6_addr = in6addr_loopback;
}
+ addr_type = ipv6_addr_type(&usin->sin6_addr);
+
daddr = &usin->sin6_addr;
- if (addr_type == IPV6_ADDR_MAPPED) {
+ if (addr_type & IPV6_ADDR_MAPPED) {
struct sockaddr_in sin;
if (__ipv6_only_sock(sk)) {
diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
index e604013..7a5b9812 100644
--- a/net/ipv6/ila/ila_xlat.c
+++ b/net/ipv6/ila/ila_xlat.c
@@ -68,6 +68,7 @@ static inline u32 ila_locator_hash(struct ila_locator loc)
{
u32 *v = (u32 *)loc.v32;
+ __ila_hash_secret_init();
return jhash_2words(v[0], v[1], hashrnd);
}
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index d472a5f..fafad39 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -67,9 +67,6 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
struct in6_addr *nexthop;
int ret;
- skb->protocol = htons(ETH_P_IPV6);
- skb->dev = dev;
-
if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
@@ -153,6 +150,9 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
struct net_device *dev = skb_dst(skb)->dev;
struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
+ skb->protocol = htons(ETH_P_IPV6);
+ skb->dev = dev;
+
if (unlikely(idev->cnf.disable_ipv6)) {
IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
kfree_skb(skb);
@@ -867,7 +867,6 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
if (skb->sk && dst_allfrag(skb_dst(skb)))
sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);
- skb->dev = skb_dst(skb)->dev;
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
err = -EMSGSIZE;
@@ -1028,6 +1027,9 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
}
}
#endif
+ if (ipv6_addr_v4mapped(&fl6->saddr) &&
+ !(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr)))
+ return -EAFNOSUPPORT;
return 0;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index f8a6036..9828dc2 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -148,8 +148,13 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
* connect() to INADDR_ANY means loopback (BSD'ism).
*/
- if (ipv6_addr_any(&usin->sin6_addr))
- usin->sin6_addr.s6_addr[15] = 0x1;
+ if (ipv6_addr_any(&usin->sin6_addr)) {
+ if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
+ ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
+ &usin->sin6_addr);
+ else
+ usin->sin6_addr = in6addr_loopback;
+ }
addr_type = ipv6_addr_type(&usin->sin6_addr);
@@ -188,7 +193,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
* TCP over IPv4
*/
- if (addr_type == IPV6_ADDR_MAPPED) {
+ if (addr_type & IPV6_ADDR_MAPPED) {
u32 exthdrlen = icsk->icsk_ext_hdr_len;
struct sockaddr_in sin;
@@ -1237,9 +1242,6 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
if (skb->protocol == htons(ETH_P_IP))
return tcp_v4_do_rcv(sk, skb);
- if (tcp_filter(sk, skb))
- goto discard;
-
/*
* socket locking is here for SMP purposes as backlog rcv
* is currently called with bh processing disabled.
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 26d5718..c925fd9 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1049,6 +1049,10 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
if (addr_len < SIN6_LEN_RFC2133)
return -EINVAL;
daddr = &sin6->sin6_addr;
+ if (ipv6_addr_any(daddr) &&
+ ipv6_addr_v4mapped(&np->saddr))
+ ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
+ daddr);
break;
case AF_INET:
goto do_udp_sendmsg;
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index fd6541f..07001b6 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -865,6 +865,8 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
default:
return -EINVAL;
}
+ sdata->u.ap.req_smps = sdata->smps_mode;
+
sdata->needed_rx_chains = sdata->local->rx_chains;
mutex_lock(&local->mtx);
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index a31d307..62d13ea 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -66,6 +66,8 @@ ieee80211_ibss_build_presp(struct ieee80211_sub_if_data *sdata,
2 + (IEEE80211_MAX_SUPP_RATES - 8) +
2 + sizeof(struct ieee80211_ht_cap) +
2 + sizeof(struct ieee80211_ht_operation) +
+ 2 + sizeof(struct ieee80211_vht_cap) +
+ 2 + sizeof(struct ieee80211_vht_operation) +
ifibss->ie_len;
presp = kzalloc(sizeof(*presp) + frame_len, GFP_KERNEL);
if (!presp)
@@ -487,14 +489,14 @@ int ieee80211_ibss_csa_beacon(struct ieee80211_sub_if_data *sdata,
struct beacon_data *presp, *old_presp;
struct cfg80211_bss *cbss;
const struct cfg80211_bss_ies *ies;
- u16 capability = 0;
+ u16 capability = WLAN_CAPABILITY_IBSS;
u64 tsf;
int ret = 0;
sdata_assert_lock(sdata);
if (ifibss->privacy)
- capability = WLAN_CAPABILITY_PRIVACY;
+ capability |= WLAN_CAPABILITY_PRIVACY;
cbss = cfg80211_get_bss(sdata->local->hw.wiphy, ifibss->chandef.chan,
ifibss->bssid, ifibss->ssid,
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index acaaf61..c45a0fc 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1585,12 +1585,16 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
*/
if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
!ieee80211_has_morefrags(hdr->frame_control) &&
+ !ieee80211_is_back_req(hdr->frame_control) &&
!(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
(rx->sdata->vif.type == NL80211_IFTYPE_AP ||
rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
- /* PM bit is only checked in frames where it isn't reserved,
+ /*
+ * PM bit is only checked in frames where it isn't reserved,
* in AP mode it's reserved in non-bufferable management frames
* (cf. IEEE 802.11-2012 8.2.4.1.7 Power Management field)
+ * BAR frames should be ignored as specified in
+ * IEEE 802.11-2012 10.2.1.2.
*/
(!ieee80211_is_mgmt(hdr->frame_control) ||
ieee80211_is_bufferable_mmpdu(hdr->frame_control))) {
@@ -2467,7 +2471,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
if (is_multicast_ether_addr(hdr->addr1)) {
mpp_addr = hdr->addr3;
proxied_addr = mesh_hdr->eaddr1;
- } else if (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6) {
+ } else if ((mesh_hdr->flags & MESH_FLAGS_AE) ==
+ MESH_FLAGS_AE_A5_A6) {
/* has_a4 already checked in ieee80211_rx_mesh_check */
mpp_addr = hdr->addr4;
proxied_addr = mesh_hdr->eaddr2;
@@ -3949,6 +3954,7 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
stats->last_rate = sta_stats_encode_rate(status);
stats->fragments++;
+ stats->packets++;
if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
stats->last_signal = status->signal;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 8e05032..b2c823ff 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -2148,7 +2148,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
struct ieee80211_sta_rx_stats *cpurxs;
cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
- sinfo->rx_packets += cpurxs->dropped;
+ sinfo->rx_dropped_misc += cpurxs->dropped;
}
}
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 42ce9bd..5c71d60 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -17,6 +17,7 @@
#include <asm/unaligned.h>
#include <net/mac80211.h>
#include <crypto/aes.h>
+#include <crypto/algapi.h>
#include "ieee80211_i.h"
#include "michael.h"
@@ -153,7 +154,7 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
data_len = skb->len - hdrlen - MICHAEL_MIC_LEN;
key = &rx->key->conf.key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
michael_mic(key, hdr, data, data_len, mic);
- if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0)
+ if (crypto_memneq(mic, data + data_len, MICHAEL_MIC_LEN))
goto mic_fail;
/* remove Michael MIC from payload */
@@ -1047,7 +1048,7 @@ ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx)
bip_aad(skb, aad);
ieee80211_aes_cmac(key->u.aes_cmac.tfm, aad,
skb->data + 24, skb->len - 24, mic);
- if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) {
+ if (crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
key->u.aes_cmac.icverrors++;
return RX_DROP_UNUSABLE;
}
@@ -1097,7 +1098,7 @@ ieee80211_crypto_aes_cmac_256_decrypt(struct ieee80211_rx_data *rx)
bip_aad(skb, aad);
ieee80211_aes_cmac_256(key->u.aes_cmac.tfm, aad,
skb->data + 24, skb->len - 24, mic);
- if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) {
+ if (crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
key->u.aes_cmac.icverrors++;
return RX_DROP_UNUSABLE;
}
@@ -1201,7 +1202,7 @@ ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx)
if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce,
skb->data + 24, skb->len - 24,
mic) < 0 ||
- memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) {
+ crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
key->u.aes_gmac.icverrors++;
return RX_DROP_UNUSABLE;
}
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index c3fc14e..3a8dc39 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1630,8 +1630,6 @@ static int __init nf_conntrack_sip_init(void)
ports[ports_c++] = SIP_PORT;
for (i = 0; i < ports_c; i++) {
- memset(&sip[i], 0, sizeof(sip[i]));
-
nf_ct_helper_init(&sip[4 * i], AF_INET, IPPROTO_UDP, "sip",
SIP_PORT, ports[i], i, sip_exp_policy,
SIP_EXPECT_MAX,
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 3dca90d..ffb9e8a 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -13,7 +13,6 @@
/* Internal logging interface, which relies on the real
LOG target modules */
-#define NF_LOG_PREFIXLEN 128
#define NFLOGGER_NAME_LEN 64
static struct nf_logger __rcu *loggers[NFPROTO_NUMPROTO][NF_LOG_TYPE_MAX] __read_mostly;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index e5194f6f..778fcdb 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -3637,10 +3637,18 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
goto err5;
}
+ if (set->size &&
+ !atomic_add_unless(&set->nelems, 1, set->size + set->ndeact)) {
+ err = -ENFILE;
+ goto err6;
+ }
+
nft_trans_elem(trans) = elem;
list_add_tail(&trans->list, &ctx->net->nft.commit_list);
return 0;
+err6:
+ set->ops->remove(set, &elem);
err5:
kfree(trans);
err4:
@@ -3687,15 +3695,9 @@ static int nf_tables_newsetelem(struct net *net, struct sock *nlsk,
return -EBUSY;
nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
- if (set->size &&
- !atomic_add_unless(&set->nelems, 1, set->size + set->ndeact))
- return -ENFILE;
-
err = nft_add_set_elem(&ctx, set, attr, nlh->nlmsg_flags);
- if (err < 0) {
- atomic_dec(&set->nelems);
+ if (err < 0)
break;
- }
}
return err;
}
diff --git a/net/netfilter/nft_log.c b/net/netfilter/nft_log.c
index 1b01404..c7704e9 100644
--- a/net/netfilter/nft_log.c
+++ b/net/netfilter/nft_log.c
@@ -38,7 +38,8 @@ static void nft_log_eval(const struct nft_expr *expr,
static const struct nla_policy nft_log_policy[NFTA_LOG_MAX + 1] = {
[NFTA_LOG_GROUP] = { .type = NLA_U16 },
- [NFTA_LOG_PREFIX] = { .type = NLA_STRING },
+ [NFTA_LOG_PREFIX] = { .type = NLA_STRING,
+ .len = NF_LOG_PREFIXLEN - 1 },
[NFTA_LOG_SNAPLEN] = { .type = NLA_U32 },
[NFTA_LOG_QTHRESHOLD] = { .type = NLA_U16 },
[NFTA_LOG_LEVEL] = { .type = NLA_U32 },
diff --git a/net/rxrpc/key.c b/net/rxrpc/key.c
index 18c737a..7fc3407 100644
--- a/net/rxrpc/key.c
+++ b/net/rxrpc/key.c
@@ -217,7 +217,7 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
unsigned int *_toklen)
{
const __be32 *xdr = *_xdr;
- unsigned int toklen = *_toklen, n_parts, loop, tmp;
+ unsigned int toklen = *_toklen, n_parts, loop, tmp, paddedlen;
/* there must be at least one name, and at least #names+1 length
* words */
@@ -247,16 +247,16 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
toklen -= 4;
if (tmp <= 0 || tmp > AFSTOKEN_STRING_MAX)
return -EINVAL;
- if (tmp > toklen)
+ paddedlen = (tmp + 3) & ~3;
+ if (paddedlen > toklen)
return -EINVAL;
princ->name_parts[loop] = kmalloc(tmp + 1, GFP_KERNEL);
if (!princ->name_parts[loop])
return -ENOMEM;
memcpy(princ->name_parts[loop], xdr, tmp);
princ->name_parts[loop][tmp] = 0;
- tmp = (tmp + 3) & ~3;
- toklen -= tmp;
- xdr += tmp >> 2;
+ toklen -= paddedlen;
+ xdr += paddedlen >> 2;
}
if (toklen < 4)
@@ -265,16 +265,16 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
toklen -= 4;
if (tmp <= 0 || tmp > AFSTOKEN_K5_REALM_MAX)
return -EINVAL;
- if (tmp > toklen)
+ paddedlen = (tmp + 3) & ~3;
+ if (paddedlen > toklen)
return -EINVAL;
princ->realm = kmalloc(tmp + 1, GFP_KERNEL);
if (!princ->realm)
return -ENOMEM;
memcpy(princ->realm, xdr, tmp);
princ->realm[tmp] = 0;
- tmp = (tmp + 3) & ~3;
- toklen -= tmp;
- xdr += tmp >> 2;
+ toklen -= paddedlen;
+ xdr += paddedlen >> 2;
_debug("%s/...@%s", princ->name_parts[0], princ->realm);
@@ -293,7 +293,7 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
unsigned int *_toklen)
{
const __be32 *xdr = *_xdr;
- unsigned int toklen = *_toklen, len;
+ unsigned int toklen = *_toklen, len, paddedlen;
/* there must be at least one tag and one length word */
if (toklen <= 8)
@@ -307,15 +307,17 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
toklen -= 8;
if (len > max_data_size)
return -EINVAL;
+ paddedlen = (len + 3) & ~3;
+ if (paddedlen > toklen)
+ return -EINVAL;
td->data_len = len;
if (len > 0) {
td->data = kmemdup(xdr, len, GFP_KERNEL);
if (!td->data)
return -ENOMEM;
- len = (len + 3) & ~3;
- toklen -= len;
- xdr += len >> 2;
+ toklen -= paddedlen;
+ xdr += paddedlen >> 2;
}
_debug("tag %x len %x", td->tag, td->data_len);
@@ -387,7 +389,7 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
const __be32 **_xdr, unsigned int *_toklen)
{
const __be32 *xdr = *_xdr;
- unsigned int toklen = *_toklen, len;
+ unsigned int toklen = *_toklen, len, paddedlen;
/* there must be at least one length word */
if (toklen <= 4)
@@ -399,6 +401,9 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
toklen -= 4;
if (len > AFSTOKEN_K5_TIX_MAX)
return -EINVAL;
+ paddedlen = (len + 3) & ~3;
+ if (paddedlen > toklen)
+ return -EINVAL;
*_tktlen = len;
_debug("ticket len %u", len);
@@ -407,9 +412,8 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
*_ticket = kmemdup(xdr, len, GFP_KERNEL);
if (!*_ticket)
return -ENOMEM;
- len = (len + 3) & ~3;
- toklen -= len;
- xdr += len >> 2;
+ toklen -= paddedlen;
+ xdr += paddedlen >> 2;
}
*_xdr = xdr;
@@ -552,7 +556,7 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
{
const __be32 *xdr = prep->data, *token;
const char *cp;
- unsigned int len, tmp, loop, ntoken, toklen, sec_ix;
+ unsigned int len, paddedlen, loop, ntoken, toklen, sec_ix;
size_t datalen = prep->datalen;
int ret;
@@ -578,22 +582,21 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
if (len < 1 || len > AFSTOKEN_CELL_MAX)
goto not_xdr;
datalen -= 4;
- tmp = (len + 3) & ~3;
- if (tmp > datalen)
+ paddedlen = (len + 3) & ~3;
+ if (paddedlen > datalen)
goto not_xdr;
cp = (const char *) xdr;
for (loop = 0; loop < len; loop++)
if (!isprint(cp[loop]))
goto not_xdr;
- if (len < tmp)
- for (; loop < tmp; loop++)
- if (cp[loop])
- goto not_xdr;
+ for (; loop < paddedlen; loop++)
+ if (cp[loop])
+ goto not_xdr;
_debug("cellname: [%u/%u] '%*.*s'",
- len, tmp, len, len, (const char *) xdr);
- datalen -= tmp;
- xdr += tmp >> 2;
+ len, paddedlen, len, len, (const char *) xdr);
+ datalen -= paddedlen;
+ xdr += paddedlen >> 2;
/* get the token count */
if (datalen < 12)
@@ -614,10 +617,11 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
sec_ix = ntohl(*xdr);
datalen -= 4;
_debug("token: [%x/%zx] %x", toklen, datalen, sec_ix);
- if (toklen < 20 || toklen > datalen)
+ paddedlen = (toklen + 3) & ~3;
+ if (toklen < 20 || toklen > datalen || paddedlen > datalen)
goto not_xdr;
- datalen -= (toklen + 3) & ~3;
- xdr += (toklen + 3) >> 2;
+ datalen -= paddedlen;
+ xdr += paddedlen >> 2;
} while (--loop > 0);
diff --git a/net/sctp/offload.c b/net/sctp/offload.c
index 7e869d0..4f5a2b5 100644
--- a/net/sctp/offload.c
+++ b/net/sctp/offload.c
@@ -68,7 +68,7 @@ static struct sk_buff *sctp_gso_segment(struct sk_buff *skb,
goto out;
}
- segs = skb_segment(skb, features | NETIF_F_HW_CSUM);
+ segs = skb_segment(skb, features | NETIF_F_HW_CSUM | NETIF_F_SG);
if (IS_ERR(segs))
goto out;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 14346dc..e1719c6 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -235,8 +235,12 @@ static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
sctp_assoc_t id)
{
struct sctp_association *addr_asoc = NULL, *id_asoc = NULL;
- struct sctp_transport *transport;
+ struct sctp_af *af = sctp_get_af_specific(addr->ss_family);
union sctp_addr *laddr = (union sctp_addr *)addr;
+ struct sctp_transport *transport;
+
+ if (sctp_verify_addr(sk, laddr, af->sockaddr_len))
+ return NULL;
addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
laddr,
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 28bf4fe..ab8a2d5 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -110,6 +110,10 @@ int tipc_net_start(struct net *net, u32 addr)
char addr_string[16];
tn->own_addr = addr;
+
+ /* Ensure that the new address is visible before we reinit. */
+ smp_mb();
+
tipc_named_reinit(net);
tipc_sk_reinit(net);
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 9d2f4c2..2775332 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -263,6 +263,11 @@ static void tipc_node_write_lock(struct tipc_node *n)
write_lock_bh(&n->lock);
}
+static void tipc_node_write_unlock_fast(struct tipc_node *n)
+{
+ write_unlock_bh(&n->lock);
+}
+
static void tipc_node_write_unlock(struct tipc_node *n)
{
struct net *net = n->net;
@@ -417,7 +422,7 @@ void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr)
}
tipc_node_write_lock(n);
list_add_tail(subscr, &n->publ_list);
- tipc_node_write_unlock(n);
+ tipc_node_write_unlock_fast(n);
tipc_node_put(n);
}
@@ -435,7 +440,7 @@ void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr)
}
tipc_node_write_lock(n);
list_del_init(subscr);
- tipc_node_write_unlock(n);
+ tipc_node_write_unlock_fast(n);
tipc_node_put(n);
}
diff --git a/net/tipc/server.c b/net/tipc/server.c
index 215849c..f89c0c2 100644
--- a/net/tipc/server.c
+++ b/net/tipc/server.c
@@ -91,7 +91,8 @@ static void tipc_sock_release(struct tipc_conn *con);
static void tipc_conn_kref_release(struct kref *kref)
{
struct tipc_conn *con = container_of(kref, struct tipc_conn, kref);
- struct sockaddr_tipc *saddr = con->server->saddr;
+ struct tipc_server *s = con->server;
+ struct sockaddr_tipc *saddr = s->saddr;
struct socket *sock = con->sock;
struct sock *sk;
@@ -106,6 +107,11 @@ static void tipc_conn_kref_release(struct kref *kref)
tipc_sock_release(con);
sock_release(sock);
con->sock = NULL;
+
+ spin_lock_bh(&s->idr_lock);
+ idr_remove(&s->conn_idr, con->conid);
+ s->idr_in_use--;
+ spin_unlock_bh(&s->idr_lock);
}
tipc_clean_outqueues(con);
@@ -128,8 +134,10 @@ static struct tipc_conn *tipc_conn_lookup(struct tipc_server *s, int conid)
spin_lock_bh(&s->idr_lock);
con = idr_find(&s->conn_idr, conid);
- if (con)
+ if (con && test_bit(CF_CONNECTED, &con->flags))
conn_get(con);
+ else
+ con = NULL;
spin_unlock_bh(&s->idr_lock);
return con;
}
@@ -198,15 +206,8 @@ static void tipc_sock_release(struct tipc_conn *con)
static void tipc_close_conn(struct tipc_conn *con)
{
- struct tipc_server *s = con->server;
-
if (test_and_clear_bit(CF_CONNECTED, &con->flags)) {
- spin_lock_bh(&s->idr_lock);
- idr_remove(&s->conn_idr, con->conid);
- s->idr_in_use--;
- spin_unlock_bh(&s->idr_lock);
-
/* We shouldn't flush pending works as we may be in the
* thread. In fact the races with pending rx/tx work structs
* are harmless for us here as we have already deleted this
@@ -458,6 +459,11 @@ int tipc_conn_sendmsg(struct tipc_server *s, int conid,
if (!con)
return -EINVAL;
+ if (!test_bit(CF_CONNECTED, &con->flags)) {
+ conn_put(con);
+ return 0;
+ }
+
e = tipc_alloc_entry(data, len);
if (!e) {
conn_put(con);
@@ -471,12 +477,8 @@ int tipc_conn_sendmsg(struct tipc_server *s, int conid,
list_add_tail(&e->list, &con->outqueue);
spin_unlock_bh(&con->outqueue_lock);
- if (test_bit(CF_CONNECTED, &con->flags)) {
- if (!queue_work(s->send_wq, &con->swork))
- conn_put(con);
- } else {
+ if (!queue_work(s->send_wq, &con->swork))
conn_put(con);
- }
return 0;
}
@@ -500,7 +502,7 @@ static void tipc_send_to_sock(struct tipc_conn *con)
int ret;
spin_lock_bh(&con->outqueue_lock);
- while (1) {
+ while (test_bit(CF_CONNECTED, &con->flags)) {
e = list_entry(con->outqueue.next, struct outqueue_entry,
list);
if ((struct list_head *) e == &con->outqueue)
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 41f0138..25bc5c3 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -335,8 +335,6 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
INIT_LIST_HEAD(&tsk->publications);
msg = &tsk->phdr;
tn = net_generic(sock_net(sk), tipc_net_id);
- tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
- NAMED_H_SIZE, 0);
/* Finish initializing socket data structures */
sock->ops = ops;
@@ -346,6 +344,13 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
pr_warn("Socket create failed; port number exhausted\n");
return -EINVAL;
}
+
+ /* Ensure tsk is visible before we read own_addr. */
+ smp_mb();
+
+ tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
+ NAMED_H_SIZE, 0);
+
msg_set_origport(msg, tsk->portid);
setup_timer(&sk->sk_timer, tipc_sk_timeout, (unsigned long)tsk);
sk->sk_backlog_rcv = tipc_backlog_rcv;
@@ -2264,24 +2269,27 @@ static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
void tipc_sk_reinit(struct net *net)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
- const struct bucket_table *tbl;
- struct rhash_head *pos;
+ struct rhashtable_iter iter;
struct tipc_sock *tsk;
struct tipc_msg *msg;
- int i;
- rcu_read_lock();
- tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
- for (i = 0; i < tbl->size; i++) {
- rht_for_each_entry_rcu(tsk, pos, tbl, i, node) {
+ rhashtable_walk_enter(&tn->sk_rht, &iter);
+
+ do {
+ tsk = ERR_PTR(rhashtable_walk_start(&iter));
+ if (tsk)
+ continue;
+
+ while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
spin_lock_bh(&tsk->sk.sk_lock.slock);
msg = &tsk->phdr;
msg_set_prevnode(msg, tn->own_addr);
msg_set_orignode(msg, tn->own_addr);
spin_unlock_bh(&tsk->sk.sk_lock.slock);
}
- }
- rcu_read_unlock();
+
+ rhashtable_walk_stop(&iter);
+ } while (tsk == ERR_PTR(-EAGAIN));
}
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 0dd0224..9d94e65 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -54,6 +54,8 @@ struct tipc_subscriber {
static void tipc_subscrp_delete(struct tipc_subscription *sub);
static void tipc_subscrb_put(struct tipc_subscriber *subscriber);
+static void tipc_subscrp_put(struct tipc_subscription *subscription);
+static void tipc_subscrp_get(struct tipc_subscription *subscription);
/**
* htohl - convert value to endianness used by destination
@@ -123,6 +125,7 @@ void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower,
{
struct tipc_name_seq seq;
+ tipc_subscrp_get(sub);
tipc_subscrp_convert_seq(&sub->evt.s.seq, sub->swap, &seq);
if (!tipc_subscrp_check_overlap(&seq, found_lower, found_upper))
return;
@@ -132,30 +135,23 @@ void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower,
tipc_subscrp_send_event(sub, found_lower, found_upper, event, port_ref,
node);
+ tipc_subscrp_put(sub);
}
static void tipc_subscrp_timeout(unsigned long data)
{
struct tipc_subscription *sub = (struct tipc_subscription *)data;
- struct tipc_subscriber *subscriber = sub->subscriber;
/* Notify subscriber of timeout */
tipc_subscrp_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper,
TIPC_SUBSCR_TIMEOUT, 0, 0);
- spin_lock_bh(&subscriber->lock);
- tipc_subscrp_delete(sub);
- spin_unlock_bh(&subscriber->lock);
-
- tipc_subscrb_put(subscriber);
+ tipc_subscrp_put(sub);
}
static void tipc_subscrb_kref_release(struct kref *kref)
{
- struct tipc_subscriber *subcriber = container_of(kref,
- struct tipc_subscriber, kref);
-
- kfree(subcriber);
+	kfree(container_of(kref, struct tipc_subscriber, kref));
}
static void tipc_subscrb_put(struct tipc_subscriber *subscriber)
@@ -168,6 +164,59 @@ static void tipc_subscrb_get(struct tipc_subscriber *subscriber)
kref_get(&subscriber->kref);
}
+static void tipc_subscrp_kref_release(struct kref *kref)
+{
+ struct tipc_subscription *sub = container_of(kref,
+ struct tipc_subscription,
+ kref);
+ struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
+ struct tipc_subscriber *subscriber = sub->subscriber;
+
+ spin_lock_bh(&subscriber->lock);
+ tipc_nametbl_unsubscribe(sub);
+ list_del(&sub->subscrp_list);
+ atomic_dec(&tn->subscription_count);
+ spin_unlock_bh(&subscriber->lock);
+ kfree(sub);
+ tipc_subscrb_put(subscriber);
+}
+
+static void tipc_subscrp_put(struct tipc_subscription *subscription)
+{
+ kref_put(&subscription->kref, tipc_subscrp_kref_release);
+}
+
+static void tipc_subscrp_get(struct tipc_subscription *subscription)
+{
+ kref_get(&subscription->kref);
+}
+
+/* tipc_subscrb_subscrp_delete - delete a specific subscription or all
+ * subscriptions for a given subscriber.
+ */
+static void tipc_subscrb_subscrp_delete(struct tipc_subscriber *subscriber,
+ struct tipc_subscr *s)
+{
+ struct list_head *subscription_list = &subscriber->subscrp_list;
+ struct tipc_subscription *sub, *temp;
+
+ spin_lock_bh(&subscriber->lock);
+ list_for_each_entry_safe(sub, temp, subscription_list, subscrp_list) {
+ if (s && memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr)))
+ continue;
+
+ tipc_subscrp_get(sub);
+ spin_unlock_bh(&subscriber->lock);
+ tipc_subscrp_delete(sub);
+ tipc_subscrp_put(sub);
+ spin_lock_bh(&subscriber->lock);
+
+ if (s)
+ break;
+ }
+ spin_unlock_bh(&subscriber->lock);
+}
+
static struct tipc_subscriber *tipc_subscrb_create(int conid)
{
struct tipc_subscriber *subscriber;
@@ -177,8 +226,8 @@ static struct tipc_subscriber *tipc_subscrb_create(int conid)
pr_warn("Subscriber rejected, no memory\n");
return NULL;
}
- kref_init(&subscriber->kref);
INIT_LIST_HEAD(&subscriber->subscrp_list);
+ kref_init(&subscriber->kref);
subscriber->conid = conid;
spin_lock_init(&subscriber->lock);
@@ -187,55 +236,22 @@ static struct tipc_subscriber *tipc_subscrb_create(int conid)
static void tipc_subscrb_delete(struct tipc_subscriber *subscriber)
{
- struct tipc_subscription *sub, *temp;
- u32 timeout;
-
- spin_lock_bh(&subscriber->lock);
- /* Destroy any existing subscriptions for subscriber */
- list_for_each_entry_safe(sub, temp, &subscriber->subscrp_list,
- subscrp_list) {
- timeout = htohl(sub->evt.s.timeout, sub->swap);
- if ((timeout == TIPC_WAIT_FOREVER) || del_timer(&sub->timer)) {
- tipc_subscrp_delete(sub);
- tipc_subscrb_put(subscriber);
- }
- }
- spin_unlock_bh(&subscriber->lock);
-
+ tipc_subscrb_subscrp_delete(subscriber, NULL);
tipc_subscrb_put(subscriber);
}
static void tipc_subscrp_delete(struct tipc_subscription *sub)
{
- struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
+ u32 timeout = htohl(sub->evt.s.timeout, sub->swap);
- tipc_nametbl_unsubscribe(sub);
- list_del(&sub->subscrp_list);
- kfree(sub);
- atomic_dec(&tn->subscription_count);
+ if (timeout == TIPC_WAIT_FOREVER || del_timer(&sub->timer))
+ tipc_subscrp_put(sub);
}
static void tipc_subscrp_cancel(struct tipc_subscr *s,
struct tipc_subscriber *subscriber)
{
- struct tipc_subscription *sub, *temp;
- u32 timeout;
-
- spin_lock_bh(&subscriber->lock);
- /* Find first matching subscription, exit if not found */
- list_for_each_entry_safe(sub, temp, &subscriber->subscrp_list,
- subscrp_list) {
- if (!memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) {
- timeout = htohl(sub->evt.s.timeout, sub->swap);
- if ((timeout == TIPC_WAIT_FOREVER) ||
- del_timer(&sub->timer)) {
- tipc_subscrp_delete(sub);
- tipc_subscrb_put(subscriber);
- }
- break;
- }
- }
- spin_unlock_bh(&subscriber->lock);
+ tipc_subscrb_subscrp_delete(subscriber, s);
}
static struct tipc_subscription *tipc_subscrp_create(struct net *net,
@@ -272,6 +288,7 @@ static struct tipc_subscription *tipc_subscrp_create(struct net *net,
sub->swap = swap;
memcpy(&sub->evt.s, s, sizeof(*s));
atomic_inc(&tn->subscription_count);
+ kref_init(&sub->kref);
return sub;
}
@@ -288,17 +305,16 @@ static void tipc_subscrp_subscribe(struct net *net, struct tipc_subscr *s,
spin_lock_bh(&subscriber->lock);
list_add(&sub->subscrp_list, &subscriber->subscrp_list);
- tipc_subscrb_get(subscriber);
sub->subscriber = subscriber;
tipc_nametbl_subscribe(sub);
+ tipc_subscrb_get(subscriber);
spin_unlock_bh(&subscriber->lock);
- timeout = htohl(sub->evt.s.timeout, swap);
- if (timeout == TIPC_WAIT_FOREVER)
- return;
-
setup_timer(&sub->timer, tipc_subscrp_timeout, (unsigned long)sub);
- mod_timer(&sub->timer, jiffies + msecs_to_jiffies(timeout));
+ timeout = htohl(sub->evt.s.timeout, swap);
+
+ if (timeout != TIPC_WAIT_FOREVER)
+ mod_timer(&sub->timer, jiffies + msecs_to_jiffies(timeout));
}
/* Handle one termination request for the subscriber */
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
index be60103..ffdc214 100644
--- a/net/tipc/subscr.h
+++ b/net/tipc/subscr.h
@@ -57,6 +57,7 @@ struct tipc_subscriber;
* @evt: template for events generated by subscription
*/
struct tipc_subscription {
+ struct kref kref;
struct tipc_subscriber *subscriber;
struct net *net;
struct timer_list timer;
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 60ee74c..7c8b406 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -456,6 +456,8 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
if (iftype == NL80211_IFTYPE_MESH_POINT)
skb_copy_bits(skb, hdrlen, &mesh_flags, 1);
+ mesh_flags &= MESH_FLAGS_AE;
+
switch (hdr->frame_control &
cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
case cpu_to_le16(IEEE80211_FCTL_TODS):
@@ -471,9 +473,9 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
iftype != NL80211_IFTYPE_STATION))
return -1;
if (iftype == NL80211_IFTYPE_MESH_POINT) {
- if (mesh_flags & MESH_FLAGS_AE_A4)
+ if (mesh_flags == MESH_FLAGS_AE_A4)
return -1;
- if (mesh_flags & MESH_FLAGS_AE_A5_A6) {
+ if (mesh_flags == MESH_FLAGS_AE_A5_A6) {
skb_copy_bits(skb, hdrlen +
offsetof(struct ieee80211s_hdr, eaddr1),
tmp.h_dest, 2 * ETH_ALEN);
@@ -489,9 +491,9 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
ether_addr_equal(tmp.h_source, addr)))
return -1;
if (iftype == NL80211_IFTYPE_MESH_POINT) {
- if (mesh_flags & MESH_FLAGS_AE_A5_A6)
+ if (mesh_flags == MESH_FLAGS_AE_A5_A6)
return -1;
- if (mesh_flags & MESH_FLAGS_AE_A4)
+ if (mesh_flags == MESH_FLAGS_AE_A4)
skb_copy_bits(skb, hdrlen +
offsetof(struct ieee80211s_hdr, eaddr1),
tmp.h_source, ETH_ALEN);
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index bbba7be..6faddfb 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -2509,7 +2509,7 @@ static int pcm_chmap_ctl_get(struct snd_kcontrol *kcontrol,
struct snd_pcm_substream *substream;
const struct snd_pcm_chmap_elem *map;
- if (snd_BUG_ON(!info->chmap))
+ if (!info->chmap)
return -EINVAL;
substream = snd_pcm_chmap_substream(info, idx);
if (!substream)
@@ -2541,7 +2541,7 @@ static int pcm_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
unsigned int __user *dst;
int c, count = 0;
- if (snd_BUG_ON(!info->chmap))
+ if (!info->chmap)
return -EINVAL;
if (size < 8)
return -ENOMEM;
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
index 00060c4..9741757 100644
--- a/sound/firewire/amdtp-stream.c
+++ b/sound/firewire/amdtp-stream.c
@@ -606,7 +606,9 @@ static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
cycle = increment_cycle_count(cycle, 1);
if (handle_out_packet(s, cycle, i) < 0) {
s->packet_index = -1;
- amdtp_stream_pcm_abort(s);
+ if (in_interrupt())
+ amdtp_stream_pcm_abort(s);
+ WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
return;
}
}
@@ -658,7 +660,9 @@ static void in_stream_callback(struct fw_iso_context *context, u32 tstamp,
/* Queueing error or detecting invalid payload. */
if (i < packets) {
s->packet_index = -1;
- amdtp_stream_pcm_abort(s);
+ if (in_interrupt())
+ amdtp_stream_pcm_abort(s);
+ WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
return;
}
diff --git a/sound/firewire/amdtp-stream.h b/sound/firewire/amdtp-stream.h
index c1bc7fa..f7c054b 100644
--- a/sound/firewire/amdtp-stream.h
+++ b/sound/firewire/amdtp-stream.h
@@ -124,7 +124,7 @@ struct amdtp_stream {
/* For a PCM substream processing. */
struct snd_pcm_substream *pcm;
struct tasklet_struct period_tasklet;
- unsigned int pcm_buffer_pointer;
+ snd_pcm_uframes_t pcm_buffer_pointer;
unsigned int pcm_period_pointer;
/* To wait for first packet. */
diff --git a/sound/soc/codecs/wcd-mbhc-v2.c b/sound/soc/codecs/wcd-mbhc-v2.c
index cb96f2b..3b2426d 100644
--- a/sound/soc/codecs/wcd-mbhc-v2.c
+++ b/sound/soc/codecs/wcd-mbhc-v2.c
@@ -2058,9 +2058,12 @@ void wcd_mbhc_deinit(struct wcd_mbhc *mbhc)
mbhc->mbhc_cb->free_irq(codec, mbhc->intr_ids->hph_right_ocp, mbhc);
if (mbhc->mbhc_cb && mbhc->mbhc_cb->register_notifier)
mbhc->mbhc_cb->register_notifier(mbhc, &mbhc->nblock, false);
- if (mbhc->mbhc_fn->wcd_cancel_hs_detect_plug)
+ if (mbhc->mbhc_fn->wcd_cancel_hs_detect_plug) {
+ WCD_MBHC_RSC_LOCK(mbhc);
mbhc->mbhc_fn->wcd_cancel_hs_detect_plug(mbhc,
&mbhc->correct_plug_swch);
+ WCD_MBHC_RSC_UNLOCK(mbhc);
+ }
mutex_destroy(&mbhc->codec_resource_lock);
mutex_destroy(&mbhc->hphl_pa_lock);
mutex_destroy(&mbhc->hphr_pa_lock);
diff --git a/sound/soc/msm/sdm845.c b/sound/soc/msm/sdm845.c
index e699760..d3c4e05 100644
--- a/sound/soc/msm/sdm845.c
+++ b/sound/soc/msm/sdm845.c
@@ -493,6 +493,8 @@ static SOC_ENUM_SINGLE_EXT_DECL(tert_mi2s_rx_chs, mi2s_ch_text);
static SOC_ENUM_SINGLE_EXT_DECL(tert_mi2s_tx_chs, mi2s_ch_text);
static SOC_ENUM_SINGLE_EXT_DECL(quat_mi2s_rx_chs, mi2s_ch_text);
static SOC_ENUM_SINGLE_EXT_DECL(quat_mi2s_tx_chs, mi2s_ch_text);
+static SOC_ENUM_SINGLE_EXT_DECL(mi2s_rx_format, bit_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(mi2s_tx_format, bit_format_text);
static SOC_ENUM_SINGLE_EXT_DECL(hifi_function, hifi_text);
static struct platform_device *spdev;
@@ -2250,6 +2252,54 @@ static int mi2s_get_sample_rate(int value)
return sample_rate;
}
+static int mi2s_get_format(int value)
+{
+ int format;
+
+ switch (value) {
+ case 0:
+ format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ case 1:
+ format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 2:
+ format = SNDRV_PCM_FORMAT_S24_3LE;
+ break;
+ case 3:
+ format = SNDRV_PCM_FORMAT_S32_LE;
+ break;
+ default:
+ format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ return format;
+}
+
+static int mi2s_get_format_value(int format)
+{
+ int value;
+
+ switch (format) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ value = 0;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ value = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S24_3LE:
+ value = 2;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ value = 3;
+ break;
+ default:
+ value = 0;
+ break;
+ }
+ return value;
+}
+
static int mi2s_rx_sample_rate_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -2382,6 +2432,78 @@ static int msm_mi2s_tx_ch_put(struct snd_kcontrol *kcontrol,
return 1;
}
+static int msm_mi2s_rx_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = mi2s_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ ucontrol->value.enumerated.item[0] =
+ mi2s_get_format_value(mi2s_rx_cfg[idx].bit_format);
+
+ pr_debug("%s: idx[%d]_rx_format = %d, item = %d\n", __func__,
+ idx, mi2s_rx_cfg[idx].bit_format,
+ ucontrol->value.enumerated.item[0]);
+
+ return 0;
+}
+
+static int msm_mi2s_rx_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = mi2s_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ mi2s_rx_cfg[idx].bit_format =
+ mi2s_get_format(ucontrol->value.enumerated.item[0]);
+
+ pr_debug("%s: idx[%d]_rx_format = %d, item = %d\n", __func__,
+ idx, mi2s_rx_cfg[idx].bit_format,
+ ucontrol->value.enumerated.item[0]);
+
+ return 0;
+}
+
+static int msm_mi2s_tx_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = mi2s_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ ucontrol->value.enumerated.item[0] =
+ mi2s_get_format_value(mi2s_tx_cfg[idx].bit_format);
+
+ pr_debug("%s: idx[%d]_tx_format = %d, item = %d\n", __func__,
+ idx, mi2s_tx_cfg[idx].bit_format,
+ ucontrol->value.enumerated.item[0]);
+
+ return 0;
+}
+
+static int msm_mi2s_tx_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = mi2s_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ mi2s_tx_cfg[idx].bit_format =
+ mi2s_get_format(ucontrol->value.enumerated.item[0]);
+
+ pr_debug("%s: idx[%d]_tx_format = %d, item = %d\n", __func__,
+ idx, mi2s_tx_cfg[idx].bit_format,
+ ucontrol->value.enumerated.item[0]);
+
+ return 0;
+}
+
static int msm_hifi_ctrl(struct snd_soc_codec *codec)
{
struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
@@ -2627,6 +2749,22 @@ static const struct snd_kcontrol_new msm_snd_controls[] = {
msm_mi2s_rx_ch_get, msm_mi2s_rx_ch_put),
SOC_ENUM_EXT("QUAT_MI2S_TX Channels", quat_mi2s_tx_chs,
msm_mi2s_tx_ch_get, msm_mi2s_tx_ch_put),
+ SOC_ENUM_EXT("PRIM_MI2S_RX Format", mi2s_rx_format,
+ msm_mi2s_rx_format_get, msm_mi2s_rx_format_put),
+ SOC_ENUM_EXT("PRIM_MI2S_TX Format", mi2s_tx_format,
+ msm_mi2s_tx_format_get, msm_mi2s_tx_format_put),
+ SOC_ENUM_EXT("SEC_MI2S_RX Format", mi2s_rx_format,
+ msm_mi2s_rx_format_get, msm_mi2s_rx_format_put),
+ SOC_ENUM_EXT("SEC_MI2S_TX Format", mi2s_tx_format,
+ msm_mi2s_tx_format_get, msm_mi2s_tx_format_put),
+ SOC_ENUM_EXT("TERT_MI2S_RX Format", mi2s_rx_format,
+ msm_mi2s_rx_format_get, msm_mi2s_rx_format_put),
+ SOC_ENUM_EXT("TERT_MI2S_TX Format", mi2s_tx_format,
+ msm_mi2s_tx_format_get, msm_mi2s_tx_format_put),
+ SOC_ENUM_EXT("QUAT_MI2S_RX Format", mi2s_rx_format,
+ msm_mi2s_rx_format_get, msm_mi2s_rx_format_put),
+ SOC_ENUM_EXT("QUAT_MI2S_TX Format", mi2s_tx_format,
+ msm_mi2s_tx_format_get, msm_mi2s_tx_format_put),
SOC_ENUM_EXT("HiFi Function", hifi_function, msm_hifi_get,
msm_hifi_put),
};
@@ -3052,48 +3190,64 @@ static int msm_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
break;
case MSM_BACKEND_DAI_PRI_MI2S_RX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_rx_cfg[PRIM_MI2S].bit_format);
rate->min = rate->max = mi2s_rx_cfg[PRIM_MI2S].sample_rate;
channels->min = channels->max =
mi2s_rx_cfg[PRIM_MI2S].channels;
break;
case MSM_BACKEND_DAI_PRI_MI2S_TX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_tx_cfg[PRIM_MI2S].bit_format);
rate->min = rate->max = mi2s_tx_cfg[PRIM_MI2S].sample_rate;
channels->min = channels->max =
mi2s_tx_cfg[PRIM_MI2S].channels;
break;
case MSM_BACKEND_DAI_SECONDARY_MI2S_RX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_rx_cfg[SEC_MI2S].bit_format);
rate->min = rate->max = mi2s_rx_cfg[SEC_MI2S].sample_rate;
channels->min = channels->max =
mi2s_rx_cfg[SEC_MI2S].channels;
break;
case MSM_BACKEND_DAI_SECONDARY_MI2S_TX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_tx_cfg[SEC_MI2S].bit_format);
rate->min = rate->max = mi2s_tx_cfg[SEC_MI2S].sample_rate;
channels->min = channels->max =
mi2s_tx_cfg[SEC_MI2S].channels;
break;
case MSM_BACKEND_DAI_TERTIARY_MI2S_RX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_rx_cfg[TERT_MI2S].bit_format);
rate->min = rate->max = mi2s_rx_cfg[TERT_MI2S].sample_rate;
channels->min = channels->max =
mi2s_rx_cfg[TERT_MI2S].channels;
break;
case MSM_BACKEND_DAI_TERTIARY_MI2S_TX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_tx_cfg[TERT_MI2S].bit_format);
rate->min = rate->max = mi2s_tx_cfg[TERT_MI2S].sample_rate;
channels->min = channels->max =
mi2s_tx_cfg[TERT_MI2S].channels;
break;
case MSM_BACKEND_DAI_QUATERNARY_MI2S_RX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_rx_cfg[QUAT_MI2S].bit_format);
rate->min = rate->max = mi2s_rx_cfg[QUAT_MI2S].sample_rate;
channels->min = channels->max =
mi2s_rx_cfg[QUAT_MI2S].channels;
break;
case MSM_BACKEND_DAI_QUATERNARY_MI2S_TX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_tx_cfg[QUAT_MI2S].bit_format);
rate->min = rate->max = mi2s_tx_cfg[QUAT_MI2S].sample_rate;
channels->min = channels->max =
mi2s_tx_cfg[QUAT_MI2S].channels;
@@ -3855,6 +4009,7 @@ static u32 get_mi2s_bits_per_sample(u32 bit_format)
u32 bit_per_sample;
switch (bit_format) {
+ case SNDRV_PCM_FORMAT_S32_LE:
case SNDRV_PCM_FORMAT_S24_3LE:
case SNDRV_PCM_FORMAT_S24_LE:
bit_per_sample = 32;