Merge "drm/msm/sde: Avoid event notification for SDE power ON case"
diff --git a/Documentation/devicetree/bindings/arm/arch_timer.txt b/Documentation/devicetree/bindings/arm/arch_timer.txt
index ad440a2..e926aea 100644
--- a/Documentation/devicetree/bindings/arm/arch_timer.txt
+++ b/Documentation/devicetree/bindings/arm/arch_timer.txt
@@ -31,6 +31,12 @@
This also affects writes to the tval register, due to the implicit
counter read.
+- hisilicon,erratum-161010101 : A boolean property. Indicates the
+ presence of Hisilicon erratum 161010101, which says that reading the
+ counters is unreliable in some cases, and reads may return a value 32
+ beyond the correct value. This also affects writes to the tval
+ registers, due to the implicit counter read.
+
** Optional properties:
- arm,cpu-registers-not-fw-configured : Firmware does not initialize
diff --git a/Documentation/devicetree/bindings/arm/davinci.txt b/Documentation/devicetree/bindings/arm/davinci.txt
index f0841ce..715622c 100644
--- a/Documentation/devicetree/bindings/arm/davinci.txt
+++ b/Documentation/devicetree/bindings/arm/davinci.txt
@@ -13,6 +13,10 @@
Required root node properties:
- compatible = "enbw,cmc", "ti,da850;
+LEGO MINDSTORMS EV3 (AM1808 based)
+Required root node properties:
+ - compatible = "lego,ev3", "ti,da850";
+
Generic DaVinci Boards
----------------------
diff --git a/Documentation/devicetree/bindings/arm/msm/msm.txt b/Documentation/devicetree/bindings/arm/msm/msm.txt
index 61226c9..b3d4d44 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm.txt
@@ -172,6 +172,9 @@
- HDK device:
compatible = "qcom,hdk"
+- IPC device:
+ compatible = "qcom,ipc"
+
Boards (SoC type + board variant):
compatible = "qcom,apq8016"
@@ -201,6 +204,7 @@
compatible = "qcom,apq8017-mtp"
compatible = "qcom,apq8053-cdp"
compatible = "qcom,apq8053-mtp"
+compatible = "qcom,apq8053-ipc"
compatible = "qcom,mdm9630-cdp"
compatible = "qcom,mdm9630-mtp"
compatible = "qcom,mdm9630-sim"
@@ -311,6 +315,7 @@
compatible = "qcom,msm8953-sim"
compatible = "qcom,msm8953-cdp"
compatible = "qcom,msm8953-mtp"
+compatible = "qcom,msm8953-ipc"
compatible = "qcom,msm8953-qrd"
compatible = "qcom,msm8953-qrd-sku3"
compatible = "qcom,sdm450-mtp"
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt b/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt
index bce983a..7496f4d 100644
--- a/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt
@@ -21,10 +21,27 @@
Usage: required
Value type: <stringlist>
Definition: Address names. Must be "osm_l3_base", "osm_pwrcl_base",
- "osm_perfcl_base".
+ "osm_perfcl_base", and "cpr_rc".
Must be specified in the same order as the corresponding
addresses are specified in the reg property.
+- vdd_l3_mx_ao-supply
+ Usage: required
+ Value type: <phandle>
+ Definition: Phandle to the MX active-only regulator device.
+
+- vdd_pwrcl_mx_ao-supply
+ Usage: required
+ Value type: <phandle>
+ Definition: Phandle to the MX active-only regulator device.
+
+- qcom,mx-turbo-freq
+ Usage: required
+ Value type: <array>
+ Definition: List of frequencies for the 3 clock domains (following the
+ order of L3, power, and performance clusters) that denote
+ the lowest rate that requires a TURBO vote on the MX rail.
+
- l3-devs
Usage: optional
Value type: <phandle>
@@ -46,10 +63,15 @@
compatible = "qcom,clk-cpu-osm";
reg = <0x17d41000 0x1400>,
<0x17d43000 0x1400>,
- <0x17d45800 0x1400>;
- reg-names = "osm_l3_base", "osm_pwrcl_base", "osm_perfcl_base";
+ <0x17d45800 0x1400>,
+ <0x784248 0x4>;
+ reg-names = "osm_l3_base", "osm_pwrcl_base", "osm_perfcl_base",
+ "cpr_rc";
+ vdd_l3_mx_ao-supply = <&pm8998_s6_level_ao>;
+ vdd_pwrcl_mx_ao-supply = <&pm8998_s6_level_ao>;
- l3-devs = <&phandle0 &phandle1 &phandle2>;
+ qcom,mx-turbo-freq = <1478400000 1689600000 3300000001>;
+ l3-devs = <&l3_cpu0 &l3_cpu4 &l3_cdsp>;
clock-names = "xo_ao";
clocks = <&clock_rpmh RPMH_CXO_CLK_A>;
diff --git a/Documentation/devicetree/bindings/clock/qcom,a7-cpucc.txt b/Documentation/devicetree/bindings/clock/qcom,a7-cpucc.txt
new file mode 100644
index 0000000..2782b9c
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,a7-cpucc.txt
@@ -0,0 +1,48 @@
+Qualcomm Application A7 CPU clock driver
+-------------------------------------
+
+It is the clock controller driver which provides higher frequency
+clocks and allows A7 CPU frequency scaling on sdxpoorwills based platforms.
+
+Required properties:
+- compatible : shall contain only one of the following:
+ "qcom,cpu-sdxpoorwills",
+- clocks : Phandle to the clock device.
+- clock-names: Names of the used clocks.
+- qcom,a7cc-init-rate = Initial rate which needs to be set from cpu driver.
+- reg : shall contain base register offset and size.
+- reg-names : Names of the bases for the above registers.
+- vdd_dig_ao-supply : The regulator powering the APSS PLL.
+- cpu-vdd-supply : The regulator powering the APSS RCG.
+- qcom,rcg-reg-offset : Register offset for APSS RCG.
+- qcom,speedX-bin-vZ : A table of CPU frequency (Hz) to regulator voltage (uV) mapping.
+ Format: <freq uV>
+ This represents the max frequency possible for each possible
+ power configuration for a CPU that's binned as speed bin X,
+ speed bin revision Z. Speed bin values can be between [0-7]
+ and the version can be between [0-3].
+- #clock-cells : shall contain 1.
+
+Optional properties :
+- reg-names: "efuse",
+
+Example:
+ clock_cpu: qcom,clock-a7@17808100 {
+ compatible = "qcom,cpu-sdxpoorwills";
+ clocks = <&clock_rpmh RPMH_CXO_CLK_A>;
+ clock-names = "xo_ao";
+ qcom,a7cc-init-rate = <1497600000>;
+ reg = <0x17808100 0x7F10>;
+ reg-names = "apcs_pll";
+
+ vdd_dig_ao-supply = <&pmxpoorwills_s5_level_ao>;
+ cpu-vdd-supply = <&pmxpoorwills_s5_level_ao>;
+ qcom,rcg-reg-offset = <0x7F08>;
+ qcom,speed0-bin-v0 =
+ < 0 RPMH_REGULATOR_LEVEL_OFF>,
+ < 345600000 RPMH_REGULATOR_LEVEL_LOW_SVS>,
+ < 576000000 RPMH_REGULATOR_LEVEL_SVS>,
+ < 1094400000 RPMH_REGULATOR_LEVEL_NOM>,
+ < 1497600000 RPMH_REGULATOR_LEVEL_TURBO>;
+ #clock-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc.txt b/Documentation/devicetree/bindings/clock/qcom,gcc.txt
index 78bb87a..7330db4 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc.txt
@@ -21,6 +21,7 @@
"qcom,gcc-sdm845-v2.1"
"qcom,gcc-sdm670"
"qcom,debugcc-sdm845"
+ "qcom,gcc-sdxpoorwills"
- reg : shall contain base register location and length
- #clock-cells : shall contain 1
diff --git a/Documentation/devicetree/bindings/clock/qcom,rpmh.txt b/Documentation/devicetree/bindings/clock/qcom,rpmh.txt
index 9ad7263..d57f61a 100644
--- a/Documentation/devicetree/bindings/clock/qcom,rpmh.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,rpmh.txt
@@ -3,6 +3,7 @@
Required properties :
- compatible : shall contain "qcom,rpmh-clk-sdm845" or "qcom,rpmh-clk-sdm670"
+ or "qcom,rpmh-clk-sdxpoorwills"
- #clock-cells : must contain 1
- mboxes : list of RPMh mailbox phandle and channel identifier tuples.
diff --git a/Documentation/devicetree/bindings/clock/qoriq-clock.txt b/Documentation/devicetree/bindings/clock/qoriq-clock.txt
index 16a3ec4..1bd2c76 100644
--- a/Documentation/devicetree/bindings/clock/qoriq-clock.txt
+++ b/Documentation/devicetree/bindings/clock/qoriq-clock.txt
@@ -31,6 +31,7 @@
* "fsl,t4240-clockgen"
* "fsl,b4420-clockgen"
* "fsl,b4860-clockgen"
+ * "fsl,ls1012a-clockgen"
* "fsl,ls1021a-clockgen"
Chassis-version clock strings include:
* "fsl,qoriq-clockgen-1.0": for chassis 1.0 clocks
diff --git a/Documentation/devicetree/bindings/leds/leds-qpnp-flash.txt b/Documentation/devicetree/bindings/leds/leds-qpnp-flash.txt
new file mode 100644
index 0000000..a7a2eda
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-qpnp-flash.txt
@@ -0,0 +1,180 @@
+Qualcomm Technologies Inc. PNP Flash LED
+
+QPNP (Qualcomm Technologies Inc. Plug N Play) Flash LED (Light
+Emitting Diode) driver is used to provide illumination to
+camera sensor when background light is dim to capture good
+picture. It can also be used for flashlight/torch application.
+It is part of PMIC on Qualcomm Technologies Inc. reference platforms.
+The PMIC is connected to the host processor via SPMI bus.
+
+Required properties:
+- compatible : should be "qcom,qpnp-flash-led"
+- reg : base address and size for flash LED modules
+
+Optional properties:
+- qcom,headroom : headroom to use. Values should be 250, 300,
+ 400 and 500 in mV.
+- qcom,startup-dly : delay before flashing after flash executed.
+					Values should be 10, 32, 64, and 128 in us.
+- qcom,clamp-curr : current to clamp at when voltage droop happens.
+ Values are in integer from 0 to 1000 inclusive,
+ indicating 0 to 1000 mA.
+- qcom,self-check-enabled : boolean type. self fault check enablement
+- qcom,thermal-derate-enabled : boolean type. derate enablement when module
+ temperature reaches threshold
+- qcom,thermal-derate-threshold : thermal threshold for derate. Values
+ should be 95, 105, 115, 125 in C.
+- qcom,thermal-derate-rate : derate rate when module temperature
+ reaches threshold. Values should be
+ "1_PERCENT", "1P25_PERCENT", "2_PERCENT",
+ "2P5_PERCENT", "5_PERCENT" in string.
+- qcom,current-ramp-enabled : boolean type. stepped current ramp enablement
+- qcom,ramp-up-step : current ramp up rate. Values should be
+ "0P2US", "0P4US", "0P8US", "1P6US", "3P3US",
+ "6P7US", "13P5US", "27US".
+- qcom,ramp-dn-step : current ramp down rate. Values should be
+ "0P2US", "0P4US", "0P8US", "1P6US", "3P3US",
+ "6P7US", "13P5US", "27US".
+- qcom,vph-pwr-droop-enabled : boolean type. VPH power droop enablement. Enablement
+ allows current clamp when phone power drops below
+ pre-determined threshold
+- qcom,vph-pwr-droop-threshold : VPH power threshold for module to clamp current.
+ Values are 2500 - 3200 in mV with 100 mV steps.
+- qcom,vph-pwr-droop-debounce-time : debounce time for module to confirm a voltage
+ droop is happening. Values are 0, 10, 32, 64
+ in us.
+- qcom,pmic-charger-support : Boolean type. This tells if flash utilizes charger boost
+ support
+- qcom,headroom-sense-ch0-enabled: Boolean type. This configures headroom sensing enablement
+ for LED channel 0
+- qcom,headroom-sense-ch1-enabled: Boolean type. This configures headroom sensing enablement
+ for LED channel 1
+- qcom,power-detect-enabled : Boolean type. This enables driver to get maximum flash LED
+ current at current battery level to avoid intensity clamp
+ when battery voltage is low
+- qcom,otst2-moduled-enabled : Boolean type. This enables driver to enable MASK to support
+ OTST2 connection.
+- qcom,follow-otst2-rb-disabled : Boolean type. This allows driver to reset/deset module.
+ By default, driver resets module. This entry allows driver to
+ bypass reset module sequence.
+- qcom,die-current-derate-enabled: Boolean type. This enables driver to get maximum flash LED
+ current, based on PMIC die temperature threshold to
+ avoid significant current derate from hardware. This property
+ is not needed if PMIC is older than PMI8994v2.0.
+- qcom,die-temp-vadc : VADC channel source for flash LED. This property is not
+ needed if PMIC is older than PMI8994v2.0.
+- qcom,die-temp-threshold : Integer type array for PMIC die temperature threshold.
+ Array should have at least one value. Values should be in
+					celsius. This property is not needed if PMIC is older than
+ PMI8994v2.0.
+- qcom,die-temp-derate-current	: Integer type array for PMIC die temperature derate
+ current. Array should have at least one value. Values
+ should be in mA. This property is not needed if PMIC is older
+ than PMI8994v2.0.
+
+Required properties inside child node. Child node contains settings for each individual LED.
+Each LED hardware needs a node for itself and a switch node to control brightness.
+For the purpose of turning on/off LED and better regulator control, "led:switch" node
+is introduced. "led:switch" acquires several existing properties from other nodes for
+operational simplification. For backward compatibility purpose, switch node can be optional:
+- label : type of led that will be used, either "flash" or "torch".
+- qcom,led-name : name of the LED. Accepted values are "led:flash_0",
+ "led:flash_1", "led:torch_0", "led:torch_1"
+- qcom,default-led-trigger : trigger for the camera flash and torch. Accepted values are
+ "flash0_trigger", "flash1_trigger", "torch0_trigger", torch1_trigger"
+- qcom,id : enumerated ID for each physical LED. Accepted values are "0",
+ "1", etc..
+- qcom,max-current : maximum current allowed on this LED. Valid values should be
+ integer from 0 to 1000 inclusive, indicating 0 to 1000 mA.
+- qcom,pmic-revid : PMIC revision id source. This property is needed for PMI8996
+ revision check.
+
+Optional properties inside child node:
+- qcom,current : default current intensity for LED. Accepted values should be
+				integer from 0 to 1000 inclusive, indicating 0 to 1000 mA.
+- qcom,duration : Duration for flash LED. When duration time expires, hardware will turn off
+ flash LED. Values should be from 10 ms to 1280 ms with 10 ms incremental
+ step. Not applicable to torch. It is required for LED:SWITCH node to handle
+ LED used as flash.
+- reg<n> : reg<n> (<n> represents number. eg 0,1,2,..) property is to add support for
+ multiple power sources. It includes two properties regulator-name and max-voltage.
+ Required property inside regulator node:
+ - regulator-name : This denotes this node is a regulator node and which
+ regulator to use.
+ Optional property inside regulator node:
+ - max-voltage : This specifies max voltage of regulator. Some switch
+ or boost regulator does not need this property.
+
+Example:
+ qcom,leds@d300 {
+ compatible = "qcom,qpnp-flash-led";
+ status = "okay";
+ reg = <0xd300 0x100>;
+ label = "flash";
+ qcom,headroom = <500>;
+ qcom,startup-dly = <128>;
+ qcom,clamp-curr = <200>;
+ qcom,pmic-charger-support;
+ qcom,self-check-enabled;
+ qcom,thermal-derate-enabled;
+ qcom,thermal-derate-threshold = <80>;
+ qcom,thermal-derate-rate = "4_PERCENT";
+ qcom,current-ramp-enabled;
+		qcom,ramp-up-step = "27US";
+		qcom,ramp-dn-step = "27US";
+ qcom,vph-pwr-droop-enabled;
+ qcom,vph-pwr-droop-threshold = <3200>;
+ qcom,vph-pwr-droop-debounce-time = <10>;
+ qcom,headroom-sense-ch0-enabled;
+ qcom,headroom-sense-ch1-enabled;
+ qcom,die-current-derate-enabled;
+ qcom,die-temp-vadc = <&pmi8994_vadc>;
+ qcom,die-temp-threshold = <85 80 75 70 65>;
+ qcom,die-temp-derate-current = <400 800 1200 1600 2000>;
+ qcom,pmic-revid = <&pmi8994_revid>;
+
+ pm8226_flash0: qcom,flash_0 {
+ label = "flash";
+ qcom,led-name = "led:flash_0";
+ qcom,default-led-trigger =
+ "flash0_trigger";
+ qcom,max-current = <1000>;
+ qcom,id = <0>;
+ qcom,duration = <1280>;
+ qcom,current = <625>;
+ };
+
+ pm8226_torch: qcom,torch_0 {
+ label = "torch";
+ qcom,led-name = "led:torch_0";
+ qcom,default-led-trigger =
+ "torch0_trigger";
+ boost-supply = <&pm8226_chg_boost>;
+ qcom,max-current = <200>;
+ qcom,id = <0>;
+ qcom,current = <120>;
+ qcom,max-current = <200>;
+ reg0 {
+ regulator-name =
+ "pm8226_chg_boost";
+ max-voltage = <3600000>;
+ };
+ };
+
+ pm8226_switch: qcom,switch {
+			label = "switch";
+ qcom,led-name = "led:switch";
+ qcom,default-led-trigger =
+ "switch_trigger";
+ qcom,id = <2>;
+ qcom,current = <625>;
+ qcom,duration = <1280>;
+ qcom,max-current = <1000>;
+ reg0 {
+ regulator-name =
+ "pm8226_chg_boost";
+ max-voltage = <3600000>;
+ };
+ };
+ };
+
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-cci.txt b/Documentation/devicetree/bindings/media/video/msm-cam-cci.txt
index 1127544..cd4d222 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cam-cci.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-cci.txt
@@ -180,6 +180,9 @@
should contain phandle of respective ir-cut node
- qcom,special-support-sensors: if only some special sensors are supported
on this board, add sensor name in this property.
+- use-shared-clk : It is a boolean property. This property is required
+ if the clk is shared clk between different sensor and ois, if this
+ device need to be opened together.
- clock-rates: clock rate in Hz.
 - clock-cntl-level: says what all different clock level node has.
- clock-cntl-support: Says whether clock control support is present or not
@@ -248,6 +251,9 @@
required from the regulators mentioned in the regulator-names property
(in the same order).
- cam_vaf-supply : should contain regulator from which ois voltage is supplied
+- use-shared-clk : It is a boolean property. This property is required
+ if the clk is shared clk between different sensor and ois, if this
+ device need to be opened together.
Example:
@@ -354,8 +360,8 @@
status = "ok";
shared-gpios = <18 19>;
pinctrl-names = "cam_res_mgr_default", "cam_res_mgr_suspend";
- pinctrl-0 = <&cam_res_mgr_active>;
- pinctrl-1 = <&cam_res_mgr_suspend>;
+ pinctrl-0 = <&cam_shared_clk_active &cam_res_mgr_active>;
+ pinctrl-1 = <&cam_shared_clk_suspend &cam_res_mgr_suspend>;
};
qcom,cam-sensor@0 {
@@ -374,7 +380,7 @@
cam_vio-supply = <&pm845_lvs1>;
cam_vana-supply = <&pmi8998_bob>;
regulator-names = "cam_vdig", "cam_vio", "cam_vana";
- rgltr-cntrl-support;
+ rgltr-cntrl-support;
rgltr-min-voltage = <0 3312000 1352000>;
rgltr-max-voltage = <0 3312000 1352000>;
rgltr-load-current = <0 80000 105000>;
@@ -398,6 +404,7 @@
sensor-mode = <0>;
cci-master = <0>;
status = "ok";
+ use-shared-clk;
clocks = <&clock_mmss clk_mclk0_clk_src>,
<&clock_mmss clk_camss_mclk0_clk>;
clock-names = "cam_src_clk", "cam_clk";
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-lrme.txt b/Documentation/devicetree/bindings/media/video/msm-cam-lrme.txt
new file mode 100644
index 0000000..9a37922
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-lrme.txt
@@ -0,0 +1,149 @@
+* Qualcomm Technologies, Inc. MSM Camera LRME
+
+The MSM camera Low Resolution Motion Estimation device provides dependency
+definitions for enabling Camera LRME HW. MSM camera LRME is implemented in
+multiple device nodes. The root LRME device node has properties defined to
+hint the driver about the LRME HW nodes available during the probe sequence.
+Each node has multiple properties defined for interrupts, clocks and
+regulators.
+
+=======================
+Required Node Structure
+=======================
+LRME root interface node takes care of the handling LRME high level
+driver handling and controls underlying LRME hardware present.
+
+- compatible
+ Usage: required
+ Value type: <string>
+ Definition: Should be "qcom,cam-lrme"
+
+- compat-hw-name
+ Usage: required
+ Value type: <string>
+ Definition: Should be "qcom,lrme"
+
+- num-lrme
+ Usage: required
+ Value type: <u32>
+ Definition: Number of supported LRME HW blocks
+
+Example:
+ qcom,cam-lrme {
+ compatible = "qcom,cam-lrme";
+ compat-hw-name = "qcom,lrme";
+ num-lrme = <1>;
+ };
+
+=======================
+Required Node Structure
+=======================
+LRME Node provides interface for Low Resolution Motion Estimation hardware
+driver about the device register map, interrupt map, clocks, regulators.
+
+- cell-index
+ Usage: required
+ Value type: <u32>
+ Definition: Node instance number
+
+- compatible
+ Usage: required
+ Value type: <string>
+ Definition: Should be "qcom,lrme"
+
+- reg-names
+ Usage: optional
+ Value type: <string>
+ Definition: Name of the register resources
+
+- reg
+ Usage: optional
+ Value type: <u32>
+ Definition: Register values
+
+- reg-cam-base
+ Usage: optional
+ Value type: <u32>
+ Definition: Offset of the register space compared to
+ to Camera base register space
+
+- interrupt-names
+ Usage: optional
+ Value type: <string>
+ Definition: Name of the interrupt
+
+- interrupts
+ Usage: optional
+ Value type: <u32>
+ Definition: Interrupt line associated with LRME HW
+
+- regulator-names
+ Usage: required
+ Value type: <string>
+ Definition: Name of the regulator resources for LRME HW
+
+- camss-supply
+ Usage: required
+ Value type: <phandle>
+ Definition: Regulator reference corresponding to the names listed
+ in "regulator-names"
+
+- clock-names
+ Usage: required
+ Value type: <string>
+ Definition: List of clock names required for LRME HW
+
+- clocks
+ Usage: required
+ Value type: <phandle>
+ Definition: List of clocks required for LRME HW
+
+- clock-rates
+ Usage: required
+ Value type: <u32>
+ Definition: List of clocks rates
+
+- clock-cntl-level
+ Usage: required
+ Value type: <string>
+ Definition: List of strings corresponds clock-rates levels
+ Supported strings: minsvs, lowsvs, svs, svs_l1, nominal, turbo
+
+- src-clock-name
+ Usage: required
+ Value type: <string>
+ Definition: Source clock name
+
+Examples:
+ cam_lrme: qcom,lrme@ac6b000 {
+ cell-index = <0>;
+ compatible = "qcom,lrme";
+ reg-names = "lrme";
+ reg = <0xac6b000 0xa00>;
+ reg-cam-base = <0x6b000>;
+ interrupt-names = "lrme";
+ interrupts = <0 476 0>;
+ regulator-names = "camss";
+ camss-supply = <&titan_top_gdsc>;
+ clock-names = "camera_ahb",
+ "camera_axi",
+ "soc_ahb_clk",
+ "cpas_ahb_clk",
+ "camnoc_axi_clk",
+ "lrme_clk_src",
+ "lrme_clk";
+ clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+ <&clock_gcc GCC_CAMERA_AXI_CLK>,
+ <&clock_camcc CAM_CC_SOC_AHB_CLK>,
+ <&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+ <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+ <&clock_camcc CAM_CC_LRME_CLK_SRC>,
+ <&clock_camcc CAM_CC_LRME_CLK>;
+ clock-rates = <0 0 0 0 0 0 0>,
+ <0 0 0 0 0 19200000 19200000>,
+ <0 0 0 0 0 19200000 19200000>,
+ <0 0 0 0 0 19200000 19200000>;
+ clock-cntl-level = "lowsvs", "svs", "svs_l1", "turbo";
+ src-clock-name = "lrme_core_clk_src";
+ };
+
diff --git a/Documentation/devicetree/bindings/pci/msm_ep_pcie.txt b/Documentation/devicetree/bindings/pci/msm_ep_pcie.txt
new file mode 100644
index 0000000..faf56c2
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/msm_ep_pcie.txt
@@ -0,0 +1,141 @@
+MSM PCI express endpoint
+
+Required properties:
+ - compatible: should be "qcom,pcie-ep".
+ - reg: should contain PCIe register maps.
+ - reg-names: indicates various resources passed to driver by name.
+ Should be "msi", "dm_core", "elbi", "parf", "phy", "mmio".
+ These correspond to different modules within the PCIe domain.
+ - #address-cells: Should provide a value of 0.
+ - interrupt-parent: Should be the PCIe device node itself here.
+ - interrupts: Should be in the format <0 1 2> and it is an index to the
+ interrupt-map that contains PCIe related interrupts.
+ - #interrupt-cells: Should provide a value of 1.
+ - interrupt-map-mask: should provide a value of 0xffffffff.
+ - interrupt-map: Must create mapping for the number of interrupts
+ that are defined in above interrupts property.
+ For PCIe device node, it should define 6 mappings for
+ the corresponding PCIe interrupts supporting the
+ specification.
+ - interrupt-names: indicates interrupts passed to driver by name.
+ Should be "int_pm_turnoff", "int_dstate_change",
+ "int_l1sub_timeout", "int_link_up",
+ "int_link_down", "int_bridge_flush_n".
+ - perst-gpio: PERST GPIO specified by PCIe spec.
+ - wake-gpio: WAKE GPIO specified by PCIe spec.
+ - clkreq-gpio: CLKREQ GPIO specified by PCIe spec.
+ - <supply-name>-supply: phandle to the regulator device tree node.
+ Refer to the schematics for the corresponding voltage regulators.
+ vreg-1.8-supply: phandle to the analog supply for the PCIe controller.
+ vreg-0.9-supply: phandle to the analog supply for the PCIe controller.
+
+Optional Properties:
+ - qcom,<supply-name>-voltage-level: specifies voltage levels for supply.
+ Should be specified in pairs (max, min, optimal), units uV.
+ - clock-names: list of names of clock inputs.
+ Should be "pcie_0_pipe_clk",
+ "pcie_0_aux_clk", "pcie_0_cfg_ahb_clk",
+ "pcie_0_mstr_axi_clk", "pcie_0_slv_axi_clk",
+ "pcie_0_ldo";
+ - max-clock-frequency-hz: list of the maximum operating frequencies stored
+ in the same order of clock names;
+ - resets: reset specifier pair consists of phandle for the reset controller
+ and reset lines used by this controller.
+ - reset-names: reset signal names sorted in the same order as the property
+ of resets.
+ - qcom,pcie-phy-ver: version of PCIe PHY.
+ - qcom,phy-init: The initialization sequence to bring up the PCIe PHY.
+ Should be specified in groups (offset, value, delay, direction).
+ - qcom,phy-status-reg: Register offset for PHY status.
+ - qcom,dbi-base-reg: Register offset for DBI base address.
+ - qcom,slv-space-reg: Register offset for slave address space size.
+ - qcom,pcie-link-speed: generation of PCIe link speed. The value could be
+ 1, 2 or 3.
+ - qcom,pcie-active-config: boolean type; active configuration of PCIe
+ addressing.
+ - qcom,pcie-aggregated-irq: boolean type; interrupts are aggregated.
+ - qcom,pcie-mhi-a7-irq: boolean type; MHI a7 has separate irq.
+ - qcom,pcie-perst-enum: Link enumeration will be triggered by PERST
+ deassertion.
+ - mdm2apstatus-gpio: GPIO used by PCIe endpoint side to notify the host side.
+ - Refer to "Documentation/devicetree/bindings/arm/msm/msm_bus.txt" for
+ below optional properties:
+ - qcom,msm-bus,name
+ - qcom,msm-bus,num-cases
+ - qcom,msm-bus,num-paths
+ - qcom,msm-bus,vectors-KBps
+
+Example:
+
+ pcie_ep: qcom,pcie@bfffd000 {
+ compatible = "qcom,pcie-ep";
+
+ reg = <0xbfffd000 0x1000>,
+ <0xbfffe000 0x1000>,
+ <0xbffff000 0x1000>,
+ <0xfc520000 0x2000>,
+ <0xfc526000 0x1000>,
+ <0xfc527000 0x1000>;
+ reg-names = "msi", "dm_core", "elbi", "parf", "phy", "mmio";
+
+ #address-cells = <0>;
+ interrupt-parent = <&pcie_ep>;
+ interrupts = <0 1 2 3 4 5>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0xffffffff>;
+ interrupt-map = <0 &intc 0 44 0
+ 1 &intc 0 46 0
+ 2 &intc 0 47 0
+ 3 &intc 0 50 0
+ 4 &intc 0 51 0
+ 5 &intc 0 52 0>;
+ interrupt-names = "int_pm_turnoff", "int_dstate_change",
+ "int_l1sub_timeout", "int_link_up",
+ "int_link_down", "int_bridge_flush_n";
+
+ perst-gpio = <&msmgpio 65 0>;
+ wake-gpio = <&msmgpio 61 0>;
+ clkreq-gpio = <&msmgpio 64 0>;
+ mdm2apstatus-gpio = <&tlmm_pinmux 16 0>;
+
+ gdsc-vdd-supply = <&gdsc_pcie_0>;
+ vreg-1.8-supply = <&pmd9635_l8>;
+ vreg-0.9-supply = <&pmd9635_l4>;
+
+ qcom,vreg-1.8-voltage-level = <1800000 1800000 1000>;
+ qcom,vreg-0.9-voltage-level = <950000 950000 24000>;
+
+ clock-names = "pcie_0_pipe_clk",
+ "pcie_0_aux_clk", "pcie_0_cfg_ahb_clk",
+ "pcie_0_mstr_axi_clk", "pcie_0_slv_axi_clk",
+ "pcie_0_ldo";
+ max-clock-frequency-hz = <62500000>, <1000000>,
+ <0>, <0>, <0>, <0>;
+
+ resets = <&clock_gcc GCC_PCIE_BCR>,
+ <&clock_gcc GCC_PCIE_PHY_BCR>;
+
+ reset-names = "pcie_0_core_reset", "pcie_0_phy_reset";
+
+ qcom,msm-bus,name = "pcie-ep";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <45 512 0 0>,
+ <45 512 500 800>;
+
+ qcom,pcie-link-speed = <1>;
+ qcom,pcie-active-config;
+ qcom,pcie-aggregated-irq;
+ qcom,pcie-mhi-a7-irq;
+ qcom,pcie-perst-enum;
+ qcom,phy-status-reg = <0x728>;
+ qcom,dbi-base-reg = <0x168>;
+ qcom,slv-space-reg = <0x16c>;
+
+ qcom,phy-init = <0x604 0x03 0x0 0x1
+ 0x048 0x08 0x0 0x1
+ 0x64c 0x4d 0x0 0x1
+ 0x600 0x00 0x0 0x1
+ 0x608 0x03 0x0 0x1>;
+ };
diff --git a/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt b/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt
index b0db996..0c5f696 100644
--- a/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt
+++ b/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt
@@ -13,6 +13,7 @@
Optional properties:
- qcom,fastrpc-glink: Flag to use glink instead of smd for IPC
- qcom,rpc-latency-us: FastRPC QoS latency vote
+- qcom,adsp-remoteheap-vmid: FastRPC remote heap VMID number
Optional subnodes:
- qcom,msm_fastrpc_compute_cb : Child nodes representing the compute context
@@ -28,6 +29,7 @@
compatible = "qcom,msm-fastrpc-adsp";
qcom,fastrpc-glink;
qcom,rpc-latency-us = <2343>;
+ qcom,adsp-remoteheap-vmid = <37>;
qcom,msm_fastrpc_compute_cb_1 {
compatible = "qcom,msm-fastrpc-compute-cb";
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index d4db970..34c2963 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -2016,6 +2016,66 @@
qcom,aux-codec = <&stub_codec>;
};
+* SDX ASoC Machine driver
+
+Required properties:
+- compatible : "qcom,sdx-asoc-snd-tavil"
+- qcom,model : The user-visible name of this sound card.
+- qcom,prim_mi2s_aux_master : Handle to prim_master pinctrl configurations
+- qcom,prim_mi2s_aux_slave : Handle to prim_slave pinctrl configurations
+- qcom,sec_mi2s_aux_master : Handle to sec_master pinctrl configurations
+- qcom,sec_mi2s_aux_slave : Handle to sec_slave pinctrl configurations
+- asoc-platform: This is phandle list containing the references to platform device
+ nodes that are used as part of the sound card dai-links.
+- asoc-platform-names: This property contains list of platform names. The order of
+ the platform names should match to that of the phandle order
+ given in "asoc-platform".
+- asoc-cpu: This is phandle list containing the references to cpu dai device nodes
+ that are used as part of the sound card dai-links.
+- asoc-cpu-names: This property contains list of cpu dai names. The order of the
+			cpu dai names should match to that of the phandle order given
+ in "asoc-cpu". The cpu names are in the form of "%s.%d" form,
+ where the id (%d) field represents the back-end AFE port id that
+ this CPU dai is associated with.
+
+Example:
+
+ sound-tavil {
+ compatible = "qcom,sdx-asoc-snd-tavil";
+ qcom,model = "sdx-tavil-i2s-snd-card";
+ qcom,prim_mi2s_aux_master = <&prim_master>;
+ qcom,prim_mi2s_aux_slave = <&prim_slave>;
+ qcom,sec_mi2s_aux_master = <&sec_master>;
+ qcom,sec_mi2s_aux_slave = <&sec_slave>;
+
+ asoc-platform = <&pcm0>, <&pcm1>, <&voip>, <&voice>,
+ <&loopback>, <&hostless>, <&afe>, <&routing>,
+ <&pcm_dtmf>, <&host_pcm>, <&compress>;
+ asoc-platform-names = "msm-pcm-dsp.0", "msm-pcm-dsp.1",
+ "msm-voip-dsp", "msm-pcm-voice",
+ "msm-pcm-loopback", "msm-pcm-hostless",
+ "msm-pcm-afe", "msm-pcm-routing",
+ "msm-pcm-dtmf", "msm-voice-host-pcm",
+ "msm-compress-dsp";
+ asoc-cpu = <&dai_pri_auxpcm>, <&mi2s_prim>, <&mi2s_sec>,
+ <&dtmf_tx>,
+ <&rx_capture_tx>, <&rx_playback_rx>,
+ <&tx_capture_tx>, <&tx_playback_rx>,
+ <&afe_pcm_rx>, <&afe_pcm_tx>, <&afe_proxy_rx>,
+ <&afe_proxy_tx>, <&incall_record_rx>,
+ <&incall_record_tx>, <&incall_music_rx>,
+ <&dai_sec_auxpcm>;
+ asoc-cpu-names = "msm-dai-q6-auxpcm.1",
+ "msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
+ "msm-dai-stub-dev.4", "msm-dai-stub-dev.5",
+ "msm-dai-stub-dev.6", "msm-dai-stub-dev.7",
+ "msm-dai-stub-dev.8", "msm-dai-q6-dev.224",
+ "msm-dai-q6-dev.225", "msm-dai-q6-dev.241",
+ "msm-dai-q6-dev.240", "msm-dai-q6-dev.32771",
+ "msm-dai-q6-dev.32772", "msm-dai-q6-dev.32773",
+ "msm-dai-q6-auxpcm.2";
+ };
+
* APQ8096 Automotive ASoC Machine driver
Required properties:
diff --git a/Documentation/devicetree/bindings/sound/wcd_codec.txt b/Documentation/devicetree/bindings/sound/wcd_codec.txt
index c848ab5..6d2ae5e 100644
--- a/Documentation/devicetree/bindings/sound/wcd_codec.txt
+++ b/Documentation/devicetree/bindings/sound/wcd_codec.txt
@@ -3,7 +3,7 @@
Required properties:
- compatible : "qcom,tasha-slim-pgd" or "qcom,tasha-i2c-pgd" for Tasha Codec
- or "qcom,tavil-slim-pgd" for Tavil Codec
+ "qcom,tavil-slim-pgd" or "qcom,tavil-i2c-pgd" for Tavil Codec
- elemental-addr: codec slimbus slave PGD enumeration address.(48 bits)
- qcom,cdc-reset-gpio: gpio used for codec SOC reset.
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index a491bd7..86c9259 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -155,6 +155,7 @@
kyo Kyocera Corporation
lacie LaCie
lantiq Lantiq Semiconductor
+lego LEGO Systems A/S
lenovo Lenovo Group Ltd.
lg LG Corporation
linux Linux-specific binding
diff --git a/Makefile b/Makefile
index 1e85d9b..6f6262b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 9
-SUBLEVEL = 60
+SUBLEVEL = 62
EXTRAVERSION =
NAME = Roaring Lionus
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 393c23f..d8d8b82 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -572,6 +572,7 @@
select USE_OF
select PINCTRL
select ARCH_WANT_KMAP_ATOMIC_FLUSH
+ select SND_SOC_COMPRESS
help
Support for Qualcomm MSM/QSD based systems. This runs on the
apps processor of the MSM/QSD and depends on a shared memory
diff --git a/arch/arm/boot/dts/armada-375.dtsi b/arch/arm/boot/dts/armada-375.dtsi
index cc952cf..024f1b7 100644
--- a/arch/arm/boot/dts/armada-375.dtsi
+++ b/arch/arm/boot/dts/armada-375.dtsi
@@ -176,9 +176,9 @@
reg = <0x8000 0x1000>;
cache-unified;
cache-level = <2>;
- arm,double-linefill-incr = <1>;
+ arm,double-linefill-incr = <0>;
arm,double-linefill-wrap = <0>;
- arm,double-linefill = <1>;
+ arm,double-linefill = <0>;
prefetch-data = <1>;
};
diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi
index 2d76688..c60cfe9 100644
--- a/arch/arm/boot/dts/armada-38x.dtsi
+++ b/arch/arm/boot/dts/armada-38x.dtsi
@@ -143,9 +143,9 @@
reg = <0x8000 0x1000>;
cache-unified;
cache-level = <2>;
- arm,double-linefill-incr = <1>;
+ arm,double-linefill-incr = <0>;
arm,double-linefill-wrap = <0>;
- arm,double-linefill = <1>;
+ arm,double-linefill = <0>;
prefetch-data = <1>;
};
diff --git a/arch/arm/boot/dts/armada-39x.dtsi b/arch/arm/boot/dts/armada-39x.dtsi
index 34cba87..aeecfa7 100644
--- a/arch/arm/boot/dts/armada-39x.dtsi
+++ b/arch/arm/boot/dts/armada-39x.dtsi
@@ -111,9 +111,9 @@
reg = <0x8000 0x1000>;
cache-unified;
cache-level = <2>;
- arm,double-linefill-incr = <1>;
+ arm,double-linefill-incr = <0>;
arm,double-linefill-wrap = <0>;
- arm,double-linefill = <1>;
+ arm,double-linefill = <0>;
prefetch-data = <1>;
};
diff --git a/arch/arm/boot/dts/imx53-qsb-common.dtsi b/arch/arm/boot/dts/imx53-qsb-common.dtsi
index c05e7cf..40b3e31 100644
--- a/arch/arm/boot/dts/imx53-qsb-common.dtsi
+++ b/arch/arm/boot/dts/imx53-qsb-common.dtsi
@@ -215,16 +215,16 @@
pinctrl_fec: fecgrp {
fsl,pins = <
- MX53_PAD_FEC_MDC__FEC_MDC 0x80000000
- MX53_PAD_FEC_MDIO__FEC_MDIO 0x80000000
- MX53_PAD_FEC_REF_CLK__FEC_TX_CLK 0x80000000
- MX53_PAD_FEC_RX_ER__FEC_RX_ER 0x80000000
- MX53_PAD_FEC_CRS_DV__FEC_RX_DV 0x80000000
- MX53_PAD_FEC_RXD1__FEC_RDATA_1 0x80000000
- MX53_PAD_FEC_RXD0__FEC_RDATA_0 0x80000000
- MX53_PAD_FEC_TX_EN__FEC_TX_EN 0x80000000
- MX53_PAD_FEC_TXD1__FEC_TDATA_1 0x80000000
- MX53_PAD_FEC_TXD0__FEC_TDATA_0 0x80000000
+ MX53_PAD_FEC_MDC__FEC_MDC 0x4
+ MX53_PAD_FEC_MDIO__FEC_MDIO 0x1fc
+ MX53_PAD_FEC_REF_CLK__FEC_TX_CLK 0x180
+ MX53_PAD_FEC_RX_ER__FEC_RX_ER 0x180
+ MX53_PAD_FEC_CRS_DV__FEC_RX_DV 0x180
+ MX53_PAD_FEC_RXD1__FEC_RDATA_1 0x180
+ MX53_PAD_FEC_RXD0__FEC_RDATA_0 0x180
+ MX53_PAD_FEC_TX_EN__FEC_TX_EN 0x4
+ MX53_PAD_FEC_TXD1__FEC_TDATA_1 0x4
+ MX53_PAD_FEC_TXD0__FEC_TDATA_0 0x4
>;
};
diff --git a/arch/arm/boot/dts/qcom/Makefile b/arch/arm/boot/dts/qcom/Makefile
index 3826bad..c51581d 100644
--- a/arch/arm/boot/dts/qcom/Makefile
+++ b/arch/arm/boot/dts/qcom/Makefile
@@ -3,17 +3,15 @@
sdxpoorwills-cdp.dtb \
sdxpoorwills-mtp.dtb
-
-ifeq ($(CONFIG_ARM64),y)
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-else
targets += dtbs
targets += $(addprefix ../, $(dtb-y))
$(obj)/../%.dtb: $(src)/%.dts FORCE
$(call if_changed_dep,dtc)
+include $(srctree)/arch/arm64/boot/dts/qcom/Makefile
+$(obj)/../%.dtb: $(src)/../../../../arm64/boot/dts/qcom/%.dts FORCE
+ $(call if_changed_dep,dtc)
+
dtbs: $(addprefix $(obj)/../,$(dtb-y))
-endif
clean-files := *.dtb
diff --git a/arch/arm/boot/dts/qcom/sdx-audio-lpass.dtsi b/arch/arm/boot/dts/qcom/sdx-audio-lpass.dtsi
new file mode 100644
index 0000000..0fd3b34
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdx-audio-lpass.dtsi
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ qcom,msm-adsp-loader {
+ compatible = "qcom,adsp-loader";
+ qcom,adsp-state = <0>;
+ qcom,proc-img-to-load = "modem";
+ };
+
+ qcom,msm-audio-ion {
+ compatible = "qcom,msm-audio-ion";
+ qcom,scm-mp-enabled;
+ memory-region = <&audio_mem>;
+ };
+
+ pcm0: qcom,msm-pcm {
+ compatible = "qcom,msm-pcm-dsp";
+ qcom,msm-pcm-dsp-id = <0>;
+ };
+
+ routing: qcom,msm-pcm-routing {
+ compatible = "qcom,msm-pcm-routing";
+ };
+
+ pcm1: qcom,msm-pcm-low-latency {
+ compatible = "qcom,msm-pcm-dsp";
+ qcom,msm-pcm-dsp-id = <1>;
+ qcom,msm-pcm-low-latency;
+ qcom,latency-level = "ultra";
+ };
+
+ qcom,msm-compr-dsp {
+ compatible = "qcom,msm-compr-dsp";
+ };
+
+ voip: qcom,msm-voip-dsp {
+ compatible = "qcom,msm-voip-dsp";
+ };
+
+ voice: qcom,msm-pcm-voice {
+ compatible = "qcom,msm-pcm-voice";
+ qcom,destroy-cvd;
+ };
+
+ stub_codec: qcom,msm-stub-codec {
+ compatible = "qcom,msm-stub-codec";
+ };
+
+ qcom,msm-dai-fe {
+ compatible = "qcom,msm-dai-fe";
+ };
+
+ afe: qcom,msm-pcm-afe {
+ compatible = "qcom,msm-pcm-afe";
+ };
+
+ hostless: qcom,msm-pcm-hostless {
+ compatible = "qcom,msm-pcm-hostless";
+ };
+
+ host_pcm: qcom,msm-voice-host-pcm {
+ compatible = "qcom,msm-voice-host-pcm";
+ };
+
+ loopback: qcom,msm-pcm-loopback {
+ compatible = "qcom,msm-pcm-loopback";
+ };
+
+ compress: qcom,msm-compress-dsp {
+ compatible = "qcom,msm-compress-dsp";
+ qcom,adsp-version = "MDSP 1.2";
+ };
+
+ qcom,msm-dai-stub {
+ compatible = "qcom,msm-dai-stub";
+ dtmf_tx: qcom,msm-dai-stub-dtmf-tx {
+ compatible = "qcom,msm-dai-stub-dev";
+ qcom,msm-dai-stub-dev-id = <4>;
+ };
+
+ rx_capture_tx: qcom,msm-dai-stub-host-rx-capture-tx {
+ compatible = "qcom,msm-dai-stub-dev";
+ qcom,msm-dai-stub-dev-id = <5>;
+ };
+
+ rx_playback_rx: qcom,msm-dai-stub-host-rx-playback-rx {
+ compatible = "qcom,msm-dai-stub-dev";
+ qcom,msm-dai-stub-dev-id = <6>;
+ };
+
+ tx_capture_tx: qcom,msm-dai-stub-host-tx-capture-tx {
+ compatible = "qcom,msm-dai-stub-dev";
+ qcom,msm-dai-stub-dev-id = <7>;
+ };
+
+ tx_playback_rx: qcom,msm-dai-stub-host-tx-playback-rx {
+ compatible = "qcom,msm-dai-stub-dev";
+ qcom,msm-dai-stub-dev-id = <8>;
+ };
+ };
+
+ qcom,msm-dai-q6 {
+ compatible = "qcom,msm-dai-q6";
+ afe_pcm_rx: qcom,msm-dai-q6-be-afe-pcm-rx {
+ compatible = "qcom,msm-dai-q6-dev";
+ qcom,msm-dai-q6-dev-id = <224>;
+ };
+
+ afe_pcm_tx: qcom,msm-dai-q6-be-afe-pcm-tx {
+ compatible = "qcom,msm-dai-q6-dev";
+ qcom,msm-dai-q6-dev-id = <225>;
+ };
+
+ afe_proxy_rx: qcom,msm-dai-q6-afe-proxy-rx {
+ compatible = "qcom,msm-dai-q6-dev";
+ qcom,msm-dai-q6-dev-id = <241>;
+ };
+
+ afe_proxy_tx: qcom,msm-dai-q6-afe-proxy-tx {
+ compatible = "qcom,msm-dai-q6-dev";
+ qcom,msm-dai-q6-dev-id = <240>;
+ };
+
+ incall_record_rx: qcom,msm-dai-q6-incall-record-rx {
+ compatible = "qcom,msm-dai-q6-dev";
+ qcom,msm-dai-q6-dev-id = <32771>;
+ };
+
+ incall_record_tx: qcom,msm-dai-q6-incall-record-tx {
+ compatible = "qcom,msm-dai-q6-dev";
+ qcom,msm-dai-q6-dev-id = <32772>;
+ };
+
+ incall_music_rx: qcom,msm-dai-q6-incall-music-rx {
+ compatible = "qcom,msm-dai-q6-dev";
+ qcom,msm-dai-q6-dev-id = <32773>;
+ };
+ };
+
+ pcm_dtmf: qcom,msm-pcm-dtmf {
+ compatible = "qcom,msm-pcm-dtmf";
+ };
+
+ cpu-pmu {
+ compatible = "arm,cortex-a7-pmu";
+ qcom,irq-is-percpu;
+ interrupts = <1 8 0x100>;
+ };
+
+ dai_pri_auxpcm: qcom,msm-pri-auxpcm {
+ compatible = "qcom,msm-auxpcm-dev";
+ qcom,msm-cpudai-auxpcm-mode = <0>, <0>;
+ qcom,msm-cpudai-auxpcm-sync = <1>, <1>;
+ qcom,msm-cpudai-auxpcm-frame = <5>, <4>;
+ qcom,msm-cpudai-auxpcm-quant = <2>, <2>;
+ qcom,msm-cpudai-auxpcm-num-slots = <1>, <1>;
+ qcom,msm-cpudai-auxpcm-slot-mapping = <1>, <1>;
+ qcom,msm-cpudai-auxpcm-data = <0>, <0>;
+ qcom,msm-cpudai-auxpcm-pcm-clk-rate = <2048000>, <2048000>;
+ qcom,msm-auxpcm-interface = "primary";
+ qcom,msm-cpudai-afe-clk-ver = <2>;
+ };
+
+ dai_sec_auxpcm: qcom,msm-sec-auxpcm {
+ compatible = "qcom,msm-auxpcm-dev";
+ qcom,msm-cpudai-auxpcm-mode = <0>, <0>;
+ qcom,msm-cpudai-auxpcm-sync = <1>, <1>;
+ qcom,msm-cpudai-auxpcm-frame = <5>, <4>;
+ qcom,msm-cpudai-auxpcm-quant = <2>, <2>;
+ qcom,msm-cpudai-auxpcm-num-slots = <1>, <1>;
+ qcom,msm-cpudai-auxpcm-slot-mapping = <1>, <1>;
+ qcom,msm-cpudai-auxpcm-data = <0>, <0>;
+ qcom,msm-cpudai-auxpcm-pcm-clk-rate = <2048000>, <2048000>;
+ qcom,msm-auxpcm-interface = "secondary";
+ qcom,msm-cpudai-afe-clk-ver = <2>;
+ };
+
+ qcom,msm-dai-mi2s {
+ compatible = "qcom,msm-dai-mi2s";
+ mi2s_prim: qcom,msm-dai-q6-mi2s-prim {
+ compatible = "qcom,msm-dai-q6-mi2s";
+ qcom,msm-dai-q6-mi2s-dev-id = <0>;
+ qcom,msm-mi2s-rx-lines = <2>;
+ qcom,msm-mi2s-tx-lines = <1>;
+ };
+ mi2s_sec: qcom,msm-dai-q6-mi2s-sec {
+ compatible = "qcom,msm-dai-q6-mi2s";
+ qcom,msm-dai-q6-mi2s-dev-id = <1>;
+ qcom,msm-mi2s-rx-lines = <2>;
+ qcom,msm-mi2s-tx-lines = <1>;
+ };
+
+ };
+
+ prim_master: prim_master_pinctrl {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&pri_ws_active_master
+ &pri_sck_active_master
+ &pri_dout_active
+ &pri_din_active>;
+ pinctrl-1 = <&pri_ws_sleep
+ &pri_sck_sleep
+ &pri_dout_sleep
+ &pri_din_sleep>;
+ qcom,mi2s-auxpcm-cdc-gpios;
+ };
+
+ prim_slave: prim_slave_pinctrl {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&pri_ws_active_slave
+ &pri_sck_active_slave
+ &pri_dout_active
+ &pri_din_active>;
+ pinctrl-1 = <&pri_ws_sleep
+ &pri_sck_sleep
+ &pri_dout_sleep
+ &pri_din_sleep>;
+ qcom,mi2s-auxpcm-cdc-gpios;
+ };
+
+ sec_master: sec_master_pinctrl {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&sec_ws_active_master
+ &sec_sck_active_master
+ &sec_dout_active
+ &sec_din_active>;
+ pinctrl-1 = <&sec_ws_sleep
+ &sec_sck_sleep
+ &sec_dout_sleep
+ &sec_din_sleep>;
+ qcom,mi2s-auxpcm-cdc-gpios;
+ };
+
+ sec_slave: sec_slave_pinctrl {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&sec_ws_active_slave
+ &sec_sck_active_slave
+ &sec_dout_active
+ &sec_din_active>;
+ pinctrl-1 = <&sec_ws_sleep
+ &sec_sck_sleep
+ &sec_dout_sleep
+ &sec_din_sleep>;
+ qcom,mi2s-auxpcm-cdc-gpios;
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/sdx-wsa881x.dtsi b/arch/arm/boot/dts/qcom/sdx-wsa881x.dtsi
new file mode 100644
index 0000000..a294e6c
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdx-wsa881x.dtsi
@@ -0,0 +1,45 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&i2c_3 {
+ tavil_codec {
+ swr_master {
+ compatible = "qcom,swr-wcd";
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ wsa881x_0211: wsa881x@20170211 {
+ compatible = "qcom,wsa881x";
+ reg = <0x00 0x20170211>;
+ qcom,spkr-sd-n-node = <&wsa_spkr_wcd_sd1>;
+ };
+
+ wsa881x_0212: wsa881x@20170212 {
+ compatible = "qcom,wsa881x";
+ reg = <0x00 0x20170212>;
+ qcom,spkr-sd-n-node = <&wsa_spkr_wcd_sd2>;
+ };
+
+ wsa881x_0213: wsa881x@21170213 {
+ compatible = "qcom,wsa881x";
+ reg = <0x00 0x21170213>;
+ qcom,spkr-sd-n-node = <&wsa_spkr_wcd_sd1>;
+ };
+
+ wsa881x_0214: wsa881x@21170214 {
+ compatible = "qcom,wsa881x";
+ reg = <0x00 0x21170214>;
+ qcom,spkr-sd-n-node = <&wsa_spkr_wcd_sd2>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-audio-overlay.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-audio-overlay.dtsi
new file mode 100644
index 0000000..f90bd7f
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-audio-overlay.dtsi
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sdxpoorwills-wcd.dtsi"
+#include "sdx-wsa881x.dtsi"
+#include <dt-bindings/clock/qcom,audio-ext-clk.h>
+
+&snd_934x {
+ qcom,audio-routing =
+ "AIF4 VI", "MCLK",
+ "RX_BIAS", "MCLK",
+ "MADINPUT", "MCLK",
+ "AMIC2", "MIC BIAS2",
+ "MIC BIAS2", "Headset Mic",
+ "AMIC3", "MIC BIAS2",
+ "MIC BIAS2", "ANCRight Headset Mic",
+ "AMIC4", "MIC BIAS2",
+ "MIC BIAS2", "ANCLeft Headset Mic",
+ "AMIC5", "MIC BIAS3",
+ "MIC BIAS3", "Handset Mic",
+ "DMIC0", "MIC BIAS1",
+ "MIC BIAS1", "Digital Mic0",
+ "DMIC1", "MIC BIAS1",
+ "MIC BIAS1", "Digital Mic1",
+ "DMIC2", "MIC BIAS3",
+ "MIC BIAS3", "Digital Mic2",
+ "DMIC3", "MIC BIAS3",
+ "MIC BIAS3", "Digital Mic3",
+ "DMIC4", "MIC BIAS4",
+ "MIC BIAS4", "Digital Mic4",
+ "DMIC5", "MIC BIAS4",
+ "MIC BIAS4", "Digital Mic5",
+ "SpkrLeft IN", "SPK1 OUT",
+ "SpkrRight IN", "SPK2 OUT";
+
+ qcom,msm-mbhc-hphl-swh = <1>;
+ qcom,msm-mbhc-gnd-swh = <1>;
+ qcom,msm-mbhc-hs-mic-max-threshold-mv = <1700>;
+ qcom,msm-mbhc-hs-mic-min-threshold-mv = <50>;
+ qcom,tavil-mclk-clk-freq = <12288000>;
+
+ asoc-codec = <&stub_codec>;
+ asoc-codec-names = "msm-stub-codec.1";
+
+ qcom,wsa-max-devs = <2>;
+ qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0212>,
+ <&wsa881x_0213>, <&wsa881x_0214>;
+ qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrRight",
+ "SpkrLeft", "SpkrRight";
+};
+
+&soc {
+ wcd9xxx_intc: wcd9xxx-irq {
+ status = "ok";
+ compatible = "qcom,wcd9xxx-irq";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tlmm>;
+ qcom,gpio-connect = <&tlmm 71 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&wcd_intr_default>;
+ };
+
+ clock_audio_up: audio_ext_clk_up {
+ compatible = "qcom,audio-ref-clk";
+ qcom,codec-mclk-clk-freq = <12288000>;
+ pinctrl-names = "sleep", "active";
+ pinctrl-0 = <&i2s_mclk_sleep>;
+ pinctrl-1 = <&i2s_mclk_active>;
+ #clock-cells = <1>;
+ };
+
+ wcd_rst_gpio: msm_cdc_pinctrl@77 {
+ compatible = "qcom,msm-cdc-pinctrl";
+ qcom,cdc-rst-n-gpio = <&tlmm 77 0>;
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&cdc_reset_active>;
+ pinctrl-1 = <&cdc_reset_sleep>;
+ };
+};
+
+&i2c_3 {
+ wcd934x_cdc: tavil_codec {
+ compatible = "qcom,tavil-i2c-pgd";
+ elemental-addr = [00 01 50 02 17 02];
+
+ interrupt-parent = <&wcd9xxx_intc>;
+ interrupts = <0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
+ 17 18 19 20 21 22 23 24 25 26 27 28 29
+ 30 31>;
+
+ qcom,wcd-rst-gpio-node = <&wcd_rst_gpio>;
+
+ clock-names = "wcd_clk";
+ clocks = <&clock_audio_up AUDIO_LPASS_MCLK>;
+
+ cdc-vdd-buck-supply = <&pmxpoorwills_l6>;
+ qcom,cdc-vdd-buck-voltage = <1800000 1800000>;
+ qcom,cdc-vdd-buck-current = <650000>;
+
+ cdc-buck-sido-supply = <&pmxpoorwills_l6>;
+ qcom,cdc-buck-sido-voltage = <1800000 1800000>;
+ qcom,cdc-buck-sido-current = <250000>;
+
+ cdc-vdd-tx-h-supply = <&pmxpoorwills_l6>;
+ qcom,cdc-vdd-tx-h-voltage = <1800000 1800000>;
+ qcom,cdc-vdd-tx-h-current = <25000>;
+
+ cdc-vdd-rx-h-supply = <&pmxpoorwills_l6>;
+ qcom,cdc-vdd-rx-h-voltage = <1800000 1800000>;
+ qcom,cdc-vdd-rx-h-current = <25000>;
+
+ cdc-vddpx-1-supply = <&pmxpoorwills_l6>;
+ qcom,cdc-vddpx-1-voltage = <1800000 1800000>;
+ qcom,cdc-vddpx-1-current = <10000>;
+
+ qcom,cdc-static-supplies = "cdc-vdd-buck",
+ "cdc-buck-sido",
+ "cdc-vdd-tx-h",
+ "cdc-vdd-rx-h",
+ "cdc-vddpx-1";
+
+ qcom,cdc-micbias1-mv = <1800>;
+ qcom,cdc-micbias2-mv = <1800>;
+ qcom,cdc-micbias3-mv = <1800>;
+ qcom,cdc-micbias4-mv = <1800>;
+
+ qcom,cdc-mclk-clk-rate = <12288000>;
+ qcom,cdc-dmic-sample-rate = <4800000>;
+
+ qcom,wdsp-cmpnt-dev-name = "tavil_codec";
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-audio.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-audio.dtsi
new file mode 100644
index 0000000..a3eba9a
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-audio.dtsi
@@ -0,0 +1,51 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sdx-audio-lpass.dtsi"
+
+&soc {
+ snd_934x: sound-tavil {
+ compatible = "qcom,sdx-asoc-snd-tavil";
+ qcom,model = "sdx-tavil-i2s-snd-card";
+ qcom,prim_mi2s_aux_master = <&prim_master>;
+ qcom,prim_mi2s_aux_slave = <&prim_slave>;
+ qcom,sec_mi2s_aux_master = <&sec_master>;
+ qcom,sec_mi2s_aux_slave = <&sec_slave>;
+
+ asoc-platform = <&pcm0>, <&pcm1>, <&voip>, <&voice>,
+ <&loopback>, <&hostless>, <&afe>, <&routing>,
+ <&pcm_dtmf>, <&host_pcm>, <&compress>;
+ asoc-platform-names = "msm-pcm-dsp.0", "msm-pcm-dsp.1",
+ "msm-voip-dsp", "msm-pcm-voice",
+ "msm-pcm-loopback", "msm-pcm-hostless",
+ "msm-pcm-afe", "msm-pcm-routing",
+ "msm-pcm-dtmf", "msm-voice-host-pcm",
+ "msm-compress-dsp";
+ asoc-cpu = <&dai_pri_auxpcm>, <&mi2s_prim>, <&mi2s_sec>,
+ <&dtmf_tx>,
+ <&rx_capture_tx>, <&rx_playback_rx>,
+ <&tx_capture_tx>, <&tx_playback_rx>,
+ <&afe_pcm_rx>, <&afe_pcm_tx>, <&afe_proxy_rx>,
+ <&afe_proxy_tx>, <&incall_record_rx>,
+ <&incall_record_tx>, <&incall_music_rx>,
+ <&dai_sec_auxpcm>;
+ asoc-cpu-names = "msm-dai-q6-auxpcm.1",
+ "msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
+ "msm-dai-stub-dev.4", "msm-dai-stub-dev.5",
+ "msm-dai-stub-dev.6", "msm-dai-stub-dev.7",
+ "msm-dai-stub-dev.8", "msm-dai-q6-dev.224",
+ "msm-dai-q6-dev.225", "msm-dai-q6-dev.241",
+ "msm-dai-q6-dev.240", "msm-dai-q6-dev.32771",
+ "msm-dai-q6-dev.32772", "msm-dai-q6-dev.32773",
+ "msm-dai-q6-auxpcm.2";
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-cdp-audio-overlay.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-cdp-audio-overlay.dtsi
new file mode 100644
index 0000000..a7943cd
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-cdp-audio-overlay.dtsi
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sdxpoorwills-audio-overlay.dtsi"
+
+&soc {
+ sound-tavil {
+ qcom,wsa-max-devs = <1>;
+ qcom,wsa-devs = <&wsa881x_0214>;
+ qcom,wsa-aux-dev-prefix = "SpkrRight";
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi
index 2b0fa5c..b6c04ec 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi
@@ -919,6 +919,361 @@
};
};
};
+
+ wcd9xxx_intr {
+ wcd_intr_default: wcd_intr_default {
+ mux {
+ pins = "gpio71";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio71";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* pull down */
+ input-enable;
+ };
+ };
+ };
+
+ cdc_reset_ctrl {
+ cdc_reset_sleep: cdc_reset_sleep {
+ mux {
+ pins = "gpio77";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio77";
+ drive-strength = <2>;
+ bias-disable;
+ output-low;
+ };
+ };
+
+ cdc_reset_active: cdc_reset_active {
+ mux {
+ pins = "gpio77";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio77";
+ drive-strength = <8>;
+ bias-pull-down;
+ output-high;
+ };
+ };
+ };
+
+ i2s_mclk {
+ i2s_mclk_sleep: i2s_mclk_sleep {
+ mux {
+ pins = "gpio62";
+ function = "i2s_mclk";
+ };
+
+ config {
+ pins = "gpio62";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ };
+ };
+
+ i2s_mclk_active: i2s_mclk_active {
+ mux {
+ pins = "gpio62";
+ function = "i2s_mclk";
+ };
+
+ config {
+ pins = "gpio62";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL*/
+ output-high;
+ };
+ };
+ };
+
+ pmx_pri_mi2s_aux {
+ pri_ws_sleep: pri_ws_sleep {
+ mux {
+ pins = "gpio12";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio12";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ pri_sck_sleep: pri_sck_sleep {
+ mux {
+ pins = "gpio15";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio15";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ pri_dout_sleep: pri_dout_sleep {
+ mux {
+ pins = "gpio14";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio14";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ pri_ws_active_master: pri_ws_active_master {
+ mux {
+ pins = "gpio12";
+ function = "pri_mi2s_ws_a";
+ };
+
+ config {
+ pins = "gpio12";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL*/
+ output-high;
+ };
+ };
+
+ pri_sck_active_master: pri_sck_active_master {
+ mux {
+ pins = "gpio15";
+ function = "pri_mi2s_sck_a";
+ };
+
+ config {
+ pins = "gpio15";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL*/
+ output-high;
+ };
+ };
+
+ pri_ws_active_slave: pri_ws_active_slave {
+ mux {
+ pins = "gpio12";
+ function = "pri_mi2s_ws_a";
+ };
+
+ config {
+ pins = "gpio12";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL*/
+ };
+ };
+
+ pri_sck_active_slave: pri_sck_active_slave {
+ mux {
+ pins = "gpio15";
+ function = "pri_mi2s_sck_a";
+ };
+
+ config {
+ pins = "gpio15";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL*/
+ };
+ };
+
+ pri_dout_active: pri_dout_active {
+ mux {
+ pins = "gpio14";
+ function = "pri_mi2s_data1_a";
+ };
+
+ config {
+ pins = "gpio14";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL*/
+ output-high;
+ };
+ };
+ };
+
+ pmx_pri_mi2s_aux_din {
+ pri_din_sleep: pri_din_sleep {
+ mux {
+ pins = "gpio13";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio13";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ pri_din_active: pri_din_active {
+ mux {
+ pins = "gpio13";
+ function = "pri_mi2s_data0_a";
+ };
+
+ config {
+ pins = "gpio13";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL */
+ };
+ };
+ };
+
+ pmx_sec_mi2s_aux {
+ sec_ws_sleep: sec_ws_sleep {
+ mux {
+ pins = "gpio16";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio16";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ sec_sck_sleep: sec_sck_sleep {
+ mux {
+ pins = "gpio19";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio19";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ sec_dout_sleep: sec_dout_sleep {
+ mux {
+ pins = "gpio18";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio18";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ sec_ws_active_master: sec_ws_active_master {
+ mux {
+ pins = "gpio16";
+ function = "sec_mi2s_ws_a";
+ };
+
+ config {
+ pins = "gpio16";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL*/
+ output-high;
+ };
+ };
+
+ sec_sck_active_master: sec_sck_active_master {
+ mux {
+ pins = "gpio19";
+ function = "sec_mi2s_sck_a";
+ };
+
+ config {
+ pins = "gpio19";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL*/
+ output-high;
+ };
+ };
+
+ sec_ws_active_slave: sec_ws_active_slave {
+ mux {
+ pins = "gpio16";
+ function = "sec_mi2s_ws_a";
+ };
+
+ config {
+ pins = "gpio16";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL*/
+ };
+ };
+
+ sec_sck_active_slave: sec_sck_active_slave {
+ mux {
+ pins = "gpio19";
+ function = "sec_mi2s_sck_a";
+ };
+
+ config {
+ pins = "gpio19";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL*/
+ };
+ };
+
+ sec_dout_active: sec_dout_active {
+ mux {
+ pins = "gpio18";
+ function = "sec_mi2s_data1_a";
+ };
+
+ config {
+ pins = "gpio18";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL*/
+ output-high;
+ };
+ };
+ };
+
+ pmx_sec_mi2s_aux_din {
+ sec_din_sleep: sec_din_sleep {
+ mux {
+ pins = "gpio17";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio17";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ sec_din_active: sec_din_active {
+ mux {
+ pins = "gpio17";
+ function = "sec_mi2s_data0_a";
+ };
+
+ config {
+ pins = "gpio17";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL */
+ };
+ };
+ };
};
};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-regulator.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-regulator.dtsi
index cc126f6..9947594 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-regulator.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-regulator.dtsi
@@ -12,103 +12,324 @@
#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
-/* Stub regulators */
-/ {
- pmxpoorwills_s1: regualtor-pmxpoorwills-s1 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pmxpoorwills_s1";
- qcom,hpm-min-load = <100000>;
- regulator-min-microvolt = <752000>;
- regulator-max-microvolt = <752000>;
+&soc {
+ /* RPMh regulators */
+
+ /* pmxpoorwills S1 - VDD_MODEM supply */
+ rpmh-regulator-modemlvl {
+ compatible = "qcom,rpmh-arc-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "mss.lvl";
+ pmxpoorwills_s1_level: regualtor-pmxpoorwills-s1 {
+ regulator-name = "pmxpoorwills_s1_level";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+ regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+ };
};
- /* VDD CX supply */
- pmxpoorwills_s5_level: regualtor-pmxpoorwills-s5-level {
- compatible = "qcom,stub-regulator";
- regulator-name = "pmxpoorwills_s5_level";
- qcom,hpm-min-load = <100000>;
- regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
- regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+ rpmh-regulator-smpa4 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "smpa4";
+ pmxpoorwills_s4: regulator-pmxpoorwills-s4 {
+ regulator-name = "pmxpoorwills_s4";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ qcom,init-voltage = <1800000>;
+ };
};
- pmxpoorwills_s5_level_ao: regualtor-pmxpoorwills-s5-level-ao {
- compatible = "qcom,stub-regulator";
- regulator-name = "pmxpoorwills_s5_level_ao";
- qcom,hpm-min-load = <100000>;
- regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
- regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+ /* pmxpoorwills S5 - VDD_CX supply */
+ rpmh-regulator-cxlvl {
+ compatible = "qcom,rpmh-arc-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "cx.lvl";
+ pmxpoorwills_s5_level-parent-supply = <&pmxpoorwills_l9_level>;
+ pmxpoorwills_s5_level_ao-parent-supply =
+ <&pmxpoorwills_l9_level_ao>;
+ pmxpoorwills_s5_level: regualtor-pmxpoorwills-s5-level {
+ regulator-name = "pmxpoorwills_s5_level";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+ regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+ qcom,min-dropout-voltage-level = <(-1)>;
+ };
+
+ pmxpoorwills_s5_level_ao: regualtor-pmxpoorwills-s5-level-ao {
+ regulator-name = "pmxpoorwills_s5_level_ao";
+ qcom,set = <RPMH_REGULATOR_SET_ACTIVE>;
+ regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+ regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+ qcom,min-dropout-voltage-level = <(-1)>;
+ };
};
- pmxpoorwills_l1: regualtor-pmxpoorwills-11 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pmxpoorwills_l1";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
+ rpmh-regulator-ldoa1 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa1";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pmxpoorwills_l1: regualtor-pmxpoorwills-11 {
+ regulator-name = "pmxpoorwills_l1";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ qcom,init-voltage = <1200000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
};
- pmxpoorwills_l3: regualtor-pmxpoorwills-l3 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pmxpoorwills_l3";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <800000>;
- regulator-max-microvolt = <800000>;
+ rpmh-regulator-ldoa2 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa2";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pmxpoorwills_l2: regualtor-pmxpoorwills-12 {
+ regulator-name = "pmxpoorwills_l2";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <1128000>;
+ regulator-max-microvolt = <1128000>;
+ qcom,init-voltage = <1128000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ regulator-always-on;
+ };
};
- pmxpoorwills_l4: regualtor-pmxpoorwills-l4 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pmxpoorwills_l4";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <872000>;
- regulator-max-microvolt = <872000>;
+ rpmh-regulator-ldoa3 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa3";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pmxpoorwills_l3: regualtor-pmxpoorwills-l3 {
+ regulator-name = "pmxpoorwills_l3";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ qcom,init-voltage = <800000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
};
- pmxpoorwills_l5: regualtor-pmxpoorwills-l5 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pmxpoorwills_l5";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
+ rpmh-regulator-ldoa4 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa4";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pmxpoorwills_l4: regualtor-pmxpoorwills-l4 {
+ regulator-name = "pmxpoorwills_l4";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <872000>;
+ regulator-max-microvolt = <872000>;
+ qcom,init-voltage = <872000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
};
- pmxpoorwills_l6: regualtor-pmxpoorwills-l6 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pmxpoorwills_l6";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
+ rpmh-regulator-ldoa5 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa5";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pmxpoorwills_l5: regualtor-pmxpoorwills-l5 {
+ regulator-name = "pmxpoorwills_l5";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <1704000>;
+ regulator-max-microvolt = <1704000>;
+ qcom,init-voltage = <1704000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
};
- pmxpoorwills_l8: regualtor-pmxpoorwills-l8 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pmxpoorwills_l8";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <800000>;
- regulator-max-microvolt = <800000>;
+ rpmh-regulator-ldoa7 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa7";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pmxpoorwills_l7: regualtor-pmxpoorwills-l7 {
+ regulator-name = "pmxpoorwills_l7";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <2952000>;
+ regulator-max-microvolt = <2952000>;
+ qcom,init-voltage = <2952000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
};
- /* VDD MX supply */
- pmxpoorwills_l9_level: regualtor-pmxpoorwills-l9-level {
- compatible = "qcom,stub-regulator";
- regulator-name = "pmxpoorwills_l9_level";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
- regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+ rpmh-regulator-ldoa8 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa8";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pmxpoorwills_l8: regualtor-pmxpoorwills-l8 {
+ regulator-name = "pmxpoorwills_l8";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ qcom,init-voltage = <800000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
};
- pmxpoorwills_l9_level_ao: regualtor-pmxpoorwills-l9-level_ao {
- compatible = "qcom,stub-regulator";
- regulator-name = "pmxpoorwills_l9_level_ao";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
- regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+ /* pmxpoorwills L9 - VDD_MX supply */
+ rpmh-regulator-mxlvl {
+ compatible = "qcom,rpmh-arc-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "mx.lvl";
+ pmxpoorwills_l9_level: regualtor-pmxpoorwills-l9-level {
+ regulator-name = "pmxpoorwills_l9_level";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+ regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+ };
+
+ pmxpoorwills_l9_level_ao: regualtor-pmxpoorwills-l9-level-ao {
+ regulator-name = "pmxpoorwills_l9_level_ao";
+ qcom,set = <RPMH_REGULATOR_SET_ACTIVE>;
+ regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+ regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+ };
};
- pmxpoorwills_l10: regualtor-pmxpoorwills-l10 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pmxpoorwills_l10";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <3088000>;
- regulator-max-microvolt = <3088000>;
+ rpmh-regulator-ldoa10 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa10";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pmxpoorwills_l10: regualtor-pmxpoorwills-l10 {
+ regulator-name = "pmxpoorwills_l10";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <3088000>;
+ regulator-max-microvolt = <3088000>;
+ qcom,init-voltage = <3088000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
+ };
+
+ rpmh-regulator-ldoa11 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa11";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pmxpoorwills_l11: regualtor-pmxpoorwills-l11 {
+ regulator-name = "pmxpoorwills_l11";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <1808000>;
+ regulator-max-microvolt = <1808000>;
+ qcom,init-voltage = <1808000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
+ };
+
+ rpmh-regulator-ldoa12 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa12";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pmxpoorwills_l12: regualtor-pmxpoorwills-l12 {
+ regulator-name = "pmxpoorwills_l12";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <2704000>;
+ regulator-max-microvolt = <2704000>;
+ qcom,init-voltage = <2704000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
+ };
+
+ rpmh-regulator-ldoa13 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa13";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pmxpoorwills_l13: regualtor-pmxpoorwills-l13 {
+ regulator-name = "pmxpoorwills_l13";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <1808000>;
+ regulator-max-microvolt = <1808000>;
+ qcom,init-voltage = <1808000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
+ };
+
+ rpmh-regulator-ldoa14 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa14";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pmxpoorwills_l14: regualtor-pmxpoorwills-l14 {
+ regulator-name = "pmxpoorwills_l14";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <620000>;
+ regulator-max-microvolt = <620000>;
+ qcom,init-voltage = <620000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
+ };
+
+ rpmh-regulator-ldoa16 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa16";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pmxpoorwills_l16: regualtor-pmxpoorwills-l16 {
+ regulator-name = "pmxpoorwills_l16";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <752000>;
+ regulator-max-microvolt = <752000>;
+ qcom,init-voltage = <752000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ regulator-always-on;
+ };
+ };
+
+ /* VREF_RGMII */
+ rpmh-regulator-rgmii {
+ compatible = "qcom,rpmh-xob-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "vrefa2";
+ vreg_rgmii: regulator-rgmii {
+ regulator-name = "vreg_rgmii";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ };
};
};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-rumi.dts b/arch/arm/boot/dts/qcom/sdxpoorwills-rumi.dts
index 3aacd63..aa9e7f2 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-rumi.dts
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-rumi.dts
@@ -23,6 +23,30 @@
qcom,board-id = <15 0>;
};
+&soc {
+ /* Delete rpmh regulators */
+ /delete-node/ rpmh-regulator-modemlvl;
+ /delete-node/ rpmh-regulator-smpa4;
+ /delete-node/ rpmh-regulator-cxlvl;
+ /delete-node/ rpmh-regulator-ldoa1;
+ /delete-node/ rpmh-regulator-ldoa2;
+ /delete-node/ rpmh-regulator-ldoa3;
+ /delete-node/ rpmh-regulator-ldoa4;
+ /delete-node/ rpmh-regulator-ldoa5;
+ /delete-node/ rpmh-regulator-ldoa7;
+ /delete-node/ rpmh-regulator-ldoa8;
+ /delete-node/ rpmh-regulator-mxlvl;
+ /delete-node/ rpmh-regulator-ldoa10;
+ /delete-node/ rpmh-regulator-ldoa11;
+ /delete-node/ rpmh-regulator-ldoa12;
+ /delete-node/ rpmh-regulator-ldoa13;
+ /delete-node/ rpmh-regulator-ldoa14;
+ /delete-node/ rpmh-regulator-ldoa16;
+ /delete-node/ rpmh-regulator-rgmii;
+};
+
+#include "sdxpoorwills-stub-regulator.dtsi"
+
&blsp1_uart2 {
pinctrl-names = "default";
pinctrl-0 = <&uart2_console_active>;
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-stub-regulator.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-stub-regulator.dtsi
new file mode 100644
index 0000000..7c6b7b0
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-stub-regulator.dtsi
@@ -0,0 +1,176 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+
+/* Stub regulators */
+/ {
+ pmxpoorwills_s1: regualtor-pmxpoorwills-s1 {
+ compatible = "qcom,stub-regulator";
+ regulator-name = "pmxpoorwills_s1";
+ qcom,hpm-min-load = <100000>;
+ regulator-min-microvolt = <752000>;
+ regulator-max-microvolt = <752000>;
+ };
+
+ pmxpoorwills_s4: regualtor-pmxpoorwills-s4 {
+ compatible = "qcom,stub-regulator";
+ regulator-name = "pmxpoorwills_s4";
+ qcom,hpm-min-load = <100000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ /* VDD CX supply */
+ pmxpoorwills_s5_level: regualtor-pmxpoorwills-s5-level {
+ compatible = "qcom,stub-regulator";
+ regulator-name = "pmxpoorwills_s5_level";
+ qcom,hpm-min-load = <100000>;
+ regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+ regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+ };
+
+ pmxpoorwills_s5_level_ao: regualtor-pmxpoorwills-s5-level-ao {
+ compatible = "qcom,stub-regulator";
+ regulator-name = "pmxpoorwills_s5_level_ao";
+ qcom,hpm-min-load = <100000>;
+ regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+ regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+ };
+
+ pmxpoorwills_l1: regualtor-pmxpoorwills-11 {
+ compatible = "qcom,stub-regulator";
+ regulator-name = "pmxpoorwills_l1";
+ qcom,hpm-min-load = <10000>;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ pmxpoorwills_l2: regualtor-pmxpoorwills-12 {
+ compatible = "qcom,stub-regulator";
+ regulator-name = "pmxpoorwills_l2";
+ qcom,hpm-min-load = <10000>;
+ regulator-min-microvolt = <1128000>;
+ regulator-max-microvolt = <1128000>;
+ };
+
+ pmxpoorwills_l3: regualtor-pmxpoorwills-l3 {
+ compatible = "qcom,stub-regulator";
+ regulator-name = "pmxpoorwills_l3";
+ qcom,hpm-min-load = <10000>;
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ };
+
+ pmxpoorwills_l4: regualtor-pmxpoorwills-l4 {
+ compatible = "qcom,stub-regulator";
+ regulator-name = "pmxpoorwills_l4";
+ qcom,hpm-min-load = <10000>;
+ regulator-min-microvolt = <872000>;
+ regulator-max-microvolt = <872000>;
+ };
+
+ pmxpoorwills_l5: regualtor-pmxpoorwills-l5 {
+ compatible = "qcom,stub-regulator";
+ regulator-name = "pmxpoorwills_l5";
+ qcom,hpm-min-load = <10000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pmxpoorwills_l7: regualtor-pmxpoorwills-l7 {
+ compatible = "qcom,stub-regulator";
+ regulator-name = "pmxpoorwills_l7";
+ qcom,hpm-min-load = <10000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ };
+
+ pmxpoorwills_l8: regualtor-pmxpoorwills-l8 {
+ compatible = "qcom,stub-regulator";
+ regulator-name = "pmxpoorwills_l8";
+ qcom,hpm-min-load = <10000>;
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ };
+
+ /* VDD MX supply */
+ pmxpoorwills_l9_level: regualtor-pmxpoorwills-l9-level {
+ compatible = "qcom,stub-regulator";
+ regulator-name = "pmxpoorwills_l9_level";
+ qcom,hpm-min-load = <10000>;
+ regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+ regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+ };
+
+ pmxpoorwills_l9_level_ao: regualtor-pmxpoorwills-l9-level_ao {
+ compatible = "qcom,stub-regulator";
+ regulator-name = "pmxpoorwills_l9_level_ao";
+ qcom,hpm-min-load = <10000>;
+ regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+ regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+ };
+
+ pmxpoorwills_l10: regualtor-pmxpoorwills-l10 {
+ compatible = "qcom,stub-regulator";
+ regulator-name = "pmxpoorwills_l10";
+ qcom,hpm-min-load = <10000>;
+ regulator-min-microvolt = <3088000>;
+ regulator-max-microvolt = <3088000>;
+ };
+
+ pmxpoorwills_l11: regualtor-pmxpoorwills-l11 {
+ compatible = "qcom,stub-regulator";
+ regulator-name = "pmxpoorwills_l11";
+ qcom,hpm-min-load = <10000>;
+ regulator-min-microvolt = <1808000>;
+ regulator-max-microvolt = <2848000>;
+ };
+
+ pmxpoorwills_l12: regualtor-pmxpoorwills-l12 {
+ compatible = "qcom,stub-regulator";
+ regulator-name = "pmxpoorwills_l12";
+ qcom,hpm-min-load = <10000>;
+ regulator-min-microvolt = <2704000>;
+ regulator-max-microvolt = <2704000>;
+ };
+
+ pmxpoorwills_l13: regualtor-pmxpoorwills-l13 {
+ compatible = "qcom,stub-regulator";
+ regulator-name = "pmxpoorwills_l13";
+ qcom,hpm-min-load = <10000>;
+ regulator-min-microvolt = <1808000>;
+ regulator-max-microvolt = <2848000>;
+ };
+
+ pmxpoorwills_l14: regualtor-pmxpoorwills-l14 {
+ compatible = "qcom,stub-regulator";
+ regulator-name = "pmxpoorwills_l14";
+ qcom,hpm-min-load = <10000>;
+ regulator-min-microvolt = <620000>;
+ regulator-max-microvolt = <752000>;
+ };
+
+ pmxpoorwills_l16: regualtor-pmxpoorwills-l16 {
+ compatible = "qcom,stub-regulator";
+ regulator-name = "pmxpoorwills_l16";
+ qcom,hpm-min-load = <10000>;
+ regulator-min-microvolt = <752000>;
+ regulator-max-microvolt = <752000>;
+ };
+
+ /* VREF_RGMII */
+ vreg_rgmii: rgmii-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vreg_rgmii";
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-wcd.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-wcd.dtsi
new file mode 100644
index 0000000..9303ed1
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-wcd.dtsi
@@ -0,0 +1,80 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&i2c_3 {
+ tavil_codec {
+ wcd: wcd_pinctrl@5 {
+ compatible = "qcom,wcd-pinctrl";
+ qcom,num-gpios = <5>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ spkr_1_wcd_en_active: spkr_1_wcd_en_active {
+ mux {
+ pins = "gpio2";
+ };
+
+ config {
+ pins = "gpio2";
+ output-high;
+ };
+ };
+
+ spkr_1_wcd_en_sleep: spkr_1_wcd_en_sleep {
+ mux {
+ pins = "gpio2";
+ };
+
+ config {
+ pins = "gpio2";
+ input-enable;
+ };
+ };
+
+ spkr_2_wcd_en_active: spkr_2_sd_n_active {
+ mux {
+ pins = "gpio3";
+ };
+
+ config {
+ pins = "gpio3";
+ output-high;
+ };
+ };
+
+ spkr_2_wcd_en_sleep: spkr_2_sd_n_sleep {
+ mux {
+ pins = "gpio3";
+ };
+
+ config {
+ pins = "gpio3";
+ input-enable;
+ };
+ };
+ };
+
+ wsa_spkr_wcd_sd1: msm_cdc_pinctrll {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&spkr_1_wcd_en_active>;
+ pinctrl-1 = <&spkr_1_wcd_en_sleep>;
+ };
+
+ wsa_spkr_wcd_sd2: msm_cdc_pinctrlr {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&spkr_2_wcd_en_active>;
+ pinctrl-1 = <&spkr_2_wcd_en_sleep>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
index 2b89ee8..96390434 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
@@ -15,11 +15,12 @@
#include <dt-bindings/clock/qcom,rpmh.h>
#include <dt-bindings/clock/qcom,gcc-sdxpoorwills.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
/ {
model = "Qualcomm Technologies, Inc. SDX POORWILLS";
compatible = "qcom,sdxpoorwills";
- qcom,msm-id = <334 0x0>;
+ qcom,msm-id = <334 0x0>, <335 0x0>;
interrupt-parent = <&intc>;
reserved-memory {
@@ -40,6 +41,12 @@
reg = <0x87800000 0x8000000>;
label = "mss_mem";
};
+
+ audio_mem: audio_region@0 {
+ compatible = "shared-dma-pool";
+ reusable;
+ size = <0x400000>;
+ };
};
cpus {
@@ -151,22 +158,40 @@
};
clock_gcc: qcom,gcc@100000 {
- compatible = "qcom,dummycc";
- clock-output-names = "gcc_clocks";
+ compatible = "qcom,gcc-sdxpoorwills";
+ reg = <0x100000 0x1f0000>;
+ reg-names = "cc_base";
+ vdd_cx-supply = <&pmxpoorwills_s5_level>;
+ vdd_cx_ao-supply = <&pmxpoorwills_s5_level_ao>;
#clock-cells = <1>;
#reset-cells = <1>;
};
- clock_cpu: qcom,clock-a7@17810008 {
- compatible = "qcom,dummycc";
- clock-output-names = "cpu_clocks";
+ clock_cpu: qcom,clock-a7@17808100 {
+ compatible = "qcom,cpu-sdxpoorwills";
+ clocks = <&clock_rpmh RPMH_CXO_CLK_A>;
+ clock-names = "xo_ao";
+ qcom,a7cc-init-rate = <1497600000>;
+ reg = <0x17808100 0x7F10>;
+ reg-names = "apcs_pll";
+ qcom,rcg-reg-offset = <0x7F08>;
+
+ vdd_dig_ao-supply = <&pmxpoorwills_s5_level_ao>;
+ cpu-vdd-supply = <&pmxpoorwills_s5_level_ao>;
+ qcom,speed0-bin-v0 =
+ < 0 RPMH_REGULATOR_LEVEL_OFF>,
+ < 345600000 RPMH_REGULATOR_LEVEL_LOW_SVS>,
+ < 576000000 RPMH_REGULATOR_LEVEL_SVS>,
+ < 1094400000 RPMH_REGULATOR_LEVEL_NOM>,
+ < 1497600000 RPMH_REGULATOR_LEVEL_TURBO>;
#clock-cells = <1>;
};
clock_rpmh: qcom,rpmhclk {
- compatible = "qcom,dummycc";
- clock-output-names = "rpmh_clocks";
+ compatible = "qcom,rpmh-clk-sdxpoorwills";
#clock-cells = <1>;
+ mboxes = <&apps_rsc 0>;
+ mbox-names = "apps";
};
blsp1_uart2: serial@831000 {
@@ -183,7 +208,6 @@
compatible = "qcom,gdsc";
regulator-name = "gdsc_usb30";
reg = <0x0010b004 0x4>;
- status = "ok";
};
qcom,sps {
@@ -195,7 +219,12 @@
compatible = "qcom,gdsc";
regulator-name = "gdsc_pcie";
reg = <0x00137004 0x4>;
- status = "ok";
+ };
+
+ gdsc_emac: qcom,gdsc@147004 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_emac";
+ reg = <0x00147004 0x4>;
};
qnand_1: nand@1b00000 {
@@ -460,6 +489,19 @@
io-interface = "rgmii";
};
};
+
+ qmp_aop: qcom,qmp-aop@c300000 {
+ compatible = "qcom,qmp-mbox";
+ label = "aop";
+ reg = <0xc300000 0x400>,
+ <0x17811008 0x4>;
+ reg-names = "msgram", "irq-reg-base";
+ qcom,irq-mask = <0x1>;
+ interrupts = <GIC_SPI 221 IRQ_TYPE_EDGE_RISING>;
+ priority = <0>;
+ mbox-desc-offset = <0x0>;
+ #mbox-cells = <1>;
+ };
};
#include "pmxpoorwills.dtsi"
@@ -469,3 +511,4 @@
#include "sdxpoorwills-usb.dtsi"
#include "sdxpoorwills-bus.dtsi"
#include "sdxpoorwills-thermal.dtsi"
+#include "sdxpoorwills-audio.dtsi"
diff --git a/arch/arm/boot/dts/stih410.dtsi b/arch/arm/boot/dts/stih410.dtsi
index a3ef734..4d329b2 100644
--- a/arch/arm/boot/dts/stih410.dtsi
+++ b/arch/arm/boot/dts/stih410.dtsi
@@ -131,7 +131,7 @@
<&clk_s_d2_quadfs 0>;
assigned-clock-rates = <297000000>,
- <108000000>,
+ <297000000>,
<0>,
<400000000>,
<400000000>;
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 53e1a88..66d7196 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -216,6 +216,7 @@
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=32
+CONFIG_SERIAL_8250_RUNTIME_UARTS=6
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
diff --git a/arch/arm/configs/sdxpoorwills-perf_defconfig b/arch/arm/configs/sdxpoorwills-perf_defconfig
index d48a917..28a0c38 100644
--- a/arch/arm/configs/sdxpoorwills-perf_defconfig
+++ b/arch/arm/configs/sdxpoorwills-perf_defconfig
@@ -275,9 +275,11 @@
CONFIG_USB_CONFIGFS_MASS_STORAGE=y
CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_UAC1=y
CONFIG_USB_CONFIGFS_F_DIAG=y
CONFIG_USB_CONFIGFS_F_CDEV=y
CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
CONFIG_MMC=y
CONFIG_MMC_PARANOID_SD_INIT=y
CONFIG_MMC_BLOCK_MINORS=32
@@ -303,8 +305,11 @@
CONFIG_SPS_SUPPORT_NDP_BAM=y
CONFIG_QPNP_REVID=y
CONFIG_USB_BAM=y
+CONFIG_MDM_GCC_SDXPOORWILLS=y
+CONFIG_MDM_CLOCK_CPU_SDXPOORWILLS=y
CONFIG_REMOTE_SPINLOCK_MSM=y
CONFIG_MAILBOX=y
+CONFIG_MSM_QMP=y
CONFIG_QCOM_SCM=y
CONFIG_MSM_BOOT_STATS=y
CONFIG_MSM_SMEM=y
diff --git a/arch/arm/configs/sdxpoorwills_defconfig b/arch/arm/configs/sdxpoorwills_defconfig
index b518fe0..6c3ebc7 100644
--- a/arch/arm/configs/sdxpoorwills_defconfig
+++ b/arch/arm/configs/sdxpoorwills_defconfig
@@ -273,9 +273,11 @@
CONFIG_USB_CONFIGFS_MASS_STORAGE=y
CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_UAC1=y
CONFIG_USB_CONFIGFS_F_DIAG=y
CONFIG_USB_CONFIGFS_F_CDEV=y
CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
CONFIG_MMC=y
CONFIG_MMC_PARANOID_SD_INIT=y
CONFIG_MMC_BLOCK_MINORS=32
@@ -299,8 +301,11 @@
CONFIG_SPS=y
CONFIG_SPS_SUPPORT_NDP_BAM=y
CONFIG_QPNP_REVID=y
+CONFIG_MDM_GCC_SDXPOORWILLS=y
+CONFIG_MDM_CLOCK_CPU_SDXPOORWILLS=y
CONFIG_REMOTE_SPINLOCK_MSM=y
CONFIG_MAILBOX=y
+CONFIG_MSM_QMP=y
CONFIG_QCOM_SCM=y
CONFIG_MSM_BOOT_STATS=y
CONFIG_MSM_SMEM=y
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 55e0e3e..bd12b98 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -37,4 +37,3 @@
generic-y += termios.h
generic-y += timex.h
generic-y += trace_clock.h
-generic-y += unaligned.h
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index 9edea10..41e9107 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -32,6 +32,9 @@
#define arch_scale_cpu_capacity scale_cpu_capacity
extern unsigned long scale_cpu_capacity(struct sched_domain *sd, int cpu);
+#define arch_update_cpu_capacity update_cpu_power_capacity
+extern void update_cpu_power_capacity(int cpu);
+
#else
static inline void init_cpu_topology(void) { }
diff --git a/arch/arm/include/asm/unaligned.h b/arch/arm/include/asm/unaligned.h
new file mode 100644
index 0000000..ab905ff
--- /dev/null
+++ b/arch/arm/include/asm/unaligned.h
@@ -0,0 +1,27 @@
+#ifndef __ASM_ARM_UNALIGNED_H
+#define __ASM_ARM_UNALIGNED_H
+
+/*
+ * We generally want to set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS on ARMv6+,
+ * but we don't want to use linux/unaligned/access_ok.h since that can lead
+ * to traps on unaligned stm/ldm or strd/ldrd.
+ */
+#include <asm/byteorder.h>
+
+#if defined(__LITTLE_ENDIAN)
+# include <linux/unaligned/le_struct.h>
+# include <linux/unaligned/be_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned __get_unaligned_le
+# define put_unaligned __put_unaligned_le
+#elif defined(__BIG_ENDIAN)
+# include <linux/unaligned/be_struct.h>
+# include <linux/unaligned/le_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned __get_unaligned_be
+# define put_unaligned __put_unaligned_be
+#else
+# error need to define endianess
+#endif
+
+#endif /* __ASM_ARM_UNALIGNED_H */
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 2b6c530..28dcd44 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -42,6 +42,16 @@
*/
static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
+unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu)
+{
+ return per_cpu(cpu_scale, cpu);
+}
+
+static void set_power_scale(unsigned int cpu, unsigned long power)
+{
+ per_cpu(cpu_scale, cpu) = power;
+}
+
unsigned long scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
#ifdef CONFIG_CPU_FREQ
@@ -397,6 +407,23 @@
return &cpu_topology[cpu].thread_sibling;
}
+static void update_cpu_power(unsigned int cpu)
+{
+ if (!cpu_capacity(cpu))
+ return;
+
+ set_power_scale(cpu, cpu_capacity(cpu) / middle_capacity);
+
+ pr_info("CPU%u: update cpu_power %lu\n",
+ cpu, arch_scale_freq_power(NULL, cpu));
+}
+
+void update_cpu_power_capacity(int cpu)
+{
+ update_cpu_power(cpu);
+ update_cpu_capacity(cpu);
+}
+
static void update_siblings_masks(unsigned int cpuid)
{
struct cputopo_arm *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 9688ec0..1b30489 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -152,30 +152,26 @@
set_fs(fs);
}
-static void dump_instr(const char *lvl, struct pt_regs *regs)
+static void __dump_instr(const char *lvl, struct pt_regs *regs)
{
unsigned long addr = instruction_pointer(regs);
const int thumb = thumb_mode(regs);
const int width = thumb ? 4 : 8;
- mm_segment_t fs;
char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
int i;
/*
- * We need to switch to kernel mode so that we can use __get_user
- * to safely read from kernel space. Note that we now dump the
- * code first, just in case the backtrace kills us.
+ * Note that we now dump the code first, just in case the backtrace
+ * kills us.
*/
- fs = get_fs();
- set_fs(KERNEL_DS);
for (i = -4; i < 1 + !!thumb; i++) {
unsigned int val, bad;
if (thumb)
- bad = __get_user(val, &((u16 *)addr)[i]);
+ bad = get_user(val, &((u16 *)addr)[i]);
else
- bad = __get_user(val, &((u32 *)addr)[i]);
+ bad = get_user(val, &((u32 *)addr)[i]);
if (!bad)
p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
@@ -186,8 +182,20 @@
}
}
printk("%sCode: %s\n", lvl, str);
+}
- set_fs(fs);
+static void dump_instr(const char *lvl, struct pt_regs *regs)
+{
+ mm_segment_t fs;
+
+ if (!user_mode(regs)) {
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+ __dump_instr(lvl, regs);
+ set_fs(fs);
+ } else {
+ __dump_instr(lvl, regs);
+ }
}
#ifdef CONFIG_ARM_UNWIND
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index 0064b86..30a13647 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -227,7 +227,7 @@
u32 return_offset = (is_thumb) ? 2 : 4;
kvm_update_psr(vcpu, UND_MODE);
- *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) - return_offset;
+ *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
/* Branch to exception vector */
*vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
@@ -239,10 +239,8 @@
*/
static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
{
- unsigned long cpsr = *vcpu_cpsr(vcpu);
- bool is_thumb = (cpsr & PSR_T_BIT);
u32 vect_offset;
- u32 return_offset = (is_thumb) ? 4 : 0;
+ u32 return_offset = (is_pabt) ? 4 : 8;
bool is_lpae;
kvm_update_psr(vcpu, ABT_MODE);
diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile
index 8679405..92eab1d 100644
--- a/arch/arm/kvm/hyp/Makefile
+++ b/arch/arm/kvm/hyp/Makefile
@@ -2,7 +2,7 @@
# Makefile for Kernel-based Virtual Machine module, HYP part
#
-ccflags-y += -fno-stack-protector
+ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING
KVM=../../../../virt/kvm
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index 40a6aab..3df7439 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -7,10 +7,10 @@
sdm845-cdp-overlay.dtbo \
sdm845-mtp-overlay.dtbo \
sdm845-qrd-overlay.dtbo \
- sdm845-qvr-overlay.dtbo \
sdm845-4k-panel-mtp-overlay.dtbo \
sdm845-4k-panel-cdp-overlay.dtbo \
sdm845-4k-panel-qrd-overlay.dtbo \
+ sdm845-v2-qvr-overlay.dtbo \
sdm845-v2-cdp-overlay.dtbo \
sdm845-v2-mtp-overlay.dtbo \
sdm845-v2-qrd-overlay.dtbo \
@@ -46,11 +46,10 @@
sdm845-cdp-overlay.dtbo-base := sdm845.dtb
sdm845-mtp-overlay.dtbo-base := sdm845.dtb
sdm845-qrd-overlay.dtbo-base := sdm845.dtb
-sdm845-qvr-overlay.dtbo-base := sdm845-v2.dtb
-sdm845-qvr-overlay.dtbo-base := sdm845.dtb
sdm845-4k-panel-mtp-overlay.dtbo-base := sdm845.dtb
sdm845-4k-panel-cdp-overlay.dtbo-base := sdm845.dtb
sdm845-4k-panel-qrd-overlay.dtbo-base := sdm845.dtb
+sdm845-v2-qvr-overlay.dtbo-base := sdm845-v2.dtb
sdm845-v2-cdp-overlay.dtbo-base := sdm845-v2.dtb
sdm845-v2-mtp-overlay.dtbo-base := sdm845-v2.dtb
sdm845-v2-qrd-overlay.dtbo-base := sdm845-v2.dtb
@@ -92,7 +91,7 @@
sdm845-v2-cdp.dtb \
sdm845-qrd.dtb \
sdm845-v2-qrd.dtb \
- sdm845-qvr.dtb \
+ sdm845-v2-qvr.dtb \
sdm845-4k-panel-mtp.dtb \
sdm845-4k-panel-cdp.dtb \
sdm845-4k-panel-qrd.dtb \
@@ -180,6 +179,7 @@
sda670-cdp.dtb \
sda670-pm660a-mtp.dtb \
sda670-pm660a-cdp.dtb \
+ qcs605-360camera.dtb \
qcs605-mtp.dtb \
qcs605-cdp.dtb \
qcs605-external-codec-mtp.dtb
@@ -187,7 +187,29 @@
ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y)
else
-dtb-$(CONFIG_ARCH_MSM8953) += msm8953-mtp.dtb
+dtb-$(CONFIG_ARCH_MSM8953) += msm8953-cdp.dtb \
+ msm8953-mtp.dtb \
+ msm8953-ext-codec-mtp.dtb \
+ msm8953-qrd-sku3.dtb \
+ msm8953-rcm.dtb \
+ apq8053-rcm.dtb \
+ msm8953-ext-codec-rcm.dtb \
+ apq8053-cdp.dtb \
+ apq8053-ipc.dtb \
+ msm8953-ipc.dtb \
+ apq8053-mtp.dtb \
+ apq8053-ext-audio-mtp.dtb \
+ apq8053-ext-codec-rcm.dtb \
+ msm8953-cdp-1200p.dtb \
+ msm8953-iot-mtp.dtb \
+ apq8053-iot-mtp.dtb \
+ msm8953-pmi8940-cdp.dtb \
+ msm8953-pmi8940-mtp.dtb \
+ msm8953-pmi8937-cdp.dtb \
+ msm8953-pmi8937-mtp.dtb \
+ msm8953-pmi8940-ext-codec-mtp.dtb \
+ msm8953-pmi8937-ext-codec-mtp.dtb
+
dtb-$(CONFIG_ARCH_SDM450) += sdm450-rcm.dtb \
sdm450-cdp.dtb \
sdm450-mtp.dtb \
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/apq8053-cdp.dts
similarity index 60%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/apq8053-cdp.dts
index 5513c92..5e89e4f 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/apq8053-cdp.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,15 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "apq8053.dtsi"
+#include "msm8953-cdp.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. APQ8053 + PMI8950 CDP";
+ compatible = "qcom,apq8053-cdp", "qcom,apq8053", "qcom,cdp";
+ qcom,board-id= <1 0>;
+ qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
};
+
diff --git a/arch/arm64/boot/dts/qcom/apq8053-ext-audio-mtp.dts b/arch/arm64/boot/dts/qcom/apq8053-ext-audio-mtp.dts
new file mode 100644
index 0000000..2c7b228
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/apq8053-ext-audio-mtp.dts
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "apq8053.dtsi"
+#include "msm8953-mtp.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. APQ8053 + PMI8950 Ext Codec MTP";
+ compatible = "qcom,apq8053-mtp", "qcom,apq8053", "qcom,mtp";
+ qcom,board-id= <8 1>;
+ qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/apq8053-ext-codec-rcm.dts b/arch/arm64/boot/dts/qcom/apq8053-ext-codec-rcm.dts
new file mode 100644
index 0000000..d026734
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/apq8053-ext-codec-rcm.dts
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "apq8053.dtsi"
+#include "msm8953-cdp.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. APQ8053 + PMI8950 Ext Codec RCM";
+ compatible = "qcom,apq8053-cdp", "qcom,apq8053", "qcom,cdp";
+ qcom,board-id= <21 1>;
+ qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/apq8053-iot-mtp.dts
similarity index 60%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/apq8053-iot-mtp.dts
index 5513c92..177e105 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/apq8053-iot-mtp.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,15 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "apq8053.dtsi"
+#include "msm8953-mtp.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. APQ8053 + PMI8950 IOT MTP";
+ compatible = "qcom,apq8053-mtp", "qcom,apq8053", "qcom,mtp";
+ qcom,board-id= <8 2>;
+ qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/apq8053-ipc.dts
similarity index 60%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/apq8053-ipc.dts
index 5513c92..3381b2a 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/apq8053-ipc.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,14 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "apq8053.dtsi"
+#include "msm8953-ipc.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. APQ8053 + PMI8950 IPC";
+ compatible = "qcom,apq8053-ipc", "qcom,apq8053", "qcom,ipc";
+ qcom,board-id= <12 0>;
+ qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/apq8053-mtp.dts
similarity index 60%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/apq8053-mtp.dts
index 5513c92..be544af 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/apq8053-mtp.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,15 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "apq8053.dtsi"
+#include "msm8953-mtp.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. APQ8053 + PMI8950 MTP";
+ compatible = "qcom,apq8053-mtp", "qcom,apq8053", "qcom,mtp";
+ qcom,board-id= <8 0>;
+ qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/apq8053-rcm.dts
similarity index 60%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/apq8053-rcm.dts
index 5513c92..cc5bdaa 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/apq8053-rcm.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,14 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "apq8053.dtsi"
+#include "msm8953-cdp.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. APQ8053 + PMI8950 RCM";
+ compatible = "qcom,apq8053-cdp", "qcom,apq8053", "qcom,cdp";
+ qcom,board-id= <21 0>;
+ qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
};
diff --git a/arch/arm64/boot/dts/qcom/apq8053.dtsi b/arch/arm64/boot/dts/qcom/apq8053.dtsi
new file mode 100644
index 0000000..15a1595
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/apq8053.dtsi
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "msm8953.dtsi"
+/ {
+ model = "Qualcomm Technologies, Inc. APQ 8953";
+ compatible = "qcom,apq8053";
+ qcom,msm-id = <304 0x0>;
+};
+
+&secure_mem {
+ status = "disabled";
+};
+
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm670.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm670.dtsi
index fc468f5..ae22a36 100644
--- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm670.dtsi
@@ -61,6 +61,7 @@
qcom,skip-init;
qcom,use-3-lvl-tables;
qcom,no-asid-retention;
+ qcom,disable-atos;
#global-interrupts = <1>;
#size-cells = <1>;
#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/qcom/msm8953-cdp-1200p.dts b/arch/arm64/boot/dts/qcom/msm8953-cdp-1200p.dts
new file mode 100644
index 0000000..a685380
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8953-cdp-1200p.dts
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "msm8953.dtsi"
+#include "msm8953-cdp.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. MSM8953 + PMI8950 CDP 1200P";
+ compatible = "qcom,msm8953-cdp", "qcom,msm8953", "qcom,cdp";
+ qcom,board-id= <1 1>;
+ qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-cdp.dts
similarity index 60%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-cdp.dts
index 5513c92..1f78902 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-cdp.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,15 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "msm8953.dtsi"
+#include "msm8953-cdp.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. MSM8953 + PMI8950 CDP";
+ compatible = "qcom,msm8953-cdp", "qcom,msm8953", "qcom,cdp";
+ qcom,board-id= <1 0>;
+ qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
};
+
diff --git a/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp.dts b/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp.dts
new file mode 100644
index 0000000..3dfd848
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp.dts
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "msm8953.dtsi"
+#include "msm8953-mtp.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. MSM8953 + PMI8950 Ext Codec MTP";
+ compatible = "qcom,msm8953-mtp", "qcom,msm8953", "qcom,mtp";
+ qcom,board-id= <8 1>;
+ qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/msm8953-ext-codec-rcm.dts b/arch/arm64/boot/dts/qcom/msm8953-ext-codec-rcm.dts
new file mode 100644
index 0000000..a81e212
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8953-ext-codec-rcm.dts
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "msm8953.dtsi"
+#include "msm8953-cdp.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. MSM8953 + PMI8950 Ext Codec RCM";
+ compatible = "qcom,msm8953-cdp", "qcom,msm8953", "qcom,cdp";
+ qcom,board-id= <21 1>;
+ qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-iot-mtp.dts
similarity index 60%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-iot-mtp.dts
index 5513c92..524e7ca 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-iot-mtp.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,15 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "msm8953.dtsi"
+#include "msm8953-mtp.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. MSM8953 + PMI8950 IOT MTP";
+ compatible = "qcom,msm8953-mtp", "qcom,msm8953", "qcom,mtp";
+ qcom,board-id= <8 2>;
+ qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-ipc.dts
similarity index 60%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-ipc.dts
index 5513c92..89a54af 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-ipc.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,15 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "msm8953.dtsi"
+#include "msm8953-ipc.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. MSM8953 + PMI8950 IPC";
+ compatible = "qcom,msm8953-ipc", "qcom,msm8953", "qcom,ipc";
+ qcom,board-id= <12 0>;
+ qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
};
+
diff --git a/arch/arm64/boot/dts/qcom/msm8953-ipc.dtsi b/arch/arm64/boot/dts/qcom/msm8953-ipc.dtsi
new file mode 100644
index 0000000..26f4338
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8953-ipc.dtsi
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&blsp1_uart0 {
+ status = "ok";
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart_console_active>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-pmi8937-cdp.dts
similarity index 60%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-pmi8937-cdp.dts
index 5513c92..a751d5d 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-pmi8937-cdp.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,15 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "msm8953.dtsi"
+#include "msm8953-cdp.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. MSM8953 + PMI8937 CDP";
+ compatible = "qcom,msm8953-cdp", "qcom,msm8953", "qcom,cdp";
+ qcom,board-id= <1 0>;
+ qcom,pmic-id = <0x010016 0x020037 0x0 0x0>;
};
+
diff --git a/arch/arm64/boot/dts/qcom/msm8953-pmi8937-ext-codec-mtp.dts b/arch/arm64/boot/dts/qcom/msm8953-pmi8937-ext-codec-mtp.dts
new file mode 100644
index 0000000..13aba62
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8953-pmi8937-ext-codec-mtp.dts
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "msm8953.dtsi"
+#include "msm8953-mtp.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. MSM8953 + PMI8937 Ext Codec MTP";
+ compatible = "qcom,msm8953-mtp", "qcom,msm8953", "qcom,mtp";
+ qcom,board-id= <8 1>;
+ qcom,pmic-id = <0x010016 0x020037 0x0 0x0>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-pmi8937-mtp.dts
similarity index 60%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-pmi8937-mtp.dts
index 5513c92..9d6be47 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-pmi8937-mtp.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,15 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "msm8953.dtsi"
+#include "msm8953-mtp.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. MSM8953 + PMI8937 MTP";
+ compatible = "qcom,msm8953-mtp", "qcom,msm8953", "qcom,mtp";
+ qcom,board-id= <8 0>;
+ qcom,pmic-id = <0x010016 0x020037 0x0 0x0>;
};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-pmi8940-cdp.dts
similarity index 60%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-pmi8940-cdp.dts
index 5513c92..d2bb465 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-pmi8940-cdp.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,15 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "msm8953.dtsi"
+#include "msm8953-cdp.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. MSM8953 + PMI8940 CDP";
+ compatible = "qcom,msm8953-cdp", "qcom,msm8953", "qcom,cdp";
+ qcom,board-id= <1 0>;
+ qcom,pmic-id = <0x010016 0x020040 0x0 0x0>;
};
+
diff --git a/arch/arm64/boot/dts/qcom/msm8953-pmi8940-ext-codec-mtp.dts b/arch/arm64/boot/dts/qcom/msm8953-pmi8940-ext-codec-mtp.dts
new file mode 100644
index 0000000..dbbb6b8
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8953-pmi8940-ext-codec-mtp.dts
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "msm8953.dtsi"
+#include "msm8953-mtp.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. MSM8953 + PMI8940 Ext Codec MTP";
+ compatible = "qcom,msm8953-mtp", "qcom,msm8953", "qcom,mtp";
+ qcom,board-id= <8 1>;
+ qcom,pmic-id = <0x010016 0x020040 0x0 0x0>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-pmi8940-mtp.dts
similarity index 60%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-pmi8940-mtp.dts
index 5513c92..0fb793b 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-pmi8940-mtp.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,15 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "msm8953.dtsi"
+#include "msm8953-mtp.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. MSM8953 + PMI8940 MTP";
+ compatible = "qcom,msm8953-mtp", "qcom,msm8953", "qcom,mtp";
+ qcom,board-id= <8 0>;
+ qcom,pmic-id = <0x010016 0x020040 0x0 0x0>;
};
+
diff --git a/arch/arm64/boot/dts/qcom/msm8953-qrd-sku3.dts b/arch/arm64/boot/dts/qcom/msm8953-qrd-sku3.dts
new file mode 100644
index 0000000..5d892fd
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8953-qrd-sku3.dts
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "msm8953.dtsi"
+#include "msm8953-qrd-sku3.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. MSM8953 + PMI8950 QRD SKU3";
+ compatible = "qcom,msm8953-qrd-sku3",
+ "qcom,msm8953-qrd", "qcom,msm8953", "qcom,qrd";
+ qcom,board-id= <0x2000b 0>;
+ qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/msm8953-qrd-sku3.dtsi b/arch/arm64/boot/dts/qcom/msm8953-qrd-sku3.dtsi
new file mode 100644
index 0000000..96e185b
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8953-qrd-sku3.dtsi
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8953-qrd.dtsi"
+
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-rcm.dts
similarity index 60%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-rcm.dts
index 5513c92..a3117ed 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-rcm.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,15 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "msm8953.dtsi"
+#include "msm8953-cdp.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. MSM8953 + PMI8950 RCM";
+ compatible = "qcom,msm8953-cdp", "qcom,msm8953", "qcom,cdp";
+ qcom,board-id= <21 0>;
+ qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
};
+
diff --git a/arch/arm64/boot/dts/qcom/msm8953.dtsi b/arch/arm64/boot/dts/qcom/msm8953.dtsi
index e90c30b..87d5f34 100644
--- a/arch/arm64/boot/dts/qcom/msm8953.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953.dtsi
@@ -640,10 +640,10 @@
interrupts = <GIC_SPI 190 IRQ_TYPE_NONE>;
qcom,ee = <0>;
qcom,channel = <0>;
- #address-cells = <1>;
+ #address-cells = <2>;
#size-cells = <0>;
interrupt-controller;
- #interrupt-cells = <3>;
+ #interrupt-cells = <4>;
cell-index = <0>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/mtp8953-ipc.dts
similarity index 60%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/mtp8953-ipc.dts
index 5513c92..481e576 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/mtp8953-ipc.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,14 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "msm8953.dtsi"
+#include "msm8953-ipc.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. MSM8953 + PMI8950 IPC";
+ compatible = "qcom,msm8953-ipc", "qcom,msm8953", "qcom,ipc";
+ qcom,board-id= <12 0>;
+ qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
};
diff --git a/arch/arm64/boot/dts/qcom/qcs605-360camera.dts b/arch/arm64/boot/dts/qcom/qcs605-360camera.dts
new file mode 100644
index 0000000..8caad4b
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/qcs605-360camera.dts
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "qcs605.dtsi"
+#include "qcs605-360camera.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. QCS605 PM660 + PM660L 360camera";
+ compatible = "qcom,qcs605-mtp", "qcom,qcs605", "qcom,mtp";
+ qcom,board-id = <0x0000000b 1>;
+ qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
+ <0x0001001b 0x0102001a 0x0 0x0>,
+ <0x0001001b 0x0201011a 0x0 0x0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/qcs605-360camera.dtsi b/arch/arm64/boot/dts/qcom/qcs605-360camera.dtsi
new file mode 100644
index 0000000..efd6f4a
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/qcs605-360camera.dtsi
@@ -0,0 +1,236 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sdm670-mtp.dtsi"
+#include "sdm670-camera-sensor-360camera.dtsi"
+#include "sdm670-audio-overlay.dtsi"
+
+&qupv3_se3_i2c {
+ status = "disabled";
+};
+
+&qupv3_se10_i2c {
+ status = "okay";
+};
+
+&qupv3_se12_2uart {
+ status = "okay";
+};
+
+&qupv3_se6_4uart {
+ status = "okay";
+};
+
+&qupv3_se13_i2c {
+ status = "disabled";
+};
+
+&qupv3_se13_spi {
+ status = "disabled";
+};
+
+&int_codec {
+ qcom,model = "sdm670-360cam-snd-card";
+ qcom,audio-routing =
+ "RX_BIAS", "INT_MCLK0",
+ "SPK_RX_BIAS", "INT_MCLK0",
+ "INT_LDO_H", "INT_MCLK0",
+ "DMIC1", "MIC BIAS External",
+ "MIC BIAS External", "Digital Mic1",
+ "DMIC2", "MIC BIAS External",
+ "MIC BIAS External", "Digital Mic2",
+ "DMIC3", "MIC BIAS External2",
+ "MIC BIAS External2", "Digital Mic3",
+ "DMIC4", "MIC BIAS External2",
+ "MIC BIAS External2", "Digital Mic4",
+ "PDM_IN_RX1", "PDM_OUT_RX1",
+ "PDM_IN_RX2", "PDM_OUT_RX2",
+ "PDM_IN_RX3", "PDM_OUT_RX3",
+ "ADC1_IN", "ADC1_OUT",
+ "ADC2_IN", "ADC2_OUT",
+ "ADC3_IN", "ADC3_OUT";
+ qcom,wsa-max-devs = <0>;
+};
+
+&tlmm {
+ pwr_led_green_default: pwr_led_green_default {
+ mux {
+ pins = "gpio106";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio106";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable;
+ output-low;
+ };
+ };
+
+ pwr_led_red_default: pwr_led_red_default {
+ mux {
+ pins = "gpio111";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio111";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable;
+ output-low;
+ };
+ };
+
+ wifi_led_green_default: wifi_led_green_default {
+ mux {
+ pins = "gpio114";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio114";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable;
+ output-low;
+ };
+ };
+
+ wifi_led_red_default: wifi_led_red_default {
+ mux {
+ pins = "gpio115";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio115";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable;
+ output-low;
+ };
+ };
+
+ key_wcnss_default: key_wcnss_default {
+ mux {
+ pins = "gpio120";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio120";
+ drive-strength = <8>; /* 8 mA */
+ bias-pull-up;
+ input-enable;
+ };
+ };
+
+ key_record_default: key_record_default {
+ mux {
+ pins = "gpio119";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio119";
+ drive-strength = <8>; /* 8 mA */
+ bias-pull-up;
+ input-enable;
+ };
+ };
+
+ key_snapshot_default: key_snapshot_default {
+ mux {
+ pins = "gpio91";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio91";
+ drive-strength = <8>; /* 8 mA */
+ bias-pull-up;
+ input-enable;
+ };
+ };
+};
+
+&soc {
+ gpio-leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwr_led_green_default
+ &pwr_led_red_default
+ &wifi_led_green_default
+ &wifi_led_red_default>;
+ status = "okay";
+
+ led@1 {
+ label = "PWR_LED:red:106";
+ gpios = <&tlmm 106 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "wlan";
+ default-state = "off";
+ };
+
+ led@2 {
+ label = "PWR_LED:green:111";
+ gpios = <&tlmm 111 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "wlan";
+ default-state = "on";
+ };
+
+ led@3 {
+ label = "WIFI_LED:red:114";
+ gpios = <&tlmm 114 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "wlan";
+ default-state = "on";
+ };
+
+ led@4 {
+ label = "WIFI_LED:green:115";
+ gpios = <&tlmm 115 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "wlan";
+ default-state = "off";
+ };
+ };
+
+ gpio_keys {
+ compatible = "gpio-keys";
+ label = "gpio-keys";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&key_snapshot_default
+ &key_record_default
+ &key_wcnss_default>;
+ status = "okay";
+ cam_snapshot {
+ label = "cam_snapshot";
+ gpios = <&tlmm 91 GPIO_ACTIVE_LOW>;
+ linux,input-type = <1>;
+ linux,code = <766>;
+ gpio-key,wakeup;
+ debounce-interval = <15>;
+ linux,can-disable;
+ };
+
+ cam_record {
+ label = "cam_record";
+ gpios = <&tlmm 119 GPIO_ACTIVE_LOW>;
+ linux,input-type = <1>;
+ linux,code = <766>;
+ gpio-key,wakeup;
+ debounce-interval = <15>;
+ linux,can-disable;
+ };
+
+ wcnss_key {
+ label = "wcnss_key";
+ gpios = <&tlmm 120 GPIO_ACTIVE_LOW>;
+ linux,input-type = <1>;
+ linux,code = <528>;
+ gpio-key,wakeup;
+ debounce-interval = <15>;
+ linux,can-disable;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/qcs605.dtsi b/arch/arm64/boot/dts/qcom/qcs605.dtsi
index 12da650..66493d1 100644
--- a/arch/arm64/boot/dts/qcom/qcs605.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs605.dtsi
@@ -17,3 +17,13 @@
model = "Qualcomm Technologies, Inc. QCS605";
qcom,msm-id = <347 0x0>;
};
+
+&soc {
+ qcom,rmnet-ipa {
+ status = "disabled";
+ };
+};
+
+&ipa_hw {
+ status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/qcom/sda845-v2-hdk-overlay.dts b/arch/arm64/boot/dts/qcom/sda845-v2-hdk-overlay.dts
index de20f87..813c198 100644
--- a/arch/arm64/boot/dts/qcom/sda845-v2-hdk-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sda845-v2-hdk-overlay.dts
@@ -29,3 +29,28 @@
qcom,msm-id = <341 0x20000>;
qcom,board-id = <0x01001F 0x00>;
};
+
+&dsi_dual_nt36850_truly_cmd {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+ qcom,panel-mode-gpio = <&tlmm 52 0>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+ qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_dual_nt36850_truly_cmd_display {
+ qcom,dsi-display-active;
+};
+
+&labibb {
+ status = "ok";
+ qcom,qpnp-labibb-mode = "lcd";
+};
+
+&pmi8998_wled {
+ status = "okay";
+ qcom,led-strings-list = [01 02];
+};
diff --git a/arch/arm64/boot/dts/qcom/sda845-v2-hdk.dtsi b/arch/arm64/boot/dts/qcom/sda845-v2-hdk.dtsi
index d212554..26a73b0 100644
--- a/arch/arm64/boot/dts/qcom/sda845-v2-hdk.dtsi
+++ b/arch/arm64/boot/dts/qcom/sda845-v2-hdk.dtsi
@@ -22,3 +22,19 @@
&sdhc_2 {
cd-gpios = <&tlmm 126 GPIO_ACTIVE_LOW>;
};
+
+&usb1 {
+ status = "ok";
+ dwc3@a800000 {
+ maximum-speed = "high-speed";
+ dr_mode = "host";
+ };
+};
+
+&qusb_phy1 {
+ status = "ok";
+};
+
+&usb_qmp_phy {
+ status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm670-audio-overlay.dtsi
index 58c290d..5dd5c0d 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-audio-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-audio-overlay.dtsi
@@ -50,8 +50,8 @@
qcom,hph-en0-gpio = <&tavil_hph_en0>;
qcom,hph-en1-gpio = <&tavil_hph_en1>;
qcom,msm-mclk-freq = <9600000>;
- asoc-codec = <&stub_codec>;
- asoc-codec-names = "msm-stub-codec.1";
+ asoc-codec = <&stub_codec>, <&ext_disp_audio_codec>;
+ asoc-codec-names = "msm-stub-codec.1", "msm-ext-disp-audio-codec-rx";
qcom,wsa-max-devs = <2>;
qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0212>,
<&wsa881x_0213>, <&wsa881x_0214>;
@@ -100,9 +100,11 @@
qcom,cdc-dmic-gpios = <&cdc_dmic_gpios>;
asoc-codec = <&stub_codec>, <&msm_digital_codec>,
- <&pmic_analog_codec>, <&msm_sdw_codec>;
+ <&pmic_analog_codec>, <&msm_sdw_codec>,
+ <&ext_disp_audio_codec>;
asoc-codec-names = "msm-stub-codec.1", "msm-dig-codec",
- "analog-codec", "msm_sdw_codec";
+ "analog-codec", "msm_sdw_codec",
+ "msm-ext-disp-audio-codec-rx";
qcom,wsa-max-devs = <2>;
qcom,wsa-devs = <&wsa881x_211_en>, <&wsa881x_212_en>,
diff --git a/arch/arm64/boot/dts/qcom/sdm670-audio.dtsi b/arch/arm64/boot/dts/qcom/sdm670-audio.dtsi
index b26ec5c..bda44cc 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-audio.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-audio.dtsi
@@ -39,6 +39,7 @@
qcom,wcn-btfm;
qcom,mi2s-audio-intf;
qcom,auxpcm-audio-intf;
+ qcom,ext-disp-audio-rx;
asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
<&loopback>, <&compress>, <&hostless>,
<&afe>, <&lsm>, <&routing>, <&cpe>, <&compr>,
@@ -50,7 +51,7 @@
"msm-pcm-afe", "msm-lsm-client",
"msm-pcm-routing", "msm-cpe-lsm",
"msm-compr-dsp", "msm-pcm-dsp-noirq";
- asoc-cpu = <&dai_mi2s0>, <&dai_mi2s1>,
+ asoc-cpu = <&dai_dp>, <&dai_mi2s0>, <&dai_mi2s1>,
<&dai_mi2s2>, <&dai_mi2s3>, <&dai_mi2s4>,
<&dai_pri_auxpcm>, <&dai_sec_auxpcm>,
<&dai_tert_auxpcm>, <&dai_quat_auxpcm>,
@@ -70,7 +71,8 @@
<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_tx_0>,
<&dai_quat_tdm_rx_0>, <&dai_quat_tdm_tx_0>,
<&dai_quin_tdm_rx_0>, <&dai_quin_tdm_tx_0>;
- asoc-cpu-names = "msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
+ asoc-cpu-names = "msm-dai-q6-dp.24608",
+ "msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
"msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
"msm-dai-q6-mi2s.4",
"msm-dai-q6-auxpcm.1", "msm-dai-q6-auxpcm.2",
@@ -102,6 +104,7 @@
compatible = "qcom,sdm670-asoc-snd";
qcom,model = "sdm670-mtp-snd-card";
qcom,wcn-btfm;
+ qcom,ext-disp-audio-rx;
qcom,mi2s-audio-intf;
qcom,auxpcm-audio-intf;
asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
@@ -115,7 +118,7 @@
"msm-pcm-afe", "msm-lsm-client",
"msm-pcm-routing", "msm-compr-dsp",
"msm-pcm-dsp-noirq";
- asoc-cpu = <&dai_mi2s0>, <&dai_mi2s1>,
+ asoc-cpu = <&dai_dp>, <&dai_mi2s0>, <&dai_mi2s1>,
<&dai_mi2s2>, <&dai_mi2s3>, <&dai_mi2s4>,
<&dai_int_mi2s0>, <&dai_int_mi2s1>,
<&dai_int_mi2s2>, <&dai_int_mi2s3>,
@@ -134,7 +137,8 @@
<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_tx_0>,
<&dai_quat_tdm_rx_0>, <&dai_quat_tdm_tx_0>,
<&dai_quin_tdm_rx_0>, <&dai_quin_tdm_tx_0>;
- asoc-cpu-names = "msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
+ asoc-cpu-names = "msm-dai-q6-dp.24608",
+ "msm-dai-q6-mi2s.0","msm-dai-q6-mi2s.1",
"msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
"msm-dai-q6-mi2s.4",
"msm-dai-q6-mi2s.7", "msm-dai-q6-mi2s.8",
diff --git a/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-360camera.dtsi b/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-360camera.dtsi
new file mode 100644
index 0000000..18b0cd8
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-360camera.dtsi
@@ -0,0 +1,382 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ led_flash_rear: qcom,camera-flash@0 {
+ cell-index = <0>;
+ reg = <0x00 0x00>;
+ compatible = "qcom,camera-flash";
+ flash-source = <&pm660l_flash0 &pm660l_flash1>;
+ torch-source = <&pm660l_torch0 &pm660l_torch1>;
+ switch-source = <&pm660l_switch0>;
+ status = "ok";
+ };
+
+ led_flash_front: qcom,camera-flash@1 {
+ cell-index = <1>;
+ reg = <0x01 0x00>;
+ compatible = "qcom,camera-flash";
+ flash-source = <&pm660l_flash2>;
+ torch-source = <&pm660l_torch2>;
+ switch-source = <&pm660l_switch1>;
+ status = "ok";
+ };
+
+ actuator_regulator: gpio-regulator@0 {
+ compatible = "regulator-fixed";
+ reg = <0x00 0x00>;
+ regulator-name = "actuator_regulator";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-enable-ramp-delay = <100>;
+ enable-active-high;
+ gpio = <&tlmm 27 0>;
+ };
+
+ camera_ldo: gpio-regulator@2 {
+ compatible = "regulator-fixed";
+ reg = <0x02 0x00>;
+ regulator-name = "camera_ldo";
+ regulator-min-microvolt = <1352000>;
+ regulator-max-microvolt = <1352000>;
+ regulator-enable-ramp-delay = <233>;
+ enable-active-high;
+ gpio = <&pm660l_gpios 4 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&camera_dvdd_en_default>;
+ vin-supply = <&pm660_s6>;
+ };
+
+ camera_rear_ldo: gpio-regulator@1 {
+ compatible = "regulator-fixed";
+ reg = <0x01 0x00>;
+ regulator-name = "camera_rear_ldo";
+ regulator-min-microvolt = <1352000>;
+ regulator-max-microvolt = <1352000>;
+ regulator-enable-ramp-delay = <135>;
+ enable-active-high;
+ gpio = <&pm660l_gpios 4 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&camera_rear_dvdd_en_default>;
+ vin-supply = <&pm660_s6>;
+ };
+
+ camera_vio_ldo: gpio-regulator@3 {
+ compatible = "regulator-fixed";
+ reg = <0x03 0x00>;
+ regulator-name = "camera_vio_ldo";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-enable-ramp-delay = <233>;
+ enable-active-high;
+ gpio = <&tlmm 29 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&cam_sensor_rear_vio>;
+ vin-supply = <&pm660_s4>;
+ };
+
+ camera_vana_ldo: gpio-regulator@4 {
+ compatible = "regulator-fixed";
+ reg = <0x04 0x00>;
+ regulator-name = "camera_vana_ldo";
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ regulator-enable-ramp-delay = <233>;
+ enable-active-high;
+ gpio = <&tlmm 8 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&cam_sensor_rear_vana>;
+ vin-supply = <&pm660l_bob>;
+ };
+};
+
+&cam_cci {
+ actuator_rear: qcom,actuator@0 {
+ cell-index = <0>;
+ reg = <0x0>;
+ compatible = "qcom,actuator";
+ cci-master = <0>;
+ cam_vaf-supply = <&actuator_regulator>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2800000>;
+ rgltr-max-voltage = <2800000>;
+ rgltr-load-current = <0>;
+ };
+
+ actuator_front: qcom,actuator@1 {
+ cell-index = <1>;
+ reg = <0x1>;
+ compatible = "qcom,actuator";
+ cci-master = <1>;
+ cam_vaf-supply = <&actuator_regulator>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2800000>;
+ rgltr-max-voltage = <2800000>;
+ rgltr-load-current = <0>;
+ };
+
+ ois_rear: qcom,ois@0 {
+ cell-index = <0>;
+ reg = <0x0>;
+ compatible = "qcom,ois";
+ cci-master = <0>;
+ cam_vaf-supply = <&actuator_regulator>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2800000>;
+ rgltr-max-voltage = <2800000>;
+ rgltr-load-current = <0>;
+ status = "disabled";
+ };
+
+ eeprom_rear: qcom,eeprom@0 {
+ cell-index = <0>;
+ reg = <0>;
+ compatible = "qcom,eeprom";
+ cam_vio-supply = <&camera_vio_ldo>;
+ cam_vana-supply = <&camera_vana_ldo>;
+ cam_vdig-supply = <&camera_rear_ldo>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ cam_vaf-supply = <&actuator_regulator>;
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk", "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <1800000 2850000 1352000 0 2800000>;
+ rgltr-max-voltage = <1800000 2850000 1352000 0 2800000>;
+ rgltr-load-current = <0 80000 105000 0 0>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk0_active
+ &cam_sensor_rear_active>;
+ pinctrl-1 = <&cam_sensor_mclk0_suspend
+ &cam_sensor_rear_suspend>;
+ gpios = <&tlmm 13 0>,
+ <&tlmm 30 0>;
+ gpio-reset = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK0",
+ "CAM_RESET0";
+ sensor-mode = <0>;
+ cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+
+ eeprom_rear_aux: qcom,eeprom@1 {
+ cell-index = <1>;
+ reg = <0x1>;
+ compatible = "qcom,eeprom";
+ cam_vio-supply = <&camera_vio_ldo>;
+ cam_vana-supply = <&camera_vana_ldo>;
+ cam_vdig-supply = <&camera_ldo>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ cam_vaf-supply = <&actuator_regulator>;
+ regulator-names = "cam_vdig", "cam_vio", "cam_vana",
+ "cam_clk", "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <1352000 1800000 2850000 0 2800000>;
+ rgltr-max-voltage = <1352000 1800000 2850000 0 2800000>;
+ rgltr-load-current = <105000 0 80000 0>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk1_active
+ &cam_sensor_rear2_active>;
+ pinctrl-1 = <&cam_sensor_mclk1_suspend
+ &cam_sensor_rear2_suspend>;
+ gpios = <&tlmm 14 0>,
+ <&tlmm 28 0>;
+ gpio-reset = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK1",
+ "CAM_RESET1";
+ sensor-position = <0>;
+ sensor-mode = <0>;
+ cci-master = <1>;
+ status = "ok";
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+
+ eeprom_front: qcom,eeprom@2 {
+ cell-index = <2>;
+ reg = <0x2>;
+ compatible = "qcom,eeprom";
+ cam_vio-supply = <&camera_vio_ldo>;
+ cam_vana-supply = <&camera_vana_ldo>;
+ cam_vdig-supply = <&camera_ldo>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ cam_vaf-supply = <&actuator_regulator>;
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk", "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <1800000 2850000 1352000 0 2800000>;
+ rgltr-max-voltage = <1800000 2850000 1352000 0 2800000>;
+ rgltr-load-current = <0 80000 105000 0>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk2_active
+ &cam_sensor_front_active>;
+ pinctrl-1 = <&cam_sensor_mclk2_suspend
+ &cam_sensor_front_suspend>;
+ gpios = <&tlmm 15 0>,
+ <&tlmm 9 0>;
+ gpio-reset = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK2",
+ "CAM_RESET2";
+ sensor-mode = <0>;
+ cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+
+ qcom,cam-sensor@0 {
+ cell-index = <0>;
+ compatible = "qcom,cam-sensor";
+ reg = <0x0>;
+ csiphy-sd-index = <0>;
+ sensor-position-roll = <270>;
+ sensor-position-pitch = <0>;
+ sensor-position-yaw = <180>;
+ led-flash-src = <&led_flash_rear>;
+ actuator-src = <&actuator_rear>;
+ ois-src = <&ois_rear>;
+ eeprom-src = <&eeprom_rear>;
+ cam_vio-supply = <&camera_vio_ldo>;
+ cam_vana-supply = <&camera_vana_ldo>;
+ cam_vdig-supply = <&camera_rear_ldo>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <1800000 2850000 1352000 0>;
+ rgltr-max-voltage = <1800000 2850000 1352000 0>;
+ rgltr-load-current = <0 80000 105000 0>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk0_active
+ &cam_sensor_rear_active>;
+ pinctrl-1 = <&cam_sensor_mclk0_suspend
+ &cam_sensor_rear_suspend>;
+ gpios = <&tlmm 13 0>,
+ <&tlmm 30 0>;
+ gpio-reset = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK0",
+ "CAM_RESET0";
+ sensor-mode = <0>;
+ cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+
+ qcom,cam-sensor@1 {
+ cell-index = <1>;
+ compatible = "qcom,cam-sensor";
+ reg = <0x1>;
+ csiphy-sd-index = <1>;
+ sensor-position-roll = <90>;
+ sensor-position-pitch = <0>;
+ sensor-position-yaw = <180>;
+ eeprom-src = <&eeprom_rear_aux>;
+ cam_vio-supply = <&camera_vio_ldo>;
+ cam_vana-supply = <&camera_vana_ldo>;
+ cam_vdig-supply = <&camera_ldo>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ regulator-names = "cam_vdig", "cam_vio", "cam_vana",
+ "cam_clk";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <1352000 1800000 2850000 0>;
+ rgltr-max-voltage = <1352000 1800000 2850000 0>;
+ rgltr-load-current = <105000 0 80000 0>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk1_active
+ &cam_sensor_rear2_active>;
+ pinctrl-1 = <&cam_sensor_mclk1_suspend
+ &cam_sensor_rear2_suspend>;
+ gpios = <&tlmm 14 0>,
+ <&tlmm 28 0>;
+ gpio-reset = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK1",
+ "CAM_RESET1";
+ sensor-mode = <0>;
+ cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+
+ qcom,cam-sensor@2 {
+ cell-index = <2>;
+ compatible = "qcom,cam-sensor";
+ reg = <0x02>;
+ csiphy-sd-index = <2>;
+ sensor-position-roll = <270>;
+ sensor-position-pitch = <0>;
+ sensor-position-yaw = <0>;
+ eeprom-src = <&eeprom_front>;
+ actuator-src = <&actuator_front>;
+ led-flash-src = <&led_flash_front>;
+ cam_vio-supply = <&camera_vio_ldo>;
+ cam_vana-supply = <&camera_vana_ldo>;
+ cam_vdig-supply = <&camera_ldo>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <1800000 2850000 1352000 0>;
+ rgltr-max-voltage = <1800000 2850000 1352000 0>;
+ rgltr-load-current = <0 80000 105000 0>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk2_active
+ &cam_sensor_front_active>;
+ pinctrl-1 = <&cam_sensor_mclk2_suspend
+ &cam_sensor_front_suspend>;
+ gpios = <&tlmm 15 0>,
+ <&tlmm 9 0>;
+ gpio-reset = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK2",
+ "CAM_RESET2";
+ sensor-mode = <0>;
+ cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-cdp.dtsi
index c4ca6c5..8b94ca2 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-cdp.dtsi
@@ -197,7 +197,7 @@
rgltr-cntrl-support;
rgltr-min-voltage = <1352000 1800000 2850000 0 2800000>;
rgltr-max-voltage = <1352000 1800000 2850000 0 2800000>;
- rgltr-load-current = <105000 0 80000 0>;
+ rgltr-load-current = <105000 0 80000 0 0>;
gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk1_active
@@ -234,7 +234,7 @@
rgltr-cntrl-support;
rgltr-min-voltage = <1800000 2850000 1352000 0 2800000>;
rgltr-max-voltage = <1800000 2850000 1352000 0 2800000>;
- rgltr-load-current = <0 80000 105000 0>;
+ rgltr-load-current = <0 80000 105000 0 0>;
gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk2_active
diff --git a/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-mtp.dtsi
index c4ca6c5..8b94ca2 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-mtp.dtsi
@@ -197,7 +197,7 @@
rgltr-cntrl-support;
rgltr-min-voltage = <1352000 1800000 2850000 0 2800000>;
rgltr-max-voltage = <1352000 1800000 2850000 0 2800000>;
- rgltr-load-current = <105000 0 80000 0>;
+ rgltr-load-current = <105000 0 80000 0 0>;
gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk1_active
@@ -234,7 +234,7 @@
rgltr-cntrl-support;
rgltr-min-voltage = <1800000 2850000 1352000 0 2800000>;
rgltr-max-voltage = <1800000 2850000 1352000 0 2800000>;
- rgltr-load-current = <0 80000 105000 0>;
+ rgltr-load-current = <0 80000 105000 0 0>;
gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk2_active
diff --git a/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi
index e50be92..521b048 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi
@@ -41,6 +41,12 @@
status = "ok";
};
+&pm660l_switch1 {
+ pinctrl-names = "led_enable", "led_disable";
+ pinctrl-0 = <&flash_led3_front_en>;
+ pinctrl-1 = <&flash_led3_front_dis>;
+};
+
&qupv3_se9_2uart {
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-coresight.dtsi b/arch/arm64/boot/dts/qcom/sdm670-coresight.dtsi
index 34fe19f..a885495 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-coresight.dtsi
@@ -537,6 +537,58 @@
<&funnel_apss_merg_out_funnel_in2>;
};
};
+ port@4 {
+ reg = <6>;
+ funnel_in2_in_funnel_gfx: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&funnel_gfx_out_funnel_in2>;
+ };
+ };
+ };
+ };
+
+ funnel_gfx: funnel@0x6943000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b908>;
+
+ reg = <0x6943000 0x1000>;
+ reg-names = "funnel-base";
+
+ coresight-name = "coresight-funnel-gfx";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ funnel_gfx_out_funnel_in2: endpoint {
+ remote-endpoint =
+ <&funnel_in2_in_funnel_gfx>;
+ };
+ };
+
+ port@1 {
+ reg = <0>;
+ funnel_in2_in_gfx: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&gfx_out_funnel_in2>;
+ };
+ };
+
+ port@2 {
+ reg = <1>;
+ funnel_in2_in_gfx_cx: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&gfx_cx_out_funnel_in2>;
+ };
+ };
};
};
@@ -1336,7 +1388,7 @@
reg = <0x69e1000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti0-ddr0";
+ coresight-name = "coresight-cti-ddr_dl_0_cti0";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1348,7 +1400,7 @@
reg = <0x69e4000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti0-ddr1";
+ coresight-name = "coresight-cti-ddr_dl_1_cti0";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1360,7 +1412,7 @@
reg = <0x69e5000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti1-ddr1";
+ coresight-name = "coresight-cti-ddr_dl_1_cti1";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1372,7 +1424,7 @@
reg = <0x6c09000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti0-dlmm";
+ coresight-name = "coresight-cti-dlmm_cti0";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1384,7 +1436,7 @@
reg = <0x6c0a000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti1-dlmm";
+ coresight-name = "coresight-cti-dlmm_cti1";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1396,7 +1448,7 @@
reg = <0x6c29000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti0-dlct";
+ coresight-name = "coresight-cti-dlct_cti0";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1408,7 +1460,7 @@
reg = <0x6c2a000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti1-dlct";
+ coresight-name = "coresight-cti-dlct_cti1";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1420,7 +1472,7 @@
reg = <0x69a4000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti0-wcss";
+ coresight-name = "coresight-cti-wcss_cti0";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1432,7 +1484,7 @@
reg = <0x69a5000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti1-wcss";
+ coresight-name = "coresight-cti-wcss_cti1";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1444,7 +1496,7 @@
reg = <0x69a6000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti2-wcss";
+ coresight-name = "coresight-cti-wcss_cti2";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1480,7 +1532,7 @@
reg = <0x6b10000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti2-ssc_sdc";
+ coresight-name = "coresight-cti-ssc_sdc_cti2";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1492,7 +1544,7 @@
reg = <0x6b11000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti1-ssc";
+ coresight-name = "coresight-cti-ssc_cti1";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1504,7 +1556,7 @@
reg = <0x6b1b000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti0-ssc-q6";
+ coresight-name = "coresight-cti-ssc_q6_cti0";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1516,7 +1568,7 @@
reg = <0x6b1e000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti-ssc-noc";
+ coresight-name = "coresight-cti-ssc_noc";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1528,7 +1580,7 @@
reg = <0x6b1f000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti6-ssc-noc";
+ coresight-name = "coresight-cti-ssc_noc_cti6";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1540,7 +1592,7 @@
reg = <0x6b04000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti0-swao";
+ coresight-name = "coresight-cti-swao_cti0";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1552,7 +1604,7 @@
reg = <0x6b05000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti1-swao";
+ coresight-name = "coresight-cti-swao_cti1";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1564,7 +1616,7 @@
reg = <0x6b06000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti2-swao";
+ coresight-name = "coresight-cti-swao_cti2";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1576,7 +1628,7 @@
reg = <0x6b07000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti3-swao";
+ coresight-name = "coresight-cti-swao_cti3";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1624,7 +1676,7 @@
reg = <0x78e0000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti0-apss";
+ coresight-name = "coresight-cti-apss_cti0";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1636,7 +1688,7 @@
reg = <0x78f0000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti1-apss";
+ coresight-name = "coresight-cti-apss_cti1";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1648,7 +1700,7 @@
reg = <0x7900000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti2-apss";
+ coresight-name = "coresight-cti-apss_cti2";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi b/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi
index 41a66e9..1a93fc2 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi
@@ -129,6 +129,36 @@
qcom,gpu-speed-bin = <0x41a0 0x1fe00000 21>;
+ qcom,gpu-coresights {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "qcom,gpu-coresight";
+
+ qcom,gpu-coresight@0 {
+ reg = <0>;
+ coresight-name = "coresight-gfx";
+ coresight-atid = <50>;
+ port {
+ gfx_out_funnel_in2: endpoint {
+ remote-endpoint =
+ <&funnel_in2_in_gfx>;
+ };
+ };
+ };
+
+ qcom,gpu-coresight@1 {
+ reg = <1>;
+ coresight-name = "coresight-gfx-cx";
+ coresight-atid = <51>;
+ port {
+ gfx_cx_out_funnel_in2: endpoint {
+ remote-endpoint =
+ <&funnel_in2_in_gfx_cx>;
+ };
+ };
+ };
+ };
+
/* GPU Mempools */
qcom,gpu-mempools {
#address-cells = <1>;
@@ -394,7 +424,7 @@
gfx3d_secure: gfx3d_secure {
compatible = "qcom,smmu-kgsl-cb";
- iommus = <&kgsl_smmu 2>;
+ iommus = <&kgsl_smmu 2>, <&kgsl_smmu 1>;
};
};
@@ -404,12 +434,10 @@
reg =
<0x506a000 0x31000>,
- <0xb200000 0x300000>,
- <0xc200000 0x10000>;
+ <0xb200000 0x300000>;
reg-names =
"kgsl_gmu_reg",
- "kgsl_gmu_pdc_reg",
- "kgsl_gmu_cpr_reg";
+ "kgsl_gmu_pdc_reg";
interrupts = <0 304 0>, <0 305 0>;
interrupt-names = "kgsl_hfi_irq", "kgsl_gmu_irq";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi
index 14c2c5a..ef1fc08 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi
@@ -42,6 +42,12 @@
status = "ok";
};
+&pm660l_switch1 {
+ pinctrl-names = "led_enable", "led_disable";
+ pinctrl-0 = <&flash_led3_front_en>;
+ pinctrl-1 = <&flash_led3_front_dis>;
+};
+
&qupv3_se9_2uart {
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
index 188da58..d4953c1 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
@@ -1558,6 +1558,36 @@
};
};
+ flash_led3_front {
+ flash_led3_front_en: flash_led3_front_en {
+ mux {
+ pins = "gpio21";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio21";
+ drive_strength = <2>;
+ output-high;
+ bias-disable;
+ };
+ };
+
+ flash_led3_front_dis: flash_led3_front_dis {
+ mux {
+ pins = "gpio21";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio21";
+ drive_strength = <2>;
+ output-low;
+ bias-disable;
+ };
+ };
+ };
+
/* Pinctrl setting for CAMERA GPIO key */
key_cam_snapshot {
key_cam_snapshot_default: key_cam_snapshot_default {
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi
index 93e4c51..0e87314 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi
@@ -68,6 +68,10 @@
};
};
+&eud {
+ vdda33-supply = <&pm660l_l7>;
+};
+
&pm660_fg {
qcom,battery-data = <&qrd_batterydata>;
qcom,fg-bmd-en-delay-ms = <300>;
@@ -142,6 +146,29 @@
};
};
+&qusb_phy0 {
+ qcom,qusb-phy-init-seq =
+ /* <value reg_offset> */
+ <0x23 0x210 /* PWR_CTRL1 */
+ 0x03 0x04 /* PLL_ANALOG_CONTROLS_TWO */
+ 0x7c 0x18c /* PLL_CLOCK_INVERTERS */
+ 0x80 0x2c /* PLL_CMODE */
+ 0x0a 0x184 /* PLL_LOCK_DELAY */
+ 0x19 0xb4 /* PLL_DIGITAL_TIMERS_TWO */
+ 0x40 0x194 /* PLL_BIAS_CONTROL_1 */
+ 0x20 0x198 /* PLL_BIAS_CONTROL_2 */
+ 0x21 0x214 /* PWR_CTRL2 */
+ 0x07 0x220 /* IMP_CTRL1 */
+ 0x58 0x224 /* IMP_CTRL2 */
+ 0x77 0x240 /* TUNE1 */
+ 0x29 0x244 /* TUNE2 */
+ 0xca 0x248 /* TUNE3 */
+ 0x04 0x24c /* TUNE4 */
+ 0x03 0x250 /* TUNE5 */
+ 0x00 0x23c /* CHG_CTRL2 */
+ 0x22 0x210>; /* PWR_CTRL1 */
+};
+
&pm660_haptics {
qcom,vmax-mv = <1800>;
qcom,wave-play-rate-us = <4255>;
@@ -279,7 +306,7 @@
&pm660l_wled {
status = "okay";
- qcom,led-strings-list = [01 02];
+ qcom,led-strings-list = [00 01];
};
&mdss_mdp {
diff --git a/arch/arm64/boot/dts/qcom/sdm670-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm670-regulator.dtsi
index 24b8dd6..3c84314 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-regulator.dtsi
@@ -46,9 +46,9 @@
pm660_s4: regulator-pm660-s4 {
regulator-name = "pm660_s4";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
- regulator-min-microvolt = <2040000>;
+ regulator-min-microvolt = <1808000>;
regulator-max-microvolt = <2040000>;
- qcom,init-voltage = <2040000>;
+ qcom,init-voltage = <1808000>;
};
};
@@ -72,9 +72,9 @@
pm660_s6: regulator-pm660-s6 {
regulator-name = "pm660_s6";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
- regulator-min-microvolt = <1352000>;
+ regulator-min-microvolt = <1224000>;
regulator-max-microvolt = <1352000>;
- qcom,init-voltage = <1352000>;
+ qcom,init-voltage = <1224000>;
};
};
@@ -162,11 +162,14 @@
<RPMH_REGULATOR_MODE_LDO_LPM
RPMH_REGULATOR_MODE_LDO_HPM>;
qcom,mode-threshold-currents = <0 1>;
+ proxy-supply = <&pm660_l1>;
pm660_l1: regulator-pm660-l1 {
regulator-name = "pm660_l1";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1250000>;
+ qcom,proxy-consumer-enable;
+ qcom,proxy-consumer-current = <43600>;
qcom,init-voltage = <1200000>;
qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
};
@@ -237,9 +240,9 @@
pm660_l6: regulator-pm660-l6 {
regulator-name = "pm660_l6";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
- regulator-min-microvolt = <1304000>;
+ regulator-min-microvolt = <1248000>;
regulator-max-microvolt = <1304000>;
- qcom,init-voltage = <1304000>;
+ qcom,init-voltage = <1248000>;
qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
};
};
@@ -324,11 +327,14 @@
<RPMH_REGULATOR_MODE_LDO_LPM
RPMH_REGULATOR_MODE_LDO_HPM>;
qcom,mode-threshold-currents = <0 1>;
+ proxy-supply = <&pm660_l11>;
pm660_l11: regulator-pm660-l11 {
regulator-name = "pm660_l11";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
+ qcom,proxy-consumer-enable;
+ qcom,proxy-consumer-current = <115000>;
qcom,init-voltage = <1800000>;
qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
};
@@ -468,11 +474,14 @@
<RPMH_REGULATOR_MODE_LDO_LPM
RPMH_REGULATOR_MODE_LDO_HPM>;
qcom,mode-threshold-currents = <0 1>;
+ proxy-supply = <&pm660l_l1>;
pm660l_l1: regulator-pm660l-l1 {
regulator-name = "pm660l_l1";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
regulator-min-microvolt = <880000>;
regulator-max-microvolt = <900000>;
+ qcom,proxy-consumer-enable;
+ qcom,proxy-consumer-current = <72000>;
qcom,init-voltage = <880000>;
qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-rumi.dts b/arch/arm64/boot/dts/qcom/sdm670-rumi.dts
index e137705..6201488 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-rumi.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-rumi.dts
@@ -16,7 +16,6 @@
#include "sdm670.dtsi"
#include "sdm670-rumi.dtsi"
-#include "sdm670-audio-overlay.dtsi"
/ {
model = "Qualcomm Technologies, Inc. SDM670 RUMI";
compatible = "qcom,sdm670-rumi", "qcom,sdm670", "qcom,rumi";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
index 6404bcf..8dbd063 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
@@ -534,6 +534,13 @@
qcom,mdss-dsi-pan-enable-dynamic-fps;
qcom,mdss-dsi-pan-fps-update =
"dfps_immediate_porch_mode_vfp";
+ qcom,esd-check-enabled;
+ qcom,mdss-dsi-panel-status-check-mode = "reg_read";
+ qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
+ qcom,mdss-dsi-panel-status-command-state = "dsi_hs_mode";
+ qcom,mdss-dsi-panel-status-value = <0x9c>;
+ qcom,mdss-dsi-panel-on-check-value = <0x9c>;
+ qcom,mdss-dsi-panel-status-read-length = <1>;
qcom,mdss-dsi-display-timings {
timing@0{
qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07
@@ -551,6 +558,13 @@
qcom,ulps-enabled;
qcom,partial-update-enabled = "single_roi";
qcom,panel-roi-alignment = <720 128 720 128 1440 128>;
+ qcom,esd-check-enabled;
+ qcom,mdss-dsi-panel-status-check-mode = "reg_read";
+ qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
+ qcom,mdss-dsi-panel-status-command-state = "dsi_hs_mode";
+ qcom,mdss-dsi-panel-status-value = <0x9c>;
+ qcom,mdss-dsi-panel-on-check-value = <0x9c>;
+ qcom,mdss-dsi-panel-status-read-length = <1>;
qcom,mdss-dsi-display-timings {
timing@0{
qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07
@@ -566,6 +580,13 @@
qcom,mdss-dsi-t-clk-post = <0x0b>;
qcom,mdss-dsi-t-clk-pre = <0x23>;
qcom,ulps-enabled;
+ qcom,esd-check-enabled;
+ qcom,mdss-dsi-panel-status-check-mode = "reg_read";
+ qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
+ qcom,mdss-dsi-panel-status-command-state = "dsi_hs_mode";
+ qcom,mdss-dsi-panel-status-value = <0x9c>;
+ qcom,mdss-dsi-panel-on-check-value = <0x9c>;
+ qcom,mdss-dsi-panel-status-read-length = <1>;
qcom,mdss-dsi-display-timings {
timing@0{
qcom,mdss-dsi-panel-phy-timings = [00 15 05 05 20 1f 05
@@ -586,6 +607,13 @@
qcom,mdss-dsi-pan-enable-dynamic-fps;
qcom,mdss-dsi-pan-fps-update =
"dfps_immediate_porch_mode_vfp";
+ qcom,esd-check-enabled;
+ qcom,mdss-dsi-panel-status-check-mode = "reg_read";
+ qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
+ qcom,mdss-dsi-panel-status-command-state = "dsi_hs_mode";
+ qcom,mdss-dsi-panel-status-value = <0x9c>;
+ qcom,mdss-dsi-panel-on-check-value = <0x9c>;
+ qcom,mdss-dsi-panel-status-read-length = <1>;
qcom,mdss-dsi-display-timings {
timing@0{
qcom,mdss-dsi-panel-phy-timings = [00 15 05 05 20 1f 05
diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
index a71dc43..a3b8c78 100644
--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
@@ -690,6 +690,32 @@
clock-frequency = <19200000>;
};
+ qcom,memshare {
+ compatible = "qcom,memshare";
+
+ qcom,client_1 {
+ compatible = "qcom,memshare-peripheral";
+ qcom,peripheral-size = <0x0>;
+ qcom,client-id = <0>;
+ qcom,allocate-boot-time;
+ label = "modem";
+ };
+
+ qcom,client_2 {
+ compatible = "qcom,memshare-peripheral";
+ qcom,peripheral-size = <0x0>;
+ qcom,client-id = <2>;
+ label = "modem";
+ };
+
+ mem_client_3_size: qcom,client_3 {
+ compatible = "qcom,memshare-peripheral";
+ qcom,peripheral-size = <0x500000>;
+ qcom,client-id = <1>;
+ label = "modem";
+ };
+ };
+
qcom,sps {
compatible = "qcom,msm_sps_4k";
qcom,pipe-attr-ee;
@@ -1008,9 +1034,14 @@
compatible = "qcom,clk-cpu-osm-sdm670";
reg = <0x17d41000 0x1400>,
<0x17d43000 0x1400>,
- <0x17d45800 0x1400>;
- reg-names = "osm_l3_base", "osm_pwrcl_base", "osm_perfcl_base";
+ <0x17d45800 0x1400>,
+ <0x784248 0x4>;
+ reg-names = "osm_l3_base", "osm_pwrcl_base", "osm_perfcl_base",
+ "cpr_rc";
+ vdd_l3_mx_ao-supply = <&pm660l_s1_level_ao>;
+ vdd_pwrcl_mx_ao-supply = <&pm660l_s1_level_ao>;
+ qcom,mx-turbo-freq = <1478400000 1689600000 3300000001>;
l3-devs = <&l3_cpu0 &l3_cpu6>;
clock-names = "xo_ao";
@@ -1107,6 +1138,11 @@
reg = <0x10 8>;
};
+ dload_type@1c {
+ compatible = "qcom,msm-imem-dload-type";
+ reg = <0x1c 0x4>;
+ };
+
restart_reason@65c {
compatible = "qcom,msm-imem-restart_reason";
reg = <0x65c 4>;
@@ -1281,52 +1317,52 @@
compatible = "qcom,mem-dump";
memory-region = <&dump_mem>;
- rpmh_dump {
+ rpmh {
qcom,dump-size = <0x2000000>;
qcom,dump-id = <0xec>;
};
- rpm_sw_dump {
+ rpm_sw {
qcom,dump-size = <0x28000>;
qcom,dump-id = <0xea>;
};
- pmic_dump {
+ pmic {
qcom,dump-size = <0x10000>;
qcom,dump-id = <0xe4>;
};
- tmc_etf_dump {
+ tmc_etf {
qcom,dump-size = <0x10000>;
qcom,dump-id = <0xf0>;
};
- tmc_etf_swao_dump {
+ tmc_etfswao {
qcom,dump-size = <0x8400>;
qcom,dump-id = <0xf1>;
};
- tmc_etr_reg_dump {
+ tmc_etr_reg {
qcom,dump-size = <0x1000>;
qcom,dump-id = <0x100>;
};
- tmc_etf_reg_dump {
+ tmc_etf_reg {
qcom,dump-size = <0x1000>;
qcom,dump-id = <0x101>;
};
- tmc_etf_swao_reg_dump {
+ etfswao_reg {
qcom,dump-size = <0x1000>;
qcom,dump-id = <0x102>;
};
- misc_data_dump {
+ misc_data {
qcom,dump-size = <0x1000>;
qcom,dump-id = <0xe8>;
};
- power_regs_data_dump {
+ power_regs {
qcom,dump-size = <0x100000>;
qcom,dump-id = <0xed>;
};
@@ -1618,6 +1654,10 @@
qcom,dump-size = <0x80000>;
};
+ qcom,llcc-perfmon {
+ compatible = "qcom,llcc-perfmon";
+ };
+
qcom,llcc-erp {
compatible = "qcom,llcc-erp";
interrupt-names = "ecc_irq";
@@ -2050,6 +2090,8 @@
vdd_cx-voltage = <RPMH_REGULATOR_LEVEL_TURBO>;
vdd_mx-supply = <&pm660l_s1_level>;
vdd_mx-uV = <RPMH_REGULATOR_LEVEL_TURBO>;
+ vdd_mss-supply = <&pm660_s5_level>;
+ vdd_mss-uV = <RPMH_REGULATOR_LEVEL_TURBO>;
qcom,firmware-name = "modem";
qcom,pil-self-auth;
qcom,sysmon-id = <0>;
@@ -2199,10 +2241,10 @@
<1 782 100000 100000>,
/* 50 MB/s */
<150 512 130718 200000>,
- <1 782 133320 133320>,
+ <1 782 100000 100000>,
/* 100 MB/s */
<150 512 130718 200000>,
- <1 782 150000 150000>,
+ <1 782 130000 130000>,
/* 200 MB/s */
<150 512 261438 400000>,
<1 782 300000 300000>,
@@ -2235,7 +2277,6 @@
qcom,nonremovable;
- qcom,scaling-lower-bus-speed-mode = "DDR52";
status = "disabled";
};
@@ -2274,10 +2315,10 @@
<1 608 100000 100000>,
/* 50 MB/s */
<81 512 130718 200000>,
- <1 608 133320 133320>,
+ <1 608 100000 100000>,
/* 100 MB/s */
<81 512 261438 200000>,
- <1 608 150000 150000>,
+ <1 608 130000 130000>,
/* 200 MB/s */
<81 512 261438 400000>,
<1 608 300000 300000>,
@@ -2312,6 +2353,7 @@
qcom,msm_fastrpc {
compatible = "qcom,msm-fastrpc-compute";
+ qcom,adsp-remoteheap-vmid = <37>;
qcom,msm_fastrpc_compute_cb1 {
compatible = "qcom,msm-fastrpc-compute-cb";
@@ -2460,6 +2502,8 @@
qcom,count-unit = <0x10000>;
qcom,hw-timer-hz = <19200000>;
qcom,target-dev = <&cpubw>;
+ qcom,byte-mid-mask = <0xe000>;
+ qcom,byte-mid-match = <0xe000>;
};
memlat_cpu0: qcom,memlat-cpu0 {
@@ -2749,6 +2793,8 @@
&mdss_core_gdsc {
status = "ok";
+ proxy-supply = <&mdss_core_gdsc>;
+ qcom,proxy-consumer-enable;
};
&gpu_cx_gdsc {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
index 17bcf0955..35a7774 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
@@ -434,13 +434,14 @@
"csid0", "csid1", "csid2",
"ife0", "ife1", "ife2", "ipe0",
"ipe1", "cam-cdm-intf0", "cpas-cdm0", "bps0",
- "icp0", "jpeg-dma0", "jpeg-enc0", "fd0";
+ "icp0", "jpeg-dma0", "jpeg-enc0", "fd0", "lrmecpas";
client-axi-port-names =
"cam_hf_1", "cam_hf_2", "cam_hf_2", "cam_sf_1",
"cam_hf_1", "cam_hf_2", "cam_hf_2",
"cam_hf_1", "cam_hf_2", "cam_hf_2", "cam_sf_1",
"cam_sf_1", "cam_sf_1", "cam_sf_1", "cam_sf_1",
- "cam_sf_1", "cam_sf_1", "cam_sf_1", "cam_sf_1";
+ "cam_sf_1", "cam_sf_1", "cam_sf_1", "cam_sf_1",
+ "cam_sf_1";
client-bus-camnoc-based;
qcom,axi-port-list {
qcom,axi-port1 {
@@ -529,7 +530,8 @@
cdm-client-names = "vfe",
"jpegdma",
"jpegenc",
- "fd";
+ "fd",
+ "lrmecdm";
status = "ok";
};
@@ -775,7 +777,7 @@
clock-rates =
<0 0 0 0 0 0 384000000 0 0 0 404000000 0>,
<0 0 0 0 0 0 538000000 0 0 0 600000000 0>;
- clock-cntl-level = "svs";
+ clock-cntl-level = "svs", "turbo";
src-clock-name = "ife_csid_clk_src";
status = "ok";
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
index 8e36887..fcfab09 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
@@ -1584,7 +1584,7 @@
reg = <0x69e1000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti-DDR_DL_0_CTI";
+ coresight-name = "coresight-cti-ddr_dl_0_cti";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1596,7 +1596,7 @@
reg = <0x69e4000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti-DDR_DL_1_CTI0";
+ coresight-name = "coresight-cti-ddr_dl_1_cti0";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1608,7 +1608,7 @@
reg = <0x69e5000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti-DDR_DL_1_CTI1";
+ coresight-name = "coresight-cti-ddr_dl_1_cti1";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1620,7 +1620,7 @@
reg = <0x6c09000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti-DLMM_CTI0";
+ coresight-name = "coresight-cti-dlmm_cti0";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1632,7 +1632,7 @@
reg = <0x6c0a000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti-DLMM_CTI1";
+ coresight-name = "coresight-cti-dlmm_cti1";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1644,7 +1644,7 @@
reg = <0x78e0000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti-APSS_CTI0";
+ coresight-name = "coresight-cti-apss_cti0";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1656,7 +1656,7 @@
reg = <0x78f0000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti-APSS_CTI1";
+ coresight-name = "coresight-cti-apss_cti1";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1668,7 +1668,7 @@
reg = <0x7900000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti-APSS_CTI2";
+ coresight-name = "coresight-cti-apss_cti2";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1996,7 +1996,7 @@
reg = <0x6b04000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti-SWAO_CTI0";
+ coresight-name = "coresight-cti-swao_cti0";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi b/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi
index f38f5f8..10efa20 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi
@@ -66,6 +66,12 @@
ibb-supply = <&lcdb_ncp_vreg>;
};
+&dsi_dual_nt36850_truly_cmd_display {
+ vddio-supply = <&pm660_l11>;
+ lab-supply = <&lcdb_ldo_vreg>;
+ ibb-supply = <&lcdb_ncp_vreg>;
+};
+
&sde_dp {
status = "disabled";
/delete-property/ vdda-1p2-supply;
@@ -236,6 +242,11 @@
/delete-property/ vdd_gfx-supply;
};
+&clock_cpucc {
+ /delete-property/ vdd_l3_mx_ao-supply;
+ /delete-property/ vdd_pwrcl_mx_ao-supply;
+};
+
&pil_modem {
/delete-property/ vdd_cx-supply;
/delete-property/ vdd_mx-supply;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi
index 54d25e1..00f0650 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi
@@ -159,3 +159,7 @@
status = "ok";
};
+
+&wil6210 {
+ status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
index 1e8c943..4ecb49a 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
@@ -27,6 +27,7 @@
#include "dsi-panel-s6e3ha3-amoled-dualmipi-wqhd-cmd.dtsi"
#include "dsi-panel-nt35597-dualmipi-wqxga-video.dtsi"
#include "dsi-panel-nt35597-dualmipi-wqxga-cmd.dtsi"
+#include "dsi-panel-nt36850-truly-dualmipi-wqhd-cmd.dtsi"
#include <dt-bindings/clock/mdss-10nm-pll-clk.h>
&soc {
@@ -451,6 +452,30 @@
ibb-supply = <&ibb_regulator>;
};
+ dsi_dual_nt36850_truly_cmd_display: qcom,dsi-display@16 {
+ compatible = "qcom,dsi-display";
+ label = "dsi_dual_nt36850_truly_cmd_display";
+ qcom,display-type = "primary";
+
+ qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
+ qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
+ clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+ <&mdss_dsi0_pll PCLK_MUX_0_CLK>;
+ clock-names = "src_byte_clk", "src_pixel_clk";
+
+ pinctrl-names = "panel_active", "panel_suspend";
+ pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+ pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+ qcom,platform-reset-gpio = <&tlmm 6 0>;
+ qcom,panel-mode-gpio = <&tlmm 52 0>;
+
+ qcom,dsi-panel = <&dsi_dual_nt36850_truly_cmd>;
+ vddio-supply = <&pm8998_l14>;
+ lab-supply = <&lab_regulator>;
+ ibb-supply = <&ibb_regulator>;
+ };
+
sde_wb: qcom,wb-display@0 {
compatible = "qcom,wb-display";
cell-index = <0>;
@@ -504,13 +529,6 @@
&dsi_dual_nt35597_truly_cmd {
qcom,mdss-dsi-t-clk-post = <0x0D>;
qcom,mdss-dsi-t-clk-pre = <0x2D>;
- qcom,esd-check-enabled;
- qcom,mdss-dsi-panel-status-check-mode = "reg_read";
- qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
- qcom,mdss-dsi-panel-status-command-state = "dsi_hs_mode";
- qcom,mdss-dsi-panel-status-value = <0x9c>;
- qcom,mdss-dsi-panel-on-check-value = <0x9c>;
- qcom,mdss-dsi-panel-status-read-length = <1>;
qcom,mdss-dsi-display-timings {
timing@0{
qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07
@@ -526,13 +544,6 @@
qcom,mdss-dsi-t-clk-post = <0x0b>;
qcom,mdss-dsi-t-clk-pre = <0x23>;
qcom,ulps-enabled;
- qcom,esd-check-enabled;
- qcom,mdss-dsi-panel-status-check-mode = "reg_read";
- qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
- qcom,mdss-dsi-panel-status-command-state = "dsi_hs_mode";
- qcom,mdss-dsi-panel-status-value = <0x9c>;
- qcom,mdss-dsi-panel-on-check-value = <0x9c>;
- qcom,mdss-dsi-panel-status-read-length = <1>;
qcom,mdss-dsi-display-timings {
timing@0{
qcom,mdss-dsi-panel-phy-timings = [00 15 05 05 20 1f 05
@@ -774,3 +785,17 @@
};
};
};
+
+&dsi_dual_nt36850_truly_cmd {
+ qcom,mdss-dsi-t-clk-post = <0x0E>;
+ qcom,mdss-dsi-t-clk-pre = <0x30>;
+ qcom,mdss-dsi-display-timings {
+ timing@0 {
+ qcom,mdss-dsi-panel-phy-timings = [00 1f 08 08 24 23 08
+ 08 05 03 04 00];
+ qcom,display-topology = <2 0 2>,
+ <1 0 2>;
+ qcom,default-topology-index = <0>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
index 0b8e6fd..4194e67 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
@@ -524,8 +524,6 @@
00 00 00 00
00 00 00 80];
qcom,platform-regulator-settings = [1d 1d 1d 1d 1d];
- qcom,panel-allow-phy-poweroff;
- qcom,dsi-phy-regulator-min-datarate-bps = <1200000000>;
qcom,phy-supply-entries {
#address-cells = <1>;
#size-cells = <0>;
@@ -559,8 +557,6 @@
00 00 00 00
00 00 00 00
00 00 00 80];
- qcom,panel-allow-phy-poweroff;
- qcom,dsi-phy-regulator-min-datarate-bps = <1200000000>;
qcom,phy-supply-entries {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
index 70fe3e7..b9eabcf 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
@@ -136,7 +136,9 @@
0x230 /* QUSB2PHY_INTR_CTRL */
0x0a8 /* QUSB2PHY_PLL_CORE_INPUT_OVERRIDE */
0x254 /* QUSB2PHY_TEST1 */
- 0x198>; /* PLL_BIAS_CONTROL_2 */
+ 0x198 /* PLL_BIAS_CONTROL_2 */
+ 0x228 /* QUSB2PHY_SQ_CTRL1 */
+ 0x22c>; /* QUSB2PHY_SQ_CTRL2 */
qcom,qusb-phy-init-seq =
/* <value reg_offset> */
@@ -421,7 +423,9 @@
0x230 /* QUSB2PHY_INTR_CTRL */
0x0a8 /* QUSB2PHY_PLL_CORE_INPUT_OVERRIDE */
0x254 /* QUSB2PHY_TEST1 */
- 0x198>; /* PLL_BIAS_CONTROL_2 */
+ 0x198 /* PLL_BIAS_CONTROL_2 */
+ 0x228 /* QUSB2PHY_SQ_CTRL1 */
+ 0x22c>; /* QUSB2PHY_SQ_CTRL2 */
qcom,qusb-phy-init-seq =
/* <value reg_offset> */
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi
index d867129..d2ee9eb 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi
@@ -157,6 +157,33 @@
compatible = "qcom,msm-cam-smmu";
status = "ok";
+ msm_cam_smmu_lrme {
+ compatible = "qcom,msm-cam-smmu-cb";
+ iommus = <&apps_smmu 0x1038 0x0>,
+ <&apps_smmu 0x1058 0x0>,
+ <&apps_smmu 0x1039 0x0>,
+ <&apps_smmu 0x1059 0x0>;
+ label = "lrme";
+ lrme_iova_mem_map: iova-mem-map {
+ iova-mem-region-shared {
+ /* Shared region is 100MB long */
+ iova-region-name = "shared";
+ iova-region-start = <0x7400000>;
+ iova-region-len = <0x6400000>;
+ iova-region-id = <0x1>;
+ status = "ok";
+ };
+ /* IO region is approximately 3.3 GB */
+ iova-mem-region-io {
+ iova-region-name = "io";
+ iova-region-start = <0xd800000>;
+ iova-region-len = <0xd2800000>;
+ iova-region-id = <0x3>;
+ status = "ok";
+ };
+ };
+ };
+
msm_cam_smmu_ife {
compatible = "qcom,msm-cam-smmu-cb";
iommus = <&apps_smmu 0x808 0x0>,
@@ -329,13 +356,14 @@
"csid0", "csid1", "csid2",
"ife0", "ife1", "ife2", "ipe0",
"ipe1", "cam-cdm-intf0", "cpas-cdm0", "bps0",
- "icp0", "jpeg-dma0", "jpeg-enc0", "fd0";
+ "icp0", "jpeg-dma0", "jpeg-enc0", "fd0", "lrmecpas0";
client-axi-port-names =
"cam_hf_1", "cam_hf_2", "cam_hf_2", "cam_hf_2",
"cam_sf_1", "cam_hf_1", "cam_hf_2", "cam_hf_2",
"cam_hf_1", "cam_hf_2", "cam_hf_2", "cam_sf_1",
"cam_sf_1", "cam_sf_1", "cam_sf_1", "cam_sf_1",
- "cam_sf_1", "cam_sf_1", "cam_sf_1", "cam_sf_1";
+ "cam_sf_1", "cam_sf_1", "cam_sf_1", "cam_sf_1",
+ "cam_sf_1";
client-bus-camnoc-based;
qcom,axi-port-list {
qcom,axi-port1 {
@@ -415,4 +443,44 @@
};
};
};
+
+ qcom,cam-lrme {
+ compatible = "qcom,cam-lrme";
+ arch-compat = "lrme";
+ status = "ok";
+ };
+
+ cam_lrme: qcom,lrme@ac6b000 {
+ cell-index = <0>;
+ compatible = "qcom,lrme";
+ reg-names = "lrme";
+ reg = <0xac6b000 0xa00>;
+ reg-cam-base = <0x6b000>;
+ interrupt-names = "lrme";
+ interrupts = <0 476 0>;
+ regulator-names = "camss";
+ camss-supply = <&titan_top_gdsc>;
+ clock-names = "camera_ahb",
+ "camera_axi",
+ "soc_ahb_clk",
+ "cpas_ahb_clk",
+ "camnoc_axi_clk",
+ "lrme_clk_src",
+ "lrme_clk";
+ clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+ <&clock_gcc GCC_CAMERA_AXI_CLK>,
+ <&clock_camcc CAM_CC_SOC_AHB_CLK>,
+ <&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+ <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+ <&clock_camcc CAM_CC_LRME_CLK_SRC>,
+ <&clock_camcc CAM_CC_LRME_CLK>;
+ clock-rates = <0 0 0 0 0 200000000 200000000>,
+ <0 0 0 0 0 269000000 269000000>,
+ <0 0 0 0 0 320000000 320000000>,
+ <0 0 0 0 0 400000000 400000000>;
+
+ clock-cntl-level = "lowsvs", "svs", "svs_l1", "turbo";
+ src-clock-name = "lrme_clk_src";
+ status = "ok";
+ };
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-overlay.dts
similarity index 95%
rename from arch/arm64/boot/dts/qcom/sdm845-qvr-overlay.dts
rename to arch/arm64/boot/dts/qcom/sdm845-v2-qvr-overlay.dts
index 58f5782..e1ec364 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-overlay.dts
@@ -25,7 +25,7 @@
#include "sdm845-camera-sensor-qvr.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 v2 QVR";
+ model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
qcom,msm-id = <321 0x20000>;
qcom,board-id = <0x01000B 0x20>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
similarity index 92%
rename from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
rename to arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
index 5513c92..0a56c79 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
@@ -18,7 +18,7 @@
#include "sdm845-camera-sensor-qvr.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
+ model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
qcom,board-id = <0x01000B 0x20>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
index db2fcc1..1fcf893 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
@@ -81,6 +81,12 @@
&clock_cpucc {
compatible = "qcom,clk-cpu-osm-v2";
+ reg = <0x17d41000 0x1400>,
+ <0x17d43000 0x1400>,
+ <0x17d45800 0x1400>,
+ <0x78425c 0x4>;
+ reg-names = "osm_l3_base", "osm_pwrcl_base", "osm_perfcl_base",
+ "cpr_rc";
};
&pcie1 {
@@ -588,7 +594,7 @@
qcom,gpu-freq = <520000000>;
qcom,bus-freq = <9>;
qcom,bus-min = <8>;
- qcom,bus-max = <10>;
+ qcom,bus-max = <11>;
};
qcom,gpu-pwrlevel@4 {
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index e8e9ce7..97904e3 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -1229,9 +1229,14 @@
compatible = "qcom,clk-cpu-osm";
reg = <0x17d41000 0x1400>,
<0x17d43000 0x1400>,
- <0x17d45800 0x1400>;
- reg-names = "osm_l3_base", "osm_pwrcl_base", "osm_perfcl_base";
+ <0x17d45800 0x1400>,
+ <0x784248 0x4>;
+ reg-names = "osm_l3_base", "osm_pwrcl_base", "osm_perfcl_base",
+ "cpr_rc";
+ vdd_l3_mx_ao-supply = <&pm8998_s6_level_ao>;
+ vdd_pwrcl_mx_ao-supply = <&pm8998_s6_level_ao>;
+ qcom,mx-turbo-freq = <1478400000 1689600000 3300000001>;
l3-devs = <&l3_cpu0 &l3_cpu4 &l3_cdsp>;
clock-names = "xo_ao";
@@ -2492,7 +2497,7 @@
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<1 618 0 0>, /* No vote */
- <1 618 0 800>; /* 100 KHz */
+ <1 618 0 300000>; /* 75 MHz */
clocks = <&clock_gcc GCC_PRNG_AHB_CLK>;
clock-names = "iface_clk";
};
@@ -3498,6 +3503,182 @@
};
};
+ cpu0-silver-step {
+ polling-delay-passive = <100>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 1>;
+ thermal-governor = "step_wise";
+ trips {
+ emerg_config0: emerg-config0 {
+ temperature = <110000>;
+ hysteresis = <10000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ emerg_cdev0 {
+ trip = <&emerg_config0>;
+ cooling-device =
+ <&CPU0 THERMAL_MAX_LIMIT
+ THERMAL_MAX_LIMIT>;
+ };
+ };
+ };
+
+ cpu1-silver-step {
+ polling-delay-passive = <100>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 2>;
+ thermal-governor = "step_wise";
+ trips {
+ emerg_config1: emerg-config1 {
+ temperature = <110000>;
+ hysteresis = <10000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ emerg_cdev1 {
+ trip = <&emerg_config1>;
+ cooling-device =
+ <&CPU1 THERMAL_MAX_LIMIT
+ THERMAL_MAX_LIMIT>;
+ };
+ };
+ };
+
+ cpu2-silver-step {
+ polling-delay-passive = <100>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 3>;
+ thermal-governor = "step_wise";
+ trips {
+ emerg_config2: emerg-config2 {
+ temperature = <110000>;
+ hysteresis = <10000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ emerg_cdev2 {
+ trip = <&emerg_config2>;
+ cooling-device =
+ <&CPU2 THERMAL_MAX_LIMIT
+ THERMAL_MAX_LIMIT>;
+ };
+ };
+ };
+
+ cpu3-silver-step {
+ polling-delay-passive = <100>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 4>;
+ thermal-governor = "step_wise";
+ trips {
+ emerg_config3: emerg-config3 {
+ temperature = <110000>;
+ hysteresis = <10000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ emerg_cdev3 {
+ trip = <&emerg_config3>;
+ cooling-device =
+ <&CPU3 THERMAL_MAX_LIMIT
+ THERMAL_MAX_LIMIT>;
+ };
+ };
+ };
+
+ cpu0-gold-step {
+ polling-delay-passive = <100>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 7>;
+ thermal-governor = "step_wise";
+ trips {
+ emerg_config4: emerg-config4 {
+ temperature = <110000>;
+ hysteresis = <10000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ emerg_cdev4 {
+ trip = <&emerg_config4>;
+ cooling-device =
+ <&CPU4 THERMAL_MAX_LIMIT
+ THERMAL_MAX_LIMIT>;
+ };
+ };
+ };
+
+ cpu1-gold-step {
+ polling-delay-passive = <100>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 8>;
+ thermal-governor = "step_wise";
+ trips {
+ emerg_config5: emerg-config5 {
+ temperature = <110000>;
+ hysteresis = <10000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ emerg_cdev5 {
+ trip = <&emerg_config5>;
+ cooling-device =
+ <&CPU5 THERMAL_MAX_LIMIT
+ THERMAL_MAX_LIMIT>;
+ };
+ };
+ };
+
+ cpu2-gold-step {
+ polling-delay-passive = <100>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 9>;
+ thermal-governor = "step_wise";
+ trips {
+ emerg_config6: emerg-config6 {
+ temperature = <110000>;
+ hysteresis = <10000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ emerg_cdev6 {
+ trip = <&emerg_config6>;
+ cooling-device =
+ <&CPU6 THERMAL_MAX_LIMIT
+ THERMAL_MAX_LIMIT>;
+ };
+ };
+ };
+
+ cpu3-gold-step {
+ polling-delay-passive = <100>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 10>;
+ thermal-governor = "step_wise";
+ trips {
+ emerg_config7: emerg-config7 {
+ temperature = <110000>;
+ hysteresis = <10000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ emerg_cdev7 {
+ trip = <&emerg_config7>;
+ cooling-device =
+ <&CPU7 THERMAL_MAX_LIMIT
+ THERMAL_MAX_LIMIT>;
+ };
+ };
+ };
+
lmh-dcvs-01 {
polling-delay-passive = <0>;
polling-delay = <0>;
@@ -3605,6 +3786,11 @@
qcom,dump-size = <0x1000>;
qcom,dump-id = <0xe8>;
};
+
+ tpdm_swao_dump {
+ qcom,dump-size = <0x512>;
+ qcom,dump-id = <0xf2>;
+ };
};
gpi_dma0: qcom,gpi-dma@0x800000 {
diff --git a/arch/arm64/configs/sdm670-perf_defconfig b/arch/arm64/configs/sdm670-perf_defconfig
index 371c77e..bd42455 100644
--- a/arch/arm64/configs/sdm670-perf_defconfig
+++ b/arch/arm64/configs/sdm670-perf_defconfig
@@ -21,6 +21,8 @@
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_SCHEDTUNE=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
CONFIG_BLK_CGROUP=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_CGROUP_BPF=y
@@ -65,12 +67,14 @@
CONFIG_CMA=y
CONFIG_ZSMALLOC=y
CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_PROCESS_RECLAIM=y
CONFIG_SECCOMP=y
CONFIG_ARMV8_DEPRECATED=y
CONFIG_SWP_EMULATION=y
CONFIG_CP15_BARRIER_EMULATION=y
CONFIG_SETEND_EMULATION=y
# CONFIG_ARM64_VHE is not set
+CONFIG_RANDOMIZE_BASE=y
# CONFIG_EFI is not set
CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
@@ -347,6 +351,7 @@
CONFIG_MFD_I2C_PMIC=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_PROXY_CONSUMER=y
CONFIG_REGULATOR_CPRH_KBSS=y
CONFIG_REGULATOR_QPNP_LABIBB=y
CONFIG_REGULATOR_QPNP_LCDB=y
@@ -370,8 +375,7 @@
CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
CONFIG_DVB_MPQ=m
CONFIG_DVB_MPQ_DEMUX=m
-CONFIG_DVB_MPQ_TSPP1=y
-CONFIG_TSPP=m
+CONFIG_DVB_MPQ_SW=y
CONFIG_QCOM_KGSL=y
CONFIG_DRM=y
CONFIG_DRM_SDE_EVTLOG_DEBUG=y
@@ -447,6 +451,7 @@
CONFIG_MMC_CQ_HCI=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
CONFIG_LEDS_QPNP=y
CONFIG_LEDS_QPNP_FLASH_V2=y
CONFIG_LEDS_QPNP_WLED=y
@@ -497,6 +502,7 @@
CONFIG_QCOM_RUN_QUEUE_STATS=y
CONFIG_QCOM_LLCC=y
CONFIG_QCOM_SDM670_LLCC=y
+CONFIG_QCOM_LLCC_PERFMON=m
CONFIG_MSM_SERVICE_LOCATOR=y
CONFIG_MSM_SERVICE_NOTIFIER=y
CONFIG_MSM_BOOT_STATS=y
@@ -534,9 +540,11 @@
CONFIG_MSM_EVENT_TIMER=y
CONFIG_MSM_PM=y
CONFIG_MSM_QBT1000=y
+CONFIG_QCOM_DCC_V2=y
CONFIG_QTI_RPM_STATS_LOG=y
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_MEM_SHARE_QMI_SERVICE=y
CONFIG_MSM_REMOTEQDSS=y
CONFIG_QCOM_BIMC_BWMON=y
CONFIG_ARM_MEMLAT_MON=y
diff --git a/arch/arm64/configs/sdm670_defconfig b/arch/arm64/configs/sdm670_defconfig
index f6c3ec7..718c415 100644
--- a/arch/arm64/configs/sdm670_defconfig
+++ b/arch/arm64/configs/sdm670_defconfig
@@ -22,6 +22,8 @@
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_SCHEDTUNE=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
CONFIG_BLK_CGROUP=y
CONFIG_DEBUG_BLK_CGROUP=y
CONFIG_RT_GROUP_SCHED=y
@@ -70,12 +72,14 @@
CONFIG_CMA_DEBUGFS=y
CONFIG_ZSMALLOC=y
CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_PROCESS_RECLAIM=y
CONFIG_SECCOMP=y
CONFIG_ARMV8_DEPRECATED=y
CONFIG_SWP_EMULATION=y
CONFIG_CP15_BARRIER_EMULATION=y
CONFIG_SETEND_EMULATION=y
# CONFIG_ARM64_VHE is not set
+CONFIG_RANDOMIZE_BASE=y
CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_COMPAT=y
@@ -352,6 +356,7 @@
CONFIG_MFD_I2C_PMIC=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_PROXY_CONSUMER=y
CONFIG_REGULATOR_CPRH_KBSS=y
CONFIG_REGULATOR_QPNP_LABIBB=y
CONFIG_REGULATOR_QPNP_LCDB=y
@@ -362,6 +367,7 @@
CONFIG_REGULATOR_STUB=y
CONFIG_MEDIA_SUPPORT=y
CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
CONFIG_MEDIA_CONTROLLER=y
CONFIG_VIDEO_V4L2_SUBDEV_API=y
CONFIG_VIDEO_ADV_DEBUG=y
@@ -372,6 +378,9 @@
CONFIG_MSM_VIDC_GOVERNORS=y
CONFIG_MSM_SDE_ROTATOR=y
CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
+CONFIG_DVB_MPQ=m
+CONFIG_DVB_MPQ_DEMUX=m
+CONFIG_DVB_MPQ_SW=y
CONFIG_QCOM_KGSL=y
CONFIG_DRM=y
CONFIG_DRM_SDE_EVTLOG_DEBUG=y
@@ -447,6 +456,7 @@
CONFIG_MMC_CQ_HCI=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
CONFIG_LEDS_QPNP=y
CONFIG_LEDS_QPNP_FLASH_V2=y
CONFIG_LEDS_QPNP_WLED=y
@@ -504,6 +514,7 @@
CONFIG_QCOM_RUN_QUEUE_STATS=y
CONFIG_QCOM_LLCC=y
CONFIG_QCOM_SDM670_LLCC=y
+CONFIG_QCOM_LLCC_PERFMON=m
CONFIG_MSM_SERVICE_LOCATOR=y
CONFIG_MSM_SERVICE_NOTIFIER=y
CONFIG_MSM_BOOT_STATS=y
@@ -549,6 +560,7 @@
CONFIG_QTI_RPM_STATS_LOG=y
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_MEM_SHARE_QMI_SERVICE=y
CONFIG_MSM_REMOTEQDSS=y
CONFIG_QCOM_BIMC_BWMON=y
CONFIG_ARM_MEMLAT_MON=y
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index 1cfa935..357a6b2 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -559,6 +559,9 @@
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT3_FS=y
CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QFMT_V2=y
@@ -590,13 +593,13 @@
CONFIG_CORESIGHT_EVENT=y
CONFIG_CORESIGHT_HWEVENT=y
CONFIG_CORESIGHT_DUMMY=y
+CONFIG_PFK=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_HARDENED_USERCOPY=y
CONFIG_FORTIFY_SOURCE=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
-CONFIG_CRYPTO_CTR=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index eceb4be..d0a32e7 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -575,6 +575,9 @@
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT3_FS=y
CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QFMT_V2=y
@@ -655,13 +658,13 @@
CONFIG_CORESIGHT_TGU=y
CONFIG_CORESIGHT_HWEVENT=y
CONFIG_CORESIGHT_DUMMY=y
+CONFIG_PFK=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_HARDENED_USERCOPY=y
CONFIG_FORTIFY_SOURCE=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
-CONFIG_CRYPTO_CTR=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 2437f15..623dd48 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -54,6 +54,7 @@
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/virt.h>
+#include <soc/qcom/minidump.h>
#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>
@@ -844,6 +845,7 @@
pr_crit("CPU%u: stopping\n", cpu);
show_regs(regs);
dump_stack();
+ dump_stack_minidump(regs->sp);
raw_spin_unlock(&stop_lock);
}
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 900c1ec..f7ce3d2 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -176,7 +176,8 @@
trace->entries[trace->nr_entries++] = ULONG_MAX;
}
-void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+static noinline void __save_stack_trace(struct task_struct *tsk,
+ struct stack_trace *trace, unsigned int nosched)
{
struct stack_trace_data data;
struct stackframe frame;
@@ -186,17 +187,18 @@
data.trace = trace;
data.skip = trace->skip;
+ data.no_sched_functions = nosched;
if (tsk != current) {
- data.no_sched_functions = 1;
frame.fp = thread_saved_fp(tsk);
frame.sp = thread_saved_sp(tsk);
frame.pc = thread_saved_pc(tsk);
} else {
- data.no_sched_functions = 0;
+ /* We don't want this function nor the caller */
+ data.skip += 2;
frame.fp = (unsigned long)__builtin_frame_address(0);
frame.sp = current_stack_pointer;
- frame.pc = (unsigned long)save_stack_trace_tsk;
+ frame.pc = (unsigned long)__save_stack_trace;
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
frame.graph = tsk->curr_ret_stack;
@@ -210,9 +212,15 @@
}
EXPORT_SYMBOL(save_stack_trace_tsk);
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+ __save_stack_trace(tsk, trace, 1);
+}
+
void save_stack_trace(struct stack_trace *trace)
{
- save_stack_trace_tsk(current, trace);
+ __save_stack_trace(current, trace, 0);
}
+
EXPORT_SYMBOL_GPL(save_stack_trace);
#endif
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 5620500..19f3515 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -114,7 +114,7 @@
for (i = -4; i < 1; i++) {
unsigned int val, bad;
- bad = __get_user(val, &((u32 *)addr)[i]);
+ bad = get_user(val, &((u32 *)addr)[i]);
if (!bad)
p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index 14c4e3b..48b0354 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -2,7 +2,7 @@
# Makefile for Kernel-based Virtual Machine module, HYP part
#
-ccflags-y += -fno-stack-protector
+ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING
KVM=../../../../virt/kvm
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index da6a8cf..3556715 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -33,12 +33,26 @@
#define LOWER_EL_AArch64_VECTOR 0x400
#define LOWER_EL_AArch32_VECTOR 0x600
+/*
+ * Table taken from ARMv8 ARM DDI0487B-B, table G1-10.
+ */
+static const u8 return_offsets[8][2] = {
+ [0] = { 0, 0 }, /* Reset, unused */
+ [1] = { 4, 2 }, /* Undefined */
+ [2] = { 0, 0 }, /* SVC, unused */
+ [3] = { 4, 4 }, /* Prefetch abort */
+ [4] = { 8, 8 }, /* Data abort */
+ [5] = { 0, 0 }, /* HVC, unused */
+ [6] = { 4, 4 }, /* IRQ, unused */
+ [7] = { 4, 4 }, /* FIQ, unused */
+};
+
static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
{
unsigned long cpsr;
unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
- u32 return_offset = (is_thumb) ? 4 : 0;
+ u32 return_offset = return_offsets[vect_offset >> 2][is_thumb];
u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
cpsr = mode | COMPAT_PSR_I_BIT;
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 0522f50..31d4684 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -971,14 +971,21 @@
* then the IOMMU core will have already configured a group for this
* device, and allocated the default domain for that group.
*/
- if (!domain || iommu_dma_init_domain(domain, dma_base, size, dev)) {
- pr_debug("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
- dev_name(dev));
- return false;
+ if (!domain)
+ goto out_err;
+
+ if (domain->type == IOMMU_DOMAIN_DMA) {
+ if (iommu_dma_init_domain(domain, dma_base, size, dev))
+ goto out_err;
+
+ dev->archdata.dma_ops = &iommu_dma_ops;
}
- dev->archdata.dma_ops = &iommu_dma_ops;
return true;
+out_err:
+ pr_debug("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
+ dev_name(dev));
+ return false;
}
static void queue_iommu_attach(struct device *dev, const struct iommu_ops *ops,
diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c
index 58fca9a..3446b6f 100644
--- a/arch/mips/ar7/platform.c
+++ b/arch/mips/ar7/platform.c
@@ -576,6 +576,7 @@
uart_port.type = PORT_AR7;
uart_port.uartclk = clk_get_rate(bus_clk) / 2;
uart_port.iotype = UPIO_MEM32;
+ uart_port.flags = UPF_FIXED_TYPE;
uart_port.regshift = 2;
uart_port.line = 0;
@@ -654,6 +655,10 @@
u32 val;
int res;
+ res = ar7_gpio_init();
+ if (res)
+ pr_warn("unable to register gpios: %d\n", res);
+
res = ar7_register_uarts();
if (res)
pr_err("unable to setup uart(s): %d\n", res);
diff --git a/arch/mips/ar7/prom.c b/arch/mips/ar7/prom.c
index a23adc4..36aabee 100644
--- a/arch/mips/ar7/prom.c
+++ b/arch/mips/ar7/prom.c
@@ -246,8 +246,6 @@
ar7_init_cmdline(fw_arg0, (char **)fw_arg1);
ar7_init_env((struct env_var *)fw_arg2);
console_config();
-
- ar7_gpio_init();
}
#define PORT(offset) (KSEG1ADDR(AR7_REGS_UART0 + (offset * 4)))
diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
index 2e41807..b6845db 100644
--- a/arch/mips/include/asm/mips-cm.h
+++ b/arch/mips/include/asm/mips-cm.h
@@ -239,8 +239,8 @@
#define CM_GCR_BASE_GCRBASE_MSK (_ULCAST_(0x1ffff) << 15)
#define CM_GCR_BASE_CMDEFTGT_SHF 0
#define CM_GCR_BASE_CMDEFTGT_MSK (_ULCAST_(0x3) << 0)
-#define CM_GCR_BASE_CMDEFTGT_DISABLED 0
-#define CM_GCR_BASE_CMDEFTGT_MEM 1
+#define CM_GCR_BASE_CMDEFTGT_MEM 0
+#define CM_GCR_BASE_CMDEFTGT_RESERVED 1
#define CM_GCR_BASE_CMDEFTGT_IOCU0 2
#define CM_GCR_BASE_CMDEFTGT_IOCU1 3
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 1b50958..c558bce 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -50,9 +50,7 @@
#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
- /* What the heck is this check doing ? */
- if (!cpumask_test_cpu(smp_processor_id(), &cpu_callin_map))
- play_dead();
+ play_dead();
}
#endif
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 6d0f132..47c9646 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -587,11 +587,11 @@
/* Flush and enable RAC */
cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
- __raw_writel(cfg | 0x100, BMIPS_RAC_CONFIG);
+ __raw_writel(cfg | 0x100, cbr + BMIPS_RAC_CONFIG);
__raw_readl(cbr + BMIPS_RAC_CONFIG);
cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
- __raw_writel(cfg | 0xf, BMIPS_RAC_CONFIG);
+ __raw_writel(cfg | 0xf, cbr + BMIPS_RAC_CONFIG);
__raw_readl(cbr + BMIPS_RAC_CONFIG);
cfg = __raw_readl(cbr + BMIPS_RAC_ADDRESS_RANGE);
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 7ebb191..95ba427 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -68,6 +68,9 @@
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);
+static DECLARE_COMPLETION(cpu_starting);
+static DECLARE_COMPLETION(cpu_running);
+
/*
* A logcal cpu mask containing only one VPE per core to
* reduce the number of IPIs on large MT systems.
@@ -369,9 +372,12 @@
cpumask_set_cpu(cpu, &cpu_coherent_mask);
notify_cpu_starting(cpu);
- cpumask_set_cpu(cpu, &cpu_callin_map);
+ /* Notify boot CPU that we're starting & ready to sync counters */
+ complete(&cpu_starting);
+
synchronise_count_slave(cpu);
+ /* The CPU is running and counters synchronised, now mark it online */
set_cpu_online(cpu, true);
set_cpu_sibling_map(cpu);
@@ -380,6 +386,12 @@
calculate_cpu_foreign_map();
/*
+ * Notify boot CPU that we're up & online and it can safely return
+ * from __cpu_up
+ */
+ complete(&cpu_running);
+
+ /*
* irq will be enabled in ->smp_finish(), enabling it too early
* is dangerous.
*/
@@ -430,22 +442,23 @@
{
set_cpu_possible(0, true);
set_cpu_online(0, true);
- cpumask_set_cpu(0, &cpu_callin_map);
}
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
mp_ops->boot_secondary(cpu, tidle);
- /*
- * Trust is futile. We should really have timeouts ...
- */
- while (!cpumask_test_cpu(cpu, &cpu_callin_map)) {
- udelay(100);
- schedule();
+ /* Wait for CPU to start and be ready to sync counters */
+ if (!wait_for_completion_timeout(&cpu_starting,
+ msecs_to_jiffies(1000))) {
+ pr_crit("CPU%u: failed to start\n", cpu);
+ return -EIO;
}
synchronise_count_master(cpu);
+
+ /* Wait for CPU to finish startup & mark itself online before return */
+ wait_for_completion(&cpu_running);
return 0;
}
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
index 277cf52..6c17cba 100644
--- a/arch/mips/mm/uasm-micromips.c
+++ b/arch/mips/mm/uasm-micromips.c
@@ -80,7 +80,7 @@
{ insn_jr, M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RS },
{ insn_lb, M(mm_lb32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
{ insn_ld, 0, 0 },
- { insn_lh, M(mm_lh32_op, 0, 0, 0, 0, 0), RS | RS | SIMM },
+ { insn_lh, M(mm_lh32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
{ insn_ll, M(mm_pool32c_op, 0, 0, (mm_ll_func << 1), 0, 0), RS | RT | SIMM },
{ insn_lld, 0, 0 },
{ insn_lui, M(mm_pool32i_op, mm_lui_op, 0, 0, 0, 0), RS | SIMM },
diff --git a/arch/powerpc/boot/dts/fsl/kmcoge4.dts b/arch/powerpc/boot/dts/fsl/kmcoge4.dts
index ae70a24..e103c0f 100644
--- a/arch/powerpc/boot/dts/fsl/kmcoge4.dts
+++ b/arch/powerpc/boot/dts/fsl/kmcoge4.dts
@@ -83,6 +83,10 @@
};
};
+ sdhc@114000 {
+ status = "disabled";
+ };
+
i2c@119000 {
status = "disabled";
};
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index bc3f7d0..f1d7e99 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -407,6 +407,7 @@
struct cpu_accounting_data *acct = get_accounting(current);
acct->starttime = get_accounting(prev)->starttime;
+ acct->startspurr = get_accounting(prev)->startspurr;
acct->system_time = 0;
acct->user_time = 0;
}
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index a0ea63a..a8e3498 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -376,6 +376,7 @@
*/
if (reject && reject != XICS_IPI) {
arch_spin_unlock(&ics->lock);
+ icp->n_reject++;
new_irq = reject;
goto again;
}
@@ -707,10 +708,8 @@
state = &ics->irq_state[src];
/* Still asserted, resend it */
- if (state->asserted) {
- icp->n_reject++;
+ if (state->asserted)
icp_rm_deliver_irq(xics, icp, irq);
- }
if (!hlist_empty(&vcpu->kvm->irq_ack_notifier_list)) {
icp->rm_action |= XICS_RM_NOTIFY_EOI;
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index d5ce34d..1e28747 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -42,6 +42,8 @@
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
+#include <linux/of_fdt.h>
+#include <linux/libfdt.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
@@ -421,6 +423,28 @@
}
early_param("disable_radix", parse_disable_radix);
+/*
+ * If we're running under a hypervisor, we currently can't do radix
+ * since we don't have the code to do the H_REGISTER_PROC_TBL hcall.
+ * We tell that we're running under a hypervisor by looking for the
+ * /chosen/ibm,architecture-vec-5 property.
+ */
+static void early_check_vec5(void)
+{
+ unsigned long root, chosen;
+ int size;
+ const u8 *vec5;
+
+ root = of_get_flat_dt_root();
+ chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
+ if (chosen == -FDT_ERR_NOTFOUND)
+ return;
+ vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
+ if (!vec5)
+ return;
+ cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
+}
+
void __init mmu_early_init_devtree(void)
{
/* Disable radix mode based on kernel command line. */
@@ -428,6 +452,15 @@
if (disable_radix || !(mfmsr() & MSR_HV))
cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
+ /*
+ * Check /chosen/ibm,architecture-vec-5 if running as a guest.
+ * When running bare-metal, we can use radix if we like
+ * even though the ibm,architecture-vec-5 property created by
+ * skiboot doesn't have the necessary bits set.
+ */
+ if (early_radix_enabled() && !(mfmsr() & MSR_HV))
+ early_check_vec5();
+
if (early_radix_enabled())
radix__early_init_devtree();
else
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 303d28e..591cbdf6 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -28,6 +28,7 @@
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/spinlock.h>
+#include <linux/fips.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>
@@ -501,6 +502,12 @@
if (err)
return err;
+ /* In fips mode only 128 bit or 256 bit keys are valid */
+ if (fips_enabled && key_len != 32 && key_len != 64) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+ }
+
/* Pick the correct function code based on the key length */
fc = (key_len == 32) ? CPACF_KM_XTS_128 :
(key_len == 64) ? CPACF_KM_XTS_256 : 0;
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
index 1113389..fe7368a 100644
--- a/arch/s390/crypto/prng.c
+++ b/arch/s390/crypto/prng.c
@@ -110,22 +110,30 @@
/*** helper functions ***/
+/*
+ * generate_entropy:
+ * This algorithm produces 64 bytes of entropy data based on 1024
+ * individual stckf() invocations assuming that each stckf() value
+ * contributes 0.25 bits of entropy. So the caller gets 256 bit
+ * entropy per 64 byte or 4 bits entropy per byte.
+ */
static int generate_entropy(u8 *ebuf, size_t nbytes)
{
int n, ret = 0;
- u8 *pg, *h, hash[32];
+ u8 *pg, *h, hash[64];
- pg = (u8 *) __get_free_page(GFP_KERNEL);
+ /* allocate 2 pages */
+ pg = (u8 *) __get_free_pages(GFP_KERNEL, 1);
if (!pg) {
prng_errorflag = PRNG_GEN_ENTROPY_FAILED;
return -ENOMEM;
}
while (nbytes) {
- /* fill page with urandom bytes */
- get_random_bytes(pg, PAGE_SIZE);
- /* exor page with stckf values */
- for (n = 0; n < PAGE_SIZE / sizeof(u64); n++) {
+ /* fill pages with urandom bytes */
+ get_random_bytes(pg, 2*PAGE_SIZE);
+ /* exor pages with 1024 stckf values */
+ for (n = 0; n < 2 * PAGE_SIZE / sizeof(u64); n++) {
u64 *p = ((u64 *)pg) + n;
*p ^= get_tod_clock_fast();
}
@@ -134,8 +142,8 @@
h = hash;
else
h = ebuf;
- /* generate sha256 from this page */
- cpacf_kimd(CPACF_KIMD_SHA_256, h, pg, PAGE_SIZE);
+ /* hash over the filled pages */
+ cpacf_kimd(CPACF_KIMD_SHA_512, h, pg, 2*PAGE_SIZE);
if (n < sizeof(hash))
memcpy(ebuf, hash, n);
ret += n;
@@ -143,7 +151,7 @@
nbytes -= n;
}
- free_page((unsigned long)pg);
+ free_pages((unsigned long)pg, 1);
return ret;
}
@@ -334,7 +342,7 @@
static int __init prng_sha512_instantiate(void)
{
int ret, datalen;
- u8 seed[64];
+ u8 seed[64 + 32 + 16];
pr_debug("prng runs in SHA-512 mode "
"with chunksize=%d and reseed_limit=%u\n",
@@ -357,12 +365,12 @@
if (ret)
goto outfree;
- /* generate initial seed bytestring, first 48 bytes of entropy */
- ret = generate_entropy(seed, 48);
- if (ret != 48)
+ /* generate initial seed bytestring, with 256 + 128 bits entropy */
+ ret = generate_entropy(seed, 64 + 32);
+ if (ret != 64 + 32)
goto outfree;
/* followed by 16 bytes of unique nonce */
- get_tod_clock_ext(seed + 48);
+ get_tod_clock_ext(seed + 64 + 32);
/* initial seed of the ppno drng */
cpacf_ppno(CPACF_PPNO_SHA512_DRNG_SEED,
@@ -395,9 +403,9 @@
static int prng_sha512_reseed(void)
{
int ret;
- u8 seed[32];
+ u8 seed[64];
- /* generate 32 bytes of fresh entropy */
+ /* fetch 256 bits of fresh entropy */
ret = generate_entropy(seed, sizeof(seed));
if (ret != sizeof(seed))
return ret;
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 2374c5b..0c19686 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -363,6 +363,18 @@
#endif
}
+static int __init topology_setup(char *str)
+{
+ bool enabled;
+ int rc;
+
+ rc = kstrtobool(str, &enabled);
+ if (!rc && !enabled)
+ S390_lowcore.machine_flags &= ~MACHINE_HAS_TOPOLOGY;
+ return rc;
+}
+early_param("topology", topology_setup);
+
static int __init disable_vector_extension(char *str)
{
S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX;
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 8705ee6..239f295 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -37,7 +37,6 @@
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;
-static bool topology_enabled = true;
static DECLARE_WORK(topology_work, topology_work_fn);
/*
@@ -56,7 +55,7 @@
cpumask_t mask;
cpumask_copy(&mask, cpumask_of(cpu));
- if (!topology_enabled || !MACHINE_HAS_TOPOLOGY)
+ if (!MACHINE_HAS_TOPOLOGY)
return mask;
for (; info; info = info->next) {
if (cpumask_test_cpu(cpu, &info->mask))
@@ -71,7 +70,7 @@
int i;
cpumask_copy(&mask, cpumask_of(cpu));
- if (!topology_enabled || !MACHINE_HAS_TOPOLOGY)
+ if (!MACHINE_HAS_TOPOLOGY)
return mask;
cpu -= cpu % (smp_cpu_mtid + 1);
for (i = 0; i <= smp_cpu_mtid; i++)
@@ -413,12 +412,6 @@
return &per_cpu(cpu_topology, cpu).drawer_mask;
}
-static int __init early_parse_topology(char *p)
-{
- return kstrtobool(p, &topology_enabled);
-}
-early_param("topology", early_parse_topology);
-
static struct sched_domain_topology_level s390_topology[] = {
{ cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
index 538c10d..8dc315b 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh770x.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
@@ -165,7 +165,6 @@
.scscr = SCSCR_TE | SCSCR_RE,
.type = PORT_IRDA,
.ops = &sh770x_sci_port_ops,
- .regshift = 1,
};
static struct resource scif2_resources[] = {
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
index 96df6a3..a2ae689 100644
--- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
+++ b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
@@ -157,8 +157,8 @@
.endr
# Find min length
- vmovdqa _lens+0*16(state), %xmm0
- vmovdqa _lens+1*16(state), %xmm1
+ vmovdqu _lens+0*16(state), %xmm0
+ vmovdqu _lens+1*16(state), %xmm1
vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
@@ -178,8 +178,8 @@
vpsubd %xmm2, %xmm0, %xmm0
vpsubd %xmm2, %xmm1, %xmm1
- vmovdqa %xmm0, _lens+0*16(state)
- vmovdqa %xmm1, _lens+1*16(state)
+ vmovdqu %xmm0, _lens+0*16(state)
+ vmovdqu %xmm1, _lens+1*16(state)
# "state" and "args" are the same address, arg1
# len is arg2
@@ -235,8 +235,8 @@
jc .return_null
# Find min length
- vmovdqa _lens(state), %xmm0
- vmovdqa _lens+1*16(state), %xmm1
+ vmovdqu _lens(state), %xmm0
+ vmovdqu _lens+1*16(state), %xmm1
vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
index a78a069..ec9bee6 100644
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
+++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
@@ -155,8 +155,8 @@
.endr
# Find min length
- vmovdqa _lens+0*16(state), %xmm0
- vmovdqa _lens+1*16(state), %xmm1
+ vmovdqu _lens+0*16(state), %xmm0
+ vmovdqu _lens+1*16(state), %xmm1
vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
@@ -176,8 +176,8 @@
vpsubd %xmm2, %xmm0, %xmm0
vpsubd %xmm2, %xmm1, %xmm1
- vmovdqa %xmm0, _lens+0*16(state)
- vmovdqa %xmm1, _lens+1*16(state)
+ vmovdqu %xmm0, _lens+0*16(state)
+ vmovdqu %xmm1, _lens+1*16(state)
# "state" and "args" are the same address, arg1
# len is arg2
@@ -234,8 +234,8 @@
jc .return_null
# Find min length
- vmovdqa _lens(state), %xmm0
- vmovdqa _lens+1*16(state), %xmm1
+ vmovdqu _lens(state), %xmm0
+ vmovdqu _lens+1*16(state), %xmm1
vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index a300aa1..dead0f3 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -68,6 +68,12 @@
__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})
+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+# define WARN_ON_IN_IRQ() WARN_ON_ONCE(!in_task())
+#else
+# define WARN_ON_IN_IRQ()
+#endif
+
/**
* access_ok: - Checks if a user space pointer is valid
* @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
@@ -88,8 +94,11 @@
* checks that the pointer is in the user space range - after calling
* this function, memory access functions may still return -EFAULT.
*/
-#define access_ok(type, addr, size) \
- likely(!__range_not_ok(addr, size, user_addr_max()))
+#define access_ok(type, addr, size) \
+({ \
+ WARN_ON_IN_IRQ(); \
+ likely(!__range_not_ok(addr, size, user_addr_max())); \
+})
/*
* These are the main single-value transfer routines. They automatically
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 36171bc..9fe7b9e 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -181,6 +181,12 @@
smp_store_cpu_info(cpuid);
/*
+ * The topology information must be up to date before
+ * calibrate_delay() and notify_cpu_starting().
+ */
+ set_cpu_sibling_map(raw_smp_processor_id());
+
+ /*
* Get our bogomips.
* Update loops_per_jiffy in cpu_data. Previous call to
* smp_store_cpu_info() stored a value that is close but not as
@@ -190,11 +196,6 @@
cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy;
pr_debug("Stack at about %p\n", &cpuid);
- /*
- * This must be done before setting cpu_online_mask
- * or calling notify_cpu_starting.
- */
- set_cpu_sibling_map(raw_smp_processor_id());
wmb();
notify_cpu_starting(cpuid);
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 6e57edf..44bf5cf 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -1382,12 +1382,10 @@
unsigned long calibrate_delay_is_known(void)
{
int sibling, cpu = smp_processor_id();
- struct cpumask *mask = topology_core_cpumask(cpu);
+ int constant_tsc = cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC);
+ const struct cpumask *mask = topology_core_cpumask(cpu);
- if (!tsc_disabled && !cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC))
- return 0;
-
- if (!mask)
+ if (tsc_disabled || !constant_tsc || !mask)
return 0;
sibling = cpumask_any_but(mask, cpu);
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index 350f709..7913b69 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -212,8 +212,8 @@
eax.full = cpuid_eax(0xa);
/* Workaround for BIOS bugs in 6/15. Taken from perfmon2 */
- if (eax.split.version_id == 0 && __this_cpu_read(cpu_info.x86) == 6 &&
- __this_cpu_read(cpu_info.x86_model) == 15) {
+ if (eax.split.version_id == 0 && boot_cpu_data.x86 == 6 &&
+ boot_cpu_data.x86_model == 15) {
eax.split.version_id = 2;
eax.split.num_counters = 2;
eax.split.bit_width = 40;
diff --git a/block/bio.c b/block/bio.c
index 07f287b..e14a897 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -589,7 +589,7 @@
bio->bi_opf = bio_src->bi_opf;
bio->bi_iter = bio_src->bi_iter;
bio->bi_io_vec = bio_src->bi_io_vec;
-
+ bio->bi_dio_inode = bio_src->bi_dio_inode;
bio_clone_blkcg_association(bio, bio_src);
}
EXPORT_SYMBOL(__bio_clone_fast);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index abde370..0272fac 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -6,7 +6,7 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
-
+#include <linux/pfk.h>
#include <trace/events/block.h>
#include "blk.h"
@@ -725,6 +725,11 @@
}
}
+static bool crypto_not_mergeable(const struct bio *bio, const struct bio *nxt)
+{
+ return (!pfk_allow_merge_bio(bio, nxt));
+}
+
/*
* Has to be called with the request spinlock acquired
*/
@@ -752,6 +757,8 @@
!blk_write_same_mergeable(req->bio, next->bio))
return 0;
+ if (crypto_not_mergeable(req->bio, next->bio))
+ return 0;
/*
* If we are allowed to merge, then append bio list
* from next to rq and release next. merge_requests_fn
@@ -862,6 +869,8 @@
!blk_write_same_mergeable(rq->bio, bio))
return false;
+ if (crypto_not_mergeable(rq->bio, bio))
+ return false;
return true;
}
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 006d857..b3ace63 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -413,7 +413,7 @@
unsigned int cryptlen = req->cryptlen;
u8 *authtag = pctx->auth_tag;
u8 *odata = pctx->odata;
- u8 *iv = req->iv;
+ u8 *iv = pctx->idata;
int err;
cryptlen -= authsize;
@@ -429,6 +429,8 @@
if (req->src != req->dst)
dst = pctx->dst;
+ memcpy(iv, req->iv, 16);
+
skcipher_request_set_tfm(skreq, ctx->ctr);
skcipher_request_set_callback(skreq, pctx->flags,
crypto_ccm_decrypt_done, req);
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 256a1d5..4e7e9a7 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -465,9 +465,8 @@
};
enum binder_deferred_state {
- BINDER_DEFERRED_PUT_FILES = 0x01,
- BINDER_DEFERRED_FLUSH = 0x02,
- BINDER_DEFERRED_RELEASE = 0x04,
+ BINDER_DEFERRED_FLUSH = 0x01,
+ BINDER_DEFERRED_RELEASE = 0x02,
};
/**
@@ -504,8 +503,6 @@
* (invariant after initialized)
* @tsk task_struct for group_leader of process
* (invariant after initialized)
- * @files files_struct for process
- * (invariant after initialized)
* @deferred_work_node: element for binder_deferred_list
* (protected by binder_deferred_lock)
* @deferred_work: bitmap of deferred work to perform
@@ -552,7 +549,6 @@
struct list_head waiting_threads;
int pid;
struct task_struct *tsk;
- struct files_struct *files;
struct hlist_node deferred_work_node;
int deferred_work;
bool is_dead;
@@ -600,6 +596,8 @@
* (protected by @proc->inner_lock)
* @todo: list of work to do for this thread
* (protected by @proc->inner_lock)
+ * @process_todo: whether work in @todo should be processed
+ * (protected by @proc->inner_lock)
* @return_error: transaction errors reported by this thread
* (only accessed by this thread)
* @reply_error: transaction errors reported by target thread
@@ -626,6 +624,7 @@
bool looper_need_return; /* can be written by other thread */
struct binder_transaction *transaction_stack;
struct list_head todo;
+ bool process_todo;
struct binder_error return_error;
struct binder_error reply_error;
wait_queue_head_t wait;
@@ -813,6 +812,16 @@
return ret;
}
+/**
+ * binder_enqueue_work_ilocked() - Add an item to the work list
+ * @work: struct binder_work to add to list
+ * @target_list: list to add work to
+ *
+ * Adds the work to the specified list. Asserts that work
+ * is not already on a list.
+ *
+ * Requires the proc->inner_lock to be held.
+ */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
struct list_head *target_list)
@@ -823,22 +832,56 @@
}
/**
- * binder_enqueue_work() - Add an item to the work list
- * @proc: binder_proc associated with list
+ * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
+ * @thread: thread to queue work to
* @work: struct binder_work to add to list
- * @target_list: list to add work to
*
- * Adds the work to the specified list. Asserts that work
- * is not already on a list.
+ * Adds the work to the todo list of the thread. Doesn't set the process_todo
+ * flag, which means that (if it wasn't already set) the thread will go to
+ * sleep without handling this work when it calls read.
+ *
+ * Requires the proc->inner_lock to be held.
*/
static void
-binder_enqueue_work(struct binder_proc *proc,
- struct binder_work *work,
- struct list_head *target_list)
+binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
+ struct binder_work *work)
{
- binder_inner_proc_lock(proc);
- binder_enqueue_work_ilocked(work, target_list);
- binder_inner_proc_unlock(proc);
+ binder_enqueue_work_ilocked(work, &thread->todo);
+}
+
+/**
+ * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
+ * @thread: thread to queue work to
+ * @work: struct binder_work to add to list
+ *
+ * Adds the work to the todo list of the thread, and enables processing
+ * of the todo queue.
+ *
+ * Requires the proc->inner_lock to be held.
+ */
+static void
+binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
+ struct binder_work *work)
+{
+ binder_enqueue_work_ilocked(work, &thread->todo);
+ thread->process_todo = true;
+}
+
+/**
+ * binder_enqueue_thread_work() - Add an item to the thread work list
+ * @thread: thread to queue work to
+ * @work: struct binder_work to add to list
+ *
+ * Adds the work to the todo list of the thread, and enables processing
+ * of the todo queue.
+ */
+static void
+binder_enqueue_thread_work(struct binder_thread *thread,
+ struct binder_work *work)
+{
+ binder_inner_proc_lock(thread->proc);
+ binder_enqueue_thread_work_ilocked(thread, work);
+ binder_inner_proc_unlock(thread->proc);
}
static void
@@ -901,22 +944,34 @@
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
+struct files_struct *binder_get_files_struct(struct binder_proc *proc)
+{
+ return get_files_struct(proc->tsk);
+}
+
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
- struct files_struct *files = proc->files;
+ struct files_struct *files;
unsigned long rlim_cur;
unsigned long irqs;
+ int ret;
+ files = binder_get_files_struct(proc);
if (files == NULL)
return -ESRCH;
- if (!lock_task_sighand(proc->tsk, &irqs))
- return -EMFILE;
+ if (!lock_task_sighand(proc->tsk, &irqs)) {
+ ret = -EMFILE;
+ goto err;
+ }
rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
unlock_task_sighand(proc->tsk, &irqs);
- return __alloc_fd(files, 0, rlim_cur, flags);
+ ret = __alloc_fd(files, 0, rlim_cur, flags);
+err:
+ put_files_struct(files);
+ return ret;
}
/*
@@ -925,8 +980,12 @@
static void task_fd_install(
struct binder_proc *proc, unsigned int fd, struct file *file)
{
- if (proc->files)
- __fd_install(proc->files, fd, file);
+ struct files_struct *files = binder_get_files_struct(proc);
+
+ if (files) {
+ __fd_install(files, fd, file);
+ put_files_struct(files);
+ }
}
/*
@@ -934,18 +993,20 @@
*/
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
+ struct files_struct *files = binder_get_files_struct(proc);
int retval;
- if (proc->files == NULL)
+ if (files == NULL)
return -ESRCH;
- retval = __close_fd(proc->files, fd);
+ retval = __close_fd(files, fd);
/* can't restart close syscall because file table entry was cleared */
if (unlikely(retval == -ERESTARTSYS ||
retval == -ERESTARTNOINTR ||
retval == -ERESTARTNOHAND ||
retval == -ERESTART_RESTARTBLOCK))
retval = -EINTR;
+ put_files_struct(files);
return retval;
}
@@ -953,7 +1014,7 @@
static bool binder_has_work_ilocked(struct binder_thread *thread,
bool do_proc_work)
{
- return !binder_worklist_empty_ilocked(&thread->todo) ||
+ return thread->process_todo ||
thread->looper_need_return ||
(do_proc_work &&
!binder_worklist_empty_ilocked(&thread->proc->todo));
@@ -1188,7 +1249,7 @@
struct binder_priority node_prio,
bool inherit_rt)
{
- struct binder_priority desired_prio;
+ struct binder_priority desired_prio = t->priority;
if (t->set_priority_called)
return;
@@ -1200,9 +1261,6 @@
if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) {
desired_prio.prio = NICE_TO_PRIO(0);
desired_prio.sched_policy = SCHED_NORMAL;
- } else {
- desired_prio.prio = t->priority.prio;
- desired_prio.sched_policy = t->priority.sched_policy;
}
if (node_prio.prio < t->priority.prio ||
@@ -1305,7 +1363,7 @@
node->cookie = cookie;
node->work.type = BINDER_WORK_NODE;
priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
- node->sched_policy = (flags & FLAT_BINDER_FLAG_PRIORITY_MASK) >>
+ node->sched_policy = (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >>
FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
node->min_priority = to_kernel_prio(node->sched_policy, priority);
node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
@@ -1373,6 +1431,17 @@
node->local_strong_refs++;
if (!node->has_strong_ref && target_list) {
binder_dequeue_work_ilocked(&node->work);
+ /*
+ * Note: this function is the only place where we queue
+ * directly to a thread->todo without using the
+ * corresponding binder_enqueue_thread_work() helper
+ * functions; in this case it's ok to not set the
+ * process_todo flag, since we know this node work will
+ * always be followed by other work that starts queue
+ * processing: in case of synchronous transactions, a
+ * BR_REPLY or BR_ERROR; in case of oneway
+ * transactions, a BR_TRANSACTION_COMPLETE.
+ */
binder_enqueue_work_ilocked(&node->work, target_list);
}
} else {
@@ -1384,6 +1453,9 @@
node->debug_id);
return -EINVAL;
}
+ /*
+ * See comment above
+ */
binder_enqueue_work_ilocked(&node->work, target_list);
}
}
@@ -2073,9 +2145,9 @@
binder_pop_transaction_ilocked(target_thread, t);
if (target_thread->reply_error.cmd == BR_OK) {
target_thread->reply_error.cmd = error_code;
- binder_enqueue_work_ilocked(
- &target_thread->reply_error.work,
- &target_thread->todo);
+ binder_enqueue_thread_work_ilocked(
+ target_thread,
+ &target_thread->reply_error.work);
wake_up_interruptible(&target_thread->wait);
} else {
WARN(1, "Unexpected reply error: %u\n",
@@ -2714,11 +2786,10 @@
struct binder_proc *proc,
struct binder_thread *thread)
{
- struct list_head *target_list = NULL;
struct binder_node *node = t->buffer->target_node;
struct binder_priority node_prio;
bool oneway = !!(t->flags & TF_ONE_WAY);
- bool wakeup = true;
+ bool pending_async = false;
BUG_ON(!node);
binder_node_lock(node);
@@ -2728,8 +2799,7 @@
if (oneway) {
BUG_ON(thread);
if (node->has_async_transaction) {
- target_list = &node->async_todo;
- wakeup = false;
+ pending_async = true;
} else {
node->has_async_transaction = 1;
}
@@ -2743,22 +2813,20 @@
return false;
}
- if (!thread && !target_list)
+ if (!thread && !pending_async)
thread = binder_select_thread_ilocked(proc);
if (thread) {
- target_list = &thread->todo;
binder_transaction_priority(thread->task, t, node_prio,
node->inherit_rt);
- } else if (!target_list) {
- target_list = &proc->todo;
+ binder_enqueue_thread_work_ilocked(thread, &t->work);
+ } else if (!pending_async) {
+ binder_enqueue_work_ilocked(&t->work, &proc->todo);
} else {
- BUG_ON(target_list != &node->async_todo);
+ binder_enqueue_work_ilocked(&t->work, &node->async_todo);
}
- binder_enqueue_work_ilocked(&t->work, target_list);
-
- if (wakeup)
+ if (!pending_async)
binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
binder_inner_proc_unlock(proc);
@@ -3260,10 +3328,10 @@
}
}
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
- binder_enqueue_work(proc, tcomplete, &thread->todo);
t->work.type = BINDER_WORK_TRANSACTION;
if (reply) {
+ binder_enqueue_thread_work(thread, tcomplete);
binder_inner_proc_lock(target_proc);
if (target_thread->is_dead) {
binder_inner_proc_unlock(target_proc);
@@ -3271,7 +3339,7 @@
}
BUG_ON(t->buffer->async_transaction != 0);
binder_pop_transaction_ilocked(target_thread, in_reply_to);
- binder_enqueue_work_ilocked(&t->work, &target_thread->todo);
+ binder_enqueue_thread_work_ilocked(target_thread, &t->work);
binder_inner_proc_unlock(target_proc);
wake_up_interruptible_sync(&target_thread->wait);
binder_restore_priority(current, in_reply_to->saved_priority);
@@ -3279,6 +3347,14 @@
} else if (!(t->flags & TF_ONE_WAY)) {
BUG_ON(t->buffer->async_transaction != 0);
binder_inner_proc_lock(proc);
+ /*
+ * Defer the TRANSACTION_COMPLETE, so we don't return to
+ * userspace immediately; this allows the target process to
+ * immediately start processing this transaction, reducing
+ * latency. We will then return the TRANSACTION_COMPLETE when
+ * the target replies (or there is an error).
+ */
+ binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
t->need_reply = 1;
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
@@ -3292,6 +3368,7 @@
} else {
BUG_ON(target_node == NULL);
BUG_ON(t->buffer->async_transaction != 1);
+ binder_enqueue_thread_work(thread, tcomplete);
if (!binder_proc_transaction(t, target_proc, NULL))
goto err_dead_proc_or_thread;
}
@@ -3371,15 +3448,11 @@
if (in_reply_to) {
binder_restore_priority(current, in_reply_to->saved_priority);
thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
- binder_enqueue_work(thread->proc,
- &thread->return_error.work,
- &thread->todo);
+ binder_enqueue_thread_work(thread, &thread->return_error.work);
binder_send_failed_reply(in_reply_to, return_error);
} else {
thread->return_error.cmd = return_error;
- binder_enqueue_work(thread->proc,
- &thread->return_error.work,
- &thread->todo);
+ binder_enqueue_thread_work(thread, &thread->return_error.work);
}
}
@@ -3683,10 +3756,9 @@
WARN_ON(thread->return_error.cmd !=
BR_OK);
thread->return_error.cmd = BR_ERROR;
- binder_enqueue_work(
- thread->proc,
- &thread->return_error.work,
- &thread->todo);
+ binder_enqueue_thread_work(
+ thread,
+ &thread->return_error.work);
binder_debug(
BINDER_DEBUG_FAILED_TRANSACTION,
"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
@@ -3766,9 +3838,9 @@
if (thread->looper &
(BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED))
- binder_enqueue_work_ilocked(
- &death->work,
- &thread->todo);
+ binder_enqueue_thread_work_ilocked(
+ thread,
+ &death->work);
else {
binder_enqueue_work_ilocked(
&death->work,
@@ -3823,8 +3895,8 @@
if (thread->looper &
(BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED))
- binder_enqueue_work_ilocked(
- &death->work, &thread->todo);
+ binder_enqueue_thread_work_ilocked(
+ thread, &death->work);
else {
binder_enqueue_work_ilocked(
&death->work,
@@ -3998,6 +4070,8 @@
break;
}
w = binder_dequeue_work_head_ilocked(list);
+ if (binder_worklist_empty_ilocked(&thread->todo))
+ thread->process_todo = false;
switch (w->type) {
case BINDER_WORK_TRANSACTION: {
@@ -4757,7 +4831,6 @@
(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
(unsigned long)pgprot_val(vma->vm_page_prot));
binder_alloc_vma_close(&proc->alloc);
- binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}
static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
@@ -4799,10 +4872,8 @@
vma->vm_private_data = proc;
ret = binder_alloc_mmap_handler(&proc->alloc, vma);
- if (ret)
- return ret;
- proc->files = get_files_struct(current);
- return 0;
+
+ return ret;
err_bad_arg:
pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
@@ -4981,8 +5052,6 @@
struct rb_node *n;
int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
- BUG_ON(proc->files);
-
mutex_lock(&binder_procs_lock);
hlist_del(&proc->proc_node);
mutex_unlock(&binder_procs_lock);
@@ -5064,8 +5133,6 @@
static void binder_deferred_func(struct work_struct *work)
{
struct binder_proc *proc;
- struct files_struct *files;
-
int defer;
do {
@@ -5082,21 +5149,11 @@
}
mutex_unlock(&binder_deferred_lock);
- files = NULL;
- if (defer & BINDER_DEFERRED_PUT_FILES) {
- files = proc->files;
- if (files)
- proc->files = NULL;
- }
-
if (defer & BINDER_DEFERRED_FLUSH)
binder_deferred_flush(proc);
if (defer & BINDER_DEFERRED_RELEASE)
binder_deferred_release(proc); /* frees proc */
-
- if (files)
- put_files_struct(files);
} while (proc);
}
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c
index 5552211..b52c617 100644
--- a/drivers/base/power/opp/of.c
+++ b/drivers/base/power/opp/of.c
@@ -386,7 +386,7 @@
{
const struct property *prop;
const __be32 *val;
- int nr;
+ int nr, ret;
prop = of_find_property(dev->of_node, "operating-points", NULL);
if (!prop)
@@ -409,9 +409,13 @@
unsigned long freq = be32_to_cpup(val++) * 1000;
unsigned long volt = be32_to_cpup(val++);
- if (_opp_add_v1(dev, freq, volt, false))
- dev_warn(dev, "%s: Failed to add OPP %ld\n",
- __func__, freq);
+ ret = _opp_add_v1(dev, freq, volt, false);
+ if (ret) {
+ dev_err(dev, "%s: Failed to add OPP %ld (%d)\n",
+ __func__, freq, ret);
+ dev_pm_opp_of_remove_table(dev);
+ return ret;
+ }
nr -= 2;
}
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index 404d94c..feba1b2 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -141,6 +141,13 @@
struct wake_irq *wirq = _wirq;
int res;
+ /* Maybe abort suspend? */
+ if (irqd_is_wakeup_set(irq_get_irq_data(irq))) {
+ pm_wakeup_event(wirq->dev, 0);
+
+ return IRQ_HANDLED;
+ }
+
/* We don't want RPM_ASYNC or RPM_NOWAIT here */
res = pm_runtime_resume(wirq->dev);
if (res < 0)
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 7b274ff..24f4b54 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -2788,7 +2788,7 @@
* from the parent.
*/
page_count = (u32)calc_pages_for(0, length);
- pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
+ pages = ceph_alloc_page_vector(page_count, GFP_NOIO);
if (IS_ERR(pages)) {
result = PTR_ERR(pages);
pages = NULL;
@@ -2922,7 +2922,7 @@
*/
size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
page_count = (u32)calc_pages_for(0, size);
- pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
+ pages = ceph_alloc_page_vector(page_count, GFP_NOIO);
if (IS_ERR(pages)) {
ret = PTR_ERR(pages);
goto fail_stat_request;
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index b793853..3880c90 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -212,15 +212,28 @@
const struct firmware *firmware)
{
u8 *send_buf;
- int len = 0;
- int err, pipe, size, sent = 0;
- int count = firmware->size;
+ int err, pipe, len, size, sent = 0;
+ int count;
BT_DBG("udev %p", udev);
+ if (!firmware || !firmware->data || firmware->size <= 0) {
+ err = -EINVAL;
+ BT_ERR("Not a valid FW file");
+ return err;
+ }
+
+ count = firmware->size;
+
+ if (count < FW_HDR_SIZE) {
+ err = -EINVAL;
+ BT_ERR("ath3k loading invalid size of file");
+ return err;
+ }
+
pipe = usb_sndctrlpipe(udev, 0);
- send_buf = kmalloc(BULK_SIZE, GFP_KERNEL);
+ send_buf = kzalloc(BULK_SIZE, GFP_KERNEL);
if (!send_buf) {
BT_ERR("Can't allocate memory chunk for firmware");
return -ENOMEM;
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index 28afd5d..f64e86f 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -1,7 +1,7 @@
/*
* Bluetooth supports for Qualcomm Atheros chips
*
- * Copyright (c) 2015 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
@@ -27,6 +27,9 @@
#define VERSION "0.1"
+#define MAX_PATCH_FILE_SIZE (100*1024)
+#define MAX_NVM_FILE_SIZE (10*1024)
+
static int rome_patch_ver_req(struct hci_dev *hdev, u32 *rome_version)
{
struct sk_buff *skb;
@@ -285,27 +288,63 @@
struct rome_config *config)
{
const struct firmware *fw;
+ u32 type_len, length;
+ struct tlv_type_hdr *tlv;
int ret;
- BT_INFO("%s: ROME Downloading %s", hdev->name, config->fwname);
-
+ BT_INFO("%s: ROME Downloading file: %s", hdev->name, config->fwname);
ret = request_firmware(&fw, config->fwname, &hdev->dev);
- if (ret) {
- BT_ERR("%s: Failed to request file: %s (%d)", hdev->name,
- config->fwname, ret);
+
+ if (ret || !fw || !fw->data || fw->size <= 0) {
+ BT_ERR("Failed to request file: err = (%d)", ret);
+ ret = ret ? ret : -EINVAL;
return ret;
}
- rome_tlv_check_data(config, fw);
-
- ret = rome_tlv_download_request(hdev, fw);
- if (ret) {
- BT_ERR("%s: Failed to download file: %s (%d)", hdev->name,
- config->fwname, ret);
+ if (config->type != TLV_TYPE_NVM &&
+ config->type != TLV_TYPE_PATCH) {
+ ret = -EINVAL;
+ BT_ERR("TLV_NVM dload: wrong config type selected");
+ goto exit;
}
- release_firmware(fw);
+ if (config->type == TLV_TYPE_PATCH &&
+ (fw->size > MAX_PATCH_FILE_SIZE)) {
+ ret = -EINVAL;
+ BT_ERR("TLV_PATCH dload: wrong patch file sizes");
+ goto exit;
+ } else if (config->type == TLV_TYPE_NVM &&
+ (fw->size > MAX_NVM_FILE_SIZE)) {
+ ret = -EINVAL;
+ BT_ERR("TLV_NVM dload: wrong NVM file sizes");
+ goto exit;
+ }
+ if (fw->size < sizeof(struct tlv_type_hdr)) {
+ ret = -EINVAL;
+ BT_ERR("Firware size smaller to fit minimum value");
+ goto exit;
+ }
+
+ tlv = (struct tlv_type_hdr *)fw->data;
+ type_len = le32_to_cpu(tlv->type_len);
+ length = (type_len >> 8) & 0x00ffffff;
+
+ if (fw->size - 4 != length) {
+ ret = -EINVAL;
+ BT_ERR("Requested size not matching size in header");
+ goto exit;
+ }
+
+ rome_tlv_check_data(config, fw);
+ ret = rome_tlv_download_request(hdev, fw);
+
+ if (ret) {
+ BT_ERR("Failed to download FW: error = (%d)", ret);
+ }
+
+exit:
+ release_firmware(fw);
return ret;
}
@@ -316,8 +355,9 @@
int err;
cmd[0] = EDL_NVM_ACCESS_SET_REQ_CMD;
- cmd[1] = 0x02; /* TAG ID */
- cmd[2] = sizeof(bdaddr_t); /* size */
+ /* Set the TAG ID of 0x02 for NVM set and size of tag */
+ cmd[1] = 0x02;
+ cmd[2] = sizeof(bdaddr_t);
memcpy(cmd + 3, bdaddr, sizeof(bdaddr_t));
skb = __hci_cmd_sync_ev(hdev, EDL_NVM_ACCESS_OPCODE, sizeof(cmd), cmd,
HCI_VENDOR_PKT, HCI_INIT_TIMEOUT);
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 122ebd2..35eea02 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -75,6 +75,8 @@
#define FASTRPC_LINK_CONNECTING (0x1)
#define FASTRPC_LINK_CONNECTED (0x3)
#define FASTRPC_LINK_DISCONNECTING (0x7)
+#define FASTRPC_LINK_REMOTE_DISCONNECTING (0x8)
+#define FASTRPC_GLINK_INTENT_LEN (64)
#define PERF_KEYS "count:flush:map:copy:glink:getargs:putargs:invalidate:invoke"
#define FASTRPC_STATIC_HANDLE_LISTENER (3)
@@ -232,16 +234,17 @@
int prevssrcount;
int issubsystemup;
int vmid;
+ int rhvmid;
int ramdumpenabled;
void *remoteheap_ramdump_dev;
struct fastrpc_glink_info link;
+ struct mutex mut;
};
struct fastrpc_apps {
struct fastrpc_channel_ctx *channel;
struct cdev cdev;
struct class *class;
- struct mutex smd_mutex;
struct smq_phy_page range;
struct hlist_head maps;
uint32_t staticpd_flags;
@@ -520,7 +523,7 @@
return -ENOTTY;
}
-static void fastrpc_mmap_free(struct fastrpc_mmap *map)
+static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags)
{
struct fastrpc_apps *me = &gfa;
struct fastrpc_file *fl;
@@ -537,15 +540,17 @@
if (!map->refs)
hlist_del_init(&map->hn);
spin_unlock(&me->hlock);
+ if (map->refs > 0)
+ return;
} else {
spin_lock(&fl->hlock);
map->refs--;
if (!map->refs)
hlist_del_init(&map->hn);
spin_unlock(&fl->hlock);
+ if (map->refs > 0 && !flags)
+ return;
}
- if (map->refs > 0)
- return;
if (map->flags == ADSP_MMAP_HEAP_ADDR ||
map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
@@ -635,6 +640,11 @@
map->size = len;
map->va = (uintptr_t __user)map->phys;
} else {
+ if (map->attr && (map->attr & FASTRPC_ATTR_KEEP_MAP)) {
+ pr_info("adsprpc: buffer mapped with persist attr %x\n",
+ (unsigned int)map->attr);
+ map->refs = 2;
+ }
VERIFY(err, !IS_ERR_OR_NULL(map->handle =
ion_import_dma_buf_fd(fl->apps->client, fd)));
if (err)
@@ -724,7 +734,7 @@
bail:
if (err && map)
- fastrpc_mmap_free(map);
+ fastrpc_mmap_free(map, 0);
return err;
}
@@ -995,7 +1005,7 @@
hlist_del_init(&ctx->hn);
spin_unlock(&ctx->fl->hlock);
for (i = 0; i < nbufs; ++i)
- fastrpc_mmap_free(ctx->maps[i]);
+ fastrpc_mmap_free(ctx->maps[i], 0);
fastrpc_buf_free(ctx->buf, 1);
ctx->magic = 0;
kfree(ctx);
@@ -1345,7 +1355,7 @@
if (err)
goto bail;
} else {
- fastrpc_mmap_free(ctx->maps[i]);
+ fastrpc_mmap_free(ctx->maps[i], 0);
ctx->maps[i] = NULL;
}
}
@@ -1355,7 +1365,7 @@
break;
if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], 0, 0,
0, 0, &mmap))
- fastrpc_mmap_free(mmap);
+ fastrpc_mmap_free(mmap, 0);
}
}
if (ctx->crc && crclist && rpra)
@@ -1486,12 +1496,12 @@
INIT_HLIST_HEAD(&me->drivers);
spin_lock_init(&me->hlock);
- mutex_init(&me->smd_mutex);
me->channel = &gcinfo[0];
for (i = 0; i < NUM_CHANNELS; i++) {
init_completion(&me->channel[i].work);
init_completion(&me->channel[i].workport);
me->channel[i].sesscount = 0;
+ mutex_init(&me->channel[i].mut);
}
}
@@ -1605,7 +1615,7 @@
struct fastrpc_mmap *file = NULL, *mem = NULL;
char *proc_name = NULL;
int srcVM[1] = {VMID_HLOS};
- int destVM[1] = {VMID_ADSP_Q6};
+ int destVM[1] = {me->channel[fl->cid].rhvmid};
int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
int hlosVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
@@ -1782,10 +1792,10 @@
if (mem->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
hyp_assign_phys(mem->phys, (uint64_t)mem->size,
destVM, 1, srcVM, hlosVMperm, 1);
- fastrpc_mmap_free(mem);
+ fastrpc_mmap_free(mem, 0);
}
if (file)
- fastrpc_mmap_free(file);
+ fastrpc_mmap_free(file, 0);
return err;
}
@@ -1821,6 +1831,7 @@
struct fastrpc_mmap *map)
{
struct fastrpc_ioctl_invoke_crc ioctl;
+ struct fastrpc_apps *me = &gfa;
struct smq_phy_page page;
int num = 1;
remote_arg_t ra[3];
@@ -1875,7 +1886,7 @@
} else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
int srcVM[1] = {VMID_HLOS};
- int destVM[1] = {VMID_ADSP_Q6};
+ int destVM[1] = {me->channel[fl->cid].rhvmid};
int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
@@ -1891,7 +1902,8 @@
struct fastrpc_mmap *map)
{
int err = 0;
- int srcVM[1] = {VMID_ADSP_Q6};
+ struct fastrpc_apps *me = &gfa;
+ int srcVM[1] = {me->channel[fl->cid].rhvmid};
int destVM[1] = {VMID_HLOS};
int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
@@ -2016,7 +2028,7 @@
kfree(ramdump_segments_rh);
}
}
- fastrpc_mmap_free(match);
+ fastrpc_mmap_free(match, 0);
}
} while (match);
bail:
@@ -2042,13 +2054,36 @@
VERIFY(err, !fastrpc_munmap_on_dsp(fl, map));
if (err)
goto bail;
- fastrpc_mmap_free(map);
+ fastrpc_mmap_free(map, 0);
bail:
if (err && map)
fastrpc_mmap_add(map);
return err;
}
+static int fastrpc_internal_munmap_fd(struct fastrpc_file *fl,
+ struct fastrpc_ioctl_munmap_fd *ud) {
+ int err = 0;
+ struct fastrpc_mmap *map = NULL;
+
+ VERIFY(err, (fl && ud));
+ if (err)
+ goto bail;
+
+ if (!fastrpc_mmap_find(fl, ud->fd, ud->va, ud->len, 0, 0, &map)) {
+ pr_err("mapping not found to unamp %x va %llx %x\n",
+ ud->fd, (unsigned long long)ud->va,
+ (unsigned int)ud->len);
+ err = -1;
+ goto bail;
+ }
+ if (map)
+ fastrpc_mmap_free(map, 0);
+bail:
+ return err;
+}
+
+
static int fastrpc_internal_mmap(struct fastrpc_file *fl,
struct fastrpc_ioctl_mmap *ud)
{
@@ -2071,7 +2106,7 @@
ud->vaddrout = map->raddr;
bail:
if (err && map)
- fastrpc_mmap_free(map);
+ fastrpc_mmap_free(map, 0);
return err;
}
@@ -2087,7 +2122,7 @@
ctx->chan = NULL;
glink_unregister_link_state_cb(ctx->link.link_notify_handle);
ctx->link.link_notify_handle = NULL;
- mutex_unlock(&me->smd_mutex);
+ mutex_unlock(&ctx->mut);
pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
MAJOR(me->dev_no), cid);
}
@@ -2180,10 +2215,15 @@
link->port_state = FASTRPC_LINK_DISCONNECTED;
break;
case GLINK_REMOTE_DISCONNECTED:
+ mutex_lock(&me->channel[cid].mut);
if (me->channel[cid].chan) {
+ link->port_state = FASTRPC_LINK_REMOTE_DISCONNECTING;
fastrpc_glink_close(me->channel[cid].chan, cid);
me->channel[cid].chan = NULL;
+ } else {
+ link->port_state = FASTRPC_LINK_DISCONNECTED;
}
+ mutex_unlock(&me->channel[cid].mut);
break;
default:
break;
@@ -2194,23 +2234,20 @@
struct fastrpc_session_ctx **session)
{
int err = 0;
- struct fastrpc_apps *me = &gfa;
- mutex_lock(&me->smd_mutex);
+ mutex_lock(&chan->mut);
if (!*session)
err = fastrpc_session_alloc_locked(chan, secure, session);
- mutex_unlock(&me->smd_mutex);
+ mutex_unlock(&chan->mut);
return err;
}
static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
struct fastrpc_session_ctx *session)
{
- struct fastrpc_apps *me = &gfa;
-
- mutex_lock(&me->smd_mutex);
+ mutex_lock(&chan->mut);
session->used = 0;
- mutex_unlock(&me->smd_mutex);
+ mutex_unlock(&chan->mut);
}
static int fastrpc_file_free(struct fastrpc_file *fl)
@@ -2239,11 +2276,11 @@
fastrpc_context_list_dtor(fl);
fastrpc_buf_list_free(fl);
hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
- fastrpc_mmap_free(map);
+ fastrpc_mmap_free(map, 1);
}
if (fl->ssrcount == fl->apps->channel[cid].ssrcount)
kref_put_mutex(&fl->apps->channel[cid].kref,
- fastrpc_channel_close, &fl->apps->smd_mutex);
+ fastrpc_channel_close, &fl->apps->channel[cid].mut);
if (fl->sctx)
fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
if (fl->secsctx)
@@ -2320,6 +2357,20 @@
return err;
}
+static void fastrpc_glink_stop(int cid)
+{
+ int err = 0;
+ struct fastrpc_glink_info *link;
+
+ VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
+ if (err)
+ return;
+ link = &gfa.channel[cid].link;
+
+ if (link->port_state == FASTRPC_LINK_CONNECTED)
+ link->port_state = FASTRPC_LINK_REMOTE_DISCONNECTING;
+}
+
static void fastrpc_glink_close(void *chan, int cid)
{
int err = 0;
@@ -2330,7 +2381,8 @@
return;
link = &gfa.channel[cid].link;
- if (link->port_state == FASTRPC_LINK_CONNECTED) {
+ if (link->port_state == FASTRPC_LINK_CONNECTED ||
+ link->port_state == FASTRPC_LINK_REMOTE_DISCONNECTING) {
link->port_state = FASTRPC_LINK_DISCONNECTING;
glink_close(chan);
}
@@ -2496,12 +2548,14 @@
struct fastrpc_apps *me = &gfa;
int cid, err = 0;
- mutex_lock(&me->smd_mutex);
-
VERIFY(err, fl && fl->sctx);
if (err)
- goto bail;
+ return err;
cid = fl->cid;
+ VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
+ if (err)
+ goto bail;
+ mutex_lock(&me->channel[cid].mut);
if (me->channel[cid].ssrcount !=
me->channel[cid].prevssrcount) {
if (!me->channel[cid].issubsystemup) {
@@ -2510,9 +2564,6 @@
goto bail;
}
}
- VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
- if (err)
- goto bail;
fl->ssrcount = me->channel[cid].ssrcount;
if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
(me->channel[cid].chan == NULL)) {
@@ -2523,9 +2574,11 @@
if (err)
goto bail;
+ mutex_unlock(&me->channel[cid].mut);
VERIFY(err,
wait_for_completion_timeout(&me->channel[cid].workport,
RPC_TIMEOUT));
+ mutex_lock(&me->channel[cid].mut);
if (err) {
me->channel[cid].chan = NULL;
goto bail;
@@ -2533,8 +2586,10 @@
kref_init(&me->channel[cid].kref);
pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
MAJOR(me->dev_no), cid);
- err = glink_queue_rx_intent(me->channel[cid].chan, NULL, 16);
- err |= glink_queue_rx_intent(me->channel[cid].chan, NULL, 64);
+ err = glink_queue_rx_intent(me->channel[cid].chan, NULL,
+ FASTRPC_GLINK_INTENT_LEN);
+ err |= glink_queue_rx_intent(me->channel[cid].chan, NULL,
+ FASTRPC_GLINK_INTENT_LEN);
if (err)
pr_warn("adsprpc: initial intent fail for %d err %d\n",
cid, err);
@@ -2548,7 +2603,7 @@
}
bail:
- mutex_unlock(&me->smd_mutex);
+ mutex_unlock(&me->channel[cid].mut);
return err;
}
@@ -2655,6 +2710,7 @@
struct fastrpc_ioctl_invoke_crc inv;
struct fastrpc_ioctl_mmap mmap;
struct fastrpc_ioctl_munmap munmap;
+ struct fastrpc_ioctl_munmap_fd munmap_fd;
struct fastrpc_ioctl_init_attrs init;
struct fastrpc_ioctl_perf perf;
struct fastrpc_ioctl_control cp;
@@ -2721,6 +2777,16 @@
if (err)
goto bail;
break;
+ case FASTRPC_IOCTL_MUNMAP_FD:
+ K_COPY_FROM_USER(err, 0, &p.munmap_fd, param,
+ sizeof(p.munmap_fd));
+ if (err)
+ goto bail;
+ VERIFY(err, 0 == (err = fastrpc_internal_munmap_fd(fl,
+ &p.munmap_fd)));
+ if (err)
+ goto bail;
+ break;
case FASTRPC_IOCTL_SETMODE:
switch ((uint32_t)ioctl_param) {
case FASTRPC_MODE_PARALLEL:
@@ -2826,16 +2892,14 @@
ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
cid = ctx - &me->channel[0];
if (code == SUBSYS_BEFORE_SHUTDOWN) {
- mutex_lock(&me->smd_mutex);
+ mutex_lock(&ctx->mut);
ctx->ssrcount++;
ctx->issubsystemup = 0;
- if (ctx->chan) {
- fastrpc_glink_close(ctx->chan, cid);
- ctx->chan = NULL;
- pr_info("'restart notifier: closed /dev/%s c %d %d'\n",
- gcinfo[cid].name, MAJOR(me->dev_no), cid);
- }
- mutex_unlock(&me->smd_mutex);
+ pr_info("'restart notifier: /dev/%s c %d %d'\n",
+ gcinfo[cid].name, MAJOR(me->dev_no), cid);
+ if (ctx->chan)
+ fastrpc_glink_stop(cid);
+ mutex_unlock(&ctx->mut);
if (cid == 0)
me->staticpd_flags = 0;
fastrpc_notify_drivers(me, cid);
@@ -2941,6 +3005,17 @@
struct cma *cma;
uint32_t val;
+
+ if (of_device_is_compatible(dev->of_node,
+ "qcom,msm-fastrpc-compute")) {
+ of_property_read_u32(dev->of_node, "qcom,adsp-remoteheap-vmid",
+ &gcinfo[0].rhvmid);
+
+ pr_info("ADSPRPC : vmids adsp=%d\n", gcinfo[0].rhvmid);
+
+ of_property_read_u32(dev->of_node, "qcom,rpc-latency-us",
+ &me->latency);
+ }
if (of_device_is_compatible(dev->of_node,
"qcom,msm-fastrpc-compute-cb"))
return fastrpc_cb_probe(dev);
@@ -2985,10 +3060,6 @@
return 0;
}
- err = of_property_read_u32(dev->of_node, "qcom,rpc-latency-us",
- &me->latency);
- if (err)
- me->latency = 0;
VERIFY(err, !of_platform_populate(pdev->dev.of_node,
fastrpc_match_table,
NULL, &pdev->dev));
@@ -3000,15 +3071,15 @@
static void fastrpc_deinit(void)
{
- struct fastrpc_apps *me = &gfa;
struct fastrpc_channel_ctx *chan = gcinfo;
int i, j;
for (i = 0; i < NUM_CHANNELS; i++, chan++) {
if (chan->chan) {
kref_put_mutex(&chan->kref,
- fastrpc_channel_close, &me->smd_mutex);
+ fastrpc_channel_close, &chan->mut);
chan->chan = NULL;
+ mutex_destroy(&chan->mut);
}
for (j = 0; j < NUM_SESSIONS; j++) {
struct fastrpc_session_ctx *sess = &chan->session[j];
diff --git a/drivers/char/adsprpc_shared.h b/drivers/char/adsprpc_shared.h
index 43edf71..e2f8983 100644
--- a/drivers/char/adsprpc_shared.h
+++ b/drivers/char/adsprpc_shared.h
@@ -29,6 +29,7 @@
#define FASTRPC_IOCTL_INIT_ATTRS _IOWR('R', 10, struct fastrpc_ioctl_init_attrs)
#define FASTRPC_IOCTL_INVOKE_CRC _IOWR('R', 11, struct fastrpc_ioctl_invoke_crc)
#define FASTRPC_IOCTL_CONTROL _IOWR('R', 12, struct fastrpc_ioctl_control)
+#define FASTRPC_IOCTL_MUNMAP_FD _IOWR('R', 13, struct fastrpc_ioctl_munmap_fd)
#define FASTRPC_GLINK_GUID "fastrpcglink-apps-dsp"
#define FASTRPC_SMD_GUID "fastrpcsmd-apps-dsp"
@@ -43,6 +44,9 @@
/* Set for buffers that are dma coherent */
#define FASTRPC_ATTR_COHERENT 0x4
+/* Fastrpc attribute for keeping the map persistent */
+#define FASTRPC_ATTR_KEEP_MAP 0x8
+
/* Driver should operate in parallel with the co-processor */
#define FASTRPC_MODE_PARALLEL 0
@@ -204,6 +208,13 @@
uintptr_t vaddrout; /* dsps virtual address */
};
+struct fastrpc_ioctl_munmap_fd {
+ int fd; /* fd */
+ uint32_t flags; /* control flags */
+ uintptr_t va; /* va */
+ ssize_t len; /* length */
+};
+
struct fastrpc_ioctl_perf { /* kernel performance data */
uintptr_t __user data;
uint32_t numkeys;
diff --git a/drivers/char/diag/diag_debugfs.c b/drivers/char/diag/diag_debugfs.c
index 40bfd74..0a3faba 100644
--- a/drivers/char/diag/diag_debugfs.c
+++ b/drivers/char/diag/diag_debugfs.c
@@ -77,7 +77,8 @@
"Time Sync Enabled: %d\n"
"MD session mode: %d\n"
"MD session mask: %d\n"
- "Uses Time API: %d\n",
+ "Uses Time API: %d\n"
+ "Supports PD buffering: %d\n",
chk_config_get_id(),
chk_polling_response(),
driver->polling_reg_flag,
@@ -92,11 +93,12 @@
driver->time_sync_enabled,
driver->md_session_mode,
driver->md_session_mask,
- driver->uses_time_api);
+ driver->uses_time_api,
+ driver->supports_pd_buffering);
for (i = 0; i < NUM_PERIPHERALS; i++) {
ret += scnprintf(buf+ret, buf_size-ret,
- "p: %s Feature: %02x %02x |%c%c%c%c%c%c%c%c%c|\n",
+ "p: %s Feature: %02x %02x |%c%c%c%c%c%c%c%c%c%c|\n",
PERIPHERAL_STRING(i),
driver->feature[i].feature_mask[0],
driver->feature[i].feature_mask[1],
@@ -105,6 +107,7 @@
driver->feature[i].encode_hdlc ? 'H':'h',
driver->feature[i].peripheral_buffering ? 'B':'b',
driver->feature[i].mask_centralization ? 'M':'m',
+ driver->feature[i].pd_buffering ? 'P':'p',
driver->feature[i].stm_support ? 'Q':'q',
driver->feature[i].sockets_enabled ? 'S':'s',
driver->feature[i].sent_feature_mask ? 'T':'t',
diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c
index b30bfad..8e5d836 100644
--- a/drivers/char/diag/diag_masks.c
+++ b/drivers/char/diag/diag_masks.c
@@ -554,6 +554,11 @@
mask_info);
return -EINVAL;
}
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ return -EINVAL;
+ }
if (!diag_apps_responds())
return 0;
@@ -655,7 +660,11 @@
mask_info);
return -EINVAL;
}
-
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ return -EINVAL;
+ }
if (!diag_apps_responds())
return 0;
@@ -668,6 +677,12 @@
rsp.status = MSG_STATUS_FAIL;
rsp.padding = 0;
mask = (struct diag_msg_mask_t *)mask_info->ptr;
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ mutex_unlock(&driver->msg_mask_lock);
+ return -EINVAL;
+ }
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
if ((req->ssid_first < mask->ssid_first) ||
(req->ssid_first > mask->ssid_last_tools)) {
@@ -710,11 +725,23 @@
mask_info);
return -EINVAL;
}
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ return -EINVAL;
+ }
req = (struct diag_msg_build_mask_t *)src_buf;
mutex_lock(&mask_info->lock);
mutex_lock(&driver->msg_mask_lock);
mask = (struct diag_msg_mask_t *)mask_info->ptr;
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&mask_info->lock);
+ return -EINVAL;
+ }
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
if (i < (driver->msg_mask_tbl_count - 1)) {
mask_next = mask;
@@ -833,6 +860,11 @@
mask_info);
return -EINVAL;
}
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ return -EINVAL;
+ }
req = (struct diag_msg_config_rsp_t *)src_buf;
@@ -840,6 +872,13 @@
mutex_lock(&driver->msg_mask_lock);
mask = (struct diag_msg_mask_t *)mask_info->ptr;
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&mask_info->lock);
+ return -EINVAL;
+ }
mask_info->status = (req->rt_mask) ? DIAG_CTRL_MASK_ALL_ENABLED :
DIAG_CTRL_MASK_ALL_DISABLED;
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
@@ -937,7 +976,11 @@
mask_info);
return -EINVAL;
}
-
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ return -EINVAL;
+ }
req = (struct diag_event_mask_config_t *)src_buf;
mask_len = EVENT_COUNT_TO_BYTES(req->num_bits);
if (mask_len <= 0 || mask_len > event_mask.mask_len) {
@@ -1000,6 +1043,11 @@
mask_info);
return -EINVAL;
}
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ return -EINVAL;
+ }
toggle = *(src_buf + 1);
mutex_lock(&mask_info->lock);
@@ -1063,6 +1111,11 @@
mask_info);
return -EINVAL;
}
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ return -EINVAL;
+ }
if (!diag_apps_responds())
return 0;
@@ -1082,6 +1135,11 @@
write_len += rsp_header_len;
log_item = (struct diag_log_mask_t *)mask_info->ptr;
+ if (!log_item->ptr) {
+ pr_err("diag: Invalid input in %s, mask: %pK\n",
+ __func__, log_item);
+ return -EINVAL;
+ }
for (i = 0; i < MAX_EQUIP_ID; i++, log_item++) {
if (log_item->equip_id != req->equip_id)
continue;
@@ -1187,11 +1245,20 @@
mask_info);
return -EINVAL;
}
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ return -EINVAL;
+ }
req = (struct diag_log_config_req_t *)src_buf;
read_len += req_header_len;
mask = (struct diag_log_mask_t *)mask_info->ptr;
-
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ return -EINVAL;
+ }
if (req->equip_id >= MAX_EQUIP_ID) {
pr_err("diag: In %s, Invalid logging mask request, equip_id: %d\n",
__func__, req->equip_id);
@@ -1314,9 +1381,17 @@
mask_info);
return -EINVAL;
}
-
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ return -EINVAL;
+ }
mask = (struct diag_log_mask_t *)mask_info->ptr;
-
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ return -EINVAL;
+ }
for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
mutex_lock(&mask->lock);
memset(mask->ptr, 0, mask->range);
@@ -1586,7 +1661,7 @@
static void __diag_mask_exit(struct diag_mask_info *mask_info)
{
- if (!mask_info)
+ if (!mask_info || !mask_info->ptr)
return;
mutex_lock(&mask_info->lock);
@@ -1642,11 +1717,17 @@
int i;
struct diag_log_mask_t *mask = NULL;
- if (!mask_info)
+ if (!mask_info || !mask_info->ptr)
return;
mutex_lock(&mask_info->lock);
mask = (struct diag_log_mask_t *)mask_info->ptr;
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ mutex_unlock(&mask_info->lock);
+ return;
+ }
for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
kfree(mask->ptr);
mask->ptr = NULL;
@@ -1722,11 +1803,18 @@
int i;
struct diag_msg_mask_t *mask = NULL;
- if (!mask_info)
+ if (!mask_info || !mask_info->ptr)
return;
mutex_lock(&mask_info->lock);
mutex_lock(&driver->msg_mask_lock);
mask = (struct diag_msg_mask_t *)mask_info->ptr;
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&mask_info->lock);
+ return;
+ }
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
kfree(mask->ptr);
mask->ptr = NULL;
@@ -1888,6 +1976,11 @@
if (!mask_info)
return -EIO;
+ if (!mask_info->ptr || !mask_info->update_buf) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK, mask_info->update_buf: %pK\n",
+ __func__, mask_info->ptr, mask_info->update_buf);
+ return -EINVAL;
+ }
mutex_lock(&driver->diag_maskclear_mutex);
if (driver->mask_clear) {
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
@@ -1900,6 +1993,13 @@
mutex_lock(&driver->msg_mask_lock);
mask = (struct diag_msg_mask_t *)(mask_info->ptr);
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&mask_info->lock);
+ return -EINVAL;
+ }
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
ptr = mask_info->update_buf;
len = 0;
@@ -1957,8 +2057,20 @@
if (!mask_info)
return -EIO;
+ if (!mask_info->ptr || !mask_info->update_buf) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK, mask_info->update_buf: %pK\n",
+ __func__, mask_info->ptr, mask_info->update_buf);
+ return -EINVAL;
+ }
+
mutex_lock(&mask_info->lock);
mask = (struct diag_log_mask_t *)(mask_info->ptr);
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ mutex_unlock(&mask_info->lock);
+ return -EINVAL;
+ }
for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
ptr = mask_info->update_buf;
len = 0;
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index 74180e5..9de40b0 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -519,6 +519,7 @@
uint8_t encode_hdlc;
uint8_t untag_header;
uint8_t peripheral_buffering;
+ uint8_t pd_buffering;
uint8_t mask_centralization;
uint8_t stm_support;
uint8_t sockets_enabled;
@@ -552,6 +553,7 @@
int supports_separate_cmdrsp;
int supports_apps_hdlc_encoding;
int supports_apps_header_untagging;
+ int supports_pd_buffering;
int peripheral_untag[NUM_PERIPHERALS];
int supports_sockets;
/* The state requested in the STM command */
@@ -605,8 +607,8 @@
struct diagfwd_info *diagfwd_cmd[NUM_PERIPHERALS];
struct diagfwd_info *diagfwd_dci_cmd[NUM_PERIPHERALS];
struct diag_feature_t feature[NUM_PERIPHERALS];
- struct diag_buffering_mode_t buffering_mode[NUM_PERIPHERALS];
- uint8_t buffering_flag[NUM_PERIPHERALS];
+ struct diag_buffering_mode_t buffering_mode[NUM_MD_SESSIONS];
+ uint8_t buffering_flag[NUM_MD_SESSIONS];
struct mutex mode_lock;
unsigned char *user_space_data_buf;
uint8_t user_space_data_busy;
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 354e676..919ea0f 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -1960,12 +1960,33 @@
static int diag_ioctl_set_buffering_mode(unsigned long ioarg)
{
struct diag_buffering_mode_t params;
+ int peripheral = 0;
+ uint8_t diag_id = 0;
if (copy_from_user(¶ms, (void __user *)ioarg, sizeof(params)))
return -EFAULT;
- if (params.peripheral >= NUM_PERIPHERALS)
- return -EINVAL;
+ diag_map_pd_to_diagid(params.peripheral, &diag_id, &peripheral);
+
+ if ((peripheral < 0) ||
+ peripheral >= NUM_PERIPHERALS) {
+ pr_err("diag: In %s, invalid peripheral = %d\n", __func__,
+ peripheral);
+ return -EIO;
+ }
+
+ if (params.peripheral > NUM_PERIPHERALS &&
+ !driver->feature[peripheral].pd_buffering) {
+ pr_err("diag: In %s, pd buffering not supported for peripheral:%d\n",
+ __func__, peripheral);
+ return -EIO;
+ }
+
+ if (!driver->feature[peripheral].peripheral_buffering) {
+ pr_err("diag: In %s, peripheral %d doesn't support buffering\n",
+ __func__, peripheral);
+ return -EIO;
+ }
mutex_lock(&driver->mode_lock);
driver->buffering_flag[params.peripheral] = 1;
@@ -1976,24 +1997,29 @@
static int diag_ioctl_peripheral_drain_immediate(unsigned long ioarg)
{
- uint8_t peripheral;
+ uint8_t pd, diag_id = 0;
+ int peripheral = 0;
- if (copy_from_user(&peripheral, (void __user *)ioarg, sizeof(uint8_t)))
+ if (copy_from_user(&pd, (void __user *)ioarg, sizeof(uint8_t)))
return -EFAULT;
- if (peripheral >= NUM_PERIPHERALS) {
+ diag_map_pd_to_diagid(pd, &diag_id, &peripheral);
+
+ if ((peripheral < 0) ||
+ peripheral >= NUM_PERIPHERALS) {
pr_err("diag: In %s, invalid peripheral %d\n", __func__,
peripheral);
return -EINVAL;
}
- if (!driver->feature[peripheral].peripheral_buffering) {
- pr_err("diag: In %s, peripheral %d doesn't support buffering\n",
- __func__, peripheral);
+ if (pd > NUM_PERIPHERALS &&
+ !driver->feature[peripheral].pd_buffering) {
+ pr_err("diag: In %s, pd buffering not supported for peripheral:%d\n",
+ __func__, peripheral);
return -EIO;
}
- return diag_send_peripheral_drain_immediate(peripheral);
+ return diag_send_peripheral_drain_immediate(pd, diag_id, peripheral);
}
static int diag_ioctl_dci_support(unsigned long ioarg)
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index 34624ad..4195b40 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -1725,6 +1725,7 @@
driver->supports_separate_cmdrsp = 1;
driver->supports_apps_hdlc_encoding = 1;
driver->supports_apps_header_untagging = 1;
+ driver->supports_pd_buffering = 1;
for (i = 0; i < NUM_PERIPHERALS; i++)
driver->peripheral_untag[i] = 0;
mutex_init(&driver->diag_hdlc_mutex);
@@ -1755,6 +1756,7 @@
driver->feature[i].stm_support = DISABLE_STM;
driver->feature[i].rcvd_feature_mask = 0;
driver->feature[i].peripheral_buffering = 0;
+ driver->feature[i].pd_buffering = 0;
driver->feature[i].encode_hdlc = 0;
driver->feature[i].untag_header =
DISABLE_PKT_HEADER_UNTAGGING;
@@ -1762,6 +1764,9 @@
driver->feature[i].log_on_demand = 0;
driver->feature[i].sent_feature_mask = 0;
driver->feature[i].diag_id_support = 0;
+ }
+
+ for (i = 0; i < NUM_MD_SESSIONS; i++) {
driver->buffering_mode[i].peripheral = i;
driver->buffering_mode[i].mode = DIAG_BUFFERING_MODE_STREAMING;
driver->buffering_mode[i].high_wm_val = DEFAULT_HIGH_WM_VAL;
diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c
index 26661e6..eaca17a 100644
--- a/drivers/char/diag/diagfwd_cntl.c
+++ b/drivers/char/diag/diagfwd_cntl.c
@@ -423,6 +423,8 @@
enable_socket_feature(peripheral);
if (FEATURE_SUPPORTED(F_DIAG_DIAGID_SUPPORT))
driver->feature[peripheral].diag_id_support = 1;
+ if (FEATURE_SUPPORTED(F_DIAG_PD_BUFFERING))
+ driver->feature[peripheral].pd_buffering = 1;
}
process_socket_feature(peripheral);
@@ -947,32 +949,54 @@
}
static void diag_create_diag_mode_ctrl_pkt(unsigned char *dest_buf,
- int real_time)
+ uint8_t diag_id, int real_time)
{
struct diag_ctrl_msg_diagmode diagmode;
+ struct diag_ctrl_msg_diagmode_v2 diagmode_v2;
int msg_size = sizeof(struct diag_ctrl_msg_diagmode);
+ int msg_size_2 = sizeof(struct diag_ctrl_msg_diagmode_v2);
if (!dest_buf)
return;
- diagmode.ctrl_pkt_id = DIAG_CTRL_MSG_DIAGMODE;
- diagmode.ctrl_pkt_data_len = DIAG_MODE_PKT_LEN;
- diagmode.version = 1;
- diagmode.sleep_vote = real_time ? 1 : 0;
- /*
- * 0 - Disables real-time logging (to prevent
- * frequent APPS wake-ups, etc.).
- * 1 - Enable real-time logging
- */
- diagmode.real_time = real_time;
- diagmode.use_nrt_values = 0;
- diagmode.commit_threshold = 0;
- diagmode.sleep_threshold = 0;
- diagmode.sleep_time = 0;
- diagmode.drain_timer_val = 0;
- diagmode.event_stale_timer_val = 0;
-
- memcpy(dest_buf, &diagmode, msg_size);
+ if (diag_id) {
+ diagmode_v2.ctrl_pkt_id = DIAG_CTRL_MSG_DIAGMODE;
+ diagmode_v2.ctrl_pkt_data_len = DIAG_MODE_PKT_LEN_V2;
+ diagmode_v2.version = 2;
+ diagmode_v2.sleep_vote = real_time ? 1 : 0;
+ /*
+ * 0 - Disables real-time logging (to prevent
+ * frequent APPS wake-ups, etc.).
+ * 1 - Enable real-time logging
+ */
+ diagmode_v2.real_time = real_time;
+ diagmode_v2.use_nrt_values = 0;
+ diagmode_v2.commit_threshold = 0;
+ diagmode_v2.sleep_threshold = 0;
+ diagmode_v2.sleep_time = 0;
+ diagmode_v2.drain_timer_val = 0;
+ diagmode_v2.event_stale_timer_val = 0;
+ diagmode_v2.diag_id = diag_id;
+ memcpy(dest_buf, &diagmode_v2, msg_size_2);
+ } else {
+ diagmode.ctrl_pkt_id = DIAG_CTRL_MSG_DIAGMODE;
+ diagmode.ctrl_pkt_data_len = DIAG_MODE_PKT_LEN;
+ diagmode.version = 1;
+ diagmode.sleep_vote = real_time ? 1 : 0;
+ /*
+ * 0 - Disables real-time logging (to prevent
+ * frequent APPS wake-ups, etc.).
+ * 1 - Enable real-time logging
+ */
+ diagmode.real_time = real_time;
+ diagmode.use_nrt_values = 0;
+ diagmode.commit_threshold = 0;
+ diagmode.sleep_threshold = 0;
+ diagmode.sleep_time = 0;
+ diagmode.drain_timer_val = 0;
+ diagmode.event_stale_timer_val = 0;
+ memcpy(dest_buf, &diagmode, msg_size);
+ }
}
void diag_update_proc_vote(uint16_t proc, uint8_t vote, int index)
@@ -1057,7 +1081,7 @@
memcpy(buf + write_len, &dci_header, dci_header_size);
write_len += dci_header_size;
- diag_create_diag_mode_ctrl_pkt(buf + write_len, real_time);
+ diag_create_diag_mode_ctrl_pkt(buf + write_len, 0, real_time);
write_len += msg_size;
*(buf + write_len) = CONTROL_CHAR; /* End Terminator */
write_len += sizeof(uint8_t);
@@ -1163,14 +1187,18 @@
}
#endif
-static int __diag_send_real_time_update(uint8_t peripheral, int real_time)
+static int __diag_send_real_time_update(uint8_t peripheral, int real_time,
+ uint8_t diag_id)
{
- char buf[sizeof(struct diag_ctrl_msg_diagmode)];
- int msg_size = sizeof(struct diag_ctrl_msg_diagmode);
+ char buf[sizeof(struct diag_ctrl_msg_diagmode_v2)];
+ int msg_size = 0;
int err = 0;
- if (peripheral >= NUM_PERIPHERALS)
+ if (peripheral >= NUM_PERIPHERALS) {
+ pr_err("diag: In %s, invalid peripheral %d\n", __func__,
+ peripheral);
return -EINVAL;
+ }
if (!driver->diagfwd_cntl[peripheral] ||
!driver->diagfwd_cntl[peripheral]->ch_open) {
@@ -1185,12 +1213,17 @@
return -EINVAL;
}
- diag_create_diag_mode_ctrl_pkt(buf, real_time);
+ msg_size = (diag_id ? sizeof(struct diag_ctrl_msg_diagmode_v2) :
+ sizeof(struct diag_ctrl_msg_diagmode));
+
+ diag_create_diag_mode_ctrl_pkt(buf, diag_id, real_time);
mutex_lock(&driver->diag_cntl_mutex);
+
err = diagfwd_write(peripheral, TYPE_CNTL, buf, msg_size);
+
if (err && err != -ENODEV) {
- pr_err("diag: In %s, unable to write to socket, peripheral: %d, type: %d, len: %d, err: %d\n",
+ pr_err("diag: In %s, unable to write, peripheral: %d, type: %d, len: %d, err: %d\n",
__func__, peripheral, TYPE_CNTL,
msg_size, err);
} else {
@@ -1216,27 +1249,56 @@
return -EINVAL;
}
- return __diag_send_real_time_update(peripheral, real_time);
+ return __diag_send_real_time_update(peripheral, real_time, 0);
+}
+
+void diag_map_pd_to_diagid(uint8_t pd, uint8_t *diag_id, int *peripheral)
+{
+ if (!diag_search_diagid_by_pd(pd, (void *)diag_id,
+ (void *)peripheral)) {
+ *diag_id = 0;
+ if ((pd >= 0) && pd < NUM_PERIPHERALS)
+ *peripheral = pd;
+ else
+ *peripheral = -EINVAL;
+ }
+
+ if (*peripheral >= 0)
+ if (!driver->feature[*peripheral].pd_buffering)
+ *diag_id = 0;
}
int diag_send_peripheral_buffering_mode(struct diag_buffering_mode_t *params)
{
int err = 0;
int mode = MODE_REALTIME;
- uint8_t peripheral = 0;
+ int peripheral = 0;
+ uint8_t diag_id = 0;
if (!params)
return -EIO;
- peripheral = params->peripheral;
- if (peripheral >= NUM_PERIPHERALS) {
+ diag_map_pd_to_diagid(params->peripheral,
+ &diag_id, &peripheral);
+
+ if ((peripheral < 0) ||
+ peripheral >= NUM_PERIPHERALS) {
pr_err("diag: In %s, invalid peripheral %d\n", __func__,
peripheral);
return -EINVAL;
}
- if (!driver->buffering_flag[peripheral])
+ if (!driver->buffering_flag[params->peripheral]) {
+ pr_err("diag: In %s, buffering flag not set for %d\n", __func__,
+ params->peripheral);
return -EINVAL;
+ }
+
+ if (!driver->feature[peripheral].peripheral_buffering) {
+ pr_err("diag: In %s, peripheral %d doesn't support buffering\n",
+ __func__, peripheral);
+ return -EIO;
+ }
switch (params->mode) {
case DIAG_BUFFERING_MODE_STREAMING:
@@ -1255,7 +1317,7 @@
if (!driver->feature[peripheral].peripheral_buffering) {
pr_debug("diag: In %s, peripheral %d doesn't support buffering\n",
__func__, peripheral);
- driver->buffering_flag[peripheral] = 0;
+ driver->buffering_flag[params->peripheral] = 0;
return -EIO;
}
@@ -1270,35 +1332,39 @@
(params->low_wm_val != DIAG_MIN_WM_VAL))) {
pr_err("diag: In %s, invalid watermark values, high: %d, low: %d, peripheral: %d\n",
__func__, params->high_wm_val, params->low_wm_val,
- peripheral);
+ params->peripheral);
return -EINVAL;
}
mutex_lock(&driver->mode_lock);
- err = diag_send_buffering_tx_mode_pkt(peripheral, params);
+ err = diag_send_buffering_tx_mode_pkt(peripheral, diag_id, params);
if (err) {
pr_err("diag: In %s, unable to send buffering mode packet to peripheral %d, err: %d\n",
__func__, peripheral, err);
goto fail;
}
- err = diag_send_buffering_wm_values(peripheral, params);
+ err = diag_send_buffering_wm_values(peripheral, diag_id, params);
if (err) {
pr_err("diag: In %s, unable to send buffering wm value packet to peripheral %d, err: %d\n",
__func__, peripheral, err);
goto fail;
}
- err = __diag_send_real_time_update(peripheral, mode);
+ err = __diag_send_real_time_update(peripheral, mode, diag_id);
if (err) {
pr_err("diag: In %s, unable to send mode update to peripheral %d, mode: %d, err: %d\n",
__func__, peripheral, mode, err);
goto fail;
}
- driver->buffering_mode[peripheral].peripheral = peripheral;
- driver->buffering_mode[peripheral].mode = params->mode;
- driver->buffering_mode[peripheral].low_wm_val = params->low_wm_val;
- driver->buffering_mode[peripheral].high_wm_val = params->high_wm_val;
+ driver->buffering_mode[params->peripheral].peripheral =
+ params->peripheral;
+ driver->buffering_mode[params->peripheral].mode =
+ params->mode;
+ driver->buffering_mode[params->peripheral].low_wm_val =
+ params->low_wm_val;
+ driver->buffering_mode[params->peripheral].high_wm_val =
+ params->high_wm_val;
if (params->mode == DIAG_BUFFERING_MODE_STREAMING)
- driver->buffering_flag[peripheral] = 0;
+ driver->buffering_flag[params->peripheral] = 0;
fail:
mutex_unlock(&driver->mode_lock);
return err;
@@ -1337,10 +1403,12 @@
return err;
}
-int diag_send_peripheral_drain_immediate(uint8_t peripheral)
+int diag_send_peripheral_drain_immediate(uint8_t pd,
+ uint8_t diag_id, int peripheral)
{
int err = 0;
struct diag_ctrl_drain_immediate ctrl_pkt;
+ struct diag_ctrl_drain_immediate_v2 ctrl_pkt_v2;
if (!driver->feature[peripheral].peripheral_buffering) {
pr_debug("diag: In %s, peripheral %d doesn't support buffering\n",
@@ -1355,32 +1423,57 @@
return -ENODEV;
}
- ctrl_pkt.pkt_id = DIAG_CTRL_MSG_PERIPHERAL_BUF_DRAIN_IMM;
- /* The length of the ctrl pkt is size of version and stream id */
- ctrl_pkt.len = sizeof(uint32_t) + sizeof(uint8_t);
- ctrl_pkt.version = 1;
- ctrl_pkt.stream_id = 1;
-
- err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt, sizeof(ctrl_pkt));
- if (err && err != -ENODEV) {
- pr_err("diag: Unable to send drain immediate ctrl packet to peripheral %d, err: %d\n",
- peripheral, err);
+ if (diag_id && driver->feature[peripheral].pd_buffering) {
+ ctrl_pkt_v2.pkt_id = DIAG_CTRL_MSG_PERIPHERAL_BUF_DRAIN_IMM;
+ /*
+ * The length of the ctrl pkt is size of version,
+ * diag_id and stream id
+ */
+ ctrl_pkt_v2.len = sizeof(uint32_t) + (2 * sizeof(uint8_t));
+ ctrl_pkt_v2.version = 2;
+ ctrl_pkt_v2.diag_id = diag_id;
+ ctrl_pkt_v2.stream_id = 1;
+ err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt_v2,
+ sizeof(ctrl_pkt_v2));
+ if (err && err != -ENODEV) {
+ pr_err("diag: Unable to send drain immediate ctrl packet to peripheral %d, err: %d\n",
+ peripheral, err);
+ }
+ } else {
+ ctrl_pkt.pkt_id = DIAG_CTRL_MSG_PERIPHERAL_BUF_DRAIN_IMM;
+ /*
+ * The length of the ctrl pkt is
+ * size of version and stream id
+ */
+ ctrl_pkt.len = sizeof(uint32_t) + sizeof(uint8_t);
+ ctrl_pkt.version = 1;
+ ctrl_pkt.stream_id = 1;
+ err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt,
+ sizeof(ctrl_pkt));
+ if (err && err != -ENODEV) {
+ pr_err("diag: Unable to send drain immediate ctrl packet to peripheral %d, err: %d\n",
+ peripheral, err);
+ }
}
return err;
}
int diag_send_buffering_tx_mode_pkt(uint8_t peripheral,
- struct diag_buffering_mode_t *params)
+ uint8_t diag_id, struct diag_buffering_mode_t *params)
{
int err = 0;
struct diag_ctrl_peripheral_tx_mode ctrl_pkt;
+ struct diag_ctrl_peripheral_tx_mode_v2 ctrl_pkt_v2;
if (!params)
return -EIO;
- if (peripheral >= NUM_PERIPHERALS)
+ if (peripheral >= NUM_PERIPHERALS) {
+ pr_err("diag: In %s, invalid peripheral %d\n", __func__,
+ peripheral);
return -EINVAL;
+ }
if (!driver->feature[peripheral].peripheral_buffering) {
pr_debug("diag: In %s, peripheral %d doesn't support buffering\n",
@@ -1388,9 +1481,6 @@
return -EINVAL;
}
- if (params->peripheral != peripheral)
- return -EINVAL;
-
switch (params->mode) {
case DIAG_BUFFERING_MODE_STREAMING:
case DIAG_BUFFERING_MODE_THRESHOLD:
@@ -1402,36 +1492,67 @@
return -EINVAL;
}
- ctrl_pkt.pkt_id = DIAG_CTRL_MSG_CONFIG_PERIPHERAL_TX_MODE;
- /* Control packet length is size of version, stream_id and tx_mode */
- ctrl_pkt.len = sizeof(uint32_t) + (2 * sizeof(uint8_t));
- ctrl_pkt.version = 1;
- ctrl_pkt.stream_id = 1;
- ctrl_pkt.tx_mode = params->mode;
+ if (diag_id &&
+ driver->feature[peripheral].pd_buffering) {
- err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt, sizeof(ctrl_pkt));
- if (err && err != -ENODEV) {
- pr_err("diag: Unable to send tx_mode ctrl packet to peripheral %d, err: %d\n",
- peripheral, err);
- goto fail;
+ ctrl_pkt_v2.pkt_id = DIAG_CTRL_MSG_CONFIG_PERIPHERAL_TX_MODE;
+ /*
+ * Control packet length is size of version, diag_id,
+ * stream_id and tx_mode
+ */
+ ctrl_pkt_v2.len = sizeof(uint32_t) + (3 * sizeof(uint8_t));
+ ctrl_pkt_v2.version = 2;
+ ctrl_pkt_v2.diag_id = diag_id;
+ ctrl_pkt_v2.stream_id = 1;
+ ctrl_pkt_v2.tx_mode = params->mode;
+
+ err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt_v2,
+ sizeof(ctrl_pkt_v2));
+ if (err && err != -ENODEV) {
+ pr_err("diag: Unable to send tx_mode ctrl packet to peripheral %d, err: %d\n",
+ peripheral, err);
+ goto fail;
+ }
+ } else {
+ ctrl_pkt.pkt_id = DIAG_CTRL_MSG_CONFIG_PERIPHERAL_TX_MODE;
+ /*
+ * Control packet length is size of version,
+ * stream_id and tx_mode
+ */
+ ctrl_pkt.len = sizeof(uint32_t) + (2 * sizeof(uint8_t));
+ ctrl_pkt.version = 1;
+ ctrl_pkt.stream_id = 1;
+ ctrl_pkt.tx_mode = params->mode;
+
+ err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt,
+ sizeof(ctrl_pkt));
+ if (err && err != -ENODEV) {
+ pr_err("diag: Unable to send tx_mode ctrl packet to peripheral %d, err: %d\n",
+ peripheral, err);
+ goto fail;
+ }
}
- driver->buffering_mode[peripheral].mode = params->mode;
+ driver->buffering_mode[params->peripheral].mode = params->mode;
fail:
return err;
}
int diag_send_buffering_wm_values(uint8_t peripheral,
- struct diag_buffering_mode_t *params)
+ uint8_t diag_id, struct diag_buffering_mode_t *params)
{
int err = 0;
struct diag_ctrl_set_wq_val ctrl_pkt;
+ struct diag_ctrl_set_wq_val_v2 ctrl_pkt_v2;
if (!params)
return -EIO;
- if (peripheral >= NUM_PERIPHERALS)
+ if (peripheral >= NUM_PERIPHERALS) {
+ pr_err("diag: In %s, invalid peripheral %d\n", __func__,
+ peripheral);
return -EINVAL;
+ }
if (!driver->feature[peripheral].peripheral_buffering) {
pr_debug("diag: In %s, peripheral %d doesn't support buffering\n",
@@ -1446,9 +1567,6 @@
return -ENODEV;
}
- if (params->peripheral != peripheral)
- return -EINVAL;
-
switch (params->mode) {
case DIAG_BUFFERING_MODE_STREAMING:
case DIAG_BUFFERING_MODE_THRESHOLD:
@@ -1460,21 +1578,45 @@
return -EINVAL;
}
- ctrl_pkt.pkt_id = DIAG_CTRL_MSG_CONFIG_PERIPHERAL_WMQ_VAL;
- /* Control packet length is size of version, stream_id and wmq values */
- ctrl_pkt.len = sizeof(uint32_t) + (3 * sizeof(uint8_t));
- ctrl_pkt.version = 1;
- ctrl_pkt.stream_id = 1;
- ctrl_pkt.high_wm_val = params->high_wm_val;
- ctrl_pkt.low_wm_val = params->low_wm_val;
+ if (diag_id &&
+ driver->feature[peripheral].pd_buffering) {
+ ctrl_pkt_v2.pkt_id = DIAG_CTRL_MSG_CONFIG_PERIPHERAL_WMQ_VAL;
+ /*
+ * Control packet length is size of version, diag_id,
+ * stream_id and wmq values
+ */
+ ctrl_pkt_v2.len = sizeof(uint32_t) + (4 * sizeof(uint8_t));
+ ctrl_pkt_v2.version = 2;
+ ctrl_pkt_v2.diag_id = diag_id;
+ ctrl_pkt_v2.stream_id = 1;
+ ctrl_pkt_v2.high_wm_val = params->high_wm_val;
+ ctrl_pkt_v2.low_wm_val = params->low_wm_val;
- err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt,
- sizeof(ctrl_pkt));
- if (err && err != -ENODEV) {
- pr_err("diag: Unable to send watermark values to peripheral %d, err: %d\n",
- peripheral, err);
+ err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt_v2,
+ sizeof(ctrl_pkt_v2));
+ if (err && err != -ENODEV) {
+ pr_err("diag: Unable to send watermark values to peripheral %d, err: %d\n",
+ peripheral, err);
+ }
+ } else {
+ ctrl_pkt.pkt_id = DIAG_CTRL_MSG_CONFIG_PERIPHERAL_WMQ_VAL;
+ /*
+ * Control packet length is size of version,
+ * stream_id and wmq values
+ */
+ ctrl_pkt.len = sizeof(uint32_t) + (3 * sizeof(uint8_t));
+ ctrl_pkt.version = 1;
+ ctrl_pkt.stream_id = 1;
+ ctrl_pkt.high_wm_val = params->high_wm_val;
+ ctrl_pkt.low_wm_val = params->low_wm_val;
+
+ err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt,
+ sizeof(ctrl_pkt));
+ if (err && err != -ENODEV) {
+ pr_err("diag: Unable to send watermark values to peripheral %d, err: %d\n",
+ peripheral, err);
+ }
}
-
return err;
}
diff --git a/drivers/char/diag/diagfwd_cntl.h b/drivers/char/diag/diagfwd_cntl.h
index 1d8d167..848ad87 100644
--- a/drivers/char/diag/diagfwd_cntl.h
+++ b/drivers/char/diag/diagfwd_cntl.h
@@ -69,6 +69,7 @@
#define F_DIAG_DCI_EXTENDED_HEADER_SUPPORT 14
#define F_DIAG_DIAGID_SUPPORT 15
#define F_DIAG_PKT_HEADER_UNTAG 16
+#define F_DIAG_PD_BUFFERING 17
#define ENABLE_SEPARATE_CMDRSP 1
#define DISABLE_SEPARATE_CMDRSP 0
@@ -86,7 +87,8 @@
#define ENABLE_PKT_HEADER_UNTAGGING 1
#define DISABLE_PKT_HEADER_UNTAGGING 0
-#define DIAG_MODE_PKT_LEN 36
+#define DIAG_MODE_PKT_LEN 36
+#define DIAG_MODE_PKT_LEN_V2 37
struct diag_ctrl_pkt_header_t {
uint32_t pkt_id;
@@ -172,6 +174,21 @@
uint32_t event_stale_timer_val;
} __packed;
+struct diag_ctrl_msg_diagmode_v2 {
+ uint32_t ctrl_pkt_id;
+ uint32_t ctrl_pkt_data_len;
+ uint32_t version;
+ uint32_t sleep_vote;
+ uint32_t real_time;
+ uint32_t use_nrt_values;
+ uint32_t commit_threshold;
+ uint32_t sleep_threshold;
+ uint32_t sleep_time;
+ uint32_t drain_timer_val;
+ uint32_t event_stale_timer_val;
+ uint8_t diag_id;
+} __packed;
+
struct diag_ctrl_msg_stm {
uint32_t ctrl_pkt_id;
uint32_t ctrl_pkt_data_len;
@@ -250,6 +267,15 @@
uint8_t tx_mode;
} __packed;
+struct diag_ctrl_peripheral_tx_mode_v2 {
+ uint32_t pkt_id;
+ uint32_t len;
+ uint32_t version;
+ uint8_t diag_id;
+ uint8_t stream_id;
+ uint8_t tx_mode;
+} __packed;
+
struct diag_ctrl_drain_immediate {
uint32_t pkt_id;
uint32_t len;
@@ -257,6 +283,14 @@
uint8_t stream_id;
} __packed;
+struct diag_ctrl_drain_immediate_v2 {
+ uint32_t pkt_id;
+ uint32_t len;
+ uint32_t version;
+ uint8_t diag_id;
+ uint8_t stream_id;
+} __packed;
+
struct diag_ctrl_set_wq_val {
uint32_t pkt_id;
uint32_t len;
@@ -266,6 +300,16 @@
uint8_t low_wm_val;
} __packed;
+struct diag_ctrl_set_wq_val_v2 {
+ uint32_t pkt_id;
+ uint32_t len;
+ uint32_t version;
+ uint8_t diag_id;
+ uint8_t stream_id;
+ uint8_t high_wm_val;
+ uint8_t low_wm_val;
+} __packed;
+
struct diag_ctrl_diagid {
uint32_t pkt_id;
uint32_t len;
@@ -290,9 +334,10 @@
void diag_update_real_time_vote(uint16_t proc, uint8_t real_time, int index);
void diag_real_time_work_fn(struct work_struct *work);
int diag_send_stm_state(uint8_t peripheral, uint8_t stm_control_data);
-int diag_send_peripheral_drain_immediate(uint8_t peripheral);
+int diag_send_peripheral_drain_immediate(uint8_t pd,
+ uint8_t diag_id, int peripheral);
int diag_send_buffering_tx_mode_pkt(uint8_t peripheral,
- struct diag_buffering_mode_t *params);
+ uint8_t diag_id, struct diag_buffering_mode_t *params);
int diag_send_buffering_wm_values(uint8_t peripheral,
- struct diag_buffering_mode_t *params);
+ uint8_t diag_id, struct diag_buffering_mode_t *params);
#endif
diff --git a/drivers/char/hw_random/msm_rng.c b/drivers/char/hw_random/msm_rng.c
index d5dd8ae..fdcef1d 100644
--- a/drivers/char/hw_random/msm_rng.c
+++ b/drivers/char/hw_random/msm_rng.c
@@ -53,6 +53,9 @@
#define MAX_HW_FIFO_DEPTH 16 /* FIFO is 16 words deep */
#define MAX_HW_FIFO_SIZE (MAX_HW_FIFO_DEPTH * 4) /* FIFO is 32 bits wide */
+#define RETRY_MAX_CNT 5 /* max retry times to read register */
+#define RETRY_DELAY_INTERVAL 440 /* retry delay interval in us */
+
struct msm_rng_device {
struct platform_device *pdev;
void __iomem *base;
@@ -96,7 +99,7 @@
struct platform_device *pdev;
void __iomem *base;
size_t currsize = 0;
- u32 val;
+ u32 val = 0;
u32 *retdata = data;
int ret;
int failed = 0;
@@ -113,39 +116,41 @@
if (msm_rng_dev->qrng_perf_client) {
ret = msm_bus_scale_client_update_request(
msm_rng_dev->qrng_perf_client, 1);
- if (ret)
+ if (ret) {
pr_err("bus_scale_client_update_req failed!\n");
+ goto bus_err;
+ }
}
/* enable PRNG clock */
ret = clk_prepare_enable(msm_rng_dev->prng_clk);
if (ret) {
- dev_err(&pdev->dev, "failed to enable clock in callback\n");
+ pr_err("failed to enable prng clock\n");
goto err;
}
/* read random data from h/w */
do {
/* check status bit if data is available */
- while (!(readl_relaxed(base + PRNG_STATUS_OFFSET)
+ if (!(readl_relaxed(base + PRNG_STATUS_OFFSET)
& 0x00000001)) {
- if (failed == 10) {
- pr_err("Data not available after retry\n");
+ if (failed++ == RETRY_MAX_CNT) {
+ if (currsize == 0)
+ pr_err("Data not available\n");
break;
}
- pr_err("msm_rng:Data not available!\n");
- msleep_interruptible(10);
- failed++;
+ udelay(RETRY_DELAY_INTERVAL);
+ } else {
+
+ /* read FIFO */
+ val = readl_relaxed(base + PRNG_DATA_OUT_OFFSET);
+
+ /* write data back to callers pointer */
+ *(retdata++) = val;
+ currsize += 4;
+ /* make sure we stay on 32bit boundary */
+ if ((max - currsize) < 4)
+ break;
}
- /* read FIFO */
- val = readl_relaxed(base + PRNG_DATA_OUT_OFFSET);
-
- /* write data back to callers pointer */
- *(retdata++) = val;
- currsize += 4;
- /* make sure we stay on 32bit boundary */
- if ((max - currsize) < 4)
- break;
-
} while (currsize < max);
/* vote to turn off clock */
@@ -157,6 +162,7 @@
if (ret)
pr_err("bus_scale_client_update_req failed!\n");
}
+bus_err:
mutex_unlock(&msm_rng_dev->rng_lock);
val = 0L;
diff --git a/drivers/clk/mvebu/ap806-system-controller.c b/drivers/clk/mvebu/ap806-system-controller.c
index 02023ba..962e0c5 100644
--- a/drivers/clk/mvebu/ap806-system-controller.c
+++ b/drivers/clk/mvebu/ap806-system-controller.c
@@ -55,21 +55,39 @@
freq_mode = reg & AP806_SAR_CLKFREQ_MODE_MASK;
switch (freq_mode) {
- case 0x0 ... 0x5:
+ case 0x0:
+ case 0x1:
cpuclk_freq = 2000;
break;
- case 0x6 ... 0xB:
+ case 0x6:
+ case 0x7:
cpuclk_freq = 1800;
break;
- case 0xC ... 0x11:
+ case 0x4:
+ case 0xB:
+ case 0xD:
cpuclk_freq = 1600;
break;
- case 0x12 ... 0x16:
+ case 0x1a:
cpuclk_freq = 1400;
break;
- case 0x17 ... 0x19:
+ case 0x14:
+ case 0x17:
cpuclk_freq = 1300;
break;
+ case 0x19:
+ cpuclk_freq = 1200;
+ break;
+ case 0x13:
+ case 0x1d:
+ cpuclk_freq = 1000;
+ break;
+ case 0x1c:
+ cpuclk_freq = 800;
+ break;
+ case 0x1b:
+ cpuclk_freq = 600;
+ break;
default:
dev_err(&pdev->dev, "invalid SAR value\n");
return -EINVAL;
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index d47b66e..87d067a 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -235,4 +235,21 @@
subsystems via QMP mailboxes.
Say Y to support the clocks managed by AOP on platforms such as sdm845.
+config MDM_GCC_SDXPOORWILLS
+ tristate "SDXPOORWILLS Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on sdxpoorwills devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, etc.
+
+config MDM_CLOCK_CPU_SDXPOORWILLS
+ tristate "SDXPOORWILLS CPU Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the cpu clock controller on sdxpoorwills
+ based devices.
+ Say Y if you want to support CPU clock scaling using
+ CPUfreq drivers for dyanmic power management.
+
source "drivers/clk/qcom/mdss/Kconfig"
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 6a8c43b..8cb46a7 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -22,7 +22,9 @@
obj-$(CONFIG_IPQ_GCC_4019) += gcc-ipq4019.o
obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o
obj-$(CONFIG_IPQ_LCC_806X) += lcc-ipq806x.o
+obj-$(CONFIG_MDM_CLOCK_CPU_SDXPOORWILLS) += clk-cpu-a7.o
obj-$(CONFIG_MDM_GCC_9615) += gcc-mdm9615.o
+obj-$(CONFIG_MDM_GCC_SDXPOORWILLS) += gcc-sdxpoorwills.o
obj-$(CONFIG_MDM_LCC_9615) += lcc-mdm9615.o
obj-$(CONFIG_MSM_CAMCC_SDM845) += camcc-sdm845.o
obj-$(CONFIG_MSM_CLK_AOP_QMP) += clk-aop-qmp.o
diff --git a/drivers/clk/qcom/camcc-sdm845.c b/drivers/clk/qcom/camcc-sdm845.c
index 5caa975..836c25c 100644
--- a/drivers/clk/qcom/camcc-sdm845.c
+++ b/drivers/clk/qcom/camcc-sdm845.c
@@ -1971,6 +1971,87 @@
cam_cc_sdm845_clocks[CAM_CC_CSIPHY3_CLK] = &cam_cc_csiphy3_clk.clkr;
cam_cc_sdm845_clocks[CAM_CC_CSI3PHYTIMER_CLK_SRC] =
&cam_cc_csi3phytimer_clk_src.clkr;
+ cam_cc_bps_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_bps_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_cci_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_cci_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_cphy_rx_clk_src.freq_tbl = ftbl_cam_cc_cphy_rx_clk_src_sdm845_v2;
+ cam_cc_cphy_rx_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_cphy_rx_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_cphy_rx_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] = 384000000;
+ cam_cc_csi0phytimer_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_csi0phytimer_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_csi1phytimer_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_csi1phytimer_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_csi2phytimer_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_csi2phytimer_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_fast_ahb_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_fast_ahb_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_fd_core_clk_src.freq_tbl = ftbl_cam_cc_fd_core_clk_src_sdm845_v2;
+ cam_cc_fd_core_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_fd_core_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_icp_clk_src.freq_tbl = ftbl_cam_cc_icp_clk_src_sdm845_v2;
+ cam_cc_icp_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_icp_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_icp_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW_L1] = 600000000;
+ cam_cc_ife_0_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_ife_0_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_ife_0_csid_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_ife_0_csid_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_ife_0_csid_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] =
+ 384000000;
+ cam_cc_ife_1_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_ife_1_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_ife_1_csid_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_ife_1_csid_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_ife_1_csid_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] =
+ 384000000;
+ cam_cc_ife_lite_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_ife_lite_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_ife_lite_csid_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_ife_lite_csid_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_ife_lite_csid_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] =
+ 384000000;
+ cam_cc_ipe_0_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_ipe_0_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_ipe_0_clk_src.clkr.hw.init->rate_max[VDD_CX_NOMINAL] = 600000000;
+ cam_cc_ipe_1_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_ipe_1_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_ipe_1_clk_src.clkr.hw.init->rate_max[VDD_CX_NOMINAL] = 600000000;
+ cam_cc_jpeg_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_jpeg_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_lrme_clk_src.freq_tbl = ftbl_cam_cc_lrme_clk_src_sdm845_v2;
+ cam_cc_lrme_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_lrme_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_lrme_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] = 269333333;
+ cam_cc_lrme_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW_L1] = 320000000;
+ cam_cc_lrme_clk_src.clkr.hw.init->rate_max[VDD_CX_NOMINAL] = 400000000;
+ cam_cc_mclk0_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_mclk0_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_mclk0_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] = 34285714;
+ cam_cc_mclk1_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_mclk1_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_mclk1_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] = 34285714;
+ cam_cc_mclk2_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_mclk2_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_mclk2_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] = 34285714;
+ cam_cc_mclk3_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_mclk3_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_mclk3_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] = 34285714;
+ cam_cc_slow_ahb_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_slow_ahb_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_slow_ahb_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] = 80000000;
+ cam_cc_slow_ahb_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW_L1] =
+ 80000000;
+}
+
+static void cam_cc_sdm845_fixup_sdm670(void)
+{
+ cam_cc_sdm845_clocks[CAM_CC_CSI3PHYTIMER_CLK] =
+ &cam_cc_csi3phytimer_clk.clkr;
+ cam_cc_sdm845_clocks[CAM_CC_CSIPHY3_CLK] = &cam_cc_csiphy3_clk.clkr;
+ cam_cc_sdm845_clocks[CAM_CC_CSI3PHYTIMER_CLK_SRC] =
+ &cam_cc_csi3phytimer_clk_src.clkr;
cam_cc_cphy_rx_clk_src.freq_tbl = ftbl_cam_cc_cphy_rx_clk_src_sdm845_v2;
cam_cc_cphy_rx_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 384000000;
cam_cc_cphy_rx_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] = 384000000;
@@ -1991,11 +2072,6 @@
80000000;
}
-static void cam_cc_sdm845_fixup_sdm670(void)
-{
- cam_cc_sdm845_fixup_sdm845v2();
-}
-
static int cam_cc_sdm845_fixup(struct platform_device *pdev)
{
const char *compat = NULL;
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index afb2c01..bf9b99d 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -22,6 +22,8 @@
#include "clk-alpha-pll.h"
#define PLL_MODE 0x00
+#define PLL_STANDBY 0x0
+#define PLL_RUN 0x1
# define PLL_OUTCTRL BIT(0)
# define PLL_BYPASSNL BIT(1)
# define PLL_RESET_N BIT(2)
@@ -51,25 +53,40 @@
#define PLL_TEST_CTL 0x1c
#define PLL_TEST_CTL_U 0x20
#define PLL_STATUS 0x24
+#define PLL_UPDATE BIT(22)
+#define PLL_ACK_LATCH BIT(29)
+#define PLL_CALIBRATION_MASK (0x7<<3)
+#define PLL_CALIBRATION_CONTROL 2
+#define PLL_HW_UPDATE_LOGIC_BYPASS BIT(23)
+#define ALPHA_16_BIT_PLL_RATE_MARGIN 500
/*
* Even though 40 bits are present, use only 32 for ease of calculation.
*/
#define ALPHA_REG_BITWIDTH 40
#define ALPHA_BITWIDTH 32
-#define FABIA_BITWIDTH 16
+#define SUPPORTS_16BIT_ALPHA 16
#define FABIA_USER_CTL_LO 0xc
#define FABIA_USER_CTL_HI 0x10
#define FABIA_FRAC_VAL 0x38
#define FABIA_OPMODE 0x2c
-#define FABIA_PLL_STANDBY 0x0
-#define FABIA_PLL_RUN 0x1
#define FABIA_PLL_OUT_MASK 0x7
-#define FABIA_PLL_RATE_MARGIN 500
#define FABIA_PLL_ACK_LATCH BIT(29)
#define FABIA_PLL_UPDATE BIT(22)
-#define FABIA_PLL_HW_UPDATE_LOGIC_BYPASS BIT(23)
+
+#define TRION_PLL_CAL_VAL 0x44
+#define TRION_PLL_CAL_L_VAL 0x8
+#define TRION_PLL_USER_CTL 0xc
+#define TRION_PLL_USER_CTL_U 0x10
+#define TRION_PLL_USER_CTL_U1 0x14
+#define TRION_PLL_CONFIG_CTL_U 0x1c
+#define TRION_PLL_CONFIG_CTL_U1 0x20
+#define TRION_PLL_OPMODE 0x38
+#define TRION_PLL_ALPHA_VAL 0x40
+
+#define TRION_PLL_OUT_MASK 0x7
+#define TRION_PLL_ENABLE_STATE_READ BIT(4)
#define to_clk_alpha_pll(_hw) container_of(to_clk_regmap(_hw), \
struct clk_alpha_pll, clkr)
@@ -121,6 +138,10 @@
return wait_for_pll(pll, mask, 0, "offline");
}
+static int wait_for_pll_latch_ack(struct clk_alpha_pll *pll, u32 mask)
+{
+ return wait_for_pll(pll, mask, 0, "latch_ack");
+}
/* alpha pll with hwfsm support */
@@ -294,8 +315,8 @@
{
int alpha_bw = ALPHA_BITWIDTH;
- if (pll->type == FABIA_PLL)
- alpha_bw = FABIA_BITWIDTH;
+ if (pll->type == FABIA_PLL || pll->type == TRION_PLL)
+ alpha_bw = SUPPORTS_16BIT_ALPHA;
return (prate * l) + ((prate * a) >> alpha_bw);
}
@@ -326,9 +347,9 @@
return rate;
}
- /* Fabia PLLs only have 16 bits to program the fractional divider */
- if (pll->type == FABIA_PLL)
- alpha_bw = FABIA_BITWIDTH;
+ /* Some PLLs only have 16 bits to program the fractional divider */
+ if (pll->type == FABIA_PLL || pll->type == TRION_PLL)
+ alpha_bw = SUPPORTS_16BIT_ALPHA;
/* Upper ALPHA_BITWIDTH bits of Alpha */
quotient = remainder << alpha_bw;
@@ -415,7 +436,8 @@
unsigned long min_freq, max_freq;
rate = alpha_pll_round_rate(pll, rate, *prate, &l, &a);
- if (pll->type == FABIA_PLL || alpha_pll_find_vco(pll, rate))
+ if (pll->type == FABIA_PLL || pll->type == TRION_PLL ||
+ alpha_pll_find_vco(pll, rate))
return rate;
min_freq = pll->vco_table[0].min_freq;
@@ -523,8 +545,8 @@
clk_fabia_pll_latch_input(pll, regmap);
regmap_update_bits(regmap, pll->offset + PLL_MODE,
- FABIA_PLL_HW_UPDATE_LOGIC_BYPASS,
- FABIA_PLL_HW_UPDATE_LOGIC_BYPASS);
+ PLL_HW_UPDATE_LOGIC_BYPASS,
+ PLL_HW_UPDATE_LOGIC_BYPASS);
regmap_update_bits(regmap, pll->offset + PLL_MODE,
PLL_RESET_N, PLL_RESET_N);
@@ -560,7 +582,7 @@
return ret;
/* Set operation mode to STANDBY */
- regmap_write(pll->clkr.regmap, off + FABIA_OPMODE, FABIA_PLL_STANDBY);
+ regmap_write(pll->clkr.regmap, off + FABIA_OPMODE, PLL_STANDBY);
/* PLL should be in STANDBY mode before continuing */
mb();
@@ -572,7 +594,7 @@
return ret;
/* Set operation mode to RUN */
- regmap_write(pll->clkr.regmap, off + FABIA_OPMODE, FABIA_PLL_RUN);
+ regmap_write(pll->clkr.regmap, off + FABIA_OPMODE, PLL_RUN);
ret = wait_for_pll_enable(pll, PLL_LOCK_DET);
if (ret)
@@ -624,7 +646,7 @@
return;
/* Place the PLL mode in STANDBY */
- regmap_write(pll->clkr.regmap, off + FABIA_OPMODE, FABIA_PLL_STANDBY);
+ regmap_write(pll->clkr.regmap, off + FABIA_OPMODE, PLL_STANDBY);
}
static unsigned long
@@ -659,7 +681,7 @@
* Due to limited number of bits for fractional rate programming, the
* rounded up rate could be marginally higher than the requested rate.
*/
- if (rrate > (rate + FABIA_PLL_RATE_MARGIN) || rrate < rate) {
+ if (rrate > (rate + ALPHA_16_BIT_PLL_RATE_MARGIN) || rrate < rate) {
pr_err("Call set rate on the PLL with rounded rates!\n");
return -EINVAL;
}
@@ -879,3 +901,436 @@
.set_rate = clk_generic_pll_postdiv_set_rate,
};
EXPORT_SYMBOL_GPL(clk_generic_pll_postdiv_ops);
+
+static int trion_pll_is_enabled(struct clk_alpha_pll *pll,
+ struct regmap *regmap)
+{
+ u32 mode_val, opmode_val, off = pll->offset;
+ int ret;
+
+ ret = regmap_read(regmap, off + PLL_MODE, &mode_val);
+ ret |= regmap_read(regmap, off + TRION_PLL_OPMODE, &opmode_val);
+ if (ret)
+ return 0;
+
+ return ((opmode_val & PLL_RUN) && (mode_val & PLL_OUTCTRL));
+}
+
+int clk_trion_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct pll_config *config)
+{
+ int ret = 0;
+
+ if (trion_pll_is_enabled(pll, regmap)) {
+ pr_debug("PLL is already enabled. Skipping configuration.\n");
+
+ /*
+ * Set the PLL_HW_UPDATE_LOGIC_BYPASS bit to latch the input
+ * before continuing.
+ */
+ regmap_update_bits(regmap, pll->offset + PLL_MODE,
+ PLL_HW_UPDATE_LOGIC_BYPASS,
+ PLL_HW_UPDATE_LOGIC_BYPASS);
+
+ pll->inited = true;
+ return ret;
+ }
+
+ /*
+ * Disable the PLL if it's already been initialized. Not doing so might
+ * lead to the PLL running with the old frequency configuration.
+ */
+ if (pll->inited) {
+ ret = regmap_update_bits(regmap, pll->offset + PLL_MODE,
+ PLL_RESET_N, 0);
+ if (ret)
+ return ret;
+ }
+
+ if (config->l)
+ regmap_write(regmap, pll->offset + PLL_L_VAL,
+ config->l);
+
+ regmap_write(regmap, pll->offset + TRION_PLL_CAL_L_VAL,
+ TRION_PLL_CAL_VAL);
+
+ if (config->frac)
+ regmap_write(regmap, pll->offset + TRION_PLL_ALPHA_VAL,
+ config->frac);
+
+ if (config->config_ctl_val)
+ regmap_write(regmap, pll->offset + PLL_CONFIG_CTL,
+ config->config_ctl_val);
+
+ if (config->config_ctl_hi_val)
+ regmap_write(regmap, pll->offset + TRION_PLL_CONFIG_CTL_U,
+ config->config_ctl_hi_val);
+
+ if (config->config_ctl_hi1_val)
+ regmap_write(regmap, pll->offset + TRION_PLL_CONFIG_CTL_U1,
+ config->config_ctl_hi1_val);
+
+ if (config->post_div_mask)
+ regmap_update_bits(regmap, pll->offset + TRION_PLL_USER_CTL,
+ config->post_div_mask, config->post_div_val);
+
+ /* Disable state read */
+ regmap_update_bits(regmap, pll->offset + TRION_PLL_USER_CTL_U,
+ TRION_PLL_ENABLE_STATE_READ, 0);
+
+ regmap_update_bits(regmap, pll->offset + PLL_MODE,
+ PLL_HW_UPDATE_LOGIC_BYPASS,
+ PLL_HW_UPDATE_LOGIC_BYPASS);
+
+ /* Set calibration control to Automatic */
+ regmap_update_bits(regmap, pll->offset + TRION_PLL_USER_CTL_U,
+ PLL_CALIBRATION_MASK, PLL_CALIBRATION_CONTROL);
+
+ /* Disable PLL output */
+ ret = regmap_update_bits(regmap, pll->offset + PLL_MODE,
+ PLL_OUTCTRL, 0);
+ if (ret)
+ return ret;
+
+ /* Set operation mode to OFF */
+ regmap_write(regmap, pll->offset + TRION_PLL_OPMODE, PLL_STANDBY);
+
+ /* PLL should be in OFF mode before continuing */
+ wmb();
+
+ /* Place the PLL in STANDBY mode */
+ ret = regmap_update_bits(regmap, pll->offset + PLL_MODE,
+ PLL_RESET_N, PLL_RESET_N);
+ if (ret)
+ return ret;
+
+ pll->inited = true;
+
+ return ret;
+}
+
+static int clk_alpha_pll_latch_l_val(struct clk_alpha_pll *pll)
+{
+ int ret;
+
+ /* Latch the input to the PLL */
+ ret = regmap_update_bits(pll->clkr.regmap, pll->offset + PLL_MODE,
+ PLL_UPDATE, PLL_UPDATE);
+ if (ret)
+ return ret;
+
+ /* Wait for 2 reference cycle before checking ACK bit */
+ udelay(1);
+
+ ret = wait_for_pll_latch_ack(pll, PLL_ACK_LATCH);
+ if (ret)
+ return ret;
+
+ /* Return latch input to 0 */
+ ret = regmap_update_bits(pll->clkr.regmap, pll->offset + PLL_MODE,
+ PLL_UPDATE, (u32)~PLL_UPDATE);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int clk_trion_pll_enable(struct clk_hw *hw)
+{
+ int ret = 0;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val, off = pll->offset;
+
+ ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
+ if (ret)
+ return ret;
+
+ /* If in FSM mode, just vote for it */
+ if (val & PLL_VOTE_FSM_ENA) {
+ ret = clk_enable_regmap(hw);
+ if (ret)
+ return ret;
+ return wait_for_pll_enable(pll, PLL_ACTIVE_FLAG);
+ }
+
+ if (unlikely(!pll->inited)) {
+ ret = clk_trion_pll_configure(pll, pll->clkr.regmap,
+ pll->config);
+ if (ret) {
+ pr_err("Failed to configure %s\n", clk_hw_get_name(hw));
+ return ret;
+ }
+ }
+
+ /* Skip If PLL is already running */
+ if (trion_pll_is_enabled(pll, pll->clkr.regmap))
+ return ret;
+
+ /* Set operation mode to RUN */
+ regmap_write(pll->clkr.regmap, off + TRION_PLL_OPMODE, PLL_RUN);
+
+ ret = wait_for_pll_enable(pll, PLL_LOCK_DET);
+ if (ret)
+ return ret;
+
+ /* Enable PLL main output */
+ ret = regmap_update_bits(pll->clkr.regmap, off + TRION_PLL_USER_CTL,
+ TRION_PLL_OUT_MASK, TRION_PLL_OUT_MASK);
+ if (ret)
+ return ret;
+
+ /* Enable Global PLL outputs */
+ ret = regmap_update_bits(pll->clkr.regmap, off + PLL_MODE,
+ PLL_OUTCTRL, PLL_OUTCTRL);
+ if (ret)
+ return ret;
+
+ /* Ensure that the write above goes through before returning. */
+ mb();
+ return ret;
+}
+
+static void clk_trion_pll_disable(struct clk_hw *hw)
+{
+ int ret;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val, off = pll->offset;
+
+ ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
+ if (ret)
+ return;
+
+ /* If in FSM mode, just unvote it */
+ if (val & PLL_VOTE_FSM_ENA) {
+ clk_disable_regmap(hw);
+ return;
+ }
+
+ /* Disable Global PLL outputs */
+ ret = regmap_update_bits(pll->clkr.regmap, off + PLL_MODE,
+ PLL_OUTCTRL, 0);
+ if (ret)
+ return;
+
+ /* Disable the main PLL output */
+ ret = regmap_update_bits(pll->clkr.regmap, off + TRION_PLL_USER_CTL,
+ TRION_PLL_OUT_MASK, 0);
+ if (ret)
+ return;
+
+ /* Place the PLL into STANDBY mode */
+ regmap_write(pll->clkr.regmap, off + TRION_PLL_OPMODE, PLL_STANDBY);
+
+ regmap_update_bits(pll->clkr.regmap, off + PLL_MODE,
+ PLL_RESET_N, PLL_RESET_N);
+}
+
+static unsigned long
+clk_trion_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ u32 l, frac = 0;
+ u64 prate = parent_rate;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 off = pll->offset;
+
+ regmap_read(pll->clkr.regmap, off + PLL_L_VAL, &l);
+ regmap_read(pll->clkr.regmap, off + TRION_PLL_ALPHA_VAL, &frac);
+
+ return alpha_pll_calc_rate(pll, prate, l, frac);
+}
+
+static int clk_trion_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ unsigned long rrate;
+ bool is_enabled;
+ int ret;
+ u32 l, val, off = pll->offset;
+ u64 a;
+
+ rrate = alpha_pll_round_rate(pll, rate, prate, &l, &a);
+ /*
+ * Due to limited number of bits for fractional rate programming, the
+ * rounded up rate could be marginally higher than the requested rate.
+ */
+ if (rrate > (rate + ALPHA_16_BIT_PLL_RATE_MARGIN) || rrate < rate) {
+ pr_err("Trion_pll: Call clk_set_rate with rounded rates!\n");
+ return -EINVAL;
+ }
+
+ is_enabled = clk_hw_is_enabled(hw);
+
+ if (is_enabled)
+ hw->init->ops->disable(hw);
+
+ regmap_write(pll->clkr.regmap, off + PLL_L_VAL, l);
+ regmap_write(pll->clkr.regmap, off + TRION_PLL_ALPHA_VAL, a);
+
+ ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
+ if (ret)
+ return ret;
+
+ /*
+ * If PLL is in Standby or RUN mode then only latch the L value
+ * Else PLL is in OFF mode and just configure L register - as per
+ * HPG no need to latch input.
+ */
+ if (val & PLL_RESET_N)
+ clk_alpha_pll_latch_l_val(pll);
+
+ if (is_enabled)
+ hw->init->ops->enable(hw);
+
+ /* Wait for PLL output to stabilize */
+ udelay(100);
+
+ return ret;
+}
+
+static int clk_trion_pll_is_enabled(struct clk_hw *hw)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+
+ return trion_pll_is_enabled(pll, pll->clkr.regmap);
+}
+
+static void clk_trion_pll_list_registers(struct seq_file *f, struct clk_hw *hw)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ int size, i, val;
+
+ static struct clk_register_data data[] = {
+ {"PLL_MODE", 0x0},
+ {"PLL_L_VAL", 0x4},
+ {"PLL_USER_CTL", 0xc},
+ {"PLL_USER_CTL_U", 0x10},
+ {"PLL_USER_CTL_U1", 0x14},
+ {"PLL_CONFIG_CTL", 0x18},
+ {"PLL_CONFIG_CTL_U", 0x1c},
+ {"PLL_CONFIG_CTL_U1", 0x20},
+ {"PLL_OPMODE", 0x38},
+ };
+
+ static struct clk_register_data data1[] = {
+ {"APSS_PLL_VOTE", 0x0},
+ };
+
+ size = ARRAY_SIZE(data);
+
+ for (i = 0; i < size; i++) {
+ regmap_read(pll->clkr.regmap, pll->offset + data[i].offset,
+ &val);
+ seq_printf(f, "%20s: 0x%.8x\n", data[i].name, val);
+ }
+
+ regmap_read(pll->clkr.regmap, pll->offset + data[0].offset, &val);
+
+ if (val & PLL_VOTE_FSM_ENA) {
+ regmap_read(pll->clkr.regmap, pll->clkr.enable_reg +
+ data1[0].offset, &val);
+ seq_printf(f, "%20s: 0x%.8x\n", data1[0].name, val);
+ }
+}
+
+const struct clk_ops clk_trion_pll_ops = {
+ .enable = clk_trion_pll_enable,
+ .disable = clk_trion_pll_disable,
+ .recalc_rate = clk_trion_pll_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+ .set_rate = clk_trion_pll_set_rate,
+ .is_enabled = clk_trion_pll_is_enabled,
+ .list_registers = clk_trion_pll_list_registers,
+};
+EXPORT_SYMBOL(clk_trion_pll_ops);
+
+const struct clk_ops clk_trion_fixed_pll_ops = {
+ .enable = clk_trion_pll_enable,
+ .disable = clk_trion_pll_disable,
+ .recalc_rate = clk_trion_pll_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+ .is_enabled = clk_trion_pll_is_enabled,
+ .list_registers = clk_trion_pll_list_registers,
+};
+EXPORT_SYMBOL(clk_trion_fixed_pll_ops);
+
+static unsigned long clk_trion_pll_postdiv_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+ u32 i, cal_div = 1, val;
+
+ if (!pll->post_div_table) {
+ pr_err("Missing the post_div_table for the PLL\n");
+ return -EINVAL;
+ }
+
+ regmap_read(pll->clkr.regmap, pll->offset + TRION_PLL_USER_CTL, &val);
+
+ val >>= pll->post_div_shift;
+ val &= PLL_POST_DIV_MASK;
+
+ for (i = 0; i < pll->num_post_div; i++) {
+ if (pll->post_div_table[i].val == val) {
+ cal_div = pll->post_div_table[i].div;
+ break;
+ }
+ }
+
+ return (parent_rate / cal_div);
+}
+
+static long clk_trion_pll_postdiv_round_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long *prate)
+{
+ struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+
+ if (!pll->post_div_table)
+ return -EINVAL;
+
+ return divider_round_rate(hw, rate, prate, pll->post_div_table,
+ pll->width, CLK_DIVIDER_ROUND_CLOSEST);
+}
+
+static int clk_trion_pll_postdiv_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+ int i, val = 0, cal_div, ret;
+
+ /*
+ * If the PLL is in FSM mode, then treat the set_rate callback
+ * as a no-operation.
+ */
+ ret = regmap_read(pll->clkr.regmap, pll->offset + PLL_MODE, &val);
+ if (ret)
+ return ret;
+
+ if (val & PLL_VOTE_FSM_ENA)
+ return 0;
+
+ if (!pll->post_div_table) {
+ pr_err("Missing the post_div_table for the PLL\n");
+ return -EINVAL;
+ }
+
+ cal_div = DIV_ROUND_UP_ULL((u64)parent_rate, rate);
+ for (i = 0; i < pll->num_post_div; i++) {
+ if (pll->post_div_table[i].div == cal_div) {
+ val = pll->post_div_table[i].val;
+ break;
+ }
+ }
+
+ return regmap_update_bits(pll->clkr.regmap,
+ pll->offset + TRION_PLL_USER_CTL,
+ PLL_POST_DIV_MASK << pll->post_div_shift,
+ val << pll->post_div_shift);
+}
+
+const struct clk_ops clk_trion_pll_postdiv_ops = {
+ .recalc_rate = clk_trion_pll_postdiv_recalc_rate,
+ .round_rate = clk_trion_pll_postdiv_round_rate,
+ .set_rate = clk_trion_pll_postdiv_set_rate,
+};
+EXPORT_SYMBOL(clk_trion_pll_postdiv_ops);
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index 2656cd6..c5fecb1 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -27,6 +27,7 @@
enum pll_type {
ALPHA_PLL,
FABIA_PLL,
+ TRION_PLL,
};
/**
@@ -35,7 +36,7 @@
* @inited: flag that's set when the PLL is initialized
* @vco_table: array of VCO settings
* @clkr: regmap clock handle
- * @is_fabia: Set if the PLL type is FABIA
+ * @pll_type: Specify the type of PLL
*/
struct clk_alpha_pll {
u32 offset;
@@ -79,10 +80,15 @@
extern const struct clk_ops clk_fabia_pll_ops;
extern const struct clk_ops clk_fabia_fixed_pll_ops;
extern const struct clk_ops clk_generic_pll_postdiv_ops;
+extern const struct clk_ops clk_trion_pll_ops;
+extern const struct clk_ops clk_trion_fixed_pll_ops;
+extern const struct clk_ops clk_trion_pll_postdiv_ops;
void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct pll_config *config);
void clk_fabia_pll_configure(struct clk_alpha_pll *pll,
struct regmap *regmap, const struct pll_config *config);
+int clk_trion_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct pll_config *config);
#endif
diff --git a/drivers/clk/qcom/clk-cpu-a7.c b/drivers/clk/qcom/clk-cpu-a7.c
new file mode 100644
index 0000000..c0cc00f8
--- /dev/null
+++ b/drivers/clk/qcom/clk-cpu-a7.c
@@ -0,0 +1,718 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/cpu.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/pm_opp.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <dt-bindings/clock/qcom,cpu-a7.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-debug.h"
+#include "clk-rcg.h"
+#include "clk-regmap-mux-div.h"
+#include "common.h"
+#include "vdd-level-sdm845.h"
+
+#define SYS_APC0_AUX_CLK_SRC 1
+
+#define PLL_MODE_REG 0x0
+#define PLL_OPMODE_RUN 0x1
+#define PLL_OPMODE_REG 0x38
+#define PLL_MODE_OUTCTRL BIT(0)
+
+#define to_clk_regmap_mux_div(_hw) \
+ container_of(to_clk_regmap(_hw), struct clk_regmap_mux_div, clkr)
+
+static DEFINE_VDD_REGULATORS(vdd_cx, VDD_CX_NUM, 1, vdd_corner);
+static DEFINE_VDD_REGS_INIT(vdd_cpu, 1);
+
+enum apcs_clk_parent_index {
+ XO_AO_INDEX,
+ SYS_APC0_AUX_CLK_INDEX,
+ APCS_CPU_PLL_INDEX,
+};
+
+enum {
+ P_SYS_APC0_AUX_CLK,
+ P_APCS_CPU_PLL,
+ P_BI_TCXO_AO,
+};
+
+static const struct parent_map apcs_clk_parent_map[] = {
+ [XO_AO_INDEX] = { P_BI_TCXO_AO, 0 },
+ [SYS_APC0_AUX_CLK_INDEX] = { P_SYS_APC0_AUX_CLK, 1 },
+ [APCS_CPU_PLL_INDEX] = { P_APCS_CPU_PLL, 5 },
+};
+
+static const char *const apcs_clk_parent_name[] = {
+ [XO_AO_INDEX] = "bi_tcxo_ao",
+ [SYS_APC0_AUX_CLK_INDEX] = "sys_apc0_aux_clk",
+ [APCS_CPU_PLL_INDEX] = "apcs_cpu_pll",
+};
+
+static int a7cc_clk_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate, u8 index)
+{
+ struct clk_regmap_mux_div *cpuclk = to_clk_regmap_mux_div(hw);
+
+ return __mux_div_set_src_div(cpuclk, cpuclk->parent_map[index].cfg,
+ cpuclk->div);
+}
+
+static int a7cc_clk_set_parent(struct clk_hw *hw, u8 index)
+{
+	/*
+	 * Since a7cc_clk_set_rate_and_parent() is defined, set_parent()
+	 * will never get called from clk_change_rate(), so return 0.
+	 */
+ return 0;
+}
+
+static int a7cc_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ struct clk_regmap_mux_div *cpuclk = to_clk_regmap_mux_div(hw);
+
+	/*
+	 * The parent is unchanged from the previous rate change;
+	 * only the new divider needs to be configured here.
+	 */
+ return __mux_div_set_src_div(cpuclk, cpuclk->src, cpuclk->div);
+}
+
+static int a7cc_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ int ret;
+ u32 div = 1;
+ struct clk_hw *xo, *apc0_auxclk_hw, *apcs_cpu_pll_hw;
+ unsigned long apc0_auxclk_rate, rate = req->rate;
+ struct clk_rate_request parent_req = { };
+ struct clk_regmap_mux_div *cpuclk = to_clk_regmap_mux_div(hw);
+ unsigned long mask = BIT(cpuclk->hid_width) - 1;
+
+ xo = clk_hw_get_parent_by_index(hw, XO_AO_INDEX);
+ if (rate == clk_hw_get_rate(xo)) {
+ req->best_parent_hw = xo;
+ req->best_parent_rate = rate;
+ cpuclk->div = div;
+ cpuclk->src = cpuclk->parent_map[XO_AO_INDEX].cfg;
+ return 0;
+ }
+
+ apc0_auxclk_hw = clk_hw_get_parent_by_index(hw, SYS_APC0_AUX_CLK_INDEX);
+ apcs_cpu_pll_hw = clk_hw_get_parent_by_index(hw, APCS_CPU_PLL_INDEX);
+
+ apc0_auxclk_rate = clk_hw_get_rate(apc0_auxclk_hw);
+ if (rate <= apc0_auxclk_rate) {
+ req->best_parent_hw = apc0_auxclk_hw;
+ req->best_parent_rate = apc0_auxclk_rate;
+
+ div = DIV_ROUND_UP((2 * req->best_parent_rate), rate) - 1;
+ div = min_t(unsigned long, div, mask);
+
+ req->rate = clk_rcg2_calc_rate(req->best_parent_rate, 0,
+ 0, 0, div);
+ cpuclk->src = cpuclk->parent_map[SYS_APC0_AUX_CLK_INDEX].cfg;
+ } else {
+ parent_req.rate = rate;
+ parent_req.best_parent_hw = apcs_cpu_pll_hw;
+
+ req->best_parent_hw = apcs_cpu_pll_hw;
+ ret = __clk_determine_rate(req->best_parent_hw, &parent_req);
+ if (ret)
+ return ret;
+
+ req->best_parent_rate = parent_req.rate;
+ cpuclk->src = cpuclk->parent_map[APCS_CPU_PLL_INDEX].cfg;
+ }
+ cpuclk->div = div;
+
+ return 0;
+}
+
+static void a7cc_clk_list_registers(struct seq_file *f, struct clk_hw *hw)
+{
+ struct clk_regmap_mux_div *cpuclk = to_clk_regmap_mux_div(hw);
+ int i = 0, size = 0, val;
+
+ static struct clk_register_data data[] = {
+ {"CMD_RCGR", 0x0},
+ {"CFG_RCGR", 0x4},
+ };
+
+ size = ARRAY_SIZE(data);
+ for (i = 0; i < size; i++) {
+ regmap_read(cpuclk->clkr.regmap,
+ cpuclk->reg_offset + data[i].offset, &val);
+ seq_printf(f, "%20s: 0x%.8x\n", data[i].name, val);
+ }
+}
+
+static unsigned long a7cc_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long prate)
+{
+ struct clk_regmap_mux_div *cpuclk = to_clk_regmap_mux_div(hw);
+ const char *name = clk_hw_get_name(hw);
+ struct clk_hw *parent;
+ int ret = 0;
+ unsigned long parent_rate;
+ u32 i, div, src = 0;
+ u32 num_parents = clk_hw_get_num_parents(hw);
+
+ ret = mux_div_get_src_div(cpuclk, &src, &div);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num_parents; i++) {
+ if (src == cpuclk->parent_map[i].cfg) {
+ parent = clk_hw_get_parent_by_index(hw, i);
+ parent_rate = clk_hw_get_rate(parent);
+ return clk_rcg2_calc_rate(parent_rate, 0, 0, 0, div);
+ }
+ }
+ pr_err("%s: Can't find parent %d\n", name, src);
+ return ret;
+}
+
+static int a7cc_clk_enable(struct clk_hw *hw)
+{
+ return clk_regmap_mux_div_ops.enable(hw);
+}
+
+static void a7cc_clk_disable(struct clk_hw *hw)
+{
+ clk_regmap_mux_div_ops.disable(hw);
+}
+
+static u8 a7cc_clk_get_parent(struct clk_hw *hw)
+{
+ return clk_regmap_mux_div_ops.get_parent(hw);
+}
+
+/*
+ * We use the notifier function for switching to a temporary safe configuration
+ * (mux and divider), while the APSS pll is reconfigured.
+ */
+static int a7cc_notifier_cb(struct notifier_block *nb, unsigned long event,
+ void *data)
+{
+ int ret = 0;
+ struct clk_regmap_mux_div *cpuclk = container_of(nb,
+ struct clk_regmap_mux_div, clk_nb);
+
+ if (event == PRE_RATE_CHANGE)
+ /* set the mux to safe source(sys_apc0_aux_clk) & div */
+ ret = __mux_div_set_src_div(cpuclk, SYS_APC0_AUX_CLK_SRC, 1);
+
+ if (event == ABORT_RATE_CHANGE)
+ pr_err("Error in configuring PLL - stay at safe src only\n");
+
+ return notifier_from_errno(ret);
+}
+
+static const struct clk_ops a7cc_clk_ops = {
+ .enable = a7cc_clk_enable,
+ .disable = a7cc_clk_disable,
+ .get_parent = a7cc_clk_get_parent,
+ .set_rate = a7cc_clk_set_rate,
+ .set_parent = a7cc_clk_set_parent,
+ .set_rate_and_parent = a7cc_clk_set_rate_and_parent,
+ .determine_rate = a7cc_clk_determine_rate,
+ .recalc_rate = a7cc_clk_recalc_rate,
+ .debug_init = clk_debug_measure_add,
+ .list_registers = a7cc_clk_list_registers,
+};
+
+/*
+ * As per HW, sys_apc0_aux_clk runs at 300MHz and is configured by the boot
+ * firmware, so it is registered as a dummy (fixed-rate placeholder) clock.
+ */
+
+static struct clk_dummy sys_apc0_aux_clk = {
+ .rrate = 300000000,
+ .hw.init = &(struct clk_init_data){
+ .name = "sys_apc0_aux_clk",
+ .ops = &clk_dummy_ops,
+ },
+};
+
+/* Initial configuration for 1497.6MHz(Turbo) */
+static const struct pll_config apcs_cpu_pll_config = {
+ .l = 0x4E,
+};
+
+static struct pll_vco trion_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
+static struct clk_alpha_pll apcs_cpu_pll = {
+ .type = TRION_PLL,
+ .vco_table = trion_vco,
+ .num_vco = ARRAY_SIZE(trion_vco),
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "apcs_cpu_pll",
+ .parent_names = (const char *[]){ "bi_tcxo_ao" },
+ .num_parents = 1,
+ .ops = &clk_trion_pll_ops,
+ VDD_CX_FMAX_MAP4(LOWER, 345600000,
+ LOW, 576000000,
+ NOMINAL, 1094400000,
+ HIGH, 1497600000),
+ },
+};
+
+static struct clk_regmap_mux_div apcs_clk = {
+ .hid_width = 5,
+ .hid_shift = 0,
+ .src_width = 3,
+ .src_shift = 8,
+ .safe_src = 1,
+ .safe_div = 1,
+ .parent_map = apcs_clk_parent_map,
+ .clk_nb.notifier_call = a7cc_notifier_cb,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "apcs_clk",
+ .parent_names = apcs_clk_parent_name,
+ .num_parents = 3,
+ .vdd_class = &vdd_cpu,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &a7cc_clk_ops,
+ },
+};
+
+static const struct of_device_id match_table[] = {
+ { .compatible = "qcom,cpu-sdxpoorwills" },
+ {}
+};
+
+static const struct regmap_config cpu_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x7F10,
+ .fast_io = true,
+};
+
+static struct clk_hw *cpu_clks_hws[] = {
+ [SYS_APC0_AUX_CLK] = &sys_apc0_aux_clk.hw,
+ [APCS_CPU_PLL] = &apcs_cpu_pll.clkr.hw,
+ [APCS_CLK] = &apcs_clk.clkr.hw,
+};
+
+static void a7cc_clk_get_speed_bin(struct platform_device *pdev, int *bin,
+ int *version)
+{
+ struct resource *res;
+ void __iomem *base;
+ u32 pte_efuse, valid;
+
+ *bin = 0;
+ *version = 0;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "efuse");
+ if (!res) {
+ dev_info(&pdev->dev,
+ "No speed/PVS binning available. Defaulting to 0!\n");
+ return;
+ }
+
+ base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!base) {
+ dev_info(&pdev->dev,
+ "Unable to read efuse data. Defaulting to 0!\n");
+ return;
+ }
+
+ pte_efuse = readl_relaxed(base);
+ devm_iounmap(&pdev->dev, base);
+
+ *bin = pte_efuse & 0x7;
+ valid = (pte_efuse >> 3) & 0x1;
+ *version = (pte_efuse >> 4) & 0x3;
+
+ if (!valid) {
+ dev_info(&pdev->dev, "Speed bin not set. Defaulting to 0!\n");
+ *bin = 0;
+ } else {
+ dev_info(&pdev->dev, "Speed bin: %d\n", *bin);
+ }
+
+ dev_info(&pdev->dev, "PVS version: %d\n", *version);
+}
+
+static int a7cc_clk_get_fmax_vdd_class(struct platform_device *pdev,
+ struct clk_init_data *clk_intd, char *prop_name)
+{
+ struct device_node *of = pdev->dev.of_node;
+ int prop_len, i, j;
+ struct clk_vdd_class *vdd = clk_intd->vdd_class;
+ int num = vdd->num_regulators + 1;
+ u32 *array;
+
+ if (!of_find_property(of, prop_name, &prop_len)) {
+ dev_err(&pdev->dev, "missing %s\n", prop_name);
+ return -EINVAL;
+ }
+
+ prop_len /= sizeof(u32);
+ if (prop_len % num) {
+ dev_err(&pdev->dev, "bad length %d\n", prop_len);
+ return -EINVAL;
+ }
+
+ prop_len /= num;
+ vdd->level_votes = devm_kzalloc(&pdev->dev, prop_len * sizeof(int),
+ GFP_KERNEL);
+ if (!vdd->level_votes)
+ return -ENOMEM;
+
+ vdd->vdd_uv = devm_kzalloc(&pdev->dev,
+ prop_len * sizeof(int) * (num - 1), GFP_KERNEL);
+ if (!vdd->vdd_uv)
+ return -ENOMEM;
+
+ clk_intd->rate_max = devm_kzalloc(&pdev->dev,
+ prop_len * sizeof(unsigned long), GFP_KERNEL);
+ if (!clk_intd->rate_max)
+ return -ENOMEM;
+
+ array = devm_kzalloc(&pdev->dev,
+ prop_len * sizeof(u32) * num, GFP_KERNEL);
+ if (!array)
+ return -ENOMEM;
+
+ of_property_read_u32_array(of, prop_name, array, prop_len * num);
+ for (i = 0; i < prop_len; i++) {
+ clk_intd->rate_max[i] = array[num * i];
+ for (j = 1; j < num; j++) {
+ vdd->vdd_uv[(num - 1) * i + (j - 1)] =
+ array[num * i + j];
+ }
+ }
+
+ devm_kfree(&pdev->dev, array);
+ vdd->num_levels = prop_len;
+ vdd->cur_level = prop_len;
+ clk_intd->num_rate_max = prop_len;
+
+ return 0;
+}
+
+/*
+ * Find the voltage level required for a given clock rate.
+ */
+static int find_vdd_level(struct clk_init_data *clk_intd, unsigned long rate)
+{
+ int level;
+
+ for (level = 0; level < clk_intd->num_rate_max; level++)
+ if (rate <= clk_intd->rate_max[level])
+ break;
+
+ if (level == clk_intd->num_rate_max) {
+ pr_err("Rate %lu for %s is greater than highest Fmax\n", rate,
+ clk_intd->name);
+ return -EINVAL;
+ }
+
+ return level;
+}
+
+static int
+a7cc_clk_add_opp(struct clk_hw *hw, struct device *dev, unsigned long max_rate)
+{
+ unsigned long rate = 0;
+ int level, uv, j = 1;
+ long ret;
+ struct clk_init_data *clk_intd = (struct clk_init_data *)hw->init;
+ struct clk_vdd_class *vdd = clk_intd->vdd_class;
+
+ if (IS_ERR_OR_NULL(dev)) {
+ pr_err("%s: Invalid parameters\n", __func__);
+ return -EINVAL;
+ }
+
+ while (1) {
+ rate = clk_intd->rate_max[j++];
+ level = find_vdd_level(clk_intd, rate);
+ if (level <= 0) {
+ pr_warn("clock-cpu: no corner for %lu.\n", rate);
+ return -EINVAL;
+ }
+
+ uv = vdd->vdd_uv[level];
+ if (uv < 0) {
+ pr_warn("clock-cpu: no uv for %lu.\n", rate);
+ return -EINVAL;
+ }
+
+ ret = dev_pm_opp_add(dev, rate, uv);
+ if (ret) {
+ pr_warn("clock-cpu: failed to add OPP for %lu\n", rate);
+ return rate;
+ }
+
+ if (rate >= max_rate)
+ break;
+ }
+
+ return 0;
+}
+
+static void a7cc_clk_print_opp_table(int a7_cpu)
+{
+ struct dev_pm_opp *oppfmax, *oppfmin;
+ unsigned long apc_fmax, apc_fmin;
+ u32 max_a7ss_index = apcs_clk.clkr.hw.init->num_rate_max;
+
+ apc_fmax = apcs_clk.clkr.hw.init->rate_max[max_a7ss_index - 1];
+ apc_fmin = apcs_clk.clkr.hw.init->rate_max[1];
+
+ rcu_read_lock();
+
+ oppfmax = dev_pm_opp_find_freq_exact(get_cpu_device(a7_cpu),
+ apc_fmax, true);
+ oppfmin = dev_pm_opp_find_freq_exact(get_cpu_device(a7_cpu),
+ apc_fmin, true);
+ pr_info("Clock_cpu: OPP voltage for %lu: %ld\n", apc_fmin,
+ dev_pm_opp_get_voltage(oppfmin));
+ pr_info("Clock_cpu: OPP voltage for %lu: %ld\n", apc_fmax,
+ dev_pm_opp_get_voltage(oppfmax));
+
+ rcu_read_unlock();
+}
+
+static void a7cc_clk_populate_opp_table(struct platform_device *pdev)
+{
+ unsigned long apc_fmax;
+ int cpu, a7_cpu = 0;
+ u32 max_a7ss_index = apcs_clk.clkr.hw.init->num_rate_max;
+
+ apc_fmax = apcs_clk.clkr.hw.init->rate_max[max_a7ss_index - 1];
+
+ for_each_possible_cpu(cpu) {
+ a7_cpu = cpu;
+ WARN(a7cc_clk_add_opp(&apcs_clk.clkr.hw, get_cpu_device(cpu),
+ apc_fmax),
+ "Failed to add OPP levels for apcs_clk\n");
+ }
+ /* One time print during bootup */
+ dev_info(&pdev->dev, "OPP tables populated (cpu %d)\n", a7_cpu);
+
+ a7cc_clk_print_opp_table(a7_cpu);
+}
+
+static int a7cc_driver_probe(struct platform_device *pdev)
+{
+ struct clk *clk;
+ void __iomem *base;
+ u32 opmode_regval, mode_regval;
+ struct resource *res;
+ struct clk_onecell_data *data;
+ struct device *dev = &pdev->dev;
+ struct device_node *of = pdev->dev.of_node;
+ int i, ret, speed_bin, version, cpu;
+ int num_clks = ARRAY_SIZE(cpu_clks_hws);
+ u32 a7cc_clk_init_rate = 0;
+ char prop_name[] = "qcom,speedX-bin-vX";
+ struct clk *ext_xo_clk;
+
+ /* Require the RPMH-XO clock to be registered before */
+ ext_xo_clk = devm_clk_get(dev, "xo_ao");
+ if (IS_ERR(ext_xo_clk)) {
+ if (PTR_ERR(ext_xo_clk) != -EPROBE_DEFER)
+ dev_err(dev, "Unable to get xo clock\n");
+ return PTR_ERR(ext_xo_clk);
+ }
+
+ /* Get speed bin information */
+ a7cc_clk_get_speed_bin(pdev, &speed_bin, &version);
+
+ /* Rail Regulator for apcs_pll */
+ vdd_cx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_dig_ao");
+ if (IS_ERR(vdd_cx.regulator[0])) {
+ if (!(PTR_ERR(vdd_cx.regulator[0]) == -EPROBE_DEFER))
+ dev_err(&pdev->dev,
+ "Unable to get vdd_dig_ao regulator\n");
+ return PTR_ERR(vdd_cx.regulator[0]);
+ }
+
+ /* Rail Regulator for APSS a7ss mux */
+ vdd_cpu.regulator[0] = devm_regulator_get(&pdev->dev, "cpu-vdd");
+ if (IS_ERR(vdd_cpu.regulator[0])) {
+ if (!(PTR_ERR(vdd_cpu.regulator[0]) == -EPROBE_DEFER))
+ dev_err(&pdev->dev,
+ "Unable to get cpu-vdd regulator\n");
+ return PTR_ERR(vdd_cpu.regulator[0]);
+ }
+
+ snprintf(prop_name, ARRAY_SIZE(prop_name),
+ "qcom,speed%d-bin-v%d", speed_bin, version);
+
+ ret = a7cc_clk_get_fmax_vdd_class(pdev,
+ (struct clk_init_data *)apcs_clk.clkr.hw.init, prop_name);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Can't get speed bin for apcs_clk. Falling back to zero\n");
+ ret = a7cc_clk_get_fmax_vdd_class(pdev,
+ (struct clk_init_data *)apcs_clk.clkr.hw.init,
+ "qcom,speed0-bin-v0");
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Unable to get speed bin for apcs_clk freq-corner mapping info\n");
+ return ret;
+ }
+ }
+
+ ret = of_property_read_u32(of, "qcom,a7cc-init-rate",
+ &a7cc_clk_init_rate);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "unable to find qcom,a7cc_clk_init_rate property,ret=%d\n",
+ ret);
+ return -EINVAL;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apcs_pll");
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base)) {
+ dev_err(&pdev->dev, "Failed to map apcs_cpu_pll register base\n");
+ return PTR_ERR(base);
+ }
+
+ apcs_cpu_pll.clkr.regmap = devm_regmap_init_mmio(dev, base,
+ &cpu_regmap_config);
+ if (IS_ERR(apcs_cpu_pll.clkr.regmap)) {
+ dev_err(&pdev->dev, "Couldn't get regmap for apcs_cpu_pll\n");
+ return PTR_ERR(apcs_cpu_pll.clkr.regmap);
+ }
+
+ ret = of_property_read_u32(of, "qcom,rcg-reg-offset",
+ &apcs_clk.reg_offset);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "unable to find qcom,rcg-reg-offset property,ret=%d\n",
+ ret);
+ return -EINVAL;
+ }
+
+ apcs_clk.clkr.regmap = apcs_cpu_pll.clkr.regmap;
+
+ /* Read PLLs OPMODE and mode register */
+ ret = regmap_read(apcs_cpu_pll.clkr.regmap, PLL_OPMODE_REG,
+ &opmode_regval);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(apcs_cpu_pll.clkr.regmap, PLL_MODE_REG,
+ &mode_regval);
+ if (ret)
+ return ret;
+
+ /* Configure APSS PLL only if it is not enabled and running */
+ if (!(opmode_regval & PLL_OPMODE_RUN) &&
+ !(mode_regval & PLL_MODE_OUTCTRL))
+ clk_trion_pll_configure(&apcs_cpu_pll,
+ apcs_cpu_pll.clkr.regmap, &apcs_cpu_pll_config);
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->clk_num = num_clks;
+
+ data->clks = devm_kzalloc(dev, num_clks * sizeof(struct clk *),
+ GFP_KERNEL);
+ if (!data->clks)
+ return -ENOMEM;
+
+ /* Register clocks with clock framework */
+ for (i = 0; i < num_clks; i++) {
+ clk = devm_clk_register(dev, cpu_clks_hws[i]);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ data->clks[i] = clk;
+ }
+
+ ret = of_clk_add_provider(dev->of_node, of_clk_src_onecell_get, data);
+ if (ret) {
+ dev_err(&pdev->dev, "CPU clock driver registeration failed\n");
+ return ret;
+ }
+
+ ret = clk_notifier_register(apcs_cpu_pll.clkr.hw.clk, &apcs_clk.clk_nb);
+ if (ret) {
+ dev_err(dev, "failed to register clock notifier: %d\n", ret);
+ return ret;
+ }
+
+ /* Put proxy vote for APSS PLL */
+ clk_prepare_enable(apcs_cpu_pll.clkr.hw.clk);
+
+ /* Set to TURBO boot frequency */
+ ret = clk_set_rate(apcs_clk.clkr.hw.clk, a7cc_clk_init_rate);
+ if (ret)
+ dev_err(&pdev->dev, "Unable to set init rate on apcs_clk\n");
+
+ /*
+ * We don't want the CPU clocks to be turned off at late init
+ * if CPUFREQ or HOTPLUG configs are disabled. So, bump up the
+ * refcount of these clocks. Any cpufreq/hotplug manager can assume
+ * that the clocks have already been prepared and enabled by the time
+ * they take over.
+ */
+
+ get_online_cpus();
+ for_each_online_cpu(cpu)
+ WARN(clk_prepare_enable(apcs_clk.clkr.hw.clk),
+ "Unable to turn on CPU clock\n");
+ put_online_cpus();
+
+ /* Remove proxy vote for APSS PLL */
+ clk_disable_unprepare(apcs_cpu_pll.clkr.hw.clk);
+
+ a7cc_clk_populate_opp_table(pdev);
+
+ dev_info(dev, "CPU clock Driver probed successfully\n");
+
+ return ret;
+}
+
+static struct platform_driver a7_clk_driver = {
+ .probe = a7cc_driver_probe,
+ .driver = {
+ .name = "qcom-cpu-sdxpoorwills",
+ .of_match_table = match_table,
+ },
+};
+
+static int __init a7_clk_init(void)
+{
+ return platform_driver_register(&a7_clk_driver);
+}
+subsys_initcall(a7_clk_init);
+
+static void __exit a7_clk_exit(void)
+{
+ platform_driver_unregister(&a7_clk_driver);
+}
+module_exit(a7_clk_exit);
+
+MODULE_ALIAS("platform:cpu");
+MODULE_DESCRIPTION("A7 CPU clock Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c
index ec4c83e..7e665ca 100644
--- a/drivers/clk/qcom/clk-cpu-osm.c
+++ b/drivers/clk/qcom/clk-cpu-osm.c
@@ -31,7 +31,9 @@
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
#include <dt-bindings/clock/qcom,cpucc-sdm845.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
#include "common.h"
#include "clk-regmap.h"
@@ -53,6 +55,9 @@
#define VOLT_REG 0x114
#define CORE_DCVS_CTRL 0xbc
+#define EFUSE_SHIFT(v1) ((v1) ? 3 : 2)
+#define EFUSE_MASK 0x7
+
#define DCVS_PERF_STATE_DESIRED_REG_0_V1 0x780
#define DCVS_PERF_STATE_DESIRED_REG_0_V2 0x920
#define DCVS_PERF_STATE_DESIRED_REG(n, v1) \
@@ -65,6 +70,9 @@
(((v1) ? OSM_CYCLE_COUNTER_STATUS_REG_0_V1 \
: OSM_CYCLE_COUNTER_STATUS_REG_0_V2) + 4 * (n))
+static DEFINE_VDD_REGS_INIT(vdd_l3_mx_ao, 1);
+static DEFINE_VDD_REGS_INIT(vdd_pwrcl_mx_ao, 1);
+
struct osm_entry {
u16 virtual_corner;
u16 open_loop_volt;
@@ -85,6 +93,8 @@
u64 total_cycle_counter;
u32 prev_cycle_counter;
u32 max_core_count;
+ u32 mx_turbo_freq;
+ unsigned int cpr_rc;
};
static bool is_sdm845v1;
@@ -131,6 +141,18 @@
return (req <= new && new < best) || (best < req && best < new);
}
+static int clk_osm_search_table(struct osm_entry *table, int entries, long rate)
+{
+ int index;
+
+ for (index = 0; index < entries; index++) {
+ if (rate == table[index].frequency)
+ return index;
+ }
+
+ return -EINVAL;
+}
+
static long clk_osm_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
@@ -161,23 +183,62 @@
return rrate;
}
-static int clk_osm_search_table(struct osm_entry *table, int entries, long rate)
+static int clk_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
{
+ struct clk_osm *c = to_clk_osm(hw);
+ struct clk_hw *p_hw = clk_hw_get_parent(hw);
+ struct clk_osm *parent = to_clk_osm(p_hw);
int index = 0;
- for (index = 0; index < entries; index++) {
- if (rate == table[index].frequency)
- return index;
+ if (!c || !parent)
+ return -EINVAL;
+
+ index = clk_osm_search_table(parent->osm_table,
+ parent->num_entries, rate);
+ if (index < 0) {
+ pr_err("cannot set %s to %lu\n", clk_hw_get_name(hw), rate);
+ return -EINVAL;
}
- return -EINVAL;
+ clk_osm_write_reg(parent, index,
+ DCVS_PERF_STATE_DESIRED_REG(c->core_num,
+ is_sdm845v1));
+
+ /* Make sure the write goes through before proceeding */
+ clk_osm_mb(parent);
+
+ return 0;
}
-const struct clk_ops clk_ops_cpu_osm = {
- .round_rate = clk_osm_round_rate,
- .list_rate = clk_osm_list_rate,
- .debug_init = clk_debug_measure_add,
-};
+static unsigned long clk_cpu_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_osm *c = to_clk_osm(hw);
+ struct clk_hw *p_hw = clk_hw_get_parent(hw);
+ struct clk_osm *parent = to_clk_osm(p_hw);
+ int index = 0;
+
+ if (!c || !parent)
+ return -EINVAL;
+
+ index = clk_osm_read_reg(parent,
+ DCVS_PERF_STATE_DESIRED_REG(c->core_num,
+ is_sdm845v1));
+ return parent->osm_table[index].frequency;
+}
+
+static long clk_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct clk_hw *parent_hw = clk_hw_get_parent(hw);
+
+ if (!parent_hw)
+ return -EINVAL;
+
+ *parent_rate = rate;
+ return clk_hw_round_rate(parent_hw, rate);
+}
static int l3_clk_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
@@ -233,7 +294,6 @@
return cpuclk->osm_table[index].frequency;
}
-
static struct clk_ops clk_ops_l3_osm = {
.round_rate = clk_osm_round_rate,
.list_rate = clk_osm_list_rate,
@@ -242,18 +302,23 @@
.debug_init = clk_debug_measure_add,
};
+static struct clk_ops clk_ops_core;
+static struct clk_ops clk_ops_cpu_osm;
+
static struct clk_init_data osm_clks_init[] = {
[0] = {
.name = "l3_clk",
.parent_names = (const char *[]){ "bi_tcxo_ao" },
.num_parents = 1,
.ops = &clk_ops_l3_osm,
+ .vdd_class = &vdd_l3_mx_ao,
},
[1] = {
.name = "pwrcl_clk",
.parent_names = (const char *[]){ "bi_tcxo_ao" },
.num_parents = 1,
.ops = &clk_ops_cpu_osm,
+ .vdd_class = &vdd_pwrcl_mx_ao,
},
[2] = {
.name = "perfcl_clk",
@@ -287,7 +352,8 @@
.name = "cpu0_pwrcl_clk",
.parent_names = (const char *[]){ "pwrcl_clk" },
.num_parents = 1,
- .ops = &clk_dummy_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_ops_core,
},
};
@@ -299,7 +365,8 @@
.name = "cpu1_pwrcl_clk",
.parent_names = (const char *[]){ "pwrcl_clk" },
.num_parents = 1,
- .ops = &clk_dummy_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_ops_core,
},
};
@@ -311,7 +378,8 @@
.name = "cpu2_pwrcl_clk",
.parent_names = (const char *[]){ "pwrcl_clk" },
.num_parents = 1,
- .ops = &clk_dummy_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_ops_core,
},
};
@@ -323,7 +391,8 @@
.name = "cpu3_pwrcl_clk",
.parent_names = (const char *[]){ "pwrcl_clk" },
.num_parents = 1,
- .ops = &clk_dummy_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_ops_core,
},
};
@@ -335,7 +404,8 @@
.name = "cpu4_pwrcl_clk",
.parent_names = (const char *[]){ "pwrcl_clk" },
.num_parents = 1,
- .ops = &clk_dummy_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_ops_core,
},
};
@@ -347,7 +417,8 @@
.name = "cpu5_pwrcl_clk",
.parent_names = (const char *[]){ "pwrcl_clk" },
.num_parents = 1,
- .ops = &clk_dummy_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_ops_core,
},
};
@@ -366,7 +437,8 @@
.name = "cpu4_perfcl_clk",
.parent_names = (const char *[]){ "perfcl_clk" },
.num_parents = 1,
- .ops = &clk_dummy_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_ops_core,
},
};
@@ -378,7 +450,8 @@
.name = "cpu5_perfcl_clk",
.parent_names = (const char *[]){ "perfcl_clk" },
.num_parents = 1,
- .ops = &clk_dummy_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_ops_core,
},
};
@@ -390,7 +463,8 @@
.name = "cpu6_perfcl_clk",
.parent_names = (const char *[]){ "perfcl_clk" },
.num_parents = 1,
- .ops = &clk_dummy_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_ops_core,
},
};
@@ -402,7 +476,8 @@
.name = "cpu7_perfcl_clk",
.parent_names = (const char *[]){ "perfcl_clk" },
.num_parents = 1,
- .ops = &clk_dummy_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_ops_core,
},
};
@@ -515,13 +590,23 @@
}
static void
-osm_set_index(struct clk_osm *c, unsigned int index, unsigned int num)
+osm_set_index(struct clk_osm *c, unsigned int index)
{
- clk_osm_write_reg(c, index,
- DCVS_PERF_STATE_DESIRED_REG(num, is_sdm845v1));
+ struct clk_hw *p_hw = clk_hw_get_parent(&c->hw);
+ struct clk_osm *parent = to_clk_osm(p_hw);
+ unsigned long rate = 0;
- /* Make sure the write goes through before proceeding */
- clk_osm_mb(c);
+ if (index >= OSM_TABLE_SIZE) {
+ pr_err("Passing an index (%u) that's greater than max (%d)\n",
+ index, OSM_TABLE_SIZE - 1);
+ return;
+ }
+
+ rate = parent->osm_table[index].frequency;
+ if (!rate)
+ return;
+
+ clk_set_rate(c->hw.clk, clk_round_rate(c->hw.clk, rate));
}
static int
@@ -529,7 +614,7 @@
{
struct clk_osm *c = policy->driver_data;
- osm_set_index(c, index, c->core_num);
+ osm_set_index(c, index);
return 0;
}
@@ -849,6 +934,7 @@
static int clk_osm_read_lut(struct platform_device *pdev, struct clk_osm *c)
{
u32 data, src, lval, i, j = OSM_TABLE_SIZE;
+ struct clk_vdd_class *vdd = osm_clks_init[c->cluster_num].vdd_class;
for (i = 0; i < OSM_TABLE_SIZE; i++) {
data = clk_osm_read_reg(c, FREQ_REG + i * OSM_REG_SIZE);
@@ -881,6 +967,29 @@
if (!osm_clks_init[c->cluster_num].rate_max)
return -ENOMEM;
+ if (vdd) {
+ vdd->level_votes = devm_kcalloc(&pdev->dev, j,
+ sizeof(*vdd->level_votes), GFP_KERNEL);
+ if (!vdd->level_votes)
+ return -ENOMEM;
+
+ vdd->vdd_uv = devm_kcalloc(&pdev->dev, j, sizeof(*vdd->vdd_uv),
+ GFP_KERNEL);
+ if (!vdd->vdd_uv)
+ return -ENOMEM;
+
+ for (i = 0; i < j; i++) {
+ if (c->osm_table[i].frequency < c->mx_turbo_freq ||
+ (c->cpr_rc > 1))
+ vdd->vdd_uv[i] = RPMH_REGULATOR_LEVEL_NOM;
+ else
+ vdd->vdd_uv[i] = RPMH_REGULATOR_LEVEL_TURBO;
+ }
+ vdd->num_levels = j;
+ vdd->cur_level = j;
+ vdd->use_max_uV = true;
+ }
+
for (i = 0; i < j; i++)
osm_clks_init[c->cluster_num].rate_max[i] =
c->osm_table[i].frequency;
@@ -964,12 +1073,17 @@
static int clk_cpu_osm_driver_probe(struct platform_device *pdev)
{
- int rc = 0, i;
- u32 val;
+ int rc = 0, i, cpu;
+ bool is_sdm670 = false;
+ u32 *array;
+ u32 val, pte_efuse;
+ void __iomem *vbase;
int num_clks = ARRAY_SIZE(osm_qcom_clk_hws);
struct clk *ext_xo_clk, *clk;
+ struct clk_osm *osm_clk;
struct device *dev = &pdev->dev;
struct clk_onecell_data *clk_data;
+ struct resource *res;
struct cpu_cycle_counter_cb cb = {
.get_cpu_cycle_counter = clk_osm_get_cpu_cycle_counter,
};
@@ -989,8 +1103,68 @@
"qcom,clk-cpu-osm");
if (of_device_is_compatible(pdev->dev.of_node,
- "qcom,clk-cpu-osm-sdm670"))
+ "qcom,clk-cpu-osm-sdm670")) {
+ is_sdm670 = true;
clk_cpu_osm_driver_sdm670_fixup();
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cpr_rc");
+ if (res) {
+ vbase = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!vbase) {
+ dev_err(&pdev->dev, "Unable to map in cpr_rc base\n");
+ return -ENOMEM;
+ }
+ pte_efuse = readl_relaxed(vbase);
+ l3_clk.cpr_rc = pwrcl_clk.cpr_rc = perfcl_clk.cpr_rc =
+ ((pte_efuse >> EFUSE_SHIFT(is_sdm845v1 | is_sdm670))
+ & EFUSE_MASK);
+ pr_info("LOCAL_CPR_RC: %u\n", l3_clk.cpr_rc);
+ devm_iounmap(&pdev->dev, vbase);
+ } else {
+ dev_err(&pdev->dev,
+ "Unable to get platform resource for cpr_rc\n");
+ return -ENOMEM;
+ }
+
+ vdd_l3_mx_ao.regulator[0] = devm_regulator_get(&pdev->dev,
+ "vdd_l3_mx_ao");
+ if (IS_ERR(vdd_l3_mx_ao.regulator[0])) {
+ if (PTR_ERR(vdd_l3_mx_ao.regulator[0]) != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Unable to get vdd_l3_mx_ao regulator\n");
+ return PTR_ERR(vdd_l3_mx_ao.regulator[0]);
+ }
+
+ vdd_pwrcl_mx_ao.regulator[0] = devm_regulator_get(&pdev->dev,
+ "vdd_pwrcl_mx_ao");
+ if (IS_ERR(vdd_pwrcl_mx_ao.regulator[0])) {
+ if (PTR_ERR(vdd_pwrcl_mx_ao.regulator[0]) != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Unable to get vdd_pwrcl_mx_ao regulator\n");
+ return PTR_ERR(vdd_pwrcl_mx_ao.regulator[0]);
+ }
+
+ array = devm_kcalloc(&pdev->dev, MAX_CLUSTER_CNT, sizeof(*array),
+ GFP_KERNEL);
+ if (!array)
+ return -ENOMEM;
+
+ rc = of_property_read_u32_array(pdev->dev.of_node, "qcom,mx-turbo-freq",
+ array, MAX_CLUSTER_CNT);
+ if (rc) {
+ dev_err(&pdev->dev, "unable to find qcom,mx-turbo-freq property, rc=%d\n",
+ rc);
+ devm_kfree(&pdev->dev, array);
+ return rc;
+ }
+
+ l3_clk.mx_turbo_freq = array[l3_clk.cluster_num];
+ pwrcl_clk.mx_turbo_freq = array[pwrcl_clk.cluster_num];
+ perfcl_clk.mx_turbo_freq = array[perfcl_clk.cluster_num];
+
+ devm_kfree(&pdev->dev, array);
clk_data = devm_kzalloc(&pdev->dev, sizeof(struct clk_onecell_data),
GFP_KERNEL);
@@ -1046,6 +1220,16 @@
spin_lock_init(&pwrcl_clk.lock);
spin_lock_init(&perfcl_clk.lock);
+ clk_ops_core = clk_dummy_ops;
+ clk_ops_core.set_rate = clk_cpu_set_rate;
+ clk_ops_core.round_rate = clk_cpu_round_rate;
+ clk_ops_core.recalc_rate = clk_cpu_recalc_rate;
+
+ clk_ops_cpu_osm = clk_dummy_ops;
+ clk_ops_cpu_osm.round_rate = clk_osm_round_rate;
+ clk_ops_cpu_osm.list_rate = clk_osm_list_rate;
+ clk_ops_cpu_osm.debug_init = clk_debug_measure_add;
+
/* Register OSM l3, pwr and perf clocks with Clock Framework */
for (i = 0; i < num_clks; i++) {
if (!osm_qcom_clk_hws[i])
@@ -1076,6 +1260,16 @@
WARN(clk_prepare_enable(l3_misc_vote_clk.hw.clk),
"clk: Failed to enable misc clock for L3\n");
+ /*
+ * Call clk_prepare_enable for the silver clock explicitly in order to
+ * place an implicit vote on MX
+ */
+ for_each_online_cpu(cpu) {
+ osm_clk = logical_cpu_to_clk(cpu);
+ if (!osm_clk)
+ return -EINVAL;
+ clk_prepare_enable(osm_clk->hw.clk);
+ }
populate_opp_table(pdev);
of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
diff --git a/drivers/clk/qcom/clk-pll.h b/drivers/clk/qcom/clk-pll.h
index 9682799..70f7612 100644
--- a/drivers/clk/qcom/clk-pll.h
+++ b/drivers/clk/qcom/clk-pll.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013, 2016-2017, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -83,6 +83,8 @@
u32 aux2_output_mask;
u32 early_output_mask;
u32 config_ctl_val;
+ u32 config_ctl_hi_val;
+ u32 config_ctl_hi1_val;
};
void clk_pll_configure_sr(struct clk_pll *pll, struct regmap *regmap,
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index 60758b4..aaf2324 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -188,4 +188,6 @@
extern int clk_rcg2_get_dfs_clock_rate(struct clk_rcg2 *clk,
struct device *dev, u8 rcg_flags);
+extern unsigned long
+clk_rcg2_calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div);
#endif
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 8d5e527..35bcf5a 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -223,8 +223,8 @@
* rate = ----------- x ---
* hid_div n
*/
-static unsigned long
-calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
+unsigned long
+clk_rcg2_calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
{
if (hid_div) {
rate *= 2;
@@ -240,6 +240,7 @@
return rate;
}
+EXPORT_SYMBOL(clk_rcg2_calc_rate);
static unsigned long
clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
@@ -274,7 +275,7 @@
hid_div = cfg >> CFG_SRC_DIV_SHIFT;
hid_div &= mask;
- return calc_rate(parent_rate, m, n, mode, hid_div);
+ return clk_rcg2_calc_rate(parent_rate, m, n, mode, hid_div);
}
static int _freq_tbl_determine_rate(struct clk_hw *hw,
@@ -764,7 +765,7 @@
hid_div >>= CFG_SRC_DIV_SHIFT;
hid_div &= mask;
- req->rate = calc_rate(req->best_parent_rate,
+ req->rate = clk_rcg2_calc_rate(req->best_parent_rate,
frac->num, frac->den,
!!frac->den, hid_div);
return 0;
@@ -804,7 +805,7 @@
div = DIV_ROUND_UP((2 * parent_rate), req->rate) - 1;
div = min_t(u32, div, mask);
- req->rate = calc_rate(parent_rate, 0, 0, 0, div);
+ req->rate = clk_rcg2_calc_rate(parent_rate, 0, 0, 0, div);
return 0;
}
@@ -862,7 +863,7 @@
div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
div = min_t(u32, div, mask);
- req->rate = calc_rate(parent_rate, 0, 0, 0, div);
+ req->rate = clk_rcg2_calc_rate(parent_rate, 0, 0, 0, div);
return 0;
}
@@ -1318,7 +1319,7 @@
dfs_freq_tbl[i].n = n;
/* calculate the final frequency */
- calc_freq = calc_rate(prate, dfs_freq_tbl[i].m,
+ calc_freq = clk_rcg2_calc_rate(prate, dfs_freq_tbl[i].m,
dfs_freq_tbl[i].n, mode,
dfs_freq_tbl[i].pre_div);
diff --git a/drivers/clk/qcom/clk-regmap-mux-div.h b/drivers/clk/qcom/clk-regmap-mux-div.h
index 63a696a..6cd8d4f 100644
--- a/drivers/clk/qcom/clk-regmap-mux-div.h
+++ b/drivers/clk/qcom/clk-regmap-mux-div.h
@@ -42,6 +42,7 @@
* on and runs at only one rate.
* @parent_map: pointer to parent_map struct
* @clkr: handle between common and hardware-specific interfaces
+ * @clk_nb: clock notifier registered for clock rate change
*/
struct clk_regmap_mux_div {
@@ -57,6 +58,7 @@
unsigned long safe_freq;
const struct parent_map *parent_map;
struct clk_regmap clkr;
+ struct notifier_block clk_nb;
};
extern const struct clk_ops clk_regmap_mux_div_ops;
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index 2109132..1f90d46 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -318,17 +318,30 @@
static const struct of_device_id clk_rpmh_match_table[] = {
{ .compatible = "qcom,rpmh-clk-sdm845", .data = &clk_rpmh_sdm845},
{ .compatible = "qcom,rpmh-clk-sdm670", .data = &clk_rpmh_sdm845},
+ { .compatible = "qcom,rpmh-clk-sdxpoorwills", .data = &clk_rpmh_sdm845},
{ }
};
MODULE_DEVICE_TABLE(of, clk_rpmh_match_table);
-static void clk_rpmh_sdm670_fixup_sdm670(void)
+static void clk_rpmh_sdm670_fixup(void)
{
sdm845_rpmh_clocks[RPMH_RF_CLK3] = NULL;
sdm845_rpmh_clocks[RPMH_RF_CLK3_A] = NULL;
}
-static int clk_rpmh_sdm670_fixup(struct platform_device *pdev)
+static void clk_rpmh_sdxpoorwills_fixup(void)
+{
+ sdm845_rpmh_clocks[RPMH_LN_BB_CLK2] = NULL;
+ sdm845_rpmh_clocks[RPMH_LN_BB_CLK2_A] = NULL;
+ sdm845_rpmh_clocks[RPMH_LN_BB_CLK3] = NULL;
+ sdm845_rpmh_clocks[RPMH_LN_BB_CLK3_A] = NULL;
+ sdm845_rpmh_clocks[RPMH_RF_CLK2] = NULL;
+ sdm845_rpmh_clocks[RPMH_RF_CLK2_A] = NULL;
+ sdm845_rpmh_clocks[RPMH_RF_CLK3] = NULL;
+ sdm845_rpmh_clocks[RPMH_RF_CLK3_A] = NULL;
+}
+
+static int clk_rpmh_fixup(struct platform_device *pdev)
{
const char *compat = NULL;
int compatlen = 0;
@@ -338,7 +351,9 @@
return -EINVAL;
if (!strcmp(compat, "qcom,rpmh-clk-sdm670"))
- clk_rpmh_sdm670_fixup_sdm670();
+ clk_rpmh_sdm670_fixup();
+ else if (!strcmp(compat, "qcom,rpmh-clk-sdxpoorwills"))
+ clk_rpmh_sdxpoorwills_fixup();
return 0;
}
@@ -410,7 +425,7 @@
goto err2;
}
- ret = clk_rpmh_sdm670_fixup(pdev);
+ ret = clk_rpmh_fixup(pdev);
if (ret)
return ret;
diff --git a/drivers/clk/qcom/gcc-sdxpoorwills.c b/drivers/clk/qcom/gcc-sdxpoorwills.c
new file mode 100644
index 0000000..1b5cf61
--- /dev/null
+++ b/drivers/clk/qcom/gcc-sdxpoorwills.c
@@ -0,0 +1,1916 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "clk: %s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,gcc-sdxpoorwills.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+
+#include "clk-alpha-pll.h"
+#include "vdd-level-sdm845.h"
+
+#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
+
+static DEFINE_VDD_REGULATORS(vdd_cx, VDD_CX_NUM, 1, vdd_corner);
+
+enum {
+ P_BI_TCXO,
+ P_CORE_BI_PLL_TEST_SE,
+ P_GPLL0_OUT_EVEN,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL4_OUT_EVEN,
+ P_SLEEP_CLK,
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_0[] = {
+ "bi_tcxo",
+ "gpll0",
+ "gpll0_out_even",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_1[] = {
+ "bi_tcxo",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_2[] = {
+ "bi_tcxo",
+ "gpll0",
+ "core_pi_sleep_clk",
+ "gpll0_out_even",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_3[] = {
+ "bi_tcxo",
+ "core_pi_sleep_clk",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL4_OUT_EVEN, 2 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_4[] = {
+ "bi_tcxo",
+ "gpll0",
+ "gpll4_out_even",
+ "gpll0_out_even",
+ "core_bi_pll_test_se",
+};
+
+static struct pll_vco trion_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
+static struct clk_alpha_pll gpll0 = {
+ .offset = 0x0,
+ .vco_table = trion_vco,
+ .num_vco = ARRAY_SIZE(trion_vco),
+ .type = TRION_PLL,
+ .clkr = {
+ .enable_reg = 0x6d000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0",
+ .parent_names = (const char *[]){ "bi_tcxo" },
+ .num_parents = 1,
+ .ops = &clk_trion_fixed_pll_ops,
+ VDD_CX_FMAX_MAP4(
+ MIN, 615000000,
+ LOW, 1066000000,
+ LOW_L1, 1600000000,
+ NOMINAL, 2000000000),
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_trion_even[] = {
+ { 0x0, 1 },
+ { 0x1, 2 },
+ { 0x3, 4 },
+ { 0x7, 8 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_trion_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_trion_even),
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_even",
+ .parent_names = (const char *[]){ "gpll0" },
+ .num_parents = 1,
+ .ops = &clk_trion_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll4 = {
+ .offset = 0x76000,
+ .vco_table = trion_vco,
+ .num_vco = ARRAY_SIZE(trion_vco),
+ .type = TRION_PLL,
+ .clkr = {
+ .enable_reg = 0x6d000,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll4",
+ .parent_names = (const char *[]){ "bi_tcxo" },
+ .num_parents = 1,
+ .ops = &clk_trion_fixed_pll_ops,
+ VDD_CX_FMAX_MAP4(
+ MIN, 615000000,
+ LOW, 1066000000,
+ LOW_L1, 1600000000,
+ NOMINAL, 2000000000),
+ },
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll4_out_even = {
+ .offset = 0x76000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_trion_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_trion_even),
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll4_out_even",
+ .parent_names = (const char *[]){ "gpll4" },
+ .num_parents = 1,
+ .ops = &clk_trion_pll_postdiv_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_qup1_i2c_apps_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_blsp1_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x11024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP3(
+ MIN, 9600000,
+ LOWER, 19200000,
+ LOW, 50000000),
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_qup1_spi_apps_clk_src[] = {
+ F(960000, P_BI_TCXO, 10, 1, 2),
+ F(4800000, P_BI_TCXO, 4, 0, 0),
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(15000000, P_GPLL0_OUT_EVEN, 5, 1, 4),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(24000000, P_GPLL0_OUT_MAIN, 12.5, 1, 2),
+ F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_blsp1_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x1100c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP4(
+ MIN, 9600000,
+ LOWER, 19200000,
+ LOW, 25000000,
+ NOMINAL, 50000000),
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x13024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP3(
+ MIN, 9600000,
+ LOWER, 19200000,
+ LOW, 50000000),
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0x1300c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP4(
+ MIN, 9600000,
+ LOWER, 19200000,
+ LOW, 25000000,
+ NOMINAL, 50000000),
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x15024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP3(
+ MIN, 9600000,
+ LOWER, 19200000,
+ LOW, 50000000),
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_qup3_spi_apps_clk_src = {
+ .cmd_rcgr = 0x1500c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP4(
+ MIN, 9600000,
+ LOWER, 19200000,
+ LOW, 25000000,
+ NOMINAL, 50000000),
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x17024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP3(
+ MIN, 9600000,
+ LOWER, 19200000,
+ LOW, 50000000),
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x1700c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP4(
+ MIN, 9600000,
+ LOWER, 19200000,
+ LOW, 25000000,
+ NOMINAL, 50000000),
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_uart1_apps_clk_src[] = {
+ F(3686400, P_GPLL0_OUT_EVEN, 1, 192, 15625),
+ F(7372800, P_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(14745600, P_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(16000000, P_GPLL0_OUT_EVEN, 1, 4, 75),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(19354839, P_GPLL0_OUT_MAIN, 15.5, 1, 2),
+ F(20000000, P_GPLL0_OUT_MAIN, 15, 1, 2),
+ F(20689655, P_GPLL0_OUT_MAIN, 14.5, 1, 2),
+ F(21428571, P_GPLL0_OUT_MAIN, 14, 1, 2),
+ F(22222222, P_GPLL0_OUT_MAIN, 13.5, 1, 2),
+ F(23076923, P_GPLL0_OUT_MAIN, 13, 1, 2),
+ F(24000000, P_GPLL0_OUT_MAIN, 5, 1, 5),
+ F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2),
+ F(26086957, P_GPLL0_OUT_MAIN, 11.5, 1, 2),
+ F(27272727, P_GPLL0_OUT_MAIN, 11, 1, 2),
+ F(28571429, P_GPLL0_OUT_MAIN, 10.5, 1, 2),
+ F(32000000, P_GPLL0_OUT_MAIN, 1, 4, 75),
+ F(40000000, P_GPLL0_OUT_MAIN, 15, 0, 0),
+ F(46400000, P_GPLL0_OUT_MAIN, 1, 29, 375),
+ F(48000000, P_GPLL0_OUT_MAIN, 12.5, 0, 0),
+ F(51200000, P_GPLL0_OUT_MAIN, 1, 32, 375),
+ F(56000000, P_GPLL0_OUT_MAIN, 1, 7, 75),
+ F(58982400, P_GPLL0_OUT_MAIN, 1, 1536, 15625),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ F(63157895, P_GPLL0_OUT_MAIN, 9.5, 0, 0),
+ { }
+};
+
+
+static struct clk_rcg2 gcc_blsp1_uart1_apps_clk_src = {
+ .cmd_rcgr = 0x1200c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart1_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP4(
+ MIN, 9600000,
+ LOWER, 19200000,
+ LOW, 48000000,
+ NOMINAL, 63157895),
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_uart2_apps_clk_src = {
+ .cmd_rcgr = 0x1400c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart2_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP4(
+ MIN, 9600000,
+ LOWER, 19200000,
+ LOW, 48000000,
+ NOMINAL, 63157895),
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_uart3_apps_clk_src = {
+ .cmd_rcgr = 0x1600c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart3_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP4(
+ MIN, 9600000,
+ LOWER, 19200000,
+ LOW, 48000000,
+ NOMINAL, 63157895),
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_uart4_apps_clk_src = {
+ .cmd_rcgr = 0x1800c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart4_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP4(
+ MIN, 9600000,
+ LOWER, 19200000,
+ LOW, 48000000,
+ NOMINAL, 63157895),
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
+ .cmd_rcgr = 0x24010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_ahb_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP4(
+ MIN, 19200000,
+ LOWER, 50000000,
+ NOMINAL, 100000000,
+ HIGH, 133333333),
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_cpuss_rbcpr_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_cpuss_rbcpr_clk_src = {
+ .cmd_rcgr = 0x2402c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_rbcpr_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP2(
+ MIN, 19200000,
+ NOMINAL, 50000000),
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_emac_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(125000000, P_GPLL4_OUT_EVEN, 4, 0, 0),
+ F(250000000, P_GPLL4_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_emac_clk_src = {
+ .cmd_rcgr = 0x47020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_emac_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_emac_clk_src",
+ .parent_names = gcc_parent_names_4,
+ .num_parents = 5,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP4(
+ MIN, 19200000,
+ LOWER, 50000000,
+ LOW, 125000000,
+ NOMINAL, 250000000),
+ },
+};
+
+static struct clk_rcg2 gcc_emac_ptp_clk_src = {
+ .cmd_rcgr = 0x47038,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_emac_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_emac_ptp_clk_src",
+ .parent_names = gcc_parent_names_4,
+ .num_parents = 5,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP4(
+ MIN, 19200000,
+ LOWER, 50000000,
+ LOW, 125000000,
+ NOMINAL, 250000000),
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x2b004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk_src",
+ .parent_names = gcc_parent_names_2,
+ .num_parents = 5,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP4(
+ MIN, 19200000,
+ LOWER, 50000000,
+ LOW, 100000000,
+ NOMINAL, 200000000),
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x2c004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk_src",
+ .parent_names = gcc_parent_names_2,
+ .num_parents = 5,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP4(
+ MIN, 19200000,
+ LOWER, 50000000,
+ LOW, 100000000,
+ NOMINAL, 200000000),
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x2d004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk_src",
+ .parent_names = gcc_parent_names_2,
+ .num_parents = 5,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP4(
+ MIN, 19200000,
+ LOWER, 50000000,
+ LOW, 100000000,
+ NOMINAL, 200000000),
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_aux_phy_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_aux_phy_clk_src = {
+ .cmd_rcgr = 0x37030,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_pcie_aux_phy_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_aux_phy_clk_src",
+ .parent_names = gcc_parent_names_3,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP1(
+ MIN, 19200000),
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_phy_refgen_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_phy_refgen_clk_src = {
+ .cmd_rcgr = 0x39010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_phy_refgen_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_phy_refgen_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP2(
+ MIN, 19200000,
+ LOW, 100000000),
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x19010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP3(
+ MIN, 9600000,
+ LOWER, 19200000,
+ LOW, 60000000),
+ },
+};
+
+static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0xf00c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP4(
+ MIN, 19200000,
+ LOWER, 50000000,
+ LOW, 100000000,
+ NOMINAL, 200000000),
+ },
+};
+
+static struct clk_rcg2 gcc_spmi_fetcher_clk_src = {
+ .cmd_rcgr = 0x3f00c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_pcie_aux_phy_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_spmi_fetcher_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP1(
+ MIN, 19200000),
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_master_clk_src[] = {
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_master_clk_src = {
+ .cmd_rcgr = 0xb01c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_master_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP5(
+ MIN, 50000000,
+ LOWER, 75000000,
+ LOW, 100000000,
+ NOMINAL, 200000000,
+ HIGH, 240000000),
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_mock_utmi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(40000000, P_GPLL0_OUT_EVEN, 7.5, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xb034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_mock_utmi_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP3(
+ MIN, 19200000,
+ LOWER, 40000000,
+ LOW, 60000000),
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb3_phy_aux_clk_src[] = {
+ F(1000000, P_BI_TCXO, 1, 5, 96),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb3_phy_aux_clk_src = {
+ .cmd_rcgr = 0xb05c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_usb3_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_phy_aux_clk_src",
+ .parent_names = gcc_parent_names_3,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP1(
+ MIN, 19200000),
+ },
+};
+
+static struct clk_branch gcc_blsp1_ahb_clk = {
+ .halt_reg = 0x10004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6d004,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
+ .halt_reg = 0x11008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x11008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "gcc_blsp1_qup1_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
+ .halt_reg = 0x11004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x11004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "gcc_blsp1_qup1_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
+ .halt_reg = 0x13008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "gcc_blsp1_qup2_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
+ .halt_reg = 0x13004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "gcc_blsp1_qup2_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
+ .halt_reg = 0x15008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "gcc_blsp1_qup3_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
+ .halt_reg = 0x15004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "gcc_blsp1_qup3_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
+ .halt_reg = 0x17008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "gcc_blsp1_qup4_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
+ .halt_reg = 0x17004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "gcc_blsp1_qup4_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_sleep_clk = {
+ .halt_reg = 0x10008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6d004,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart1_apps_clk = {
+ .halt_reg = 0x12004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart1_apps_clk",
+ .parent_names = (const char *[]){
+ "gcc_blsp1_uart1_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart2_apps_clk = {
+ .halt_reg = 0x14004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart2_apps_clk",
+ .parent_names = (const char *[]){
+ "gcc_blsp1_uart2_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart3_apps_clk = {
+ .halt_reg = 0x16004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart3_apps_clk",
+ .parent_names = (const char *[]){
+ "gcc_blsp1_uart3_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart4_apps_clk = {
+ .halt_reg = 0x18004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x18004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart4_apps_clk",
+ .parent_names = (const char *[]){
+ "gcc_blsp1_uart4_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x1c004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1c004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6d004,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_ahb_clk = {
+ .halt_reg = 0x2100c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2100c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6d004,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_axi_clk = {
+ .halt_reg = 0x21008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6d004,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_clk = {
+ .halt_reg = 0x21004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6d004,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cpuss_ahb_clk = {
+ .halt_reg = 0x24000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6d004,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_ahb_clk",
+ .parent_names = (const char *[]){
+ "gcc_cpuss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cpuss_gnoc_clk = {
+ .halt_reg = 0x24004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x24004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6d004,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_gnoc_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cpuss_rbcpr_clk = {
+ .halt_reg = 0x24008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x24008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_rbcpr_clk",
+ .parent_names = (const char *[]){
+ "gcc_cpuss_rbcpr_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eth_axi_clk = {
+ .halt_reg = 0x4701c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_eth_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eth_ptp_clk = {
+ .halt_reg = 0x47018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x47018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_eth_ptp_clk",
+ .parent_names = (const char *[]){
+ "gcc_emac_ptp_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eth_rgmii_clk = {
+ .halt_reg = 0x47010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x47010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_eth_rgmii_clk",
+ .parent_names = (const char *[]){
+ "gcc_emac_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eth_slave_ahb_clk = {
+ .halt_reg = 0x47014,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x47014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x47014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_eth_slave_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x2b000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_names = (const char *[]){
+ "gcc_gp1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x2c000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2c000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_names = (const char *[]){
+ "gcc_gp2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x2d000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2d000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_names = (const char *[]){
+ "gcc_gp3_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_cfg_ahb_clk = {
+ .halt_reg = 0x40000,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x40000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x40000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_gate2 gcc_mss_gpll0_div_clk_src = {
+ .udelay = 500,
+ .clkr = {
+ .enable_reg = 0x6d004,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_gpll0_div_clk_src",
+ .ops = &clk_gate2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_snoc_axi_clk = {
+ .halt_reg = 0x40148,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x40148,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_snoc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_aux_clk = {
+ .halt_reg = 0x37020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6d00c,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_aux_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_cfg_ahb_clk = {
+ .halt_reg = 0x3701c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3701c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6d00c,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_mstr_axi_clk = {
+ .halt_reg = 0x37018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6d00c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_refgen_clk = {
+ .halt_reg = 0x39028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_phy_refgen_clk",
+ .parent_names = (const char *[]){
+ "gcc_pcie_phy_refgen_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_pipe_clk = {
+ .halt_reg = 0x37028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6d00c,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_sleep_clk = {
+ .halt_reg = 0x37024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6d00c,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_sleep_clk",
+ .parent_names = (const char *[]){
+ "gcc_pcie_aux_phy_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_slv_axi_clk = {
+ .halt_reg = 0x37014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x37014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6d00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_slv_q2a_axi_clk = {
+ .halt_reg = 0x37010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6d00c,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x1900c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1900c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk",
+ .parent_names = (const char *[]){
+ "gcc_pdm2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x19004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x19004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x19004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x19008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x19008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x1a004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6d004,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_prng_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0xf008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0xf004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_names = (const char *[]){
+ "gcc_sdcc1_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_spmi_fetcher_ahb_clk = {
+ .halt_reg = 0x3f008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3f008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_spmi_fetcher_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_spmi_fetcher_clk = {
+ .halt_reg = 0x3f004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3f004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_spmi_fetcher_clk",
+ .parent_names = (const char *[]){
+ "gcc_spmi_fetcher_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = {
+ .halt_reg = 0x400c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6d004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sys_noc_cpuss_ahb_clk",
+ .parent_names = (const char *[]){
+ "gcc_cpuss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_usb3_clk = {
+ .halt_reg = 0x4018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sys_noc_usb3_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_master_clk = {
+ .halt_reg = 0xb010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_master_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mock_utmi_clk = {
+ .halt_reg = 0xb018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_mock_utmi_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_mock_utmi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sleep_clk = {
+ .halt_reg = 0xb014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_phy_aux_clk = {
+ .halt_reg = 0xb050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_phy_aux_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb3_phy_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_phy_pipe_clk = {
+ .halt_reg = 0xb054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = {
+ .halt_reg = 0xe004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xe004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_phy_cfg_ahb2phy_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap *gcc_sdxpoorwills_clocks[] = {
+ [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr,
+ [GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP1_I2C_APPS_CLK_SRC] =
+ &gcc_blsp1_qup1_i2c_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP1_SPI_APPS_CLK_SRC] =
+ &gcc_blsp1_qup1_spi_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_I2C_APPS_CLK_SRC] =
+ &gcc_blsp1_qup2_i2c_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_SPI_APPS_CLK_SRC] =
+ &gcc_blsp1_qup2_spi_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_I2C_APPS_CLK_SRC] =
+ &gcc_blsp1_qup3_i2c_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_SPI_APPS_CLK_SRC] =
+ &gcc_blsp1_qup3_spi_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP4_I2C_APPS_CLK] = &gcc_blsp1_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_I2C_APPS_CLK_SRC] =
+ &gcc_blsp1_qup4_i2c_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_SPI_APPS_CLK_SRC] =
+ &gcc_blsp1_qup4_spi_apps_clk_src.clkr,
+ [GCC_BLSP1_SLEEP_CLK] = &gcc_blsp1_sleep_clk.clkr,
+ [GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr,
+ [GCC_BLSP1_UART1_APPS_CLK_SRC] = &gcc_blsp1_uart1_apps_clk_src.clkr,
+ [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr,
+ [GCC_BLSP1_UART2_APPS_CLK_SRC] = &gcc_blsp1_uart2_apps_clk_src.clkr,
+ [GCC_BLSP1_UART3_APPS_CLK] = &gcc_blsp1_uart3_apps_clk.clkr,
+ [GCC_BLSP1_UART3_APPS_CLK_SRC] = &gcc_blsp1_uart3_apps_clk_src.clkr,
+ [GCC_BLSP1_UART4_APPS_CLK] = &gcc_blsp1_uart4_apps_clk.clkr,
+ [GCC_BLSP1_UART4_APPS_CLK_SRC] = &gcc_blsp1_uart4_apps_clk_src.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CE1_AHB_CLK] = &gcc_ce1_ahb_clk.clkr,
+ [GCC_CE1_AXI_CLK] = &gcc_ce1_axi_clk.clkr,
+ [GCC_CE1_CLK] = &gcc_ce1_clk.clkr,
+ [GCC_CPUSS_AHB_CLK] = &gcc_cpuss_ahb_clk.clkr,
+ [GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr,
+ [GCC_CPUSS_GNOC_CLK] = &gcc_cpuss_gnoc_clk.clkr,
+ [GCC_CPUSS_RBCPR_CLK] = &gcc_cpuss_rbcpr_clk.clkr,
+ [GCC_CPUSS_RBCPR_CLK_SRC] = &gcc_cpuss_rbcpr_clk_src.clkr,
+ [GCC_EMAC_CLK_SRC] = &gcc_emac_clk_src.clkr,
+ [GCC_EMAC_PTP_CLK_SRC] = &gcc_emac_ptp_clk_src.clkr,
+ [GCC_ETH_AXI_CLK] = &gcc_eth_axi_clk.clkr,
+ [GCC_ETH_PTP_CLK] = &gcc_eth_ptp_clk.clkr,
+ [GCC_ETH_RGMII_CLK] = &gcc_eth_rgmii_clk.clkr,
+ [GCC_ETH_SLAVE_AHB_CLK] = &gcc_eth_slave_ahb_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr,
+ [GCC_MSS_GPLL0_DIV_CLK_SRC] = &gcc_mss_gpll0_div_clk_src.clkr,
+ [GCC_MSS_SNOC_AXI_CLK] = &gcc_mss_snoc_axi_clk.clkr,
+ [GCC_PCIE_AUX_CLK] = &gcc_pcie_aux_clk.clkr,
+ [GCC_PCIE_AUX_PHY_CLK_SRC] = &gcc_pcie_aux_phy_clk_src.clkr,
+ [GCC_PCIE_CFG_AHB_CLK] = &gcc_pcie_cfg_ahb_clk.clkr,
+ [GCC_PCIE_MSTR_AXI_CLK] = &gcc_pcie_mstr_axi_clk.clkr,
+ [GCC_PCIE_PHY_REFGEN_CLK] = &gcc_pcie_phy_refgen_clk.clkr,
+ [GCC_PCIE_PHY_REFGEN_CLK_SRC] = &gcc_pcie_phy_refgen_clk_src.clkr,
+ [GCC_PCIE_PIPE_CLK] = &gcc_pcie_pipe_clk.clkr,
+ [GCC_PCIE_SLEEP_CLK] = &gcc_pcie_sleep_clk.clkr,
+ [GCC_PCIE_SLV_AXI_CLK] = &gcc_pcie_slv_axi_clk.clkr,
+ [GCC_PCIE_SLV_Q2A_AXI_CLK] = &gcc_pcie_slv_q2a_axi_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr,
+ [GCC_SPMI_FETCHER_AHB_CLK] = &gcc_spmi_fetcher_ahb_clk.clkr,
+ [GCC_SPMI_FETCHER_CLK] = &gcc_spmi_fetcher_clk.clkr,
+ [GCC_SPMI_FETCHER_CLK_SRC] = &gcc_spmi_fetcher_clk_src.clkr,
+ [GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr,
+ [GCC_SYS_NOC_USB3_CLK] = &gcc_sys_noc_usb3_clk.clkr,
+ [GCC_USB30_MASTER_CLK] = &gcc_usb30_master_clk.clkr,
+ [GCC_USB30_MASTER_CLK_SRC] = &gcc_usb30_master_clk_src.clkr,
+ [GCC_USB30_MOCK_UTMI_CLK] = &gcc_usb30_mock_utmi_clk.clkr,
+ [GCC_USB30_MOCK_UTMI_CLK_SRC] = &gcc_usb30_mock_utmi_clk_src.clkr,
+ [GCC_USB30_SLEEP_CLK] = &gcc_usb30_sleep_clk.clkr,
+ [GCC_USB3_PHY_AUX_CLK] = &gcc_usb3_phy_aux_clk.clkr,
+ [GCC_USB3_PHY_AUX_CLK_SRC] = &gcc_usb3_phy_aux_clk_src.clkr,
+ [GCC_USB3_PHY_PIPE_CLK] = &gcc_usb3_phy_pipe_clk.clkr,
+ [GCC_USB_PHY_CFG_AHB2PHY_CLK] = &gcc_usb_phy_cfg_ahb2phy_clk.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [GPLL0_OUT_EVEN] = &gpll0_out_even.clkr,
+ [GPLL4] = &gpll4.clkr,
+ [GPLL4_OUT_EVEN] = &gpll4_out_even.clkr,
+};
+
+static const struct qcom_reset_map gcc_sdxpoorwills_resets[] = {
+ [GCC_BLSP1_QUP1_BCR] = { 0x11000 },
+ [GCC_BLSP1_QUP2_BCR] = { 0x13000 },
+ [GCC_BLSP1_QUP3_BCR] = { 0x15000 },
+ [GCC_BLSP1_QUP4_BCR] = { 0x17000 },
+ [GCC_BLSP1_UART2_BCR] = { 0x14000 },
+ [GCC_BLSP1_UART3_BCR] = { 0x16000 },
+ [GCC_BLSP1_UART4_BCR] = { 0x18000 },
+ [GCC_CE1_BCR] = { 0x21000 },
+ [GCC_EMAC_BCR] = { 0x47000 },
+ [GCC_PCIE_BCR] = { 0x37000 },
+ [GCC_PCIE_PHY_BCR] = { 0x39000 },
+ [GCC_PDM_BCR] = { 0x19000 },
+ [GCC_PRNG_BCR] = { 0x1a000 },
+ [GCC_SDCC1_BCR] = { 0xf000 },
+ [GCC_SPMI_FETCHER_BCR] = { 0x3f000 },
+ [GCC_USB30_BCR] = { 0xb000 },
+ [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0xe000 },
+};
+
+
+static const struct regmap_config gcc_sdxpoorwills_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x9b040,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_sdxpoorwills_desc = {
+ .config = &gcc_sdxpoorwills_regmap_config,
+ .clks = gcc_sdxpoorwills_clocks,
+ .num_clks = ARRAY_SIZE(gcc_sdxpoorwills_clocks),
+ .resets = gcc_sdxpoorwills_resets,
+ .num_resets = ARRAY_SIZE(gcc_sdxpoorwills_resets),
+};
+
+static const struct of_device_id gcc_sdxpoorwills_match_table[] = {
+ { .compatible = "qcom,gcc-sdxpoorwills" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_sdxpoorwills_match_table);
+
+static int gcc_sdxpoorwills_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &gcc_sdxpoorwills_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ vdd_cx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_cx");
+ if (IS_ERR(vdd_cx.regulator[0])) {
+ if (!(PTR_ERR(vdd_cx.regulator[0]) == -EPROBE_DEFER))
+ dev_err(&pdev->dev,
+ "Unable to get vdd_cx regulator\n");
+ return PTR_ERR(vdd_cx.regulator[0]);
+ }
+
+ ret = qcom_cc_really_probe(pdev, &gcc_sdxpoorwills_desc, regmap);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register GCC clocks\n");
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "Registered GCC clocks\n");
+
+ return ret;
+}
+
+static struct platform_driver gcc_sdxpoorwills_driver = {
+ .probe = gcc_sdxpoorwills_probe,
+ .driver = {
+ .name = "gcc-sdxpoorwills",
+ .of_match_table = gcc_sdxpoorwills_match_table,
+ },
+};
+
+static int __init gcc_sdxpoorwills_init(void)
+{
+ return platform_driver_register(&gcc_sdxpoorwills_driver);
+}
+core_initcall(gcc_sdxpoorwills_init);
+
+static void __exit gcc_sdxpoorwills_exit(void)
+{
+ platform_driver_unregister(&gcc_sdxpoorwills_driver);
+}
+module_exit(gcc_sdxpoorwills_exit);
+
+MODULE_DESCRIPTION("QTI GCC SDXPOORWILLS Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:gcc-sdxpoorwills");
diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c
index ea16086..2fe0573 100644
--- a/drivers/clk/samsung/clk-exynos5433.c
+++ b/drivers/clk/samsung/clk-exynos5433.c
@@ -2559,8 +2559,10 @@
FRATE(0, "phyclk_mipidphy1_bitclkdiv8_phy", NULL, 0, 188000000),
FRATE(0, "phyclk_mipidphy1_rxclkesc0_phy", NULL, 0, 100000000),
/* PHY clocks from MIPI_DPHY0 */
- FRATE(0, "phyclk_mipidphy0_bitclkdiv8_phy", NULL, 0, 188000000),
- FRATE(0, "phyclk_mipidphy0_rxclkesc0_phy", NULL, 0, 100000000),
+ FRATE(CLK_PHYCLK_MIPIDPHY0_BITCLKDIV8_PHY, "phyclk_mipidphy0_bitclkdiv8_phy",
+ NULL, 0, 188000000),
+ FRATE(CLK_PHYCLK_MIPIDPHY0_RXCLKESC0_PHY, "phyclk_mipidphy0_rxclkesc0_phy",
+ NULL, 0, 100000000),
/* PHY clocks from HDMI_PHY */
FRATE(CLK_PHYCLK_HDMIPHY_TMDS_CLKO_PHY, "phyclk_hdmiphy_tmds_clko_phy",
NULL, 0, 300000000),
diff --git a/drivers/clk/sunxi-ng/ccu_common.c b/drivers/clk/sunxi-ng/ccu_common.c
index 51d4bac..01d0594 100644
--- a/drivers/clk/sunxi-ng/ccu_common.c
+++ b/drivers/clk/sunxi-ng/ccu_common.c
@@ -70,6 +70,11 @@
goto err_clk_unreg;
reset = kzalloc(sizeof(*reset), GFP_KERNEL);
+ if (!reset) {
+ ret = -ENOMEM;
+ goto err_alloc_reset;
+ }
+
reset->rcdev.of_node = node;
reset->rcdev.ops = &ccu_reset_ops;
reset->rcdev.owner = THIS_MODULE;
@@ -85,6 +90,16 @@
return 0;
err_of_clk_unreg:
+ kfree(reset);
+err_alloc_reset:
+ of_clk_del_provider(node);
err_clk_unreg:
+ while (--i >= 0) {
+ struct clk_hw *hw = desc->hw_clks->hws[i];
+
+ if (!hw)
+ continue;
+ clk_hw_unregister(hw);
+ }
return ret;
}
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 062d297..e8c7af52 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1245,8 +1245,6 @@
if (new_policy) {
/* related_cpus should at least include policy->cpus. */
cpumask_copy(policy->related_cpus, policy->cpus);
- /* Clear mask of registered CPUs */
- cpumask_clear(policy->real_cpus);
}
/*
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index fc1b4e4..03fce64 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -39,6 +39,7 @@
#include <soc/qcom/event_timer.h>
#include <soc/qcom/lpm-stats.h>
#include <soc/qcom/system_pm.h>
+#include <soc/qcom/minidump.h>
#include <asm/arch_timer.h>
#include <asm/suspend.h>
#include <asm/cpuidle.h>
@@ -121,9 +122,6 @@
const struct cpumask *cpu, int child_idx, bool from_idle,
int64_t time);
-static bool menu_select;
-module_param_named(menu_select, menu_select, bool, 0664);
-
static int msm_pm_sleep_time_override;
module_param_named(sleep_time_override,
msm_pm_sleep_time_override, int, 0664);
@@ -981,7 +979,7 @@
if (suspend_in_progress && from_idle && level->notify_rpm)
continue;
- if (level->is_reset && !system_sleep_allowed())
+ if (level->notify_rpm && !system_sleep_allowed())
continue;
best_level = i;
@@ -1641,6 +1639,7 @@
int ret;
int size;
struct kobject *module_kobj = NULL;
+ struct md_region md_entry;
get_online_cpus();
lpm_root_node = lpm_of_parse_cluster(pdev);
@@ -1697,6 +1696,14 @@
goto failed;
}
+ /* Add lpm_debug to Minidump*/
+ strlcpy(md_entry.name, "KLPMDEBUG", sizeof(md_entry.name));
+ md_entry.virt_addr = (uintptr_t)lpm_debug;
+ md_entry.phys_addr = lpm_debug_phys;
+ md_entry.size = size;
+ if (msm_minidump_add_region(&md_entry))
+ pr_info("Failed to add lpm_debug in Minidump\n");
+
return 0;
failed:
free_cluster_node(lpm_root_node);
diff --git a/drivers/cpuidle/lpm-levels.h b/drivers/cpuidle/lpm-levels.h
index 71416f7..2f7a55d 100644
--- a/drivers/cpuidle/lpm-levels.h
+++ b/drivers/cpuidle/lpm-levels.h
@@ -33,7 +33,6 @@
struct power_params pwr;
unsigned int psci_id;
bool is_reset;
- bool hyp_psci;
int reset_level;
};
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index 17b19a6..71980c4 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -278,8 +278,7 @@
CCP_AES_ENCRYPT(&function) = op->u.aes.action;
CCP_AES_MODE(&function) = op->u.aes.mode;
CCP_AES_TYPE(&function) = op->u.aes.type;
- if (op->u.aes.mode == CCP_AES_MODE_CFB)
- CCP_AES_SIZE(&function) = 0x7f;
+ CCP_AES_SIZE(&function) = op->u.aes.size;
CCP5_CMD_FUNCTION(&desc) = function.raw;
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index e23c36c..347b771 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -470,6 +470,7 @@
enum ccp_aes_type type;
enum ccp_aes_mode mode;
enum ccp_aes_action action;
+ unsigned int size;
};
struct ccp_xts_aes_op {
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 64deb00..7d4cd51 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -692,6 +692,14 @@
goto e_ctx;
}
}
+ switch (aes->mode) {
+ case CCP_AES_MODE_CFB: /* CFB128 only */
+ case CCP_AES_MODE_CTR:
+ op.u.aes.size = AES_BLOCK_SIZE * BITS_PER_BYTE - 1;
+ break;
+ default:
+ op.u.aes.size = 0;
+ }
/* Prepare the input and output data workareas. For in-place
* operations we need to set the dma direction to BIDIRECTIONAL
diff --git a/drivers/crypto/msm/ice.c b/drivers/crypto/msm/ice.c
index 6fa91ae..182097c 100644
--- a/drivers/crypto/msm/ice.c
+++ b/drivers/crypto/msm/ice.c
@@ -25,26 +25,8 @@
#include <soc/qcom/scm.h>
#include <soc/qcom/qseecomi.h>
#include "iceregs.h"
-
-#ifdef CONFIG_PFK
#include <linux/pfk.h>
-#else
-#include <linux/bio.h>
-static inline int pfk_load_key_start(const struct bio *bio,
- struct ice_crypto_setting *ice_setting, bool *is_pfe, bool async)
-{
- return 0;
-}
-static inline int pfk_load_key_end(const struct bio *bio, bool *is_pfe)
-{
- return 0;
-}
-
-static inline void pfk_clear_on_reset(void)
-{
-}
-#endif
#define TZ_SYSCALL_CREATE_SMC_ID(o, s, f) \
((uint32_t)((((o & 0x3f) << 24) | (s & 0xff) << 8) | (f & 0xff)))
@@ -144,6 +126,9 @@
return -EPERM;
}
+ if (!setting)
+ return -EINVAL;
+
if ((short)(crypto_data->key_index) >= 0) {
memcpy(&setting->crypto_data, crypto_data,
@@ -1451,7 +1436,7 @@
int ret = 0;
bool is_pfe = false;
- if (!pdev || !req || !setting) {
+ if (!pdev || !req) {
pr_err("%s: Invalid params passed\n", __func__);
return -EINVAL;
}
@@ -1470,6 +1455,7 @@
/* It is not an error to have a request with no bio */
return 0;
}
+ //pr_err("%s bio is %pK\n", __func__, req->bio);
ret = pfk_load_key_start(req->bio, &pfk_crypto_data, &is_pfe, async);
if (is_pfe) {
@@ -1633,7 +1619,7 @@
list_for_each_entry(ice_dev, &ice_devices, list) {
if (!strcmp(ice_dev->ice_instance_type, storage_type)) {
- pr_info("%s: found ice device %p\n", __func__, ice_dev);
+ pr_debug("%s: ice device %pK\n", __func__, ice_dev);
return ice_dev;
}
}
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index 38ed10d..7cf6d31 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -80,11 +80,13 @@
int ret;
struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
+ preempt_disable();
pagefault_disable();
enable_kernel_vsx();
ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
disable_kernel_vsx();
pagefault_enable();
+ preempt_enable();
ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
return ret;
@@ -99,11 +101,13 @@
u8 *dst = walk->dst.virt.addr;
unsigned int nbytes = walk->nbytes;
+ preempt_disable();
pagefault_disable();
enable_kernel_vsx();
aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key);
disable_kernel_vsx();
pagefault_enable();
+ preempt_enable();
crypto_xor(keystream, src, nbytes);
memcpy(dst, keystream, nbytes);
@@ -132,6 +136,7 @@
blkcipher_walk_init(&walk, dst, src, nbytes);
ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
+ preempt_disable();
pagefault_disable();
enable_kernel_vsx();
aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr,
@@ -143,6 +148,7 @@
walk.iv);
disable_kernel_vsx();
pagefault_enable();
+ preempt_enable();
/* We need to update IV mostly for last bytes/round */
inc = (nbytes & AES_BLOCK_MASK) / AES_BLOCK_SIZE;
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index ee181c5..6e197c1 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -2984,8 +2984,11 @@
int err = -ENODEV;
int i;
+ if (!x86_match_cpu(amd64_cpuids))
+ return -ENODEV;
+
if (amd_cache_northbridges() < 0)
- goto err_ret;
+ return -ENODEV;
opstate_init();
@@ -2998,14 +3001,16 @@
if (!msrs)
goto err_free;
- for (i = 0; i < amd_nb_num(); i++)
- if (probe_one_instance(i)) {
+ for (i = 0; i < amd_nb_num(); i++) {
+ err = probe_one_instance(i);
+ if (err) {
/* unwind properly */
while (--i >= 0)
remove_one_instance(i);
goto err_pci;
}
+ }
setup_pci_device();
@@ -3025,7 +3030,6 @@
kfree(ecc_stngs);
ecc_stngs = NULL;
-err_ret:
return err;
}
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index c088704..dcb5f94 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/edac.h>
+#include <asm/cpu_device_id.h>
#include <asm/msr.h>
#include "edac_core.h"
#include "mce_amd.h"
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 9338ff7..642fa03 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -1206,6 +1206,8 @@
tristate "Microchip MCP23xxx I/O expander"
depends on OF_GPIO
select GPIOLIB_IRQCHIP
+ select REGMAP_I2C if I2C
+ select REGMAP if SPI_MASTER
help
SPI/I2C driver for Microchip MCP23S08/MCP23S17/MCP23008/MCP23017
I/O expanders.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 7fe8fd8..743a12d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -315,6 +315,10 @@
amdgpu_dpm_enable_vce(adev, false);
} else {
amdgpu_asic_set_vce_clocks(adev, 0, 0);
+ amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_GATE);
+ amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_GATE);
}
} else {
schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
@@ -340,6 +344,11 @@
amdgpu_dpm_enable_vce(adev, true);
} else {
amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
+ amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_UNGATE);
+ amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_UNGATE);
+
}
}
mutex_unlock(&adev->vce.idle_mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index ab3df6d..3f445df91 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -89,6 +89,10 @@
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (!(adev->flags & AMD_IS_APU) &&
+ (RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK))
+ return -ENOENT;
+
uvd_v6_0_set_ring_funcs(adev);
uvd_v6_0_set_irq_funcs(adev);
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index afe0480..8b009b5 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -182,7 +182,8 @@
/* setup the rotation and axis flip bits */
if (plane->state->rotation & DRM_ROTATE_MASK)
- val = ilog2(plane->state->rotation & DRM_ROTATE_MASK) << LAYER_ROT_OFFSET;
+ val |= ilog2(plane->state->rotation & DRM_ROTATE_MASK) <<
+ LAYER_ROT_OFFSET;
if (plane->state->rotation & DRM_REFLECT_X)
val |= LAYER_H_FLIP;
if (plane->state->rotation & DRM_REFLECT_Y)
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 213d892..a68f94d 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -325,7 +325,7 @@
adv7511->rgb = config->input_colorspace == HDMI_COLORSPACE_RGB;
}
-static void adv7511_power_on(struct adv7511 *adv7511)
+static void __adv7511_power_on(struct adv7511 *adv7511)
{
adv7511->current_edid_segment = -1;
@@ -354,6 +354,11 @@
regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
ADV7511_REG_POWER2_HPD_SRC_MASK,
ADV7511_REG_POWER2_HPD_SRC_NONE);
+}
+
+static void adv7511_power_on(struct adv7511 *adv7511)
+{
+ __adv7511_power_on(adv7511);
/*
* Most of the registers are reset during power down or when HPD is low.
@@ -362,21 +367,23 @@
if (adv7511->type == ADV7533)
adv7533_dsi_power_on(adv7511);
-
adv7511->powered = true;
}
-static void adv7511_power_off(struct adv7511 *adv7511)
+static void __adv7511_power_off(struct adv7511 *adv7511)
{
/* TODO: setup additional power down modes */
regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
ADV7511_POWER_POWER_DOWN,
ADV7511_POWER_POWER_DOWN);
regcache_mark_dirty(adv7511->regmap);
+}
+static void adv7511_power_off(struct adv7511 *adv7511)
+{
+ __adv7511_power_off(adv7511);
if (adv7511->type == ADV7533)
adv7533_dsi_power_off(adv7511);
-
adv7511->powered = false;
}
@@ -567,23 +574,20 @@
/* Reading the EDID only works if the device is powered */
if (!adv7511->powered) {
- regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
- ADV7511_POWER_POWER_DOWN, 0);
- if (adv7511->i2c_main->irq) {
- regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
- ADV7511_INT0_EDID_READY);
- regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
- ADV7511_INT1_DDC_ERROR);
- }
- adv7511->current_edid_segment = -1;
+ unsigned int edid_i2c_addr =
+ (adv7511->i2c_main->addr << 1) + 4;
+
+ __adv7511_power_on(adv7511);
+
+ /* Reset the EDID_I2C_ADDR register as it might be cleared */
+ regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR,
+ edid_i2c_addr);
}
edid = drm_do_get_edid(connector, adv7511_get_edid_block, adv7511);
if (!adv7511->powered)
- regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
- ADV7511_POWER_POWER_DOWN,
- ADV7511_POWER_POWER_DOWN);
+ __adv7511_power_off(adv7511);
kfree(adv7511->edid);
adv7511->edid = edid;
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 362b8cd..80a903b 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -218,7 +218,7 @@
ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
if (ret) {
DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
- return ret;
+ goto err_debugfs;
}
ret = device_add(minor->kdev);
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 76a1e43..d9a5762 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -360,6 +360,7 @@
if (dsi->mode_flags & MIPI_DSI_MODE_LPM)
msg->flags |= MIPI_DSI_MSG_USE_LPM;
+ msg->flags |= MIPI_DSI_MSG_LASTCOMMAND;
return ops->transfer(dsi->host, msg);
}
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
index ef80ec6..9b79a5b 100644
--- a/drivers/gpu/drm/drm_property.c
+++ b/drivers/gpu/drm/drm_property.c
@@ -557,7 +557,7 @@
if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob))
return ERR_PTR(-EINVAL);
- blob = vmalloc(sizeof(struct drm_property_blob)+length);
+ blob = vzalloc(sizeof(struct drm_property_blob)+length);
if (!blob)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index fbd13fa..603d842 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1193,6 +1193,17 @@
if (!node)
return -ENOMEM;
+ /*
+ * To avoid an integer overflow for the later size computations, we
+ * enforce a maximum number of submitted commands here. This limit is
+ * sufficient for all conceivable usage cases of the G2D.
+ */
+ if (req->cmd_nr > G2D_CMDLIST_DATA_NUM ||
+ req->cmd_buf_nr > G2D_CMDLIST_DATA_NUM) {
+ dev_err(dev, "number of submitted G2D commands exceeds limit\n");
+ return -EINVAL;
+ }
+
node->event = NULL;
if (req->event_type != G2D_EVENT_NOT) {
@@ -1250,7 +1261,11 @@
cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF;
}
- /* Check size of cmdlist: last 2 is about G2D_BITBLT_START */
+ /*
+ * Check the size of cmdlist. The 2 that is added last comes from
+ * the implicit G2D_BITBLT_START that is appended once we have
+ * checked all the submitted commands.
+ */
size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2;
if (size > G2D_CMDLIST_DATA_NUM) {
dev_err(dev, "cmdlist size is too big\n");
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_tcon.c b/drivers/gpu/drm/fsl-dcu/fsl_tcon.c
index 3194e54..faacc81 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_tcon.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_tcon.c
@@ -89,9 +89,13 @@
goto err_node_put;
}
- of_node_put(np);
- clk_prepare_enable(tcon->ipg_clk);
+ ret = clk_prepare_enable(tcon->ipg_clk);
+ if (ret) {
+ dev_err(dev, "Couldn't enable the TCON clock\n");
+ goto err_node_put;
+ }
+ of_node_put(np);
dev_info(dev, "Using TCON in bypass mode\n");
return tcon;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index afa3d01..7fdc42e 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -3558,9 +3558,16 @@
dev_priv->psr.psr2_support ? "supported" : "not supported");
}
- /* Read the eDP Display control capabilities registers */
- if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
- drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
+ /*
+ * Read the eDP display control registers.
+ *
+ * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
+ * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
+ * set, but require eDP 1.4+ detection (e.g. for supported link rates
+ * method). The display control registers should read zero if they're
+ * not supported anyway.
+ */
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
sizeof(intel_dp->edp_dpcd))
DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index a19ec06..3ce9ba3 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -457,7 +457,6 @@
struct intel_pipe_wm {
struct intel_wm_level wm[5];
- struct intel_wm_level raw_wm[5];
uint32_t linetime;
bool fbc_wm_enabled;
bool pipe_enabled;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 49de476..277a802 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -27,6 +27,7 @@
#include <linux/cpufreq.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_atomic_helper.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
@@ -2017,9 +2018,9 @@
const struct intel_crtc *intel_crtc,
int level,
struct intel_crtc_state *cstate,
- struct intel_plane_state *pristate,
- struct intel_plane_state *sprstate,
- struct intel_plane_state *curstate,
+ const struct intel_plane_state *pristate,
+ const struct intel_plane_state *sprstate,
+ const struct intel_plane_state *curstate,
struct intel_wm_level *result)
{
uint16_t pri_latency = dev_priv->wm.pri_latency[level];
@@ -2341,28 +2342,24 @@
struct intel_pipe_wm *pipe_wm;
struct drm_device *dev = state->dev;
const struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_plane *intel_plane;
- struct intel_plane_state *pristate = NULL;
- struct intel_plane_state *sprstate = NULL;
- struct intel_plane_state *curstate = NULL;
+ struct drm_plane *plane;
+ const struct drm_plane_state *plane_state;
+ const struct intel_plane_state *pristate = NULL;
+ const struct intel_plane_state *sprstate = NULL;
+ const struct intel_plane_state *curstate = NULL;
int level, max_level = ilk_wm_max_level(dev), usable_level;
struct ilk_wm_maximums max;
pipe_wm = &cstate->wm.ilk.optimal;
- for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
- struct intel_plane_state *ps;
+ drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, &cstate->base) {
+ const struct intel_plane_state *ps = to_intel_plane_state(plane_state);
- ps = intel_atomic_get_existing_plane_state(state,
- intel_plane);
- if (!ps)
- continue;
-
- if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY)
pristate = ps;
- else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
+ else if (plane->type == DRM_PLANE_TYPE_OVERLAY)
sprstate = ps;
- else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
+ else if (plane->type == DRM_PLANE_TYPE_CURSOR)
curstate = ps;
}
@@ -2384,11 +2381,9 @@
if (pipe_wm->sprites_scaled)
usable_level = 0;
- ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
- pristate, sprstate, curstate, &pipe_wm->raw_wm[0]);
-
memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
- pipe_wm->wm[0] = pipe_wm->raw_wm[0];
+ ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
+ pristate, sprstate, curstate, &pipe_wm->wm[0]);
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
@@ -2398,8 +2393,8 @@
ilk_compute_wm_reg_maximums(dev, 1, &max);
- for (level = 1; level <= max_level; level++) {
- struct intel_wm_level *wm = &pipe_wm->raw_wm[level];
+ for (level = 1; level <= usable_level; level++) {
+ struct intel_wm_level *wm = &pipe_wm->wm[level];
ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
pristate, sprstate, curstate, wm);
@@ -2409,13 +2404,10 @@
* register maximums since such watermarks are
* always invalid.
*/
- if (level > usable_level)
- continue;
-
- if (ilk_validate_wm_level(level, &max, wm))
- pipe_wm->wm[level] = *wm;
- else
- usable_level = level;
+ if (!ilk_validate_wm_level(level, &max, wm)) {
+ memset(wm, 0, sizeof(*wm));
+ break;
+ }
}
return 0;
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c
index 0a03298..51cb6c5 100644
--- a/drivers/gpu/drm/msm/dp/dp_audio.c
+++ b/drivers/gpu/drm/msm/dp/dp_audio.c
@@ -23,13 +23,6 @@
#include "dp_audio.h"
#include "dp_panel.h"
-#define HEADER_BYTE_2_BIT 0
-#define PARITY_BYTE_2_BIT 8
-#define HEADER_BYTE_1_BIT 16
-#define PARITY_BYTE_1_BIT 24
-#define HEADER_BYTE_3_BIT 16
-#define PARITY_BYTE_3_BIT 24
-
struct dp_audio_private {
struct platform_device *ext_pdev;
struct platform_device *pdev;
@@ -50,71 +43,6 @@
struct dp_audio dp_audio;
};
-static u8 dp_audio_get_g0_value(u8 data)
-{
- u8 c[4];
- u8 g[4];
- u8 ret_data = 0;
- u8 i;
-
- for (i = 0; i < 4; i++)
- c[i] = (data >> i) & 0x01;
-
- g[0] = c[3];
- g[1] = c[0] ^ c[3];
- g[2] = c[1];
- g[3] = c[2];
-
- for (i = 0; i < 4; i++)
- ret_data = ((g[i] & 0x01) << i) | ret_data;
-
- return ret_data;
-}
-
-static u8 dp_audio_get_g1_value(u8 data)
-{
- u8 c[4];
- u8 g[4];
- u8 ret_data = 0;
- u8 i;
-
- for (i = 0; i < 4; i++)
- c[i] = (data >> i) & 0x01;
-
- g[0] = c[0] ^ c[3];
- g[1] = c[0] ^ c[1] ^ c[3];
- g[2] = c[1] ^ c[2];
- g[3] = c[2] ^ c[3];
-
- for (i = 0; i < 4; i++)
- ret_data = ((g[i] & 0x01) << i) | ret_data;
-
- return ret_data;
-}
-
-static u8 dp_audio_calculate_parity(u32 data)
-{
- u8 x0 = 0;
- u8 x1 = 0;
- u8 ci = 0;
- u8 iData = 0;
- u8 i = 0;
- u8 parity_byte;
- u8 num_byte = (data & 0xFF00) > 0 ? 8 : 2;
-
- for (i = 0; i < num_byte; i++) {
- iData = (data >> i*4) & 0xF;
-
- ci = iData ^ x1;
- x1 = x0 ^ dp_audio_get_g1_value(ci);
- x0 = dp_audio_get_g0_value(ci);
- }
-
- parity_byte = x1 | (x0 << 4);
-
- return parity_byte;
-}
-
static u32 dp_audio_get_header(struct dp_catalog_audio *catalog,
enum dp_catalog_audio_sdp_type sdp,
enum dp_catalog_audio_header_type header)
@@ -148,7 +76,7 @@
DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1);
new_value = 0x02;
- parity_byte = dp_audio_calculate_parity(new_value);
+ parity_byte = dp_header_get_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
pr_debug("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
@@ -160,7 +88,7 @@
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2);
new_value = value;
- parity_byte = dp_audio_calculate_parity(new_value);
+ parity_byte = dp_header_get_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
pr_debug("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
@@ -174,7 +102,7 @@
DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3);
new_value = audio->channels - 1;
- parity_byte = dp_audio_calculate_parity(new_value);
+ parity_byte = dp_header_get_parity(new_value);
value |= ((new_value << HEADER_BYTE_3_BIT)
| (parity_byte << PARITY_BYTE_3_BIT));
pr_debug("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
@@ -195,7 +123,7 @@
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1);
new_value = 0x1;
- parity_byte = dp_audio_calculate_parity(new_value);
+ parity_byte = dp_header_get_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
pr_debug("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
@@ -208,7 +136,7 @@
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2);
new_value = 0x17;
- parity_byte = dp_audio_calculate_parity(new_value);
+ parity_byte = dp_header_get_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
pr_debug("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
@@ -221,7 +149,7 @@
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3);
new_value = (0x0 | (0x11 << 2));
- parity_byte = dp_audio_calculate_parity(new_value);
+ parity_byte = dp_header_get_parity(new_value);
value |= ((new_value << HEADER_BYTE_3_BIT)
| (parity_byte << PARITY_BYTE_3_BIT));
pr_debug("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
@@ -241,7 +169,7 @@
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1);
new_value = 0x84;
- parity_byte = dp_audio_calculate_parity(new_value);
+ parity_byte = dp_header_get_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
pr_debug("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
@@ -254,7 +182,7 @@
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2);
new_value = 0x1b;
- parity_byte = dp_audio_calculate_parity(new_value);
+ parity_byte = dp_header_get_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
pr_debug("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
@@ -267,7 +195,7 @@
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3);
new_value = (0x0 | (0x11 << 2));
- parity_byte = dp_audio_calculate_parity(new_value);
+ parity_byte = dp_header_get_parity(new_value);
value |= ((new_value << HEADER_BYTE_3_BIT)
| (parity_byte << PARITY_BYTE_3_BIT));
pr_debug("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
@@ -287,7 +215,7 @@
DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1);
new_value = 0x05;
- parity_byte = dp_audio_calculate_parity(new_value);
+ parity_byte = dp_header_get_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
pr_debug("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
@@ -300,7 +228,7 @@
DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2);
new_value = 0x0F;
- parity_byte = dp_audio_calculate_parity(new_value);
+ parity_byte = dp_header_get_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
pr_debug("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
@@ -313,7 +241,7 @@
DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3);
new_value = 0x0;
- parity_byte = dp_audio_calculate_parity(new_value);
+ parity_byte = dp_header_get_parity(new_value);
value |= ((new_value << HEADER_BYTE_3_BIT)
| (parity_byte << PARITY_BYTE_3_BIT));
pr_debug("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
@@ -333,7 +261,7 @@
DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1);
new_value = 0x06;
- parity_byte = dp_audio_calculate_parity(new_value);
+ parity_byte = dp_header_get_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
pr_debug("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
@@ -346,7 +274,7 @@
DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2);
new_value = 0x0F;
- parity_byte = dp_audio_calculate_parity(new_value);
+ parity_byte = dp_header_get_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
pr_debug("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
index acbaec4..2d76d13 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.c
+++ b/drivers/gpu/drm/msm/dp/dp_aux.c
@@ -42,6 +42,7 @@
bool no_send_stop;
u32 offset;
u32 segment;
+ atomic_t aborted;
struct drm_dp_aux drm_aux;
};
@@ -279,6 +280,20 @@
aux->catalog->reset(aux->catalog);
}
+static void dp_aux_abort_transaction(struct dp_aux *dp_aux)
+{
+ struct dp_aux_private *aux;
+
+ if (!dp_aux) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+ atomic_set(&aux->aborted, 1);
+}
+
static void dp_aux_update_offset_and_segment(struct dp_aux_private *aux,
struct drm_dp_aux_msg *input_msg)
{
@@ -330,17 +345,19 @@
aux->no_send_stop = true;
/*
- * Send the segment address for every i2c read in which the
- * middle-of-tranaction flag is set. This is required to support EDID
- * reads of more than 2 blocks as the segment address is reset to 0
+ * Send the segment address for i2c reads for segment > 0 and for which
+ * the middle-of-transaction flag is set. This is required to support
+ * EDID reads of more than 2 blocks as the segment address is reset to 0
* since we are overriding the middle-of-transaction flag for read
* transactions.
*/
- memset(&helper_msg, 0, sizeof(helper_msg));
- helper_msg.address = segment_address;
- helper_msg.buffer = &aux->segment;
- helper_msg.size = 1;
- dp_aux_cmd_fifo_tx(aux, &helper_msg);
+ if (aux->segment) {
+ memset(&helper_msg, 0, sizeof(helper_msg));
+ helper_msg.address = segment_address;
+ helper_msg.buffer = &aux->segment;
+ helper_msg.size = 1;
+ dp_aux_cmd_fifo_tx(aux, &helper_msg);
+ }
/*
* Send the offset address for every i2c read in which the
@@ -377,6 +394,11 @@
mutex_lock(&aux->mutex);
+ if (atomic_read(&aux->aborted)) {
+ ret = -ETIMEDOUT;
+ goto unlock_exit;
+ }
+
aux->native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ);
/* Ignore address only message */
@@ -411,7 +433,7 @@
}
ret = dp_aux_cmd_fifo_tx(aux, msg);
- if ((ret < 0) && aux->native) {
+ if ((ret < 0) && aux->native && !atomic_read(&aux->aborted)) {
aux->retry_cnt++;
if (!(aux->retry_cnt % retry_count))
aux->catalog->update_aux_cfg(aux->catalog,
@@ -467,6 +489,7 @@
aux->catalog->setup(aux->catalog, aux_cfg);
aux->catalog->reset(aux->catalog);
aux->catalog->enable(aux->catalog, true);
+ atomic_set(&aux->aborted, 0);
aux->retry_cnt = 0;
}
@@ -481,6 +504,7 @@
aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+ atomic_set(&aux->aborted, 1);
aux->catalog->enable(aux->catalog, false);
}
@@ -558,6 +582,7 @@
dp_aux->drm_aux_register = dp_aux_register;
dp_aux->drm_aux_deregister = dp_aux_deregister;
dp_aux->reconfig = dp_aux_reconfig;
+ dp_aux->abort = dp_aux_abort_transaction;
return dp_aux;
error:
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.h b/drivers/gpu/drm/msm/dp/dp_aux.h
index 85761ce..e8cb1cc 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.h
+++ b/drivers/gpu/drm/msm/dp/dp_aux.h
@@ -36,6 +36,7 @@
void (*init)(struct dp_aux *aux, struct dp_aux_cfg *aux_cfg);
void (*deinit)(struct dp_aux *aux);
void (*reconfig)(struct dp_aux *aux);
+ void (*abort)(struct dp_aux *aux);
};
struct dp_aux *dp_aux_get(struct device *dev, struct dp_catalog_aux *catalog,
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
index fc7cb22..c237a23 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -337,7 +337,7 @@
struct dp_catalog_private *catalog;
struct drm_msm_ext_hdr_metadata *hdr;
void __iomem *base;
- u32 header, data;
+ u32 header, parity, data;
if (!panel) {
pr_err("invalid input\n");
@@ -348,67 +348,90 @@
hdr = &panel->hdr_data.hdr_meta;
base = catalog->io->dp_link.base;
- header = dp_read(base + MMSS_DP_VSCEXT_0);
- header |= panel->hdr_data.vscext_header_byte1;
- dp_write(base + MMSS_DP_VSCEXT_0, header);
+ /* HEADER BYTE 1 */
+ header = panel->hdr_data.vscext_header_byte1;
+ parity = dp_header_get_parity(header);
+ data = ((header << HEADER_BYTE_1_BIT)
+ | (parity << PARITY_BYTE_1_BIT));
+ dp_write(base + MMSS_DP_VSCEXT_0, data);
+ pr_debug("Header#1: 0x%x, parity = 0x%x\n", header, parity);
+ pr_debug("DP_VSCEXT_0: 0x%x\n", data);
- header = dp_read(base + MMSS_DP_VSCEXT_1);
- header |= panel->hdr_data.vscext_header_byte2;
- dp_write(base + MMSS_DP_VSCEXT_1, header);
+ /* HEADER BYTE 2 */
+ header = panel->hdr_data.vscext_header_byte2;
+ parity = dp_header_get_parity(header);
+ data = ((header << HEADER_BYTE_2_BIT)
+ | (parity << PARITY_BYTE_2_BIT));
+ dp_write(base + MMSS_DP_VSCEXT_1, data);
+ pr_debug("Header#2: 0x%x, parity = 0x%x\n", header, parity);
+ pr_debug("DP_VSCEXT_1: 0x%x\n", data);
- header = dp_read(base + MMSS_DP_VSCEXT_1);
- header |= panel->hdr_data.vscext_header_byte3;
- dp_write(base + MMSS_DP_VSCEXT_1, header);
+ /* HEADER BYTE 3 */
+ header = panel->hdr_data.vscext_header_byte3;
+ parity = dp_header_get_parity(header);
+ data = ((header << HEADER_BYTE_3_BIT)
+ | (parity << PARITY_BYTE_3_BIT));
+ data |= dp_read(base + MMSS_DP_VSCEXT_1);
+ dp_write(base + MMSS_DP_VSCEXT_1, data);
+ pr_debug("Header#3: 0x%x, parity = 0x%x\n", header, parity);
+ pr_debug("DP_VSCEXT_1: 0x%x\n", data);
- header = panel->hdr_data.version;
- header |= panel->hdr_data.length << 8;
- header |= hdr->eotf << 16;
- dp_write(base + MMSS_DP_VSCEXT_2, header);
+ data = panel->hdr_data.version;
+ data |= panel->hdr_data.length << 8;
+ data |= hdr->eotf << 16;
+ pr_debug("DP_VSCEXT_2: 0x%x\n", data);
+ dp_write(base + MMSS_DP_VSCEXT_2, data);
data = (DP_GET_LSB(hdr->display_primaries_x[0]) |
(DP_GET_MSB(hdr->display_primaries_x[0]) << 8) |
(DP_GET_LSB(hdr->display_primaries_y[0]) << 16) |
(DP_GET_MSB(hdr->display_primaries_y[0]) << 24));
+ pr_debug("DP_VSCEXT_3: 0x%x\n", data);
dp_write(base + MMSS_DP_VSCEXT_3, data);
data = (DP_GET_LSB(hdr->display_primaries_x[1]) |
(DP_GET_MSB(hdr->display_primaries_x[1]) << 8) |
(DP_GET_LSB(hdr->display_primaries_y[1]) << 16) |
(DP_GET_MSB(hdr->display_primaries_y[1]) << 24));
+ pr_debug("DP_VSCEXT_4: 0x%x\n", data);
dp_write(base + MMSS_DP_VSCEXT_4, data);
data = (DP_GET_LSB(hdr->display_primaries_x[2]) |
(DP_GET_MSB(hdr->display_primaries_x[2]) << 8) |
(DP_GET_LSB(hdr->display_primaries_y[2]) << 16) |
(DP_GET_MSB(hdr->display_primaries_y[2]) << 24));
+ pr_debug("DP_VSCEXT_5: 0x%x\n", data);
dp_write(base + MMSS_DP_VSCEXT_5, data);
data = (DP_GET_LSB(hdr->white_point_x) |
(DP_GET_MSB(hdr->white_point_x) << 8) |
(DP_GET_LSB(hdr->white_point_y) << 16) |
(DP_GET_MSB(hdr->white_point_y) << 24));
+ pr_debug("DP_VSCEXT_6: 0x%x\n", data);
dp_write(base + MMSS_DP_VSCEXT_6, data);
data = (DP_GET_LSB(hdr->max_luminance) |
(DP_GET_MSB(hdr->max_luminance) << 8) |
(DP_GET_LSB(hdr->min_luminance) << 16) |
(DP_GET_MSB(hdr->min_luminance) << 24));
+ pr_debug("DP_VSCEXT_7: 0x%x\n", data);
dp_write(base + MMSS_DP_VSCEXT_7, data);
data = (DP_GET_LSB(hdr->max_content_light_level) |
(DP_GET_MSB(hdr->max_content_light_level) << 8) |
(DP_GET_LSB(hdr->max_average_light_level) << 16) |
(DP_GET_MSB(hdr->max_average_light_level) << 24));
+ pr_debug("DP_VSCEXT_8: 0x%x\n", data);
dp_write(base + MMSS_DP_VSCEXT_8, data);
dp_write(base + MMSS_DP_VSCEXT_9, 0x00);
}
-static void dp_catalog_panel_setup_vsc_sdp(struct dp_catalog_panel *panel)
+static void dp_catalog_panel_setup_ext_sdp(struct dp_catalog_panel *panel)
{
struct dp_catalog_private *catalog;
void __iomem *base;
- u32 value;
+ u32 header, parity, data;
if (!panel) {
pr_err("invalid input\n");
@@ -418,30 +441,105 @@
dp_catalog_get_priv(panel);
base = catalog->io->dp_link.base;
- value = dp_read(base + MMSS_DP_GENERIC0_0);
- value |= panel->hdr_data.vsc_header_byte1;
- dp_write(base + MMSS_DP_GENERIC0_0, value);
+ /* HEADER BYTE 1 */
+ header = panel->hdr_data.ext_header_byte1;
+ parity = dp_header_get_parity(header);
+ data = ((header << HEADER_BYTE_1_BIT)
+ | (parity << PARITY_BYTE_1_BIT));
+ dp_write(base + MMSS_DP_EXTENSION_0, data);
+ pr_debug("Header#1: 0x%x, parity = 0x%x\n", header, parity);
+ pr_debug("DP_EXTENSION_0: 0x%x\n", data);
- value = dp_read(base + MMSS_DP_GENERIC0_1);
- value |= panel->hdr_data.vsc_header_byte2;
- dp_write(base + MMSS_DP_GENERIC0_1, value);
+ /* HEADER BYTE 2 */
+ header = panel->hdr_data.ext_header_byte2;
+ parity = dp_header_get_parity(header);
+ data = ((header << HEADER_BYTE_2_BIT)
+ | (parity << PARITY_BYTE_2_BIT));
+ dp_write(base + MMSS_DP_EXTENSION_1, data);
+ pr_debug("Header#2: 0x%x, parity = 0x%x\n", header, parity);
+ pr_debug("DP_EXTENSION_1: 0x%x\n", data);
- value = dp_read(base + MMSS_DP_GENERIC0_1);
- value |= panel->hdr_data.vsc_header_byte3;
- dp_write(base + MMSS_DP_GENERIC0_1, value);
+ dp_write(base + MMSS_DP_EXTENSION_1, 0x5AA55AA5);
+ dp_write(base + MMSS_DP_EXTENSION_2, 0x5AA55AA5);
+ dp_write(base + MMSS_DP_EXTENSION_3, 0x5AA55AA5);
+ dp_write(base + MMSS_DP_EXTENSION_4, 0x5AA55AA5);
+ dp_write(base + MMSS_DP_EXTENSION_5, 0x5AA55AA5);
+ dp_write(base + MMSS_DP_EXTENSION_6, 0x5AA55AA5);
+ dp_write(base + MMSS_DP_EXTENSION_7, 0x5AA55AA5);
+ dp_write(base + MMSS_DP_EXTENSION_8, 0x5AA55AA5);
+ dp_write(base + MMSS_DP_EXTENSION_9, 0x5AA55AA5);
+}
+
+static void dp_catalog_panel_setup_vsc_sdp(struct dp_catalog_panel *panel)
+{
+ struct dp_catalog_private *catalog;
+ void __iomem *base;
+ u32 header, parity, data;
+ u8 bpc;
+
+ if (!panel) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ dp_catalog_get_priv(panel);
+ base = catalog->io->ctrl_io.base;
+
+ /* HEADER BYTE 1 */
+ header = panel->hdr_data.vsc_header_byte1;
+ parity = dp_header_get_parity(header);
+ data = ((header << HEADER_BYTE_1_BIT)
+ | (parity << PARITY_BYTE_1_BIT));
+ dp_write(base + MMSS_DP_GENERIC0_0, data);
+ pr_debug("Header#1: 0x%x, parity = 0x%x\n", header, parity);
+ pr_debug("DP_GENERIC0_0: 0x%x\n", data);
+
+ /* HEADER BYTE 2 */
+ header = panel->hdr_data.vsc_header_byte2;
+ parity = dp_header_get_parity(header);
+ data = ((header << HEADER_BYTE_2_BIT)
+ | (parity << PARITY_BYTE_2_BIT));
+ dp_write(base + MMSS_DP_GENERIC0_1, data);
+ pr_debug("Header#2: 0x%x, parity = 0x%x\n", header, parity);
+ pr_debug("DP_GENERIC0_1: 0x%x\n", data);
+
+ /* HEADER BYTE 3 */
+ header = panel->hdr_data.vsc_header_byte3;
+ parity = dp_header_get_parity(header);
+ data = ((header << HEADER_BYTE_3_BIT)
+ | (parity << PARITY_BYTE_3_BIT));
+ data |= dp_read(base + MMSS_DP_GENERIC0_1);
+ dp_write(base + MMSS_DP_GENERIC0_1, data);
+ pr_debug("Header#3: 0x%x, parity = 0x%x\n", header, parity);
+ pr_debug("DP_GENERIC0_1: 0x%x\n", data);
dp_write(base + MMSS_DP_GENERIC0_2, 0x00);
dp_write(base + MMSS_DP_GENERIC0_3, 0x00);
dp_write(base + MMSS_DP_GENERIC0_4, 0x00);
dp_write(base + MMSS_DP_GENERIC0_5, 0x00);
- value = (panel->hdr_data.colorimetry & 0xF) |
+
+ switch (panel->hdr_data.bpc) {
+ default:
+ case 10:
+ bpc = BIT(1);
+ break;
+ case 8:
+ bpc = BIT(0);
+ break;
+ case 6:
+ bpc = 0;
+ break;
+ }
+
+ data = (panel->hdr_data.colorimetry & 0xF) |
((panel->hdr_data.pixel_encoding & 0xF) << 4) |
- ((panel->hdr_data.bpc & 0x7) << 8) |
+ (bpc << 8) |
((panel->hdr_data.dynamic_range & 0x1) << 15) |
((panel->hdr_data.content_type & 0x7) << 16);
- dp_write(base + MMSS_DP_GENERIC0_6, value);
+ pr_debug("DP_GENERIC0_6: 0x%x\n", data);
+ dp_write(base + MMSS_DP_GENERIC0_6, data);
dp_write(base + MMSS_DP_GENERIC0_7, 0x00);
dp_write(base + MMSS_DP_GENERIC0_8, 0x00);
dp_write(base + MMSS_DP_GENERIC0_9, 0x00);
@@ -462,19 +560,31 @@
base = catalog->io->dp_link.base;
cfg = dp_read(base + MMSS_DP_SDP_CFG);
+ /* EXTENSION_SDP_EN */
+ cfg |= BIT(4);
+
/* VSCEXT_SDP_EN */
cfg |= BIT(16);
/* GEN0_SDP_EN */
cfg |= BIT(17);
+ /* GEN1_SDP_EN */
+ cfg |= BIT(18);
dp_write(base + MMSS_DP_SDP_CFG, cfg);
cfg2 = dp_read(base + MMSS_DP_SDP_CFG2);
- /* Generic0 SDP Payload is 19 bytes which is > 16, so Bit16 is 1 */
- cfg2 |= BIT(16);
+ /* EXTN_SDPSIZE */
+ cfg2 |= BIT(15);
+
+ /* GENERIC0_SDPSIZE */
+ cfg |= BIT(16);
+
+ /* GENERIC1_SDPSIZE */
+ cfg |= BIT(17);
dp_write(base + MMSS_DP_SDP_CFG2, cfg2);
+ dp_catalog_panel_setup_ext_sdp(panel);
dp_catalog_panel_setup_vsc_sdp(panel);
dp_catalog_panel_setup_infoframe_sdp(panel);
@@ -484,27 +594,8 @@
dp_write(base + DP_MISC1_MISC0, cfg);
- cfg = dp_read(base + DP_CONFIGURATION_CTRL);
- /* Send VSC */
- cfg |= BIT(7);
-
- switch (panel->hdr_data.bpc) {
- default:
- case 10:
- cfg |= BIT(9);
- break;
- case 8:
- cfg |= BIT(8);
- break;
- }
-
- dp_write(base + DP_CONFIGURATION_CTRL, cfg);
-
- cfg = dp_read(base + DP_COMPRESSION_MODE_CTRL);
-
- /* Trigger SDP values in registers */
- cfg |= BIT(8);
- dp_write(base + DP_COMPRESSION_MODE_CTRL, cfg);
+ dp_write(base + MMSS_DP_SDP_CFG3, 0x01);
+ dp_write(base + MMSS_DP_SDP_CFG3, 0x00);
}
static void dp_catalog_ctrl_update_transfer_unit(struct dp_catalog_ctrl *ctrl)
@@ -1179,6 +1270,8 @@
dp_catalog_get_priv(audio);
base = catalog->io->dp_link.base;
+ sdp_cfg = dp_read(base + MMSS_DP_SDP_CFG);
+
/* AUDIO_TIMESTAMP_SDP_EN */
sdp_cfg |= BIT(1);
/* AUDIO_STREAM_SDP_EN */
@@ -1312,6 +1405,131 @@
wmb();
}
+static void dp_catalog_config_spd_header(struct dp_catalog_panel *panel)
+{
+ struct dp_catalog_private *catalog;
+ void __iomem *base;
+ u32 value, new_value;
+ u8 parity_byte;
+
+ if (!panel)
+ return;
+
+ dp_catalog_get_priv(panel);
+ base = catalog->io->dp_link.base;
+
+ /* Config header and parity byte 1 */
+ value = dp_read(base + MMSS_DP_GENERIC1_0);
+
+ new_value = 0x83;
+ parity_byte = dp_header_get_parity(new_value);
+ value |= ((new_value << HEADER_BYTE_1_BIT)
+ | (parity_byte << PARITY_BYTE_1_BIT));
+ pr_debug("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
+ value, parity_byte);
+ dp_write(base + MMSS_DP_GENERIC1_0, value);
+
+ /* Config header and parity byte 2 */
+ value = dp_read(base + MMSS_DP_GENERIC1_1);
+
+ new_value = 0x1b;
+ parity_byte = dp_header_get_parity(new_value);
+ value |= ((new_value << HEADER_BYTE_2_BIT)
+ | (parity_byte << PARITY_BYTE_2_BIT));
+ pr_debug("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
+ value, parity_byte);
+ dp_write(base + MMSS_DP_GENERIC1_1, value);
+
+ /* Config header and parity byte 3 */
+ value = dp_read(base + MMSS_DP_GENERIC1_1);
+
+ new_value = (0x0 | (0x12 << 2));
+ parity_byte = dp_header_get_parity(new_value);
+ value |= ((new_value << HEADER_BYTE_3_BIT)
+ | (parity_byte << PARITY_BYTE_3_BIT));
+ pr_debug("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
+ new_value, parity_byte);
+ dp_write(base + MMSS_DP_GENERIC1_1, value);
+}
+
+static void dp_catalog_panel_config_spd(struct dp_catalog_panel *panel)
+{
+ struct dp_catalog_private *catalog;
+ void __iomem *base;
+ u32 spd_cfg = 0, spd_cfg2 = 0;
+ u8 *vendor = NULL, *product = NULL;
+ /*
+ * Source Device Information
+ * 00h unknown
+ * 01h Digital STB
+ * 02h DVD
+ * 03h D-VHS
+ * 04h HDD Video
+ * 05h DVC
+ * 06h DSC
+ * 07h Video CD
+ * 08h Game
+ * 09h PC general
+ * 0ah Bluray-Disc
+ * 0bh Super Audio CD
+ * 0ch HD DVD
+ * 0dh PMP
+ * 0eh-ffh reserved
+ */
+ u32 device_type = 0;
+
+ if (!panel)
+ return;
+
+ dp_catalog_get_priv(panel);
+ base = catalog->io->dp_link.base;
+
+ dp_catalog_config_spd_header(panel);
+
+ vendor = panel->spd_vendor_name;
+ product = panel->spd_product_description;
+
+ dp_write(base + MMSS_DP_GENERIC1_2, ((vendor[0] & 0x7f) |
+ ((vendor[1] & 0x7f) << 8) |
+ ((vendor[2] & 0x7f) << 16) |
+ ((vendor[3] & 0x7f) << 24)));
+ dp_write(base + MMSS_DP_GENERIC1_3, ((vendor[4] & 0x7f) |
+ ((vendor[5] & 0x7f) << 8) |
+ ((vendor[6] & 0x7f) << 16) |
+ ((vendor[7] & 0x7f) << 24)));
+ dp_write(base + MMSS_DP_GENERIC1_4, ((product[0] & 0x7f) |
+ ((product[1] & 0x7f) << 8) |
+ ((product[2] & 0x7f) << 16) |
+ ((product[3] & 0x7f) << 24)));
+ dp_write(base + MMSS_DP_GENERIC1_5, ((product[4] & 0x7f) |
+ ((product[5] & 0x7f) << 8) |
+ ((product[6] & 0x7f) << 16) |
+ ((product[7] & 0x7f) << 24)));
+ dp_write(base + MMSS_DP_GENERIC1_6, ((product[8] & 0x7f) |
+ ((product[9] & 0x7f) << 8) |
+ ((product[10] & 0x7f) << 16) |
+ ((product[11] & 0x7f) << 24)));
+ dp_write(base + MMSS_DP_GENERIC1_7, ((product[12] & 0x7f) |
+ ((product[13] & 0x7f) << 8) |
+ ((product[14] & 0x7f) << 16) |
+ ((product[15] & 0x7f) << 24)));
+ dp_write(base + MMSS_DP_GENERIC1_8, device_type);
+ dp_write(base + MMSS_DP_GENERIC1_9, 0x00);
+
+ spd_cfg = dp_read(base + MMSS_DP_SDP_CFG);
+ /* GENERIC1_SDP for SPD Infoframe */
+ spd_cfg |= BIT(18);
+ dp_write(base + MMSS_DP_SDP_CFG, spd_cfg);
+
+ spd_cfg2 = dp_read(base + MMSS_DP_SDP_CFG2);
+ /* 28 data bytes for SPD Infoframe with GENERIC1 set */
+ spd_cfg2 |= BIT(17);
+ dp_write(base + MMSS_DP_SDP_CFG2, spd_cfg2);
+
+ dp_write(base + MMSS_DP_SDP_CFG3, 0x1);
+ dp_write(base + MMSS_DP_SDP_CFG3, 0x0);
+}
+
struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_io *io)
{
int rc = 0;
@@ -1364,6 +1582,7 @@
.timing_cfg = dp_catalog_panel_timing_cfg,
.config_hdr = dp_catalog_panel_config_hdr,
.tpg_config = dp_catalog_panel_tpg_cfg,
+ .config_spd = dp_catalog_panel_config_spd,
};
if (!io) {
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h
index c70e8d1..b270545 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.h
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.h
@@ -37,6 +37,11 @@
#define DP_INTR_CRC_UPDATED BIT(9)
struct dp_catalog_hdr_data {
+ u32 ext_header_byte0;
+ u32 ext_header_byte1;
+ u32 ext_header_byte2;
+ u32 ext_header_byte3;
+
u32 vsc_header_byte0;
u32 vsc_header_byte1;
u32 vsc_header_byte2;
@@ -109,6 +114,13 @@
u32 (*read_phy_pattern)(struct dp_catalog_ctrl *ctrl);
};
+#define HEADER_BYTE_2_BIT 0
+#define PARITY_BYTE_2_BIT 8
+#define HEADER_BYTE_1_BIT 16
+#define PARITY_BYTE_1_BIT 24
+#define HEADER_BYTE_3_BIT 16
+#define PARITY_BYTE_3_BIT 24
+
enum dp_catalog_audio_sdp_type {
DP_AUDIO_SDP_STREAM,
DP_AUDIO_SDP_TIMESTAMP,
@@ -144,6 +156,8 @@
u32 sync_start;
u32 width_blanking;
u32 dp_active;
+ u8 *spd_vendor_name;
+ u8 *spd_product_description;
struct dp_catalog_hdr_data hdr_data;
@@ -159,6 +173,7 @@
int (*timing_cfg)(struct dp_catalog_panel *panel);
void (*config_hdr)(struct dp_catalog_panel *panel);
void (*tpg_config)(struct dp_catalog_panel *panel, bool enable);
+ void (*config_spd)(struct dp_catalog_panel *panel);
};
struct dp_catalog {
@@ -168,6 +183,71 @@
struct dp_catalog_panel panel;
};
+static inline u8 dp_ecc_get_g0_value(u8 data)
+{
+ u8 c[4];
+ u8 g[4];
+ u8 ret_data = 0;
+ u8 i;
+
+ for (i = 0; i < 4; i++)
+ c[i] = (data >> i) & 0x01;
+
+ g[0] = c[3];
+ g[1] = c[0] ^ c[3];
+ g[2] = c[1];
+ g[3] = c[2];
+
+ for (i = 0; i < 4; i++)
+ ret_data = ((g[i] & 0x01) << i) | ret_data;
+
+ return ret_data;
+}
+
+static inline u8 dp_ecc_get_g1_value(u8 data)
+{
+ u8 c[4];
+ u8 g[4];
+ u8 ret_data = 0;
+ u8 i;
+
+ for (i = 0; i < 4; i++)
+ c[i] = (data >> i) & 0x01;
+
+ g[0] = c[0] ^ c[3];
+ g[1] = c[0] ^ c[1] ^ c[3];
+ g[2] = c[1] ^ c[2];
+ g[3] = c[2] ^ c[3];
+
+ for (i = 0; i < 4; i++)
+ ret_data = ((g[i] & 0x01) << i) | ret_data;
+
+ return ret_data;
+}
+
+static inline u8 dp_header_get_parity(u32 data)
+{
+ u8 x0 = 0;
+ u8 x1 = 0;
+ u8 ci = 0;
+ u8 iData = 0;
+ u8 i = 0;
+ u8 parity_byte;
+ u8 num_byte = (data & 0xFF00) > 0 ? 8 : 2;
+
+ for (i = 0; i < num_byte; i++) {
+ iData = (data >> i*4) & 0xF;
+
+ ci = iData ^ x1;
+ x1 = x0 ^ dp_ecc_get_g1_value(ci);
+ x0 = dp_ecc_get_g0_value(ci);
+ }
+
+ parity_byte = x1 | (x0 << 4);
+
+ return parity_byte;
+}
+
struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_io *io);
void dp_catalog_put(struct dp_catalog *catalog);
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index 65672c9..576ed52 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -761,18 +761,18 @@
return drm_dp_dpcd_write(ctrl->aux->drm_aux, 0x103, buf, 4);
}
-static void dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl)
+static int dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl)
{
struct dp_link *link = ctrl->link;
ctrl->catalog->update_vx_px(ctrl->catalog,
link->phy_params.v_level, link->phy_params.p_level);
- dp_ctrl_update_sink_vx_px(ctrl, link->phy_params.v_level,
+ return dp_ctrl_update_sink_vx_px(ctrl, link->phy_params.v_level,
link->phy_params.p_level);
}
-static void dp_ctrl_train_pattern_set(struct dp_ctrl_private *ctrl,
+static int dp_ctrl_train_pattern_set(struct dp_ctrl_private *ctrl,
u8 pattern)
{
u8 buf[4];
@@ -780,7 +780,8 @@
pr_debug("sink: pattern=%x\n", pattern);
buf[0] = pattern;
- drm_dp_dpcd_write(ctrl->aux->drm_aux, DP_TRAINING_PATTERN_SET, buf, 1);
+ return drm_dp_dpcd_write(ctrl->aux->drm_aux,
+ DP_TRAINING_PATTERN_SET, buf, 1);
}
static int dp_ctrl_read_link_status(struct dp_ctrl_private *ctrl,
@@ -817,9 +818,18 @@
wmb();
ctrl->catalog->set_pattern(ctrl->catalog, 0x01);
- dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_1 |
+ ret = dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE); /* train_1 */
- dp_ctrl_update_vx_px(ctrl);
+ if (ret <= 0) {
+ ret = -EINVAL;
+ return ret;
+ }
+
+ ret = dp_ctrl_update_vx_px(ctrl);
+ if (ret <= 0) {
+ ret = -EINVAL;
+ return ret;
+ }
tries = 0;
old_v_level = ctrl->link->phy_params.v_level;
@@ -856,7 +866,11 @@
pr_debug("clock recovery not done, adjusting vx px\n");
ctrl->link->adjust_levels(ctrl->link, link_status);
- dp_ctrl_update_vx_px(ctrl);
+ ret = dp_ctrl_update_vx_px(ctrl);
+ if (ret <= 0) {
+ ret = -EINVAL;
+ break;
+ }
}
return ret;
@@ -910,9 +924,18 @@
else
pattern = DP_TRAINING_PATTERN_2;
- dp_ctrl_update_vx_px(ctrl);
+ ret = dp_ctrl_update_vx_px(ctrl);
+ if (ret <= 0) {
+ ret = -EINVAL;
+ return ret;
+ }
ctrl->catalog->set_pattern(ctrl->catalog, pattern);
- dp_ctrl_train_pattern_set(ctrl, pattern | DP_RECOVERED_CLOCK_OUT_EN);
+ ret = dp_ctrl_train_pattern_set(ctrl,
+ pattern | DP_RECOVERED_CLOCK_OUT_EN);
+ if (ret <= 0) {
+ ret = -EINVAL;
+ return ret;
+ }
do {
drm_dp_link_train_channel_eq_delay(ctrl->panel->dpcd);
@@ -932,7 +955,11 @@
tries++;
ctrl->link->adjust_levels(ctrl->link, link_status);
- dp_ctrl_update_vx_px(ctrl);
+ ret = dp_ctrl_update_vx_px(ctrl);
+ if (ret <= 0) {
+ ret = -EINVAL;
+ break;
+ }
} while (1);
return ret;
@@ -954,9 +981,16 @@
ctrl->link->link_params.bw_code);
link_info.capabilities = ctrl->panel->link_info.capabilities;
- drm_dp_link_configure(ctrl->aux->drm_aux, &link_info);
- drm_dp_dpcd_write(ctrl->aux->drm_aux, DP_MAIN_LINK_CHANNEL_CODING_SET,
- &encoding, 1);
+ ret = drm_dp_link_configure(ctrl->aux->drm_aux, &link_info);
+ if (ret)
+ goto end;
+
+ ret = drm_dp_dpcd_write(ctrl->aux->drm_aux,
+ DP_MAIN_LINK_CHANNEL_CODING_SET, &encoding, 1);
+ if (ret <= 0) {
+ ret = -EINVAL;
+ goto end;
+ }
ret = dp_ctrl_link_train_1(ctrl);
if (ret) {
@@ -992,11 +1026,6 @@
ctrl->catalog->mainlink_ctrl(ctrl->catalog, true);
- ret = ctrl->link->psm_config(ctrl->link,
- &ctrl->panel->link_info, false);
- if (ret)
- goto end;
-
if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN)
goto end;
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c
index a530642..d00f159 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.c
+++ b/drivers/gpu/drm/msm/dp/dp_debug.c
@@ -200,9 +200,13 @@
if (kstrtoint(buf, 10, &hpd) != 0)
goto end;
- debug->usbpd->connect(debug->usbpd, hpd);
+ hpd &= 0x3;
+
+ debug->dp_debug.psm_enabled = !!(hpd & BIT(1));
+
+ debug->usbpd->simulate_connect(debug->usbpd, !!(hpd & BIT(0)));
end:
- return -len;
+ return len;
}
static ssize_t dp_debug_write_edid_modes(struct file *file,
@@ -666,7 +670,10 @@
dir = debugfs_create_dir(DEBUG_NAME, NULL);
if (IS_ERR_OR_NULL(dir)) {
- rc = PTR_ERR(dir);
+ if (!dir)
+ rc = -EINVAL;
+ else
+ rc = PTR_ERR(dir);
pr_err("[%s] debugfs create dir failed, rc = %d\n",
DEBUG_NAME, rc);
goto error;
@@ -749,6 +756,8 @@
return 0;
error_remove_dir:
+ if (!file)
+ rc = -EINVAL;
debugfs_remove_recursive(dir);
error:
return rc;
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.h b/drivers/gpu/drm/msm/dp/dp_debug.h
index 6e3e9a9..3b2d23e 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.h
+++ b/drivers/gpu/drm/msm/dp/dp_debug.h
@@ -29,6 +29,7 @@
*/
struct dp_debug {
bool debug_en;
+ bool psm_enabled;
int aspect_ratio;
int vdisplay;
int hdisplay;
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index c0623d8..01a2a9c 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -61,7 +61,6 @@
/* state variables */
bool core_initialized;
bool power_on;
- bool hpd_irq_on;
bool audio_supported;
struct platform_device *pdev;
@@ -85,8 +84,10 @@
struct dp_display_mode mode;
struct dp_display dp_display;
- struct workqueue_struct *hdcp_workqueue;
+ struct workqueue_struct *wq;
struct delayed_work hdcp_cb_work;
+ struct work_struct connect_work;
+ struct work_struct attention_work;
struct mutex hdcp_mutex;
struct mutex session_lock;
int hdcp_status;
@@ -191,26 +192,13 @@
dp->hdcp_status = status;
if (dp->dp_display.is_connected)
- queue_delayed_work(dp->hdcp_workqueue, &dp->hdcp_cb_work, HZ/4);
-}
-
-static int dp_display_create_hdcp_workqueue(struct dp_display_private *dp)
-{
- dp->hdcp_workqueue = create_workqueue("sdm_dp_hdcp");
- if (IS_ERR_OR_NULL(dp->hdcp_workqueue)) {
- pr_err("Error creating hdcp_workqueue\n");
- return -EPERM;
- }
-
- INIT_DELAYED_WORK(&dp->hdcp_cb_work, dp_display_hdcp_cb_work);
-
- return 0;
+ queue_delayed_work(dp->wq, &dp->hdcp_cb_work, HZ/4);
}
static void dp_display_destroy_hdcp_workqueue(struct dp_display_private *dp)
{
- if (dp->hdcp_workqueue)
- destroy_workqueue(dp->hdcp_workqueue);
+ if (dp->wq)
+ destroy_workqueue(dp->wq);
}
static void dp_display_update_hdcp_info(struct dp_display_private *dp)
@@ -286,16 +274,10 @@
mutex_init(&dp->hdcp_mutex);
- rc = dp_display_create_hdcp_workqueue(dp);
- if (rc) {
- pr_err("Failed to create HDCP workqueue\n");
- goto error;
- }
-
hdcp_init_data.client_id = HDCP_CLIENT_DP;
hdcp_init_data.drm_aux = dp->aux->drm_aux;
hdcp_init_data.cb_data = (void *)dp;
- hdcp_init_data.workq = dp->hdcp_workqueue;
+ hdcp_init_data.workq = dp->wq;
hdcp_init_data.mutex = &dp->hdcp_mutex;
hdcp_init_data.sec_access = true;
hdcp_init_data.notify_status = dp_display_notify_hdcp_status_cb;
@@ -466,16 +448,6 @@
static int dp_display_send_hpd_notification(struct dp_display_private *dp,
bool hpd)
{
- if ((hpd && dp->dp_display.is_connected) ||
- (!hpd && !dp->dp_display.is_connected)) {
- pr_info("HPD already %s\n", (hpd ? "on" : "off"));
- return 0;
- }
-
- /* reset video pattern flag on disconnect */
- if (!hpd)
- dp->panel->video_test = false;
-
dp->dp_display.is_connected = hpd;
reinit_completion(&dp->notification_comp);
dp_display_send_hpd_event(&dp->dp_display);
@@ -484,6 +456,8 @@
pr_warn("%s timeout\n", hpd ? "connect" : "disconnect");
/* cancel any pending request */
dp->ctrl->abort(dp->ctrl);
+ dp->aux->abort(dp->aux);
+
return -EINVAL;
}
@@ -497,12 +471,20 @@
dp->aux->init(dp->aux, dp->parser->aux_cfg);
- if (dp->link->psm_enabled)
- goto notify;
+ if (dp->debug->psm_enabled) {
+ dp->link->psm_config(dp->link, &dp->panel->link_info, false);
+ dp->debug->psm_enabled = false;
+ }
rc = dp->panel->read_sink_caps(dp->panel, dp->dp_display.connector);
- if (rc)
- goto notify;
+ if (rc) {
+ if (rc == -ETIMEDOUT) {
+ pr_err("Sink cap read failed, skip notification\n");
+ goto end;
+ } else {
+ goto notify;
+ }
+ }
dp->link->process_request(dp->link);
@@ -557,22 +539,28 @@
dp->core_initialized = false;
}
-static void dp_display_process_hpd_low(struct dp_display_private *dp)
+static int dp_display_process_hpd_low(struct dp_display_private *dp)
{
- /* cancel any pending request */
- dp->ctrl->abort(dp->ctrl);
+ int rc = 0;
- if (dp_display_is_hdcp_enabled(dp) && dp->hdcp.ops->off) {
- cancel_delayed_work_sync(&dp->hdcp_cb_work);
- dp->hdcp.ops->off(dp->hdcp.data);
+ if (!dp->dp_display.is_connected) {
+ pr_debug("HPD already off\n");
+ return 0;
}
+ if (dp_display_is_hdcp_enabled(dp) && dp->hdcp.ops->off)
+ dp->hdcp.ops->off(dp->hdcp.data);
+
if (dp->audio_supported)
dp->audio->off(dp->audio);
- dp_display_send_hpd_notification(dp, false);
+ rc = dp_display_send_hpd_notification(dp, false);
dp->aux->deinit(dp->aux);
+
+ dp->panel->video_test = false;
+
+ return rc;
}
static int dp_display_usbpd_configure_cb(struct device *dev)
@@ -596,7 +584,7 @@
dp_display_host_init(dp);
if (dp->usbpd->hpd_high)
- dp_display_process_hpd_high(dp);
+ queue_work(dp->wq, &dp->connect_work);
end:
return rc;
}
@@ -616,6 +604,24 @@
dp->power_on = false;
}
+static int dp_display_handle_disconnect(struct dp_display_private *dp)
+{
+ int rc;
+
+ rc = dp_display_process_hpd_low(dp);
+
+ mutex_lock(&dp->session_lock);
+ if (rc && dp->power_on)
+ dp_display_clean(dp);
+
+ if (!dp->usbpd->alt_mode_cfg_done)
+ dp_display_host_deinit(dp);
+
+ mutex_unlock(&dp->session_lock);
+
+ return rc;
+}
+
static int dp_display_usbpd_disconnect_cb(struct device *dev)
{
int rc = 0;
@@ -634,54 +640,52 @@
goto end;
}
+ if (dp->debug->psm_enabled)
+ dp->link->psm_config(dp->link, &dp->panel->link_info, true);
+
/* cancel any pending request */
dp->ctrl->abort(dp->ctrl);
+ dp->aux->abort(dp->aux);
- if (dp->audio_supported)
- dp->audio->off(dp->audio);
+ /* wait for idle state */
+ flush_workqueue(dp->wq);
- rc = dp_display_send_hpd_notification(dp, false);
-
- mutex_lock(&dp->session_lock);
-
- /* if cable is disconnected, reset psm_enabled flag */
- if (!dp->usbpd->alt_mode_cfg_done)
- dp->link->psm_enabled = false;
-
- if ((rc < 0) && dp->power_on)
- dp_display_clean(dp);
-
- dp_display_host_deinit(dp);
-
- mutex_unlock(&dp->session_lock);
+ dp_display_handle_disconnect(dp);
end:
return rc;
}
-static void dp_display_handle_video_request(struct dp_display_private *dp)
-{
- if (dp->link->sink_request & DP_TEST_LINK_VIDEO_PATTERN) {
- /* force disconnect followed by connect */
- dp->usbpd->connect(dp->usbpd, false);
- dp->panel->video_test = true;
- dp->usbpd->connect(dp->usbpd, true);
- dp->link->send_test_response(dp->link);
- }
-}
-
-static int dp_display_handle_hpd_irq(struct dp_display_private *dp)
+static void dp_display_attention_work(struct work_struct *work)
{
bool req_handled;
+ struct dp_display_private *dp = container_of(work,
+ struct dp_display_private, attention_work);
+
+ if (dp_display_is_hdcp_enabled(dp) && dp->hdcp.ops->cp_irq) {
+ if (!dp->hdcp.ops->cp_irq(dp->hdcp.data))
+ return;
+ }
if (dp->link->sink_request & DS_PORT_STATUS_CHANGED) {
- dp_display_send_hpd_notification(dp, false);
+ dp_display_handle_disconnect(dp);
if (dp_display_is_sink_count_zero(dp)) {
pr_debug("sink count is zero, nothing to do\n");
- return 0;
+ return;
}
- return dp_display_process_hpd_high(dp);
+ queue_work(dp->wq, &dp->connect_work);
+ return;
+ }
+
+ if (dp->link->sink_request & DP_TEST_LINK_VIDEO_PATTERN) {
+ dp_display_handle_disconnect(dp);
+
+ dp->panel->video_test = true;
+ dp_display_send_hpd_notification(dp, true);
+ dp->link->send_test_response(dp->link);
+
+ return;
}
mutex_lock(&dp->audio->ops_lock);
@@ -696,15 +700,10 @@
dp->audio->off(dp->audio);
dp->audio->on(dp->audio);
}
-
- dp_display_handle_video_request(dp);
-
- return 0;
}
static int dp_display_usbpd_attention_cb(struct device *dev)
{
- int rc = 0;
struct dp_display_private *dp;
if (!dev) {
@@ -718,32 +717,36 @@
return -ENODEV;
}
- if (dp->usbpd->hpd_irq) {
- dp->hpd_irq_on = true;
+ if (dp->usbpd->hpd_irq && dp->usbpd->hpd_high) {
+ dp->link->process_request(dp->link);
+ queue_work(dp->wq, &dp->attention_work);
+ } else if (dp->usbpd->hpd_high) {
+ queue_work(dp->wq, &dp->connect_work);
+ } else {
+ /* cancel any pending request */
+ dp->ctrl->abort(dp->ctrl);
+ dp->aux->abort(dp->aux);
- if (dp_display_is_hdcp_enabled(dp) && dp->hdcp.ops->cp_irq) {
- if (!dp->hdcp.ops->cp_irq(dp->hdcp.data))
- goto end;
- }
+ /* wait for idle state */
+ flush_workqueue(dp->wq);
- rc = dp->link->process_request(dp->link);
- /* check for any test request issued by sink */
- if (!rc)
- dp_display_handle_hpd_irq(dp);
-
- dp->hpd_irq_on = false;
- goto end;
+ dp_display_handle_disconnect(dp);
}
- if (!dp->usbpd->hpd_high) {
- dp_display_process_hpd_low(dp);
- goto end;
+ return 0;
+}
+
+static void dp_display_connect_work(struct work_struct *work)
+{
+ struct dp_display_private *dp = container_of(work,
+ struct dp_display_private, connect_work);
+
+ if (dp->dp_display.is_connected) {
+ pr_debug("HPD already on\n");
+ return;
}
- if (dp->usbpd->alt_mode_cfg_done)
- dp_display_process_hpd_high(dp);
-end:
- return rc;
+ dp_display_process_hpd_high(dp);
}
static void dp_display_deinit_sub_modules(struct dp_display_private *dp)
@@ -953,6 +956,8 @@
goto end;
}
+ dp->aux->init(dp->aux, dp->parser->aux_cfg);
+
rc = dp->ctrl->on(dp->ctrl);
if (dp->debug->tpg_state)
@@ -983,25 +988,27 @@
goto end;
}
+ dp->panel->spd_config(dp->panel);
+
if (dp->audio_supported) {
dp->audio->bw_code = dp->link->link_params.bw_code;
dp->audio->lane_count = dp->link->link_params.lane_count;
dp->audio->on(dp->audio);
}
- complete_all(&dp->notification_comp);
-
dp_display_update_hdcp_info(dp);
if (dp_display_is_hdcp_enabled(dp)) {
cancel_delayed_work_sync(&dp->hdcp_cb_work);
dp->hdcp_status = HDCP_STATE_AUTHENTICATING;
- queue_delayed_work(dp->hdcp_workqueue,
- &dp->hdcp_cb_work, HZ / 2);
+ queue_delayed_work(dp->wq, &dp->hdcp_cb_work, HZ / 2);
}
-
end:
+ /* clear framework event notifier */
+ dp_display->send_hpd_event = NULL;
+
+ complete_all(&dp->notification_comp);
mutex_unlock(&dp->session_lock);
return 0;
}
@@ -1032,12 +1039,7 @@
dp->hdcp.ops->off(dp->hdcp.data);
}
- if (dp->usbpd->alt_mode_cfg_done && (dp->usbpd->hpd_high ||
- dp->usbpd->forced_disconnect))
- dp->link->psm_config(dp->link, &dp->panel->link_info, true);
-
dp->ctrl->push_idle(dp->ctrl);
-
end:
mutex_unlock(&dp->session_lock);
return 0;
@@ -1189,6 +1191,21 @@
return dp->panel->setup_hdr(dp->panel, hdr);
}
+static int dp_display_create_workqueue(struct dp_display_private *dp)
+{
+ dp->wq = create_singlethread_workqueue("drm_dp");
+ if (IS_ERR_OR_NULL(dp->wq)) {
+ pr_err("Error creating wq\n");
+ return -EPERM;
+ }
+
+ INIT_DELAYED_WORK(&dp->hdcp_cb_work, dp_display_hdcp_cb_work);
+ INIT_WORK(&dp->connect_work, dp_display_connect_work);
+ INIT_WORK(&dp->attention_work, dp_display_attention_work);
+
+ return 0;
+}
+
static int dp_display_probe(struct platform_device *pdev)
{
int rc = 0;
@@ -1196,12 +1213,15 @@
if (!pdev || !pdev->dev.of_node) {
pr_err("pdev not found\n");
- return -ENODEV;
+ rc = -ENODEV;
+ goto bail;
}
dp = devm_kzalloc(&pdev->dev, sizeof(*dp), GFP_KERNEL);
- if (!dp)
- return -ENOMEM;
+ if (!dp) {
+ rc = -ENOMEM;
+ goto bail;
+ }
init_completion(&dp->notification_comp);
@@ -1210,8 +1230,14 @@
rc = dp_init_sub_modules(dp);
if (rc) {
- devm_kfree(&pdev->dev, dp);
- return -EPROBE_DEFER;
+ rc = -EPROBE_DEFER;
+ goto err_dev;
+ }
+
+ rc = dp_display_create_workqueue(dp);
+ if (rc) {
+ pr_err("Failed to create workqueue\n");
+ goto err_sub_mod;
}
platform_set_drvdata(pdev, dp);
@@ -1235,10 +1261,16 @@
rc = component_add(&pdev->dev, &dp_display_comp_ops);
if (rc) {
pr_err("component add failed, rc=%d\n", rc);
- dp_display_deinit_sub_modules(dp);
- devm_kfree(&pdev->dev, dp);
+ goto err_sub_mod;
}
+ return 0;
+
+err_sub_mod:
+ dp_display_deinit_sub_modules(dp);
+err_dev:
+ devm_kfree(&pdev->dev, dp);
+bail:
return rc;
}
@@ -1301,7 +1333,7 @@
return ret;
}
-module_init(dp_display_init);
+late_initcall(dp_display_init);
static void __exit dp_display_cleanup(void)
{
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index 1915254..2c29ad2 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -287,12 +287,11 @@
return dp->pre_kickoff(dp, params->hdr_meta);
}
-int dp_connector_post_init(struct drm_connector *connector,
- void *info, void *display, struct msm_mode_info *mode_info)
+int dp_connector_post_init(struct drm_connector *connector, void *display)
{
struct dp_display *dp_display = display;
- if (!info || !dp_display)
+ if (!dp_display)
return -EINVAL;
dp_display->connector = connector;
@@ -510,9 +509,6 @@
mode->vrefresh = drm_mode_vrefresh(mode);
- if (mode->vrefresh > 60)
- return MODE_BAD;
-
if (mode->clock > dp_disp->max_pclk_khz)
return MODE_BAD;
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.h b/drivers/gpu/drm/msm/dp/dp_drm.h
index e856be1..1673212 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.h
+++ b/drivers/gpu/drm/msm/dp/dp_drm.h
@@ -45,15 +45,10 @@
/**
* dp_connector_post_init - callback to perform additional initialization steps
* @connector: Pointer to drm connector structure
- * @info: Pointer to sde connector info structure
* @display: Pointer to private display handle
- * @mode_info: Pointer to mode info structure
* Returns: Zero on success
*/
-int dp_connector_post_init(struct drm_connector *connector,
- void *info,
- void *display,
- struct msm_mode_info *mode_info);
+int dp_connector_post_init(struct drm_connector *connector, void *display);
/**
* dp_connector_detect - callback to determine if connector is connected
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
index 84ba4ef..3ca247c 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.c
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -987,8 +987,6 @@
if (ret)
pr_err("Failed to %s low power mode\n",
(enable ? "enter" : "exit"));
- else
- dp_link->psm_enabled = enable;
return ret;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h
index 4bb7be5..6f79b6a 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.h
+++ b/drivers/gpu/drm/msm/dp/dp_link.h
@@ -86,7 +86,6 @@
struct dp_link {
u32 sink_request;
u32 test_response;
- bool psm_enabled;
struct dp_link_sink_count sink_count;
struct dp_link_test_video test_video;
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index 041581d..96f9d3a 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -29,10 +29,11 @@
struct dp_aux *aux;
struct dp_link *link;
struct dp_catalog_panel *catalog;
- bool aux_cfg_update_done;
bool custom_edid;
bool custom_dpcd;
bool panel_on;
+ u8 spd_vendor_name[8];
+ u8 spd_product_description[16];
};
static const struct dp_panel_info fail_safe = {
@@ -52,6 +53,13 @@
.bpp = 24,
};
+/* OEM NAME */
+static const u8 vendor_name[8] = {81, 117, 97, 108, 99, 111, 109, 109};
+
+/* MODEL NAME */
+static const u8 product_desc[16] = {83, 110, 97, 112, 100, 114, 97, 103,
+ 111, 110, 0, 0, 0, 0, 0, 0};
+
static int dp_panel_read_dpcd(struct dp_panel *dp_panel)
{
int rlen, rc = 0;
@@ -77,7 +85,11 @@
dp_panel->dpcd, (DP_RECEIVER_CAP_SIZE + 1));
if (rlen < (DP_RECEIVER_CAP_SIZE + 1)) {
pr_err("dpcd read failed, rlen=%d\n", rlen);
- rc = -EINVAL;
+ if (rlen == -ETIMEDOUT)
+ rc = rlen;
+ else
+ rc = -EINVAL;
+
goto end;
}
@@ -193,8 +205,6 @@
static int dp_panel_read_edid(struct dp_panel *dp_panel,
struct drm_connector *connector)
{
- int retry_cnt = 0;
- const int max_retry = 10;
struct dp_panel_private *panel;
if (!dp_panel) {
@@ -209,24 +219,19 @@
return 0;
}
- do {
- sde_get_edid(connector, &panel->aux->drm_aux->ddc,
- (void **)&dp_panel->edid_ctrl);
- if (!dp_panel->edid_ctrl->edid) {
- pr_err("EDID read failed\n");
- retry_cnt++;
- panel->aux->reconfig(panel->aux);
- panel->aux_cfg_update_done = true;
- } else {
- u8 *buf = (u8 *)dp_panel->edid_ctrl->edid;
- u32 size = buf[0x7F] ? 256 : 128;
+ sde_get_edid(connector, &panel->aux->drm_aux->ddc,
+ (void **)&dp_panel->edid_ctrl);
+ if (!dp_panel->edid_ctrl->edid) {
+ pr_err("EDID read failed\n");
+ } else {
+ u8 *buf = (u8 *)dp_panel->edid_ctrl->edid;
+ u32 size = buf[0x7E] ? 256 : 128;
- print_hex_dump(KERN_DEBUG, "[drm-dp] SINK EDID: ",
- DUMP_PREFIX_NONE, 16, 1, buf, size, false);
+ print_hex_dump(KERN_DEBUG, "[drm-dp] SINK EDID: ",
+ DUMP_PREFIX_NONE, 16, 1, buf, size, false);
- return 0;
- }
- } while (retry_cnt < max_retry);
+ return 0;
+ }
return -EINVAL;
}
@@ -250,6 +255,10 @@
dp_panel->link_info.num_lanes) ||
((drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate)) >
dp_panel->max_bw_code)) {
+ if ((rc == -ETIMEDOUT) || (rc == -ENODEV)) {
+ pr_err("DPCD read failed, return early\n");
+ return rc;
+ }
pr_err("panel dpcd read failed/incorrect, set default params\n");
dp_panel_set_default_link_params(dp_panel);
}
@@ -260,12 +269,6 @@
return rc;
}
- if (panel->aux_cfg_update_done) {
- pr_debug("read DPCD with updated AUX config\n");
- dp_panel_read_dpcd(dp_panel);
- panel->aux_cfg_update_done = false;
- }
-
return 0;
}
@@ -669,6 +672,11 @@
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
hdr = &panel->catalog->hdr_data;
+ hdr->ext_header_byte0 = 0x00;
+ hdr->ext_header_byte1 = 0x04;
+ hdr->ext_header_byte2 = 0x1F;
+ hdr->ext_header_byte3 = 0x00;
+
hdr->vsc_header_byte0 = 0x00;
hdr->vsc_header_byte1 = 0x07;
hdr->vsc_header_byte2 = 0x05;
@@ -680,17 +688,19 @@
/* VSC SDP Payload for DB17 */
hdr->dynamic_range = CEA;
- hdr->bpc = 10;
/* VSC SDP Payload for DB18 */
hdr->content_type = GRAPHICS;
+ hdr->bpc = dp_panel->pinfo.bpp / 3;
+
hdr->vscext_header_byte0 = 0x00;
hdr->vscext_header_byte1 = 0x87;
hdr->vscext_header_byte2 = 0x1D;
hdr->vscext_header_byte3 = 0x13 << 2;
hdr->version = 0x01;
+ hdr->length = 0x1A;
memcpy(&hdr->hdr_meta, hdr_meta, sizeof(hdr->hdr_meta));
@@ -699,6 +709,32 @@
return rc;
}
+static int dp_panel_spd_config(struct dp_panel *dp_panel)
+{
+ int rc = 0;
+ struct dp_panel_private *panel;
+
+ if (!dp_panel) {
+ pr_err("invalid input\n");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ if (!dp_panel->spd_enabled) {
+ pr_debug("SPD Infoframe not enabled\n");
+ goto end;
+ }
+
+ panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+ panel->catalog->spd_vendor_name = panel->spd_vendor_name;
+ panel->catalog->spd_product_description =
+ panel->spd_product_description;
+ panel->catalog->config_spd(panel->catalog);
+end:
+ return rc;
+}
+
struct dp_panel *dp_panel_get(struct dp_panel_in *in)
{
int rc = 0;
@@ -723,8 +759,10 @@
panel->link = in->link;
dp_panel = &panel->dp_panel;
- panel->aux_cfg_update_done = false;
dp_panel->max_bw_code = DP_LINK_BW_8_1;
+ dp_panel->spd_enabled = true;
+ memcpy(panel->spd_vendor_name, vendor_name, (sizeof(u8) * 8));
+ memcpy(panel->spd_product_description, product_desc, (sizeof(u8) * 16));
dp_panel->init = dp_panel_init_panel_info;
dp_panel->deinit = dp_panel_deinit_panel_info;
@@ -737,9 +775,10 @@
dp_panel->set_edid = dp_panel_set_edid;
dp_panel->set_dpcd = dp_panel_set_dpcd;
dp_panel->tpg_config = dp_panel_tpg_config;
+ dp_panel->spd_config = dp_panel_spd_config;
+ dp_panel->setup_hdr = dp_panel_setup_hdr;
dp_panel_edid_register(panel);
- dp_panel->setup_hdr = dp_panel_setup_hdr;
return dp_panel;
error:
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
index 6cc3f4d..128f694 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.h
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -68,6 +68,7 @@
struct sde_edid_ctrl *edid_ctrl;
struct dp_panel_info pinfo;
bool video_test;
+ bool spd_enabled;
u32 vic;
u32 max_pclk_khz;
@@ -91,6 +92,7 @@
int (*setup_hdr)(struct dp_panel *dp_panel,
struct drm_msm_ext_hdr_metadata *hdr_meta);
void (*tpg_config)(struct dp_panel *dp_panel, bool enable);
+ int (*spd_config)(struct dp_panel *dp_panel);
};
/**
diff --git a/drivers/gpu/drm/msm/dp/dp_usbpd.c b/drivers/gpu/drm/msm/dp/dp_usbpd.c
index 98781abb..3ddc499 100644
--- a/drivers/gpu/drm/msm/dp/dp_usbpd.c
+++ b/drivers/gpu/drm/msm/dp/dp_usbpd.c
@@ -64,6 +64,7 @@
};
struct dp_usbpd_private {
+ bool forced_disconnect;
u32 vdo;
struct device *dev;
struct usbpd *pd;
@@ -345,7 +346,7 @@
dp_usbpd_send_event(pd, DP_USBPD_EVT_STATUS);
break;
case USBPD_SVDM_ATTENTION:
- if (pd->dp_usbpd.forced_disconnect)
+ if (pd->forced_disconnect)
break;
pd->vdo = *vdos;
@@ -396,7 +397,7 @@
}
}
-static int dp_usbpd_connect(struct dp_usbpd *dp_usbpd, bool hpd)
+static int dp_usbpd_simulate_connect(struct dp_usbpd *dp_usbpd, bool hpd)
{
int rc = 0;
struct dp_usbpd_private *pd;
@@ -410,7 +411,7 @@
pd = container_of(dp_usbpd, struct dp_usbpd_private, dp_usbpd);
dp_usbpd->hpd_high = hpd;
- dp_usbpd->forced_disconnect = !hpd;
+ pd->forced_disconnect = !hpd;
if (hpd)
pd->dp_cb->configure(pd->dev);
@@ -469,7 +470,7 @@
}
dp_usbpd = &usbpd->dp_usbpd;
- dp_usbpd->connect = dp_usbpd_connect;
+ dp_usbpd->simulate_connect = dp_usbpd_simulate_connect;
return dp_usbpd;
error:
diff --git a/drivers/gpu/drm/msm/dp/dp_usbpd.h b/drivers/gpu/drm/msm/dp/dp_usbpd.h
index 5b392f5..e70ad7d 100644
--- a/drivers/gpu/drm/msm/dp/dp_usbpd.h
+++ b/drivers/gpu/drm/msm/dp/dp_usbpd.h
@@ -49,7 +49,7 @@
* @hpd_irq: Change in the status since last message
* @alt_mode_cfg_done: bool to specify alt mode status
* @debug_en: bool to specify debug mode
- * @connect: simulate disconnect or connect for debug mode
+ * @simulate_connect: simulate disconnect or connect for debug mode
*/
struct dp_usbpd {
enum dp_usbpd_port port;
@@ -63,9 +63,8 @@
bool hpd_irq;
bool alt_mode_cfg_done;
bool debug_en;
- bool forced_disconnect;
- int (*connect)(struct dp_usbpd *dp_usbpd, bool hpd);
+ int (*simulate_connect)(struct dp_usbpd *dp_usbpd, bool hpd);
};
/**
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index a74216b..1f10e3c 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -955,12 +955,30 @@
u8 *cmdbuf;
struct dsi_mode_info *timing;
+ /* override cmd fetch mode during secure session */
+ if (dsi_ctrl->secure_mode) {
+ flags &= ~DSI_CTRL_CMD_FETCH_MEMORY;
+ flags |= DSI_CTRL_CMD_FIFO_STORE;
+ pr_debug("[%s] override to TPG during secure session\n",
+ dsi_ctrl->name);
+ }
+
rc = mipi_dsi_create_packet(&packet, msg);
if (rc) {
pr_err("Failed to create message packet, rc=%d\n", rc);
goto error;
}
+ /* fail cmds more than the supported size in TPG mode */
+ if ((flags & DSI_CTRL_CMD_FIFO_STORE) &&
+ (msg->tx_len > DSI_CTRL_MAX_CMD_FIFO_STORE_SIZE)) {
+ pr_err("[%s] TPG cmd size:%zd not supported, secure:%d\n",
+ dsi_ctrl->name, msg->tx_len,
+ dsi_ctrl->secure_mode);
+ rc = -ENOTSUPP;
+ goto error;
+ }
+
rc = dsi_ctrl_copy_and_pad_cmd(dsi_ctrl,
&packet,
&buffer,
@@ -1554,6 +1572,7 @@
mutex_unlock(&dsi_ctrl_list_lock);
mutex_init(&dsi_ctrl->ctrl_lock);
+ dsi_ctrl->secure_mode = false;
dsi_ctrl->pdev = pdev;
platform_set_drvdata(pdev, dsi_ctrl);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
index a33bbfe..f5b08a0 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
@@ -46,6 +46,9 @@
#define DSI_CTRL_CMD_FETCH_MEMORY 0x20
#define DSI_CTRL_CMD_LAST_COMMAND 0x40
+/* max size supported for dsi cmd transfer using TPG */
+#define DSI_CTRL_MAX_CMD_FIFO_STORE_SIZE 64
+
/**
* enum dsi_power_state - defines power states for dsi controller.
* @DSI_CTRL_POWER_VREG_OFF: Digital and analog supplies for DSI controller
@@ -191,8 +194,9 @@
* Origin is top left of this CTRL.
* @tx_cmd_buf: Tx command buffer.
* @cmd_buffer_iova: cmd buffer mapped address.
- * @vaddr: CPU virtual address of cmd buffer.
* @cmd_buffer_size: Size of command buffer.
+ * @vaddr: CPU virtual address of cmd buffer.
+ * @secure_mode: Indicates if secure-session is in progress
* @debugfs_root: Root for debugfs entries.
* @misr_enable: Frame MISR enable/disable
* @misr_cache: Cached Frame MISR value
@@ -236,6 +240,7 @@
u32 cmd_buffer_iova;
u32 cmd_len;
void *vaddr;
+ u32 secure_mode;
/* Debug Information */
struct dentry *debugfs_root;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index c9c1d4c..2286603 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -1972,6 +1972,7 @@
display_ctrl->ctrl->cmd_buffer_size = display->cmd_buffer_size;
display_ctrl->ctrl->cmd_buffer_iova = display->cmd_buffer_iova;
display_ctrl->ctrl->vaddr = display->vaddr;
+ display_ctrl->ctrl->secure_mode = is_detach ? true : false;
}
end:
@@ -4452,8 +4453,9 @@
void *data;
u32 version = 0;
- display = container_of(work, struct dsi_display, fifo_overflow_work);
- if (!display || (display->panel->panel_mode != DSI_OP_VIDEO_MODE))
+ display = container_of(work, struct dsi_display, lp_rx_timeout_work);
+ if (!display || !display->panel ||
+ (display->panel->panel_mode != DSI_OP_VIDEO_MODE))
return;
pr_debug("handle DSI LP RX Timeout error\n");
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
index 91da637..b0a06e1 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
@@ -356,7 +356,7 @@
.mode_set = dsi_bridge_mode_set,
};
-int dsi_conn_post_init(struct drm_connector *connector,
+int dsi_conn_set_info_blob(struct drm_connector *connector,
void *info, void *display, struct msm_mode_info *mode_info)
{
struct dsi_display *dsi_display = display;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
index 9a47969..ec58479 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
@@ -33,14 +33,14 @@
};
/**
- * dsi_conn_post_init - callback to perform additional initialization steps
+ * dsi_conn_set_info_blob - callback to perform info blob initialization
* @connector: Pointer to drm connector structure
* @info: Pointer to sde connector info structure
* @display: Pointer to private display handle
* @mode_info: Pointer to mode info structure
* Returns: Zero on success
*/
-int dsi_conn_post_init(struct drm_connector *connector,
+int dsi_conn_set_info_blob(struct drm_connector *connector,
void *info,
void *display,
struct msm_mode_info *mode_info);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 5bb474d..e5c3082 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -122,6 +122,7 @@
PLANE_PROP_BLEND_OP,
PLANE_PROP_SRC_CONFIG,
PLANE_PROP_FB_TRANSLATION_MODE,
+ PLANE_PROP_MULTIRECT_MODE,
/* total # of properties */
PLANE_PROP_COUNT
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index cc75fb5..2581caf 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -34,8 +34,8 @@
struct msm_gpu *gpu, uint32_t nr_bos, uint32_t nr_cmds)
{
struct msm_gem_submit *submit;
- uint64_t sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) +
- (nr_cmds * sizeof(submit->cmd[0]));
+ uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) +
+ ((u64)nr_cmds * sizeof(submit->cmd[0]));
if (sz > SIZE_MAX)
return NULL;
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index c99fb0c..fb1f578 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -577,23 +577,26 @@
return rc;
}
-void sde_connector_clk_ctrl(struct drm_connector *connector, bool enable)
+int sde_connector_clk_ctrl(struct drm_connector *connector, bool enable)
{
struct sde_connector *c_conn;
struct dsi_display *display;
u32 state = enable ? DSI_CLK_ON : DSI_CLK_OFF;
+ int rc = 0;
if (!connector) {
SDE_ERROR("invalid connector\n");
- return;
+ return -EINVAL;
}
c_conn = to_sde_connector(connector);
display = (struct dsi_display *) c_conn->display;
if (display && c_conn->ops.clk_ctrl)
- c_conn->ops.clk_ctrl(display->mdp_clk_handle,
+ rc = c_conn->ops.clk_ctrl(display->mdp_clk_handle,
DSI_ALL_CLKS, state);
+
+ return rc;
}
static void sde_connector_destroy(struct drm_connector *connector)
@@ -621,6 +624,8 @@
drm_property_unreference_blob(c_conn->blob_dither);
if (c_conn->blob_mode_info)
drm_property_unreference_blob(c_conn->blob_mode_info);
+ if (c_conn->blob_ext_hdr)
+ drm_property_unreference_blob(c_conn->blob_ext_hdr);
msm_property_destroy(&c_conn->property_info);
if (c_conn->bl_device)
@@ -998,6 +1003,9 @@
}
break;
case CONNECTOR_PROP_RETIRE_FENCE:
+ if (!val)
+ goto end;
+
rc = sde_fence_create(&c_conn->retire_fence, &fence_fd, 0);
if (rc) {
SDE_ERROR("fence create failed rc:%d\n", rc);
@@ -1651,16 +1659,15 @@
SDE_DEBUG_CONN(c_conn, "invalid connector state\n");
}
- if (!c_conn->ops.post_init) {
- SDE_ERROR_CONN(c_conn, "post_init not defined\n");
- goto exit;
- }
-
- rc = c_conn->ops.post_init(conn, info, c_conn->display,
- &mode_info);
- if (rc) {
- SDE_ERROR_CONN(c_conn, "post-init failed, %d\n", rc);
- goto exit;
+ if (c_conn->ops.set_info_blob) {
+ rc = c_conn->ops.set_info_blob(conn, info,
+ c_conn->display, &mode_info);
+ if (rc) {
+ SDE_ERROR_CONN(c_conn,
+ "set_info_blob failed, %d\n",
+ rc);
+ goto exit;
+ }
}
blob = c_conn->blob_caps;
@@ -1797,6 +1804,14 @@
CONNECTOR_PROP_COUNT, CONNECTOR_PROP_BLOBCOUNT,
sizeof(struct sde_connector_state));
+ if (c_conn->ops.post_init) {
+ rc = c_conn->ops.post_init(&c_conn->base, display);
+ if (rc) {
+ SDE_ERROR("post-init failed, %d\n", rc);
+ goto error_cleanup_fence;
+ }
+ }
+
msm_property_install_blob(&c_conn->property_info,
"capabilities",
DRM_MODE_PROP_IMMUTABLE,
@@ -1849,10 +1864,19 @@
_sde_connector_install_dither_property(dev, sde_kms, c_conn);
if (connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ struct drm_msm_ext_hdr_properties hdr = {0};
+
msm_property_install_blob(&c_conn->property_info,
"ext_hdr_properties",
DRM_MODE_PROP_IMMUTABLE,
CONNECTOR_PROP_EXT_HDR_INFO);
+
+ /* set default values to avoid reading uninitialized data */
+ msm_property_set_blob(&c_conn->property_info,
+ &c_conn->blob_ext_hdr,
+ &hdr,
+ sizeof(hdr),
+ CONNECTOR_PROP_EXT_HDR_INFO);
}
msm_property_install_volatile_range(&c_conn->property_info,
@@ -1912,6 +1936,8 @@
drm_property_unreference_blob(c_conn->blob_dither);
if (c_conn->blob_mode_info)
drm_property_unreference_blob(c_conn->blob_mode_info);
+ if (c_conn->blob_ext_hdr)
+ drm_property_unreference_blob(c_conn->blob_ext_hdr);
msm_property_destroy(&c_conn->property_info);
error_cleanup_fence:
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index 18fc66d..a7bad7c 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -36,12 +36,21 @@
/**
* post_init - perform additional initialization steps
* @connector: Pointer to drm connector structure
+ * @display: Pointer to private display handle
+ * Returns: Zero on success
+ */
+ int (*post_init)(struct drm_connector *connector,
+ void *display);
+
+ /**
+ * set_info_blob - initialize given info blob
+ * @connector: Pointer to drm connector structure
* @info: Pointer to sde connector info structure
* @display: Pointer to private display handle
* @mode_info: Pointer to mode info structure
* Returns: Zero on success
*/
- int (*post_init)(struct drm_connector *connector,
+ int (*set_info_blob)(struct drm_connector *connector,
void *info,
void *display,
struct msm_mode_info *mode_info);
@@ -547,8 +556,9 @@
* sde_connector_clk_ctrl - enables/disables the connector clks
* @connector: Pointer to drm connector object
* @enable: true/false to enable/disable
+ * Returns: Zero on success
*/
-void sde_connector_clk_ctrl(struct drm_connector *connector, bool enable);
+int sde_connector_clk_ctrl(struct drm_connector *connector, bool enable);
/**
* sde_connector_get_dpms - query dpms setting
diff --git a/drivers/gpu/drm/msm/sde/sde_core_irq.c b/drivers/gpu/drm/msm/sde/sde_core_irq.c
index b6c6234..a6f22c9 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_irq.c
+++ b/drivers/gpu/drm/msm/sde/sde_core_irq.c
@@ -460,6 +460,7 @@
{
struct msm_drm_private *priv;
int i;
+ int rc;
if (!sde_kms) {
SDE_ERROR("invalid sde_kms\n");
@@ -473,7 +474,14 @@
}
priv = sde_kms->dev->dev_private;
- sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
+ rc = sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
+ true);
+ if (rc) {
+ SDE_ERROR("failed to enable power resource %d\n", rc);
+ SDE_EVT32(rc, SDE_EVTLOG_ERROR);
+ return;
+ }
+
sde_clear_all_irqs(sde_kms);
sde_disable_all_irqs(sde_kms);
sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
@@ -504,6 +512,7 @@
{
struct msm_drm_private *priv;
int i;
+ int rc;
if (!sde_kms) {
SDE_ERROR("invalid sde_kms\n");
@@ -517,7 +526,14 @@
}
priv = sde_kms->dev->dev_private;
- sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
+ rc = sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
+ true);
+ if (rc) {
+ SDE_ERROR("failed to enable power resource %d\n", rc);
+ SDE_EVT32(rc, SDE_EVTLOG_ERROR);
+ return;
+ }
+
for (i = 0; i < sde_kms->irq_obj.total_irqs; i++)
if (atomic_read(&sde_kms->irq_obj.enable_counts[i]) ||
!list_empty(&sde_kms->irq_obj.irq_cb_tbl[i]))
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index d37888f..057d4a9 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -3048,6 +3048,11 @@
return;
}
+ if (!sde_kms_power_resource_is_enabled(crtc->dev)) {
+ SDE_ERROR("power resource is not enabled\n");
+ return;
+ }
+
SDE_DEBUG("crtc%d\n", crtc->base.id);
sde_crtc = to_sde_crtc(crtc);
@@ -3137,6 +3142,11 @@
return;
}
+ if (!sde_kms_power_resource_is_enabled(crtc->dev)) {
+ SDE_ERROR("power resource is not enabled\n");
+ return;
+ }
+
SDE_DEBUG("crtc%d\n", crtc->base.id);
sde_crtc = to_sde_crtc(crtc);
@@ -3396,16 +3406,18 @@
* _sde_crtc_reset_hw - attempt hardware reset on errors
* @crtc: Pointer to DRM crtc instance
* @old_state: Pointer to crtc state for previous commit
+ * @dump_status: Whether or not to dump debug status before reset
* Returns: Zero if current commit should still be attempted
*/
static int _sde_crtc_reset_hw(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_crtc_state *old_state, bool dump_status)
{
struct drm_plane *plane_halt[MAX_PLANES];
struct drm_plane *plane;
const struct drm_plane_state *pstate;
struct sde_crtc *sde_crtc;
struct sde_hw_ctl *ctl;
+ enum sde_ctl_rot_op_mode old_rot_op_mode;
signed int i, plane_count;
int rc;
@@ -3413,6 +3425,13 @@
return -EINVAL;
sde_crtc = to_sde_crtc(crtc);
+ old_rot_op_mode = to_sde_crtc_state(old_state)->sbuf_cfg.rot_op_mode;
+ SDE_EVT32(DRMID(crtc), old_rot_op_mode,
+ dump_status, SDE_EVTLOG_FUNC_ENTRY);
+
+ if (dump_status)
+ SDE_DBG_DUMP("all", "dbg_bus", "vbif_dbg_bus");
+
for (i = 0; i < sde_crtc->num_mixers; ++i) {
ctl = sde_crtc->mixers[i].hw_ctl;
if (!ctl || !ctl->ops.reset)
@@ -3428,11 +3447,19 @@
}
}
- /* early out if simple ctl reset succeeded */
- if (i == sde_crtc->num_mixers) {
- SDE_EVT32(DRMID(crtc), i);
+ /*
+ * Early out if simple ctl reset succeeded and previous commit
+ * did not involve the rotator.
+ *
+ * If the previous commit had rotation enabled, then the ctl
+ * reset would also have reset the rotator h/w. The rotator
+ * programming for the current commit may need to be repeated,
+ * depending on the rotation mode; don't handle this for now
+ * and just force a hard reset in those cases.
+ */
+ if (i == sde_crtc->num_mixers &&
+ old_rot_op_mode == SDE_CTL_ROT_OP_MODE_OFFLINE)
return false;
- }
SDE_DEBUG("crtc%d: issuing hard reset\n", DRMID(crtc));
@@ -3605,7 +3632,8 @@
* preparing for the kickoff
*/
if (reset_req) {
- if (_sde_crtc_reset_hw(crtc, old_state))
+ if (_sde_crtc_reset_hw(crtc, old_state,
+ !sde_crtc->reset_request))
is_error = true;
/* force offline rotation mode since the commit has no pipes */
@@ -3613,6 +3641,7 @@
cstate->sbuf_cfg.rot_op_mode =
SDE_CTL_ROT_OP_MODE_OFFLINE;
}
+ sde_crtc->reset_request = reset_req;
/* wait for frame_event_done completion */
SDE_ATRACE_BEGIN("wait_for_frame_done_event");
@@ -4026,6 +4055,12 @@
SDE_ERROR("invalid crtc\n");
return;
}
+
+ if (!sde_kms_power_resource_is_enabled(crtc->dev)) {
+ SDE_ERROR("power resource is not enabled\n");
+ return;
+ }
+
sde_crtc = to_sde_crtc(crtc);
cstate = to_sde_crtc_state(crtc->state);
priv = crtc->dev->dev_private;
@@ -4140,6 +4175,11 @@
}
priv = crtc->dev->dev_private;
+ if (!sde_kms_power_resource_is_enabled(crtc->dev)) {
+ SDE_ERROR("power resource is not enabled\n");
+ return;
+ }
+
SDE_DEBUG("crtc%d\n", crtc->base.id);
SDE_EVT32_VERBOSE(DRMID(crtc));
sde_crtc = to_sde_crtc(crtc);
@@ -5081,6 +5121,9 @@
cstate->bw_split_vote = true;
break;
case CRTC_PROP_OUTPUT_FENCE:
+ if (!val)
+ goto exit;
+
ret = _sde_crtc_get_output_fence(crtc, state, &fence_fd);
if (ret) {
SDE_ERROR("fence create failed rc:%d\n", ret);
@@ -5811,8 +5854,15 @@
priv = kms->dev->dev_private;
ret = 0;
if (crtc_drm->enabled) {
- sde_power_resource_enable(&priv->phandle, kms->core_client,
- true);
+ ret = sde_power_resource_enable(&priv->phandle,
+ kms->core_client, true);
+ if (ret) {
+ SDE_ERROR("failed to enable power resource %d\n", ret);
+ SDE_EVT32(ret, SDE_EVTLOG_ERROR);
+ kfree(node);
+ return ret;
+ }
+
INIT_LIST_HEAD(&node->irq.list);
ret = node->func(crtc_drm, true, &node->irq);
sde_power_resource_enable(&priv->phandle, kms->core_client,
@@ -5866,7 +5916,15 @@
return 0;
}
priv = kms->dev->dev_private;
- sde_power_resource_enable(&priv->phandle, kms->core_client, true);
+ ret = sde_power_resource_enable(&priv->phandle, kms->core_client, true);
+ if (ret) {
+ SDE_ERROR("failed to enable power resource %d\n", ret);
+ SDE_EVT32(ret, SDE_EVTLOG_ERROR);
+ list_del(&node->list);
+ kfree(node);
+ return ret;
+ }
+
ret = node->func(crtc_drm, false, &node->irq);
list_del(&node->list);
kfree(node);
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index 1d5b65e..c6b4afa 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -188,6 +188,8 @@
* @enabled : whether the SDE CRTC is currently enabled. updated in the
* commit-thread, not state-swap time which is earlier, so
* safe to make decisions on during VBLANK on/off work
+ * @reset_request : whether or not a h/w reset was requested for the previous
+ * frame
* @ds_reconfig : force reconfiguration of the destination scaler block
* @feature_list : list of color processing features supported on a crtc
* @active_list : list of color processing features are active
@@ -247,6 +249,7 @@
bool vblank_requested;
bool suspend;
bool enabled;
+ bool reset_request;
bool ds_reconfig;
struct list_head feature_list;
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index d7a3f24..a7dffba 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -247,6 +247,70 @@
#define to_sde_encoder_virt(x) container_of(x, struct sde_encoder_virt, base)
+static void _sde_encoder_pm_qos_add_request(struct drm_encoder *drm_enc)
+{
+ struct msm_drm_private *priv;
+ struct sde_kms *sde_kms;
+ struct pm_qos_request *req;
+ u32 cpu_mask;
+ u32 cpu_dma_latency;
+ int cpu;
+
+ if (!drm_enc->dev || !drm_enc->dev->dev_private) {
+ SDE_ERROR("drm device invalid\n");
+ return;
+ }
+
+ priv = drm_enc->dev->dev_private;
+ if (!priv->kms) {
+ SDE_ERROR("invalid kms\n");
+ return;
+ }
+
+ sde_kms = to_sde_kms(priv->kms);
+ if (!sde_kms || !sde_kms->catalog)
+ return;
+
+ cpu_mask = sde_kms->catalog->perf.cpu_mask;
+ cpu_dma_latency = sde_kms->catalog->perf.cpu_dma_latency;
+ if (!cpu_mask)
+ return;
+
+ req = &sde_kms->pm_qos_cpu_req;
+ req->type = PM_QOS_REQ_AFFINE_CORES;
+ cpumask_empty(&req->cpus_affine);
+ for_each_possible_cpu(cpu) {
+ if ((1 << cpu) & cpu_mask)
+ cpumask_set_cpu(cpu, &req->cpus_affine);
+ }
+ pm_qos_add_request(req, PM_QOS_CPU_DMA_LATENCY, cpu_dma_latency);
+
+ SDE_EVT32_VERBOSE(DRMID(drm_enc), cpu_mask, cpu_dma_latency);
+}
+
+static void _sde_encoder_pm_qos_remove_request(struct drm_encoder *drm_enc)
+{
+ struct msm_drm_private *priv;
+ struct sde_kms *sde_kms;
+
+ if (!drm_enc->dev || !drm_enc->dev->dev_private) {
+ SDE_ERROR("drm device invalid\n");
+ return;
+ }
+
+ priv = drm_enc->dev->dev_private;
+ if (!priv->kms) {
+ SDE_ERROR("invalid kms\n");
+ return;
+ }
+
+ sde_kms = to_sde_kms(priv->kms);
+ if (!sde_kms || !sde_kms->catalog || !sde_kms->catalog->perf.cpu_mask)
+ return;
+
+ pm_qos_remove_request(&sde_kms->pm_qos_cpu_req);
+}
+
static struct drm_connector_state *_sde_encoder_get_conn_state(
struct drm_encoder *drm_enc)
{
@@ -1671,37 +1735,61 @@
}
}
-static void _sde_encoder_resource_control_helper(struct drm_encoder *drm_enc,
+static int _sde_encoder_resource_control_helper(struct drm_encoder *drm_enc,
bool enable)
{
struct msm_drm_private *priv;
struct sde_kms *sde_kms;
struct sde_encoder_virt *sde_enc;
+ int rc;
+ bool is_cmd_mode, is_primary;
sde_enc = to_sde_encoder_virt(drm_enc);
priv = drm_enc->dev->dev_private;
sde_kms = to_sde_kms(priv->kms);
+ is_cmd_mode = sde_enc->disp_info.capabilities &
+ MSM_DISPLAY_CAP_CMD_MODE;
+ is_primary = sde_enc->disp_info.is_primary;
+
SDE_DEBUG_ENC(sde_enc, "enable:%d\n", enable);
SDE_EVT32(DRMID(drm_enc), enable);
if (!sde_enc->cur_master) {
SDE_ERROR("encoder master not set\n");
- return;
+ return -EINVAL;
}
if (enable) {
/* enable SDE core clks */
- sde_power_resource_enable(&priv->phandle,
+ rc = sde_power_resource_enable(&priv->phandle,
sde_kms->core_client, true);
+ if (rc) {
+ SDE_ERROR("failed to enable power resource %d\n", rc);
+ SDE_EVT32(rc, SDE_EVTLOG_ERROR);
+ return rc;
+ }
/* enable DSI clks */
- sde_connector_clk_ctrl(sde_enc->cur_master->connector, true);
+ rc = sde_connector_clk_ctrl(sde_enc->cur_master->connector,
+ true);
+ if (rc) {
+ SDE_ERROR("failed to enable clk control %d\n", rc);
+ sde_power_resource_enable(&priv->phandle,
+ sde_kms->core_client, false);
+ return rc;
+ }
/* enable all the irq */
_sde_encoder_irq_control(drm_enc, true);
+ if (is_cmd_mode && is_primary)
+ _sde_encoder_pm_qos_add_request(drm_enc);
+
} else {
+ if (is_cmd_mode && is_primary)
+ _sde_encoder_pm_qos_remove_request(drm_enc);
+
/* disable all the irq */
_sde_encoder_irq_control(drm_enc, false);
@@ -1713,6 +1801,7 @@
sde_kms->core_client, false);
}
+ return 0;
}
static int sde_encoder_resource_control(struct drm_encoder *drm_enc,
@@ -1791,7 +1880,19 @@
_sde_encoder_irq_control(drm_enc, true);
} else {
/* enable all the clks and resources */
- _sde_encoder_resource_control_helper(drm_enc, true);
+ ret = _sde_encoder_resource_control_helper(drm_enc,
+ true);
+ if (ret) {
+ SDE_ERROR_ENC(sde_enc,
+ "sw_event:%d, rc in state %d\n",
+ sw_event, sde_enc->rc_state);
+ SDE_EVT32(DRMID(drm_enc), sw_event,
+ sde_enc->rc_state,
+ SDE_EVTLOG_ERROR);
+ mutex_unlock(&sde_enc->rc_lock);
+ return ret;
+ }
+
_sde_encoder_resource_control_rsc_update(drm_enc, true);
}
@@ -1949,7 +2050,18 @@
/* return if the resource control is already in ON state */
if (sde_enc->rc_state != SDE_ENC_RC_STATE_ON) {
/* enable all the clks and resources */
- _sde_encoder_resource_control_helper(drm_enc, true);
+ ret = _sde_encoder_resource_control_helper(drm_enc,
+ true);
+ if (ret) {
+ SDE_ERROR_ENC(sde_enc,
+ "sw_event:%d, rc in state %d\n",
+ sw_event, sde_enc->rc_state);
+ SDE_EVT32(DRMID(drm_enc), sw_event,
+ sde_enc->rc_state,
+ SDE_EVTLOG_ERROR);
+ mutex_unlock(&sde_enc->rc_lock);
+ return ret;
+ }
_sde_encoder_resource_control_rsc_update(drm_enc, true);
@@ -2078,6 +2190,11 @@
return;
}
+ if (!sde_kms_power_resource_is_enabled(drm_enc->dev)) {
+ SDE_ERROR("power resource is not enabled\n");
+ return;
+ }
+
sde_enc = to_sde_encoder_virt(drm_enc);
SDE_DEBUG_ENC(sde_enc, "\n");
@@ -2285,6 +2402,11 @@
}
sde_enc = to_sde_encoder_virt(drm_enc);
+ if (!sde_kms_power_resource_is_enabled(drm_enc->dev)) {
+ SDE_ERROR("power resource is not enabled\n");
+ return;
+ }
+
ret = _sde_encoder_get_mode_info(drm_enc, &mode_info);
if (ret) {
SDE_ERROR_ENC(sde_enc, "failed to get mode info\n");
@@ -2379,6 +2501,11 @@
return;
}
+ if (!sde_kms_power_resource_is_enabled(drm_enc->dev)) {
+ SDE_ERROR("power resource is not enabled\n");
+ return;
+ }
+
sde_enc = to_sde_encoder_virt(drm_enc);
SDE_DEBUG_ENC(sde_enc, "\n");
@@ -3271,6 +3398,7 @@
struct sde_encoder_virt *sde_enc;
struct sde_encoder_phys *phys;
bool needs_hw_reset = false;
+ uint32_t ln_cnt1, ln_cnt2;
unsigned int i;
int rc, ret = 0;
@@ -3283,6 +3411,13 @@
SDE_DEBUG_ENC(sde_enc, "\n");
SDE_EVT32(DRMID(drm_enc));
+ /* save this for later, in case of errors */
+ if (sde_enc->cur_master && sde_enc->cur_master->ops.get_wr_line_count)
+ ln_cnt1 = sde_enc->cur_master->ops.get_wr_line_count(
+ sde_enc->cur_master);
+ else
+ ln_cnt1 = -EINVAL;
+
/* prepare for next kickoff, may include waiting on previous kickoff */
SDE_ATRACE_BEGIN("enc_prepare_for_kickoff");
for (i = 0; i < sde_enc->num_phys_encs; i++) {
@@ -3301,11 +3436,24 @@
}
SDE_ATRACE_END("enc_prepare_for_kickoff");
- sde_encoder_resource_control(drm_enc, SDE_ENC_RC_EVENT_KICKOFF);
+ rc = sde_encoder_resource_control(drm_enc, SDE_ENC_RC_EVENT_KICKOFF);
+ if (rc) {
+ SDE_ERROR_ENC(sde_enc, "resource kickoff failed rc %d\n", rc);
+ return rc;
+ }
/* if any phys needs reset, reset all phys, in-order */
if (needs_hw_reset) {
- SDE_EVT32(DRMID(drm_enc), SDE_EVTLOG_FUNC_CASE1);
+ /* query line count before cur_master is updated */
+ if (sde_enc->cur_master &&
+ sde_enc->cur_master->ops.get_wr_line_count)
+ ln_cnt2 = sde_enc->cur_master->ops.get_wr_line_count(
+ sde_enc->cur_master);
+ else
+ ln_cnt2 = -EINVAL;
+
+ SDE_EVT32(DRMID(drm_enc), ln_cnt1, ln_cnt2,
+ SDE_EVTLOG_FUNC_CASE1);
for (i = 0; i < sde_enc->num_phys_encs; i++) {
phys = sde_enc->phys_encs[i];
if (phys && phys->ops.hw_reset)
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
index edfdc0b..cfe2126 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -132,7 +132,8 @@
* @restore: Restore all the encoder configs.
* @is_autorefresh_enabled: provides the autorefresh current
* enable/disable state.
- * @get_line_count: Obtain current vertical line count
+ * @get_line_count: Obtain current internal vertical line count
+ * @get_wr_line_count: Obtain current output vertical line count
* @wait_dma_trigger: Returns true if lut dma has to trigger and wait
* unitl transaction is complete.
* @wait_for_active: Wait for display scan line to be in active area
@@ -182,6 +183,7 @@
void (*restore)(struct sde_encoder_phys *phys);
bool (*is_autorefresh_enabled)(struct sde_encoder_phys *phys);
int (*get_line_count)(struct sde_encoder_phys *phys);
+ int (*get_wr_line_count)(struct sde_encoder_phys *phys);
bool (*wait_dma_trigger)(struct sde_encoder_phys *phys);
int (*wait_for_active)(struct sde_encoder_phys *phys);
};
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
index 756984b..d7cbfbe 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
@@ -448,9 +448,7 @@
cmd_enc->pp_timeout_report_cnt = PP_TIMEOUT_MAX_TRIALS;
frame_event |= SDE_ENCODER_FRAME_EVENT_PANEL_DEAD;
- sde_encoder_helper_unregister_irq(phys_enc, INTR_IDX_RDPTR);
SDE_DBG_DUMP("panic");
- sde_encoder_helper_register_irq(phys_enc, INTR_IDX_RDPTR);
} else if (cmd_enc->pp_timeout_report_cnt == 1) {
/* to avoid flooding, only log first time, and "dead" time */
SDE_ERROR_CMDENC(cmd_enc,
@@ -461,10 +459,6 @@
atomic_read(&phys_enc->pending_kickoff_cnt));
SDE_EVT32(DRMID(phys_enc->parent), SDE_EVTLOG_FATAL);
-
- sde_encoder_helper_unregister_irq(phys_enc, INTR_IDX_RDPTR);
- SDE_DBG_DUMP("all", "dbg_bus", "vbif_dbg_bus");
- sde_encoder_helper_register_irq(phys_enc, INTR_IDX_RDPTR);
}
atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
@@ -958,6 +952,28 @@
return hw_pp->ops.get_line_count(hw_pp);
}
+static int sde_encoder_phys_cmd_get_write_line_count(
+ struct sde_encoder_phys *phys_enc)
+{
+ struct sde_hw_pingpong *hw_pp;
+ struct sde_hw_pp_vsync_info info;
+
+ if (!phys_enc || !phys_enc->hw_pp)
+ return -EINVAL;
+
+ if (!sde_encoder_phys_cmd_is_master(phys_enc))
+ return -EINVAL;
+
+ hw_pp = phys_enc->hw_pp;
+ if (!hw_pp->ops.get_vsync_info)
+ return -EINVAL;
+
+ if (hw_pp->ops.get_vsync_info(hw_pp, &info))
+ return -EINVAL;
+
+ return (int)info.wr_ptr_line_count;
+}
+
static void sde_encoder_phys_cmd_disable(struct sde_encoder_phys *phys_enc)
{
struct sde_encoder_phys_cmd *cmd_enc =
@@ -1302,6 +1318,7 @@
ops->is_autorefresh_enabled =
sde_encoder_phys_cmd_is_autorefresh_enabled;
ops->get_line_count = sde_encoder_phys_cmd_get_line_count;
+ ops->get_wr_line_count = sde_encoder_phys_cmd_get_write_line_count;
ops->wait_for_active = NULL;
}
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
index 47aa5e9..aaf50f6 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
@@ -823,19 +823,9 @@
if (vid_enc->error_count >= KICKOFF_MAX_ERRORS) {
vid_enc->error_count = KICKOFF_MAX_ERRORS;
- sde_encoder_helper_unregister_irq(
- phys_enc, INTR_IDX_VSYNC);
SDE_DBG_DUMP("panic");
- sde_encoder_helper_register_irq(
- phys_enc, INTR_IDX_VSYNC);
} else if (vid_enc->error_count == 1) {
SDE_EVT32(DRMID(phys_enc->parent), SDE_EVTLOG_FATAL);
-
- sde_encoder_helper_unregister_irq(
- phys_enc, INTR_IDX_VSYNC);
- SDE_DBG_DUMP("all", "dbg_bus", "vbif_dbg_bus");
- sde_encoder_helper_register_irq(
- phys_enc, INTR_IDX_VSYNC);
}
/* request a ctl reset before the next flush */
@@ -1111,6 +1101,7 @@
ops->trigger_flush = sde_encoder_helper_trigger_flush;
ops->hw_reset = sde_encoder_helper_hw_reset;
ops->get_line_count = sde_encoder_phys_vid_get_line_count;
+ ops->get_wr_line_count = sde_encoder_phys_vid_get_line_count;
ops->wait_dma_trigger = sde_encoder_phys_vid_wait_dma_trigger;
ops->wait_for_active = sde_encoder_phys_vid_wait_for_active;
}
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
index bf7d3da..42cf015 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
@@ -27,7 +27,8 @@
#define to_sde_encoder_phys_wb(x) \
container_of(x, struct sde_encoder_phys_wb, base)
-#define WBID(wb_enc) ((wb_enc) ? wb_enc->wb_dev->wb_idx : -1)
+#define WBID(wb_enc) \
+ ((wb_enc && wb_enc->wb_dev) ? wb_enc->wb_dev->wb_idx - WB_0 : -1)
#define TO_S15D16(_x_) ((_x_) << 7)
@@ -867,11 +868,11 @@
wb_enc->irq_idx, true);
if (irq_status) {
SDE_DEBUG("wb:%d done but irq not triggered\n",
- wb_enc->wb_dev->wb_idx - WB_0);
+ WBID(wb_enc));
sde_encoder_phys_wb_done_irq(wb_enc, wb_enc->irq_idx);
} else {
SDE_ERROR("wb:%d kickoff timed out\n",
- wb_enc->wb_dev->wb_idx - WB_0);
+ WBID(wb_enc));
atomic_add_unless(
&phys_enc->pending_retire_fence_cnt, -1, 0);
@@ -904,8 +905,7 @@
if (!rc) {
wb_time = (u64)ktime_to_us(wb_enc->end_time) -
(u64)ktime_to_us(wb_enc->start_time);
- SDE_DEBUG("wb:%d took %llu us\n",
- wb_enc->wb_dev->wb_idx - WB_0, wb_time);
+ SDE_DEBUG("wb:%d took %llu us\n", WBID(wb_enc), wb_time);
}
/* cleanup writeback framebuffer */
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index 2acbb0c..5d359be 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -103,6 +103,7 @@
struct msm_drm_private *priv;
struct sde_danger_safe_status status;
int i;
+ int rc;
if (!kms || !kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
SDE_ERROR("invalid arg(s)\n");
@@ -112,7 +113,13 @@
priv = kms->dev->dev_private;
memset(&status, 0, sizeof(struct sde_danger_safe_status));
- sde_power_resource_enable(&priv->phandle, kms->core_client, true);
+ rc = sde_power_resource_enable(&priv->phandle, kms->core_client, true);
+ if (rc) {
+ SDE_ERROR("failed to enable power resource %d\n", rc);
+ SDE_EVT32(rc, SDE_EVTLOG_ERROR);
+ return rc;
+ }
+
if (danger_status) {
seq_puts(s, "\nDanger signal status:\n");
if (kms->hw_mdp->ops.get_danger_status)
@@ -541,7 +548,13 @@
return;
priv = dev->dev_private;
- sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
+ rc = sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
+ true);
+ if (rc) {
+ SDE_ERROR("failed to enable power resource %d\n", rc);
+ SDE_EVT32(rc, SDE_EVTLOG_ERROR);
+ return;
+ }
for_each_crtc_in_state(state, crtc, crtc_state, i) {
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
@@ -587,10 +600,20 @@
static void sde_kms_commit(struct msm_kms *kms,
struct drm_atomic_state *old_state)
{
+ struct sde_kms *sde_kms;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
int i;
+ if (!kms || !old_state)
+ return;
+ sde_kms = to_sde_kms(kms);
+
+ if (!sde_kms_power_resource_is_enabled(sde_kms->dev)) {
+ SDE_ERROR("power resource is not enabled\n");
+ return;
+ }
+
for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
if (crtc->state->active) {
SDE_EVT32(DRMID(crtc));
@@ -618,6 +641,11 @@
return;
priv = sde_kms->dev->dev_private;
+ if (!sde_kms_power_resource_is_enabled(sde_kms->dev)) {
+ SDE_ERROR("power resource is not enabled\n");
+ return;
+ }
+
for_each_crtc_in_state(old_state, crtc, old_crtc_state, i)
sde_crtc_complete_commit(crtc, old_crtc_state);
@@ -829,7 +857,7 @@
struct sde_kms *sde_kms)
{
static const struct sde_connector_ops dsi_ops = {
- .post_init = dsi_conn_post_init,
+ .set_info_blob = dsi_conn_set_info_blob,
.detect = dsi_conn_detect,
.get_modes = dsi_connector_get_modes,
.put_modes = dsi_connector_put_modes,
@@ -848,6 +876,7 @@
};
static const struct sde_connector_ops wb_ops = {
.post_init = sde_wb_connector_post_init,
+ .set_info_blob = sde_wb_connector_set_info_blob,
.detect = sde_wb_connector_detect,
.get_modes = sde_wb_connector_get_modes,
.set_property = sde_wb_connector_set_property,
@@ -1974,7 +2003,7 @@
} else if (global_crtc && (global_crtc != cur_crtc)) {
SDE_ERROR(
"crtc%d-sec%d not allowed during crtc%d-sec%d\n",
- cur_crtc->base.id, sec_session,
+ cur_crtc ? cur_crtc->base.id : -1, sec_session,
global_crtc->base.id, global_sec_session);
return -EPERM;
}
@@ -2470,41 +2499,6 @@
return ret;
}
-static void _sde_kms_pm_qos_add_request(struct sde_kms *sde_kms)
-{
- struct pm_qos_request *req;
- u32 cpu_mask;
- u32 cpu_dma_latency;
- int cpu;
-
- if (!sde_kms || !sde_kms->catalog)
- return;
-
- cpu_mask = sde_kms->catalog->perf.cpu_mask;
- cpu_dma_latency = sde_kms->catalog->perf.cpu_dma_latency;
- if (!cpu_mask)
- return;
-
- req = &sde_kms->pm_qos_cpu_req;
- req->type = PM_QOS_REQ_AFFINE_CORES;
- cpumask_empty(&req->cpus_affine);
- for_each_possible_cpu(cpu) {
- if ((1 << cpu) & cpu_mask)
- cpumask_set_cpu(cpu, &req->cpus_affine);
- }
- pm_qos_add_request(req, PM_QOS_CPU_DMA_LATENCY, cpu_dma_latency);
-
- SDE_EVT32_VERBOSE(cpu_mask, cpu_dma_latency);
-}
-
-static void _sde_kms_pm_qos_remove_request(struct sde_kms *sde_kms)
-{
- if (!sde_kms || !sde_kms->catalog || !sde_kms->catalog->perf.cpu_mask)
- return;
-
- pm_qos_remove_request(&sde_kms->pm_qos_cpu_req);
-}
-
/* the caller api needs to turn on clock before calling this function */
static int _sde_kms_cont_splash_res_init(struct sde_kms *sde_kms)
{
@@ -2582,9 +2576,7 @@
if (event_type == SDE_POWER_EVENT_POST_ENABLE) {
sde_irq_update(msm_kms, true);
sde_vbif_init_memtypes(sde_kms);
- _sde_kms_pm_qos_add_request(sde_kms);
} else if (event_type == SDE_POWER_EVENT_PRE_DISABLE) {
- _sde_kms_pm_qos_remove_request(sde_kms);
sde_irq_update(msm_kms, false);
}
}
@@ -2681,6 +2673,7 @@
struct sde_kms *sde_kms;
struct drm_device *dev;
struct msm_drm_private *priv;
+ bool splash_mem_found = false;
int i, rc = -EINVAL;
if (!kms) {
@@ -2775,8 +2768,10 @@
rc = _sde_kms_get_splash_data(&sde_kms->splash_data);
if (rc) {
- SDE_ERROR("sde splash data fetch failed: %d\n", rc);
- goto error;
+ SDE_DEBUG("sde splash data fetch failed: %d\n", rc);
+ splash_mem_found = false;
+ } else {
+ splash_mem_found = true;
}
rc = sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
@@ -2802,7 +2797,12 @@
sde_dbg_init_dbg_buses(sde_kms->core_rev);
- _sde_kms_cont_splash_res_init(sde_kms);
+ /*
+ * Attempt continuous splash handoff only if reserved
+ * splash memory is found.
+ */
+ if (splash_mem_found)
+ _sde_kms_cont_splash_res_init(sde_kms);
/* Initialize reg dma block which is a singleton */
rc = sde_reg_dma_init(sde_kms->reg_dma, sde_kms->catalog,
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
index 26c45e2..501797b 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.h
+++ b/drivers/gpu/drm/msm/sde/sde_kms.h
@@ -244,6 +244,23 @@
bool sde_is_custom_client(void);
/**
+ * sde_kms_power_resource_is_enabled - whether or not power resource is enabled
+ * @dev: Pointer to drm device
+ * Return: true if power resource is enabled; false otherwise
+ */
+static inline bool sde_kms_power_resource_is_enabled(struct drm_device *dev)
+{
+ struct msm_drm_private *priv;
+
+ if (!dev || !dev->dev_private)
+ return false;
+
+ priv = dev->dev_private;
+
+ return sde_power_resource_is_enabled(&priv->phandle);
+}
+
+/**
* sde_kms_is_suspend_state - whether or not the system is pm suspended
* @dev: Pointer to drm device
* Return: Suspend status
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index 067c4604..ab48c4a 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -58,6 +58,9 @@
#define SDE_PLANE_COLOR_FILL_FLAG BIT(31)
+#define TIME_MULTIPLEX_RECT(r0, r1, buffer_lines) \
+ ((r0).y >= ((r1).y + (r1).h + buffer_lines))
+
/* multirect rect index */
enum {
R0,
@@ -515,6 +518,7 @@
struct sde_plane *psde;
struct msm_drm_private *priv;
struct sde_kms *sde_kms;
+ int rc;
if (!plane || !plane->dev) {
SDE_ERROR("invalid arguments\n");
@@ -533,7 +537,13 @@
if (!psde->is_rt_pipe)
goto end;
- sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
+ rc = sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
+ true);
+ if (rc) {
+ SDE_ERROR("failed to enable power resource %d\n", rc);
+ SDE_EVT32(rc, SDE_EVTLOG_ERROR);
+ return rc;
+ }
_sde_plane_set_qos_ctrl(plane, enable, SDE_PLANE_QOS_PANIC_CTRL);
@@ -2780,6 +2790,12 @@
pstate->multirect_mode = SDE_SSPP_MULTIRECT_NONE;
}
+/**
+ * multi_rect validate API allows to validate only R0 and R1 RECT
+ * passing for each plane. Client of this API must not pass multiple
+ * plane which are not sharing same XIN client. Such calls will fail
+ * even though kernel client is passing valid multirect configuration.
+ */
int sde_plane_validate_multirect_v2(struct sde_multirect_plane_states *plane)
{
struct sde_plane_state *pstate[R_MAX];
@@ -2787,37 +2803,44 @@
struct sde_rect src[R_MAX], dst[R_MAX];
struct sde_plane *sde_plane[R_MAX];
const struct sde_format *fmt[R_MAX];
+ int xin_id[R_MAX];
bool q16_data = true;
- int i, buffer_lines;
+ int i, j, buffer_lines, width_threshold[R_MAX];
unsigned int max_tile_height = 1;
bool parallel_fetch_qualified = true;
- bool has_tiled_rect = false;
+ enum sde_sspp_multirect_mode mode = SDE_SSPP_MULTIRECT_NONE;
+ const struct msm_format *msm_fmt;
for (i = 0; i < R_MAX; i++) {
- const struct msm_format *msm_fmt;
-
drm_state[i] = i ? plane->r1 : plane->r0;
- msm_fmt = msm_framebuffer_format(drm_state[i]->fb);
- fmt[i] = to_sde_format(msm_fmt);
-
- if (SDE_FORMAT_IS_UBWC(fmt[i])) {
- has_tiled_rect = true;
- if (fmt[i]->tile_height > max_tile_height)
- max_tile_height = fmt[i]->tile_height;
+ if (!drm_state[i]) {
+ SDE_ERROR("drm plane state is NULL\n");
+ return -EINVAL;
}
- }
-
- for (i = 0; i < R_MAX; i++) {
- int width_threshold;
pstate[i] = to_sde_plane_state(drm_state[i]);
sde_plane[i] = to_sde_plane(drm_state[i]->plane);
+ xin_id[i] = sde_plane[i]->pipe_hw->cap->xin_id;
- if (pstate[i] == NULL) {
- SDE_ERROR("SDE plane state of plane id %d is NULL\n",
- drm_state[i]->plane->base.id);
+ for (j = 0; j < i; j++) {
+ if (xin_id[i] != xin_id[j]) {
+ SDE_ERROR_PLANE(sde_plane[i],
+ "invalid multirect validate call base:%d xin_id:%d curr:%d xin:%d\n",
+ j, xin_id[j], i, xin_id[i]);
+ return -EINVAL;
+ }
+ }
+
+ msm_fmt = msm_framebuffer_format(drm_state[i]->fb);
+ if (!msm_fmt) {
+ SDE_ERROR_PLANE(sde_plane[i], "null fb\n");
return -EINVAL;
}
+ fmt[i] = to_sde_format(msm_fmt);
+
+ if (SDE_FORMAT_IS_UBWC(fmt[i]) &&
+ (fmt[i]->tile_height > max_tile_height))
+ max_tile_height = fmt[i]->tile_height;
POPULATE_RECT(&src[i], drm_state[i]->src_x, drm_state[i]->src_y,
drm_state[i]->src_w, drm_state[i]->src_h, q16_data);
@@ -2844,41 +2867,81 @@
* So we cannot support more than half of the supported SSPP
* width for tiled formats.
*/
- width_threshold = sde_plane[i]->pipe_sblk->maxlinewidth;
- if (has_tiled_rect)
- width_threshold /= 2;
+ width_threshold[i] = sde_plane[i]->pipe_sblk->maxlinewidth;
+ if (SDE_FORMAT_IS_UBWC(fmt[i]))
+ width_threshold[i] /= 2;
- if (parallel_fetch_qualified && src[i].w > width_threshold)
+ if (parallel_fetch_qualified && src[i].w > width_threshold[i])
parallel_fetch_qualified = false;
+ if (sde_plane[i]->is_virtual)
+ mode = sde_plane_get_property(pstate[i],
+ PLANE_PROP_MULTIRECT_MODE);
}
- /* Validate RECT's and set the mode */
-
- /* Prefer PARALLEL FETCH Mode over TIME_MX Mode */
- if (parallel_fetch_qualified) {
- pstate[R0]->multirect_mode = SDE_SSPP_MULTIRECT_PARALLEL;
- pstate[R1]->multirect_mode = SDE_SSPP_MULTIRECT_PARALLEL;
-
- goto done;
- }
-
- /* TIME_MX Mode */
buffer_lines = 2 * max_tile_height;
- if ((dst[R1].y >= dst[R0].y + dst[R0].h + buffer_lines) ||
- (dst[R0].y >= dst[R1].y + dst[R1].h + buffer_lines)) {
- pstate[R0]->multirect_mode = SDE_SSPP_MULTIRECT_TIME_MX;
- pstate[R1]->multirect_mode = SDE_SSPP_MULTIRECT_TIME_MX;
- } else {
- SDE_ERROR(
- "No multirect mode possible for the planes (%d - %d)\n",
- drm_state[R0]->plane->base.id,
- drm_state[R1]->plane->base.id);
- return -EINVAL;
+ /**
+ * fallback to driver mode selection logic if client is using
+ * multirect plane without setting property.
+ *
+ * validate multirect mode configuration based on rectangle
+ */
+ switch (mode) {
+ case SDE_SSPP_MULTIRECT_NONE:
+ if (parallel_fetch_qualified)
+ mode = SDE_SSPP_MULTIRECT_PARALLEL;
+ else if (TIME_MULTIPLEX_RECT(dst[R1], dst[R0], buffer_lines) ||
+ TIME_MULTIPLEX_RECT(dst[R0], dst[R1], buffer_lines))
+ mode = SDE_SSPP_MULTIRECT_TIME_MX;
+ else
+ SDE_ERROR(
+ "planes(%d - %d) multirect mode selection fail\n",
+ drm_state[R0]->plane->base.id,
+ drm_state[R1]->plane->base.id);
+ break;
+
+ case SDE_SSPP_MULTIRECT_PARALLEL:
+ if (!parallel_fetch_qualified) {
+ SDE_ERROR("R0 plane:%d width_threshold:%d src_w:%d\n",
+ drm_state[R0]->plane->base.id,
+ width_threshold[R0], src[R0].w);
+ SDE_ERROR("R1 plane:%d width_threshold:%d src_w:%d\n",
+ drm_state[R1]->plane->base.id,
+ width_threshold[R1], src[R1].w);
+ SDE_ERROR("parallel fetch not qualified\n");
+ mode = SDE_SSPP_MULTIRECT_NONE;
+ }
+ break;
+
+ case SDE_SSPP_MULTIRECT_TIME_MX:
+ if (!TIME_MULTIPLEX_RECT(dst[R1], dst[R0], buffer_lines) &&
+ !TIME_MULTIPLEX_RECT(dst[R0], dst[R1], buffer_lines)) {
+ SDE_ERROR(
+ "buffer_lines:%d R0 plane:%d dst_y:%d dst_h:%d\n",
+ buffer_lines, drm_state[R0]->plane->base.id,
+ dst[R0].y, dst[R0].h);
+ SDE_ERROR(
+ "buffer_lines:%d R1 plane:%d dst_y:%d dst_h:%d\n",
+ buffer_lines, drm_state[R1]->plane->base.id,
+ dst[R1].y, dst[R1].h);
+ SDE_ERROR("time multiplexed fetch not qualified\n");
+ mode = SDE_SSPP_MULTIRECT_NONE;
+ }
+ break;
+
+ default:
+ SDE_ERROR("bad mode:%d selection\n", mode);
+ mode = SDE_SSPP_MULTIRECT_NONE;
+ break;
}
-done:
+ for (i = 0; i < R_MAX; i++)
+ pstate[i]->multirect_mode = mode;
+
+ if (mode == SDE_SSPP_MULTIRECT_NONE)
+ return -EINVAL;
+
if (sde_plane[R0]->is_virtual) {
pstate[R0]->multirect_index = SDE_SSPP_RECT_1;
pstate[R1]->multirect_index = SDE_SSPP_RECT_0;
@@ -2891,6 +2954,7 @@
pstate[R0]->multirect_mode, pstate[R0]->multirect_index);
SDE_DEBUG_PLANE(sde_plane[R1], "R1: %d - %d\n",
pstate[R1]->multirect_mode, pstate[R1]->multirect_index);
+
return 0;
}
@@ -3602,6 +3666,7 @@
case PLANE_PROP_CSC_V1:
pstate->dirty |= SDE_PLANE_DIRTY_FORMAT;
break;
+ case PLANE_PROP_MULTIRECT_MODE:
case PLANE_PROP_COLOR_FILL:
/* potentially need to refresh everything */
pstate->dirty = SDE_PLANE_DIRTY_ALL;
@@ -4022,6 +4087,11 @@
{SDE_DRM_FB_NON_SEC_DIR_TRANS, "non_sec_direct_translation"},
{SDE_DRM_FB_SEC_DIR_TRANS, "sec_direct_translation"},
};
+ static const struct drm_prop_enum_list e_multirect_mode[] = {
+ {SDE_SSPP_MULTIRECT_NONE, "none"},
+ {SDE_SSPP_MULTIRECT_PARALLEL, "parallel"},
+ {SDE_SSPP_MULTIRECT_TIME_MX, "serial"},
+ };
const struct sde_format_extended *format_list;
struct sde_kms_info *info;
struct sde_plane *psde = to_sde_plane(plane);
@@ -4171,6 +4241,10 @@
format_list = psde->pipe_sblk->virt_format_list;
sde_kms_info_add_keyint(info, "primary_smart_plane_id",
master_plane_id);
+ msm_property_install_enum(&psde->property_info,
+ "multirect_mode", 0x0, 0, e_multirect_mode,
+ ARRAY_SIZE(e_multirect_mode),
+ PLANE_PROP_MULTIRECT_MODE);
}
if (format_list) {
diff --git a/drivers/gpu/drm/msm/sde/sde_vbif.c b/drivers/gpu/drm/msm/sde/sde_vbif.c
index 522f7f9..0dbc027 100644
--- a/drivers/gpu/drm/msm/sde/sde_vbif.c
+++ b/drivers/gpu/drm/msm/sde/sde_vbif.c
@@ -102,15 +102,6 @@
"wait failed for pipe halt:xin_id %u, clk_ctrl %u, rc %u\n",
xin_id, clk_ctrl, rc);
SDE_EVT32(xin_id, clk_ctrl, rc, SDE_EVTLOG_ERROR);
- return rc;
- }
-
- status = vbif->ops.get_halt_ctrl(vbif, xin_id);
- if (status == 0) {
- SDE_ERROR("halt failed for pipe xin_id %u halt clk_ctrl %u\n",
- xin_id, clk_ctrl);
- SDE_EVT32(xin_id, clk_ctrl, SDE_EVTLOG_ERROR);
- return -ETIMEDOUT;
}
/* open xin client to enable transactions */
@@ -118,7 +109,7 @@
if (forced_on)
mdp->ops.setup_clk_force_ctrl(mdp, clk_ctrl, false);
- return 0;
+ return rc;
}
/**
diff --git a/drivers/gpu/drm/msm/sde/sde_wb.c b/drivers/gpu/drm/msm/sde/sde_wb.c
index a4c8518..71c8b63 100644
--- a/drivers/gpu/drm/msm/sde/sde_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_wb.c
@@ -352,48 +352,20 @@
return 0;
}
-int sde_wb_connector_post_init(struct drm_connector *connector,
+int sde_wb_connector_set_info_blob(struct drm_connector *connector,
void *info, void *display, struct msm_mode_info *mode_info)
{
- struct sde_connector *c_conn;
struct sde_wb_device *wb_dev = display;
const struct sde_format_extended *format_list;
- static const struct drm_prop_enum_list e_fb_translation_mode[] = {
- {SDE_DRM_FB_NON_SEC, "non_sec"},
- {SDE_DRM_FB_SEC, "sec"},
- };
if (!connector || !info || !display || !wb_dev->wb_cfg) {
SDE_ERROR("invalid params\n");
return -EINVAL;
}
- c_conn = to_sde_connector(connector);
- wb_dev->connector = connector;
- wb_dev->detect_status = connector_status_connected;
format_list = wb_dev->wb_cfg->format_list;
/*
- * Add extra connector properties
- */
- msm_property_install_range(&c_conn->property_info, "FB_ID",
- 0x0, 0, ~0, 0, CONNECTOR_PROP_OUT_FB);
- msm_property_install_range(&c_conn->property_info, "DST_X",
- 0x0, 0, UINT_MAX, 0, CONNECTOR_PROP_DST_X);
- msm_property_install_range(&c_conn->property_info, "DST_Y",
- 0x0, 0, UINT_MAX, 0, CONNECTOR_PROP_DST_Y);
- msm_property_install_range(&c_conn->property_info, "DST_W",
- 0x0, 0, UINT_MAX, 0, CONNECTOR_PROP_DST_W);
- msm_property_install_range(&c_conn->property_info, "DST_H",
- 0x0, 0, UINT_MAX, 0, CONNECTOR_PROP_DST_H);
- msm_property_install_enum(&c_conn->property_info,
- "fb_translation_mode",
- 0x0,
- 0, e_fb_translation_mode,
- ARRAY_SIZE(e_fb_translation_mode),
- CONNECTOR_PROP_FB_TRANSLATION_MODE);
-
- /*
* Populate info buffer
*/
if (format_list) {
@@ -423,6 +395,47 @@
return 0;
}
+int sde_wb_connector_post_init(struct drm_connector *connector, void *display)
+{
+ struct sde_connector *c_conn;
+ struct sde_wb_device *wb_dev = display;
+ static const struct drm_prop_enum_list e_fb_translation_mode[] = {
+ {SDE_DRM_FB_NON_SEC, "non_sec"},
+ {SDE_DRM_FB_SEC, "sec"},
+ };
+
+ if (!connector || !display || !wb_dev->wb_cfg) {
+ SDE_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ c_conn = to_sde_connector(connector);
+ wb_dev->connector = connector;
+ wb_dev->detect_status = connector_status_connected;
+
+ /*
+ * Add extra connector properties
+ */
+ msm_property_install_range(&c_conn->property_info, "FB_ID",
+ 0x0, 0, ~0, 0, CONNECTOR_PROP_OUT_FB);
+ msm_property_install_range(&c_conn->property_info, "DST_X",
+ 0x0, 0, UINT_MAX, 0, CONNECTOR_PROP_DST_X);
+ msm_property_install_range(&c_conn->property_info, "DST_Y",
+ 0x0, 0, UINT_MAX, 0, CONNECTOR_PROP_DST_Y);
+ msm_property_install_range(&c_conn->property_info, "DST_W",
+ 0x0, 0, UINT_MAX, 0, CONNECTOR_PROP_DST_W);
+ msm_property_install_range(&c_conn->property_info, "DST_H",
+ 0x0, 0, UINT_MAX, 0, CONNECTOR_PROP_DST_H);
+ msm_property_install_enum(&c_conn->property_info,
+ "fb_translation_mode",
+ 0x0,
+ 0, e_fb_translation_mode,
+ ARRAY_SIZE(e_fb_translation_mode),
+ CONNECTOR_PROP_FB_TRANSLATION_MODE);
+
+ return 0;
+}
+
struct drm_framebuffer *sde_wb_get_output_fb(struct sde_wb_device *wb_dev)
{
struct drm_framebuffer *fb;
diff --git a/drivers/gpu/drm/msm/sde/sde_wb.h b/drivers/gpu/drm/msm/sde/sde_wb.h
index 5e31664..d414bd0 100644
--- a/drivers/gpu/drm/msm/sde/sde_wb.h
+++ b/drivers/gpu/drm/msm/sde/sde_wb.h
@@ -131,12 +131,20 @@
/**
* sde_wb_connector_post_init - perform writeback specific initialization
* @connector: Pointer to drm connector structure
+ * @display: Pointer to private display structure
+ * Returns: Zero on success
+ */
+int sde_wb_connector_post_init(struct drm_connector *connector, void *display);
+
+/**
+ * sde_wb_connector_set_info_blob - perform writeback info blob initialization
+ * @connector: Pointer to drm connector structure
* @info: Pointer to connector info
* @display: Pointer to private display structure
* @mode_info: Pointer to the mode info structure
* Returns: Zero on success
*/
-int sde_wb_connector_post_init(struct drm_connector *connector,
+int sde_wb_connector_set_info_blob(struct drm_connector *connector,
void *info,
void *display,
struct msm_mode_info *mode_info);
diff --git a/drivers/gpu/drm/msm/sde_dbg.c b/drivers/gpu/drm/msm/sde_dbg.c
index 295e841..6b5be3b 100644
--- a/drivers/gpu/drm/msm/sde_dbg.c
+++ b/drivers/gpu/drm/msm/sde_dbg.c
@@ -2034,12 +2034,13 @@
/**
* _sde_dbg_enable_power - use callback to turn power on for hw register access
* @enable: whether to turn power on or off
+ * Return: zero if success; error code otherwise
*/
-static inline void _sde_dbg_enable_power(int enable)
+static inline int _sde_dbg_enable_power(int enable)
{
if (!sde_dbg_base.power_ctrl.enable_fn)
- return;
- sde_dbg_base.power_ctrl.enable_fn(
+ return -EINVAL;
+ return sde_dbg_base.power_ctrl.enable_fn(
sde_dbg_base.power_ctrl.handle,
sde_dbg_base.power_ctrl.client,
enable);
@@ -2063,6 +2064,7 @@
u32 *dump_addr = NULL;
char *end_addr;
int i;
+ int rc;
if (!len_bytes)
return;
@@ -2103,8 +2105,13 @@
}
}
- if (!from_isr)
- _sde_dbg_enable_power(true);
+ if (!from_isr) {
+ rc = _sde_dbg_enable_power(true);
+ if (rc) {
+ pr_err("failed to enable power %d\n", rc);
+ return;
+ }
+ }
for (i = 0; i < len_align; i++) {
u32 x0, x4, x8, xc;
@@ -2288,6 +2295,7 @@
u32 offset;
void __iomem *mem_base = NULL;
struct sde_dbg_reg_base *reg_base;
+ int rc;
if (!bus || !bus->cmn.entries_size)
return;
@@ -2333,7 +2341,12 @@
}
}
- _sde_dbg_enable_power(true);
+ rc = _sde_dbg_enable_power(true);
+ if (rc) {
+ pr_err("failed to enable power %d\n", rc);
+ return;
+ }
+
for (i = 0; i < bus->cmn.entries_size; i++) {
head = bus->entries + i;
writel_relaxed(TEST_MASK(head->block_id, head->test_id),
@@ -2427,6 +2440,7 @@
struct vbif_debug_bus_entry *dbg_bus;
u32 bus_size;
struct sde_dbg_reg_base *reg_base;
+ int rc;
if (!bus || !bus->cmn.entries_size)
return;
@@ -2484,7 +2498,11 @@
}
}
- _sde_dbg_enable_power(true);
+ rc = _sde_dbg_enable_power(true);
+ if (rc) {
+ pr_err("failed to enable power %d\n", rc);
+ return;
+ }
value = readl_relaxed(mem_base + MMSS_VBIF_CLKON);
writel_relaxed(value | BIT(1), mem_base + MMSS_VBIF_CLKON);
@@ -2969,6 +2987,7 @@
size_t off;
u32 data, cnt;
char buf[24];
+ int rc;
if (!file)
return -EINVAL;
@@ -2999,7 +3018,12 @@
return -EFAULT;
}
- _sde_dbg_enable_power(true);
+ rc = _sde_dbg_enable_power(true);
+ if (rc) {
+ mutex_unlock(&sde_dbg_base.mutex);
+ pr_err("failed to enable power %d\n", rc);
+ return rc;
+ }
writel_relaxed(data, dbg->base + off);
@@ -3024,6 +3048,7 @@
{
struct sde_dbg_reg_base *dbg;
size_t len;
+ int rc;
if (!file)
return -EINVAL;
@@ -3060,7 +3085,12 @@
ptr = dbg->base + dbg->off;
tot = 0;
- _sde_dbg_enable_power(true);
+ rc = _sde_dbg_enable_power(true);
+ if (rc) {
+ mutex_unlock(&sde_dbg_base.mutex);
+ pr_err("failed to enable power %d\n", rc);
+ return rc;
+ }
for (cnt = dbg->cnt; cnt > 0; cnt -= ROW_BYTES) {
hex_dump_to_buffer(ptr, min(cnt, ROW_BYTES),
diff --git a/drivers/gpu/drm/msm/sde_power_handle.c b/drivers/gpu/drm/msm/sde_power_handle.c
index 43fcf0d..34a826d 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.c
+++ b/drivers/gpu/drm/msm/sde_power_handle.c
@@ -983,6 +983,16 @@
return rc;
}
+int sde_power_resource_is_enabled(struct sde_power_handle *phandle)
+{
+ if (!phandle) {
+ pr_err("invalid input argument\n");
+ return false;
+ }
+
+ return phandle->current_usecase_ndx != VOTE_INDEX_DISABLE;
+}
+
int sde_power_clk_set_rate(struct sde_power_handle *phandle, char *clock_name,
u64 rate)
{
diff --git a/drivers/gpu/drm/msm/sde_power_handle.h b/drivers/gpu/drm/msm/sde_power_handle.h
index 9cc78aa..72975e7 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.h
+++ b/drivers/gpu/drm/msm/sde_power_handle.h
@@ -225,6 +225,14 @@
struct sde_power_client *pclient, bool enable);
/**
+ * sde_power_resource_is_enabled() - return true if power resource is enabled
+ * @pdata: power handle containing the resources
+ *
+ * Return: true if enabled; false otherwise
+ */
+int sde_power_resource_is_enabled(struct sde_power_handle *pdata);
+
+/**
* sde_power_data_bus_state_update() - update data bus state
* @pdata: power handle containing the resources
* @enable: take enable vs disable path
diff --git a/drivers/gpu/drm/msm/sde_rsc_hw.c b/drivers/gpu/drm/msm/sde_rsc_hw.c
index 654a2ad..a0d1245 100644
--- a/drivers/gpu/drm/msm/sde_rsc_hw.c
+++ b/drivers/gpu/drm/msm/sde_rsc_hw.c
@@ -204,17 +204,17 @@
/* tcs sleep & wake sequence */
dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x2c,
- 0x2089e6a6, rsc->debug_mode);
+ 0x89e686a6, rsc->debug_mode);
dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x30,
- 0xe7a7e9a9, rsc->debug_mode);
+ 0xa7e9a920, rsc->debug_mode);
dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x34,
- 0x00002089, rsc->debug_mode);
+ 0x2089e787, rsc->debug_mode);
/* branch address */
dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_CFG_BR_ADDR_0_DRV0,
0x2a, rsc->debug_mode);
dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_CFG_BR_ADDR_1_DRV0,
- 0x30, rsc->debug_mode);
+ 0x31, rsc->debug_mode);
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 36005bd..29abd28 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -721,7 +721,7 @@
* allocation taken by fbdev
*/
if (!(dev_priv->capabilities & SVGA_CAP_3D))
- mem_size *= 2;
+ mem_size *= 3;
dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
dev_priv->prim_bb_mem =
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index b75ecdf..ee696e2 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -68,6 +68,7 @@
#define A6XX_CP_MEM_POOL_SIZE 0x8C3
#define A6XX_CP_CHICKEN_DBG 0x841
#define A6XX_CP_ADDR_MODE_CNTL 0x842
+#define A6XX_CP_DBG_ECO_CNTL 0x843
#define A6XX_CP_PROTECT_CNTL 0x84F
#define A6XX_CP_PROTECT_REG 0x850
#define A6XX_CP_CONTEXT_SWITCH_CNTL 0x8A0
@@ -676,6 +677,7 @@
#define A6XX_UCHE_PERFCTR_UCHE_SEL_9 0xE25
#define A6XX_UCHE_PERFCTR_UCHE_SEL_10 0xE26
#define A6XX_UCHE_PERFCTR_UCHE_SEL_11 0xE27
+#define A6XX_UCHE_GBIF_GX_CONFIG 0xE3A
/* SP registers */
#define A6XX_SP_ADDR_MODE_CNTL 0xAE01
diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h
index d0e6d73..770cf3b 100644
--- a/drivers/gpu/msm/adreno-gpulist.h
+++ b/drivers/gpu/msm/adreno-gpulist.h
@@ -347,7 +347,8 @@
.minor = 0,
.patchid = ANY_ID,
.features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_IFPC |
- ADRENO_GPMU | ADRENO_CONTENT_PROTECTION | ADRENO_LM,
+ ADRENO_GPMU | ADRENO_CONTENT_PROTECTION | ADRENO_LM |
+ ADRENO_IOCOHERENT,
.sqefw_name = "a630_sqe.fw",
.zap_name = "a630_zap",
.gpudev = &adreno_a6xx_gpudev,
@@ -375,7 +376,7 @@
.num_protected_regs = 0x20,
.busy_mask = 0xFFFFFFFE,
.gpmufw_name = "a630_gmu.bin",
- .gpmu_major = 0x0,
- .gpmu_minor = 0x005,
+ .gpmu_major = 0x1,
+ .gpmu_minor = 0x001,
},
};
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 9f09aba..13fe0a7 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -119,6 +119,7 @@
.skipsaverestore = 1,
.usesgmem = 1,
},
+ .priv = BIT(ADRENO_DEVICE_PREEMPTION_EXECUTION),
};
/* Ptr to array for the current set of fault detect registers */
@@ -613,6 +614,7 @@
struct adreno_irq *irq_params = gpudev->irq;
irqreturn_t ret = IRQ_NONE;
unsigned int status = 0, fence = 0, fence_retries = 0, tmp, int_bit;
+ unsigned int status_retries = 0;
int i;
atomic_inc(&adreno_dev->pending_irq_refcnt);
@@ -652,6 +654,32 @@
adreno_readreg(adreno_dev, ADRENO_REG_RBBM_INT_0_STATUS, &status);
/*
+ * Read status again to make sure the bits aren't transitory.
+ * Transitory bits mean that they are spurious interrupts and are
+ * seen while preemption is on going. Empirical experiments have
+ * shown that the transitory bits are a timing thing and they
+ * go away in the small time window between two or three consecutive
+ * reads. If they don't go away, log the message and return.
+ */
+ while (status_retries < STATUS_RETRY_MAX) {
+ unsigned int new_status;
+
+ adreno_readreg(adreno_dev, ADRENO_REG_RBBM_INT_0_STATUS,
+ &new_status);
+
+ if (status == new_status)
+ break;
+
+ status = new_status;
+ status_retries++;
+ }
+
+ if (status_retries == STATUS_RETRY_MAX) {
+ KGSL_DRV_CRIT_RATELIMIT(device, "STATUS bits are not stable\n");
+ return ret;
+ }
+
+ /*
* Clear all the interrupt bits but ADRENO_INT_RBBM_AHB_ERROR. Because
* even if we clear it here, it will stay high until it is cleared
* in its respective handler. Otherwise, the interrupt handler will
@@ -1121,6 +1149,9 @@
if (!ADRENO_FEATURE(adreno_dev, ADRENO_CONTENT_PROTECTION))
device->mmu.secured = false;
+ if (ADRENO_FEATURE(adreno_dev, ADRENO_IOCOHERENT))
+ device->mmu.features |= KGSL_MMU_IO_COHERENT;
+
status = adreno_ringbuffer_probe(adreno_dev, nopreempt);
if (status)
goto out;
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index c5db02a..0b4e1df 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -121,6 +121,8 @@
#define ADRENO_HW_NAP BIT(14)
/* The GMU supports min voltage*/
#define ADRENO_MIN_VOLT BIT(15)
+/* The core supports IO-coherent memory */
+#define ADRENO_IOCOHERENT BIT(16)
/*
* Adreno GPU quirks - control bits for various workarounds
@@ -167,6 +169,9 @@
/* Number of times to poll the AHB fence in ISR */
#define FENCE_RETRY_MAX 100
+/* Number of times to see if INT_0_STATUS changed or not */
+#define STATUS_RETRY_MAX 3
+
/* One cannot wait forever for the core to idle, so set an upper limit to the
* amount of time to wait for the core to go idle
*/
@@ -267,6 +272,7 @@
* preempt_level: The level of preemption (for 6XX)
* skipsaverestore: To skip saverestore during L1 preemption (for 6XX)
* usesgmem: enable GMEM save/restore across preemption (for 6XX)
+ * count: Track the number of preemptions triggered
*/
struct adreno_preemption {
atomic_t state;
@@ -277,6 +283,7 @@
unsigned int preempt_level;
bool skipsaverestore;
bool usesgmem;
+ unsigned int count;
};
@@ -625,6 +632,12 @@
ADRENO_REG_CP_PROTECT_REG_0,
ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_LO,
ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_HI,
+ ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_LO,
+ ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_HI,
+ ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_LO,
+ ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_HI,
+ ADRENO_REG_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_LO,
+ ADRENO_REG_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_HI,
ADRENO_REG_RBBM_STATUS,
ADRENO_REG_RBBM_STATUS3,
ADRENO_REG_RBBM_PERFCTR_CTL,
@@ -844,6 +857,13 @@
struct adreno_snapshot_sizes *sect_sizes;
};
+enum adreno_cp_marker_type {
+ IFPC_DISABLE,
+ IFPC_ENABLE,
+ IB1LIST_START,
+ IB1LIST_END,
+};
+
struct adreno_gpudev {
/*
* These registers are in a different location on different devices,
@@ -891,7 +911,8 @@
unsigned int *cmds,
struct kgsl_context *context);
int (*preemption_yield_enable)(unsigned int *);
- unsigned int (*set_marker)(unsigned int *cmds, int start);
+ unsigned int (*set_marker)(unsigned int *cmds,
+ enum adreno_cp_marker_type type);
unsigned int (*preemption_post_ibsubmit)(
struct adreno_device *adreno_dev,
unsigned int *cmds);
@@ -926,6 +947,9 @@
bool (*sptprac_is_on)(struct adreno_device *);
unsigned int (*ccu_invalidate)(struct adreno_device *adreno_dev,
unsigned int *cmds);
+ int (*perfcounter_update)(struct adreno_device *adreno_dev,
+ struct adreno_perfcount_register *reg,
+ bool update_reg);
};
/**
@@ -1911,4 +1935,7 @@
return ret;
}
+void adreno_gmu_fenced_write(struct adreno_device *adreno_dev,
+ enum adreno_regs offset, unsigned int val,
+ unsigned int fence_mask);
#endif /*__ADRENO_H */
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index f3e8650..768a4bb 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -193,6 +193,8 @@
kgsl_free_global(&adreno_dev->dev, &crit_pkts_refbuf2);
kgsl_free_global(&adreno_dev->dev, &crit_pkts_refbuf3);
+ kgsl_iommu_unmap_global_secure_pt_entry(KGSL_DEVICE(adreno_dev),
+ &crit_pkts_refbuf0);
kgsl_sharedmem_free(&crit_pkts_refbuf0);
}
@@ -231,8 +233,10 @@
if (ret)
return ret;
- kgsl_add_global_secure_entry(&adreno_dev->dev,
+ ret = kgsl_iommu_map_global_secure_pt_entry(&adreno_dev->dev,
&crit_pkts_refbuf0);
+ if (ret)
+ return ret;
ret = kgsl_allocate_global(&adreno_dev->dev,
&crit_pkts_refbuf1,
@@ -293,8 +297,13 @@
INIT_WORK(&adreno_dev->irq_storm_work, a5xx_irq_storm_worker);
- if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_CRITICAL_PACKETS))
- a5xx_critical_packet_construct(adreno_dev);
+ if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_CRITICAL_PACKETS)) {
+ int ret;
+
+ ret = a5xx_critical_packet_construct(adreno_dev);
+ if (ret)
+ a5xx_critical_packet_destroy(adreno_dev);
+ }
a5xx_crashdump_init(adreno_dev);
}
diff --git a/drivers/gpu/msm/adreno_a5xx_snapshot.c b/drivers/gpu/msm/adreno_a5xx_snapshot.c
index 6dc62866..d1a6005 100644
--- a/drivers/gpu/msm/adreno_a5xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a5xx_snapshot.c
@@ -621,7 +621,8 @@
header->index = info->bank;
header->size = block->sz;
- memcpy(data, registers.hostptr + info->offset, block->sz);
+ memcpy(data, registers.hostptr + info->offset,
+ block->sz * sizeof(unsigned int));
return SHADER_SECTION_SZ(block->sz);
}
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index 6f6acf7..1d065f4 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -13,6 +13,7 @@
#include <linux/firmware.h>
#include <soc/qcom/subsystem_restart.h>
#include <linux/pm_opp.h>
+#include <linux/jiffies.h>
#include "adreno.h"
#include "a6xx_reg.h"
@@ -52,6 +53,7 @@
static const struct adreno_vbif_data a615_gbif[] = {
{A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3},
+ {A6XX_UCHE_GBIF_GX_CONFIG, 0x10200F9},
{0, 0},
};
@@ -173,12 +175,12 @@
};
static const struct kgsl_hwcg_reg a615_hwcg_regs[] = {
- {A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222},
+ {A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
{A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
- {A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000081},
+ {A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
{A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
- {A6XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
- {A6XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
+ {A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
+ {A6XX_RBBM_CLOCK_CNTL_TP1, 0x02222222},
{A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
{A6XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
{A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
@@ -222,7 +224,7 @@
{A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
{A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
{A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
- {A6XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
+ {A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
{A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
{A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
{A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
@@ -266,7 +268,8 @@
{ 0x0, 0x4F9, 0 },
{ 0x501, 0xA, 0 },
{ 0x511, 0x44, 0 },
- { 0xE00, 0xE, 1 },
+ { 0xE00, 0x1, 1 },
+ { 0xE03, 0xB, 1 },
{ 0x8E00, 0x0, 1 },
{ 0x8E50, 0xF, 1 },
{ 0xBE02, 0x0, 1 },
@@ -281,6 +284,7 @@
{ 0xA630, 0x0, 1 },
};
+/* IFPC & Preemption static powerup restore list */
static struct reg_list_pair {
uint32_t offset;
uint32_t val;
@@ -315,6 +319,48 @@
{ A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE, 0x0 },
};
+/* IFPC only static powerup restore list */
+static struct reg_list_pair a6xx_ifpc_pwrup_reglist[] = {
+ { A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x0 },
+ { A6XX_CP_CHICKEN_DBG, 0x0 },
+ { A6XX_CP_ADDR_MODE_CNTL, 0x0 },
+ { A6XX_CP_DBG_ECO_CNTL, 0x0 },
+ { A6XX_CP_PROTECT_CNTL, 0x0 },
+ { A6XX_CP_PROTECT_REG, 0x0 },
+ { A6XX_CP_PROTECT_REG+1, 0x0 },
+ { A6XX_CP_PROTECT_REG+2, 0x0 },
+ { A6XX_CP_PROTECT_REG+3, 0x0 },
+ { A6XX_CP_PROTECT_REG+4, 0x0 },
+ { A6XX_CP_PROTECT_REG+5, 0x0 },
+ { A6XX_CP_PROTECT_REG+6, 0x0 },
+ { A6XX_CP_PROTECT_REG+7, 0x0 },
+ { A6XX_CP_PROTECT_REG+8, 0x0 },
+ { A6XX_CP_PROTECT_REG+9, 0x0 },
+ { A6XX_CP_PROTECT_REG+10, 0x0 },
+ { A6XX_CP_PROTECT_REG+11, 0x0 },
+ { A6XX_CP_PROTECT_REG+12, 0x0 },
+ { A6XX_CP_PROTECT_REG+13, 0x0 },
+ { A6XX_CP_PROTECT_REG+14, 0x0 },
+ { A6XX_CP_PROTECT_REG+15, 0x0 },
+ { A6XX_CP_PROTECT_REG+16, 0x0 },
+ { A6XX_CP_PROTECT_REG+17, 0x0 },
+ { A6XX_CP_PROTECT_REG+18, 0x0 },
+ { A6XX_CP_PROTECT_REG+19, 0x0 },
+ { A6XX_CP_PROTECT_REG+20, 0x0 },
+ { A6XX_CP_PROTECT_REG+21, 0x0 },
+ { A6XX_CP_PROTECT_REG+22, 0x0 },
+ { A6XX_CP_PROTECT_REG+23, 0x0 },
+ { A6XX_CP_PROTECT_REG+24, 0x0 },
+ { A6XX_CP_PROTECT_REG+25, 0x0 },
+ { A6XX_CP_PROTECT_REG+26, 0x0 },
+ { A6XX_CP_PROTECT_REG+27, 0x0 },
+ { A6XX_CP_PROTECT_REG+28, 0x0 },
+ { A6XX_CP_PROTECT_REG+29, 0x0 },
+ { A6XX_CP_PROTECT_REG+30, 0x0 },
+ { A6XX_CP_PROTECT_REG+31, 0x0 },
+ { A6XX_CP_AHB_CNTL, 0x0 },
+};
+
static void _update_always_on_regs(struct adreno_device *adreno_dev)
{
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
@@ -331,7 +377,7 @@
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
if (kgsl_allocate_global(device, &adreno_dev->pwrup_reglist,
- PAGE_SIZE, KGSL_MEMFLAGS_GPUREADONLY, 0,
+ PAGE_SIZE, 0, KGSL_MEMDESC_CONTIG | KGSL_MEMDESC_PRIVILEGED,
"powerup_register_list")) {
adreno_dev->pwrup_reglist.gpuaddr = 0;
return;
@@ -428,7 +474,41 @@
kgsl_regwrite(device, A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
}
-#define RBBM_CLOCK_CNTL_ON 0x8AA8AA02
+static inline unsigned int
+__get_rbbm_clock_cntl_on(struct adreno_device *adreno_dev)
+{
+ if (adreno_is_a615(adreno_dev))
+ return 0x8AA8AA82;
+ else
+ return 0x8AA8AA02;
+}
+
+static inline unsigned int
+__get_gmu_ao_cgc_mode_cntl(struct adreno_device *adreno_dev)
+{
+ if (adreno_is_a615(adreno_dev))
+ return 0x00000222;
+ else
+ return 0x00020222;
+}
+
+static inline unsigned int
+__get_gmu_ao_cgc_delay_cntl(struct adreno_device *adreno_dev)
+{
+ if (adreno_is_a615(adreno_dev))
+ return 0x00000111;
+ else
+ return 0x00010111;
+}
+
+static inline unsigned int
+__get_gmu_ao_cgc_hyst_cntl(struct adreno_device *adreno_dev)
+{
+ if (adreno_is_a615(adreno_dev))
+ return 0x00000555;
+ else
+ return 0x00005555;
+}
static void a6xx_hwcg_set(struct adreno_device *adreno_dev, bool on)
{
@@ -442,16 +522,16 @@
if (kgsl_gmu_isenabled(device)) {
kgsl_gmu_regwrite(device, A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL,
- on ? 0x00020222 : 0);
+ on ? __get_gmu_ao_cgc_mode_cntl(adreno_dev) : 0);
kgsl_gmu_regwrite(device, A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL,
- on ? 0x00010111 : 0);
+ on ? __get_gmu_ao_cgc_delay_cntl(adreno_dev) : 0);
kgsl_gmu_regwrite(device, A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL,
- on ? 0x00050555 : 0);
+ on ? __get_gmu_ao_cgc_hyst_cntl(adreno_dev) : 0);
}
kgsl_regread(device, A6XX_RBBM_CLOCK_CNTL, &value);
- if (value == RBBM_CLOCK_CNTL_ON && on)
+ if (value == __get_rbbm_clock_cntl_on(adreno_dev) && on)
return;
if (value == 0 && !on)
@@ -478,7 +558,7 @@
/* enable top level HWCG */
kgsl_regwrite(device, A6XX_RBBM_CLOCK_CNTL,
- on ? RBBM_CLOCK_CNTL_ON : 0);
+ on ? __get_rbbm_clock_cntl_on(adreno_dev) : 0);
}
#define LM_DEFAULT_LIMIT 6000
@@ -500,17 +580,46 @@
static void a6xx_patch_pwrup_reglist(struct adreno_device *adreno_dev)
{
uint32_t i;
+ struct cpu_gpu_lock *lock;
+ struct reg_list_pair *r;
/* Set up the register values */
- for (i = 0; i < ARRAY_SIZE(a6xx_pwrup_reglist); i++) {
- struct reg_list_pair *r = &a6xx_pwrup_reglist[i];
-
+ for (i = 0; i < ARRAY_SIZE(a6xx_ifpc_pwrup_reglist); i++) {
+ r = &a6xx_ifpc_pwrup_reglist[i];
kgsl_regread(KGSL_DEVICE(adreno_dev), r->offset, &r->val);
}
- /* Copy Preemption register/data pairs */
- memcpy(adreno_dev->pwrup_reglist.hostptr, &a6xx_pwrup_reglist,
- sizeof(a6xx_pwrup_reglist));
+ for (i = 0; i < ARRAY_SIZE(a6xx_pwrup_reglist); i++) {
+ r = &a6xx_pwrup_reglist[i];
+ kgsl_regread(KGSL_DEVICE(adreno_dev), r->offset, &r->val);
+ }
+
+ lock = (struct cpu_gpu_lock *) adreno_dev->pwrup_reglist.hostptr;
+ lock->flag_ucode = 0;
+ lock->flag_kmd = 0;
+ lock->turn = 0;
+
+ /*
+ * The overall register list is composed of
+ * 1. Static IFPC-only registers
+ * 2. Static IFPC + preemption registers
+	 * 3. Dynamic IFPC + preemption registers (ex: perfcounter selects)
+ *
+ * The CP views the second and third entries as one dynamic list
+ * starting from list_offset. Thus, list_length should be the sum
+ * of all three lists above (of which the third list will start off
+ * empty). And list_offset should be specified as the size in dwords
+ * of the static IFPC-only register list.
+ */
+ lock->list_length = (sizeof(a6xx_ifpc_pwrup_reglist) +
+ sizeof(a6xx_pwrup_reglist)) >> 2;
+ lock->list_offset = sizeof(a6xx_ifpc_pwrup_reglist) >> 2;
+
+ memcpy(adreno_dev->pwrup_reglist.hostptr + sizeof(*lock),
+ a6xx_ifpc_pwrup_reglist, sizeof(a6xx_ifpc_pwrup_reglist));
+ memcpy(adreno_dev->pwrup_reglist.hostptr + sizeof(*lock)
+ + sizeof(a6xx_ifpc_pwrup_reglist),
+ a6xx_pwrup_reglist, sizeof(a6xx_pwrup_reglist));
}
/*
@@ -717,13 +826,16 @@
/* Register initialization list */
#define CP_INIT_REGISTER_INIT_LIST BIT(7)
+/* Register initialization list with spinlock */
+#define CP_INIT_REGISTER_INIT_LIST_WITH_SPINLOCK BIT(8)
+
#define CP_INIT_MASK (CP_INIT_MAX_CONTEXT | \
CP_INIT_ERROR_DETECTION_CONTROL | \
CP_INIT_HEADER_DUMP | \
CP_INIT_DEFAULT_RESET_STATE | \
CP_INIT_UCODE_WORKAROUND_MASK | \
CP_INIT_OPERATION_MODE_MASK | \
- CP_INIT_REGISTER_INIT_LIST)
+ CP_INIT_REGISTER_INIT_LIST_WITH_SPINLOCK)
static void _set_ordinals(struct adreno_device *adreno_dev,
unsigned int *cmds, unsigned int count)
@@ -759,13 +871,21 @@
if (CP_INIT_MASK & CP_INIT_OPERATION_MODE_MASK)
*cmds++ = 0x00000002;
- if (CP_INIT_MASK & CP_INIT_REGISTER_INIT_LIST) {
+ if (CP_INIT_MASK & CP_INIT_REGISTER_INIT_LIST_WITH_SPINLOCK) {
+ uint64_t gpuaddr = adreno_dev->pwrup_reglist.gpuaddr;
+
+ *cmds++ = lower_32_bits(gpuaddr);
+ *cmds++ = upper_32_bits(gpuaddr);
+ *cmds++ = 0;
+
+ } else if (CP_INIT_MASK & CP_INIT_REGISTER_INIT_LIST) {
uint64_t gpuaddr = adreno_dev->pwrup_reglist.gpuaddr;
*cmds++ = lower_32_bits(gpuaddr);
*cmds++ = upper_32_bits(gpuaddr);
/* Size is in dwords */
- *cmds++ = sizeof(a6xx_pwrup_reglist) >> 2;
+ *cmds++ = (sizeof(a6xx_ifpc_pwrup_reglist) +
+ sizeof(a6xx_pwrup_reglist)) >> 2;
}
/* Pad rest of the cmds with 0's */
@@ -822,7 +942,8 @@
rb->preemption_desc.gpuaddr);
*cmds++ = 2;
- cmds += cp_gpuaddr(adreno_dev, cmds, 0);
+ cmds += cp_gpuaddr(adreno_dev, cmds,
+ rb->secure_preemption_desc.gpuaddr);
/* Turn CP protection ON */
*cmds++ = cp_type7_packet(CP_SET_PROTECTED_MODE, 1);
@@ -913,6 +1034,38 @@
return a6xx_post_start(adreno_dev);
}
+unsigned int a6xx_set_marker(
+ unsigned int *cmds, enum adreno_cp_marker_type type)
+{
+ unsigned int cmd = 0;
+
+ *cmds++ = cp_type7_packet(CP_SET_MARKER, 1);
+
+ /*
+ * Indicate the beginning and end of the IB1 list with a SET_MARKER.
+ * Among other things, this will implicitly enable and disable
+ * preemption respectively. IFPC can also be disabled and enabled
+ * with a SET_MARKER. Bit 8 tells the CP the marker is for IFPC.
+ */
+ switch (type) {
+ case IFPC_DISABLE:
+ cmd = 0x101;
+ break;
+ case IFPC_ENABLE:
+ cmd = 0x100;
+ break;
+ case IB1LIST_START:
+ cmd = 0xD;
+ break;
+ case IB1LIST_END:
+ cmd = 0xE;
+ break;
+ }
+
+ *cmds++ = cmd;
+ return 2;
+}
+
static int _load_firmware(struct kgsl_device *device, const char *fwfile,
struct adreno_firmware *firmware)
{
@@ -2894,8 +3047,16 @@
A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1, },
};
+/*
+ * ADRENO_PERFCOUNTER_GROUP_RESTORE flag is enabled by default
+ * because most of the perfcounter groups need to be restored
+ * as part of preemption and IFPC. Perfcounter groups that are
+ * not restored as part of preemption and IFPC should be defined
+ * using A6XX_PERFCOUNTER_GROUP_FLAGS macro
+ */
#define A6XX_PERFCOUNTER_GROUP(offset, name) \
- ADRENO_PERFCOUNTER_GROUP(a6xx, offset, name)
+ ADRENO_PERFCOUNTER_GROUP_FLAGS(a6xx, offset, name, \
+ ADRENO_PERFCOUNTER_GROUP_RESTORE)
#define A6XX_PERFCOUNTER_GROUP_FLAGS(offset, name, flags) \
ADRENO_PERFCOUNTER_GROUP_FLAGS(a6xx, offset, name, flags)
@@ -2906,7 +3067,7 @@
static struct adreno_perfcount_group a6xx_perfcounter_groups
[KGSL_PERFCOUNTER_GROUP_MAX] = {
A6XX_PERFCOUNTER_GROUP(CP, cp),
- A6XX_PERFCOUNTER_GROUP(RBBM, rbbm),
+ A6XX_PERFCOUNTER_GROUP_FLAGS(RBBM, rbbm, 0),
A6XX_PERFCOUNTER_GROUP(PC, pc),
A6XX_PERFCOUNTER_GROUP(VFD, vfd),
A6XX_PERFCOUNTER_GROUP(HLSQ, hlsq),
@@ -2921,7 +3082,7 @@
A6XX_PERFCOUNTER_GROUP(SP, sp),
A6XX_PERFCOUNTER_GROUP(RB, rb),
A6XX_PERFCOUNTER_GROUP(VSC, vsc),
- A6XX_PERFCOUNTER_GROUP(VBIF, vbif),
+ A6XX_PERFCOUNTER_GROUP_FLAGS(VBIF, vbif, 0),
A6XX_PERFCOUNTER_GROUP_FLAGS(VBIF_PWR, vbif_pwr,
ADRENO_PERFCOUNTER_GROUP_FIXED),
A6XX_PERFCOUNTER_GROUP_FLAGS(PWR, pwr,
@@ -3070,6 +3231,22 @@
A6XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO),
ADRENO_REG_DEFINE(ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_HI,
A6XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI),
+ ADRENO_REG_DEFINE(
+ ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_LO,
+ A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_LO),
+ ADRENO_REG_DEFINE(
+ ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_HI,
+ A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_HI),
+ ADRENO_REG_DEFINE(
+ ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_LO,
+ A6XX_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_LO),
+ ADRENO_REG_DEFINE(
+ ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_HI,
+ A6XX_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_HI),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_LO,
+ A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_LO),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_HI,
+ A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_HI),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_STATUS, A6XX_RBBM_STATUS),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_STATUS3, A6XX_RBBM_STATUS3),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_CTL, A6XX_RBBM_PERFCTR_CNTL),
@@ -3162,6 +3339,69 @@
.offset_0 = ADRENO_REG_REGISTER_MAX,
};
+static int a6xx_perfcounter_update(struct adreno_device *adreno_dev,
+ struct adreno_perfcount_register *reg, bool update_reg)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ struct cpu_gpu_lock *lock = adreno_dev->pwrup_reglist.hostptr;
+ struct reg_list_pair *reg_pair = (struct reg_list_pair *)(lock + 1);
+ unsigned int i;
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+ int ret = 0;
+
+ lock->flag_kmd = 1;
+ /* Write flag_kmd before turn */
+ wmb();
+ lock->turn = 0;
+ /* Write these fields before looping */
+ mb();
+
+ /*
+ * Spin here while GPU ucode holds the lock, lock->flag_ucode will
+ * be set to 0 after GPU ucode releases the lock. Minimum wait time
+ * is 1 second and this should be enough for GPU to release the lock
+ */
+ while (lock->flag_ucode == 1 && lock->turn == 0) {
+ cpu_relax();
+ /* Get the latest updates from GPU */
+ rmb();
+ /*
+ * Make sure we wait at least 1sec for the lock,
+ * if we did not get it after 1sec return an error.
+ */
+ if (time_after(jiffies, timeout) &&
+ (lock->flag_ucode == 1 && lock->turn == 0)) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+ }
+
+ /* Read flag_ucode and turn before list_length */
+ rmb();
+ /*
+ * If the perfcounter select register is already present in reglist
+ * update it, otherwise append the <select register, value> pair to
+ * the end of the list.
+ */
+ for (i = 0; i < lock->list_length >> 1; i++)
+ if (reg_pair[i].offset == reg->select)
+ break;
+
+ reg_pair[i].offset = reg->select;
+ reg_pair[i].val = reg->countable;
+ if (i == lock->list_length >> 1)
+ lock->list_length += 2;
+
+ if (update_reg)
+ kgsl_regwrite(device, reg->select, reg->countable);
+
+unlock:
+ /* All writes done before releasing the lock */
+ wmb();
+ lock->flag_kmd = 0;
+ return ret;
+}
+
struct adreno_gpudev adreno_a6xx_gpudev = {
.reg_offsets = &a6xx_reg_offsets,
.start = a6xx_start,
@@ -3204,4 +3444,5 @@
.gx_is_on = a6xx_gx_is_on,
.sptprac_is_on = a6xx_sptprac_is_on,
.ccu_invalidate = a6xx_ccu_invalidate,
+ .perfcounter_update = a6xx_perfcounter_update,
};
diff --git a/drivers/gpu/msm/adreno_a6xx.h b/drivers/gpu/msm/adreno_a6xx.h
index dd8af80..bf1111c 100644
--- a/drivers/gpu/msm/adreno_a6xx.h
+++ b/drivers/gpu/msm/adreno_a6xx.h
@@ -75,6 +75,24 @@
#define A6XX_CP_SMMU_INFO_MAGIC_REF 0x241350D5UL
+/**
+ * struct cpu_gpu_lock - CP spinlock structure for power up list
+ * @flag_ucode: flag value set by CP
+ * @flag_kmd: flag value set by KMD
+ * @turn: turn variable set by both CP and KMD
+ * @list_length: this tells CP the last dword in the list:
+ * 16 + (4 * (List_Length - 1))
+ * @list_offset: this tells CP the start of preemption only list:
+ * 16 + (4 * List_Offset)
+ */
+struct cpu_gpu_lock {
+ uint32_t flag_ucode;
+ uint32_t flag_kmd;
+ uint32_t turn;
+ uint16_t list_length;
+ uint16_t list_offset;
+};
+
#define A6XX_CP_CTXRECORD_MAGIC_REF 0xAE399D6EUL
/* Size of each CP preemption record */
#define A6XX_CP_CTXRECORD_SIZE_IN_BYTES (2112 * 1024)
@@ -100,7 +118,8 @@
struct adreno_ringbuffer *rb,
unsigned int *cmds, struct kgsl_context *context);
-unsigned int a6xx_set_marker(unsigned int *cmds, int start);
+unsigned int a6xx_set_marker(unsigned int *cmds,
+ enum adreno_cp_marker_type type);
void a6xx_preemption_callback(struct adreno_device *adreno_dev, int bit);
diff --git a/drivers/gpu/msm/adreno_a6xx_preempt.c b/drivers/gpu/msm/adreno_a6xx_preempt.c
index 1eec381..d92d1e0 100644
--- a/drivers/gpu/msm/adreno_a6xx_preempt.c
+++ b/drivers/gpu/msm/adreno_a6xx_preempt.c
@@ -35,6 +35,25 @@
struct adreno_ringbuffer *rb = adreno_dev->cur_rb;
unsigned int wptr;
unsigned long flags;
+ struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+
+ /*
+ * Need to make sure GPU is up before we read the
+ * WPTR as fence doesn't wake GPU on read operation.
+ */
+ if (in_interrupt() == 0) {
+ int status;
+
+ if (gpudev->oob_set) {
+ status = gpudev->oob_set(adreno_dev,
+ OOB_PREEMPTION_SET_MASK,
+ OOB_PREEMPTION_CHECK_MASK,
+ OOB_PREEMPTION_CLEAR_MASK);
+ if (status)
+ return;
+ }
+ }
+
spin_lock_irqsave(&rb->preempt_lock, flags);
@@ -55,6 +74,12 @@
msecs_to_jiffies(adreno_drawobj_timeout);
spin_unlock_irqrestore(&rb->preempt_lock, flags);
+
+ if (in_interrupt() == 0) {
+ if (gpudev->oob_clear)
+ gpudev->oob_clear(adreno_dev,
+ OOB_PREEMPTION_CLEAR_MASK);
+ }
}
static inline bool adreno_move_preempt_state(struct adreno_device *adreno_dev,
@@ -204,7 +229,7 @@
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
struct adreno_ringbuffer *next;
- uint64_t ttbr0;
+ uint64_t ttbr0, gpuaddr;
unsigned int contextidr;
unsigned long flags;
uint32_t preempt_level, usesgmem, skipsaverestore;
@@ -267,6 +292,8 @@
kgsl_sharedmem_writel(device, &next->preemption_desc,
PREEMPT_RECORD(wptr), next->wptr);
+ preempt->count++;
+
spin_unlock_irqrestore(&next->preempt_lock, flags);
/* And write it to the smmu info */
@@ -275,24 +302,57 @@
kgsl_sharedmem_writel(device, &iommu->smmu_info,
PREEMPT_SMMU_RECORD(context_idr), contextidr);
- kgsl_regwrite(device,
- A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_LO,
- lower_32_bits(next->preemption_desc.gpuaddr));
- kgsl_regwrite(device,
- A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_HI,
- upper_32_bits(next->preemption_desc.gpuaddr));
+ kgsl_sharedmem_readq(&device->scratch, &gpuaddr,
+ SCRATCH_PREEMPTION_CTXT_RESTORE_ADDR_OFFSET(next->id));
- if (next->drawctxt_active) {
- struct kgsl_context *context = &next->drawctxt_active->base;
- uint64_t gpuaddr = context->user_ctxt_record->memdesc.gpuaddr;
+ /*
+ * Set a keepalive bit before the first preemption register write.
+ * This is required since while each individual write to the context
+ * switch registers will wake the GPU from collapse, it will not in
+ * itself cause GPU activity. Thus, the GPU could technically be
+ * re-collapsed between subsequent register writes leading to a
+ * prolonged preemption sequence. The keepalive bit prevents any
+ * further power collapse while it is set.
+ * It is more efficient to use a keepalive+wake-on-fence approach here
+ * rather than an OOB. Both keepalive and the fence are effectively
+ * free when the GPU is already powered on, whereas an OOB requires an
+ * unconditional handshake with the GMU.
+ */
+ kgsl_gmu_regrmw(device, A6XX_GMU_AO_SPARE_CNTL, 0x0, 0x2);
- kgsl_regwrite(device,
- A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_LO,
- lower_32_bits(gpuaddr));
- kgsl_regwrite(device,
- A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_HI,
- upper_32_bits(gpuaddr));
- }
+ /*
+ * Fenced writes on this path will make sure the GPU is woken up
+ * in case it was power collapsed by the GMU.
+ */
+ adreno_gmu_fenced_write(adreno_dev,
+ ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_LO,
+ lower_32_bits(next->preemption_desc.gpuaddr),
+ FENCE_STATUS_WRITEDROPPED1_MASK);
+
+ adreno_gmu_fenced_write(adreno_dev,
+ ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_HI,
+ upper_32_bits(next->preemption_desc.gpuaddr),
+ FENCE_STATUS_WRITEDROPPED1_MASK);
+
+ adreno_gmu_fenced_write(adreno_dev,
+ ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_LO,
+ lower_32_bits(next->secure_preemption_desc.gpuaddr),
+ FENCE_STATUS_WRITEDROPPED1_MASK);
+
+ adreno_gmu_fenced_write(adreno_dev,
+ ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_HI,
+ upper_32_bits(next->secure_preemption_desc.gpuaddr),
+ FENCE_STATUS_WRITEDROPPED1_MASK);
+
+ adreno_gmu_fenced_write(adreno_dev,
+ ADRENO_REG_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_LO,
+ lower_32_bits(gpuaddr),
+ FENCE_STATUS_WRITEDROPPED1_MASK);
+
+ adreno_gmu_fenced_write(adreno_dev,
+ ADRENO_REG_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_HI,
+ upper_32_bits(gpuaddr),
+ FENCE_STATUS_WRITEDROPPED1_MASK);
adreno_dev->next_rb = next;
@@ -305,10 +365,20 @@
adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_TRIGGERED);
/* Trigger the preemption */
- adreno_writereg(adreno_dev, ADRENO_REG_CP_PREEMPT,
- ((preempt_level << 6) & 0xC0) |
- ((skipsaverestore << 9) & 0x200) |
- ((usesgmem << 8) & 0x100) | 0x1);
+ adreno_gmu_fenced_write(adreno_dev,
+ ADRENO_REG_CP_PREEMPT,
+ (((preempt_level << 6) & 0xC0) |
+ ((skipsaverestore << 9) & 0x200) |
+ ((usesgmem << 8) & 0x100) | 0x1),
+ FENCE_STATUS_WRITEDROPPED1_MASK);
+
+ /*
+ * Once preemption has been requested with the final register write,
+ * the preemption process starts and the GPU is considered busy.
+ * We can now safely clear the preemption keepalive bit, allowing
+ * power collapse to resume its regular activity.
+ */
+ kgsl_gmu_regrmw(device, A6XX_GMU_AO_SPARE_CNTL, 0x2, 0x0);
}
void a6xx_preemption_callback(struct adreno_device *adreno_dev, int bit)
@@ -374,34 +444,20 @@
mutex_unlock(&device->mutex);
}
-unsigned int a6xx_set_marker(unsigned int *cmds, int start)
-{
- *cmds++ = cp_type7_packet(CP_SET_MARKER, 1);
-
- /*
- * Indicate the beginning and end of the IB1 list with a SET_MARKER.
- * Among other things, this will implicitly enable and disable
- * preemption respectively.
- */
- if (start)
- *cmds++ = 0xD;
- else
- *cmds++ = 0xE;
-
- return 2;
-}
-
unsigned int a6xx_preemption_pre_ibsubmit(
struct adreno_device *adreno_dev,
struct adreno_ringbuffer *rb,
unsigned int *cmds, struct kgsl_context *context)
{
unsigned int *cmds_orig = cmds;
+ uint64_t gpuaddr = 0;
- if (context)
+ if (context) {
+ gpuaddr = context->user_ctxt_record->memdesc.gpuaddr;
*cmds++ = cp_type7_packet(CP_SET_PSEUDO_REGISTER, 15);
- else
+ } else {
*cmds++ = cp_type7_packet(CP_SET_PSEUDO_REGISTER, 12);
+ }
/* NULL SMMU_INFO buffer - we track in KMD */
*cmds++ = SET_PSEUDO_REGISTER_SAVE_REGISTER_SMMU_INFO;
@@ -411,10 +467,10 @@
cmds += cp_gpuaddr(adreno_dev, cmds, rb->preemption_desc.gpuaddr);
*cmds++ = SET_PSEUDO_REGISTER_SAVE_REGISTER_PRIV_SECURE_SAVE_ADDR;
- cmds += cp_gpuaddr(adreno_dev, cmds, 0);
+ cmds += cp_gpuaddr(adreno_dev, cmds,
+ rb->secure_preemption_desc.gpuaddr);
if (context) {
- uint64_t gpuaddr = context->user_ctxt_record->memdesc.gpuaddr;
*cmds++ = SET_PSEUDO_REGISTER_SAVE_REGISTER_NON_PRIV_SAVE_ADDR;
cmds += cp_gpuaddr(adreno_dev, cmds, gpuaddr);
@@ -431,6 +487,20 @@
cmds += cp_gpuaddr(adreno_dev, cmds,
rb->perfcounter_save_restore_desc.gpuaddr);
+ if (context) {
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
+ struct adreno_ringbuffer *rb = drawctxt->rb;
+ uint64_t dest =
+ SCRATCH_PREEMPTION_CTXT_RESTORE_GPU_ADDR(device,
+ rb->id);
+
+ *cmds++ = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 2, 2);
+ cmds += cp_gpuaddr(adreno_dev, cmds, dest);
+ *cmds++ = lower_32_bits(gpuaddr);
+ *cmds++ = upper_32_bits(gpuaddr);
+ }
+
return (unsigned int) (cmds - cmds_orig);
}
@@ -438,6 +508,18 @@
unsigned int *cmds)
{
unsigned int *cmds_orig = cmds;
+ struct adreno_ringbuffer *rb = adreno_dev->cur_rb;
+
+ if (rb) {
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ uint64_t dest = SCRATCH_PREEMPTION_CTXT_RESTORE_GPU_ADDR(device,
+ rb->id);
+
+ *cmds++ = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 2, 2);
+ cmds += cp_gpuaddr(adreno_dev, cmds, dest);
+ *cmds++ = 0;
+ *cmds++ = 0;
+ }
*cmds++ = cp_type7_packet(CP_CONTEXT_SWITCH_YIELD, 4);
cmds += cp_gpuaddr(adreno_dev, cmds, 0x0);
@@ -505,6 +587,17 @@
if (ret)
return ret;
+ ret = kgsl_allocate_user(device, &rb->secure_preemption_desc,
+ A6XX_CP_CTXRECORD_SIZE_IN_BYTES,
+ KGSL_MEMFLAGS_SECURE | KGSL_MEMDESC_PRIVILEGED);
+ if (ret)
+ return ret;
+
+ ret = kgsl_iommu_map_global_secure_pt_entry(device,
+ &rb->secure_preemption_desc);
+ if (ret)
+ return ret;
+
ret = kgsl_allocate_global(device, &rb->perfcounter_save_restore_desc,
A6XX_CP_PERFCOUNTER_SAVE_RESTORE_SIZE, 0,
KGSL_MEMDESC_PRIVILEGED, "perfcounter_save_restore_desc");
@@ -578,6 +671,9 @@
FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
kgsl_free_global(device, &rb->preemption_desc);
kgsl_free_global(device, &rb->perfcounter_save_restore_desc);
+ kgsl_iommu_unmap_global_secure_pt_entry(device,
+ &rb->secure_preemption_desc);
+ kgsl_sharedmem_free(&rb->secure_preemption_desc);
}
}
@@ -645,16 +741,20 @@
{
struct kgsl_device *device = context->device;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ uint64_t flags = 0;
if (!adreno_is_preemption_setup_enabled(adreno_dev))
return 0;
+ if (context->flags & KGSL_CONTEXT_SECURE)
+ flags |= KGSL_MEMFLAGS_SECURE;
+
/*
* gpumem_alloc_entry takes an extra refcount. Put it only when
* destroying the context to keep the context record valid
*/
context->user_ctxt_record = gpumem_alloc_entry(context->dev_priv,
- A6XX_CP_CTXRECORD_USER_RESTORE_SIZE, 0);
+ A6XX_CP_CTXRECORD_USER_RESTORE_SIZE, flags);
if (IS_ERR(context->user_ctxt_record)) {
int ret = PTR_ERR(context->user_ctxt_record);
diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c
index c1a76bc..b9a2f8d 100644
--- a/drivers/gpu/msm/adreno_a6xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c
@@ -640,7 +640,7 @@
header->size = block->sz;
memcpy(data, a6xx_crashdump_registers.hostptr + info->offset,
- block->sz);
+ block->sz * sizeof(unsigned int));
return SHADER_SECTION_SZ(block->sz);
}
diff --git a/drivers/gpu/msm/adreno_perfcounter.c b/drivers/gpu/msm/adreno_perfcounter.c
index 03db16d..94fdbc2 100644
--- a/drivers/gpu/msm/adreno_perfcounter.c
+++ b/drivers/gpu/msm/adreno_perfcounter.c
@@ -768,6 +768,21 @@
reg->value = 0;
}
+static inline bool _perfcounter_inline_update(
+ struct adreno_device *adreno_dev, unsigned int group)
+{
+ if (adreno_is_a6xx(adreno_dev)) {
+ if ((group == KGSL_PERFCOUNTER_GROUP_HLSQ) ||
+ (group == KGSL_PERFCOUNTER_GROUP_SP) ||
+ (group == KGSL_PERFCOUNTER_GROUP_TP))
+ return true;
+ else
+ return false;
+ }
+
+ return true;
+}
+
static int _perfcounter_enable_default(struct adreno_device *adreno_dev,
struct adreno_perfcounters *counters, unsigned int group,
unsigned int counter, unsigned int countable)
@@ -775,6 +790,7 @@
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
struct adreno_perfcount_register *reg;
+ struct adreno_perfcount_group *grp;
int i;
int ret = 0;
@@ -789,15 +805,20 @@
if (countable == invalid_countable.countables[i])
return -EACCES;
}
- reg = &(counters->groups[group].regs[counter]);
+ grp = &(counters->groups[group]);
+ reg = &(grp->regs[counter]);
- if (!adreno_is_a6xx(adreno_dev) &&
- test_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv)) {
+ if (_perfcounter_inline_update(adreno_dev, group) &&
+ test_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv)) {
struct adreno_ringbuffer *rb = &adreno_dev->ringbuffers[0];
unsigned int buf[4];
unsigned int *cmds = buf;
int ret;
+ if (gpudev->perfcounter_update && (grp->flags &
+ ADRENO_PERFCOUNTER_GROUP_RESTORE))
+ gpudev->perfcounter_update(adreno_dev, reg, false);
+
cmds += cp_wait_for_idle(adreno_dev, cmds);
*cmds++ = cp_register(adreno_dev, reg->select, 1);
*cmds++ = countable;
@@ -834,12 +855,16 @@
}
} else {
/* Select the desired perfcounter */
- kgsl_regwrite(device, reg->select, countable);
+ if (gpudev->perfcounter_update && (grp->flags &
+ ADRENO_PERFCOUNTER_GROUP_RESTORE))
+ ret = gpudev->perfcounter_update(adreno_dev, reg, true);
+ else
+ kgsl_regwrite(device, reg->select, countable);
}
if (!ret)
reg->value = 0;
- return 0;
+ return ret;
}
/**
diff --git a/drivers/gpu/msm/adreno_perfcounter.h b/drivers/gpu/msm/adreno_perfcounter.h
index 8c4db38..bcbc738 100644
--- a/drivers/gpu/msm/adreno_perfcounter.h
+++ b/drivers/gpu/msm/adreno_perfcounter.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2015, 2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -70,6 +70,13 @@
#define ADRENO_PERFCOUNTER_GROUP_FIXED BIT(0)
+/*
+ * ADRENO_PERFCOUNTER_GROUP_RESTORE indicates CP needs to restore the select
+ * registers of this perfcounter group as part of preemption and IFPC
+ */
+#define ADRENO_PERFCOUNTER_GROUP_RESTORE BIT(1)
+
+
/**
* adreno_perfcounts: all available perfcounter groups
* @groups: available groups for this device
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 70043db..01d9f71 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -80,44 +80,6 @@
local_irq_restore(flags);
}
-/*
- * Wait time before trying to write the register again.
- * Hopefully the GMU has finished waking up during this delay.
- * This delay must be less than the IFPC main hysteresis or
- * the GMU will start shutting down before we try again.
- */
-#define GMU_WAKEUP_DELAY 10
-/* Max amount of tries to wake up the GMU. */
-#define GMU_WAKEUP_RETRY_MAX 60
-
-/*
- * Check the WRITEDROPPED0 bit in the
- * FENCE_STATUS regsiter to check if the write went
- * through. If it didn't then we retry the write.
- */
-static inline void _gmu_wptr_update_if_dropped(struct adreno_device *adreno_dev,
- struct adreno_ringbuffer *rb)
-{
- unsigned int val, i;
-
- for (i = 0; i < GMU_WAKEUP_RETRY_MAX; i++) {
- adreno_read_gmureg(adreno_dev, ADRENO_REG_GMU_AHB_FENCE_STATUS,
- &val);
-
- /* If !writedropped, then wptr update was successful */
- if (!(val & 0x1))
- return;
-
- /* Wait a small amount of time before trying again */
- udelay(GMU_WAKEUP_DELAY);
-
- /* Try to write WPTR again */
- adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_WPTR, rb->_wptr);
- }
-
- dev_err(adreno_dev->dev.dev, "GMU WPTR update timed out\n");
-}
-
static void adreno_ringbuffer_wptr(struct adreno_device *adreno_dev,
struct adreno_ringbuffer *rb)
{
@@ -132,15 +94,14 @@
* been submitted.
*/
kgsl_pwrscale_busy(KGSL_DEVICE(adreno_dev));
- adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_WPTR,
- rb->_wptr);
/*
- * If GMU, ensure the write posted after a possible
+ * Ensure the write posted after a possible
* GMU wakeup (write could have dropped during wakeup)
*/
- if (kgsl_gmu_isenabled(KGSL_DEVICE(adreno_dev)))
- _gmu_wptr_update_if_dropped(adreno_dev, rb);
+ adreno_gmu_fenced_write(adreno_dev,
+ ADRENO_REG_CP_RB_WPTR, rb->_wptr,
+ FENCE_STATUS_WRITEDROPPED0_MASK);
}
}
@@ -425,6 +386,7 @@
struct kgsl_context *context = NULL;
bool secured_ctxt = false;
static unsigned int _seq_cnt;
+ struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
if (drawctxt != NULL && kgsl_context_detached(&drawctxt->base) &&
!(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE))
@@ -494,11 +456,11 @@
if (gpudev->preemption_pre_ibsubmit &&
adreno_is_preemption_execution_enabled(adreno_dev))
- total_sizedwords += 22;
+ total_sizedwords += 27;
if (gpudev->preemption_post_ibsubmit &&
adreno_is_preemption_execution_enabled(adreno_dev))
- total_sizedwords += 5;
+ total_sizedwords += 10;
/*
* a5xx uses 64 bit memory address. pm4 commands that involve read/write
@@ -559,8 +521,13 @@
*ringcmds++ = KGSL_CMD_INTERNAL_IDENTIFIER;
}
- if (gpudev->set_marker)
- ringcmds += gpudev->set_marker(ringcmds, 1);
+ if (gpudev->set_marker) {
+ /* Firmware versions before 1.49 do not support IFPC markers */
+ if (adreno_is_a6xx(adreno_dev) && (fw->version & 0xFFF) < 0x149)
+ ringcmds += gpudev->set_marker(ringcmds, IB1LIST_START);
+ else
+ ringcmds += gpudev->set_marker(ringcmds, IFPC_DISABLE);
+ }
if (flags & KGSL_CMD_FLAGS_PWRON_FIXUP) {
/* Disable protected mode for the fixup */
@@ -680,8 +647,12 @@
*ringcmds++ = timestamp;
}
- if (gpudev->set_marker)
- ringcmds += gpudev->set_marker(ringcmds, 0);
+ if (gpudev->set_marker) {
+ if (adreno_is_a6xx(adreno_dev) && (fw->version & 0xFFF) < 0x149)
+ ringcmds += gpudev->set_marker(ringcmds, IB1LIST_END);
+ else
+ ringcmds += gpudev->set_marker(ringcmds, IFPC_ENABLE);
+ }
if (adreno_is_a3xx(adreno_dev)) {
/* Dummy set-constant to trigger context rollover */
@@ -796,8 +767,9 @@
struct kgsl_drawobj_profiling_buffer *profile_buffer = NULL;
unsigned int dwords = 0;
struct adreno_submit_time local;
-
struct kgsl_mem_entry *entry = cmdobj->profiling_buf_entry;
+ struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
+ bool set_ib1list_marker = false;
if (entry)
profile_buffer = kgsl_gpuaddr_to_vaddr(&entry->memdesc,
@@ -907,6 +879,17 @@
dwords += 8;
}
+ /*
+ * Prior to SQE FW version 1.49, there was only one marker for
+ * both preemption and IFPC. Only include the IB1LIST markers if
+ * we are using a firmware that supports them.
+ */
+ if (gpudev->set_marker && numibs && adreno_is_a6xx(adreno_dev) &&
+ ((fw->version & 0xFFF) >= 0x149)) {
+ set_ib1list_marker = true;
+ dwords += 4;
+ }
+
if (gpudev->ccu_invalidate)
dwords += 4;
@@ -940,6 +923,9 @@
}
if (numibs) {
+ if (set_ib1list_marker)
+ cmds += gpudev->set_marker(cmds, IB1LIST_START);
+
list_for_each_entry(ib, &cmdobj->cmdlist, node) {
/*
* Skip 0 sized IBs - these are presumed to have been
@@ -958,6 +944,9 @@
/* preamble is required on only for first command */
use_preamble = false;
}
+
+ if (set_ib1list_marker)
+ cmds += gpudev->set_marker(cmds, IB1LIST_END);
}
if (gpudev->ccu_invalidate)
diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h
index 72fc5bf3..fbee627 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.h
+++ b/drivers/gpu/msm/adreno_ringbuffer.h
@@ -92,6 +92,8 @@
* @drawctxt_active: The last pagetable that this ringbuffer is set to
* @preemption_desc: The memory descriptor containing
* preemption info written/read by CP
+ * @secure_preemption_desc: The memory descriptor containing
+ * preemption info written/read by CP for secure contexts
* @perfcounter_save_restore_desc: Used by CP to save/restore the perfcounter
* values across preemption
* @pagetable_desc: Memory to hold information about the pagetables being used
@@ -120,6 +122,7 @@
struct kgsl_event_group events;
struct adreno_context *drawctxt_active;
struct kgsl_memdesc preemption_desc;
+ struct kgsl_memdesc secure_preemption_desc;
struct kgsl_memdesc perfcounter_save_restore_desc;
struct kgsl_memdesc pagetable_desc;
struct adreno_dispatcher_drawqueue dispatch_q;
diff --git a/drivers/gpu/msm/adreno_sysfs.c b/drivers/gpu/msm/adreno_sysfs.c
index fcf0417..e309ab0 100644
--- a/drivers/gpu/msm/adreno_sysfs.c
+++ b/drivers/gpu/msm/adreno_sysfs.c
@@ -29,6 +29,13 @@
.store = _ ## _name ## _store, \
}
+#define _ADRENO_SYSFS_ATTR_RO(_name, __show) \
+struct adreno_sysfs_attribute adreno_attr_##_name = { \
+ .attr = __ATTR(_name, 0644, __show, NULL), \
+ .show = _ ## _name ## _show, \
+ .store = NULL, \
+}
+
#define ADRENO_SYSFS_ATTR(_a) \
container_of((_a), struct adreno_sysfs_attribute, attr)
@@ -331,6 +338,13 @@
return kgsl_gmu_isenabled(device) && gmu->idle_level >= GPU_HW_IFPC;
}
+static unsigned int _preempt_count_show(struct adreno_device *adreno_dev)
+{
+ struct adreno_preemption *preempt = &adreno_dev->preempt;
+
+ return preempt->count;
+}
+
static ssize_t _sysfs_store_u32(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -411,9 +425,13 @@
#define ADRENO_SYSFS_U32(_name) \
_ADRENO_SYSFS_ATTR(_name, _sysfs_show_u32, _sysfs_store_u32)
+#define ADRENO_SYSFS_RO_U32(_name) \
+ _ADRENO_SYSFS_ATTR_RO(_name, _sysfs_show_u32)
+
static ADRENO_SYSFS_U32(ft_policy);
static ADRENO_SYSFS_U32(ft_pagefault_policy);
static ADRENO_SYSFS_U32(preempt_level);
+static ADRENO_SYSFS_RO_U32(preempt_count);
static ADRENO_SYSFS_BOOL(usesgmem);
static ADRENO_SYSFS_BOOL(skipsaverestore);
static ADRENO_SYSFS_BOOL(ft_long_ib_detect);
@@ -451,6 +469,7 @@
&adreno_attr_usesgmem.attr,
&adreno_attr_skipsaverestore.attr,
&adreno_attr_ifpc.attr,
+ &adreno_attr_preempt_count.attr,
NULL,
};
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 2e1ceea..5d07380 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -1805,18 +1805,15 @@
long gpumem_free_entry(struct kgsl_mem_entry *entry)
{
- pid_t ptname = 0;
-
if (!kgsl_mem_entry_set_pend(entry))
return -EBUSY;
trace_kgsl_mem_free(entry);
-
- if (entry->memdesc.pagetable != NULL)
- ptname = entry->memdesc.pagetable->name;
-
- kgsl_memfree_add(entry->priv->pid, ptname, entry->memdesc.gpuaddr,
- entry->memdesc.size, entry->memdesc.flags);
+ kgsl_memfree_add(entry->priv->pid,
+ entry->memdesc.pagetable ?
+ entry->memdesc.pagetable->name : 0,
+ entry->memdesc.gpuaddr, entry->memdesc.size,
+ entry->memdesc.flags);
kgsl_mem_entry_put(entry);
@@ -1835,6 +1832,12 @@
/* Free the memory for all event types */
trace_kgsl_mem_timestamp_free(device, entry, KGSL_CONTEXT_ID(context),
timestamp, 0);
+ kgsl_memfree_add(entry->priv->pid,
+ entry->memdesc.pagetable ?
+ entry->memdesc.pagetable->name : 0,
+ entry->memdesc.gpuaddr, entry->memdesc.size,
+ entry->memdesc.flags);
+
kgsl_mem_entry_put(entry);
}
@@ -1928,6 +1931,13 @@
{
struct kgsl_mem_entry *entry = priv;
+ trace_kgsl_mem_free(entry);
+ kgsl_memfree_add(entry->priv->pid,
+ entry->memdesc.pagetable ?
+ entry->memdesc.pagetable->name : 0,
+ entry->memdesc.gpuaddr, entry->memdesc.size,
+ entry->memdesc.flags);
+
INIT_WORK(&entry->work, _deferred_put);
queue_work(kgsl_driver.mem_workqueue, &entry->work);
return true;
@@ -1960,15 +1970,15 @@
handle = kgsl_sync_fence_async_wait(event.fd,
gpuobj_free_fence_func, entry, NULL, 0);
- /* if handle is NULL the fence has already signaled */
- if (handle == NULL)
- return gpumem_free_entry(entry);
-
if (IS_ERR(handle)) {
kgsl_mem_entry_unset_pend(entry);
return PTR_ERR(handle);
}
+ /* if handle is NULL the fence has already signaled */
+ if (handle == NULL)
+ gpuobj_free_fence_func(entry);
+
return 0;
}
@@ -2284,7 +2294,8 @@
param->flags &= KGSL_MEMFLAGS_GPUREADONLY
| KGSL_CACHEMODE_MASK
| KGSL_MEMTYPE_MASK
- | KGSL_MEMFLAGS_FORCE_32BIT;
+ | KGSL_MEMFLAGS_FORCE_32BIT
+ | KGSL_MEMFLAGS_IOCOHERENT;
/* Specifying SECURE is an explicit error */
if (param->flags & KGSL_MEMFLAGS_SECURE)
@@ -2378,7 +2389,12 @@
| KGSL_MEMALIGN_MASK
| KGSL_MEMFLAGS_USE_CPU_MAP
| KGSL_MEMFLAGS_SECURE
- | KGSL_MEMFLAGS_FORCE_32BIT;
+ | KGSL_MEMFLAGS_FORCE_32BIT
+ | KGSL_MEMFLAGS_IOCOHERENT;
+
+ /* Disable IO coherence if it is not supported on the chip */
+ if (!MMU_FEATURE(mmu, KGSL_MMU_IO_COHERENT))
+ param->flags &= ~((uint64_t)KGSL_MEMFLAGS_IOCOHERENT);
entry->memdesc.flags = param->flags;
@@ -2663,7 +2679,13 @@
| KGSL_MEMTYPE_MASK
| KGSL_MEMALIGN_MASK
| KGSL_MEMFLAGS_USE_CPU_MAP
- | KGSL_MEMFLAGS_SECURE;
+ | KGSL_MEMFLAGS_SECURE
+ | KGSL_MEMFLAGS_IOCOHERENT;
+
+ /* Disable IO coherence if it is not supported on the chip */
+ if (!MMU_FEATURE(mmu, KGSL_MMU_IO_COHERENT))
+ param->flags &= ~((uint64_t)KGSL_MEMFLAGS_IOCOHERENT);
+
entry->memdesc.flags = ((uint64_t) param->flags)
| KGSL_MEMFLAGS_FORCE_32BIT;
@@ -3062,6 +3084,7 @@
int ret;
struct kgsl_process_private *private = dev_priv->process_priv;
struct kgsl_mem_entry *entry;
+ struct kgsl_mmu *mmu = &dev_priv->device->mmu;
unsigned int align;
flags &= KGSL_MEMFLAGS_GPUREADONLY
@@ -3070,14 +3093,15 @@
| KGSL_MEMALIGN_MASK
| KGSL_MEMFLAGS_USE_CPU_MAP
| KGSL_MEMFLAGS_SECURE
- | KGSL_MEMFLAGS_FORCE_32BIT;
+ | KGSL_MEMFLAGS_FORCE_32BIT
+ | KGSL_MEMFLAGS_IOCOHERENT;
/* Turn off SVM if the system doesn't support it */
- if (!kgsl_mmu_use_cpu_map(&dev_priv->device->mmu))
+ if (!kgsl_mmu_use_cpu_map(mmu))
flags &= ~((uint64_t) KGSL_MEMFLAGS_USE_CPU_MAP);
/* Return not supported error if secure memory isn't enabled */
- if (!kgsl_mmu_is_secured(&dev_priv->device->mmu) &&
+ if (!kgsl_mmu_is_secured(mmu) &&
(flags & KGSL_MEMFLAGS_SECURE)) {
dev_WARN_ONCE(dev_priv->device->dev, 1,
"Secure memory not supported");
@@ -3106,11 +3130,15 @@
flags = kgsl_filter_cachemode(flags);
+ /* Disable IO coherence if it is not supported on the chip */
+ if (!MMU_FEATURE(mmu, KGSL_MMU_IO_COHERENT))
+ flags &= ~((uint64_t)KGSL_MEMFLAGS_IOCOHERENT);
+
entry = kgsl_mem_entry_create();
if (entry == NULL)
return ERR_PTR(-ENOMEM);
- if (MMU_FEATURE(&dev_priv->device->mmu, KGSL_MMU_NEED_GUARD_PAGE))
+ if (MMU_FEATURE(mmu, KGSL_MMU_NEED_GUARD_PAGE))
entry->memdesc.priv |= KGSL_MEMDESC_GUARD_PAGE;
if (flags & KGSL_MEMFLAGS_SECURE)
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index f80da79..023e63e 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -75,6 +75,7 @@
* Used Data:
* Offset: Length(bytes): What
* 0x0: 4 * KGSL_PRIORITY_MAX_RB_LEVELS: RB0 RPTR
+ * 0x10: 8 * KGSL_PRIORITY_MAX_RB_LEVELS: RB0 CTXT RESTORE ADDR
*/
/* Shadow global helpers */
@@ -82,6 +83,13 @@
#define SCRATCH_RPTR_GPU_ADDR(dev, id) \
((dev)->scratch.gpuaddr + SCRATCH_RPTR_OFFSET(id))
+#define SCRATCH_PREEMPTION_CTXT_RESTORE_ADDR_OFFSET(id) \
+ (SCRATCH_RPTR_OFFSET(KGSL_PRIORITY_MAX_RB_LEVELS) + \
+ ((id) * sizeof(uint64_t)))
+#define SCRATCH_PREEMPTION_CTXT_RESTORE_GPU_ADDR(dev, id) \
+ ((dev)->scratch.gpuaddr + \
+ SCRATCH_PREEMPTION_CTXT_RESTORE_ADDR_OFFSET(id))
+
/* Timestamp window used to detect rollovers (half of integer range) */
#define KGSL_TIMESTAMP_WINDOW 0x80000000
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index 56496f7..0a7424a 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -1620,3 +1620,46 @@
device->gmu.pdev = NULL;
}
+
+/*
+ * adreno_gmu_fenced_write() - Perform a fenced write to a GMU-protected register
+ * @adreno_dev: Pointer to the Adreno device that owns the GMU
+ * @offset: 32bit register enum that is to be written
+ * @val: The value to be written to the register
+ * @fence_mask: The value to poll the fence status register
+ *
+ * Check the WRITEDROPPED0/1 bit in the FENCE_STATUS register to check if
+ * the write to the fenced register went through. If it didn't then we retry
+ * the write until it goes through or we time out.
+ */
+void adreno_gmu_fenced_write(struct adreno_device *adreno_dev,
+ enum adreno_regs offset, unsigned int val,
+ unsigned int fence_mask)
+{
+ unsigned int status, i;
+
+ adreno_writereg(adreno_dev, offset, val);
+
+ if (!kgsl_gmu_isenabled(KGSL_DEVICE(adreno_dev)))
+ return;
+
+ for (i = 0; i < GMU_WAKEUP_RETRY_MAX; i++) {
+ adreno_read_gmureg(adreno_dev, ADRENO_REG_GMU_AHB_FENCE_STATUS,
+ &status);
+
+ /*
+ * If !writedropped0/1, then the write to fenced register
+ * was successful
+ */
+ if (!(status & fence_mask))
+ return;
+ /* Wait a small amount of time before trying again */
+ udelay(GMU_WAKEUP_DELAY_US);
+
+ /* Try to write the fenced register again */
+ adreno_writereg(adreno_dev, offset, val);
+ }
+
+ dev_err(adreno_dev->dev.dev,
+ "GMU fenced register write timed out: reg %x\n", offset);
+}
diff --git a/drivers/gpu/msm/kgsl_gmu.h b/drivers/gpu/msm/kgsl_gmu.h
index 60d9cf8..90e87e4 100644
--- a/drivers/gpu/msm/kgsl_gmu.h
+++ b/drivers/gpu/msm/kgsl_gmu.h
@@ -30,8 +30,11 @@
GMU_INT_HOST_AHB_BUS_ERR | \
GMU_INT_FENCE_ERR)
-#define MAX_GMUFW_SIZE 0x2000 /* in dwords */
-#define FENCE_RANGE_MASK ((0x1 << 31) | (0x0A << 18) | (0x8A0))
+#define MAX_GMUFW_SIZE 0x2000 /* in bytes */
+#define FENCE_RANGE_MASK ((0x1 << 31) | ((0xA << 2) << 18) | (0x8A0))
+
+#define FENCE_STATUS_WRITEDROPPED0_MASK 0x1
+#define FENCE_STATUS_WRITEDROPPED1_MASK 0x2
/* Bitmask for GPU low power mode enabling and hysterisis*/
#define SPTP_ENABLE_MASK (BIT(2) | BIT(0))
@@ -78,6 +81,19 @@
#define OOB_PERFCNTR_SET_MASK BIT(17)
#define OOB_PERFCNTR_CHECK_MASK BIT(25)
#define OOB_PERFCNTR_CLEAR_MASK BIT(25)
+#define OOB_PREEMPTION_SET_MASK BIT(18)
+#define OOB_PREEMPTION_CHECK_MASK BIT(26)
+#define OOB_PREEMPTION_CLEAR_MASK BIT(26)
+
+/*
+ * Wait time before trying to write the register again.
+ * Hopefully the GMU has finished waking up during this delay.
+ * This delay must be less than the IFPC main hysteresis or
+ * the GMU will start shutting down before we try again.
+ */
+#define GMU_WAKEUP_DELAY_US 10
+/* Max amount of tries to wake up the GMU. */
+#define GMU_WAKEUP_RETRY_MAX 60
/* Bits for the flags field in the gmu structure */
enum gmu_flags {
diff --git a/drivers/gpu/msm/kgsl_hfi.c b/drivers/gpu/msm/kgsl_hfi.c
index eef5f45..3a5b489 100644
--- a/drivers/gpu/msm/kgsl_hfi.c
+++ b/drivers/gpu/msm/kgsl_hfi.c
@@ -183,7 +183,7 @@
rsp->ret_hdr.size,
rsp->ret_hdr.seqnum);
- spin_lock(&hfi->msglock);
+ spin_lock_bh(&hfi->msglock);
list_for_each_entry_safe(msg, next, &hfi->msglist, node) {
if (msg->msg_id == rsp->ret_hdr.id &&
msg->seqnum == rsp->ret_hdr.seqnum) {
@@ -193,7 +193,7 @@
}
if (in_queue == false) {
- spin_unlock(&hfi->msglock);
+ spin_unlock_bh(&hfi->msglock);
dev_err(&gmu->pdev->dev,
"Cannot find receiver of ack msg with id=%d\n",
rsp->ret_hdr.id);
@@ -202,7 +202,7 @@
memcpy(&msg->results, (void *) rsp, rsp->hdr.size << 2);
complete(&msg->msg_complete);
- spin_unlock(&hfi->msglock);
+ spin_unlock_bh(&hfi->msglock);
}
static void receive_err_msg(struct gmu_device *gmu, struct hfi_msg_rsp *rsp)
@@ -231,9 +231,9 @@
ret_msg->msg_id = msg->id;
ret_msg->seqnum = msg->seqnum;
- spin_lock(&hfi->msglock);
+ spin_lock_bh(&hfi->msglock);
list_add_tail(&ret_msg->node, &hfi->msglist);
- spin_unlock(&hfi->msglock);
+ spin_unlock_bh(&hfi->msglock);
if (hfi_cmdq_write(gmu, HFI_CMD_QUEUE, msg) != size) {
rc = -EINVAL;
@@ -253,9 +253,9 @@
/* If we got here we succeeded */
rc = 0;
done:
- spin_lock(&hfi->msglock);
+ spin_lock_bh(&hfi->msglock);
list_del(&ret_msg->node);
- spin_unlock(&hfi->msglock);
+ spin_unlock_bh(&hfi->msglock);
return rc;
}
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index dc0e733..ab3ab31 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -110,7 +110,7 @@
};
static struct global_pt_entry global_pt_entries[GLOBAL_PT_ENTRIES];
-static struct kgsl_memdesc *kgsl_global_secure_pt_entry;
+static int secure_global_size;
static int global_pt_count;
uint64_t global_pt_alloc;
static struct kgsl_memdesc gpu_qdss_desc;
@@ -162,24 +162,33 @@
return 0;
}
-static void kgsl_iommu_unmap_global_secure_pt_entry(struct kgsl_pagetable
- *pagetable)
+void kgsl_iommu_unmap_global_secure_pt_entry(struct kgsl_device *device,
+ struct kgsl_memdesc *entry)
{
- struct kgsl_memdesc *entry = kgsl_global_secure_pt_entry;
+ if (!kgsl_mmu_is_secured(&device->mmu))
+ return;
- if (entry != NULL)
- kgsl_mmu_unmap(pagetable, entry);
+ if (entry != NULL && entry->pagetable->name == KGSL_MMU_SECURE_PT)
+ kgsl_mmu_unmap(entry->pagetable, entry);
}
-static int kgsl_map_global_secure_pt_entry(struct kgsl_pagetable *pagetable)
+int kgsl_iommu_map_global_secure_pt_entry(struct kgsl_device *device,
+ struct kgsl_memdesc *entry)
{
int ret = 0;
- struct kgsl_memdesc *entry = kgsl_global_secure_pt_entry;
+
+ if (!kgsl_mmu_is_secured(&device->mmu))
+ return -ENOTSUPP;
if (entry != NULL) {
+ struct kgsl_pagetable *pagetable = device->mmu.securepagetable;
entry->pagetable = pagetable;
+ entry->gpuaddr = KGSL_IOMMU_SECURE_BASE + secure_global_size;
+
ret = kgsl_mmu_map(pagetable, entry);
+ if (ret == 0)
+ secure_global_size += entry->size;
}
return ret;
}
@@ -224,13 +233,6 @@
global_pt_count++;
}
-void kgsl_add_global_secure_entry(struct kgsl_device *device,
- struct kgsl_memdesc *memdesc)
-{
- memdesc->gpuaddr = KGSL_IOMMU_SECURE_BASE;
- kgsl_global_secure_pt_entry = memdesc;
-}
-
struct kgsl_memdesc *kgsl_iommu_get_qdss_global_entry(void)
{
return &gpu_qdss_desc;
@@ -1068,7 +1070,6 @@
if (pt->name == KGSL_MMU_SECURE_PT) {
ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
- kgsl_iommu_unmap_global_secure_pt_entry(pt);
} else {
ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
kgsl_iommu_unmap_globals(pt);
@@ -1089,13 +1090,10 @@
struct kgsl_pagetable *pagetable,
struct kgsl_iommu_pt *pt)
{
- unsigned int secure_global_size = kgsl_global_secure_pt_entry != NULL ?
- kgsl_global_secure_pt_entry->size : 0;
if (mmu->secured && pagetable->name == KGSL_MMU_SECURE_PT) {
- pt->compat_va_start = KGSL_IOMMU_SECURE_BASE +
- secure_global_size;
+ pt->compat_va_start = KGSL_IOMMU_SECURE_BASE;
pt->compat_va_end = KGSL_IOMMU_SECURE_END;
- pt->va_start = KGSL_IOMMU_SECURE_BASE + secure_global_size;
+ pt->va_start = KGSL_IOMMU_SECURE_BASE;
pt->va_end = KGSL_IOMMU_SECURE_END;
} else {
pt->compat_va_start = KGSL_IOMMU_SVM_BASE32;
@@ -1120,20 +1118,15 @@
struct kgsl_pagetable *pagetable,
struct kgsl_iommu_pt *pt)
{
- unsigned int secure_global_size = kgsl_global_secure_pt_entry != NULL ?
- kgsl_global_secure_pt_entry->size : 0;
if (mmu->secured) {
if (pagetable->name == KGSL_MMU_SECURE_PT) {
- pt->compat_va_start = KGSL_IOMMU_SECURE_BASE +
- secure_global_size;
+ pt->compat_va_start = KGSL_IOMMU_SECURE_BASE;
pt->compat_va_end = KGSL_IOMMU_SECURE_END;
- pt->va_start = KGSL_IOMMU_SECURE_BASE +
- secure_global_size;
+ pt->va_start = KGSL_IOMMU_SECURE_BASE;
pt->va_end = KGSL_IOMMU_SECURE_END;
} else {
pt->va_start = KGSL_IOMMU_SVM_BASE32;
- pt->va_end = KGSL_IOMMU_SECURE_BASE +
- secure_global_size;
+ pt->va_end = KGSL_IOMMU_SECURE_BASE;
pt->compat_va_start = pt->va_start;
pt->compat_va_end = pt->va_end;
}
@@ -1363,8 +1356,6 @@
ctx->regbase = iommu->regbase + KGSL_IOMMU_CB0_OFFSET
+ (cb_num << KGSL_IOMMU_CB_SHIFT);
- ret = kgsl_map_global_secure_pt_entry(pt);
-
done:
if (ret)
_free_pt(ctx, pt);
@@ -1608,6 +1599,18 @@
kgsl_setup_qdss_desc(device);
kgsl_setup_qtimer_desc(device);
+ if (!mmu->secured)
+ goto done;
+
+ mmu->securepagetable = kgsl_mmu_getpagetable(mmu,
+ KGSL_MMU_SECURE_PT);
+ if (IS_ERR(mmu->securepagetable)) {
+ status = PTR_ERR(mmu->securepagetable);
+ mmu->securepagetable = NULL;
+ } else if (mmu->securepagetable == NULL) {
+ status = -ENOMEM;
+ }
+
done:
if (status)
kgsl_iommu_close(mmu);
@@ -1689,17 +1692,9 @@
if (ctx->dev == NULL || !mmu->secured)
return 0;
- if (mmu->securepagetable == NULL) {
- mmu->securepagetable = kgsl_mmu_getpagetable(mmu,
- KGSL_MMU_SECURE_PT);
- if (IS_ERR(mmu->securepagetable)) {
- ret = PTR_ERR(mmu->securepagetable);
- mmu->securepagetable = NULL;
- return ret;
- } else if (mmu->securepagetable == NULL) {
- return -ENOMEM;
- }
- }
+ if (mmu->securepagetable == NULL)
+ return -ENOMEM;
+
iommu_pt = mmu->securepagetable->priv;
ret = _attach_pt(iommu_pt, ctx);
@@ -1840,6 +1835,9 @@
if (memdesc->priv & KGSL_MEMDESC_PRIVILEGED)
flags |= IOMMU_PRIV;
+ if (memdesc->flags & KGSL_MEMFLAGS_IOCOHERENT)
+ flags |= IOMMU_CACHE;
+
return flags;
}
@@ -2502,6 +2500,13 @@
end = pt->va_end;
}
+ /*
+ * When mapping secure buffers, adjust the start of the va range
+ * to the end of secure global buffers.
+ */
+ if (kgsl_memdesc_is_secured(memdesc))
+ start += secure_global_size;
+
spin_lock(&pagetable->lock);
addr = _get_unmapped_area(pagetable, start, end, size, align);
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index 7a8ab74..430a140 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -138,6 +138,8 @@
#define KGSL_MMU_PAGED BIT(8)
/* The device requires a guard page */
#define KGSL_MMU_NEED_GUARD_PAGE BIT(9)
+/* The device supports IO coherency */
+#define KGSL_MMU_IO_COHERENT BIT(10)
/**
* struct kgsl_mmu - Master definition for KGSL MMU devices
@@ -174,7 +176,9 @@
struct kgsl_pagetable *kgsl_mmu_getpagetable_ptbase(struct kgsl_mmu *mmu,
u64 ptbase);
-void kgsl_add_global_secure_entry(struct kgsl_device *device,
+int kgsl_iommu_map_global_secure_pt_entry(struct kgsl_device *device,
+ struct kgsl_memdesc *memdesc);
+void kgsl_iommu_unmap_global_secure_pt_entry(struct kgsl_device *device,
struct kgsl_memdesc *memdesc);
void kgsl_print_global_pt_entries(struct seq_file *s);
void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable);
diff --git a/drivers/gpu/msm/kgsl_pwrscale.c b/drivers/gpu/msm/kgsl_pwrscale.c
index 20590ea..6825c2b 100644
--- a/drivers/gpu/msm/kgsl_pwrscale.c
+++ b/drivers/gpu/msm/kgsl_pwrscale.c
@@ -372,7 +372,7 @@
}
if (nap_time && go_time) {
percent_nap = 100 * nap_time;
- do_div(percent_nap, nap_time + go_time);
+ div64_s64(percent_nap, nap_time + go_time);
}
trace_kgsl_popp_nap(device, (int)nap_time / 1000, nap,
percent_nap);
@@ -843,13 +843,17 @@
}
b = pwr->bus_mod;
- if (_check_fast_hint(bus_flag) &&
- ((pwr_level->bus_freq + pwr->bus_mod) < pwr_level->bus_max))
+ if (_check_fast_hint(bus_flag))
pwr->bus_mod++;
- else if (_check_slow_hint(bus_flag) &&
- ((pwr_level->bus_freq + pwr->bus_mod) > pwr_level->bus_min))
+ else if (_check_slow_hint(bus_flag))
pwr->bus_mod--;
+ /* trim calculated change to fit range */
+ if (pwr_level->bus_freq + pwr->bus_mod < pwr_level->bus_min)
+ pwr->bus_mod = -(pwr_level->bus_freq - pwr_level->bus_min);
+ else if (pwr_level->bus_freq + pwr->bus_mod > pwr_level->bus_max)
+ pwr->bus_mod = pwr_level->bus_max - pwr_level->bus_freq;
+
/* Update bus vote if AB or IB is modified */
if ((pwr->bus_mod != b) || (pwr->bus_ab_mbytes != ab_mbytes)) {
pwr->bus_percent_ab = device->pwrscale.bus_profile.percent_ab;
diff --git a/drivers/gpu/msm/kgsl_sync.h b/drivers/gpu/msm/kgsl_sync.h
index d58859d..7c9f334e 100644
--- a/drivers/gpu/msm/kgsl_sync.h
+++ b/drivers/gpu/msm/kgsl_sync.h
@@ -13,7 +13,7 @@
#ifndef __KGSL_SYNC_H
#define __KGSL_SYNC_H
-#include "sync_file.h"
+#include <linux/sync_file.h>
#include "kgsl_device.h"
#define KGSL_TIMELINE_NAME_LEN 32
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index 6263ea8..8f11d34 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -80,6 +80,7 @@
#define ICIER_TEIE 0x40
#define ICIER_RIE 0x20
#define ICIER_NAKIE 0x10
+#define ICIER_SPIE 0x08
#define ICSR2_NACKF 0x10
@@ -216,11 +217,10 @@
return IRQ_NONE;
}
- if (riic->is_last || riic->err)
+ if (riic->is_last || riic->err) {
+ riic_clear_set_bit(riic, 0, ICIER_SPIE, RIIC_ICIER);
writeb(ICCR2_SP, riic->base + RIIC_ICCR2);
-
- writeb(0, riic->base + RIIC_ICIER);
- complete(&riic->msg_done);
+ }
return IRQ_HANDLED;
}
@@ -240,13 +240,13 @@
if (riic->bytes_left == 1) {
/* STOP must come before we set ACKBT! */
- if (riic->is_last)
+ if (riic->is_last) {
+ riic_clear_set_bit(riic, 0, ICIER_SPIE, RIIC_ICIER);
writeb(ICCR2_SP, riic->base + RIIC_ICCR2);
+ }
riic_clear_set_bit(riic, 0, ICMR3_ACKBT, RIIC_ICMR3);
- writeb(0, riic->base + RIIC_ICIER);
- complete(&riic->msg_done);
} else {
riic_clear_set_bit(riic, ICMR3_ACKBT, 0, RIIC_ICMR3);
}
@@ -259,6 +259,21 @@
return IRQ_HANDLED;
}
+static irqreturn_t riic_stop_isr(int irq, void *data)
+{
+ struct riic_dev *riic = data;
+
+ /* read back registers to confirm writes have fully propagated */
+ writeb(0, riic->base + RIIC_ICSR2);
+ readb(riic->base + RIIC_ICSR2);
+ writeb(0, riic->base + RIIC_ICIER);
+ readb(riic->base + RIIC_ICIER);
+
+ complete(&riic->msg_done);
+
+ return IRQ_HANDLED;
+}
+
static u32 riic_func(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
@@ -326,6 +341,7 @@
{ .res_num = 0, .isr = riic_tend_isr, .name = "riic-tend" },
{ .res_num = 1, .isr = riic_rdrf_isr, .name = "riic-rdrf" },
{ .res_num = 2, .isr = riic_tdre_isr, .name = "riic-tdre" },
+ { .res_num = 3, .isr = riic_stop_isr, .name = "riic-stop" },
{ .res_num = 5, .isr = riic_tend_isr, .name = "riic-nack" },
};
diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c
index f2b3bd7..b4f643f 100644
--- a/drivers/iio/magnetometer/mag3110.c
+++ b/drivers/iio/magnetometer/mag3110.c
@@ -222,29 +222,39 @@
int val, int val2, long mask)
{
struct mag3110_data *data = iio_priv(indio_dev);
- int rate;
+ int rate, ret;
- if (iio_buffer_enabled(indio_dev))
- return -EBUSY;
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
rate = mag3110_get_samp_freq_index(data, val, val2);
- if (rate < 0)
- return -EINVAL;
+ if (rate < 0) {
+ ret = -EINVAL;
+ break;
+ }
data->ctrl_reg1 &= ~MAG3110_CTRL_DR_MASK;
data->ctrl_reg1 |= rate << MAG3110_CTRL_DR_SHIFT;
- return i2c_smbus_write_byte_data(data->client,
+ ret = i2c_smbus_write_byte_data(data->client,
MAG3110_CTRL_REG1, data->ctrl_reg1);
+ break;
case IIO_CHAN_INFO_CALIBBIAS:
- if (val < -10000 || val > 10000)
- return -EINVAL;
- return i2c_smbus_write_word_swapped(data->client,
+ if (val < -10000 || val > 10000) {
+ ret = -EINVAL;
+ break;
+ }
+ ret = i2c_smbus_write_word_swapped(data->client,
MAG3110_OFF_X + 2 * chan->scan_index, val << 1);
+ break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
}
static irqreturn_t mag3110_trigger_handler(int irq, void *p)
diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c
index a74ed1f..8cc7156 100644
--- a/drivers/iio/pressure/ms5611_core.c
+++ b/drivers/iio/pressure/ms5611_core.c
@@ -308,6 +308,7 @@
{
struct ms5611_state *st = iio_priv(indio_dev);
const struct ms5611_osr *osr = NULL;
+ int ret;
if (mask != IIO_CHAN_INFO_OVERSAMPLING_RATIO)
return -EINVAL;
@@ -321,12 +322,11 @@
if (!osr)
return -EINVAL;
- mutex_lock(&st->lock);
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
- if (iio_buffer_enabled(indio_dev)) {
- mutex_unlock(&st->lock);
- return -EBUSY;
- }
+ mutex_lock(&st->lock);
if (chan->type == IIO_TEMP)
st->temp_osr = osr;
@@ -334,6 +334,8 @@
st->pressure_osr = osr;
mutex_unlock(&st->lock);
+ iio_device_release_direct_mode(indio_dev);
+
return 0;
}
diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c
index 1f06282..9ea147f 100644
--- a/drivers/iio/proximity/sx9500.c
+++ b/drivers/iio/proximity/sx9500.c
@@ -387,14 +387,18 @@
int *val, int *val2, long mask)
{
struct sx9500_data *data = iio_priv(indio_dev);
+ int ret;
switch (chan->type) {
case IIO_PROXIMITY:
switch (mask) {
case IIO_CHAN_INFO_RAW:
- if (iio_buffer_enabled(indio_dev))
- return -EBUSY;
- return sx9500_read_proximity(data, chan, val);
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+ ret = sx9500_read_proximity(data, chan, val);
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
case IIO_CHAN_INFO_SAMP_FREQ:
return sx9500_read_samp_freq(data, val, val2);
default:
diff --git a/drivers/iio/trigger/iio-trig-interrupt.c b/drivers/iio/trigger/iio-trig-interrupt.c
index 572bc6f..e18f12b 100644
--- a/drivers/iio/trigger/iio-trig-interrupt.c
+++ b/drivers/iio/trigger/iio-trig-interrupt.c
@@ -58,7 +58,7 @@
trig_info = kzalloc(sizeof(*trig_info), GFP_KERNEL);
if (!trig_info) {
ret = -ENOMEM;
- goto error_put_trigger;
+ goto error_free_trigger;
}
iio_trigger_set_drvdata(trig, trig_info);
trig_info->irq = irq;
@@ -83,8 +83,8 @@
free_irq(irq, trig);
error_free_trig_info:
kfree(trig_info);
-error_put_trigger:
- iio_trigger_put(trig);
+error_free_trigger:
+ iio_trigger_free(trig);
error_ret:
return ret;
}
@@ -99,7 +99,7 @@
iio_trigger_unregister(trig);
free_irq(trig_info->irq, trig);
kfree(trig_info);
- iio_trigger_put(trig);
+ iio_trigger_free(trig);
return 0;
}
diff --git a/drivers/iio/trigger/iio-trig-sysfs.c b/drivers/iio/trigger/iio-trig-sysfs.c
index 3dfab2b..202e8b8 100644
--- a/drivers/iio/trigger/iio-trig-sysfs.c
+++ b/drivers/iio/trigger/iio-trig-sysfs.c
@@ -174,7 +174,7 @@
return 0;
out2:
- iio_trigger_put(t->trig);
+ iio_trigger_free(t->trig);
free_t:
kfree(t);
out1:
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 282c9fb..786f640 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -325,6 +325,27 @@
return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
}
+int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port_num,
+ int index, enum ib_gid_type *gid_type)
+{
+ struct ib_gid_attr attr;
+ union ib_gid gid;
+ int ret;
+
+ ret = ib_get_cached_gid(&dev->ib_dev, port_num, index, &gid, &attr);
+ if (ret)
+ return ret;
+
+ if (!attr.ndev)
+ return -ENODEV;
+
+ dev_put(attr.ndev);
+
+ *gid_type = attr.gid_type;
+
+ return 0;
+}
+
static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
{
if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 7d68990..86e1e081 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -892,6 +892,8 @@
__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
int index);
+int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port_num,
+ int index, enum ib_gid_type *gid_type);
/* GSI QP helper functions */
struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index aee3942..2665414 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -2226,6 +2226,7 @@
{
enum rdma_link_layer ll = rdma_port_get_link_layer(&dev->ib_dev, port);
int err;
+ enum ib_gid_type gid_type;
if (attr_mask & IB_QP_PKEY_INDEX)
path->pkey_index = cpu_to_be16(alt ? attr->alt_pkey_index :
@@ -2244,10 +2245,16 @@
if (ll == IB_LINK_LAYER_ETHERNET) {
if (!(ah->ah_flags & IB_AH_GRH))
return -EINVAL;
+ err = mlx5_get_roce_gid_type(dev, port, ah->grh.sgid_index,
+ &gid_type);
+ if (err)
+ return err;
memcpy(path->rmac, ah->dmac, sizeof(ah->dmac));
path->udp_sport = mlx5_get_roce_udp_sport(dev, port,
ah->grh.sgid_index);
path->dci_cfi_prio_sl = (ah->sl & 0x7) << 4;
+ if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
+ path->ecn_dscp = (ah->grh.traffic_class >> 2) & 0x3f;
} else {
path->fl_free_ar = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
path->fl_free_ar |=
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 9f46be5..9d08478 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -633,6 +633,7 @@
goto exit;
}
rmr->state = RXE_MEM_STATE_FREE;
+ rxe_drop_ref(rmr);
wqe->state = wqe_state_done;
wqe->status = IB_WC_SUCCESS;
} else if (wqe->wr.opcode == IB_WR_REG_MR) {
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 8f9aba7..39101b1 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -893,6 +893,7 @@
return RESPST_ERROR;
}
rmr->state = RXE_MEM_STATE_FREE;
+ rxe_drop_ref(rmr);
}
wc->qp = &qp->ibqp;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 0616a65..7576166 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1392,7 +1392,7 @@
while (!list_empty(&priv->cm.reap_list)) {
p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
- list_del(&p->list);
+ list_del_init(&p->list);
spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);
ipoib_cm_tx_destroy(p);
diff --git a/drivers/input/keyboard/mpr121_touchkey.c b/drivers/input/keyboard/mpr121_touchkey.c
index 0fd612d..aaf43be 100644
--- a/drivers/input/keyboard/mpr121_touchkey.c
+++ b/drivers/input/keyboard/mpr121_touchkey.c
@@ -87,7 +87,8 @@
struct mpr121_touchkey *mpr121 = dev_id;
struct i2c_client *client = mpr121->client;
struct input_dev *input = mpr121->input_dev;
- unsigned int key_num, key_val, pressed;
+ unsigned long bit_changed;
+ unsigned int key_num;
int reg;
reg = i2c_smbus_read_byte_data(client, ELE_TOUCH_STATUS_1_ADDR);
@@ -105,19 +106,23 @@
reg &= TOUCH_STATUS_MASK;
/* use old press bit to figure out which bit changed */
- key_num = ffs(reg ^ mpr121->statusbits) - 1;
- pressed = reg & (1 << key_num);
+ bit_changed = reg ^ mpr121->statusbits;
mpr121->statusbits = reg;
+ for_each_set_bit(key_num, &bit_changed, mpr121->keycount) {
+ unsigned int key_val, pressed;
- key_val = mpr121->keycodes[key_num];
+ pressed = reg & BIT(key_num);
+ key_val = mpr121->keycodes[key_num];
- input_event(input, EV_MSC, MSC_SCAN, key_num);
- input_report_key(input, key_val, pressed);
+ input_event(input, EV_MSC, MSC_SCAN, key_num);
+ input_report_key(input, key_val, pressed);
+
+ dev_dbg(&client->dev, "key %d %d %s\n", key_num, key_val,
+ pressed ? "pressed" : "released");
+
+ }
input_sync(input);
- dev_dbg(&client->dev, "key %d %d %s\n", key_num, key_val,
- pressed ? "pressed" : "released");
-
out:
return IRQ_HANDLED;
}
@@ -231,6 +236,7 @@
input_dev->id.bustype = BUS_I2C;
input_dev->dev.parent = &client->dev;
input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
+ input_set_capability(input_dev, EV_MSC, MSC_SCAN);
input_dev->keycode = mpr121->keycodes;
input_dev->keycodesize = sizeof(mpr121->keycodes[0]);
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index b8c50d8..c9d491b 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1240,6 +1240,7 @@
{ "ELAN0605", 0 },
{ "ELAN0609", 0 },
{ "ELAN060B", 0 },
+ { "ELAN060C", 0 },
{ "ELAN0611", 0 },
{ "ELAN1000", 0 },
{ }
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index e6f9b2d..d3d975a 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -1040,13 +1040,8 @@
}
}
- /* Nuke the existing Config, as we're going to rewrite it */
- val &= ~(STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT);
-
- if (ste->valid)
- val |= STRTAB_STE_0_V;
- else
- val &= ~STRTAB_STE_0_V;
+ /* Nuke the existing STE_0 value, as we're going to rewrite it */
+ val = ste->valid ? STRTAB_STE_0_V : 0;
if (ste->bypass) {
val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT
@@ -1081,7 +1076,6 @@
val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK
<< STRTAB_STE_0_S1CTXPTR_SHIFT) |
STRTAB_STE_0_CFG_S1_TRANS;
-
}
if (ste->s2_cfg) {
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index eae12c9..45fbd09 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -2214,13 +2214,13 @@
return;
}
- arm_smmu_domain_remove_master(smmu_domain, fwspec);
+ if (atomic_domain)
+ arm_smmu_power_on_atomic(smmu->pwr);
+ else
+ arm_smmu_power_on(smmu->pwr);
- /* Remove additional vote for atomic power */
- if (atomic_domain) {
- WARN_ON(arm_smmu_power_on_atomic(smmu->pwr));
- arm_smmu_power_off(smmu->pwr);
- }
+ arm_smmu_domain_remove_master(smmu_domain, fwspec);
+ arm_smmu_power_off(smmu->pwr);
}
static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain)
@@ -3465,7 +3465,7 @@
/* Force bypass transaction to be Non-Shareable & not io-coherent */
reg &= ~(sCR0_SHCFG_MASK << sCR0_SHCFG_SHIFT);
- reg |= sCR0_SHCFG_NSH;
+ reg |= sCR0_SHCFG_NSH << sCR0_SHCFG_SHIFT;
/* Push the button */
__arm_smmu_tlb_sync(smmu);
@@ -4341,7 +4341,7 @@
#define DEBUG_PAR_PA_SHIFT 12
#define DEBUG_PAR_FAULT_VAL 0x1
-#define TBU_DBG_TIMEOUT_US 30000
+#define TBU_DBG_TIMEOUT_US 100
#define QSMMUV500_ACTLR_DEEP_PREFETCH_MASK 0x3
#define QSMMUV500_ACTLR_DEEP_PREFETCH_SHIFT 0x8
@@ -4509,11 +4509,12 @@
.free_pages_exact = arm_smmu_free_pages_exact,
};
-static int qsmmuv500_tbu_halt(struct qsmmuv500_tbu_device *tbu)
+static int qsmmuv500_tbu_halt(struct qsmmuv500_tbu_device *tbu,
+ struct arm_smmu_domain *smmu_domain)
{
unsigned long flags;
- u32 val;
- void __iomem *base;
+ u32 halt, fsr, sctlr_orig, sctlr, status;
+ void __iomem *base, *cb_base;
spin_lock_irqsave(&tbu->halt_lock, flags);
if (tbu->halt_count) {
@@ -4522,19 +4523,49 @@
return 0;
}
+ cb_base = ARM_SMMU_CB_BASE(smmu_domain->smmu) +
+ ARM_SMMU_CB(smmu_domain->smmu, smmu_domain->cfg.cbndx);
base = tbu->base;
- val = readl_relaxed(base + DEBUG_SID_HALT_REG);
- val |= DEBUG_SID_HALT_VAL;
- writel_relaxed(val, base + DEBUG_SID_HALT_REG);
+ halt = readl_relaxed(base + DEBUG_SID_HALT_REG);
+ halt |= DEBUG_SID_HALT_VAL;
+ writel_relaxed(halt, base + DEBUG_SID_HALT_REG);
- if (readl_poll_timeout_atomic(base + DEBUG_SR_HALT_ACK_REG,
- val, (val & DEBUG_SR_HALT_ACK_VAL),
- 0, TBU_DBG_TIMEOUT_US)) {
+ if (!readl_poll_timeout_atomic(base + DEBUG_SR_HALT_ACK_REG, status,
+ (status & DEBUG_SR_HALT_ACK_VAL),
+ 0, TBU_DBG_TIMEOUT_US))
+ goto out;
+
+ fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
+ if (!(fsr & FSR_FAULT)) {
dev_err(tbu->dev, "Couldn't halt TBU!\n");
spin_unlock_irqrestore(&tbu->halt_lock, flags);
return -ETIMEDOUT;
}
+ /*
+ * We are in a fault; Our request to halt the bus will not complete
+ * until transactions in front of us (such as the fault itself) have
+ * completed. Disable iommu faults and terminate any existing
+ * transactions.
+ */
+ sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
+ sctlr = sctlr_orig & ~(SCTLR_CFCFG | SCTLR_CFIE);
+ writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
+
+ writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
+ writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
+
+ if (readl_poll_timeout_atomic(base + DEBUG_SR_HALT_ACK_REG, status,
+ (status & DEBUG_SR_HALT_ACK_VAL),
+ 0, TBU_DBG_TIMEOUT_US)) {
+ dev_err(tbu->dev, "Couldn't halt TBU from fault context!\n");
+ writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
+ spin_unlock_irqrestore(&tbu->halt_lock, flags);
+ return -ETIMEDOUT;
+ }
+
+ writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
+out:
tbu->halt_count = 1;
spin_unlock_irqrestore(&tbu->halt_lock, flags);
return 0;
@@ -4635,6 +4666,14 @@
void __iomem *cb_base;
u32 sctlr_orig, sctlr;
int needs_redo = 0;
+ ktime_t timeout;
+
+ /* only 36 bit iova is supported */
+ if (iova >= (1ULL << 36)) {
+ dev_err_ratelimited(smmu->dev, "ECATS: address too large: %pad\n",
+ &iova);
+ return 0;
+ }
cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
tbu = qsmmuv500_find_tbu(smmu, sid);
@@ -4645,35 +4684,23 @@
if (ret)
return 0;
- /*
- * Disable client transactions & wait for existing operations to
- * complete.
- */
- ret = qsmmuv500_tbu_halt(tbu);
+ ret = qsmmuv500_tbu_halt(tbu, smmu_domain);
if (ret)
goto out_power_off;
+ /*
+ * ECATS can trigger the fault interrupt, so disable it temporarily
+ * and check for an interrupt manually.
+ */
+ sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
+ sctlr = sctlr_orig & ~(SCTLR_CFCFG | SCTLR_CFIE);
+ writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
+
/* Only one concurrent atos operation */
ret = qsmmuv500_ecats_lock(smmu_domain, tbu, &flags);
if (ret)
goto out_resume;
- /*
- * We can be called from an interrupt handler with FSR already set
- * so terminate the faulting transaction prior to starting ecats.
- * No new racing faults can occur since we in the halted state.
- * ECATS can trigger the fault interrupt, so disable it temporarily
- * and check for an interrupt manually.
- */
- fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
- if (fsr & FSR_FAULT) {
- writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
- writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
- }
- sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
- sctlr = sctlr_orig & ~(SCTLR_CFCFG | SCTLR_CFIE);
- writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
-
redo:
/* Set address and stream-id */
val = readq_relaxed(tbu->base + DEBUG_SID_HALT_REG);
@@ -4692,16 +4719,26 @@
writeq_relaxed(val, tbu->base + DEBUG_TXN_TRIGG_REG);
ret = 0;
- if (readl_poll_timeout_atomic(tbu->base + DEBUG_SR_HALT_ACK_REG,
- val, !(val & DEBUG_SR_ECATS_RUNNING_VAL),
- 0, TBU_DBG_TIMEOUT_US)) {
- dev_err(tbu->dev, "ECATS translation timed out!\n");
+ //based on readx_poll_timeout_atomic
+ timeout = ktime_add_us(ktime_get(), TBU_DBG_TIMEOUT_US);
+ for (;;) {
+ val = readl_relaxed(tbu->base + DEBUG_SR_HALT_ACK_REG);
+ if (!(val & DEBUG_SR_ECATS_RUNNING_VAL))
+ break;
+ val = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
+ if (val & FSR_FAULT)
+ break;
+ if (ktime_compare(ktime_get(), timeout) > 0) {
+ dev_err(tbu->dev, "ECATS translation timed out!\n");
+ ret = -ETIMEDOUT;
+ break;
+ }
}
fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
if (fsr & FSR_FAULT) {
dev_err(tbu->dev, "ECATS generated a fault interrupt! FSR = %llx\n",
- val);
+ fsr);
ret = -EINVAL;
writel_relaxed(val, cb_base + ARM_SMMU_CB_FSR);
@@ -4828,7 +4865,7 @@
* Prefetch only works properly if the start and end of all
* buffers in the page table are aligned to 16 Kb.
*/
- if ((iommudata->actlr >> QSMMUV500_ACTLR_DEEP_PREFETCH_SHIFT) &&
+ if ((iommudata->actlr >> QSMMUV500_ACTLR_DEEP_PREFETCH_SHIFT) &
QSMMUV500_ACTLR_DEEP_PREFETCH_MASK)
smmu_domain->qsmmuv500_errata2_min_align = true;
diff --git a/drivers/iommu/iommu-debug.c b/drivers/iommu/iommu-debug.c
index 6d79cfb..22a708e 100644
--- a/drivers/iommu/iommu-debug.c
+++ b/drivers/iommu/iommu-debug.c
@@ -165,6 +165,7 @@
struct iommu_debug_device {
struct device *dev;
struct iommu_domain *domain;
+ struct dma_iommu_mapping *mapping;
u64 iova;
u64 phys;
size_t len;
@@ -1251,6 +1252,8 @@
if (arm_iommu_attach_device(dev, dma_mapping))
goto out_release_mapping;
+
+ ddev->mapping = dma_mapping;
pr_err("Attached\n");
} else {
if (!dev->archdata.mapping) {
@@ -1264,7 +1267,7 @@
goto out;
}
arm_iommu_detach_device(dev);
- arm_iommu_release_mapping(dev->archdata.mapping);
+ arm_iommu_release_mapping(ddev->mapping);
pr_err("Detached\n");
}
retval = count;
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 785d689..b8f30cd 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -668,6 +668,15 @@
LEDs in both PWM and light pattern generator (LPG) modes. For older
PMICs, it also supports WLEDs and flash LEDs.
+config LEDS_QPNP_FLASH
+ tristate "Support for QPNP Flash LEDs"
+ depends on LEDS_CLASS && MFD_SPMI_PMIC
+ help
+ This driver supports the flash LED functionality of Qualcomm
+ Technologies, Inc. QPNP PMICs. This driver supports PMICs up through
+ PM8994. It can configure the flash LED target current for several
+ independent channels.
+
config LEDS_QPNP_FLASH_V2
tristate "Support for QPNP V2 Flash LEDs"
depends on LEDS_CLASS && MFD_SPMI_PMIC
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 2ff9a7c..ba9bb8d 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -72,6 +72,7 @@
obj-$(CONFIG_LEDS_PM8058) += leds-pm8058.o
obj-$(CONFIG_LEDS_MLXCPLD) += leds-mlxcpld.o
obj-$(CONFIG_LEDS_QPNP) += leds-qpnp.o
+obj-$(CONFIG_LEDS_QPNP_FLASH) += leds-qpnp-flash.o
obj-$(CONFIG_LEDS_QPNP_FLASH_V2) += leds-qpnp-flash-v2.o
obj-$(CONFIG_LEDS_QPNP_WLED) += leds-qpnp-wled.o
obj-$(CONFIG_LEDS_QPNP_HAPTICS) += leds-qpnp-haptics.o
diff --git a/drivers/leds/leds-qpnp-flash.c b/drivers/leds/leds-qpnp-flash.c
new file mode 100644
index 0000000..3b07af8
--- /dev/null
+++ b/drivers/leds/leds-qpnp-flash.c
@@ -0,0 +1,2709 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+#include <linux/errno.h>
+#include <linux/leds.h>
+#include <linux/slab.h>
+#include <linux/of_device.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/workqueue.h>
+#include <linux/power_supply.h>
+#include <linux/leds-qpnp-flash.h>
+#include <linux/qpnp/qpnp-adc.h>
+#include <linux/qpnp/qpnp-revid.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include "leds.h"
+
+#define FLASH_LED_PERIPHERAL_SUBTYPE(base) (base + 0x05)
+#define FLASH_SAFETY_TIMER(base) (base + 0x40)
+#define FLASH_MAX_CURRENT(base) (base + 0x41)
+#define FLASH_LED0_CURRENT(base) (base + 0x42)
+#define FLASH_LED1_CURRENT(base) (base + 0x43)
+#define FLASH_CLAMP_CURRENT(base) (base + 0x44)
+#define FLASH_MODULE_ENABLE_CTRL(base) (base + 0x46)
+#define FLASH_LED_STROBE_CTRL(base) (base + 0x47)
+#define FLASH_LED_TMR_CTRL(base) (base + 0x48)
+#define FLASH_HEADROOM(base) (base + 0x4A)
+#define FLASH_STARTUP_DELAY(base) (base + 0x4B)
+#define FLASH_MASK_ENABLE(base) (base + 0x4C)
+#define FLASH_VREG_OK_FORCE(base) (base + 0x4F)
+#define FLASH_FAULT_DETECT(base) (base + 0x51)
+#define FLASH_THERMAL_DRATE(base) (base + 0x52)
+#define FLASH_CURRENT_RAMP(base) (base + 0x54)
+#define FLASH_VPH_PWR_DROOP(base) (base + 0x5A)
+#define FLASH_HDRM_SNS_ENABLE_CTRL0(base) (base + 0x5C)
+#define FLASH_HDRM_SNS_ENABLE_CTRL1(base) (base + 0x5D)
+#define FLASH_LED_UNLOCK_SECURE(base) (base + 0xD0)
+#define FLASH_PERPH_RESET_CTRL(base) (base + 0xDA)
+#define FLASH_TORCH(base) (base + 0xE4)
+
+#define FLASH_STATUS_REG_MASK 0xFF
+#define FLASH_LED_FAULT_STATUS(base) (base + 0x08)
+#define INT_LATCHED_STS(base) (base + 0x18)
+#define IN_POLARITY_HIGH(base) (base + 0x12)
+#define INT_SET_TYPE(base) (base + 0x11)
+#define INT_EN_SET(base) (base + 0x15)
+#define INT_LATCHED_CLR(base) (base + 0x14)
+
+#define FLASH_HEADROOM_MASK 0x03
+#define FLASH_STARTUP_DLY_MASK 0x03
+#define FLASH_VREG_OK_FORCE_MASK 0xC0
+#define FLASH_FAULT_DETECT_MASK 0x80
+#define FLASH_THERMAL_DERATE_MASK 0xBF
+#define FLASH_SECURE_MASK 0xFF
+#define FLASH_TORCH_MASK 0x03
+#define FLASH_CURRENT_MASK 0x7F
+#define FLASH_TMR_MASK 0x03
+#define FLASH_TMR_SAFETY 0x00
+#define FLASH_SAFETY_TIMER_MASK 0x7F
+#define FLASH_MODULE_ENABLE_MASK 0xE0
+#define FLASH_STROBE_MASK 0xC0
+#define FLASH_CURRENT_RAMP_MASK 0xBF
+#define FLASH_VPH_PWR_DROOP_MASK 0xF3
+#define FLASH_LED_HDRM_SNS_ENABLE_MASK 0x81
+#define FLASH_MASK_MODULE_CONTRL_MASK 0xE0
+#define FLASH_FOLLOW_OTST2_RB_MASK 0x08
+
+#define FLASH_LED_TRIGGER_DEFAULT "none"
+#define FLASH_LED_HEADROOM_DEFAULT_MV 500
+#define FLASH_LED_STARTUP_DELAY_DEFAULT_US 128
+#define FLASH_LED_CLAMP_CURRENT_DEFAULT_MA 200
+#define FLASH_LED_THERMAL_DERATE_THRESHOLD_DEFAULT_C 80
+#define FLASH_LED_RAMP_UP_STEP_DEFAULT_US 3
+#define FLASH_LED_RAMP_DN_STEP_DEFAULT_US 3
+#define FLASH_LED_VPH_PWR_DROOP_THRESHOLD_DEFAULT_MV 3200
+#define FLASH_LED_VPH_PWR_DROOP_DEBOUNCE_TIME_DEFAULT_US 10
+#define FLASH_LED_THERMAL_DERATE_RATE_DEFAULT_PERCENT 2
+#define FLASH_RAMP_UP_DELAY_US_MIN 1000
+#define FLASH_RAMP_UP_DELAY_US_MAX 1001
+#define FLASH_RAMP_DN_DELAY_US_MIN 2160
+#define FLASH_RAMP_DN_DELAY_US_MAX 2161
+#define FLASH_BOOST_REGULATOR_PROBE_DELAY_MS 2000
+#define FLASH_TORCH_MAX_LEVEL 0x0F
+#define FLASH_MAX_LEVEL 0x4F
+#define FLASH_LED_FLASH_HW_VREG_OK 0x40
+#define FLASH_LED_FLASH_SW_VREG_OK 0x80
+#define FLASH_LED_STROBE_TYPE_HW 0x04
+#define FLASH_DURATION_DIVIDER 10
+#define FLASH_LED_HEADROOM_DIVIDER 100
+#define FLASH_LED_HEADROOM_OFFSET 2
+#define FLASH_LED_MAX_CURRENT_MA 1000
+#define FLASH_LED_THERMAL_THRESHOLD_MIN 95
+#define FLASH_LED_THERMAL_DEVIDER 10
+#define FLASH_LED_VPH_DROOP_THRESHOLD_MIN_MV 2500
+#define FLASH_LED_VPH_DROOP_THRESHOLD_DIVIDER 100
+#define FLASH_LED_HDRM_SNS_ENABLE 0x81
+#define FLASH_LED_HDRM_SNS_DISABLE 0x01
+#define FLASH_LED_UA_PER_MA 1000
+#define FLASH_LED_MASK_MODULE_MASK2_ENABLE 0x20
+#define FLASH_LED_MASK3_ENABLE_SHIFT 7
+#define FLASH_LED_MODULE_CTRL_DEFAULT 0x60
+#define FLASH_LED_CURRENT_READING_DELAY_MIN 5000
+#define FLASH_LED_CURRENT_READING_DELAY_MAX 5001
+#define FLASH_LED_OPEN_FAULT_DETECTED 0xC
+
+#define FLASH_UNLOCK_SECURE 0xA5
+#define FLASH_LED_TORCH_ENABLE 0x00
+#define FLASH_LED_TORCH_DISABLE 0x03
+#define FLASH_MODULE_ENABLE 0x80
+#define FLASH_LED0_TRIGGER 0x80
+#define FLASH_LED1_TRIGGER 0x40
+#define FLASH_LED0_ENABLEMENT 0x40
+#define FLASH_LED1_ENABLEMENT 0x20
+#define FLASH_LED_DISABLE 0x00
+#define FLASH_LED_MIN_CURRENT_MA 13
+#define FLASH_SUBTYPE_DUAL 0x01
+#define FLASH_SUBTYPE_SINGLE 0x02
+
+/*
+ * ID represents physical LEDs for individual control purpose.
+ */
+enum flash_led_id {
+ FLASH_LED_0 = 0,
+ FLASH_LED_1,
+ FLASH_LED_SWITCH,
+};
+
+enum flash_led_type {
+ FLASH = 0,
+ TORCH,
+ SWITCH,
+};
+
+enum thermal_derate_rate {
+ RATE_1_PERCENT = 0,
+ RATE_1P25_PERCENT,
+ RATE_2_PERCENT,
+ RATE_2P5_PERCENT,
+ RATE_5_PERCENT,
+};
+
+enum current_ramp_steps {
+ RAMP_STEP_0P2_US = 0,
+ RAMP_STEP_0P4_US,
+ RAMP_STEP_0P8_US,
+ RAMP_STEP_1P6_US,
+ RAMP_STEP_3P3_US,
+ RAMP_STEP_6P7_US,
+ RAMP_STEP_13P5_US,
+ RAMP_STEP_27US,
+};
+
+struct flash_regulator_data {
+ struct regulator *regs;
+ const char *reg_name;
+ u32 max_volt_uv;
+};
+
+/*
+ * Configurations for each individual LED
+ */
+struct flash_node_data {
+ struct platform_device *pdev;
+ struct regmap *regmap;
+ struct led_classdev cdev;
+ struct work_struct work;
+ struct flash_regulator_data *reg_data;
+ u16 max_current;
+ u16 prgm_current;
+ u16 prgm_current2;
+ u16 duration;
+ u8 id;
+ u8 type;
+ u8 trigger;
+ u8 enable;
+ u8 num_regulators;
+ bool flash_on;
+};
+
+/*
+ * Flash LED configuration read from device tree
+ */
+struct flash_led_platform_data {
+ unsigned int temp_threshold_num;
+ unsigned int temp_derate_curr_num;
+ unsigned int *die_temp_derate_curr_ma;
+ unsigned int *die_temp_threshold_degc;
+ u16 ramp_up_step;
+ u16 ramp_dn_step;
+ u16 vph_pwr_droop_threshold;
+ u16 headroom;
+ u16 clamp_current;
+ u8 thermal_derate_threshold;
+ u8 vph_pwr_droop_debounce_time;
+ u8 startup_dly;
+ u8 thermal_derate_rate;
+ bool pmic_charger_support;
+ bool self_check_en;
+ bool thermal_derate_en;
+ bool current_ramp_en;
+ bool vph_pwr_droop_en;
+ bool hdrm_sns_ch0_en;
+ bool hdrm_sns_ch1_en;
+ bool power_detect_en;
+ bool mask3_en;
+ bool follow_rb_disable;
+ bool die_current_derate_en;
+};
+
+struct qpnp_flash_led_buffer {
+ struct mutex debugfs_lock; /* Prevent thread concurrency */
+ size_t rpos;
+ size_t wpos;
+ size_t len;
+ struct qpnp_flash_led *led;
+ u32 buffer_cnt;
+ char data[0];
+};
+
+/*
+ * Flash LED data structure containing flash LED attributes
+ */
+struct qpnp_flash_led {
+ struct pmic_revid_data *revid_data;
+ struct platform_device *pdev;
+ struct regmap *regmap;
+ struct flash_led_platform_data *pdata;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *gpio_state_active;
+ struct pinctrl_state *gpio_state_suspend;
+ struct flash_node_data *flash_node;
+ struct power_supply *battery_psy;
+ struct workqueue_struct *ordered_workq;
+ struct qpnp_vadc_chip *vadc_dev;
+ struct mutex flash_led_lock;
+ struct dentry *dbgfs_root;
+ int num_leds;
+ u16 base;
+ u16 current_addr;
+ u16 current2_addr;
+ u8 peripheral_type;
+ u8 fault_reg;
+ bool gpio_enabled;
+ bool charging_enabled;
+ bool strobe_debug;
+ bool dbg_feature_en;
+ bool open_fault;
+};
+
+static u8 qpnp_flash_led_ctrl_dbg_regs[] = {
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48,
+ 0x4A, 0x4B, 0x4C, 0x4F, 0x51, 0x52, 0x54, 0x55, 0x5A, 0x5C, 0x5D,
+};
+
+static int flash_led_dbgfs_file_open(struct qpnp_flash_led *led,
+ struct file *file)
+{
+ struct qpnp_flash_led_buffer *log;
+ size_t logbufsize = SZ_4K;
+
+ log = kzalloc(logbufsize, GFP_KERNEL);
+ if (!log)
+ return -ENOMEM;
+
+ log->rpos = 0;
+ log->wpos = 0;
+ log->len = logbufsize - sizeof(*log);
+ mutex_init(&log->debugfs_lock);
+ log->led = led;
+
+ log->buffer_cnt = 1;
+ file->private_data = log;
+
+ return 0;
+}
+
+static int flash_led_dfs_open(struct inode *inode, struct file *file)
+{
+ struct qpnp_flash_led *led = inode->i_private;
+
+ return flash_led_dbgfs_file_open(led, file);
+}
+
+static int flash_led_dfs_close(struct inode *inode, struct file *file)
+{
+ struct qpnp_flash_led_buffer *log = file->private_data;
+
+ if (log) {
+ file->private_data = NULL;
+ mutex_destroy(&log->debugfs_lock);
+ kfree(log);
+ }
+
+ return 0;
+}
+
+#define MIN_BUFFER_WRITE_LEN 20
+static int print_to_log(struct qpnp_flash_led_buffer *log,
+ const char *fmt, ...)
+{
+ va_list args;
+ int cnt;
+ char *log_buf;
+ size_t size = log->len - log->wpos;
+
+ if (size < MIN_BUFFER_WRITE_LEN)
+ return 0; /* not enough buffer left */
+
+ log_buf = &log->data[log->wpos];
+ va_start(args, fmt);
+ cnt = vscnprintf(log_buf, size, fmt, args);
+ va_end(args);
+
+ log->wpos += cnt;
+ return cnt;
+}
+
+static ssize_t flash_led_dfs_latched_reg_read(struct file *fp, char __user *buf,
+ size_t count, loff_t *ppos) {
+ struct qpnp_flash_led_buffer *log = fp->private_data;
+ struct qpnp_flash_led *led;
+ uint val;
+ int rc = 0;
+ size_t len;
+ size_t ret;
+
+ if (!log) {
+ pr_err("error: file private data is NULL\n");
+ return -EFAULT;
+ }
+ led = log->led;
+
+ mutex_lock(&log->debugfs_lock);
+ if ((log->rpos >= log->wpos && log->buffer_cnt == 0) ||
+ ((log->len - log->wpos) < MIN_BUFFER_WRITE_LEN))
+ goto unlock_mutex;
+
+ rc = regmap_read(led->regmap, INT_LATCHED_STS(led->base), &val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Unable to read from address %x, rc(%d)\n",
+ INT_LATCHED_STS(led->base), rc);
+ goto unlock_mutex;
+ }
+ log->buffer_cnt--;
+
+ rc = print_to_log(log, "0x%05X ", INT_LATCHED_STS(led->base));
+ if (rc == 0)
+ goto unlock_mutex;
+
+ rc = print_to_log(log, "0x%02X ", val);
+ if (rc == 0)
+ goto unlock_mutex;
+
+ if (log->wpos > 0 && log->data[log->wpos - 1] == ' ')
+ log->data[log->wpos - 1] = '\n';
+
+ len = min(count, log->wpos - log->rpos);
+
+ ret = copy_to_user(buf, &log->data[log->rpos], len);
+ if (ret) {
+ pr_err("error copy register value to user\n");
+ rc = -EFAULT;
+ goto unlock_mutex;
+ }
+
+ len -= ret;
+ *ppos += len;
+ log->rpos += len;
+
+ rc = len;
+
+unlock_mutex:
+ mutex_unlock(&log->debugfs_lock);
+ return rc;
+}
+
+static ssize_t flash_led_dfs_fault_reg_read(struct file *fp, char __user *buf,
+ size_t count, loff_t *ppos) {
+ struct qpnp_flash_led_buffer *log = fp->private_data;
+ struct qpnp_flash_led *led;
+ int rc = 0;
+ size_t len;
+ size_t ret;
+
+ if (!log) {
+ pr_err("error: file private data is NULL\n");
+ return -EFAULT;
+ }
+ led = log->led;
+
+ mutex_lock(&log->debugfs_lock);
+ if ((log->rpos >= log->wpos && log->buffer_cnt == 0) ||
+ ((log->len - log->wpos) < MIN_BUFFER_WRITE_LEN))
+ goto unlock_mutex;
+
+ log->buffer_cnt--;
+
+ rc = print_to_log(log, "0x%05X ", FLASH_LED_FAULT_STATUS(led->base));
+ if (rc == 0)
+ goto unlock_mutex;
+
+ rc = print_to_log(log, "0x%02X ", led->fault_reg);
+ if (rc == 0)
+ goto unlock_mutex;
+
+ if (log->wpos > 0 && log->data[log->wpos - 1] == ' ')
+ log->data[log->wpos - 1] = '\n';
+
+ len = min(count, log->wpos - log->rpos);
+
+ ret = copy_to_user(buf, &log->data[log->rpos], len);
+ if (ret) {
+ pr_err("error copy register value to user\n");
+ rc = -EFAULT;
+ goto unlock_mutex;
+ }
+
+ len -= ret;
+ *ppos += len;
+ log->rpos += len;
+
+ rc = len;
+
+unlock_mutex:
+ mutex_unlock(&log->debugfs_lock);
+ return rc;
+}
+
+static ssize_t flash_led_dfs_fault_reg_enable(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos) {
+
+ u8 *val;
+ int pos = 0;
+ int cnt = 0;
+ int data;
+ size_t ret = 0;
+
+ struct qpnp_flash_led_buffer *log = file->private_data;
+ struct qpnp_flash_led *led;
+ char *kbuf;
+
+ if (!log) {
+ pr_err("error: file private data is NULL\n");
+ return -EFAULT;
+ }
+ led = log->led;
+
+ mutex_lock(&log->debugfs_lock);
+ kbuf = kmalloc(count + 1, GFP_KERNEL);
+ if (!kbuf) {
+ ret = -ENOMEM;
+ goto unlock_mutex;
+ }
+
+ ret = copy_from_user(kbuf, buf, count);
+ if (!ret) {
+ pr_err("failed to copy data from user\n");
+ ret = -EFAULT;
+ goto free_buf;
+ }
+
+ count -= ret;
+ *ppos += count;
+ kbuf[count] = '\0';
+ val = kbuf;
+ while (sscanf(kbuf + pos, "%i", &data) == 1) {
+ pos++;
+ val[cnt++] = data & 0xff;
+ }
+
+ if (!cnt)
+ goto free_buf;
+
+ ret = count;
+ if (*val == 1)
+ led->strobe_debug = true;
+ else
+ led->strobe_debug = false;
+
+free_buf:
+ kfree(kbuf);
+unlock_mutex:
+ mutex_unlock(&log->debugfs_lock);
+ return ret;
+}
+
+static ssize_t flash_led_dfs_dbg_enable(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos) {
+
+ u8 *val;
+ int pos = 0;
+ int cnt = 0;
+ int data;
+ size_t ret = 0;
+ struct qpnp_flash_led_buffer *log = file->private_data;
+ struct qpnp_flash_led *led;
+ char *kbuf;
+
+ if (!log) {
+ pr_err("error: file private data is NULL\n");
+ return -EFAULT;
+ }
+ led = log->led;
+
+ mutex_lock(&log->debugfs_lock);
+ kbuf = kmalloc(count + 1, GFP_KERNEL);
+ if (!kbuf) {
+ ret = -ENOMEM;
+ goto unlock_mutex;
+ }
+
+ ret = copy_from_user(kbuf, buf, count);
+ if (ret == count) {
+ pr_err("failed to copy data from user\n");
+ ret = -EFAULT;
+ goto free_buf;
+ }
+ count -= ret;
+ *ppos += count;
+ kbuf[count] = '\0';
+ val = kbuf;
+ while (sscanf(kbuf + pos, "%i", &data) == 1) {
+ pos++;
+ val[cnt++] = data & 0xff;
+ }
+
+ if (!cnt)
+ goto free_buf;
+
+ ret = count;
+ if (*val == 1)
+ led->dbg_feature_en = true;
+ else
+ led->dbg_feature_en = false;
+
+free_buf:
+ kfree(kbuf);
+unlock_mutex:
+ mutex_unlock(&log->debugfs_lock);
+ return ret;
+}
+
+static const struct file_operations flash_led_dfs_latched_reg_fops = {
+ .open = flash_led_dfs_open,
+ .release = flash_led_dfs_close,
+ .read = flash_led_dfs_latched_reg_read,
+};
+
+static const struct file_operations flash_led_dfs_strobe_reg_fops = {
+ .open = flash_led_dfs_open,
+ .release = flash_led_dfs_close,
+ .read = flash_led_dfs_fault_reg_read,
+ .write = flash_led_dfs_fault_reg_enable,
+};
+
+static const struct file_operations flash_led_dfs_dbg_feature_fops = {
+ .open = flash_led_dfs_open,
+ .release = flash_led_dfs_close,
+ .write = flash_led_dfs_dbg_enable,
+};
+
+static int
+qpnp_led_masked_write(struct qpnp_flash_led *led, u16 addr, u8 mask, u8 val)
+{
+ int rc;
+
+ rc = regmap_update_bits(led->regmap, addr, mask, val);
+ if (rc)
+ dev_err(&led->pdev->dev,
+ "Unable to update_bits to addr=%x, rc(%d)\n", addr, rc);
+
+ dev_dbg(&led->pdev->dev, "Write 0x%02X to addr 0x%02X\n", val, addr);
+
+ return rc;
+}
+
+static int qpnp_flash_led_get_allowed_die_temp_curr(struct qpnp_flash_led *led,
+ int64_t die_temp_degc)
+{
+ int die_temp_curr_ma;
+
+ if (die_temp_degc >= led->pdata->die_temp_threshold_degc[0])
+ die_temp_curr_ma = 0;
+ else if (die_temp_degc >= led->pdata->die_temp_threshold_degc[1])
+ die_temp_curr_ma = led->pdata->die_temp_derate_curr_ma[0];
+ else if (die_temp_degc >= led->pdata->die_temp_threshold_degc[2])
+ die_temp_curr_ma = led->pdata->die_temp_derate_curr_ma[1];
+ else if (die_temp_degc >= led->pdata->die_temp_threshold_degc[3])
+ die_temp_curr_ma = led->pdata->die_temp_derate_curr_ma[2];
+ else if (die_temp_degc >= led->pdata->die_temp_threshold_degc[4])
+ die_temp_curr_ma = led->pdata->die_temp_derate_curr_ma[3];
+ else
+ die_temp_curr_ma = led->pdata->die_temp_derate_curr_ma[4];
+
+ return die_temp_curr_ma;
+}
+
+static int64_t qpnp_flash_led_get_die_temp(struct qpnp_flash_led *led)
+{
+ struct qpnp_vadc_result die_temp_result;
+ int rc;
+
+ rc = qpnp_vadc_read(led->vadc_dev, SPARE2, &die_temp_result);
+ if (rc) {
+ pr_err("failed to read the die temp\n");
+ return -EINVAL;
+ }
+
+ return die_temp_result.physical;
+}
+
+static int qpnp_get_pmic_revid(struct qpnp_flash_led *led)
+{
+ struct device_node *revid_dev_node;
+
+ revid_dev_node = of_parse_phandle(led->pdev->dev.of_node,
+ "qcom,pmic-revid", 0);
+ if (!revid_dev_node) {
+ dev_err(&led->pdev->dev,
+ "qcom,pmic-revid property missing\n");
+ return -EINVAL;
+ }
+
+ led->revid_data = get_revid_data(revid_dev_node);
+ if (IS_ERR(led->revid_data)) {
+ pr_err("Couldn't get revid data rc = %ld\n",
+ PTR_ERR(led->revid_data));
+ return PTR_ERR(led->revid_data);
+ }
+
+ return 0;
+}
+
+static int
+qpnp_flash_led_get_max_avail_current(struct flash_node_data *flash_node,
+ struct qpnp_flash_led *led)
+{
+ union power_supply_propval prop;
+ int64_t chg_temp_milidegc, die_temp_degc;
+ int max_curr_avail_ma = 2000;
+ int allowed_die_temp_curr_ma = 2000;
+ int rc;
+
+ if (led->pdata->power_detect_en) {
+ if (!led->battery_psy) {
+ dev_err(&led->pdev->dev,
+ "Failed to query power supply\n");
+ return -EINVAL;
+ }
+
+ /*
+ * When charging is enabled, enforce this new enablement
+ * sequence to reduce fuel gauge reading resolution.
+ */
+ if (led->charging_enabled) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_MODULE_ENABLE_CTRL(led->base),
+ FLASH_MODULE_ENABLE, FLASH_MODULE_ENABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Module enable reg write failed\n");
+ return -EINVAL;
+ }
+
+ usleep_range(FLASH_LED_CURRENT_READING_DELAY_MIN,
+ FLASH_LED_CURRENT_READING_DELAY_MAX);
+ }
+
+ power_supply_get_property(led->battery_psy,
+ POWER_SUPPLY_PROP_FLASH_CURRENT_MAX, &prop);
+ if (!prop.intval) {
+ dev_err(&led->pdev->dev,
+ "battery too low for flash\n");
+ return -EINVAL;
+ }
+
+ max_curr_avail_ma = (prop.intval / FLASH_LED_UA_PER_MA);
+ }
+
+ /*
+ * When thermal mitigation is available, this logic will execute to
+ * derate current based upon the PMIC die temperature.
+ */
+ if (led->pdata->die_current_derate_en) {
+ chg_temp_milidegc = qpnp_flash_led_get_die_temp(led);
+ if (chg_temp_milidegc < 0)
+ return -EINVAL;
+
+ die_temp_degc = div_s64(chg_temp_milidegc, 1000);
+ allowed_die_temp_curr_ma =
+ qpnp_flash_led_get_allowed_die_temp_curr(led,
+ die_temp_degc);
+ if (allowed_die_temp_curr_ma < 0)
+ return -EINVAL;
+ }
+
+ max_curr_avail_ma = (max_curr_avail_ma >= allowed_die_temp_curr_ma)
+ ? allowed_die_temp_curr_ma : max_curr_avail_ma;
+
+ return max_curr_avail_ma;
+}
+
+static ssize_t qpnp_flash_led_die_temp_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct qpnp_flash_led *led;
+ struct flash_node_data *flash_node;
+ unsigned long val;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ ssize_t ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ flash_node = container_of(led_cdev, struct flash_node_data, cdev);
+ led = dev_get_drvdata(&flash_node->pdev->dev);
+
+ /*'0' for disable die_temp feature; non-zero to enable feature*/
+ if (val == 0)
+ led->pdata->die_current_derate_en = false;
+ else
+ led->pdata->die_current_derate_en = true;
+
+ return count;
+}
+
+static ssize_t qpnp_led_strobe_type_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct flash_node_data *flash_node;
+ unsigned long state;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ ssize_t ret = -EINVAL;
+
+ ret = kstrtoul(buf, 10, &state);
+ if (ret)
+ return ret;
+
+ flash_node = container_of(led_cdev, struct flash_node_data, cdev);
+
+ /* '0' for sw strobe; '1' for hw strobe */
+ if (state == 1)
+ flash_node->trigger |= FLASH_LED_STROBE_TYPE_HW;
+ else
+ flash_node->trigger &= ~FLASH_LED_STROBE_TYPE_HW;
+
+ return count;
+}
+
+static ssize_t qpnp_flash_led_dump_regs_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qpnp_flash_led *led;
+ struct flash_node_data *flash_node;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ int rc, i, count = 0;
+ u16 addr;
+ uint val;
+
+ flash_node = container_of(led_cdev, struct flash_node_data, cdev);
+ led = dev_get_drvdata(&flash_node->pdev->dev);
+ for (i = 0; i < ARRAY_SIZE(qpnp_flash_led_ctrl_dbg_regs); i++) {
+ addr = led->base + qpnp_flash_led_ctrl_dbg_regs[i];
+ rc = regmap_read(led->regmap, addr, &val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Unable to read from addr=%x, rc(%d)\n",
+ addr, rc);
+ return -EINVAL;
+ }
+
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "REG_0x%x = 0x%02x\n", addr, val);
+
+ if (count >= PAGE_SIZE)
+ return PAGE_SIZE - 1;
+ }
+
+ return count;
+}
+
+static ssize_t qpnp_flash_led_current_derate_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct qpnp_flash_led *led;
+ struct flash_node_data *flash_node;
+ unsigned long val;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ ssize_t ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ flash_node = container_of(led_cdev, struct flash_node_data, cdev);
+ led = dev_get_drvdata(&flash_node->pdev->dev);
+
+ /*'0' for disable derate feature; non-zero to enable derate feature */
+ if (val == 0)
+ led->pdata->power_detect_en = false;
+ else
+ led->pdata->power_detect_en = true;
+
+ return count;
+}
+
+static ssize_t qpnp_flash_led_max_current_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qpnp_flash_led *led;
+ struct flash_node_data *flash_node;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ int max_curr_avail_ma = 0;
+
+ flash_node = container_of(led_cdev, struct flash_node_data, cdev);
+ led = dev_get_drvdata(&flash_node->pdev->dev);
+
+ if (led->flash_node[0].flash_on)
+ max_curr_avail_ma += led->flash_node[0].max_current;
+ if (led->flash_node[1].flash_on)
+ max_curr_avail_ma += led->flash_node[1].max_current;
+
+ if (led->pdata->power_detect_en ||
+ led->pdata->die_current_derate_en) {
+ max_curr_avail_ma =
+ qpnp_flash_led_get_max_avail_current(flash_node, led);
+
+ if (max_curr_avail_ma < 0)
+ return -EINVAL;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", max_curr_avail_ma);
+}
+
+static struct device_attribute qpnp_flash_led_attrs[] = {
+ __ATTR(strobe, 0664, NULL, qpnp_led_strobe_type_store),
+ __ATTR(reg_dump, 0664, qpnp_flash_led_dump_regs_show, NULL),
+ __ATTR(enable_current_derate, 0664, NULL,
+ qpnp_flash_led_current_derate_store),
+ __ATTR(max_allowed_current, 0664, qpnp_flash_led_max_current_show,
+ NULL),
+ __ATTR(enable_die_temp_current_derate, 0664, NULL,
+ qpnp_flash_led_die_temp_store),
+};
+
+static int qpnp_flash_led_get_thermal_derate_rate(const char *rate)
+{
+ /*
+ * return 5% derate as default value if user specifies
+ * a value un-supported
+ */
+ if (strcmp(rate, "1_PERCENT") == 0)
+ return RATE_1_PERCENT;
+ else if (strcmp(rate, "1P25_PERCENT") == 0)
+ return RATE_1P25_PERCENT;
+ else if (strcmp(rate, "2_PERCENT") == 0)
+ return RATE_2_PERCENT;
+ else if (strcmp(rate, "2P5_PERCENT") == 0)
+ return RATE_2P5_PERCENT;
+ else if (strcmp(rate, "5_PERCENT") == 0)
+ return RATE_5_PERCENT;
+ else
+ return RATE_5_PERCENT;
+}
+
+static int qpnp_flash_led_get_ramp_step(const char *step)
+{
+ /*
+ * return 27 us as default value if user specifies
+ * a value un-supported
+ */
+ if (strcmp(step, "0P2_US") == 0)
+ return RAMP_STEP_0P2_US;
+ else if (strcmp(step, "0P4_US") == 0)
+ return RAMP_STEP_0P4_US;
+ else if (strcmp(step, "0P8_US") == 0)
+ return RAMP_STEP_0P8_US;
+ else if (strcmp(step, "1P6_US") == 0)
+ return RAMP_STEP_1P6_US;
+ else if (strcmp(step, "3P3_US") == 0)
+ return RAMP_STEP_3P3_US;
+ else if (strcmp(step, "6P7_US") == 0)
+ return RAMP_STEP_6P7_US;
+ else if (strcmp(step, "13P5_US") == 0)
+ return RAMP_STEP_13P5_US;
+ else
+ return RAMP_STEP_27US;
+}
+
+static u8 qpnp_flash_led_get_droop_debounce_time(u8 val)
+{
+ /*
+ * return 10 us as default value if user specifies
+ * a value un-supported
+ */
+ switch (val) {
+ case 0:
+ return 0;
+ case 10:
+ return 1;
+ case 32:
+ return 2;
+ case 64:
+ return 3;
+ default:
+ return 1;
+ }
+}
+
+static u8 qpnp_flash_led_get_startup_dly(u8 val)
+{
+ /*
+ * return 128 us as default value if user specifies
+ * a value un-supported
+ */
+ switch (val) {
+ case 10:
+ return 0;
+ case 32:
+ return 1;
+ case 64:
+ return 2;
+ case 128:
+ return 3;
+ default:
+ return 3;
+ }
+}
+
+static int
+qpnp_flash_led_get_peripheral_type(struct qpnp_flash_led *led)
+{
+ int rc;
+ uint val;
+
+ rc = regmap_read(led->regmap,
+ FLASH_LED_PERIPHERAL_SUBTYPE(led->base), &val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Unable to read peripheral subtype\n");
+ return -EINVAL;
+ }
+
+ return val;
+}
+
+static int qpnp_flash_led_module_disable(struct qpnp_flash_led *led,
+ struct flash_node_data *flash_node)
+{
+ union power_supply_propval psy_prop;
+ int rc;
+ uint val, tmp;
+
+ rc = regmap_read(led->regmap, FLASH_LED_STROBE_CTRL(led->base), &val);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Unable to read strobe reg\n");
+ return -EINVAL;
+ }
+
+ tmp = (~flash_node->trigger) & val;
+ if (!tmp) {
+ if (flash_node->type == TORCH) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_LED_UNLOCK_SECURE(led->base),
+ FLASH_SECURE_MASK, FLASH_UNLOCK_SECURE);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Secure reg write failed\n");
+ return -EINVAL;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_TORCH(led->base),
+ FLASH_TORCH_MASK, FLASH_LED_TORCH_DISABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Torch reg write failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (led->battery_psy &&
+ led->revid_data->pmic_subtype == PMI8996_SUBTYPE &&
+ !led->revid_data->rev3) {
+ psy_prop.intval = false;
+ rc = power_supply_set_property(led->battery_psy,
+ POWER_SUPPLY_PROP_FLASH_TRIGGER,
+ &psy_prop);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Failed to enble charger i/p current limit\n");
+ return -EINVAL;
+ }
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_MODULE_ENABLE_CTRL(led->base),
+ FLASH_MODULE_ENABLE_MASK,
+ FLASH_LED_MODULE_CTRL_DEFAULT);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Module disable failed\n");
+ return -EINVAL;
+ }
+
+ if (led->pinctrl) {
+ rc = pinctrl_select_state(led->pinctrl,
+ led->gpio_state_suspend);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "failed to disable GPIO\n");
+ return -EINVAL;
+ }
+ led->gpio_enabled = false;
+ }
+
+ if (led->battery_psy) {
+ psy_prop.intval = false;
+ rc = power_supply_set_property(led->battery_psy,
+ POWER_SUPPLY_PROP_FLASH_ACTIVE,
+ &psy_prop);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Failed to setup OTG pulse skip enable\n");
+ return -EINVAL;
+ }
+ }
+ }
+
+ if (flash_node->trigger & FLASH_LED0_TRIGGER) {
+ rc = qpnp_led_masked_write(led,
+ led->current_addr,
+ FLASH_CURRENT_MASK, 0x00);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "current register write failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (flash_node->trigger & FLASH_LED1_TRIGGER) {
+ rc = qpnp_led_masked_write(led,
+ led->current2_addr,
+ FLASH_CURRENT_MASK, 0x00);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "current register write failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (flash_node->id == FLASH_LED_SWITCH)
+ flash_node->trigger &= FLASH_LED_STROBE_TYPE_HW;
+
+ return 0;
+}
+
+/* led_classdev brightness getter: reports the cached brightness value. */
+static enum
+led_brightness qpnp_flash_led_brightness_get(struct led_classdev *led_cdev)
+{
+	return led_cdev->brightness;
+}
+
+/*
+ * Parse the regulator child nodes of a flash LED device-tree node into
+ * flash_node->reg_data.  Each child must provide "regulator-name"; an
+ * optional "max-voltage" (in uV) is stored when present.
+ *
+ * Returns 0 on success or a negative errno on allocation/parse failure.
+ */
+static int flash_regulator_parse_dt(struct qpnp_flash_led *led,
+					struct flash_node_data *flash_node) {
+
+	int i = 0, rc;
+	struct device_node *node = flash_node->cdev.dev->of_node;
+	struct device_node *temp = NULL;
+	const char *temp_string;
+	u32 val;
+
+	/*
+	 * Size the array by the element type, not by a pointer to it --
+	 * sizeof(struct flash_regulator_data *) would under-allocate.
+	 */
+	flash_node->reg_data = devm_kzalloc(&led->pdev->dev,
+					sizeof(struct flash_regulator_data) *
+						flash_node->num_regulators,
+					GFP_KERNEL);
+	if (!flash_node->reg_data) {
+		dev_err(&led->pdev->dev,
+				"Unable to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	for_each_child_of_node(node, temp) {
+		rc = of_property_read_string(temp, "regulator-name",
+							&temp_string);
+		if (!rc)
+			flash_node->reg_data[i].reg_name = temp_string;
+		else {
+			dev_err(&led->pdev->dev,
+					"Unable to read regulator name\n");
+			/* drop the reference held by the iterator */
+			of_node_put(temp);
+			return rc;
+		}
+
+		/* "max-voltage" is optional; -EINVAL means it is absent. */
+		rc = of_property_read_u32(temp, "max-voltage", &val);
+		if (!rc) {
+			flash_node->reg_data[i].max_volt_uv = val;
+		} else if (rc != -EINVAL) {
+			dev_err(&led->pdev->dev,
+					"Unable to read max voltage\n");
+			/* drop the reference held by the iterator */
+			of_node_put(temp);
+			return rc;
+		}
+
+		i++;
+	}
+
+	return 0;
+}
+
+/*
+ * Acquire (on == true) or release (on == false) the regulators listed in
+ * flash_node->reg_data.  On acquire, each regulator that supports voltage
+ * control is also programmed to its max_volt_uv.  A failure part-way
+ * through unwinds everything acquired so far.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int flash_regulator_setup(struct qpnp_flash_led *led,
+					struct flash_node_data *flash_node, bool on)
+{
+	int i, rc = 0;
+
+	if (on == false) {
+		/* Release path: unwind the whole array (rc stays 0). */
+		i = flash_node->num_regulators;
+		goto error_regulator_setup;
+	}
+
+	for (i = 0; i < flash_node->num_regulators; i++) {
+		flash_node->reg_data[i].regs =
+			regulator_get(flash_node->cdev.dev,
+					flash_node->reg_data[i].reg_name);
+		if (IS_ERR(flash_node->reg_data[i].regs)) {
+			rc = PTR_ERR(flash_node->reg_data[i].regs);
+			dev_err(&led->pdev->dev,
+					"Failed to get regulator\n");
+			goto error_regulator_setup;
+		}
+
+		if (regulator_count_voltages(flash_node->reg_data[i].regs)
+									> 0) {
+			rc = regulator_set_voltage(flash_node->reg_data[i].regs,
+					flash_node->reg_data[i].max_volt_uv,
+					flash_node->reg_data[i].max_volt_uv);
+			if (rc) {
+				dev_err(&led->pdev->dev,
+					"regulator set voltage failed\n");
+				regulator_put(flash_node->reg_data[i].regs);
+				goto error_regulator_setup;
+			}
+		}
+	}
+
+	return rc;
+
+error_regulator_setup:
+	/*
+	 * Unwind entries [0, i) in reverse; the failing index itself was
+	 * already put (or never obtained) before jumping here.
+	 */
+	while (i--) {
+		if (regulator_count_voltages(flash_node->reg_data[i].regs)
+									> 0) {
+			regulator_set_voltage(flash_node->reg_data[i].regs,
+				0, flash_node->reg_data[i].max_volt_uv);
+		}
+
+		regulator_put(flash_node->reg_data[i].regs);
+	}
+
+	return rc;
+}
+
+/*
+ * Enable (on == true) or disable (on == false) every regulator attached
+ * to @flash_node.  If one enable fails, all regulators enabled so far
+ * are switched off again before the error code is returned.
+ */
+static int flash_regulator_enable(struct qpnp_flash_led *led,
+				struct flash_node_data *flash_node, bool on)
+{
+	int idx, rc = 0;
+
+	if (!on) {
+		/* Disable request: unwind the full array, return 0. */
+		idx = flash_node->num_regulators;
+		goto unwind;
+	}
+
+	for (idx = 0; idx < flash_node->num_regulators; idx++) {
+		rc = regulator_enable(flash_node->reg_data[idx].regs);
+		if (rc) {
+			dev_err(&led->pdev->dev,
+					"regulator enable failed\n");
+			goto unwind;
+		}
+	}
+
+	return rc;
+
+unwind:
+	/* Turn off regulators [0, idx) in reverse order. */
+	while (idx--)
+		regulator_disable(flash_node->reg_data[idx].regs);
+
+	return rc;
+}
+
+/*
+ * qpnp_flash_led_prepare - hook for external flash clients (e.g. camera)
+ * @trig:	LED trigger attached to one of this driver's classdevs
+ * @options:	bitmask of ENABLE_REGULATOR / DISABLE_REGULATOR /
+ *		QUERY_MAX_CURRENT
+ * @max_current: out parameter, written with the maximum available
+ *		current (mA) when QUERY_MAX_CURRENT is set; must be
+ *		non-NULL in that case
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int qpnp_flash_led_prepare(struct led_trigger *trig, int options,
+					int *max_current)
+{
+	struct led_classdev *led_cdev = trigger_to_lcdev(trig);
+	struct flash_node_data *flash_node;
+	struct qpnp_flash_led *led;
+	int rc;
+
+	if (!led_cdev) {
+		pr_err("Invalid led_trigger provided\n");
+		return -EINVAL;
+	}
+
+	flash_node = container_of(led_cdev, struct flash_node_data, cdev);
+	led = dev_get_drvdata(&flash_node->pdev->dev);
+
+	if (!(options & FLASH_LED_PREPARE_OPTIONS_MASK)) {
+		dev_err(&led->pdev->dev, "Invalid options %d\n", options);
+		return -EINVAL;
+	}
+
+	if (options & ENABLE_REGULATOR) {
+		rc = flash_regulator_enable(led, flash_node, true);
+		if (rc < 0) {
+			dev_err(&led->pdev->dev,
+				"enable regulator failed, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	if (options & DISABLE_REGULATOR) {
+		rc = flash_regulator_enable(led, flash_node, false);
+		if (rc < 0) {
+			dev_err(&led->pdev->dev,
+				"disable regulator failed, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	if (options & QUERY_MAX_CURRENT) {
+		/* Guard the out pointer before dereferencing it below. */
+		if (!max_current) {
+			dev_err(&led->pdev->dev,
+				"Invalid max_current pointer\n");
+			return -EINVAL;
+		}
+
+		rc = qpnp_flash_led_get_max_avail_current(flash_node, led);
+		if (rc < 0) {
+			dev_err(&led->pdev->dev,
+				"query max current failed, rc=%d\n", rc);
+			return rc;
+		}
+		*max_current = rc;
+	}
+
+	return 0;
+}
+
+/*
+ * Deferred worker that applies the brightness value cached by
+ * qpnp_flash_led_brightness_set() to the hardware.  Runs on the driver's
+ * ordered workqueue with flash_led_lock held for the entire programming
+ * sequence.  A brightness of 0 takes the turn_off path.  The statement
+ * order below is the hardware programming contract (unlock-secure before
+ * protected writes, current before strobe, ramp delays) -- do not reorder.
+ */
+static void qpnp_flash_led_work(struct work_struct *work)
+{
+	struct flash_node_data *flash_node = container_of(work,
+				struct flash_node_data, work);
+	struct qpnp_flash_led *led = dev_get_drvdata(&flash_node->pdev->dev);
+	union power_supply_propval psy_prop;
+	int rc, brightness = flash_node->cdev.brightness;
+	int max_curr_avail_ma = 0;
+	int total_curr_ma = 0;
+	int i;
+	u8 val = 0;
+	uint temp;
+
+	mutex_lock(&led->flash_led_lock);
+
+	if (!brightness)
+		goto turn_off;
+
+	/* Once an open fault has been latched, reject further enablement. */
+	if (led->open_fault) {
+		dev_err(&led->pdev->dev, "Open fault detected\n");
+		mutex_unlock(&led->flash_led_lock);
+		return;
+	}
+
+	/* First activation of this node: power up its regulators. */
+	if (!flash_node->flash_on && flash_node->num_regulators > 0) {
+		rc = flash_regulator_enable(led, flash_node, true);
+		if (rc) {
+			mutex_unlock(&led->flash_led_lock);
+			return;
+		}
+	}
+
+	if (!led->gpio_enabled && led->pinctrl) {
+		rc = pinctrl_select_state(led->pinctrl,
+						led->gpio_state_active);
+		if (rc) {
+			dev_err(&led->pdev->dev, "failed to enable GPIO\n");
+			goto error_enable_gpio;
+		}
+		led->gpio_enabled = true;
+	}
+
+	/* Debug feature: unmask and latch-clear all flash interrupts. */
+	if (led->dbg_feature_en) {
+		rc = qpnp_led_masked_write(led,
+						INT_SET_TYPE(led->base),
+						FLASH_STATUS_REG_MASK, 0x1F);
+		if (rc) {
+			dev_err(&led->pdev->dev,
+					"INT_SET_TYPE write failed\n");
+			goto exit_flash_led_work;
+		}
+
+		rc = qpnp_led_masked_write(led,
+					IN_POLARITY_HIGH(led->base),
+					FLASH_STATUS_REG_MASK, 0x1F);
+		if (rc) {
+			dev_err(&led->pdev->dev,
+					"IN_POLARITY_HIGH write failed\n");
+			goto exit_flash_led_work;
+		}
+
+		rc = qpnp_led_masked_write(led,
+						INT_EN_SET(led->base),
+						FLASH_STATUS_REG_MASK, 0x1F);
+		if (rc) {
+			dev_err(&led->pdev->dev, "INT_EN_SET write failed\n");
+			goto exit_flash_led_work;
+		}
+
+		rc = qpnp_led_masked_write(led,
+					INT_LATCHED_CLR(led->base),
+					FLASH_STATUS_REG_MASK, 0x1F);
+		if (rc) {
+			dev_err(&led->pdev->dev,
+					"INT_LATCHED_CLR write failed\n");
+			goto exit_flash_led_work;
+		}
+	}
+
+	/*
+	 * When a switch node exists (always the last node), individual LED
+	 * requests only accumulate trigger bits and currents on the switch;
+	 * the switch node performs the actual strobe.
+	 */
+	if (led->flash_node[led->num_leds - 1].id == FLASH_LED_SWITCH &&
+					flash_node->id != FLASH_LED_SWITCH) {
+		led->flash_node[led->num_leds - 1].trigger |=
+						(0x80 >> flash_node->id);
+		if (flash_node->id == FLASH_LED_0)
+			led->flash_node[led->num_leds - 1].prgm_current =
+						flash_node->prgm_current;
+		else if (flash_node->id == FLASH_LED_1)
+			led->flash_node[led->num_leds - 1].prgm_current2 =
+						flash_node->prgm_current;
+	}
+
+	if (flash_node->type == TORCH) {
+		/* TORCH mode registers are protected: unlock secure first. */
+		rc = qpnp_led_masked_write(led,
+			FLASH_LED_UNLOCK_SECURE(led->base),
+			FLASH_SECURE_MASK, FLASH_UNLOCK_SECURE);
+		if (rc) {
+			dev_err(&led->pdev->dev, "Secure reg write failed\n");
+			goto exit_flash_led_work;
+		}
+
+		rc = qpnp_led_masked_write(led,
+			FLASH_TORCH(led->base),
+			FLASH_TORCH_MASK, FLASH_LED_TORCH_ENABLE);
+		if (rc) {
+			dev_err(&led->pdev->dev, "Torch reg write failed\n");
+			goto exit_flash_led_work;
+		}
+
+		if (flash_node->id == FLASH_LED_SWITCH) {
+			/* Scale both channel currents to torch levels. */
+			val = (u8)(flash_node->prgm_current *
+						FLASH_TORCH_MAX_LEVEL
+						/ flash_node->max_current);
+			rc = qpnp_led_masked_write(led,
+						led->current_addr,
+						FLASH_CURRENT_MASK, val);
+			if (rc) {
+				dev_err(&led->pdev->dev,
+					"Torch reg write failed\n");
+				goto exit_flash_led_work;
+			}
+
+			val = (u8)(flash_node->prgm_current2 *
+						FLASH_TORCH_MAX_LEVEL
+						/ flash_node->max_current);
+			rc = qpnp_led_masked_write(led,
+					led->current2_addr,
+					FLASH_CURRENT_MASK, val);
+			if (rc) {
+				dev_err(&led->pdev->dev,
+					"Torch reg write failed\n");
+				goto exit_flash_led_work;
+			}
+		} else {
+			val = (u8)(flash_node->prgm_current *
+						FLASH_TORCH_MAX_LEVEL /
+						flash_node->max_current);
+			if (flash_node->id == FLASH_LED_0) {
+				rc = qpnp_led_masked_write(led,
+						led->current_addr,
+						FLASH_CURRENT_MASK, val);
+				if (rc) {
+					dev_err(&led->pdev->dev,
+						"current reg write failed\n");
+					goto exit_flash_led_work;
+				}
+			} else {
+				rc = qpnp_led_masked_write(led,
+						led->current2_addr,
+						FLASH_CURRENT_MASK, val);
+				if (rc) {
+					dev_err(&led->pdev->dev,
+						"current reg write failed\n");
+					goto exit_flash_led_work;
+				}
+			}
+		}
+
+		rc = qpnp_led_masked_write(led,
+			FLASH_MAX_CURRENT(led->base),
+			FLASH_CURRENT_MASK, FLASH_TORCH_MAX_LEVEL);
+		if (rc) {
+			dev_err(&led->pdev->dev,
+				"Max current reg write failed\n");
+			goto exit_flash_led_work;
+		}
+
+		rc = qpnp_led_masked_write(led,
+			FLASH_MODULE_ENABLE_CTRL(led->base),
+			FLASH_MODULE_ENABLE_MASK, FLASH_MODULE_ENABLE);
+		if (rc) {
+			dev_err(&led->pdev->dev,
+				"Module enable reg write failed\n");
+			goto exit_flash_led_work;
+		}
+
+		/* Headroom sensing, per enabled channel. */
+		if (led->pdata->hdrm_sns_ch0_en ||
+						led->pdata->hdrm_sns_ch1_en) {
+			if (flash_node->id == FLASH_LED_SWITCH) {
+				rc = qpnp_led_masked_write(led,
+					FLASH_HDRM_SNS_ENABLE_CTRL0(led->base),
+					FLASH_LED_HDRM_SNS_ENABLE_MASK,
+					flash_node->trigger &
+					FLASH_LED0_TRIGGER ?
+					FLASH_LED_HDRM_SNS_ENABLE :
+					FLASH_LED_HDRM_SNS_DISABLE);
+				if (rc) {
+					dev_err(&led->pdev->dev,
+					"Headroom sense enable failed\n");
+					goto exit_flash_led_work;
+				}
+
+				rc = qpnp_led_masked_write(led,
+					FLASH_HDRM_SNS_ENABLE_CTRL1(led->base),
+					FLASH_LED_HDRM_SNS_ENABLE_MASK,
+					flash_node->trigger &
+					FLASH_LED1_TRIGGER ?
+					FLASH_LED_HDRM_SNS_ENABLE :
+					FLASH_LED_HDRM_SNS_DISABLE);
+				if (rc) {
+					dev_err(&led->pdev->dev,
+					"Headroom sense enable failed\n");
+					goto exit_flash_led_work;
+				}
+			} else if (flash_node->id == FLASH_LED_0) {
+				rc = qpnp_led_masked_write(led,
+					FLASH_HDRM_SNS_ENABLE_CTRL0(led->base),
+					FLASH_LED_HDRM_SNS_ENABLE_MASK,
+					FLASH_LED_HDRM_SNS_ENABLE);
+				if (rc) {
+					dev_err(&led->pdev->dev,
+					"Headroom sense disable failed\n");
+					goto exit_flash_led_work;
+				}
+			} else if (flash_node->id == FLASH_LED_1) {
+				rc = qpnp_led_masked_write(led,
+					FLASH_HDRM_SNS_ENABLE_CTRL1(led->base),
+					FLASH_LED_HDRM_SNS_ENABLE_MASK,
+					FLASH_LED_HDRM_SNS_ENABLE);
+				if (rc) {
+					dev_err(&led->pdev->dev,
+					"Headroom sense disable failed\n");
+					goto exit_flash_led_work;
+				}
+			}
+		}
+
+		/* Finally assert the strobe to light the torch. */
+		rc = qpnp_led_masked_write(led,
+			FLASH_LED_STROBE_CTRL(led->base),
+			(flash_node->id == FLASH_LED_SWITCH ? FLASH_STROBE_MASK
+						| FLASH_LED_STROBE_TYPE_HW
+							: flash_node->trigger |
+						FLASH_LED_STROBE_TYPE_HW),
+							flash_node->trigger);
+		if (rc) {
+			dev_err(&led->pdev->dev, "Strobe reg write failed\n");
+			goto exit_flash_led_work;
+		}
+	} else if (flash_node->type == FLASH) {
+		if (flash_node->trigger & FLASH_LED0_TRIGGER)
+			max_curr_avail_ma += flash_node->max_current;
+		if (flash_node->trigger & FLASH_LED1_TRIGGER)
+			max_curr_avail_ma += flash_node->max_current;
+
+		/* Tell the charger a flash pulse is coming (OTG skip). */
+		psy_prop.intval = true;
+		rc = power_supply_set_property(led->battery_psy,
+						POWER_SUPPLY_PROP_FLASH_ACTIVE,
+							&psy_prop);
+		if (rc) {
+			dev_err(&led->pdev->dev,
+				"Failed to setup OTG pulse skip enable\n");
+			goto exit_flash_led_work;
+		}
+
+		/* Derate the available current from battery/die state. */
+		if (led->pdata->power_detect_en ||
+					led->pdata->die_current_derate_en) {
+			if (led->battery_psy) {
+				power_supply_get_property(led->battery_psy,
+					POWER_SUPPLY_PROP_STATUS,
+					&psy_prop);
+				if (psy_prop.intval < 0) {
+					dev_err(&led->pdev->dev,
+						"Invalid battery status\n");
+					goto exit_flash_led_work;
+				}
+
+				if (psy_prop.intval ==
+						POWER_SUPPLY_STATUS_CHARGING)
+					led->charging_enabled = true;
+				else if (psy_prop.intval ==
+					POWER_SUPPLY_STATUS_DISCHARGING
+					|| psy_prop.intval ==
+					POWER_SUPPLY_STATUS_NOT_CHARGING)
+					led->charging_enabled = false;
+			}
+			max_curr_avail_ma =
+				qpnp_flash_led_get_max_avail_current
+							(flash_node, led);
+			if (max_curr_avail_ma < 0) {
+				dev_err(&led->pdev->dev,
+					"Failed to get max avail curr\n");
+				goto exit_flash_led_work;
+			}
+		}
+
+		if (flash_node->id == FLASH_LED_SWITCH) {
+			if (flash_node->trigger & FLASH_LED0_TRIGGER)
+				total_curr_ma += flash_node->prgm_current;
+			if (flash_node->trigger & FLASH_LED1_TRIGGER)
+				total_curr_ma += flash_node->prgm_current2;
+
+			/* Proportionally scale both channels down to fit. */
+			if (max_curr_avail_ma < total_curr_ma) {
+				flash_node->prgm_current =
+					(flash_node->prgm_current *
+					max_curr_avail_ma) / total_curr_ma;
+				flash_node->prgm_current2 =
+					(flash_node->prgm_current2 *
+					max_curr_avail_ma) / total_curr_ma;
+			}
+
+			val = (u8)(flash_node->prgm_current *
+				FLASH_MAX_LEVEL / flash_node->max_current);
+			rc = qpnp_led_masked_write(led,
+				led->current_addr, FLASH_CURRENT_MASK, val);
+			if (rc) {
+				dev_err(&led->pdev->dev,
+					"Current register write failed\n");
+				goto exit_flash_led_work;
+			}
+
+			val = (u8)(flash_node->prgm_current2 *
+				FLASH_MAX_LEVEL / flash_node->max_current);
+			rc = qpnp_led_masked_write(led,
+				led->current2_addr, FLASH_CURRENT_MASK, val);
+			if (rc) {
+				dev_err(&led->pdev->dev,
+					"Current register write failed\n");
+				goto exit_flash_led_work;
+			}
+		} else {
+			if (max_curr_avail_ma < flash_node->prgm_current) {
+				dev_err(&led->pdev->dev,
+					"battery only supprots %d mA\n",
+					max_curr_avail_ma);
+				flash_node->prgm_current =
+					 (u16)max_curr_avail_ma;
+			}
+
+			val = (u8)(flash_node->prgm_current *
+						FLASH_MAX_LEVEL
+						/ flash_node->max_current);
+			if (flash_node->id == FLASH_LED_0) {
+				rc = qpnp_led_masked_write(
+					led,
+					led->current_addr,
+					FLASH_CURRENT_MASK, val);
+				if (rc) {
+					dev_err(&led->pdev->dev,
+						"current reg write failed\n");
+					goto exit_flash_led_work;
+				}
+			} else if (flash_node->id == FLASH_LED_1) {
+				rc = qpnp_led_masked_write(
+					led,
+					led->current2_addr,
+					FLASH_CURRENT_MASK, val);
+				if (rc) {
+					dev_err(&led->pdev->dev,
+						"current reg write failed\n");
+					goto exit_flash_led_work;
+				}
+			}
+		}
+
+		/* Program the safety timer from the configured duration. */
+		val = (u8)((flash_node->duration - FLASH_DURATION_DIVIDER)
+						/ FLASH_DURATION_DIVIDER);
+		rc = qpnp_led_masked_write(led,
+			FLASH_SAFETY_TIMER(led->base),
+			FLASH_SAFETY_TIMER_MASK, val);
+		if (rc) {
+			dev_err(&led->pdev->dev,
+				"Safety timer reg write failed\n");
+			goto exit_flash_led_work;
+		}
+
+		rc = qpnp_led_masked_write(led,
+			FLASH_MAX_CURRENT(led->base),
+			FLASH_CURRENT_MASK, FLASH_MAX_LEVEL);
+		if (rc) {
+			dev_err(&led->pdev->dev,
+				"Max current reg write failed\n");
+			goto exit_flash_led_work;
+		}
+
+		if (!led->charging_enabled) {
+			rc = qpnp_led_masked_write(led,
+				FLASH_MODULE_ENABLE_CTRL(led->base),
+				FLASH_MODULE_ENABLE, FLASH_MODULE_ENABLE);
+			if (rc) {
+				dev_err(&led->pdev->dev,
+					"Module enable reg write failed\n");
+				goto exit_flash_led_work;
+			}
+
+			/* Allow the boost to ramp up before strobing. */
+			usleep_range(FLASH_RAMP_UP_DELAY_US_MIN,
+						FLASH_RAMP_UP_DELAY_US_MAX);
+		}
+
+		/* PMI8996 < rev3 workaround: drop charger input limit. */
+		if (led->revid_data->pmic_subtype == PMI8996_SUBTYPE &&
+						!led->revid_data->rev3) {
+			rc = power_supply_set_property(led->battery_psy,
+						POWER_SUPPLY_PROP_FLASH_TRIGGER,
+							&psy_prop);
+			if (rc) {
+				dev_err(&led->pdev->dev,
+				"Failed to disable charger i/p curr limit\n");
+				goto exit_flash_led_work;
+			}
+		}
+
+		if (led->pdata->hdrm_sns_ch0_en ||
+					led->pdata->hdrm_sns_ch1_en) {
+			if (flash_node->id == FLASH_LED_SWITCH) {
+				rc = qpnp_led_masked_write(led,
+					FLASH_HDRM_SNS_ENABLE_CTRL0(led->base),
+					FLASH_LED_HDRM_SNS_ENABLE_MASK,
+					(flash_node->trigger &
+					FLASH_LED0_TRIGGER ?
+					FLASH_LED_HDRM_SNS_ENABLE :
+					FLASH_LED_HDRM_SNS_DISABLE));
+				if (rc) {
+					dev_err(&led->pdev->dev,
+					"Headroom sense enable failed\n");
+					goto exit_flash_led_work;
+				}
+
+				rc = qpnp_led_masked_write(led,
+					FLASH_HDRM_SNS_ENABLE_CTRL1(led->base),
+					FLASH_LED_HDRM_SNS_ENABLE_MASK,
+					(flash_node->trigger &
+					FLASH_LED1_TRIGGER ?
+					FLASH_LED_HDRM_SNS_ENABLE :
+					FLASH_LED_HDRM_SNS_DISABLE));
+				if (rc) {
+					dev_err(&led->pdev->dev,
+					"Headroom sense enable failed\n");
+					goto exit_flash_led_work;
+				}
+			} else if (flash_node->id == FLASH_LED_0) {
+				rc = qpnp_led_masked_write(led,
+					FLASH_HDRM_SNS_ENABLE_CTRL0(led->base),
+					FLASH_LED_HDRM_SNS_ENABLE_MASK,
+					FLASH_LED_HDRM_SNS_ENABLE);
+				if (rc) {
+					dev_err(&led->pdev->dev,
+					"Headroom sense disable failed\n");
+					goto exit_flash_led_work;
+				}
+			} else if (flash_node->id == FLASH_LED_1) {
+				rc = qpnp_led_masked_write(led,
+					FLASH_HDRM_SNS_ENABLE_CTRL1(led->base),
+					FLASH_LED_HDRM_SNS_ENABLE_MASK,
+					FLASH_LED_HDRM_SNS_ENABLE);
+				if (rc) {
+					dev_err(&led->pdev->dev,
+					"Headroom sense disable failed\n");
+					goto exit_flash_led_work;
+				}
+			}
+		}
+
+		/* Assert the strobe to fire the flash. */
+		rc = qpnp_led_masked_write(led,
+			FLASH_LED_STROBE_CTRL(led->base),
+			(flash_node->id == FLASH_LED_SWITCH ? FLASH_STROBE_MASK
+						| FLASH_LED_STROBE_TYPE_HW
+						: flash_node->trigger |
+						FLASH_LED_STROBE_TYPE_HW),
+						flash_node->trigger);
+		if (rc) {
+			dev_err(&led->pdev->dev, "Strobe reg write failed\n");
+			goto exit_flash_led_work;
+		}
+
+		if (led->strobe_debug && led->dbg_feature_en) {
+			udelay(2000);
+			rc = regmap_read(led->regmap,
+					FLASH_LED_FAULT_STATUS(led->base),
+					&temp);
+			if (rc) {
+				dev_err(&led->pdev->dev,
+				"Unable to read from addr= %x, rc(%d)\n",
+				FLASH_LED_FAULT_STATUS(led->base), rc);
+				goto exit_flash_led_work;
+			}
+			led->fault_reg = temp;
+		}
+	} else {
+		pr_err("Both Torch and Flash cannot be select at same time\n");
+		for (i = 0; i < led->num_leds; i++)
+			led->flash_node[i].flash_on = false;
+		goto turn_off;
+	}
+
+	flash_node->flash_on = true;
+	mutex_unlock(&led->flash_led_lock);
+
+	return;
+
+turn_off:
+	/* Clear this LED's trigger bit from the switch node's mask. */
+	if (led->flash_node[led->num_leds - 1].id == FLASH_LED_SWITCH &&
+					flash_node->id != FLASH_LED_SWITCH)
+		led->flash_node[led->num_leds - 1].trigger &=
+						~(0x80 >> flash_node->id);
+	if (flash_node->type == TORCH) {
+		/*
+		 * Checking LED fault status detects hardware open fault.
+		 * If fault occurs, all subsequent LED enablement requests
+		 * will be rejected to protect hardware.
+		 */
+		rc = regmap_read(led->regmap,
+			FLASH_LED_FAULT_STATUS(led->base), &temp);
+		if (rc) {
+			dev_err(&led->pdev->dev,
+				"Failed to read out fault status register\n");
+			goto exit_flash_led_work;
+		}
+
+		/*
+		 * NOTE(review): the register value was read into 'temp'
+		 * above, but 'val' (stale, last torch current level) is
+		 * tested here -- this looks like a bug; confirm whether
+		 * 'temp' was intended.
+		 */
+		led->open_fault |= (val & FLASH_LED_OPEN_FAULT_DETECTED);
+	}
+
+	rc = qpnp_led_masked_write(led,
+		FLASH_LED_STROBE_CTRL(led->base),
+		(flash_node->id == FLASH_LED_SWITCH ? FLASH_STROBE_MASK
+					| FLASH_LED_STROBE_TYPE_HW
+						: flash_node->trigger
+					| FLASH_LED_STROBE_TYPE_HW),
+					FLASH_LED_DISABLE);
+	if (rc) {
+		dev_err(&led->pdev->dev, "Strobe disable failed\n");
+		goto exit_flash_led_work;
+	}
+
+	/* Let the current ramp down before disabling the module. */
+	usleep_range(FLASH_RAMP_DN_DELAY_US_MIN, FLASH_RAMP_DN_DELAY_US_MAX);
+exit_flash_hdrm_sns:
+	if (led->pdata->hdrm_sns_ch0_en) {
+		if (flash_node->id == FLASH_LED_0 ||
+				flash_node->id == FLASH_LED_SWITCH) {
+			rc = qpnp_led_masked_write(led,
+					FLASH_HDRM_SNS_ENABLE_CTRL0(led->base),
+					FLASH_LED_HDRM_SNS_ENABLE_MASK,
+					FLASH_LED_HDRM_SNS_DISABLE);
+			if (rc) {
+				dev_err(&led->pdev->dev,
+					"Headroom sense disable failed\n");
+				/*
+				 * NOTE(review): this jumps back to the top of
+				 * its own label; a persistently failing write
+				 * would loop forever -- confirm intent.
+				 */
+				goto exit_flash_hdrm_sns;
+			}
+		}
+	}
+
+	if (led->pdata->hdrm_sns_ch1_en) {
+		if (flash_node->id == FLASH_LED_1 ||
+				flash_node->id == FLASH_LED_SWITCH) {
+			rc = qpnp_led_masked_write(led,
+					FLASH_HDRM_SNS_ENABLE_CTRL1(led->base),
+					FLASH_LED_HDRM_SNS_ENABLE_MASK,
+					FLASH_LED_HDRM_SNS_DISABLE);
+			if (rc) {
+				dev_err(&led->pdev->dev,
+					"Headroom sense disable failed\n");
+				goto exit_flash_hdrm_sns;
+			}
+		}
+	}
+exit_flash_led_work:
+	rc = qpnp_flash_led_module_disable(led, flash_node);
+	if (rc) {
+		dev_err(&led->pdev->dev, "Module disable failed\n");
+		/*
+		 * NOTE(review): jumps back to its own label; a persistent
+		 * module-disable failure would spin here -- confirm intent.
+		 */
+		goto exit_flash_led_work;
+	}
+error_enable_gpio:
+	if (flash_node->flash_on && flash_node->num_regulators > 0)
+		flash_regulator_enable(led, flash_node, false);
+
+	flash_node->flash_on = false;
+	mutex_unlock(&led->flash_led_lock);
+}
+
+/*
+ * led_classdev brightness setter.  Validates and clamps the requested
+ * value, caches the programmed current on the node (mirroring type and
+ * max current onto the switch node when one exists -- the switch is
+ * always the last entry in flash_node[]), then defers the hardware
+ * programming to qpnp_flash_led_work() on the ordered workqueue.
+ */
+static void qpnp_flash_led_brightness_set(struct led_classdev *led_cdev,
+						enum led_brightness value)
+{
+	struct flash_node_data *flash_node;
+	struct qpnp_flash_led *led;
+
+	flash_node = container_of(led_cdev, struct flash_node_data, cdev);
+	led = dev_get_drvdata(&flash_node->pdev->dev);
+
+	if (value < LED_OFF) {
+		pr_err("Invalid brightness value\n");
+		return;
+	}
+
+	/* Clamp to the classdev's advertised maximum. */
+	if (value > flash_node->cdev.max_brightness)
+		value = flash_node->cdev.max_brightness;
+
+	flash_node->cdev.brightness = value;
+	if (led->flash_node[led->num_leds - 1].id ==
+						FLASH_LED_SWITCH) {
+		/* Propagate this node's mode onto the switch node. */
+		if (flash_node->type == TORCH)
+			led->flash_node[led->num_leds - 1].type = TORCH;
+		else if (flash_node->type == FLASH)
+			led->flash_node[led->num_leds - 1].type = FLASH;
+
+		led->flash_node[led->num_leds - 1].max_current
+						= flash_node->max_current;
+
+		if (flash_node->id == FLASH_LED_0 ||
+					 flash_node->id == FLASH_LED_1) {
+			/* Enforce the hardware's minimum non-zero current. */
+			if (value < FLASH_LED_MIN_CURRENT_MA && value != 0)
+				value = FLASH_LED_MIN_CURRENT_MA;
+
+			flash_node->prgm_current = value;
+			flash_node->flash_on = value ? true : false;
+		} else if (flash_node->id == FLASH_LED_SWITCH) {
+			/* Switch turned off: clear both channel currents. */
+			if (!value) {
+				flash_node->prgm_current = 0;
+				flash_node->prgm_current2 = 0;
+			}
+		}
+	} else {
+		if (value < FLASH_LED_MIN_CURRENT_MA && value != 0)
+			value = FLASH_LED_MIN_CURRENT_MA;
+		flash_node->prgm_current = value;
+	}
+
+	queue_work(led->ordered_workq, &flash_node->work);
+}
+
+/*
+ * One-time hardware initialization: puts the module and strobe into a
+ * known disabled state, then programs timer mode, headroom, startup
+ * delay, clamp current, VREG_OK source, fault detection, interrupt
+ * masks, OTST2 reset-follow, and the thermal/ramp/droop protections
+ * from platform data.  Finally resolves the "battery" power supply.
+ *
+ * Returns 0 on success or a negative errno.  The register write order
+ * below follows the hardware init sequence -- do not reorder.
+ */
+static int qpnp_flash_led_init_settings(struct qpnp_flash_led *led)
+{
+	int rc;
+	u8 val, temp_val;
+	uint val_int;
+
+	/* Start from a disabled module and de-asserted strobe. */
+	rc = qpnp_led_masked_write(led,
+			FLASH_MODULE_ENABLE_CTRL(led->base),
+			FLASH_MODULE_ENABLE_MASK,
+			FLASH_LED_MODULE_CTRL_DEFAULT);
+	if (rc) {
+		dev_err(&led->pdev->dev, "Module disable failed\n");
+		return rc;
+	}
+
+	rc = qpnp_led_masked_write(led,
+			FLASH_LED_STROBE_CTRL(led->base),
+			FLASH_STROBE_MASK, FLASH_LED_DISABLE);
+	if (rc) {
+		dev_err(&led->pdev->dev, "Strobe disable failed\n");
+		return rc;
+	}
+
+	rc = qpnp_led_masked_write(led,
+					FLASH_LED_TMR_CTRL(led->base),
+					FLASH_TMR_MASK, FLASH_TMR_SAFETY);
+	if (rc) {
+		dev_err(&led->pdev->dev,
+			"LED timer ctrl reg write failed(%d)\n", rc);
+		return rc;
+	}
+
+	/* Headroom voltage, converted from mV to register steps. */
+	val = (u8)(led->pdata->headroom / FLASH_LED_HEADROOM_DIVIDER -
+						FLASH_LED_HEADROOM_OFFSET);
+	rc = qpnp_led_masked_write(led,
+						FLASH_HEADROOM(led->base),
+						FLASH_HEADROOM_MASK, val);
+	if (rc) {
+		dev_err(&led->pdev->dev, "Headroom reg write failed\n");
+		return rc;
+	}
+
+	val = qpnp_flash_led_get_startup_dly(led->pdata->startup_dly);
+
+	rc = qpnp_led_masked_write(led,
+					FLASH_STARTUP_DELAY(led->base),
+						FLASH_STARTUP_DLY_MASK, val);
+	if (rc) {
+		dev_err(&led->pdev->dev, "Startup delay reg write failed\n");
+		return rc;
+	}
+
+	/* Clamp current, scaled from mA to the register's level range. */
+	val = (u8)(led->pdata->clamp_current * FLASH_MAX_LEVEL /
+						FLASH_LED_MAX_CURRENT_MA);
+	rc = qpnp_led_masked_write(led,
+					FLASH_CLAMP_CURRENT(led->base),
+						FLASH_CURRENT_MASK, val);
+	if (rc) {
+		dev_err(&led->pdev->dev, "Clamp current reg write failed\n");
+		return rc;
+	}
+
+	/* VREG_OK comes from HW (PMIC charger) or is forced by SW. */
+	if (led->pdata->pmic_charger_support)
+		val = FLASH_LED_FLASH_HW_VREG_OK;
+	else
+		val = FLASH_LED_FLASH_SW_VREG_OK;
+	rc = qpnp_led_masked_write(led,
+					FLASH_VREG_OK_FORCE(led->base),
+						FLASH_VREG_OK_FORCE_MASK, val);
+	if (rc) {
+		dev_err(&led->pdev->dev, "VREG OK force reg write failed\n");
+		return rc;
+	}
+
+	if (led->pdata->self_check_en)
+		val = FLASH_MODULE_ENABLE;
+	else
+		val = FLASH_LED_DISABLE;
+	rc = qpnp_led_masked_write(led,
+					FLASH_FAULT_DETECT(led->base),
+						FLASH_FAULT_DETECT_MASK, val);
+	if (rc) {
+		dev_err(&led->pdev->dev, "Fault detect reg write failed\n");
+		return rc;
+	}
+
+	val = 0x0;
+	val |= led->pdata->mask3_en << FLASH_LED_MASK3_ENABLE_SHIFT;
+	val |= FLASH_LED_MASK_MODULE_MASK2_ENABLE;
+	rc = qpnp_led_masked_write(led, FLASH_MASK_ENABLE(led->base),
+				FLASH_MASK_MODULE_CONTRL_MASK, val);
+	if (rc) {
+		dev_err(&led->pdev->dev, "Mask module enable failed\n");
+		return rc;
+	}
+
+	/* Read-modify-write of the peripheral reset control register. */
+	rc = regmap_read(led->regmap, FLASH_PERPH_RESET_CTRL(led->base),
+			&val_int);
+	if (rc) {
+		dev_err(&led->pdev->dev,
+			"Unable to read from address %x, rc(%d)\n",
+			FLASH_PERPH_RESET_CTRL(led->base), rc);
+		return -EINVAL;
+	}
+	val = (u8)val_int;
+
+	/*
+	 * Program the OTST2 reset-follow bit.  PERPH_RESET_CTRL is
+	 * protected, so the secure register must be unlocked first.
+	 * (The unlock/write sequence is identical for both settings,
+	 * so it is done once with only the bit value differing.)
+	 */
+	rc = qpnp_led_masked_write(led,
+			FLASH_LED_UNLOCK_SECURE(led->base),
+			FLASH_SECURE_MASK, FLASH_UNLOCK_SECURE);
+	if (rc) {
+		dev_err(&led->pdev->dev, "Secure reg write failed\n");
+		return -EINVAL;
+	}
+
+	if (led->pdata->follow_rb_disable)
+		val |= FLASH_FOLLOW_OTST2_RB_MASK;
+	else
+		val &= ~FLASH_FOLLOW_OTST2_RB_MASK;
+
+	rc = qpnp_led_masked_write(led,
+			FLASH_PERPH_RESET_CTRL(led->base),
+			FLASH_FOLLOW_OTST2_RB_MASK, val);
+	if (rc) {
+		dev_err(&led->pdev->dev,
+			"failed to reset OTST2_RB bit\n");
+		return rc;
+	}
+
+	/* Thermal derating: enable bit[7], rate bits[6:3], threshold below. */
+	if (!led->pdata->thermal_derate_en)
+		val = 0x0;
+	else {
+		val = led->pdata->thermal_derate_en << 7;
+		val |= led->pdata->thermal_derate_rate << 3;
+		val |= (led->pdata->thermal_derate_threshold -
+				FLASH_LED_THERMAL_THRESHOLD_MIN) /
+				FLASH_LED_THERMAL_DEVIDER;
+	}
+	rc = qpnp_led_masked_write(led,
+					FLASH_THERMAL_DRATE(led->base),
+					FLASH_THERMAL_DERATE_MASK, val);
+	if (rc) {
+		dev_err(&led->pdev->dev, "Thermal derate reg write failed\n");
+		return rc;
+	}
+
+	if (!led->pdata->current_ramp_en)
+		val = 0x0;
+	else {
+		val = led->pdata->current_ramp_en << 7;
+		val |= led->pdata->ramp_up_step << 3;
+		val |= led->pdata->ramp_dn_step;
+	}
+	rc = qpnp_led_masked_write(led,
+						FLASH_CURRENT_RAMP(led->base),
+						FLASH_CURRENT_RAMP_MASK, val);
+	if (rc) {
+		dev_err(&led->pdev->dev, "Current ramp reg write failed\n");
+		return rc;
+	}
+
+	/* VPH power droop: enable bit[7], threshold bits[6:4], debounce. */
+	if (!led->pdata->vph_pwr_droop_en)
+		val = 0x0;
+	else {
+		val = led->pdata->vph_pwr_droop_en << 7;
+		val |= ((led->pdata->vph_pwr_droop_threshold -
+				FLASH_LED_VPH_DROOP_THRESHOLD_MIN_MV) /
+				FLASH_LED_VPH_DROOP_THRESHOLD_DIVIDER) << 4;
+		temp_val =
+			qpnp_flash_led_get_droop_debounce_time(
+				led->pdata->vph_pwr_droop_debounce_time);
+		if (temp_val == 0xFF) {
+			dev_err(&led->pdev->dev, "Invalid debounce time\n");
+			/* Return a negative errno, not the 0xFF sentinel. */
+			return -EINVAL;
+		}
+
+		val |= temp_val;
+	}
+	rc = qpnp_led_masked_write(led,
+						FLASH_VPH_PWR_DROOP(led->base),
+						FLASH_VPH_PWR_DROOP_MASK, val);
+	if (rc) {
+		dev_err(&led->pdev->dev, "VPH PWR droop reg write failed\n");
+		return rc;
+	}
+
+	led->battery_psy = power_supply_get_by_name("battery");
+	if (!led->battery_psy) {
+		dev_err(&led->pdev->dev,
+			"Failed to get battery power supply\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Parse the per-LED device-tree properties for one flash node: the
+ * "label" (flash/torch/switch), programmed current, hardware id,
+ * flash duration (flash/switch only), strobe trigger bits derived
+ * from the peripheral subtype, and the number of regulator children.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int qpnp_flash_led_parse_each_led_dt(struct qpnp_flash_led *led,
+					struct flash_node_data *flash_node)
+{
+	const char *temp_string;
+	struct device_node *node = flash_node->cdev.dev->of_node;
+	struct device_node *temp = NULL;
+	int rc = 0, num_regs = 0;
+	u32 val;
+
+	rc = of_property_read_string(node, "label", &temp_string);
+	if (!rc) {
+		if (strcmp(temp_string, "flash") == 0)
+			flash_node->type = FLASH;
+		else if (strcmp(temp_string, "torch") == 0)
+			flash_node->type = TORCH;
+		else if (strcmp(temp_string, "switch") == 0)
+			flash_node->type = SWITCH;
+		else {
+			dev_err(&led->pdev->dev, "Wrong flash LED type\n");
+			return -EINVAL;
+		}
+	} else if (rc < 0) {
+		dev_err(&led->pdev->dev, "Unable to read flash type\n");
+		return rc;
+	}
+
+	/* "qcom,current" is optional; enforce the hardware minimum. */
+	rc = of_property_read_u32(node, "qcom,current", &val);
+	if (!rc) {
+		if (val < FLASH_LED_MIN_CURRENT_MA)
+			val = FLASH_LED_MIN_CURRENT_MA;
+		flash_node->prgm_current = val;
+	} else if (rc != -EINVAL) {
+		dev_err(&led->pdev->dev, "Unable to read current\n");
+		return rc;
+	}
+
+	rc = of_property_read_u32(node, "qcom,id", &val);
+	if (!rc)
+		flash_node->id = (u8)val;
+	else if (rc != -EINVAL) {
+		dev_err(&led->pdev->dev, "Unable to read led ID\n");
+		return rc;
+	}
+
+	/* Duration only applies to flash pulses, not continuous torch. */
+	if (flash_node->type == SWITCH || flash_node->type == FLASH) {
+		rc = of_property_read_u32(node, "qcom,duration", &val);
+		if (!rc)
+			flash_node->duration = (u16)val;
+		else if (rc != -EINVAL) {
+			dev_err(&led->pdev->dev, "Unable to read duration\n");
+			return rc;
+		}
+	}
+
+	switch (led->peripheral_type) {
+	case FLASH_SUBTYPE_SINGLE:
+		flash_node->trigger = FLASH_LED0_TRIGGER;
+		break;
+	case FLASH_SUBTYPE_DUAL:
+		if (flash_node->id == FLASH_LED_0)
+			flash_node->trigger = FLASH_LED0_TRIGGER;
+		else if (flash_node->id == FLASH_LED_1)
+			flash_node->trigger = FLASH_LED1_TRIGGER;
+		break;
+	default:
+		/*
+		 * NOTE(review): an unknown peripheral type is only logged;
+		 * rc stays 0 and parsing continues with trigger unset --
+		 * confirm this is intentional.
+		 */
+		dev_err(&led->pdev->dev, "Invalid peripheral type\n");
+	}
+
+	/* Count regulator children so reg_data can be sized later. */
+	while ((temp = of_get_next_child(node, temp))) {
+		if (of_find_property(temp, "regulator-name", NULL))
+			num_regs++;
+	}
+
+	if (num_regs)
+		flash_node->num_regulators = num_regs;
+
+	return rc;
+}
+
+static int qpnp_flash_led_parse_common_dt(
+ struct qpnp_flash_led *led,
+ struct device_node *node)
+{
+ int rc;
+ u32 val, temp_val;
+ const char *temp;
+
+ led->pdata->headroom = FLASH_LED_HEADROOM_DEFAULT_MV;
+ rc = of_property_read_u32(node, "qcom,headroom", &val);
+ if (!rc)
+ led->pdata->headroom = (u16)val;
+ else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev, "Unable to read headroom\n");
+ return rc;
+ }
+
+ led->pdata->startup_dly = FLASH_LED_STARTUP_DELAY_DEFAULT_US;
+ rc = of_property_read_u32(node, "qcom,startup-dly", &val);
+ if (!rc)
+ led->pdata->startup_dly = (u8)val;
+ else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev, "Unable to read startup delay\n");
+ return rc;
+ }
+
+ led->pdata->clamp_current = FLASH_LED_CLAMP_CURRENT_DEFAULT_MA;
+ rc = of_property_read_u32(node, "qcom,clamp-current", &val);
+ if (!rc) {
+ if (val < FLASH_LED_MIN_CURRENT_MA)
+ val = FLASH_LED_MIN_CURRENT_MA;
+ led->pdata->clamp_current = (u16)val;
+ } else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev, "Unable to read clamp current\n");
+ return rc;
+ }
+
+ led->pdata->pmic_charger_support =
+ of_property_read_bool(node,
+ "qcom,pmic-charger-support");
+
+ led->pdata->self_check_en =
+ of_property_read_bool(node, "qcom,self-check-enabled");
+
+ led->pdata->thermal_derate_en =
+ of_property_read_bool(node,
+ "qcom,thermal-derate-enabled");
+
+ if (led->pdata->thermal_derate_en) {
+ led->pdata->thermal_derate_rate =
+ FLASH_LED_THERMAL_DERATE_RATE_DEFAULT_PERCENT;
+ rc = of_property_read_string(node, "qcom,thermal-derate-rate",
+ &temp);
+ if (!rc) {
+ temp_val =
+ qpnp_flash_led_get_thermal_derate_rate(temp);
+ if (temp_val < 0) {
+ dev_err(&led->pdev->dev,
+ "Invalid thermal derate rate\n");
+ return -EINVAL;
+ }
+
+ led->pdata->thermal_derate_rate = (u8)temp_val;
+ } else {
+ dev_err(&led->pdev->dev,
+ "Unable to read thermal derate rate\n");
+ return -EINVAL;
+ }
+
+ led->pdata->thermal_derate_threshold =
+ FLASH_LED_THERMAL_DERATE_THRESHOLD_DEFAULT_C;
+ rc = of_property_read_u32(node, "qcom,thermal-derate-threshold",
+ &val);
+ if (!rc)
+ led->pdata->thermal_derate_threshold = (u8)val;
+ else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev,
+ "Unable to read thermal derate threshold\n");
+ return rc;
+ }
+ }
+
+ led->pdata->current_ramp_en =
+ of_property_read_bool(node,
+ "qcom,current-ramp-enabled");
+ if (led->pdata->current_ramp_en) {
+ led->pdata->ramp_up_step = FLASH_LED_RAMP_UP_STEP_DEFAULT_US;
+ rc = of_property_read_string(node, "qcom,ramp_up_step", &temp);
+ if (!rc) {
+ temp_val = qpnp_flash_led_get_ramp_step(temp);
+ if (temp_val < 0) {
+ dev_err(&led->pdev->dev,
+ "Invalid ramp up step values\n");
+ return -EINVAL;
+ }
+ led->pdata->ramp_up_step = (u8)temp_val;
+ } else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev,
+ "Unable to read ramp up steps\n");
+ return rc;
+ }
+
+ led->pdata->ramp_dn_step = FLASH_LED_RAMP_DN_STEP_DEFAULT_US;
+ rc = of_property_read_string(node, "qcom,ramp_dn_step", &temp);
+ if (!rc) {
+ temp_val = qpnp_flash_led_get_ramp_step(temp);
+ if (temp_val < 0) {
+ dev_err(&led->pdev->dev,
+ "Invalid ramp down step values\n");
+ return rc;
+ }
+ led->pdata->ramp_dn_step = (u8)temp_val;
+ } else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev,
+ "Unable to read ramp down steps\n");
+ return rc;
+ }
+ }
+
+ led->pdata->vph_pwr_droop_en = of_property_read_bool(node,
+ "qcom,vph-pwr-droop-enabled");
+ if (led->pdata->vph_pwr_droop_en) {
+ led->pdata->vph_pwr_droop_threshold =
+ FLASH_LED_VPH_PWR_DROOP_THRESHOLD_DEFAULT_MV;
+ rc = of_property_read_u32(node,
+ "qcom,vph-pwr-droop-threshold", &val);
+ if (!rc) {
+ led->pdata->vph_pwr_droop_threshold = (u16)val;
+ } else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev,
+ "Unable to read VPH PWR droop threshold\n");
+ return rc;
+ }
+
+ led->pdata->vph_pwr_droop_debounce_time =
+ FLASH_LED_VPH_PWR_DROOP_DEBOUNCE_TIME_DEFAULT_US;
+ rc = of_property_read_u32(node,
+ "qcom,vph-pwr-droop-debounce-time", &val);
+ if (!rc)
+ led->pdata->vph_pwr_droop_debounce_time = (u8)val;
+ else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev,
+ "Unable to read VPH PWR droop debounce time\n");
+ return rc;
+ }
+ }
+
+ led->pdata->hdrm_sns_ch0_en = of_property_read_bool(node,
+ "qcom,headroom-sense-ch0-enabled");
+
+ led->pdata->hdrm_sns_ch1_en = of_property_read_bool(node,
+ "qcom,headroom-sense-ch1-enabled");
+
+ led->pdata->power_detect_en = of_property_read_bool(node,
+ "qcom,power-detect-enabled");
+
+ led->pdata->mask3_en = of_property_read_bool(node,
+ "qcom,otst2-module-enabled");
+
+ led->pdata->follow_rb_disable = of_property_read_bool(node,
+ "qcom,follow-otst2-rb-disabled");
+
+ led->pdata->die_current_derate_en = of_property_read_bool(node,
+ "qcom,die-current-derate-enabled");
+
+ if (led->pdata->die_current_derate_en) {
+ led->vadc_dev = qpnp_get_vadc(&led->pdev->dev, "die-temp");
+ if (IS_ERR(led->vadc_dev)) {
+ pr_err("VADC channel property Missing\n");
+ return -EINVAL;
+ }
+
+ if (of_find_property(node, "qcom,die-temp-threshold",
+ &led->pdata->temp_threshold_num)) {
+ if (led->pdata->temp_threshold_num > 0) {
+ led->pdata->die_temp_threshold_degc =
+ devm_kzalloc(&led->pdev->dev,
+ led->pdata->temp_threshold_num,
+ GFP_KERNEL);
+
+ if (led->pdata->die_temp_threshold_degc
+ == NULL) {
+ dev_err(&led->pdev->dev,
+ "failed to allocate die temp array\n");
+ return -ENOMEM;
+ }
+ led->pdata->temp_threshold_num /=
+ sizeof(unsigned int);
+
+ rc = of_property_read_u32_array(node,
+ "qcom,die-temp-threshold",
+ led->pdata->die_temp_threshold_degc,
+ led->pdata->temp_threshold_num);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "couldn't read temp threshold rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+ }
+
+ if (of_find_property(node, "qcom,die-temp-derate-current",
+ &led->pdata->temp_derate_curr_num)) {
+ if (led->pdata->temp_derate_curr_num > 0) {
+ led->pdata->die_temp_derate_curr_ma =
+ devm_kzalloc(&led->pdev->dev,
+ led->pdata->temp_derate_curr_num,
+ GFP_KERNEL);
+ if (led->pdata->die_temp_derate_curr_ma
+ == NULL) {
+ dev_err(&led->pdev->dev,
+ "failed to allocate die derate current array\n");
+ return -ENOMEM;
+ }
+ led->pdata->temp_derate_curr_num /=
+ sizeof(unsigned int);
+
+ rc = of_property_read_u32_array(node,
+ "qcom,die-temp-derate-current",
+ led->pdata->die_temp_derate_curr_ma,
+ led->pdata->temp_derate_curr_num);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "couldn't read temp limits rc =%d\n",
+ rc);
+ return rc;
+ }
+ }
+ }
+ if (led->pdata->temp_threshold_num !=
+ led->pdata->temp_derate_curr_num) {
+ pr_err("Both array size are not same\n");
+ return -EINVAL;
+ }
+ }
+
+ led->pinctrl = devm_pinctrl_get(&led->pdev->dev);
+ if (IS_ERR_OR_NULL(led->pinctrl)) {
+ dev_err(&led->pdev->dev, "Unable to acquire pinctrl\n");
+ led->pinctrl = NULL;
+ return 0;
+ }
+
+ led->gpio_state_active = pinctrl_lookup_state(led->pinctrl,
+ "flash_led_enable");
+ if (IS_ERR_OR_NULL(led->gpio_state_active)) {
+ dev_err(&led->pdev->dev, "Cannot lookup LED active state\n");
+ devm_pinctrl_put(led->pinctrl);
+ led->pinctrl = NULL;
+ return PTR_ERR(led->gpio_state_active);
+ }
+
+ led->gpio_state_suspend = pinctrl_lookup_state(led->pinctrl,
+ "flash_led_disable");
+ if (IS_ERR_OR_NULL(led->gpio_state_suspend)) {
+ dev_err(&led->pdev->dev, "Cannot lookup LED disable state\n");
+ devm_pinctrl_put(led->pinctrl);
+ led->pinctrl = NULL;
+ return PTR_ERR(led->gpio_state_suspend);
+ }
+
+ return 0;
+}
+
+static int qpnp_flash_led_probe(struct platform_device *pdev)
+{
+ struct qpnp_flash_led *led;
+ unsigned int base;
+ struct device_node *node, *temp;
+ struct dentry *root, *file;
+ int rc, i = 0, j, num_leds = 0;
+ u32 val;
+
+ root = NULL;
+ node = pdev->dev.of_node;
+ if (node == NULL) {
+ dev_info(&pdev->dev, "No flash device defined\n");
+ return -ENODEV;
+ }
+
+ rc = of_property_read_u32(pdev->dev.of_node, "reg", &base);
+ if (rc < 0) {
+ dev_err(&pdev->dev,
+ "Couldn't find reg in node = %s rc = %d\n",
+ pdev->dev.of_node->full_name, rc);
+ return rc;
+ }
+
+ led = devm_kzalloc(&pdev->dev, sizeof(*led), GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
+ led->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!led->regmap) {
+ dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+ return -EINVAL;
+ }
+
+ led->base = base;
+ led->pdev = pdev;
+ led->current_addr = FLASH_LED0_CURRENT(led->base);
+ led->current2_addr = FLASH_LED1_CURRENT(led->base);
+
+ led->pdata = devm_kzalloc(&pdev->dev, sizeof(*led->pdata), GFP_KERNEL);
+ if (!led->pdata)
+ return -ENOMEM;
+
+ led->peripheral_type = (u8)qpnp_flash_led_get_peripheral_type(led);
+ if (led->peripheral_type < 0) {
+ dev_err(&pdev->dev, "Failed to get peripheral type\n");
+ return rc;
+ }
+
+ rc = qpnp_flash_led_parse_common_dt(led, node);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "Failed to get common config for flash LEDs\n");
+ return rc;
+ }
+
+ rc = qpnp_flash_led_init_settings(led);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to initialize flash LED\n");
+ return rc;
+ }
+
+ rc = qpnp_get_pmic_revid(led);
+ if (rc)
+ return rc;
+
+ temp = NULL;
+ while ((temp = of_get_next_child(node, temp)))
+ num_leds++;
+
+ if (!num_leds)
+ return -ECHILD;
+
+ led->flash_node = devm_kzalloc(&pdev->dev,
+ (sizeof(struct flash_node_data) * num_leds),
+ GFP_KERNEL);
+ if (!led->flash_node) {
+ dev_err(&pdev->dev, "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ mutex_init(&led->flash_led_lock);
+
+ led->ordered_workq = alloc_ordered_workqueue("flash_led_workqueue", 0);
+ if (!led->ordered_workq) {
+ dev_err(&pdev->dev, "Failed to allocate ordered workqueue\n");
+ return -ENOMEM;
+ }
+
+ for_each_child_of_node(node, temp) {
+ led->flash_node[i].cdev.brightness_set =
+ qpnp_flash_led_brightness_set;
+ led->flash_node[i].cdev.brightness_get =
+ qpnp_flash_led_brightness_get;
+ led->flash_node[i].pdev = pdev;
+
+ INIT_WORK(&led->flash_node[i].work, qpnp_flash_led_work);
+ rc = of_property_read_string(temp, "qcom,led-name",
+ &led->flash_node[i].cdev.name);
+ if (rc < 0) {
+ dev_err(&led->pdev->dev,
+ "Unable to read flash name\n");
+ return rc;
+ }
+
+ rc = of_property_read_string(temp, "qcom,default-led-trigger",
+ &led->flash_node[i].cdev.default_trigger);
+ if (rc < 0) {
+ dev_err(&led->pdev->dev,
+ "Unable to read trigger name\n");
+ return rc;
+ }
+
+ rc = of_property_read_u32(temp, "qcom,max-current", &val);
+ if (!rc) {
+ if (val < FLASH_LED_MIN_CURRENT_MA)
+ val = FLASH_LED_MIN_CURRENT_MA;
+ led->flash_node[i].max_current = (u16)val;
+ led->flash_node[i].cdev.max_brightness = val;
+ } else {
+ dev_err(&led->pdev->dev,
+ "Unable to read max current\n");
+ return rc;
+ }
+ rc = led_classdev_register(&pdev->dev,
+ &led->flash_node[i].cdev);
+ if (rc) {
+ dev_err(&pdev->dev, "Unable to register led\n");
+ goto error_led_register;
+ }
+
+ led->flash_node[i].cdev.dev->of_node = temp;
+
+ rc = qpnp_flash_led_parse_each_led_dt(led, &led->flash_node[i]);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "Failed to parse config for each LED\n");
+ goto error_led_register;
+ }
+
+ if (led->flash_node[i].num_regulators) {
+ rc = flash_regulator_parse_dt(led, &led->flash_node[i]);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "Unable to parse regulator data\n");
+ goto error_led_register;
+ }
+
+ rc = flash_regulator_setup(led, &led->flash_node[i],
+ true);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "Unable to set up regulator\n");
+ goto error_led_register;
+ }
+ }
+
+ for (j = 0; j < ARRAY_SIZE(qpnp_flash_led_attrs); j++) {
+ rc =
+ sysfs_create_file(&led->flash_node[i].cdev.dev->kobj,
+ &qpnp_flash_led_attrs[j].attr);
+ if (rc)
+ goto error_led_register;
+ }
+
+ i++;
+ }
+
+ led->num_leds = i;
+
+ root = debugfs_create_dir("flashLED", NULL);
+ if (IS_ERR_OR_NULL(root)) {
+ pr_err("Error creating top level directory err%ld",
+ (long)root);
+ if (PTR_ERR(root) == -ENODEV)
+ pr_err("debugfs is not enabled in kernel");
+ goto error_led_debugfs;
+ }
+
+ led->dbgfs_root = root;
+ file = debugfs_create_file("enable_debug", 0600, root, led,
+ &flash_led_dfs_dbg_feature_fops);
+ if (!file) {
+ pr_err("error creating 'enable_debug' entry\n");
+ goto error_led_debugfs;
+ }
+
+ file = debugfs_create_file("latched", 0600, root, led,
+ &flash_led_dfs_latched_reg_fops);
+ if (!file) {
+ pr_err("error creating 'latched' entry\n");
+ goto error_led_debugfs;
+ }
+
+ file = debugfs_create_file("strobe", 0600, root, led,
+ &flash_led_dfs_strobe_reg_fops);
+ if (!file) {
+ pr_err("error creating 'strobe' entry\n");
+ goto error_led_debugfs;
+ }
+
+ dev_set_drvdata(&pdev->dev, led);
+
+ return 0;
+
+error_led_debugfs:
+ i = led->num_leds - 1;
+ j = ARRAY_SIZE(qpnp_flash_led_attrs) - 1;
+error_led_register:
+ for (; i >= 0; i--) {
+ for (; j >= 0; j--)
+ sysfs_remove_file(&led->flash_node[i].cdev.dev->kobj,
+ &qpnp_flash_led_attrs[j].attr);
+ j = ARRAY_SIZE(qpnp_flash_led_attrs) - 1;
+ led_classdev_unregister(&led->flash_node[i].cdev);
+ }
+ debugfs_remove_recursive(root);
+ mutex_destroy(&led->flash_led_lock);
+ destroy_workqueue(led->ordered_workq);
+
+ return rc;
+}
+
+static int qpnp_flash_led_remove(struct platform_device *pdev)
+{
+ struct qpnp_flash_led *led = dev_get_drvdata(&pdev->dev);
+ int i, j;
+
+ for (i = led->num_leds - 1; i >= 0; i--) {
+ if (led->flash_node[i].reg_data) {
+ if (led->flash_node[i].flash_on)
+ flash_regulator_enable(led,
+ &led->flash_node[i], false);
+ flash_regulator_setup(led, &led->flash_node[i],
+ false);
+ }
+ for (j = 0; j < ARRAY_SIZE(qpnp_flash_led_attrs); j++)
+ sysfs_remove_file(&led->flash_node[i].cdev.dev->kobj,
+ &qpnp_flash_led_attrs[j].attr);
+ led_classdev_unregister(&led->flash_node[i].cdev);
+ }
+ debugfs_remove_recursive(led->dbgfs_root);
+ mutex_destroy(&led->flash_led_lock);
+ destroy_workqueue(led->ordered_workq);
+
+ return 0;
+}
+
+static const struct of_device_id spmi_match_table[] = {
+ { .compatible = "qcom,qpnp-flash-led",},
+ { },
+};
+
+static struct platform_driver qpnp_flash_led_driver = {
+ .driver = {
+ .name = "qcom,qpnp-flash-led",
+ .of_match_table = spmi_match_table,
+ },
+ .probe = qpnp_flash_led_probe,
+ .remove = qpnp_flash_led_remove,
+};
+
+static int __init qpnp_flash_led_init(void)
+{
+ return platform_driver_register(&qpnp_flash_led_driver);
+}
+late_initcall(qpnp_flash_led_init);
+
+static void __exit qpnp_flash_led_exit(void)
+{
+ platform_driver_unregister(&qpnp_flash_led_driver);
+}
+module_exit(qpnp_flash_led_exit);
+
+MODULE_DESCRIPTION("QPNP Flash LED driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("leds:leds-qpnp-flash");
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index 4003831..7b1935a 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -3118,6 +3118,9 @@
state->pdata.blank_data = 1;
state->pdata.op_format_mode_sel = ADV7604_OP_FORMAT_MODE0;
state->pdata.bus_order = ADV7604_BUS_ORDER_RGB;
+ state->pdata.dr_str_data = ADV76XX_DR_STR_MEDIUM_HIGH;
+ state->pdata.dr_str_clk = ADV76XX_DR_STR_MEDIUM_HIGH;
+ state->pdata.dr_str_sync = ADV76XX_DR_STR_MEDIUM_HIGH;
return 0;
}
diff --git a/drivers/media/pci/bt8xx/dvb-bt8xx.c b/drivers/media/pci/bt8xx/dvb-bt8xx.c
index e69d338..ae550a1 100644
--- a/drivers/media/pci/bt8xx/dvb-bt8xx.c
+++ b/drivers/media/pci/bt8xx/dvb-bt8xx.c
@@ -680,6 +680,7 @@
/* DST is not a frontend, attaching the ASIC */
if (dvb_attach(dst_attach, state, &card->dvb_adapter) == NULL) {
pr_err("%s: Could not find a Twinhan DST\n", __func__);
+ kfree(state);
break;
}
/* Attach other DST peripherals if any */
diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
index 518ad34..7f92144 100644
--- a/drivers/media/platform/exynos4-is/fimc-is.c
+++ b/drivers/media/platform/exynos4-is/fimc-is.c
@@ -825,12 +825,13 @@
is->irq = irq_of_parse_and_map(dev->of_node, 0);
if (!is->irq) {
dev_err(dev, "no irq found\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_iounmap;
}
ret = fimc_is_get_clocks(is);
if (ret < 0)
- return ret;
+ goto err_iounmap;
platform_set_drvdata(pdev, is);
@@ -891,6 +892,8 @@
free_irq(is->irq, is);
err_clk:
fimc_is_put_clocks(is);
+err_iounmap:
+ iounmap(is->pmu_regs);
return ret;
}
@@ -947,6 +950,7 @@
fimc_is_unregister_subdevs(is);
vb2_dma_contig_clear_max_seg_size(dev);
fimc_is_put_clocks(is);
+ iounmap(is->pmu_regs);
fimc_is_debugfs_remove(is);
release_firmware(is->fw.f_w);
fimc_is_free_cpu_memory(is);
diff --git a/drivers/media/platform/msm/camera/Makefile b/drivers/media/platform/msm/camera/Makefile
index 48fa1c0..9e0aee9 100644
--- a/drivers/media/platform/msm/camera/Makefile
+++ b/drivers/media/platform/msm/camera/Makefile
@@ -10,3 +10,4 @@
obj-$(CONFIG_SPECTRA_CAMERA) += cam_icp/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_jpeg/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_fd/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_lrme/
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.c
index 3fbb3f0..6d699cf 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.c
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.c
@@ -67,11 +67,15 @@
return false;
}
-void cam_cdm_cpas_cb(uint32_t client_handle, void *userdata,
- enum cam_camnoc_irq_type evt_type, uint32_t evt_data)
+bool cam_cdm_cpas_cb(uint32_t client_handle, void *userdata,
+ struct cam_cpas_irq_data *irq_data)
{
- CAM_ERR(CAM_CDM, "CPAS error callback type=%d with data=%x", evt_type,
- evt_data);
+ if (!irq_data)
+ return false;
+
+ CAM_DBG(CAM_CDM, "CPAS error callback type=%d", irq_data->irq_type);
+
+ return false;
}
struct cam_cdm_utils_ops *cam_cdm_get_ops(
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.h b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.h
index fa3ae04..497832b 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.h
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.h
@@ -32,8 +32,8 @@
uint32_t arg_size);
bool cam_cdm_set_cam_hw_version(
uint32_t ver, struct cam_hw_version *cam_version);
-void cam_cdm_cpas_cb(uint32_t client_handle, void *userdata,
- enum cam_camnoc_irq_type evt_type, uint32_t evt_data);
+bool cam_cdm_cpas_cb(uint32_t client_handle, void *userdata,
+ struct cam_cpas_irq_data *irq_data);
struct cam_cdm_utils_ops *cam_cdm_get_ops(
uint32_t ver, struct cam_hw_version *cam_version, bool by_cam_version);
int cam_virtual_cdm_submit_bl(struct cam_hw_info *cdm_hw,
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.c b/drivers/media/platform/msm/camera/cam_core/cam_context.c
index d039d75..84402e4 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context.c
@@ -134,8 +134,8 @@
rc = ctx->state_machine[ctx->state].crm_ops.unlink(
ctx, unlink);
} else {
- CAM_ERR(CAM_CORE, "No crm unlink in dev %d, state %d",
- ctx->dev_hdl, ctx->state);
+ CAM_ERR(CAM_CORE, "No crm unlink in dev %d, name %s, state %d",
+ ctx->dev_hdl, ctx->dev_name, ctx->state);
rc = -EPROTO;
}
mutex_unlock(&ctx->ctx_mutex);
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
index f8c0692..0a1c2cf 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
@@ -178,6 +178,7 @@
req->ctx = NULL;
req->flushed = 0;
spin_lock(&ctx->lock);
+ list_del_init(&req->list);
list_add_tail(&req->list, &ctx->free_req_list);
spin_unlock(&ctx->lock);
}
@@ -200,7 +201,6 @@
return -EINVAL;
}
- cam_context_stop_dev_to_hw(ctx);
arg.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
arg.active_req = false;
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
index 4b0cc74..0e5ce85 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
@@ -24,6 +24,18 @@
struct cam_camnoc_info *camnoc_info;
+#define CAMNOC_SLAVE_MAX_ERR_CODE 7
+static const char * const camnoc_salve_err_code[] = {
+ "Target Error", /* err code 0 */
+ "Address decode error", /* err code 1 */
+ "Unsupported request", /* err code 2 */
+ "Disconnected target", /* err code 3 */
+ "Security violation", /* err code 4 */
+ "Hidden security violation", /* err code 5 */
+ "Timeout Error", /* err code 6 */
+ "Unknown Error", /* unknown err code */
+};
+
static int cam_cpastop_get_hw_info(struct cam_hw_info *cpas_hw,
struct cam_cpas_hw_caps *hw_caps)
{
@@ -106,91 +118,155 @@
}
static int cam_cpastop_handle_errlogger(struct cam_cpas *cpas_core,
- struct cam_hw_soc_info *soc_info)
+ struct cam_hw_soc_info *soc_info,
+ struct cam_camnoc_irq_slave_err_data *slave_err)
{
- uint32_t reg_value[4];
- int i;
- int size = camnoc_info->error_logger_size;
int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
+ int err_code_index = 0;
- for (i = 0; (i + 3) < size; i = i + 4) {
- reg_value[0] = cam_io_r_mb(
- soc_info->reg_map[camnoc_index].mem_base +
- camnoc_info->error_logger[i]);
- reg_value[1] = cam_io_r_mb(
- soc_info->reg_map[camnoc_index].mem_base +
- camnoc_info->error_logger[i + 1]);
- reg_value[2] = cam_io_r_mb(
- soc_info->reg_map[camnoc_index].mem_base +
- camnoc_info->error_logger[i + 2]);
- reg_value[3] = cam_io_r_mb(
- soc_info->reg_map[camnoc_index].mem_base +
- camnoc_info->error_logger[i + 3]);
- CAM_ERR(CAM_CPAS,
- "offset[0x%x] values [0x%x] [0x%x] [0x%x] [0x%x]",
- camnoc_info->error_logger[i], reg_value[0],
- reg_value[1], reg_value[2], reg_value[3]);
+ if (!camnoc_info->err_logger) {
+ CAM_ERR_RATE_LIMIT(CAM_CPAS, "Invalid err logger info");
+ return -EINVAL;
}
- if ((i + 2) < size) {
- reg_value[0] = cam_io_r_mb(
- soc_info->reg_map[camnoc_index].mem_base +
- camnoc_info->error_logger[i]);
- reg_value[1] = cam_io_r_mb(
- soc_info->reg_map[camnoc_index].mem_base +
- camnoc_info->error_logger[i + 1]);
- reg_value[2] = cam_io_r_mb(
- soc_info->reg_map[camnoc_index].mem_base +
- camnoc_info->error_logger[i + 2]);
- CAM_ERR(CAM_CPAS, "offset[0x%x] values [0x%x] [0x%x] [0x%x]",
- camnoc_info->error_logger[i], reg_value[0],
- reg_value[1], reg_value[2]);
- i = i + 3;
- }
+ slave_err->mainctrl.value = cam_io_r_mb(
+ soc_info->reg_map[camnoc_index].mem_base +
+ camnoc_info->err_logger->mainctrl);
- if ((i + 1) < size) {
- reg_value[0] = cam_io_r_mb(
- soc_info->reg_map[camnoc_index].mem_base +
- camnoc_info->error_logger[i]);
- reg_value[1] = cam_io_r_mb(
- soc_info->reg_map[camnoc_index].mem_base +
- camnoc_info->error_logger[i + 1]);
- CAM_ERR(CAM_CPAS, "offset[0x%x] values [0x%x] [0x%x]",
- camnoc_info->error_logger[i], reg_value[0],
- reg_value[1]);
- i = i + 2;
- }
+ slave_err->errvld.value = cam_io_r_mb(
+ soc_info->reg_map[camnoc_index].mem_base +
+ camnoc_info->err_logger->errvld);
- if (i < size) {
- reg_value[0] = cam_io_r_mb(
- soc_info->reg_map[camnoc_index].mem_base +
- camnoc_info->error_logger[i]);
- CAM_ERR(CAM_CPAS, "offset[0x%x] values [0x%x]",
- camnoc_info->error_logger[i], reg_value[0]);
- }
+ slave_err->errlog0_low.value = cam_io_r_mb(
+ soc_info->reg_map[camnoc_index].mem_base +
+ camnoc_info->err_logger->errlog0_low);
+
+ slave_err->errlog0_high.value = cam_io_r_mb(
+ soc_info->reg_map[camnoc_index].mem_base +
+ camnoc_info->err_logger->errlog0_high);
+
+ slave_err->errlog1_low.value = cam_io_r_mb(
+ soc_info->reg_map[camnoc_index].mem_base +
+ camnoc_info->err_logger->errlog1_low);
+
+ slave_err->errlog1_high.value = cam_io_r_mb(
+ soc_info->reg_map[camnoc_index].mem_base +
+ camnoc_info->err_logger->errlog1_high);
+
+ slave_err->errlog2_low.value = cam_io_r_mb(
+ soc_info->reg_map[camnoc_index].mem_base +
+ camnoc_info->err_logger->errlog2_low);
+
+ slave_err->errlog2_high.value = cam_io_r_mb(
+ soc_info->reg_map[camnoc_index].mem_base +
+ camnoc_info->err_logger->errlog2_high);
+
+ slave_err->errlog3_low.value = cam_io_r_mb(
+ soc_info->reg_map[camnoc_index].mem_base +
+ camnoc_info->err_logger->errlog3_low);
+
+ slave_err->errlog3_high.value = cam_io_r_mb(
+ soc_info->reg_map[camnoc_index].mem_base +
+ camnoc_info->err_logger->errlog3_high);
+
+ CAM_ERR_RATE_LIMIT(CAM_CPAS,
+ "Possible memory configuration issue, fault at SMMU raised as CAMNOC SLAVE_IRQ");
+
+ CAM_ERR_RATE_LIMIT(CAM_CPAS,
+ "mainctrl[0x%x 0x%x] errvld[0x%x 0x%x] stall_en=%d, fault_en=%d, err_vld=%d",
+ camnoc_info->err_logger->mainctrl,
+ slave_err->mainctrl.value,
+ camnoc_info->err_logger->errvld,
+ slave_err->errvld.value,
+ slave_err->mainctrl.stall_en,
+ slave_err->mainctrl.fault_en,
+ slave_err->errvld.err_vld);
+
+ err_code_index = slave_err->errlog0_low.err_code;
+ if (err_code_index > CAMNOC_SLAVE_MAX_ERR_CODE)
+ err_code_index = CAMNOC_SLAVE_MAX_ERR_CODE;
+
+ CAM_ERR_RATE_LIMIT(CAM_CPAS,
+ "errlog0 low[0x%x 0x%x] high[0x%x 0x%x] loginfo_vld=%d, word_error=%d, non_secure=%d, device=%d, opc=%d, err_code=%d(%s) sizef=%d, addr_space=%d, len1=%d",
+ camnoc_info->err_logger->errlog0_low,
+ slave_err->errlog0_low.value,
+ camnoc_info->err_logger->errlog0_high,
+ slave_err->errlog0_high.value,
+ slave_err->errlog0_low.loginfo_vld,
+ slave_err->errlog0_low.word_error,
+ slave_err->errlog0_low.non_secure,
+ slave_err->errlog0_low.device,
+ slave_err->errlog0_low.opc,
+ slave_err->errlog0_low.err_code,
+ camnoc_salve_err_code[err_code_index],
+ slave_err->errlog0_low.sizef,
+ slave_err->errlog0_low.addr_space,
+ slave_err->errlog0_high.len1);
+
+ CAM_ERR_RATE_LIMIT(CAM_CPAS,
+ "errlog1_low[0x%x 0x%x] errlog1_high[0x%x 0x%x] errlog2_low[0x%x 0x%x] errlog2_high[0x%x 0x%x] errlog3_low[0x%x 0x%x] errlog3_high[0x%x 0x%x]",
+ camnoc_info->err_logger->errlog1_low,
+ slave_err->errlog1_low.value,
+ camnoc_info->err_logger->errlog1_high,
+ slave_err->errlog1_high.value,
+ camnoc_info->err_logger->errlog2_low,
+ slave_err->errlog2_low.value,
+ camnoc_info->err_logger->errlog2_high,
+ slave_err->errlog2_high.value,
+ camnoc_info->err_logger->errlog3_low,
+ slave_err->errlog3_low.value,
+ camnoc_info->err_logger->errlog3_high,
+ slave_err->errlog3_high.value);
return 0;
}
-static int cam_cpastop_handle_ubwc_err(struct cam_cpas *cpas_core,
- struct cam_hw_soc_info *soc_info, int i)
+static int cam_cpastop_handle_ubwc_enc_err(struct cam_cpas *cpas_core,
+ struct cam_hw_soc_info *soc_info, int i,
+ struct cam_camnoc_irq_ubwc_enc_data *enc_err)
{
- uint32_t reg_value;
int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
- reg_value = cam_io_r_mb(soc_info->reg_map[camnoc_index].mem_base +
+ enc_err->encerr_status.value =
+ cam_io_r_mb(soc_info->reg_map[camnoc_index].mem_base +
camnoc_info->irq_err[i].err_status.offset);
- CAM_ERR(CAM_CPAS,
- "Dumping ubwc error status [%d]: offset[0x%x] value[0x%x]",
- i, camnoc_info->irq_err[i].err_status.offset, reg_value);
+ /* Let clients handle the UBWC errors */
+ CAM_DBG(CAM_CPAS,
+ "ubwc enc err [%d]: offset[0x%x] value[0x%x]",
+ i, camnoc_info->irq_err[i].err_status.offset,
+ enc_err->encerr_status.value);
- return reg_value;
+ return 0;
}
-static int cam_cpastop_handle_ahb_timeout_err(struct cam_hw_info *cpas_hw)
+static int cam_cpastop_handle_ubwc_dec_err(struct cam_cpas *cpas_core,
+ struct cam_hw_soc_info *soc_info, int i,
+ struct cam_camnoc_irq_ubwc_dec_data *dec_err)
{
- CAM_ERR(CAM_CPAS, "ahb timout error");
+ int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
+
+ dec_err->decerr_status.value =
+ cam_io_r_mb(soc_info->reg_map[camnoc_index].mem_base +
+ camnoc_info->irq_err[i].err_status.offset);
+
+ /* Let clients handle the UBWC errors */
+ CAM_DBG(CAM_CPAS,
+ "ubwc dec err status [%d]: offset[0x%x] value[0x%x] thr_err=%d, fcl_err=%d, len_md_err=%d, format_err=%d",
+ i, camnoc_info->irq_err[i].err_status.offset,
+ dec_err->decerr_status.value,
+ dec_err->decerr_status.thr_err,
+ dec_err->decerr_status.fcl_err,
+ dec_err->decerr_status.len_md_err,
+ dec_err->decerr_status.format_err);
+
+ return 0;
+}
+
+static int cam_cpastop_handle_ahb_timeout_err(struct cam_hw_info *cpas_hw,
+ struct cam_camnoc_irq_ahb_timeout_data *ahb_err)
+{
+	CAM_ERR_RATE_LIMIT(CAM_CPAS, "ahb timeout error");
return 0;
}
@@ -228,10 +304,11 @@
}
static void cam_cpastop_notify_clients(struct cam_cpas *cpas_core,
- enum cam_camnoc_hw_irq_type irq_type, uint32_t irq_data)
+ struct cam_cpas_irq_data *irq_data)
{
int i;
struct cam_cpas_client *cpas_client;
+ bool error_handled = false;
CAM_DBG(CAM_CPAS,
"Notify CB : num_clients=%d, registered=%d, started=%d",
@@ -243,13 +320,15 @@
cpas_client = cpas_core->cpas_client[i];
if (cpas_client->data.cam_cpas_client_cb) {
CAM_DBG(CAM_CPAS,
- "Calling client CB %d : %d 0x%x",
- i, irq_type, irq_data);
- cpas_client->data.cam_cpas_client_cb(
+ "Calling client CB %d : %d",
+ i, irq_data->irq_type);
+ error_handled =
+ cpas_client->data.cam_cpas_client_cb(
cpas_client->data.client_handle,
cpas_client->data.userdata,
- (enum cam_camnoc_irq_type)irq_type,
irq_data);
+ if (error_handled)
+ break;
}
}
}
@@ -263,7 +342,7 @@
struct cam_hw_soc_info *soc_info;
int i;
enum cam_camnoc_hw_irq_type irq_type;
- uint32_t irq_data;
+ struct cam_cpas_irq_data irq_data;
payload = container_of(work, struct cam_cpas_work_payload, work);
if (!payload) {
@@ -280,23 +359,30 @@
(camnoc_info->irq_err[i].enable)) {
irq_type = camnoc_info->irq_err[i].irq_type;
CAM_ERR(CAM_CPAS, "Error occurred, type=%d", irq_type);
- irq_data = 0;
+ memset(&irq_data, 0x0, sizeof(irq_data));
+ irq_data.irq_type = (enum cam_camnoc_irq_type)irq_type;
switch (irq_type) {
case CAM_CAMNOC_HW_IRQ_SLAVE_ERROR:
- irq_data = cam_cpastop_handle_errlogger(
- cpas_core, soc_info);
+ cam_cpastop_handle_errlogger(
+ cpas_core, soc_info,
+ &irq_data.u.slave_err);
break;
case CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR:
case CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR:
- case CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR:
case CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR:
- irq_data = cam_cpastop_handle_ubwc_err(
- cpas_core, soc_info, i);
+ cam_cpastop_handle_ubwc_enc_err(
+ cpas_core, soc_info, i,
+ &irq_data.u.enc_err);
+ break;
+ case CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR:
+ cam_cpastop_handle_ubwc_dec_err(
+ cpas_core, soc_info, i,
+ &irq_data.u.dec_err);
break;
case CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT:
- irq_data = cam_cpastop_handle_ahb_timeout_err(
- cpas_hw);
+ cam_cpastop_handle_ahb_timeout_err(
+ cpas_hw, &irq_data.u.ahb_err);
break;
case CAM_CAMNOC_HW_IRQ_CAMNOC_TEST:
CAM_DBG(CAM_CPAS, "TEST IRQ");
@@ -306,8 +392,7 @@
break;
}
- cam_cpastop_notify_clients(cpas_core, irq_type,
- irq_data);
+ cam_cpastop_notify_clients(cpas_core, &irq_data);
payload->irq_status &=
~camnoc_info->irq_err[i].sbm_port;
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h
index e3639a6..73f7e9b 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h
@@ -173,6 +173,34 @@
};
/**
+ * struct cam_camnoc_err_logger_info : CAMNOC error logger register offsets
+ *
+ * @mainctrl: Register offset for mainctrl
+ * @errvld: Register offset for errvld
+ * @errlog0_low: Register offset for errlog0_low
+ * @errlog0_high: Register offset for errlog0_high
+ * @errlog1_low: Register offset for errlog1_low
+ * @errlog1_high: Register offset for errlog1_high
+ * @errlog2_low: Register offset for errlog2_low
+ * @errlog2_high: Register offset for errlog2_high
+ * @errlog3_low: Register offset for errlog3_low
+ * @errlog3_high: Register offset for errlog3_high
+ *
+ */
+struct cam_camnoc_err_logger_info {
+ uint32_t mainctrl;
+ uint32_t errvld;
+ uint32_t errlog0_low;
+ uint32_t errlog0_high;
+ uint32_t errlog1_low;
+ uint32_t errlog1_high;
+ uint32_t errlog2_low;
+ uint32_t errlog2_high;
+ uint32_t errlog3_low;
+ uint32_t errlog3_high;
+};
+
+/**
* struct cam_camnoc_info : Overall CAMNOC settings info
*
* @specific: Pointer to CAMNOC SPECIFICTONTTPTR settings
@@ -180,8 +208,7 @@
* @irq_sbm: Pointer to CAMNOC IRQ SBM settings
* @irq_err: Pointer to CAMNOC IRQ Error settings
* @irq_err_size: Array size of IRQ Error settings
- * @error_logger: Pointer to CAMNOC IRQ Error logger read registers
- * @error_logger_size: Array size of IRQ Error logger
+ * @err_logger: Pointer to CAMNOC IRQ Error logger read registers
* @errata_wa_list: HW Errata workaround info
*
*/
@@ -191,8 +218,7 @@
struct cam_camnoc_irq_sbm *irq_sbm;
struct cam_camnoc_irq_err *irq_err;
int irq_err_size;
- uint32_t *error_logger;
- int error_logger_size;
+ struct cam_camnoc_err_logger_info *err_logger;
struct cam_cpas_hw_errata_wa_list *errata_wa_list;
};
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop100.h b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop100.h
index b30cd05..2654b47 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop100.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop100.h
@@ -498,19 +498,17 @@
}
};
-uint32_t slave_error_logger[] = {
- 0x2700, /* ERRLOGGER_SWID_LOW */
- 0x2704, /* ERRLOGGER_SWID_HIGH */
- 0x2708, /* ERRLOGGER_MAINCTL_LOW */
- 0x2710, /* ERRLOGGER_ERRVLD_LOW */
- 0x2720, /* ERRLOGGER_ERRLOG0_LOW */
- 0x2724, /* ERRLOGGER_ERRLOG0_HIGH */
- 0x2728, /* ERRLOGGER_ERRLOG1_LOW */
- 0x272c, /* ERRLOGGER_ERRLOG1_HIGH */
- 0x2730, /* ERRLOGGER_ERRLOG2_LOW */
- 0x2734, /* ERRLOGGER_ERRLOG2_HIGH */
- 0x2738, /* ERRLOGGER_ERRLOG3_LOW */
- 0x273c, /* ERRLOGGER_ERRLOG3_HIGH */
+static struct cam_camnoc_err_logger_info cam170_cpas100_err_logger_offsets = {
+ .mainctrl = 0x2708, /* ERRLOGGER_MAINCTL_LOW */
+ .errvld = 0x2710, /* ERRLOGGER_ERRVLD_LOW */
+ .errlog0_low = 0x2720, /* ERRLOGGER_ERRLOG0_LOW */
+ .errlog0_high = 0x2724, /* ERRLOGGER_ERRLOG0_HIGH */
+ .errlog1_low = 0x2728, /* ERRLOGGER_ERRLOG1_LOW */
+ .errlog1_high = 0x272c, /* ERRLOGGER_ERRLOG1_HIGH */
+ .errlog2_low = 0x2730, /* ERRLOGGER_ERRLOG2_LOW */
+ .errlog2_high = 0x2734, /* ERRLOGGER_ERRLOG2_HIGH */
+ .errlog3_low = 0x2738, /* ERRLOGGER_ERRLOG3_LOW */
+ .errlog3_high = 0x273c, /* ERRLOGGER_ERRLOG3_HIGH */
};
static struct cam_cpas_hw_errata_wa_list cam170_cpas100_errata_wa_list = {
@@ -533,9 +531,7 @@
.irq_err = &cam_cpas100_irq_err[0],
.irq_err_size = sizeof(cam_cpas100_irq_err) /
sizeof(cam_cpas100_irq_err[0]),
- .error_logger = &slave_error_logger[0],
- .error_logger_size = sizeof(slave_error_logger) /
- sizeof(slave_error_logger[0]),
+ .err_logger = &cam170_cpas100_err_logger_offsets,
.errata_wa_list = &cam170_cpas100_errata_wa_list,
};
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v170_110.h b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v170_110.h
index b1aef1f..4418fb1 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v170_110.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v170_110.h
@@ -505,19 +505,17 @@
},
};
-static uint32_t cam_cpas110_slave_error_logger[] = {
- 0x2700, /* ERRLOGGER_SWID_LOW */
- 0x2704, /* ERRLOGGER_SWID_HIGH */
- 0x2708, /* ERRLOGGER_MAINCTL_LOW */
- 0x2710, /* ERRLOGGER_ERRVLD_LOW */
- 0x2720, /* ERRLOGGER_ERRLOG0_LOW */
- 0x2724, /* ERRLOGGER_ERRLOG0_HIGH */
- 0x2728, /* ERRLOGGER_ERRLOG1_LOW */
- 0x272c, /* ERRLOGGER_ERRLOG1_HIGH */
- 0x2730, /* ERRLOGGER_ERRLOG2_LOW */
- 0x2734, /* ERRLOGGER_ERRLOG2_HIGH */
- 0x2738, /* ERRLOGGER_ERRLOG3_LOW */
- 0x273c, /* ERRLOGGER_ERRLOG3_HIGH */
+static struct cam_camnoc_err_logger_info cam170_cpas110_err_logger_offsets = {
+ .mainctrl = 0x2708, /* ERRLOGGER_MAINCTL_LOW */
+ .errvld = 0x2710, /* ERRLOGGER_ERRVLD_LOW */
+ .errlog0_low = 0x2720, /* ERRLOGGER_ERRLOG0_LOW */
+ .errlog0_high = 0x2724, /* ERRLOGGER_ERRLOG0_HIGH */
+ .errlog1_low = 0x2728, /* ERRLOGGER_ERRLOG1_LOW */
+ .errlog1_high = 0x272c, /* ERRLOGGER_ERRLOG1_HIGH */
+ .errlog2_low = 0x2730, /* ERRLOGGER_ERRLOG2_LOW */
+ .errlog2_high = 0x2734, /* ERRLOGGER_ERRLOG2_HIGH */
+ .errlog3_low = 0x2738, /* ERRLOGGER_ERRLOG3_LOW */
+ .errlog3_high = 0x273c, /* ERRLOGGER_ERRLOG3_HIGH */
};
static struct cam_cpas_hw_errata_wa_list cam170_cpas110_errata_wa_list = {
@@ -540,9 +538,7 @@
.irq_err = &cam_cpas110_irq_err[0],
.irq_err_size = sizeof(cam_cpas110_irq_err) /
sizeof(cam_cpas110_irq_err[0]),
- .error_logger = &cam_cpas110_slave_error_logger[0],
- .error_logger_size = sizeof(cam_cpas110_slave_error_logger) /
- sizeof(cam_cpas110_slave_error_logger[0]),
+ .err_logger = &cam170_cpas110_err_logger_offsets,
.errata_wa_list = &cam170_cpas110_errata_wa_list,
};
diff --git a/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h b/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
index e0da384..c844ef7 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
@@ -82,6 +82,183 @@
};
/**
+ * struct cam_camnoc_irq_slave_err_data : Data for Slave error.
+ *
+ * @mainctrl : Err logger mainctrl info
+ * @errvld : Err logger errvld info
+ * @errlog0_low : Err logger errlog0_low info
+ * @errlog0_high : Err logger errlog0_high info
+ * @errlog1_low : Err logger errlog1_low info
+ * @errlog1_high : Err logger errlog1_high info
+ * @errlog2_low : Err logger errlog2_low info
+ * @errlog2_high : Err logger errlog2_high info
+ * @errlog3_low : Err logger errlog3_low info
+ * @errlog3_high : Err logger errlog3_high info
+ *
+ */
+struct cam_camnoc_irq_slave_err_data {
+ union {
+ struct {
+ uint32_t stall_en : 1; /* bit 0 */
+ uint32_t fault_en : 1; /* bit 1 */
+ uint32_t rsv : 30; /* bits 2-31 */
+ };
+ uint32_t value;
+ } mainctrl;
+ union {
+ struct {
+ uint32_t err_vld : 1; /* bit 0 */
+ uint32_t rsv : 31; /* bits 1-31 */
+ };
+ uint32_t value;
+ } errvld;
+ union {
+ struct {
+ uint32_t loginfo_vld : 1; /* bit 0 */
+ uint32_t word_error : 1; /* bit 1 */
+ uint32_t non_secure : 1; /* bit 2 */
+ uint32_t device : 1; /* bit 3 */
+ uint32_t opc : 3; /* bits 4 - 6 */
+ uint32_t rsv0 : 1; /* bit 7 */
+ uint32_t err_code : 3; /* bits 8 - 10 */
+ uint32_t sizef : 3; /* bits 11 - 13 */
+ uint32_t rsv1 : 2; /* bits 14 - 15 */
+ uint32_t addr_space : 6; /* bits 16 - 21 */
+ uint32_t rsv2 : 10; /* bits 22 - 31 */
+ };
+ uint32_t value;
+ } errlog0_low;
+ union {
+ struct {
+ uint32_t len1 : 10; /* bits 0 - 9 */
+ uint32_t rsv : 22; /* bits 10 - 31 */
+ };
+ uint32_t value;
+ } errlog0_high;
+ union {
+ struct {
+ uint32_t path : 16; /* bits 0 - 15 */
+ uint32_t rsv : 16; /* bits 16 - 31 */
+ };
+ uint32_t value;
+ } errlog1_low;
+ union {
+ struct {
+ uint32_t extid : 18; /* bits 0 - 17 */
+ uint32_t rsv : 14; /* bits 18 - 31 */
+ };
+ uint32_t value;
+ } errlog1_high;
+ union {
+ struct {
+ uint32_t errlog2_lsb : 32; /* bits 0 - 31 */
+ };
+ uint32_t value;
+ } errlog2_low;
+ union {
+ struct {
+ uint32_t errlog2_msb : 16; /* bits 0 - 16 */
+ uint32_t rsv : 16; /* bits 16 - 31 */
+ };
+ uint32_t value;
+ } errlog2_high;
+ union {
+ struct {
+ uint32_t errlog3_lsb : 32; /* bits 0 - 31 */
+ };
+ uint32_t value;
+ } errlog3_low;
+ union {
+ struct {
+ uint32_t errlog3_msb : 32; /* bits 0 - 31 */
+ };
+ uint32_t value;
+ } errlog3_high;
+};
+
+/**
+ * struct cam_camnoc_irq_ubwc_enc_data : Data for UBWC Encode error.
+ *
+ * @encerr_status : Encode error status
+ *
+ */
+struct cam_camnoc_irq_ubwc_enc_data {
+ union {
+ struct {
+ uint32_t encerrstatus : 3; /* bits 0 - 2 */
+ uint32_t rsv : 29; /* bits 3 - 31 */
+ };
+ uint32_t value;
+ } encerr_status;
+};
+
+/**
+ * struct cam_camnoc_irq_ubwc_dec_data : Data for UBWC Decode error.
+ *
+ * @decerr_status : Decoder error status
+ * @thr_err : Set to 1 if
+ * At least one of the bflc_len fields in the bit steam exceeds
+ * its threshold value. This error is possible only for
+ * RGBA1010102, TP10, and RGB565 formats
+ * @fcl_err : Set to 1 if
+ * Fast clear with a legal non-RGB format
+ * @len_md_err : Set to 1 if
+ * The calculated burst length does not match burst length
+ * specified by the metadata value
+ * @format_err : Set to 1 if
+ * Illegal format
+ * 1. bad format :2,3,6
+ * 2. For 32B MAL, metadata=6
+ * 3. For 32B MAL RGB565, Metadata != 0,1,7
+ * 4. For 64B MAL RGB565, metadata[3:1] == 1,2
+ *
+ */
+struct cam_camnoc_irq_ubwc_dec_data {
+ union {
+ struct {
+ uint32_t thr_err : 1; /* bit 0 */
+ uint32_t fcl_err : 1; /* bit 1 */
+ uint32_t len_md_err : 1; /* bit 2 */
+ uint32_t format_err : 1; /* bit 3 */
+ uint32_t rsv : 28; /* bits 4 - 31 */
+ };
+ uint32_t value;
+ } decerr_status;
+};
+
+struct cam_camnoc_irq_ahb_timeout_data {
+ uint32_t data;
+};
+
+/**
+ * struct cam_cpas_irq_data : CAMNOC IRQ data
+ *
+ * @irq_type : To identify the type of IRQ
+ * @u : Union of irq err data information
+ * @slave_err : Data for Slave error.
+ * Valid if type is CAM_CAMNOC_IRQ_SLAVE_ERROR
+ * @enc_err : Data for UBWC Encode error.
+ * Valid if type is one of below:
+ * CAM_CAMNOC_IRQ_IFE02_UBWC_ENCODE_ERROR
+ * CAM_CAMNOC_IRQ_IFE13_UBWC_ENCODE_ERROR
+ * CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR
+ * @dec_err : Data for UBWC Decode error.
+ * Valid if type is CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR
+ * @ahb_err : Data for Slave error.
+ * Valid if type is CAM_CAMNOC_IRQ_AHB_TIMEOUT
+ *
+ */
+struct cam_cpas_irq_data {
+ enum cam_camnoc_irq_type irq_type;
+ union {
+ struct cam_camnoc_irq_slave_err_data slave_err;
+ struct cam_camnoc_irq_ubwc_enc_data enc_err;
+ struct cam_camnoc_irq_ubwc_dec_data dec_err;
+ struct cam_camnoc_irq_ahb_timeout_data ahb_err;
+ } u;
+};
+
+/**
* struct cam_cpas_register_params : Register params for cpas client
*
* @identifier : Input identifier string which is the device label
@@ -107,11 +284,10 @@
uint32_t cell_index;
struct device *dev;
void *userdata;
- void (*cam_cpas_client_cb)(
+ bool (*cam_cpas_client_cb)(
uint32_t client_handle,
void *userdata,
- enum cam_camnoc_irq_type event_type,
- uint32_t event_data);
+ struct cam_cpas_irq_data *irq_data);
uint32_t client_handle;
};
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.c b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.c
index 9045dc1..f27d016 100644
--- a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.c
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.c
@@ -20,11 +20,16 @@
#include "cam_fd_hw_core.h"
#include "cam_fd_hw_soc.h"
-static void cam_fd_hw_util_cpas_callback(uint32_t handle, void *userdata,
- enum cam_camnoc_irq_type event_type, uint32_t event_data)
+static bool cam_fd_hw_util_cpas_callback(uint32_t handle, void *userdata,
+ struct cam_cpas_irq_data *irq_data)
{
- CAM_DBG(CAM_FD, "CPAS hdl=%d, udata=%pK, event=%d, event_data=%d",
- handle, userdata, event_type, event_data);
+ if (!irq_data)
+ return false;
+
+ CAM_DBG(CAM_FD, "CPAS hdl=%d, udata=%pK, irq_type=%d",
+ handle, userdata, irq_data->irq_type);
+
+ return false;
}
static int cam_fd_hw_soc_util_setup_regbase_indices(
diff --git a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h
index e892772..ce7a8b3 100644
--- a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h
+++ b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h
@@ -60,10 +60,12 @@
* hfi_read_message() - function for hfi read
* @pmsg: buffer to place read message for hfi queue
* @q_id: queue id
+ * @words_read: total number of words read from the queue
+ * returned as output to the caller
*
- * Returns size read in words/failure(negative value)
+ * Returns success(zero)/failure(non zero)
*/
-int64_t hfi_read_message(uint32_t *pmsg, uint8_t q_id);
+int hfi_read_message(uint32_t *pmsg, uint8_t q_id, uint32_t *words_read);
/**
* hfi_init() - function initialize hfi after firmware download
diff --git a/drivers/media/platform/msm/camera/cam_icp/hfi.c b/drivers/media/platform/msm/camera/cam_icp/hfi.c
index 16fa33a..a8855ae 100644
--- a/drivers/media/platform/msm/camera/cam_icp/hfi.c
+++ b/drivers/media/platform/msm/camera/cam_icp/hfi.c
@@ -109,7 +109,19 @@
new_write_idx << BYTE_WORD_SHIFT);
}
+ /*
+ * To make sure command data in a command queue before
+ * updating write index
+ */
+ wmb();
+
q->qhdr_write_idx = new_write_idx;
+
+ /*
+ * Before raising interrupt make sure command data is ready for
+ * firmware to process
+ */
+ wmb();
cam_io_w((uint32_t)INTR_ENABLE,
g_hfi->csr_base + HFI_REG_A5_CSR_HOST2ICPINT);
err:
@@ -117,13 +129,14 @@
return rc;
}
-int64_t hfi_read_message(uint32_t *pmsg, uint8_t q_id)
+int hfi_read_message(uint32_t *pmsg, uint8_t q_id,
+ uint32_t *words_read)
{
struct hfi_qtbl *q_tbl_ptr;
struct hfi_q_hdr *q;
uint32_t new_read_idx, size_in_words, word_diff, temp;
uint32_t *read_q, *read_ptr, *write_ptr;
- int64_t rc = 0;
+ int rc = 0;
if (!pmsg) {
CAM_ERR(CAM_HFI, "Invalid msg");
@@ -202,7 +215,7 @@
}
q->qhdr_read_idx = new_read_idx;
- rc = size_in_words;
+ *words_read = size_in_words;
err:
mutex_unlock(&hfi_msg_q_mutex);
return rc;
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_dev.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_dev.c
index 99e2e79..14c3c9c 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_dev.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_dev.c
@@ -50,6 +50,40 @@
};
EXPORT_SYMBOL(cam_a5_hw_info);
+static bool cam_a5_cpas_cb(uint32_t client_handle, void *userdata,
+ struct cam_cpas_irq_data *irq_data)
+{
+ bool error_handled = false;
+
+ if (!irq_data)
+ return error_handled;
+
+ switch (irq_data->irq_type) {
+ case CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR:
+ CAM_ERR_RATE_LIMIT(CAM_ICP,
+ "IPE/BPS UBWC Decode error type=%d status=%x thr_err=%d, fcl_err=%d, len_md_err=%d, format_err=%d",
+ irq_data->irq_type,
+ irq_data->u.dec_err.decerr_status.value,
+ irq_data->u.dec_err.decerr_status.thr_err,
+ irq_data->u.dec_err.decerr_status.fcl_err,
+ irq_data->u.dec_err.decerr_status.len_md_err,
+ irq_data->u.dec_err.decerr_status.format_err);
+ error_handled = true;
+ break;
+ case CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR:
+ CAM_ERR_RATE_LIMIT(CAM_ICP,
+ "IPE/BPS UBWC Encode error type=%d status=%x",
+ irq_data->irq_type,
+ irq_data->u.enc_err.encerr_status.value);
+ error_handled = true;
+ break;
+ default:
+ break;
+ }
+
+ return error_handled;
+}
+
int cam_a5_register_cpas(struct cam_hw_soc_info *soc_info,
struct cam_a5_device_core_info *core_info,
uint32_t hw_idx)
@@ -59,7 +93,7 @@
cpas_register_params.dev = &soc_info->pdev->dev;
memcpy(cpas_register_params.identifier, "icp", sizeof("icp"));
- cpas_register_params.cam_cpas_client_cb = NULL;
+ cpas_register_params.cam_cpas_client_cb = cam_a5_cpas_cb;
cpas_register_params.cell_index = hw_idx;
cpas_register_params.userdata = NULL;
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
index 72f2803..340a1e2 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -1150,11 +1150,12 @@
{
uint32_t *msg_ptr = NULL, *pkt_ptr = NULL;
struct hfi_msg_debug *dbg_msg;
- int64_t read_len, size_processed = 0;
+ uint32_t read_len, size_processed = 0;
char *dbg_buf;
+ int rc = 0;
- read_len = hfi_read_message(icp_hw_mgr.dbg_buf, Q_DBG);
- if (read_len < 0)
+ rc = hfi_read_message(icp_hw_mgr.dbg_buf, Q_DBG, &read_len);
+ if (rc)
return;
msg_ptr = (uint32_t *)icp_hw_mgr.dbg_buf;
@@ -1179,7 +1180,8 @@
static int cam_icp_process_msg_pkt_type(
struct cam_icp_hw_mgr *hw_mgr,
- uint32_t *msg_ptr)
+ uint32_t *msg_ptr,
+ uint32_t *msg_processed_len)
{
int rc = 0;
int size_processed = 0;
@@ -1230,19 +1232,17 @@
break;
}
- if (rc)
- return rc;
-
- return size_processed;
+ *msg_processed_len = size_processed;
+ return rc;
}
static int32_t cam_icp_mgr_process_msg(void *priv, void *data)
{
- int64_t read_len, msg_processed_len;
- int rc = 0;
+ uint32_t read_len, msg_processed_len;
uint32_t *msg_ptr = NULL;
struct hfi_msg_work_data *task_data;
struct cam_icp_hw_mgr *hw_mgr;
+ int rc = 0;
if (!data || !priv) {
CAM_ERR(CAM_ICP, "Invalid data");
@@ -1252,25 +1252,24 @@
task_data = data;
hw_mgr = priv;
- read_len = hfi_read_message(icp_hw_mgr.msg_buf, Q_MSG);
- if (read_len < 0) {
- rc = read_len;
+ rc = hfi_read_message(icp_hw_mgr.msg_buf, Q_MSG, &read_len);
+ if (rc) {
CAM_DBG(CAM_ICP, "Unable to read msg q");
} else {
read_len = read_len << BYTE_WORD_SHIFT;
msg_ptr = (uint32_t *)icp_hw_mgr.msg_buf;
while (true) {
- msg_processed_len = cam_icp_process_msg_pkt_type(
- hw_mgr, msg_ptr);
- if (msg_processed_len < 0) {
- rc = msg_processed_len;
+ rc = cam_icp_process_msg_pkt_type(hw_mgr, msg_ptr,
+ &msg_processed_len);
+ if (rc)
return rc;
- }
read_len -= msg_processed_len;
- if (read_len > 0)
+ if (read_len > 0) {
msg_ptr += (msg_processed_len >>
BYTE_WORD_SHIFT);
+ msg_processed_len = 0;
+ }
else
break;
}
@@ -1645,6 +1644,7 @@
for (i = 0; i < CAM_FRAME_CMD_MAX; i++)
clear_bit(i, hw_mgr->ctx_data[ctx_id].hfi_frame_process.bitmap);
kfree(hw_mgr->ctx_data[ctx_id].hfi_frame_process.bitmap);
+ hw_mgr->ctx_data[ctx_id].hfi_frame_process.bitmap = NULL;
cam_icp_hw_mgr_clk_info_update(hw_mgr, &hw_mgr->ctx_data[ctx_id]);
hw_mgr->ctx_data[ctx_id].clk_info.curr_fc = 0;
hw_mgr->ctx_data[ctx_id].clk_info.base_clk = 0;
@@ -2069,6 +2069,7 @@
ctx_data = config_args->ctxt_to_hw_map;
mutex_lock(&ctx_data->ctx_mutex);
if (!ctx_data->in_use) {
+ mutex_unlock(&ctx_data->ctx_mutex);
CAM_ERR(CAM_ICP, "ctx is not in use");
return -EINVAL;
}
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
index 19dd794..cfe5071 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
@@ -304,7 +304,7 @@
ctx_isp->sof_timestamp_val);
CAM_DBG(CAM_ISP, " sof status:%d", sof_event_status);
- if (cam_req_mgr_notify_frame_message(&req_msg,
+ if (cam_req_mgr_notify_message(&req_msg,
V4L_EVENT_CAM_REQ_MGR_SOF, V4L_EVENT_CAM_REQ_MGR_EVENT))
CAM_ERR(CAM_ISP,
"Error in notifying the sof time for req id:%lld",
@@ -427,6 +427,13 @@
return rc;
}
+static int __cam_isp_ctx_reg_upd_in_hw_error(
+ struct cam_isp_context *ctx_isp, void *evt_data)
+{
+ ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+ return 0;
+}
+
static int __cam_isp_ctx_sof_in_activated_state(
struct cam_isp_context *ctx_isp, void *evt_data)
{
@@ -689,8 +696,13 @@
void *evt_data)
{
int rc = 0;
- struct cam_ctx_request *req;
+ uint32_t i = 0;
+ bool found = 0;
+ struct cam_ctx_request *req = NULL;
+ struct cam_ctx_request *req_temp;
+ struct cam_isp_ctx_req *req_isp = NULL;
struct cam_req_mgr_error_notify notify;
+ uint64_t error_request_id;
struct cam_context *ctx = ctx_isp->base;
struct cam_isp_hw_error_event_data *error_event_data =
@@ -701,7 +713,7 @@
CAM_DBG(CAM_ISP, "Enter error_type = %d", error_type);
if ((error_type == CAM_ISP_HW_ERROR_OVERFLOW) ||
(error_type == CAM_ISP_HW_ERROR_BUSIF_OVERFLOW))
- notify.error = CRM_KMD_ERR_FATAL;
+ notify.error = CRM_KMD_ERR_OVERFLOW;
/*
* Need to check the active req
@@ -712,31 +724,92 @@
if (list_empty(&ctx->active_req_list)) {
CAM_ERR_RATE_LIMIT(CAM_ISP,
"handling error with no active request");
- rc = -EINVAL;
- goto end;
+ } else {
+ list_for_each_entry_safe(req, req_temp,
+ &ctx->active_req_list, list) {
+ req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+ if (!req_isp->bubble_report) {
+ for (i = 0; i < req_isp->num_fence_map_out;
+ i++) {
+ CAM_ERR(CAM_ISP, "req %llu, Sync fd %x",
+ req->request_id,
+ req_isp->fence_map_out[i].
+ sync_id);
+ if (req_isp->fence_map_out[i].sync_id
+ != -1) {
+ rc = cam_sync_signal(
+ req_isp->fence_map_out[i].
+ sync_id,
+ CAM_SYNC_STATE_SIGNALED_ERROR);
+ req_isp->fence_map_out[i].
+ sync_id = -1;
+ }
+ }
+ list_del_init(&req->list);
+ list_add_tail(&req->list, &ctx->free_req_list);
+ ctx_isp->active_req_cnt--;
+ } else {
+ found = 1;
+ break;
+ }
+ }
}
- req = list_first_entry(&ctx->active_req_list,
- struct cam_ctx_request, list);
+ if (found) {
+ list_for_each_entry_safe_reverse(req, req_temp,
+ &ctx->active_req_list, list) {
+ req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+ list_del_init(&req->list);
+ list_add(&req->list, &ctx->pending_req_list);
+ ctx_isp->active_req_cnt--;
+ }
+ }
+
+ do {
+ if (list_empty(&ctx->pending_req_list)) {
+ error_request_id = ctx_isp->last_applied_req_id + 1;
+ req_isp = NULL;
+ break;
+ }
+ req = list_first_entry(&ctx->pending_req_list,
+ struct cam_ctx_request, list);
+ req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+ error_request_id = ctx_isp->last_applied_req_id;
+
+ if (req_isp->bubble_report)
+ break;
+
+ for (i = 0; i < req_isp->num_fence_map_out; i++) {
+ if (req_isp->fence_map_out[i].sync_id != -1)
+ rc = cam_sync_signal(
+ req_isp->fence_map_out[i].sync_id,
+ CAM_SYNC_STATE_SIGNALED_ERROR);
+ req_isp->fence_map_out[i].sync_id = -1;
+ }
+ list_del_init(&req->list);
+ list_add_tail(&req->list, &ctx->free_req_list);
+
+ } while (req->request_id < ctx_isp->last_applied_req_id);
+
if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_err) {
notify.link_hdl = ctx->link_hdl;
notify.dev_hdl = ctx->dev_hdl;
- notify.req_id = req->request_id;
+ notify.req_id = error_request_id;
+
+ if (req_isp && req_isp->bubble_report)
+ notify.error = CRM_KMD_ERR_BUBBLE;
+
+ CAM_WARN(CAM_ISP, "Notify CRM: req %lld, frame %lld\n",
+ error_request_id, ctx_isp->frame_id);
ctx->ctx_crm_intf->notify_err(¬ify);
- CAM_ERR_RATE_LIMIT(CAM_ISP, "Notify CRM about ERROR frame %lld",
- ctx_isp->frame_id);
+ ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HW_ERROR;
} else {
CAM_ERR_RATE_LIMIT(CAM_ISP, "Can not notify ERRROR to CRM");
rc = -EFAULT;
}
- list_del_init(&req->list);
- list_add(&req->list, &ctx->pending_req_list);
- /* might need to check if active list is empty */
-
-end:
CAM_DBG(CAM_ISP, "Exit");
return rc;
}
@@ -746,7 +819,7 @@
/* SOF */
{
.irq_ops = {
- NULL,
+ __cam_isp_ctx_handle_error,
__cam_isp_ctx_sof_in_activated_state,
__cam_isp_ctx_reg_upd_in_sof,
__cam_isp_ctx_notify_sof_in_actived_state,
@@ -779,7 +852,7 @@
/* BUBBLE */
{
.irq_ops = {
- NULL,
+ __cam_isp_ctx_handle_error,
__cam_isp_ctx_sof_in_activated_state,
NULL,
__cam_isp_ctx_notify_sof_in_actived_state,
@@ -790,7 +863,7 @@
/* Bubble Applied */
{
.irq_ops = {
- NULL,
+ __cam_isp_ctx_handle_error,
__cam_isp_ctx_sof_in_activated_state,
__cam_isp_ctx_reg_upd_in_activated_state,
__cam_isp_ctx_epoch_in_bubble_applied,
@@ -798,6 +871,17 @@
__cam_isp_ctx_buf_done_in_bubble_applied,
},
},
+ /* HW ERROR */
+ {
+ .irq_ops = {
+ NULL,
+ __cam_isp_ctx_sof_in_activated_state,
+ __cam_isp_ctx_reg_upd_in_hw_error,
+ NULL,
+ NULL,
+ NULL,
+ },
+ },
/* HALT */
{
},
@@ -878,7 +962,9 @@
} else {
spin_lock_bh(&ctx->lock);
ctx_isp->substate_activated = next_state;
- CAM_DBG(CAM_ISP, "new state %d", next_state);
+ ctx_isp->last_applied_req_id = apply->request_id;
+ CAM_DBG(CAM_ISP, "new substate state %d, applied req %lld",
+ next_state, ctx_isp->last_applied_req_id);
spin_unlock_bh(&ctx->lock);
}
end:
@@ -1613,7 +1699,7 @@
req->request_id = packet->header.request_id;
req->status = 1;
- if (ctx->state == CAM_CTX_ACTIVATED && ctx->ctx_crm_intf->add_req) {
+ if (ctx->state >= CAM_CTX_READY && ctx->ctx_crm_intf->add_req) {
add_req.link_hdl = ctx->link_hdl;
add_req.dev_hdl = ctx->dev_hdl;
add_req.req_id = req->request_id;
@@ -2005,6 +2091,24 @@
return rc;
}
+static int __cam_isp_ctx_unlink_in_activated(struct cam_context *ctx,
+ struct cam_req_mgr_core_dev_link_setup *unlink)
+{
+ int rc = 0;
+
+ CAM_WARN(CAM_ISP,
+ "Received unlink in activated state. It's unexpected");
+ rc = __cam_isp_ctx_stop_dev_in_activated_unlock(ctx);
+ if (rc)
+ CAM_WARN(CAM_ISP, "Stop device failed rc=%d", rc);
+
+ rc = __cam_isp_ctx_unlink_in_ready(ctx, unlink);
+ if (rc)
+ CAM_ERR(CAM_ISP, "Unlink failed rc=%d", rc);
+
+ return rc;
+}
+
static int __cam_isp_ctx_apply_req(struct cam_context *ctx,
struct cam_req_mgr_apply_request *apply)
{
@@ -2116,6 +2220,7 @@
.config_dev = __cam_isp_ctx_config_dev_in_top_state,
},
.crm_ops = {
+ .unlink = __cam_isp_ctx_unlink_in_activated,
.apply_req = __cam_isp_ctx_apply_req,
.flush_req = __cam_isp_ctx_flush_req_in_top_state,
},
@@ -2184,4 +2289,3 @@
memset(ctx, 0, sizeof(*ctx));
return rc;
}
-
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
index cec0e80..347290c 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
@@ -50,6 +50,7 @@
CAM_ISP_CTX_ACTIVATED_EPOCH,
CAM_ISP_CTX_ACTIVATED_BUBBLE,
CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED,
+ CAM_ISP_CTX_ACTIVATED_HW_ERROR,
CAM_ISP_CTX_ACTIVATED_HALT,
CAM_ISP_CTX_ACTIVATED_MAX,
};
@@ -111,6 +112,7 @@
* @reported_req_id: Last reported request id
* @subscribe_event: The irq event mask that CRM subscribes to, IFE will
* invoke CRM cb at those event.
+ * @last_applied_req_id: Last applied request id
*
*/
struct cam_isp_context {
@@ -129,6 +131,7 @@
int32_t active_req_cnt;
int64_t reported_req_id;
uint32_t subscribe_event;
+ int64_t last_applied_req_id;
};
/**
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
index ee01c5e..0362758 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
@@ -41,10 +41,12 @@
(CAM_ISP_PACKET_META_GENERIC_BLOB_COMMON + 1)
#define CAM_ISP_GENERIC_BLOB_TYPE_MAX \
- (CAM_ISP_GENERIC_BLOB_TYPE_HFR_CONFIG + 1)
+ (CAM_ISP_GENERIC_BLOB_TYPE_BW_CONFIG + 1)
static uint32_t blob_type_hw_cmd_map[CAM_ISP_GENERIC_BLOB_TYPE_MAX] = {
CAM_ISP_HW_CMD_GET_HFR_UPDATE,
+ CAM_ISP_HW_CMD_CLOCK_UPDATE,
+ CAM_ISP_HW_CMD_BW_UPDATE,
};
static struct cam_ife_hw_mgr g_ife_hw_mgr;
@@ -138,6 +140,39 @@
return rc;
}
+static int cam_ife_hw_mgr_reset_csid_res(
+ struct cam_ife_hw_mgr_res *isp_hw_res)
+{
+ int i;
+ int rc = 0;
+ struct cam_hw_intf *hw_intf;
+ struct cam_csid_reset_cfg_args csid_reset_args;
+
+ csid_reset_args.reset_type = CAM_IFE_CSID_RESET_PATH;
+
+ for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+ if (!isp_hw_res->hw_res[i])
+ continue;
+ csid_reset_args.node_res = isp_hw_res->hw_res[i];
+ hw_intf = isp_hw_res->hw_res[i]->hw_intf;
+ CAM_DBG(CAM_ISP, "Resetting csid hardware %d",
+ hw_intf->hw_idx);
+ if (hw_intf->hw_ops.reset) {
+ rc = hw_intf->hw_ops.reset(hw_intf->hw_priv,
+ &csid_reset_args,
+ sizeof(struct cam_csid_reset_cfg_args));
+ if (rc <= 0)
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ CAM_ERR(CAM_ISP, "RESET HW res failed: (type:%d, id:%d)",
+ isp_hw_res->res_type, isp_hw_res->res_id);
+ return rc;
+}
+
static int cam_ife_hw_mgr_init_hw_res(
struct cam_ife_hw_mgr_res *isp_hw_res)
{
@@ -168,7 +203,8 @@
}
static int cam_ife_hw_mgr_start_hw_res(
- struct cam_ife_hw_mgr_res *isp_hw_res)
+ struct cam_ife_hw_mgr_res *isp_hw_res,
+ struct cam_ife_hw_mgr_ctx *ctx)
{
int i;
int rc = -1;
@@ -179,6 +215,8 @@
continue;
hw_intf = isp_hw_res->hw_res[i]->hw_intf;
if (hw_intf->hw_ops.start) {
+ isp_hw_res->hw_res[i]->rdi_only_ctx =
+ ctx->is_rdi_only_context;
rc = hw_intf->hw_ops.start(hw_intf->hw_priv,
isp_hw_res->hw_res[i],
sizeof(struct cam_isp_resource_node));
@@ -833,7 +871,7 @@
struct cam_ife_hw_mgr *ife_hw_mgr;
struct cam_ife_hw_mgr_res *csid_res;
struct cam_ife_hw_mgr_res *cid_res;
- struct cam_hw_intf *hw_intf;
+ struct cam_hw_intf *hw_intf;
struct cam_csid_hw_reserve_resource_args csid_acquire;
ife_hw_mgr = ife_ctx->hw_mgr;
@@ -1424,6 +1462,8 @@
CAM_ERR(CAM_ISP, "Invalid context parameters");
return -EPERM;
}
+ if (atomic_read(&ctx->overflow_pending))
+ return -EINVAL;
CAM_DBG(CAM_ISP, "Enter ctx id:%d num_hw_upd_entries %d",
ctx->ctx_index, cfg->num_hw_update_entries);
@@ -1455,8 +1495,7 @@
return rc;
}
-static int cam_ife_mgr_stop_hw_in_overflow(void *hw_mgr_priv,
- void *stop_hw_args)
+static int cam_ife_mgr_stop_hw_in_overflow(void *stop_hw_args)
{
int rc = 0;
struct cam_hw_stop_args *stop_args = stop_hw_args;
@@ -1464,7 +1503,7 @@
struct cam_ife_hw_mgr_ctx *ctx;
uint32_t i, master_base_idx = 0;
- if (!hw_mgr_priv || !stop_hw_args) {
+ if (!stop_hw_args) {
CAM_ERR(CAM_ISP, "Invalid arguments");
return -EINVAL;
}
@@ -1477,7 +1516,6 @@
CAM_DBG(CAM_ISP, "Enter...ctx id:%d",
ctx->ctx_index);
- /* stop resource will remove the irq mask from the hardware */
if (!ctx->num_base) {
CAM_ERR(CAM_ISP, "Number of bases are zero");
return -EINVAL;
@@ -1491,17 +1529,13 @@
}
}
- /*
- * if Context does not have PIX resources and has only RDI resource
- * then take the first base index.
- */
-
if (i == ctx->num_base)
master_base_idx = ctx->base[0].idx;
+
/* stop the master CIDs first */
cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_cid,
- master_base_idx, CAM_CSID_HALT_IMMEDIATELY);
+ master_base_idx, CAM_CSID_HALT_IMMEDIATELY);
/* stop rest of the CIDs */
for (i = 0; i < ctx->num_base; i++) {
@@ -1513,7 +1547,7 @@
/* stop the master CSID path first */
cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_csid,
- master_base_idx, CAM_CSID_HALT_IMMEDIATELY);
+ master_base_idx, CAM_CSID_HALT_IMMEDIATELY);
/* Stop rest of the CSID paths */
for (i = 0; i < ctx->num_base; i++) {
@@ -1533,8 +1567,9 @@
for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++)
cam_ife_hw_mgr_stop_hw_res(&ctx->res_list_ife_out[i]);
- /* update vote bandwidth should be done at the HW layer */
+ /* Stop tasklet for context */
+ cam_tasklet_stop(ctx->common.tasklet_info);
CAM_DBG(CAM_ISP, "Exit...ctx id:%d rc :%d",
ctx->ctx_index, rc);
@@ -1664,40 +1699,27 @@
return rc;
}
-static int cam_ife_mgr_reset_hw(struct cam_ife_hw_mgr *hw_mgr,
+static int cam_ife_mgr_reset_vfe_hw(struct cam_ife_hw_mgr *hw_mgr,
uint32_t hw_idx)
{
uint32_t i = 0;
- struct cam_hw_intf *csid_hw_intf;
struct cam_hw_intf *vfe_hw_intf;
- struct cam_csid_reset_cfg_args csid_reset_args;
+ uint32_t vfe_reset_type;
if (!hw_mgr) {
CAM_DBG(CAM_ISP, "Invalid arguments");
return -EINVAL;
}
-
- /* Reset IFE CSID HW */
- csid_reset_args.reset_type = CAM_IFE_CSID_RESET_GLOBAL;
-
- for (i = 0; i < CAM_IFE_CSID_HW_NUM_MAX; i++) {
- if (hw_idx != hw_mgr->csid_devices[i]->hw_idx)
- continue;
-
- csid_hw_intf = hw_mgr->csid_devices[i];
- csid_hw_intf->hw_ops.reset(csid_hw_intf->hw_priv,
- &csid_reset_args,
- sizeof(struct cam_csid_reset_cfg_args));
- break;
- }
-
/* Reset VFE HW*/
+ vfe_reset_type = CAM_VFE_HW_RESET_HW;
+
for (i = 0; i < CAM_VFE_HW_NUM_MAX; i++) {
if (hw_idx != hw_mgr->ife_devices[i]->hw_idx)
continue;
CAM_DBG(CAM_ISP, "VFE (id = %d) reset", hw_idx);
vfe_hw_intf = hw_mgr->ife_devices[i];
- vfe_hw_intf->hw_ops.reset(vfe_hw_intf->hw_priv, NULL, 0);
+ vfe_hw_intf->hw_ops.reset(vfe_hw_intf->hw_priv,
+ &vfe_reset_type, sizeof(vfe_reset_type));
break;
}
@@ -1705,8 +1727,7 @@
return 0;
}
-static int cam_ife_mgr_restart_hw(void *hw_mgr_priv,
- void *start_hw_args)
+static int cam_ife_mgr_restart_hw(void *start_hw_args)
{
int rc = -1;
struct cam_hw_start_args *start_args = start_hw_args;
@@ -1714,7 +1735,7 @@
struct cam_ife_hw_mgr_res *hw_mgr_res;
uint32_t i;
- if (!hw_mgr_priv || !start_hw_args) {
+ if (!start_hw_args) {
CAM_ERR(CAM_ISP, "Invalid arguments");
return -EINVAL;
}
@@ -1725,12 +1746,14 @@
return -EPERM;
}
- CAM_DBG(CAM_ISP, "Enter... ctx id:%d", ctx->ctx_index);
-
CAM_DBG(CAM_ISP, "START IFE OUT ... in ctx id:%d", ctx->ctx_index);
+
+ cam_tasklet_start(ctx->common.tasklet_info);
+
/* start the IFE out devices */
for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
- rc = cam_ife_hw_mgr_start_hw_res(&ctx->res_list_ife_out[i]);
+ rc = cam_ife_hw_mgr_start_hw_res(
+ &ctx->res_list_ife_out[i], ctx);
if (rc) {
CAM_ERR(CAM_ISP, "Can not start IFE OUT (%d)", i);
goto err;
@@ -1740,7 +1763,7 @@
CAM_DBG(CAM_ISP, "START IFE SRC ... in ctx id:%d", ctx->ctx_index);
/* Start the IFE mux in devices */
list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
- rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
+ rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res, ctx);
if (rc) {
CAM_ERR(CAM_ISP, "Can not start IFE MUX (%d)",
hw_mgr_res->res_id);
@@ -1751,7 +1774,7 @@
CAM_DBG(CAM_ISP, "START CSID HW ... in ctx id:%d", ctx->ctx_index);
/* Start the IFE CSID HW devices */
list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
- rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
+ rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res, ctx);
if (rc) {
CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)",
hw_mgr_res->res_id);
@@ -1760,22 +1783,12 @@
}
CAM_DBG(CAM_ISP, "START CID SRC ... in ctx id:%d", ctx->ctx_index);
- /* Start the IFE CID HW devices */
- list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_cid, list) {
- rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
- if (rc) {
- CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)",
- hw_mgr_res->res_id);
- goto err;
- }
- }
-
/* Start IFE root node: do nothing */
CAM_DBG(CAM_ISP, "Exit...(success)");
return 0;
err:
- cam_ife_mgr_stop_hw(hw_mgr_priv, start_hw_args);
+ cam_ife_mgr_stop_hw_in_overflow(start_hw_args);
CAM_DBG(CAM_ISP, "Exit...(rc=%d)", rc);
return rc;
}
@@ -1900,7 +1913,8 @@
ctx->ctx_index);
/* start the IFE out devices */
for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
- rc = cam_ife_hw_mgr_start_hw_res(&ctx->res_list_ife_out[i]);
+ rc = cam_ife_hw_mgr_start_hw_res(
+ &ctx->res_list_ife_out[i], ctx);
if (rc) {
CAM_ERR(CAM_ISP, "Can not start IFE OUT (%d)",
i);
@@ -1912,7 +1926,7 @@
ctx->ctx_index);
/* Start the IFE mux in devices */
list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
- rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
+ rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res, ctx);
if (rc) {
CAM_ERR(CAM_ISP, "Can not start IFE MUX (%d)",
hw_mgr_res->res_id);
@@ -1924,7 +1938,7 @@
ctx->ctx_index);
/* Start the IFE CSID HW devices */
list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
- rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
+ rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res, ctx);
if (rc) {
CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)",
hw_mgr_res->res_id);
@@ -1936,10 +1950,10 @@
ctx->ctx_index);
/* Start the IFE CID HW devices */
list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_cid, list) {
- rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
+ rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res, ctx);
if (rc) {
CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)",
- hw_mgr_res->res_id);
+ hw_mgr_res->res_id);
goto err;
}
}
@@ -2102,6 +2116,168 @@
return rc;
}
+static int cam_isp_blob_clock_update(
+ uint32_t blob_type,
+ struct cam_isp_generic_blob_info *blob_info,
+ struct cam_isp_clock_config *clock_config,
+ struct cam_hw_prepare_update_args *prepare)
+{
+ struct cam_ife_hw_mgr_ctx *ctx = NULL;
+ struct cam_ife_hw_mgr_res *hw_mgr_res;
+ struct cam_hw_intf *hw_intf;
+ struct cam_vfe_clock_update_args clock_upd_args;
+ uint64_t clk_rate = 0;
+ int rc = -EINVAL;
+ uint32_t i;
+ uint32_t j;
+
+ ctx = prepare->ctxt_to_hw_map;
+
+ CAM_DBG(CAM_ISP,
+ "usage=%u left_clk= %lu right_clk=%lu",
+ clock_config->usage_type,
+ clock_config->left_pix_hz,
+ clock_config->right_pix_hz);
+
+ list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
+ for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+ clk_rate = 0;
+ if (!hw_mgr_res->hw_res[i])
+ continue;
+
+ if (hw_mgr_res->res_id == CAM_ISP_HW_VFE_IN_CAMIF)
+ if (i == CAM_ISP_HW_SPLIT_LEFT)
+ clk_rate =
+ clock_config->left_pix_hz;
+ else
+ clk_rate =
+ clock_config->right_pix_hz;
+ else if ((hw_mgr_res->res_id >= CAM_ISP_HW_VFE_IN_RDI0)
+ && (hw_mgr_res->res_id <=
+ CAM_ISP_HW_VFE_IN_RDI3))
+ for (j = 0; j < clock_config->num_rdi; j++)
+ clk_rate = max(clock_config->rdi_hz[j],
+ clk_rate);
+ else
+ if (hw_mgr_res->hw_res[i]) {
+ CAM_ERR(CAM_ISP, "Invalid res_id %u",
+ hw_mgr_res->res_id);
+ rc = -EINVAL;
+ return rc;
+ }
+
+ hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
+ if (hw_intf && hw_intf->hw_ops.process_cmd) {
+ clock_upd_args.node_res =
+ hw_mgr_res->hw_res[i];
+ CAM_DBG(CAM_ISP,
+ "res_id=%u i= %d clk=%llu\n",
+ hw_mgr_res->res_id, i, clk_rate);
+
+ clock_upd_args.clk_rate = clk_rate;
+
+ rc = hw_intf->hw_ops.process_cmd(
+ hw_intf->hw_priv,
+ CAM_ISP_HW_CMD_CLOCK_UPDATE,
+ &clock_upd_args,
+ sizeof(
+ struct cam_vfe_clock_update_args));
+ if (rc)
+ CAM_ERR(CAM_ISP, "Clock Update failed");
+ } else
+ CAM_WARN(CAM_ISP, "NULL hw_intf!");
+ }
+ }
+
+ return rc;
+}
+
+static int cam_isp_blob_bw_update(
+ uint32_t blob_type,
+ struct cam_isp_generic_blob_info *blob_info,
+ struct cam_isp_bw_config *bw_config,
+ struct cam_hw_prepare_update_args *prepare)
+{
+ struct cam_ife_hw_mgr_ctx *ctx = NULL;
+ struct cam_ife_hw_mgr_res *hw_mgr_res;
+ struct cam_hw_intf *hw_intf;
+ struct cam_vfe_bw_update_args bw_upd_args;
+ uint64_t cam_bw_bps = 0;
+ uint64_t ext_bw_bps = 0;
+ int rc = -EINVAL;
+ uint32_t i;
+
+ ctx = prepare->ctxt_to_hw_map;
+
+ CAM_DBG(CAM_ISP,
+ "usage=%u left cam_bw_bps=%llu ext_bw_bps=%llu\n"
+ "right cam_bw_bps=%llu ext_bw_bps=%llu",
+ bw_config->usage_type,
+ bw_config->left_pix_vote.cam_bw_bps,
+ bw_config->left_pix_vote.ext_bw_bps,
+ bw_config->right_pix_vote.cam_bw_bps,
+ bw_config->right_pix_vote.ext_bw_bps);
+
+ list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
+ for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+ if (!hw_mgr_res->hw_res[i])
+ continue;
+
+ if (hw_mgr_res->res_id == CAM_ISP_HW_VFE_IN_CAMIF)
+ if (i == CAM_ISP_HW_SPLIT_LEFT) {
+ cam_bw_bps =
+ bw_config->left_pix_vote.cam_bw_bps;
+ ext_bw_bps =
+ bw_config->left_pix_vote.ext_bw_bps;
+ } else {
+ cam_bw_bps =
+ bw_config->right_pix_vote.cam_bw_bps;
+ ext_bw_bps =
+ bw_config->right_pix_vote.ext_bw_bps;
+ }
+ else if ((hw_mgr_res->res_id >= CAM_ISP_HW_VFE_IN_RDI0)
+ && (hw_mgr_res->res_id <=
+ CAM_ISP_HW_VFE_IN_RDI3)) {
+ uint32_t idx = hw_mgr_res->res_id -
+ CAM_ISP_HW_VFE_IN_RDI0;
+ if (idx >= bw_config->num_rdi)
+ continue;
+
+ cam_bw_bps =
+ bw_config->rdi_vote[idx].cam_bw_bps;
+ ext_bw_bps =
+ bw_config->rdi_vote[idx].ext_bw_bps;
+ } else
+ if (hw_mgr_res->hw_res[i]) {
+ CAM_ERR(CAM_ISP, "Invalid res_id %u",
+ hw_mgr_res->res_id);
+ rc = -EINVAL;
+ return rc;
+ }
+
+ hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
+ if (hw_intf && hw_intf->hw_ops.process_cmd) {
+ bw_upd_args.node_res =
+ hw_mgr_res->hw_res[i];
+
+ bw_upd_args.camnoc_bw_bytes = cam_bw_bps;
+ bw_upd_args.external_bw_bytes = ext_bw_bps;
+
+ rc = hw_intf->hw_ops.process_cmd(
+ hw_intf->hw_priv,
+ CAM_ISP_HW_CMD_BW_UPDATE,
+ &bw_upd_args,
+ sizeof(struct cam_vfe_bw_update_args));
+ if (rc)
+ CAM_ERR(CAM_ISP, "BW Update failed");
+ } else
+ CAM_WARN(CAM_ISP, "NULL hw_intf!");
+ }
+ }
+
+ return rc;
+}
+
static int cam_isp_packet_generic_blob_handler(void *user_data,
uint32_t blob_type, uint32_t blob_size, uint8_t *blob_data)
{
@@ -2139,6 +2315,26 @@
CAM_ERR(CAM_ISP, "HFR Update Failed");
}
break;
+ case CAM_ISP_GENERIC_BLOB_TYPE_CLOCK_CONFIG: {
+ struct cam_isp_clock_config *clock_config =
+ (struct cam_isp_clock_config *)blob_data;
+
+ rc = cam_isp_blob_clock_update(blob_type, blob_info,
+ clock_config, prepare);
+ if (rc)
+ CAM_ERR(CAM_ISP, "Clock Update Failed");
+ }
+ break;
+ case CAM_ISP_GENERIC_BLOB_TYPE_BW_CONFIG: {
+ struct cam_isp_bw_config *bw_config =
+ (struct cam_isp_bw_config *)blob_data;
+
+ rc = cam_isp_blob_bw_update(blob_type, blob_info,
+ bw_config, prepare);
+ if (rc)
+ CAM_ERR(CAM_ISP, "Bandwidth Update Failed");
+ }
+ break;
default:
CAM_WARN(CAM_ISP, "Invalid blob type %d", blob_type);
break;
@@ -2363,11 +2559,12 @@
static int cam_ife_mgr_process_recovery_cb(void *priv, void *data)
{
int32_t rc = 0;
- struct cam_hw_event_recovery_data *recovery_data = priv;
- struct cam_hw_start_args start_args;
- struct cam_ife_hw_mgr *ife_hw_mgr = NULL;
- uint32_t hw_mgr_priv;
- uint32_t i = 0;
+ struct cam_hw_event_recovery_data *recovery_data = data;
+ struct cam_hw_start_args start_args;
+ struct cam_hw_stop_args stop_args;
+ struct cam_ife_hw_mgr *ife_hw_mgr = priv;
+ struct cam_ife_hw_mgr_res *hw_mgr_res;
+ uint32_t i = 0;
uint32_t error_type = recovery_data->error_type;
struct cam_ife_hw_mgr_ctx *ctx = NULL;
@@ -2384,20 +2581,57 @@
kfree(recovery_data);
return 0;
}
+ /* stop resources here */
+ CAM_DBG(CAM_ISP, "STOP: Number of affected context: %d",
+ recovery_data->no_of_context);
+ for (i = 0; i < recovery_data->no_of_context; i++) {
+ stop_args.ctxt_to_hw_map =
+ recovery_data->affected_ctx[i];
+ rc = cam_ife_mgr_stop_hw_in_overflow(&stop_args);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "CTX stop failed(%d)", rc);
+ return rc;
+ }
+ }
- ctx = recovery_data->affected_ctx[0];
- ife_hw_mgr = ctx->hw_mgr;
+ CAM_DBG(CAM_ISP, "RESET: CSID PATH");
+ for (i = 0; i < recovery_data->no_of_context; i++) {
+ ctx = recovery_data->affected_ctx[i];
+ list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid,
+ list) {
+ rc = cam_ife_hw_mgr_reset_csid_res(hw_mgr_res);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Failed RESET (%d)",
+ hw_mgr_res->res_id);
+ return rc;
+ }
+ }
+ }
+
+ CAM_DBG(CAM_ISP, "RESET: Calling VFE reset");
for (i = 0; i < CAM_VFE_HW_NUM_MAX; i++) {
if (recovery_data->affected_core[i])
- rc = cam_ife_mgr_reset_hw(ife_hw_mgr, i);
+ cam_ife_mgr_reset_vfe_hw(ife_hw_mgr, i);
}
+ CAM_DBG(CAM_ISP, "START: Number of affected context: %d",
+ recovery_data->no_of_context);
+
for (i = 0; i < recovery_data->no_of_context; i++) {
- start_args.ctxt_to_hw_map =
- recovery_data->affected_ctx[i];
- rc = cam_ife_mgr_restart_hw(&hw_mgr_priv, &start_args);
+ ctx = recovery_data->affected_ctx[i];
+ start_args.ctxt_to_hw_map = ctx;
+
+ atomic_set(&ctx->overflow_pending, 0);
+
+ rc = cam_ife_mgr_restart_hw(&start_args);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "CTX start failed(%d)", rc);
+ return rc;
+ }
+ CAM_DBG(CAM_ISP, "Started resources rc (%d)", rc);
}
+ CAM_DBG(CAM_ISP, "Recovery Done rc (%d)", rc);
break;
@@ -2423,8 +2657,6 @@
struct crm_workq_task *task = NULL;
struct cam_hw_event_recovery_data *recovery_data = NULL;
- return 0;
-
recovery_data = kzalloc(sizeof(struct cam_hw_event_recovery_data),
GFP_ATOMIC);
if (!recovery_data)
@@ -2443,7 +2675,9 @@
}
task->process_cb = &cam_ife_mgr_process_recovery_cb;
- rc = cam_req_mgr_workq_enqueue_task(task, recovery_data,
+ task->payload = recovery_data;
+ rc = cam_req_mgr_workq_enqueue_task(task,
+ recovery_data->affected_ctx[0]->hw_mgr,
CRM_TASK_PRIORITY_0);
return rc;
@@ -2456,9 +2690,9 @@
* affected_core[]
* b. Return 0 i.e.SUCCESS
*/
-static int cam_ife_hw_mgr_match_hw_idx(
+static int cam_ife_hw_mgr_is_ctx_affected(
struct cam_ife_hw_mgr_ctx *ife_hwr_mgr_ctx,
- uint32_t *affected_core)
+ uint32_t *affected_core, uint32_t size)
{
int32_t rc = -EPERM;
@@ -2468,22 +2702,25 @@
CAM_DBG(CAM_ISP, "Enter:max_idx = %d", max_idx);
- while (i < max_idx) {
+ if ((max_idx >= CAM_IFE_HW_NUM_MAX) ||
+ (size > CAM_IFE_HW_NUM_MAX)) {
+ CAM_ERR(CAM_ISP, "invalid parameter = %d", max_idx);
+ return rc;
+ }
+
+ for (i = 0; i < max_idx; i++) {
if (affected_core[ife_hwr_mgr_ctx->base[i].idx])
rc = 0;
else {
ctx_affected_core_idx[j] = ife_hwr_mgr_ctx->base[i].idx;
j = j + 1;
}
-
- i = i + 1;
}
if (rc == 0) {
while (j) {
if (affected_core[ctx_affected_core_idx[j-1]] != 1)
affected_core[ctx_affected_core_idx[j-1]] = 1;
-
j = j - 1;
}
}
@@ -2499,7 +2736,7 @@
* d. For any dual VFE context, if copanion VFE is also serving
* other context it should also notify the CRM with fatal error
*/
-static int cam_ife_hw_mgr_handle_overflow(
+static int cam_ife_hw_mgr_process_overflow(
struct cam_ife_hw_mgr_ctx *curr_ife_hwr_mgr_ctx,
struct cam_isp_hw_error_event_data *error_event_data,
uint32_t curr_core_idx,
@@ -2509,12 +2746,10 @@
struct cam_ife_hw_mgr_ctx *ife_hwr_mgr_ctx = NULL;
cam_hw_event_cb_func ife_hwr_irq_err_cb;
struct cam_ife_hw_mgr *ife_hwr_mgr = NULL;
- uint32_t hw_mgr_priv = 1;
struct cam_hw_stop_args stop_args;
uint32_t i = 0;
CAM_DBG(CAM_ISP, "Enter");
- return 0;
if (!recovery_data) {
CAM_ERR(CAM_ISP, "recovery_data parameter is NULL",
@@ -2535,9 +2770,12 @@
* with this context
*/
CAM_DBG(CAM_ISP, "Calling match Hw idx");
- if (cam_ife_hw_mgr_match_hw_idx(ife_hwr_mgr_ctx, affected_core))
+ if (cam_ife_hw_mgr_is_ctx_affected(ife_hwr_mgr_ctx,
+ affected_core, CAM_IFE_HW_NUM_MAX))
continue;
+ atomic_set(&ife_hwr_mgr_ctx->overflow_pending, 1);
+
ife_hwr_irq_err_cb =
ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_ERROR];
@@ -2551,16 +2789,13 @@
ife_hwr_mgr_ctx;
/*
- * Stop the hw resources associated with this context
- * and call the error callback. In the call back function
- * corresponding ISP context will update CRM about fatal Error
+ * In the call back function corresponding ISP context
+ * will update CRM about fatal Error
*/
- if (!cam_ife_mgr_stop_hw_in_overflow(&hw_mgr_priv,
- &stop_args)) {
- CAM_DBG(CAM_ISP, "Calling Error handler CB");
- ife_hwr_irq_err_cb(ife_hwr_mgr_ctx->common.cb_priv,
- CAM_ISP_HW_EVENT_ERROR, error_event_data);
- }
+
+ ife_hwr_irq_err_cb(ife_hwr_mgr_ctx->common.cb_priv,
+ CAM_ISP_HW_EVENT_ERROR, error_event_data);
+
}
/* fill the affected_core in recovery data */
for (i = 0; i < CAM_IFE_HW_NUM_MAX; i++) {
@@ -2572,11 +2807,85 @@
return 0;
}
+static int cam_ife_hw_mgr_get_err_type(
+ void *handler_priv,
+ void *payload)
+{
+ struct cam_isp_resource_node *hw_res_l = NULL;
+ struct cam_isp_resource_node *hw_res_r = NULL;
+ struct cam_ife_hw_mgr_ctx *ife_hwr_mgr_ctx;
+ struct cam_vfe_top_irq_evt_payload *evt_payload;
+ struct cam_ife_hw_mgr_res *isp_ife_camif_res = NULL;
+ uint32_t status = 0;
+ uint32_t core_idx;
+
+ ife_hwr_mgr_ctx = handler_priv;
+ evt_payload = payload;
+
+ if (!evt_payload) {
+ CAM_ERR(CAM_ISP, "No payload");
+ return IRQ_HANDLED;
+ }
+
+ core_idx = evt_payload->core_index;
+ evt_payload->evt_id = CAM_ISP_HW_EVENT_ERROR;
+
+ list_for_each_entry(isp_ife_camif_res,
+ &ife_hwr_mgr_ctx->res_list_ife_src, list) {
+
+ if ((isp_ife_camif_res->res_type ==
+ CAM_IFE_HW_MGR_RES_UNINIT) ||
+ (isp_ife_camif_res->res_id != CAM_ISP_HW_VFE_IN_CAMIF))
+ continue;
+
+ hw_res_l = isp_ife_camif_res->hw_res[CAM_ISP_HW_SPLIT_LEFT];
+ hw_res_r = isp_ife_camif_res->hw_res[CAM_ISP_HW_SPLIT_RIGHT];
+
+ CAM_DBG(CAM_ISP, "is_dual_vfe ? = %d\n",
+ isp_ife_camif_res->is_dual_vfe);
+
+ /* ERROR check for Left VFE */
+ if (!hw_res_l) {
+ CAM_DBG(CAM_ISP, "VFE(L) Device is NULL");
+ break;
+ }
+
+ CAM_DBG(CAM_ISP, "core id= %d, HW id %d", core_idx,
+ hw_res_l->hw_intf->hw_idx);
+
+ if (core_idx == hw_res_l->hw_intf->hw_idx) {
+ status = hw_res_l->bottom_half_handler(
+ hw_res_l, evt_payload);
+ }
+
+ if (status)
+ break;
+
+ /* ERROR check for Right VFE */
+ if (!hw_res_r) {
+ CAM_DBG(CAM_ISP, "VFE(R) Device is NULL");
+ continue;
+ }
+ CAM_DBG(CAM_ISP, "core id= %d, HW id %d", core_idx,
+ hw_res_r->hw_intf->hw_idx);
+
+ if (core_idx == hw_res_r->hw_intf->hw_idx) {
+ status = hw_res_r->bottom_half_handler(
+ hw_res_r, evt_payload);
+ }
+
+ if (status)
+ break;
+ }
+ CAM_DBG(CAM_ISP, "Exit (status = %d)!", status);
+ return status;
+}
+
static int cam_ife_hw_mgr_handle_camif_error(
void *handler_priv,
void *payload)
{
- int32_t rc = 0;
+ int32_t error_status = CAM_ISP_HW_ERROR_NONE;
uint32_t core_idx;
struct cam_ife_hw_mgr_ctx *ife_hwr_mgr_ctx;
struct cam_vfe_top_irq_evt_payload *evt_payload;
@@ -2587,17 +2896,22 @@
evt_payload = payload;
core_idx = evt_payload->core_index;
- rc = evt_payload->error_type;
- CAM_DBG(CAM_ISP, "Enter: error_type (%d)", evt_payload->error_type);
- switch (evt_payload->error_type) {
+ error_status = cam_ife_hw_mgr_get_err_type(ife_hwr_mgr_ctx,
+ evt_payload);
+
+ if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
+ return error_status;
+
+ switch (error_status) {
case CAM_ISP_HW_ERROR_OVERFLOW:
case CAM_ISP_HW_ERROR_P2I_ERROR:
case CAM_ISP_HW_ERROR_VIOLATION:
+ CAM_DBG(CAM_ISP, "Enter: error_type (%d)", error_status);
error_event_data.error_type =
CAM_ISP_HW_ERROR_OVERFLOW;
- cam_ife_hw_mgr_handle_overflow(ife_hwr_mgr_ctx,
+ cam_ife_hw_mgr_process_overflow(ife_hwr_mgr_ctx,
&error_event_data,
core_idx,
&recovery_data);
@@ -2607,12 +2921,10 @@
cam_ife_hw_mgr_do_error_recovery(&recovery_data);
break;
default:
- CAM_DBG(CAM_ISP, "None error. Error type (%d)",
- evt_payload->error_type);
+ CAM_DBG(CAM_ISP, "None error (%d)", error_status);
}
- CAM_DBG(CAM_ISP, "Exit (%d)", rc);
- return rc;
+ return error_status;
}
/*
@@ -2677,6 +2989,8 @@
rup_status = hw_res->bottom_half_handler(
hw_res, evt_payload);
}
+ if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
+ break;
if (!rup_status) {
ife_hwr_irq_rup_cb(
@@ -2708,6 +3022,8 @@
rup_status = hw_res->bottom_half_handler(
hw_res, evt_payload);
+ if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
+ break;
if (!rup_status) {
/* Send the Reg update hw event */
ife_hwr_irq_rup_cb(
@@ -2829,6 +3145,9 @@
if (core_idx == hw_res_l->hw_intf->hw_idx) {
epoch_status = hw_res_l->bottom_half_handler(
hw_res_l, evt_payload);
+ if (atomic_read(
+ &ife_hwr_mgr_ctx->overflow_pending))
+ break;
if (!epoch_status)
ife_hwr_irq_epoch_cb(
ife_hwr_mgr_ctx->common.cb_priv,
@@ -2876,6 +3195,8 @@
core_index1,
evt_payload->evt_id);
+ if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
+ break;
if (!rc)
ife_hwr_irq_epoch_cb(
ife_hwr_mgr_ctx->common.cb_priv,
@@ -2936,6 +3257,8 @@
if (core_idx == hw_res_l->hw_intf->hw_idx) {
sof_status = hw_res_l->bottom_half_handler(hw_res_l,
evt_payload);
+ if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
+ break;
if (!sof_status) {
cam_ife_mgr_cmd_get_sof_timestamp(
ife_hwr_mgr_ctx,
@@ -2991,6 +3314,9 @@
core_index0 = hw_res_l->hw_intf->hw_idx;
core_index1 = hw_res_r->hw_intf->hw_idx;
+ if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
+ break;
+
rc = cam_ife_hw_mgr_check_irq_for_dual_vfe(ife_hwr_mgr_ctx,
core_index0, core_index1, evt_payload->evt_id);
@@ -3149,6 +3475,9 @@
if (core_idx == hw_res_l->hw_intf->hw_idx) {
eof_status = hw_res_l->bottom_half_handler(
hw_res_l, evt_payload);
+ if (atomic_read(
+ &ife_hwr_mgr_ctx->overflow_pending))
+ break;
if (!eof_status)
ife_hwr_irq_eof_cb(
ife_hwr_mgr_ctx->common.cb_priv,
@@ -3193,6 +3522,9 @@
core_index1,
evt_payload->evt_id);
+ if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
+ break;
+
if (!rc)
ife_hwr_irq_eof_cb(
ife_hwr_mgr_ctx->common.cb_priv,
@@ -3237,6 +3569,8 @@
ife_hwr_irq_wm_done_cb =
ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_DONE];
+ evt_payload->evt_id = CAM_ISP_HW_EVENT_DONE;
+
for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
isp_ife_out_res = &ife_hwr_mgr_ctx->res_list_ife_out[i];
@@ -3293,6 +3627,8 @@
buf_done_event_data.resource_handle[0] =
isp_ife_out_res->res_id;
+ if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
+ break;
/* Report for Successful buf_done event if any */
if (buf_done_event_data.num_handles > 0 &&
ife_hwr_irq_wm_done_cb) {
@@ -3330,7 +3666,7 @@
* the affected context and any successful buf_done event is not
* reported.
*/
- rc = cam_ife_hw_mgr_handle_overflow(ife_hwr_mgr_ctx,
+ rc = cam_ife_hw_mgr_process_overflow(ife_hwr_mgr_ctx,
&error_event_data, evt_payload->core_index,
&recovery_data);
@@ -3369,8 +3705,6 @@
evt_payload->irq_reg_val[5]);
CAM_DBG(CAM_ISP, "bus_irq_dual_comp_owrt: = %x",
evt_payload->irq_reg_val[6]);
-
- CAM_DBG(CAM_ISP, "Calling Buf_done");
/* WM Done */
return cam_ife_hw_mgr_handle_buf_done_for_hw_res(ife_hwr_mgr_ctx,
evt_payload_priv);
@@ -3401,8 +3735,15 @@
* for this context it needs to be handled remaining
* interrupts are ignored.
*/
- rc = cam_ife_hw_mgr_handle_camif_error(ife_hwr_mgr_ctx,
- evt_payload_priv);
+ if (g_ife_hw_mgr.debug_cfg.enable_recovery) {
+ CAM_DBG(CAM_ISP, "IFE Mgr recovery is enabled");
+ rc = cam_ife_hw_mgr_handle_camif_error(ife_hwr_mgr_ctx,
+ evt_payload_priv);
+ } else {
+ CAM_DBG(CAM_ISP, "recovery is not enabled");
+ rc = 0;
+ }
+
if (rc) {
CAM_ERR(CAM_ISP, "Encountered Error (%d), ignoring other irqs",
rc);
@@ -3501,6 +3842,15 @@
goto err;
}
+ if (!debugfs_create_u32("enable_recovery",
+ 0644,
+ g_ife_hw_mgr.debug_cfg.dentry,
+ &g_ife_hw_mgr.debug_cfg.enable_recovery)) {
+ CAM_ERR(CAM_ISP, "failed to create enable_recovery");
+ goto err;
+ }
+ g_ife_hw_mgr.debug_cfg.enable_recovery = 0;
+
return 0;
err:
@@ -3700,4 +4050,3 @@
g_ife_hw_mgr.mgr_common.img_iommu_hdl = -1;
return rc;
}
-
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
index 1c35e5d..4d26138 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
@@ -85,11 +85,13 @@
*
* @dentry: Debugfs entry
* @csid_debug: csid debug information
+ * @enable_recovery enable recovery
*
*/
struct cam_ife_hw_mgr_debug {
- struct dentry *dentry;
- uint64_t csid_debug;
+ struct dentry *dentry;
+ uint64_t csid_debug;
+ uint32_t enable_recovery;
};
/**
@@ -171,6 +173,7 @@
* @ife_csid_dev_caps csid device capability stored per core
* @ife_dev_caps ife device capability per core
* @work q work queue for IFE hw manager
+ * @debug_cfg debug configuration
*/
struct cam_ife_hw_mgr {
struct cam_isp_hw_mgr mgr_common;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
index 876a540..3606af9 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
@@ -97,6 +97,8 @@
struct cam_ife_hw_mgr_res *hw_mgr_res;
struct cam_isp_resource_node *res;
struct cam_isp_hw_dual_isp_update_args dual_isp_update_args;
+ uint32_t outport_id;
+ uint32_t ports_plane_idx;
size_t len = 0;
uint32_t *cpu_addr;
uint32_t i, j;
@@ -113,6 +115,14 @@
dual_config = (struct cam_isp_dual_config *)cpu_addr;
for (i = 0; i < dual_config->num_ports; i++) {
+
+ if (i >= CAM_ISP_IFE_OUT_RES_MAX) {
+ CAM_ERR(CAM_UTIL,
+ "failed update for i:%d > size_isp_out:%d",
+ i, size_isp_out);
+ return -EINVAL;
+ }
+
hw_mgr_res = &res_list_isp_out[i];
for (j = 0; j < CAM_ISP_HW_SPLIT_MAX; j++) {
if (!hw_mgr_res->hw_res[j])
@@ -122,6 +132,20 @@
continue;
res = hw_mgr_res->hw_res[j];
+
+ if (res->res_id < CAM_ISP_IFE_OUT_RES_BASE ||
+ res->res_id >= CAM_ISP_IFE_OUT_RES_MAX)
+ continue;
+
+ outport_id = res->res_id & 0xFF;
+
+ ports_plane_idx = (j * (dual_config->num_ports *
+ CAM_PACKET_MAX_PLANES)) +
+ (outport_id * CAM_PACKET_MAX_PLANES);
+
+ if (dual_config->stripes[ports_plane_idx].port_id == 0)
+ continue;
+
dual_isp_update_args.split_id = j;
dual_isp_update_args.res = res;
dual_isp_update_args.dual_cfg = dual_config;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c
index 4a7eff8..8863275 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c
@@ -261,6 +261,15 @@
*tasklet_info = NULL;
}
+static void cam_tasklet_flush(void *tasklet_info)
+{
+ unsigned long data;
+ struct cam_tasklet_info *tasklet = tasklet_info;
+
+ data = (unsigned long)tasklet;
+ cam_tasklet_action(data);
+}
+
int cam_tasklet_start(void *tasklet_info)
{
struct cam_tasklet_info *tasklet = tasklet_info;
@@ -290,6 +299,7 @@
{
struct cam_tasklet_info *tasklet = tasklet_info;
+ cam_tasklet_flush(tasklet);
atomic_set(&tasklet->tasklet_active, 0);
tasklet_disable(&tasklet->tasklet);
}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
index deef41f..07217f5 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
@@ -392,6 +392,7 @@
* for RDI, set mode to none
* @master_idx: For Slave reservation, Give master IFE instance Index.
* Slave will synchronize with master Start and stop operations
+ * @clk_rate Clock rate
*
*/
struct cam_ife_csid_path_cfg {
@@ -409,6 +410,7 @@
uint32_t height;
enum cam_isp_hw_sync_mode sync_mode;
uint32_t master_idx;
+ uint64_t clk_rate;
};
/**
@@ -432,6 +434,7 @@
* @csid_rdin_reset_complete: rdi n completion
* @csid_debug: csid debug information to enable the SOT, EOT,
* SOF, EOF, measure etc in the csid hw
+ * @clk_rate Clock rate
*
*/
struct cam_ife_csid_hw {
@@ -452,6 +455,7 @@
struct completion csid_ipp_complete;
struct completion csid_rdin_complete[CAM_IFE_CSID_RDI_MAX];
uint64_t csid_debug;
+ uint64_t clk_rate;
};
int cam_ife_csid_hw_probe_init(struct cam_hw_intf *csid_hw_intf,
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
index c81e6db..257a5ac 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
@@ -90,6 +90,8 @@
CAM_ISP_HW_CMD_GET_HFR_UPDATE,
CAM_ISP_HW_CMD_GET_SECURE_MODE,
CAM_ISP_HW_CMD_STRIPE_UPDATE,
+ CAM_ISP_HW_CMD_CLOCK_UPDATE,
+ CAM_ISP_HW_CMD_BW_UPDATE,
CAM_ISP_HW_CMD_MAX,
};
@@ -110,6 +112,7 @@
* @tasklet_info: Tasklet structure that will be used to
* schedule IRQ events related to this resource
* @irq_handle: handle returned on subscribing for IRQ event
+ * @rdi_only_ctx: resouce belong to rdi only context or not
* @init: function pointer to init the HW resource
* @deinit: function pointer to deinit the HW resource
* @start: function pointer to start the HW resource
@@ -129,6 +132,7 @@
void *cdm_ops;
void *tasklet_info;
int irq_handle;
+ int rdi_only_ctx;
int (*init)(struct cam_isp_resource_node *rsrc_node,
void *init_args, uint32_t arg_size);
@@ -192,6 +196,8 @@
void *data;
struct cam_isp_hw_get_wm_update *wm_update;
struct cam_isp_port_hfr_config *hfr_update;
+ struct cam_isp_clock_config *clock_update;
+ struct cam_isp_bw_config *bw_update;
};
};
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
index e8a5de5..b771ec6 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
@@ -70,6 +70,12 @@
CAM_IFE_BUS_IRQ_REGISTERS_MAX,
};
+enum cam_vfe_reset_type {
+ CAM_VFE_HW_RESET_HW_AND_REG,
+ CAM_VFE_HW_RESET_HW,
+ CAM_VFE_HW_RESET_MAX,
+};
+
/*
* struct cam_vfe_hw_get_hw_cap:
*
@@ -155,6 +161,31 @@
};
/*
+ * struct cam_vfe_clock_update_args:
+ *
+ * @node_res: Resource to get the time stamp
+ * @clk_rate: Clock rate requested
+ */
+struct cam_vfe_clock_update_args {
+ struct cam_isp_resource_node *node_res;
+ uint64_t clk_rate;
+};
+
+/*
+ * struct cam_vfe_bw_update_args:
+ *
+ * @node_res: Resource to get the time stamp
+ * @camnoc_bw_bytes: Bandwidth vote request for CAMNOC
+ * @external_bw_bytes: Bandwidth vote request from CAMNOC
+ * out to the rest of the path-to-DDR
+ */
+struct cam_vfe_bw_update_args {
+ struct cam_isp_resource_node *node_res;
+ uint64_t camnoc_bw_bytes;
+ uint64_t external_bw_bytes;
+};
+
+/*
* struct cam_vfe_top_irq_evt_payload:
*
* @Brief: This structure is used to save payload for IRQ
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
index 7a26370..187aeaf 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
@@ -25,7 +25,6 @@
#include "cam_debug_util.h"
static const char drv_name[] = "vfe";
-
static uint32_t irq_reg_offset[CAM_IFE_IRQ_REGISTERS_MAX] = {
0x0000006C,
0x00000070,
@@ -34,6 +33,11 @@
static uint32_t camif_irq_reg_mask[CAM_IFE_IRQ_REGISTERS_MAX] = {
0x0003FD1F,
+ 0x00000000,
+};
+
+static uint32_t camif_irq_err_reg_mask[CAM_IFE_IRQ_REGISTERS_MAX] = {
+ 0x00000000,
0x0FFF7EBC,
};
@@ -83,6 +87,7 @@
}
spin_lock_irqsave(&vfe_core_info->spin_lock, flags);
+ (*evt_payload)->error_type = 0;
list_add_tail(&(*evt_payload)->list, &vfe_core_info->free_payload_list);
spin_unlock_irqrestore(&vfe_core_info->spin_lock, flags);
@@ -143,6 +148,60 @@
return rc;
}
+static int cam_vfe_irq_err_top_half(uint32_t evt_id,
+ struct cam_irq_th_payload *th_payload)
+{
+ int32_t rc;
+ int i;
+ struct cam_vfe_irq_handler_priv *handler_priv;
+ struct cam_vfe_top_irq_evt_payload *evt_payload;
+ struct cam_vfe_hw_core_info *core_info;
+
+ CAM_DBG(CAM_ISP, "IRQ status_0 = %x, IRQ status_1 = %x",
+ th_payload->evt_status_arr[0], th_payload->evt_status_arr[1]);
+
+ handler_priv = th_payload->handler_priv;
+ core_info = handler_priv->core_info;
+ /*
+ * need to handle overflow condition here, otherwise irq storm
+ * will block everything
+ */
+
+ if (th_payload->evt_status_arr[1]) {
+ CAM_ERR(CAM_ISP, "IRQ status_1: %x, Masking all interrupts",
+ th_payload->evt_status_arr[1]);
+ cam_irq_controller_disable_irq(core_info->vfe_irq_controller,
+ core_info->irq_err_handle);
+ }
+
+ rc = cam_vfe_get_evt_payload(handler_priv->core_info, &evt_payload);
+ if (rc) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "No tasklet_cmd is free in queue\n");
+ return rc;
+ }
+
+ cam_isp_hw_get_timestamp(&evt_payload->ts);
+
+ evt_payload->core_index = handler_priv->core_index;
+ evt_payload->core_info = handler_priv->core_info;
+ evt_payload->evt_id = evt_id;
+
+ for (i = 0; i < th_payload->num_registers; i++)
+ evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+ for (; i < CAM_IFE_IRQ_REGISTERS_MAX; i++) {
+ evt_payload->irq_reg_val[i] = cam_io_r(handler_priv->mem_base +
+ irq_reg_offset[i]);
+ }
+
+ CAM_DBG(CAM_ISP, "Violation status = %x", evt_payload->irq_reg_val[2]);
+
+ th_payload->evt_payload_priv = evt_payload;
+
+ return rc;
+}
+
int cam_vfe_init_hw(void *hw_priv, void *init_hw_args, uint32_t arg_size)
{
struct cam_hw_info *vfe_hw = hw_priv;
@@ -150,6 +209,8 @@
struct cam_vfe_hw_core_info *core_info = NULL;
struct cam_isp_resource_node *isp_res = NULL;
int rc = 0;
+ uint32_t reset_core_args =
+ CAM_VFE_HW_RESET_HW_AND_REG;
CAM_DBG(CAM_ISP, "Enter");
if (!hw_priv) {
@@ -190,7 +251,7 @@
CAM_DBG(CAM_ISP, "Enable soc done");
/* Do HW Reset */
- rc = cam_vfe_reset(hw_priv, NULL, 0);
+ rc = cam_vfe_reset(hw_priv, &reset_core_args, sizeof(uint32_t));
if (rc) {
CAM_ERR(CAM_ISP, "Reset Failed rc=%d", rc);
goto deinint_vfe_res;
@@ -203,7 +264,9 @@
goto deinint_vfe_res;
}
- return 0;
+ vfe_hw->hw_state = CAM_HW_STATE_POWER_UP;
+ return rc;
+
deinint_vfe_res:
if (isp_res && isp_res->deinit)
isp_res->deinit(isp_res, NULL, 0);
@@ -306,7 +369,8 @@
reinit_completion(&vfe_hw->hw_complete);
CAM_DBG(CAM_ISP, "calling RESET");
- core_info->vfe_top->hw_ops.reset(core_info->vfe_top->top_priv, NULL, 0);
+ core_info->vfe_top->hw_ops.reset(core_info->vfe_top->top_priv,
+ reset_core_args, arg_size);
CAM_DBG(CAM_ISP, "waiting for vfe reset complete");
/* Wait for Completion or Timeout of 500ms */
rc = wait_for_completion_timeout(&vfe_hw->hw_complete, 500);
@@ -333,20 +397,37 @@
time_stamp->mono_time.tv_usec = ts.tv_nsec/1000;
}
-
-int cam_vfe_irq_top_half(uint32_t evt_id,
+static int cam_vfe_irq_top_half(uint32_t evt_id,
struct cam_irq_th_payload *th_payload)
{
int32_t rc;
int i;
struct cam_vfe_irq_handler_priv *handler_priv;
struct cam_vfe_top_irq_evt_payload *evt_payload;
+ struct cam_vfe_hw_core_info *core_info;
handler_priv = th_payload->handler_priv;
CAM_DBG(CAM_ISP, "IRQ status_0 = %x", th_payload->evt_status_arr[0]);
CAM_DBG(CAM_ISP, "IRQ status_1 = %x", th_payload->evt_status_arr[1]);
+ /*
+ * need to handle non-recoverable condition here, otherwise irq storm
+ * will block everything.
+ */
+ if (th_payload->evt_status_arr[0] & 0x3FC00) {
+ CAM_ERR(CAM_ISP,
+ "Encountered Error Irq_status0=0x%x Status1=0x%x",
+ th_payload->evt_status_arr[0],
+ th_payload->evt_status_arr[1]);
+ CAM_ERR(CAM_ISP,
+ "Stopping further IRQ processing from this HW index=%d",
+ handler_priv->core_index);
+ cam_io_w(0, handler_priv->mem_base + 0x60);
+ cam_io_w(0, handler_priv->mem_base + 0x5C);
+ return 0;
+ }
+
rc = cam_vfe_get_evt_payload(handler_priv->core_info, &evt_payload);
if (rc) {
CAM_ERR_RATE_LIMIT(CAM_ISP,
@@ -354,6 +435,7 @@
return rc;
}
+ core_info = handler_priv->core_info;
cam_isp_hw_get_timestamp(&evt_payload->ts);
evt_payload->core_index = handler_priv->core_index;
@@ -369,22 +451,6 @@
}
CAM_DBG(CAM_ISP, "Violation status = %x", evt_payload->irq_reg_val[2]);
- /*
- * need to handle overflow condition here, otherwise irq storm
- * will block everything.
- */
- if (evt_payload->irq_reg_val[1]) {
- CAM_ERR(CAM_ISP,
- "Encountered Error Irq_status1=0x%x. Stopping further IRQ processing from this HW",
- evt_payload->irq_reg_val[1]);
- CAM_ERR(CAM_ISP, "Violation status = %x",
- evt_payload->irq_reg_val[2]);
- cam_io_w(0, handler_priv->mem_base + 0x60);
- cam_io_w(0, handler_priv->mem_base + 0x5C);
-
- evt_payload->error_type = CAM_ISP_HW_ERROR_OVERFLOW;
- }
-
th_payload->evt_payload_priv = evt_payload;
CAM_DBG(CAM_ISP, "Exit");
@@ -465,7 +531,7 @@
struct cam_vfe_hw_core_info *core_info = NULL;
struct cam_hw_info *vfe_hw = hw_priv;
struct cam_isp_resource_node *isp_res;
- int rc = -ENODEV;
+ int rc = 0;
if (!hw_priv || !start_args ||
(arg_size != sizeof(struct cam_isp_resource_node))) {
@@ -475,35 +541,72 @@
core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
isp_res = (struct cam_isp_resource_node *)start_args;
+ core_info->tasklet_info = isp_res->tasklet_info;
mutex_lock(&vfe_hw->hw_mutex);
if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_IN) {
- if (isp_res->res_id == CAM_ISP_HW_VFE_IN_CAMIF)
- isp_res->irq_handle = cam_irq_controller_subscribe_irq(
- core_info->vfe_irq_controller,
- CAM_IRQ_PRIORITY_1,
- camif_irq_reg_mask, &core_info->irq_payload,
- cam_vfe_irq_top_half, cam_ife_mgr_do_tasklet,
- isp_res->tasklet_info, cam_tasklet_enqueue_cmd);
- else
- isp_res->irq_handle = cam_irq_controller_subscribe_irq(
- core_info->vfe_irq_controller,
- CAM_IRQ_PRIORITY_1,
- rdi_irq_reg_mask, &core_info->irq_payload,
- cam_vfe_irq_top_half, cam_ife_mgr_do_tasklet,
- isp_res->tasklet_info, cam_tasklet_enqueue_cmd);
+ if (isp_res->res_id == CAM_ISP_HW_VFE_IN_CAMIF) {
+ isp_res->irq_handle =
+ cam_irq_controller_subscribe_irq(
+ core_info->vfe_irq_controller,
+ CAM_IRQ_PRIORITY_1,
+ camif_irq_reg_mask,
+ &core_info->irq_payload,
+ cam_vfe_irq_top_half,
+ cam_ife_mgr_do_tasklet,
+ isp_res->tasklet_info,
+ cam_tasklet_enqueue_cmd);
+ if (isp_res->irq_handle < 1)
+ rc = -ENOMEM;
+ } else if (isp_res->rdi_only_ctx) {
+ isp_res->irq_handle =
+ cam_irq_controller_subscribe_irq(
+ core_info->vfe_irq_controller,
+ CAM_IRQ_PRIORITY_1,
+ rdi_irq_reg_mask,
+ &core_info->irq_payload,
+ cam_vfe_irq_top_half,
+ cam_ife_mgr_do_tasklet,
+ isp_res->tasklet_info,
+ cam_tasklet_enqueue_cmd);
+ if (isp_res->irq_handle < 1)
+ rc = -ENOMEM;
+ }
- if (isp_res->irq_handle > 0)
+ if (rc == 0) {
rc = core_info->vfe_top->hw_ops.start(
core_info->vfe_top->top_priv, isp_res,
sizeof(struct cam_isp_resource_node));
- else
+ if (rc)
+ CAM_ERR(CAM_ISP, "Start failed. type:%d",
+ isp_res->res_type);
+ } else {
CAM_ERR(CAM_ISP,
"Error! subscribe irq controller failed");
+ }
} else if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_OUT) {
rc = core_info->vfe_bus->hw_ops.start(isp_res, NULL, 0);
} else {
CAM_ERR(CAM_ISP, "Invalid res type:%d", isp_res->res_type);
+ rc = -EFAULT;
+ }
+
+ if (!core_info->irq_err_handle) {
+ core_info->irq_err_handle =
+ cam_irq_controller_subscribe_irq(
+ core_info->vfe_irq_controller,
+ CAM_IRQ_PRIORITY_0,
+ camif_irq_err_reg_mask,
+ &core_info->irq_payload,
+ cam_vfe_irq_err_top_half,
+ cam_ife_mgr_do_tasklet,
+ core_info->tasklet_info,
+ cam_tasklet_enqueue_cmd);
+ if (core_info->irq_err_handle < 1) {
+ CAM_ERR(CAM_ISP, "Error handle subscribe failure");
+ rc = -ENOMEM;
+ core_info->irq_err_handle = 0;
+ }
}
mutex_unlock(&vfe_hw->hw_mutex);
@@ -534,12 +637,20 @@
rc = core_info->vfe_top->hw_ops.stop(
core_info->vfe_top->top_priv, isp_res,
sizeof(struct cam_isp_resource_node));
+
} else if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_OUT) {
rc = core_info->vfe_bus->hw_ops.stop(isp_res, NULL, 0);
} else {
CAM_ERR(CAM_ISP, "Invalid res type:%d", isp_res->res_type);
}
+ if (core_info->irq_err_handle) {
+ cam_irq_controller_unsubscribe_irq(
+ core_info->vfe_irq_controller,
+ core_info->irq_err_handle);
+ core_info->irq_err_handle = 0;
+ }
+
mutex_unlock(&vfe_hw->hw_mutex);
return rc;
@@ -576,10 +687,11 @@
switch (cmd_type) {
case CAM_ISP_HW_CMD_GET_CHANGE_BASE:
case CAM_ISP_HW_CMD_GET_REG_UPDATE:
+ case CAM_ISP_HW_CMD_CLOCK_UPDATE:
+ case CAM_ISP_HW_CMD_BW_UPDATE:
rc = core_info->vfe_top->hw_ops.process_cmd(
core_info->vfe_top->top_priv, cmd_type, cmd_args,
arg_size);
-
break;
case CAM_ISP_HW_CMD_GET_BUF_UPDATE:
case CAM_ISP_HW_CMD_GET_HFR_UPDATE:
@@ -699,4 +811,3 @@
return rc;
}
-
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.h
index ee29e1cf..0674a6ad 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.h
@@ -50,12 +50,13 @@
void *vfe_irq_controller;
struct cam_vfe_top *vfe_top;
struct cam_vfe_bus *vfe_bus;
-
+ void *tasklet_info;
struct cam_vfe_top_irq_evt_payload evt_payload[CAM_VFE_EVT_MAX];
struct list_head free_payload_list;
struct cam_vfe_irq_handler_priv irq_payload;
uint32_t cpas_handle;
int irq_handle;
+ int irq_err_handle;
spinlock_t spin_lock;
};
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
index ed5e120..0f93664 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
@@ -15,6 +15,30 @@
#include "cam_vfe_soc.h"
#include "cam_debug_util.h"
+static bool cam_vfe_cpas_cb(uint32_t client_handle, void *userdata,
+ struct cam_cpas_irq_data *irq_data)
+{
+ bool error_handled = false;
+
+ if (!irq_data)
+ return error_handled;
+
+ switch (irq_data->irq_type) {
+ case CAM_CAMNOC_IRQ_IFE02_UBWC_ENCODE_ERROR:
+ case CAM_CAMNOC_IRQ_IFE13_UBWC_ENCODE_ERROR:
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "IFE UBWC Encode error type=%d status=%x",
+ irq_data->irq_type,
+ irq_data->u.enc_err.encerr_status.value);
+ error_handled = true;
+ break;
+ default:
+ break;
+ }
+
+ return error_handled;
+}
+
static int cam_vfe_get_dt_properties(struct cam_hw_soc_info *soc_info)
{
int rc = 0;
@@ -95,6 +119,8 @@
CAM_HW_IDENTIFIER_LENGTH);
cpas_register_param.cell_index = soc_info->index;
cpas_register_param.dev = soc_info->dev;
+ cpas_register_param.cam_cpas_client_cb = cam_vfe_cpas_cb;
+ cpas_register_param.userdata = soc_info;
rc = cam_cpas_register_client(&cpas_register_param);
if (rc) {
CAM_ERR(CAM_ISP, "CPAS registration failed rc=%d", rc);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h
index fb6ea6c..a4ba2e1 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h
@@ -77,6 +77,8 @@
.epoch0_irq_mask = 0x00000004,
.reg_update_irq_mask = 0x00000010,
.eof_irq_mask = 0x00000002,
+ .error_irq_mask0 = 0x0003FC00,
+ .error_irq_mask1 = 0x0FFF7E80,
};
struct cam_vfe_top_ver2_reg_offset_module_ctrl lens_170_reg = {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
index 13c477d..e94bb62 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
@@ -263,7 +263,7 @@
CAM_ERR(CAM_ISP, "No payload to put");
return -EINVAL;
}
-
+ (*evt_payload)->error_type = 0;
ife_irq_regs = (*evt_payload)->irq_reg_val;
status_reg0 = ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS0];
status_reg1 = ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS1];
@@ -992,6 +992,9 @@
rsrc_data->width = rsrc_data->width * 2;
rsrc_data->stride = rsrc_data->width;
rsrc_data->en_cfg = 0x1;
+
+ /* LSB aligned */
+ rsrc_data->pack_fmt |= 0x10;
} else {
/* Write master 5-6 DS ports, 10 PDAF */
uint32_t align_width;
@@ -1143,6 +1146,8 @@
common_data->mem_base + common_data->common_reg->sw_reset);
wm_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+ rsrc_data->init_cfg_done = false;
+ rsrc_data->hfr_cfg_done = false;
return rc;
}
@@ -2150,6 +2155,7 @@
if (vfe_out->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE ||
vfe_out->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
+ CAM_DBG(CAM_ISP, "vfe_out res_state is %d", vfe_out->res_state);
return rc;
}
@@ -2297,12 +2303,15 @@
struct cam_irq_th_payload *th_payload)
{
int i = 0;
+ struct cam_vfe_bus_ver2_priv *bus_priv = th_payload->handler_priv;
CAM_ERR_RATE_LIMIT(CAM_ISP, "Bus Err IRQ");
for (i = 0; i < th_payload->num_registers; i++) {
CAM_ERR_RATE_LIMIT(CAM_ISP, "IRQ_Status%d: 0x%x", i,
th_payload->evt_status_arr[i]);
}
+ cam_irq_controller_disable_irq(bus_priv->common_data.bus_irq_controller,
+ bus_priv->error_irq_handle);
/* Returning error stops from enqueuing bottom half */
return -EFAULT;
@@ -3112,4 +3121,3 @@
return rc;
}
-
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/Makefile
index ac8b497..9a2c12c 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/Makefile
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/Makefile
@@ -1,11 +1,13 @@
ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils/
ccflags-y += -Idrivers/media/platform/msm/camera/cam_cdm/
ccflags-y += -Idrivers/media/platform/msm/camera/cam_core/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include
ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller
ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include
ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw
ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/include
ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw
obj-$(CONFIG_SPECTRA_CAMERA) += cam_vfe_top.o cam_vfe_top_ver2.o cam_vfe_camif_ver2.o cam_vfe_rdi.o
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
index cd90b57..9848454 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
@@ -329,6 +329,7 @@
struct cam_vfe_mux_camif_data *camif_priv;
struct cam_vfe_top_irq_evt_payload *payload;
uint32_t irq_status0;
+ uint32_t irq_status1;
if (!handler_priv || !evt_payload_priv)
return ret;
@@ -337,6 +338,7 @@
camif_priv = camif_node->res_priv;
payload = evt_payload_priv;
irq_status0 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS0];
+ irq_status1 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS1];
CAM_DBG(CAM_ISP, "event ID:%d", payload->evt_id);
CAM_DBG(CAM_ISP, "irq_status_0 = %x", irq_status0);
@@ -367,6 +369,15 @@
ret = CAM_VFE_IRQ_STATUS_SUCCESS;
}
break;
+ case CAM_ISP_HW_EVENT_ERROR:
+ if (irq_status1 & camif_priv->reg_data->error_irq_mask1) {
+ CAM_DBG(CAM_ISP, "Received ERROR\n");
+ ret = CAM_ISP_HW_ERROR_OVERFLOW;
+ cam_vfe_put_evt_payload(payload->core_info, &payload);
+ } else {
+ ret = CAM_ISP_HW_ERROR_NONE;
+ }
+ break;
default:
break;
}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h
index 21058ac..4a73bd7 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h
@@ -61,6 +61,8 @@
uint32_t epoch0_irq_mask;
uint32_t reg_update_irq_mask;
uint32_t eof_irq_mask;
+ uint32_t error_irq_mask0;
+ uint32_t error_irq_mask1;
};
struct cam_vfe_camif_ver2_hw_info {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
index 2c35046..1b8cdf3 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
@@ -17,6 +17,11 @@
#include "cam_vfe_top.h"
#include "cam_vfe_top_ver2.h"
#include "cam_debug_util.h"
+#include "cam_cpas_api.h"
+#include "cam_vfe_soc.h"
+
+#define CAM_VFE_HW_RESET_HW_AND_REG_VAL 0x00003F9F
+#define CAM_VFE_HW_RESET_HW_VAL 0x00003F87
struct cam_vfe_top_ver2_common_data {
struct cam_hw_soc_info *soc_info;
@@ -26,8 +31,11 @@
struct cam_vfe_top_ver2_priv {
struct cam_vfe_top_ver2_common_data common_data;
- struct cam_vfe_camif *camif;
struct cam_isp_resource_node mux_rsrc[CAM_VFE_TOP_VER2_MUX_MAX];
+ unsigned long hw_clk_rate;
+ struct cam_axi_vote hw_axi_vote;
+ struct cam_axi_vote req_axi_vote[CAM_VFE_TOP_VER2_MUX_MAX];
+ unsigned long req_clk_rate[CAM_VFE_TOP_VER2_MUX_MAX];
};
static int cam_vfe_top_mux_get_base(struct cam_vfe_top_ver2_priv *top_priv,
@@ -77,6 +85,174 @@
return 0;
}
+static int cam_vfe_top_set_hw_clk_rate(
+ struct cam_vfe_top_ver2_priv *top_priv)
+{
+ struct cam_hw_soc_info *soc_info = NULL;
+ int i, rc = 0;
+ unsigned long max_clk_rate = 0;
+
+ soc_info = top_priv->common_data.soc_info;
+
+ for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
+ if (top_priv->req_clk_rate[i] > max_clk_rate)
+ max_clk_rate = top_priv->req_clk_rate[i];
+ }
+ if (max_clk_rate == top_priv->hw_clk_rate)
+ return 0;
+
+ CAM_DBG(CAM_ISP, "VFE: Clock name=%s idx=%d clk=%lld",
+ soc_info->clk_name[soc_info->src_clk_idx],
+ soc_info->src_clk_idx, max_clk_rate);
+
+ rc = cam_soc_util_set_clk_rate(
+ soc_info->clk[soc_info->src_clk_idx],
+ soc_info->clk_name[soc_info->src_clk_idx],
+ max_clk_rate);
+
+ if (!rc)
+ top_priv->hw_clk_rate = max_clk_rate;
+ else
+ CAM_ERR(CAM_ISP, "Set Clock rate failed, rc=%d", rc);
+
+ return rc;
+}
+
+static int cam_vfe_top_set_axi_bw_vote(
+ struct cam_vfe_top_ver2_priv *top_priv)
+{
+ struct cam_axi_vote sum = {0, 0};
+ int i, rc = 0;
+ struct cam_hw_soc_info *soc_info =
+ top_priv->common_data.soc_info;
+ struct cam_vfe_soc_private *soc_private =
+ soc_info->soc_private;
+
+ if (!soc_private) {
+ CAM_ERR(CAM_ISP, "Error soc_private NULL");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
+ sum.uncompressed_bw +=
+ top_priv->req_axi_vote[i].uncompressed_bw;
+ sum.compressed_bw +=
+ top_priv->req_axi_vote[i].compressed_bw;
+ }
+
+ CAM_DBG(CAM_ISP, "BW Vote: u=%lld c=%lld",
+ sum.uncompressed_bw,
+ sum.compressed_bw);
+
+ if ((top_priv->hw_axi_vote.uncompressed_bw ==
+ sum.uncompressed_bw) &&
+ (top_priv->hw_axi_vote.compressed_bw ==
+ sum.compressed_bw))
+ return 0;
+
+ rc = cam_cpas_update_axi_vote(
+ soc_private->cpas_handle,
+ &sum);
+ if (!rc) {
+ top_priv->hw_axi_vote.uncompressed_bw = sum.uncompressed_bw;
+ top_priv->hw_axi_vote.compressed_bw = sum.compressed_bw;
+ } else
+ CAM_ERR(CAM_ISP, "BW request failed, rc=%d", rc);
+
+ return rc;
+}
+
+static int cam_vfe_top_clock_update(
+ struct cam_vfe_top_ver2_priv *top_priv,
+ void *cmd_args, uint32_t arg_size)
+{
+ struct cam_vfe_clock_update_args *clk_update = NULL;
+ struct cam_isp_resource_node *res = NULL;
+ struct cam_hw_info *hw_info = NULL;
+ int i, rc = 0;
+
+ clk_update =
+ (struct cam_vfe_clock_update_args *)cmd_args;
+ res = clk_update->node_res;
+
+ if (!res || !res->hw_intf->hw_priv) {
+ CAM_ERR(CAM_ISP, "Invalid input res %pK", res);
+ return -EINVAL;
+ }
+
+ hw_info = res->hw_intf->hw_priv;
+
+ if (res->res_type != CAM_ISP_RESOURCE_VFE_IN ||
+ res->res_id >= CAM_ISP_HW_VFE_IN_MAX) {
+ CAM_ERR(CAM_ISP, "VFE:%d Invalid res_type:%d res id%d",
+ res->hw_intf->hw_idx, res->res_type,
+ res->res_id);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
+ if (top_priv->mux_rsrc[i].res_id == res->res_id) {
+ top_priv->req_clk_rate[i] = clk_update->clk_rate;
+ break;
+ }
+ }
+
+ if (hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+ CAM_DBG(CAM_ISP, "VFE:%d Not ready to set clocks yet :%d",
+ res->hw_intf->hw_idx,
+ hw_info->hw_state);
+ } else
+ rc = cam_vfe_top_set_hw_clk_rate(top_priv);
+
+ return rc;
+}
+
+static int cam_vfe_top_bw_update(
+ struct cam_vfe_top_ver2_priv *top_priv,
+ void *cmd_args, uint32_t arg_size)
+{
+ struct cam_vfe_bw_update_args *bw_update = NULL;
+ struct cam_isp_resource_node *res = NULL;
+ struct cam_hw_info *hw_info = NULL;
+ int rc = 0;
+ int i;
+
+ bw_update = (struct cam_vfe_bw_update_args *)cmd_args;
+ res = bw_update->node_res;
+
+ if (!res || !res->hw_intf->hw_priv)
+ return -EINVAL;
+
+ hw_info = res->hw_intf->hw_priv;
+
+ if (res->res_type != CAM_ISP_RESOURCE_VFE_IN ||
+ res->res_id >= CAM_ISP_HW_VFE_IN_MAX) {
+ CAM_ERR(CAM_ISP, "VFE:%d Invalid res_type:%d res id%d",
+ res->hw_intf->hw_idx, res->res_type,
+ res->res_id);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
+ if (top_priv->mux_rsrc[i].res_id == res->res_id) {
+ top_priv->req_axi_vote[i].uncompressed_bw =
+ bw_update->camnoc_bw_bytes;
+ top_priv->req_axi_vote[i].compressed_bw =
+ bw_update->external_bw_bytes;
+ break;
+ }
+ }
+
+ if (hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+ CAM_DBG(CAM_ISP, "VFE:%d Not ready to set BW yet :%d",
+ res->hw_intf->hw_idx,
+ hw_info->hw_state);
+ } else
+ rc = cam_vfe_top_set_axi_bw_vote(top_priv);
+
+ return rc;
+}
+
static int cam_vfe_top_mux_get_reg_update(
struct cam_vfe_top_ver2_priv *top_priv,
void *cmd_args, uint32_t arg_size)
@@ -108,12 +284,24 @@
struct cam_vfe_top_ver2_priv *top_priv = device_priv;
struct cam_hw_soc_info *soc_info = NULL;
struct cam_vfe_top_ver2_reg_offset_common *reg_common = NULL;
+ uint32_t *reset_reg_args = reset_core_args;
+ uint32_t reset_reg_val;
- if (!top_priv) {
+ if (!top_priv || !reset_reg_args) {
CAM_ERR(CAM_ISP, "Invalid arguments");
return -EINVAL;
}
+ switch (*reset_reg_args) {
+ case CAM_VFE_HW_RESET_HW_AND_REG:
+ reset_reg_val = CAM_VFE_HW_RESET_HW_AND_REG_VAL;
+ break;
+ default:
+ reset_reg_val = CAM_VFE_HW_RESET_HW_VAL;
+ break;
+ }
+
+ CAM_DBG(CAM_ISP, "reset reg value: %x", reset_reg_val);
soc_info = top_priv->common_data.soc_info;
reg_common = top_priv->common_data.common_reg;
@@ -122,7 +310,7 @@
CAM_SOC_GET_REG_MAP_START(soc_info, VFE_CORE_BASE_IDX) + 0x5C);
/* Reset HW */
- cam_io_w_mb(0x00003F9F,
+ cam_io_w_mb(reset_reg_val,
CAM_SOC_GET_REG_MAP_START(soc_info, VFE_CORE_BASE_IDX) +
reg_common->global_reset_cmd);
@@ -215,9 +403,21 @@
return -EINVAL;
}
- top_priv = (struct cam_vfe_top_ver2_priv *)device_priv;
+ top_priv = (struct cam_vfe_top_ver2_priv *)device_priv;
mux_res = (struct cam_isp_resource_node *)start_args;
+ rc = cam_vfe_top_set_hw_clk_rate(top_priv);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "set_hw_clk_rate failed, rc=%d", rc);
+ return rc;
+ }
+
+ rc = cam_vfe_top_set_axi_bw_vote(top_priv);
+ if (rc) {
+		CAM_ERR(CAM_ISP, "set_axi_bw_vote failed, rc=%d", rc);
+ return rc;
+ }
+
if (mux_res->start) {
rc = mux_res->start(mux_res);
} else {
@@ -233,7 +433,7 @@
{
struct cam_vfe_top_ver2_priv *top_priv;
struct cam_isp_resource_node *mux_res;
- int rc = 0;
+ int i, rc = 0;
if (!device_priv || !stop_args) {
CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
@@ -252,8 +452,16 @@
rc = -EINVAL;
}
- return rc;
+	if (!rc) {
+		for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++)
+			if (top_priv->mux_rsrc[i].res_id == mux_res->res_id) {
+				top_priv->req_clk_rate[i] = 0;
+				top_priv->req_axi_vote[i].compressed_bw = 0;
+				top_priv->req_axi_vote[i].uncompressed_bw = 0;
+			}
+	}
+ return rc;
}
int cam_vfe_top_read(void *device_priv,
@@ -288,6 +496,14 @@
rc = cam_vfe_top_mux_get_reg_update(top_priv, cmd_args,
arg_size);
break;
+ case CAM_ISP_HW_CMD_CLOCK_UPDATE:
+ rc = cam_vfe_top_clock_update(top_priv, cmd_args,
+ arg_size);
+ break;
+ case CAM_ISP_HW_CMD_BW_UPDATE:
+ rc = cam_vfe_top_bw_update(top_priv, cmd_args,
+ arg_size);
+ break;
default:
rc = -EINVAL;
CAM_ERR(CAM_ISP, "Error! Invalid cmd:%d", cmd_type);
@@ -323,12 +539,19 @@
goto free_vfe_top;
}
vfe_top->top_priv = top_priv;
+ top_priv->hw_clk_rate = 0;
+ top_priv->hw_axi_vote.compressed_bw = 0;
+ top_priv->hw_axi_vote.uncompressed_bw = 0;
for (i = 0, j = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
top_priv->mux_rsrc[i].res_type = CAM_ISP_RESOURCE_VFE_IN;
top_priv->mux_rsrc[i].hw_intf = hw_intf;
top_priv->mux_rsrc[i].res_state =
CAM_ISP_RESOURCE_STATE_AVAILABLE;
+ top_priv->req_clk_rate[i] = 0;
+ top_priv->req_axi_vote[i].compressed_bw = 0;
+ top_priv->req_axi_vote[i].uncompressed_bw = 0;
+
if (ver2_hw_info->mux_type[i] == CAM_VFE_CAMIF_VER_2_0) {
top_priv->mux_rsrc[i].res_id =
CAM_ISP_HW_VFE_IN_CAMIF;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include/cam_vfe_top.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include/cam_vfe_top.h
index dbb211f..81e3b48 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include/cam_vfe_top.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include/cam_vfe_top.h
@@ -29,21 +29,6 @@
struct cam_hw_ops hw_ops;
};
-struct cam_vfe_camif {
- void *camif_priv;
- int (*start_resource)(void *priv,
- struct cam_isp_resource_node *camif_res);
- int (*stop_resource)(void *priv,
- struct cam_isp_resource_node *camif_res);
- int (*acquire_resource)(void *priv,
- struct cam_isp_resource_node *camif_res,
- void *acquire_param);
- int (*release_resource)(void *priv,
- struct cam_isp_resource_node *camif_res);
- int (*process_cmd)(void *priv, uint32_t cmd_type, void *cmd_args,
- uint32_t arg_size);
-};
-
int cam_vfe_top_init(uint32_t top_version,
struct cam_hw_soc_info *soc_info,
struct cam_hw_intf *hw_intf,
diff --git a/drivers/media/platform/msm/camera/cam_lrme/Makefile b/drivers/media/platform/msm/camera/cam_lrme/Makefile
new file mode 100644
index 0000000..fba4529
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/Makefile
@@ -0,0 +1,14 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cdm
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_lrme
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw
+ccflags-y += -Idrivers/media/platform/msm/camera
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += lrme_hw_mgr/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_lrme_dev.o cam_lrme_context.o
diff --git a/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.c b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.c
new file mode 100644
index 0000000..0aa5ade
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.c
@@ -0,0 +1,241 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "cam_debug_util.h"
+#include "cam_lrme_context.h"
+
+static int __cam_lrme_ctx_acquire_dev_in_available(struct cam_context *ctx,
+ struct cam_acquire_dev_cmd *cmd)
+{
+ int rc = 0;
+ uint64_t ctxt_to_hw_map = (uint64_t)ctx->ctxt_to_hw_map;
+ struct cam_lrme_context *lrme_ctx = ctx->ctx_priv;
+
+ CAM_DBG(CAM_LRME, "Enter");
+
+ rc = cam_context_acquire_dev_to_hw(ctx, cmd);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to acquire");
+ return rc;
+ }
+
+ ctxt_to_hw_map |= (lrme_ctx->index << CAM_LRME_CTX_INDEX_SHIFT);
+ ctx->ctxt_to_hw_map = (void *)ctxt_to_hw_map;
+
+ ctx->state = CAM_CTX_ACQUIRED;
+
+ return rc;
+}
+
+static int __cam_lrme_ctx_release_dev_in_acquired(struct cam_context *ctx,
+ struct cam_release_dev_cmd *cmd)
+{
+ int rc = 0;
+
+ CAM_DBG(CAM_LRME, "Enter");
+
+ rc = cam_context_release_dev_to_hw(ctx, cmd);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to release");
+ return rc;
+ }
+
+ ctx->state = CAM_CTX_AVAILABLE;
+
+ return rc;
+}
+
+static int __cam_lrme_ctx_start_dev_in_acquired(struct cam_context *ctx,
+ struct cam_start_stop_dev_cmd *cmd)
+{
+ int rc = 0;
+
+ CAM_DBG(CAM_LRME, "Enter");
+
+ rc = cam_context_start_dev_to_hw(ctx, cmd);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to start");
+ return rc;
+ }
+
+ ctx->state = CAM_CTX_ACTIVATED;
+
+ return rc;
+}
+
+static int __cam_lrme_ctx_config_dev_in_activated(struct cam_context *ctx,
+ struct cam_config_dev_cmd *cmd)
+{
+ int rc;
+
+ CAM_DBG(CAM_LRME, "Enter");
+
+ rc = cam_context_prepare_dev_to_hw(ctx, cmd);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to config");
+ return rc;
+ }
+
+ return rc;
+}
+
+static int __cam_lrme_ctx_stop_dev_in_activated(struct cam_context *ctx,
+ struct cam_start_stop_dev_cmd *cmd)
+{
+ int rc = 0;
+
+ CAM_DBG(CAM_LRME, "Enter");
+
+ rc = cam_context_stop_dev_to_hw(ctx);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to stop dev");
+ return rc;
+ }
+
+ ctx->state = CAM_CTX_ACQUIRED;
+
+ return rc;
+}
+
+static int __cam_lrme_ctx_release_dev_in_activated(struct cam_context *ctx,
+ struct cam_release_dev_cmd *cmd)
+{
+ int rc = 0;
+
+ CAM_DBG(CAM_LRME, "Enter");
+
+ rc = __cam_lrme_ctx_stop_dev_in_activated(ctx, NULL);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to stop");
+ return rc;
+ }
+
+ rc = cam_context_release_dev_to_hw(ctx, cmd);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to release");
+ return rc;
+ }
+
+ ctx->state = CAM_CTX_AVAILABLE;
+
+ return rc;
+}
+
+static int __cam_lrme_ctx_handle_irq_in_activated(void *context,
+ uint32_t evt_id, void *evt_data)
+{
+ int rc;
+
+ CAM_DBG(CAM_LRME, "Enter");
+
+ rc = cam_context_buf_done_from_hw(context, evt_data, evt_id);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed in buf done, rc=%d", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+/* top state machine */
+static struct cam_ctx_ops
+ cam_lrme_ctx_state_machine[CAM_CTX_STATE_MAX] = {
+ /* Uninit */
+ {
+ .ioctl_ops = {},
+ .crm_ops = {},
+ .irq_ops = NULL,
+ },
+ /* Available */
+ {
+ .ioctl_ops = {
+ .acquire_dev = __cam_lrme_ctx_acquire_dev_in_available,
+ },
+ .crm_ops = {},
+ .irq_ops = NULL,
+ },
+ /* Acquired */
+ {
+ .ioctl_ops = {
+ .release_dev = __cam_lrme_ctx_release_dev_in_acquired,
+ .start_dev = __cam_lrme_ctx_start_dev_in_acquired,
+ },
+ .crm_ops = {},
+ .irq_ops = NULL,
+ },
+ /* Ready */
+ {
+ .ioctl_ops = {},
+ .crm_ops = {},
+ .irq_ops = NULL,
+ },
+ /* Activate */
+ {
+ .ioctl_ops = {
+ .config_dev = __cam_lrme_ctx_config_dev_in_activated,
+ .release_dev = __cam_lrme_ctx_release_dev_in_activated,
+ .stop_dev = __cam_lrme_ctx_stop_dev_in_activated,
+ },
+ .crm_ops = {},
+ .irq_ops = __cam_lrme_ctx_handle_irq_in_activated,
+ },
+};
+
+int cam_lrme_context_init(struct cam_lrme_context *lrme_ctx,
+ struct cam_context *base_ctx,
+ struct cam_hw_mgr_intf *hw_intf,
+ uint64_t index)
+{
+ int rc = 0;
+
+ CAM_DBG(CAM_LRME, "Enter");
+
+ if (!base_ctx || !lrme_ctx) {
+ CAM_ERR(CAM_LRME, "Invalid input");
+ return -EINVAL;
+ }
+
+ memset(lrme_ctx, 0, sizeof(*lrme_ctx));
+
+ rc = cam_context_init(base_ctx, "lrme", NULL, hw_intf,
+ lrme_ctx->req_base, CAM_CTX_REQ_MAX);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to init context");
+ return rc;
+ }
+ lrme_ctx->base = base_ctx;
+ lrme_ctx->index = index;
+ base_ctx->ctx_priv = lrme_ctx;
+ base_ctx->state_machine = cam_lrme_ctx_state_machine;
+
+ return rc;
+}
+
+int cam_lrme_context_deinit(struct cam_lrme_context *lrme_ctx)
+{
+ int rc = 0;
+
+ CAM_DBG(CAM_LRME, "Enter");
+
+ if (!lrme_ctx) {
+ CAM_ERR(CAM_LRME, "No ctx to deinit");
+ return -EINVAL;
+ }
+
+ rc = cam_context_deinit(lrme_ctx->base);
+
+ memset(lrme_ctx, 0, sizeof(*lrme_ctx));
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.h b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.h
new file mode 100644
index 0000000..882f7ac
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_LRME_CONTEXT_H_
+#define _CAM_LRME_CONTEXT_H_
+
+#include "cam_context.h"
+#include "cam_context_utils.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_req_mgr_interface.h"
+#include "cam_sync_api.h"
+
+#define CAM_LRME_CTX_INDEX_SHIFT 32
+
+/**
+ * struct cam_lrme_context
+ *
+ * @base : Base context pointer for this LRME context
+ * @req_base : List of base request for this LRME context
+ */
+struct cam_lrme_context {
+ struct cam_context *base;
+ struct cam_ctx_request req_base[CAM_CTX_REQ_MAX];
+ uint64_t index;
+};
+
+int cam_lrme_context_init(struct cam_lrme_context *lrme_ctx,
+ struct cam_context *base_ctx, struct cam_hw_mgr_intf *hw_intf,
+ uint64_t index);
+int cam_lrme_context_deinit(struct cam_lrme_context *lrme_ctx);
+
+#endif /* _CAM_LRME_CONTEXT_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_dev.c b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_dev.c
new file mode 100644
index 0000000..5be16ef
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_dev.c
@@ -0,0 +1,233 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "cam_subdev.h"
+#include "cam_node.h"
+#include "cam_lrme_context.h"
+#include "cam_lrme_hw_mgr.h"
+#include "cam_lrme_hw_mgr_intf.h"
+
+#define CAM_LRME_DEV_NAME "cam-lrme"
+
+/**
+ * struct cam_lrme_dev
+ *
+ * @sd : Subdev information
+ * @ctx : List of base contexts
+ * @lrme_ctx : List of LRME contexts
+ * @lock : Mutex for LRME subdev
+ * @open_cnt : Open count of LRME subdev
+ */
+struct cam_lrme_dev {
+ struct cam_subdev sd;
+ struct cam_context ctx[CAM_CTX_MAX];
+ struct cam_lrme_context lrme_ctx[CAM_CTX_MAX];
+ struct mutex lock;
+ uint32_t open_cnt;
+};
+
+static struct cam_lrme_dev *g_lrme_dev;
+
+static int cam_lrme_dev_buf_done_cb(void *ctxt_to_hw_map, uint32_t evt_id,
+ void *evt_data)
+{
+ uint64_t index;
+ struct cam_context *ctx;
+ int rc;
+
+ index = CAM_LRME_DECODE_CTX_INDEX(ctxt_to_hw_map);
+ CAM_DBG(CAM_LRME, "ctx index %llu, evt_id %u\n", index, evt_id);
+ ctx = &g_lrme_dev->ctx[index];
+ rc = ctx->irq_cb_intf(ctx, evt_id, evt_data);
+ if (rc)
+ CAM_ERR(CAM_LRME, "irq callback failed");
+
+ return rc;
+}
+
+static int cam_lrme_dev_open(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ struct cam_lrme_dev *lrme_dev = g_lrme_dev;
+
+ if (!lrme_dev) {
+ CAM_ERR(CAM_LRME,
+ "LRME Dev not initialized, dev=%pK", lrme_dev);
+ return -ENODEV;
+ }
+
+ mutex_lock(&lrme_dev->lock);
+ lrme_dev->open_cnt++;
+ mutex_unlock(&lrme_dev->lock);
+
+ return 0;
+}
+
+static int cam_lrme_dev_close(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ struct cam_lrme_dev *lrme_dev = g_lrme_dev;
+ struct cam_node *node = v4l2_get_subdevdata(sd);
+
+ if (!lrme_dev) {
+ CAM_ERR(CAM_LRME, "Invalid args");
+ return -ENODEV;
+ }
+
+ mutex_lock(&lrme_dev->lock);
+ lrme_dev->open_cnt--;
+ mutex_unlock(&lrme_dev->lock);
+
+ if (!node) {
+ CAM_ERR(CAM_LRME, "Node is NULL");
+ return -EINVAL;
+ }
+
+ if (lrme_dev->open_cnt == 0)
+ cam_node_shutdown(node);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_internal_ops cam_lrme_subdev_internal_ops = {
+ .open = cam_lrme_dev_open,
+ .close = cam_lrme_dev_close,
+};
+
+static int cam_lrme_dev_probe(struct platform_device *pdev)
+{
+ int rc;
+ int i;
+ struct cam_hw_mgr_intf hw_mgr_intf;
+ struct cam_node *node;
+
+ g_lrme_dev = kzalloc(sizeof(struct cam_lrme_dev), GFP_KERNEL);
+ if (!g_lrme_dev) {
+ CAM_ERR(CAM_LRME, "No memory");
+ return -ENOMEM;
+ }
+ g_lrme_dev->sd.internal_ops = &cam_lrme_subdev_internal_ops;
+
+ mutex_init(&g_lrme_dev->lock);
+
+ rc = cam_subdev_probe(&g_lrme_dev->sd, pdev, CAM_LRME_DEV_NAME,
+ CAM_LRME_DEVICE_TYPE);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "LRME cam_subdev_probe failed");
+ goto free_mem;
+ }
+ node = (struct cam_node *)g_lrme_dev->sd.token;
+
+ rc = cam_lrme_hw_mgr_init(&hw_mgr_intf, cam_lrme_dev_buf_done_cb);
+ if (rc) {
+		CAM_ERR(CAM_LRME, "Can not initialize LRME HW manager");
+ goto unregister;
+ }
+
+ for (i = 0; i < CAM_CTX_MAX; i++) {
+ rc = cam_lrme_context_init(&g_lrme_dev->lrme_ctx[i],
+ &g_lrme_dev->ctx[i],
+ &node->hw_mgr_intf, i);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "LRME context init failed");
+ goto deinit_ctx;
+ }
+ }
+
+ rc = cam_node_init(node, &hw_mgr_intf, g_lrme_dev->ctx, CAM_CTX_MAX,
+ CAM_LRME_DEV_NAME);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "LRME node init failed");
+ goto deinit_ctx;
+ }
+
+ CAM_DBG(CAM_LRME, "%s probe complete", g_lrme_dev->sd.name);
+
+ return 0;
+
+deinit_ctx:
+ for (--i; i >= 0; i--) {
+ if (cam_lrme_context_deinit(&g_lrme_dev->lrme_ctx[i]))
+ CAM_ERR(CAM_LRME, "LRME context %d deinit failed", i);
+ }
+unregister:
+ if (cam_subdev_remove(&g_lrme_dev->sd))
+ CAM_ERR(CAM_LRME, "Failed in subdev remove");
+free_mem:
+ kfree(g_lrme_dev);
+
+ return rc;
+}
+
+static int cam_lrme_dev_remove(struct platform_device *pdev)
+{
+ int i;
+ int rc = 0;
+
+ for (i = 0; i < CAM_CTX_MAX; i++) {
+ rc = cam_lrme_context_deinit(&g_lrme_dev->lrme_ctx[i]);
+ if (rc)
+ CAM_ERR(CAM_LRME, "LRME context %d deinit failed", i);
+ }
+
+ rc = cam_lrme_hw_mgr_deinit();
+ if (rc)
+ CAM_ERR(CAM_LRME, "Failed in hw mgr deinit, rc=%d", rc);
+
+ rc = cam_subdev_remove(&g_lrme_dev->sd);
+ if (rc)
+ CAM_ERR(CAM_LRME, "Unregister failed");
+
+ mutex_destroy(&g_lrme_dev->lock);
+ kfree(g_lrme_dev);
+ g_lrme_dev = NULL;
+
+ return rc;
+}
+
+static const struct of_device_id cam_lrme_dt_match[] = {
+ {
+ .compatible = "qcom,cam-lrme"
+ },
+ {}
+};
+
+static struct platform_driver cam_lrme_driver = {
+ .probe = cam_lrme_dev_probe,
+ .remove = cam_lrme_dev_remove,
+ .driver = {
+ .name = "cam_lrme",
+ .owner = THIS_MODULE,
+ .of_match_table = cam_lrme_dt_match,
+ },
+};
+
+static int __init cam_lrme_dev_init_module(void)
+{
+ return platform_driver_register(&cam_lrme_driver);
+}
+
+static void __exit cam_lrme_dev_exit_module(void)
+{
+ platform_driver_unregister(&cam_lrme_driver);
+}
+
+module_init(cam_lrme_dev_init_module);
+module_exit(cam_lrme_dev_exit_module);
+MODULE_DESCRIPTION("MSM LRME driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/Makefile b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/Makefile
new file mode 100644
index 0000000..e4c8e0d
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/Makefile
@@ -0,0 +1,14 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cdm
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_lrme
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw
+ccflags-y += -Idrivers/media/platform/msm/camera
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += lrme_hw/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_lrme_hw_mgr.o
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c
new file mode 100644
index 0000000..448086d
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c
@@ -0,0 +1,1034 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <media/cam_cpas.h>
+#include <media/cam_req_mgr.h>
+
+#include "cam_io_util.h"
+#include "cam_soc_util.h"
+#include "cam_mem_mgr_api.h"
+#include "cam_smmu_api.h"
+#include "cam_packet_util.h"
+#include "cam_lrme_context.h"
+#include "cam_lrme_hw_intf.h"
+#include "cam_lrme_hw_core.h"
+#include "cam_lrme_hw_soc.h"
+#include "cam_lrme_hw_mgr_intf.h"
+#include "cam_lrme_hw_mgr.h"
+
+static struct cam_lrme_hw_mgr g_lrme_hw_mgr;
+
+static int cam_lrme_mgr_util_reserve_device(struct cam_lrme_hw_mgr *hw_mgr,
+ struct cam_lrme_acquire_args *lrme_acquire_args)
+{
+ int i, index = 0;
+ uint32_t min_ctx = UINT_MAX;
+ struct cam_lrme_device *hw_device = NULL;
+
+ mutex_lock(&hw_mgr->hw_mgr_mutex);
+ if (!hw_mgr->device_count) {
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ CAM_ERR(CAM_LRME, "No device is registered");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < hw_mgr->device_count && i < CAM_LRME_HW_MAX; i++) {
+ hw_device = &hw_mgr->hw_device[i];
+ if (!hw_device->num_context) {
+ index = i;
+ break;
+ }
+ if (hw_device->num_context < min_ctx) {
+ min_ctx = hw_device->num_context;
+ index = i;
+ }
+ }
+
+ hw_device = &hw_mgr->hw_device[index];
+ hw_device->num_context++;
+
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+ CAM_DBG(CAM_LRME, "reserve device index %d", index);
+
+ return index;
+}
+
+static int cam_lrme_mgr_util_get_device(struct cam_lrme_hw_mgr *hw_mgr,
+ uint32_t device_index, struct cam_lrme_device **hw_device)
+{
+ if (!hw_mgr) {
+ CAM_ERR(CAM_LRME, "invalid params hw_mgr %pK", hw_mgr);
+ return -EINVAL;
+ }
+
+ if (device_index >= CAM_LRME_HW_MAX) {
+ CAM_ERR(CAM_LRME, "Wrong device index %d", device_index);
+ return -EINVAL;
+ }
+
+ *hw_device = &hw_mgr->hw_device[device_index];
+
+ return 0;
+}
+
+static int cam_lrme_mgr_util_packet_validate(struct cam_packet *packet)
+{
+ struct cam_cmd_buf_desc *cmd_desc = NULL;
+ int i, rc;
+
+ if (!packet) {
+ CAM_ERR(CAM_LRME, "Invalid args");
+ return -EINVAL;
+ }
+
+ CAM_DBG(CAM_LRME, "Packet request=%d, op_code=0x%x, size=%d, flags=%d",
+ packet->header.request_id, packet->header.op_code,
+ packet->header.size, packet->header.flags);
+ CAM_DBG(CAM_LRME,
+ "Packet cmdbuf(offset=%d, num=%d) io(offset=%d, num=%d)",
+ packet->cmd_buf_offset, packet->num_cmd_buf,
+ packet->io_configs_offset, packet->num_io_configs);
+ CAM_DBG(CAM_LRME,
+ "Packet Patch(offset=%d, num=%d) kmd(offset=%d, num=%d)",
+ packet->patch_offset, packet->num_patches,
+ packet->kmd_cmd_buf_offset, packet->kmd_cmd_buf_index);
+
+ if (cam_packet_util_validate_packet(packet)) {
+ CAM_ERR(CAM_LRME, "invalid packet:%d %d %d %d %d",
+ packet->kmd_cmd_buf_index,
+ packet->num_cmd_buf, packet->cmd_buf_offset,
+ packet->io_configs_offset, packet->header.size);
+ return -EINVAL;
+ }
+
+ if (!packet->num_io_configs) {
+ CAM_ERR(CAM_LRME, "no io configs");
+ return -EINVAL;
+ }
+
+ cmd_desc = (struct cam_cmd_buf_desc *)((uint8_t *)&packet->payload +
+ packet->cmd_buf_offset);
+
+ for (i = 0; i < packet->num_cmd_buf; i++) {
+ if (!cmd_desc[i].length)
+ continue;
+
+ CAM_DBG(CAM_LRME,
+ "CmdBuf[%d] hdl=%d, offset=%d, size=%d, len=%d, type=%d, meta_data=%d",
+ i,
+ cmd_desc[i].mem_handle, cmd_desc[i].offset,
+ cmd_desc[i].size, cmd_desc[i].length, cmd_desc[i].type,
+ cmd_desc[i].meta_data);
+
+ rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Invalid cmd buffer %d", i);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int cam_lrme_mgr_util_prepare_io_buffer(int32_t iommu_hdl,
+ struct cam_hw_prepare_update_args *prepare,
+ struct cam_lrme_hw_io_buffer *input_buf,
+ struct cam_lrme_hw_io_buffer *output_buf, uint32_t io_buf_size)
+{
+ int rc = -EINVAL;
+ uint32_t num_in_buf, num_out_buf, i, j, plane;
+ struct cam_buf_io_cfg *io_cfg;
+ uint64_t io_addr[CAM_PACKET_MAX_PLANES];
+ size_t size;
+
+ num_in_buf = 0;
+ num_out_buf = 0;
+ io_cfg = (struct cam_buf_io_cfg *)((uint8_t *)
+ &prepare->packet->payload +
+ prepare->packet->io_configs_offset);
+
+ for (i = 0; i < prepare->packet->num_io_configs; i++) {
+ CAM_DBG(CAM_LRME,
+ "IOConfig[%d] : handle[%d] Dir[%d] Res[%d] Fence[%d], Format[%d]",
+ i, io_cfg[i].mem_handle[0], io_cfg[i].direction,
+ io_cfg[i].resource_type,
+ io_cfg[i].fence, io_cfg[i].format);
+
+ if ((num_in_buf > io_buf_size) ||
+ (num_out_buf > io_buf_size)) {
+ CAM_ERR(CAM_LRME, "Invalid number of buffers %d %d %d",
+ num_in_buf, num_out_buf, io_buf_size);
+ return -EINVAL;
+ }
+
+ memset(io_addr, 0, sizeof(io_addr));
+ for (plane = 0; plane < CAM_PACKET_MAX_PLANES; plane++) {
+ if (!io_cfg[i].mem_handle[plane])
+ break;
+
+ rc = cam_mem_get_io_buf(io_cfg[i].mem_handle[plane],
+ iommu_hdl, &io_addr[plane], &size);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Cannot get io buf for %d %d",
+ plane, rc);
+ return -ENOMEM;
+ }
+
+ io_addr[plane] += io_cfg[i].offsets[plane];
+
+ if (io_addr[plane] >> 32) {
+ CAM_ERR(CAM_LRME, "Invalid io addr for %d %d",
+ plane, rc);
+ return -ENOMEM;
+ }
+
+ CAM_DBG(CAM_LRME, "IO Address[%d][%d] : %llu",
+ io_cfg[i].direction, plane, io_addr[plane]);
+ }
+
+ switch (io_cfg[i].direction) {
+ case CAM_BUF_INPUT: {
+ prepare->in_map_entries[num_in_buf].resource_handle =
+ io_cfg[i].resource_type;
+ prepare->in_map_entries[num_in_buf].sync_id =
+ io_cfg[i].fence;
+
+ input_buf[num_in_buf].valid = true;
+ for (j = 0; j < plane; j++)
+ input_buf[num_in_buf].io_addr[j] = io_addr[j];
+ input_buf[num_in_buf].num_plane = plane;
+ input_buf[num_in_buf].io_cfg = &io_cfg[i];
+
+ num_in_buf++;
+ break;
+ }
+ case CAM_BUF_OUTPUT: {
+ prepare->out_map_entries[num_out_buf].resource_handle =
+ io_cfg[i].resource_type;
+ prepare->out_map_entries[num_out_buf].sync_id =
+ io_cfg[i].fence;
+
+ output_buf[num_out_buf].valid = true;
+ for (j = 0; j < plane; j++)
+ output_buf[num_out_buf].io_addr[j] = io_addr[j];
+ output_buf[num_out_buf].num_plane = plane;
+ output_buf[num_out_buf].io_cfg = &io_cfg[i];
+
+ num_out_buf++;
+ break;
+ }
+ default:
+ CAM_ERR(CAM_LRME, "Unsupported io direction %d",
+ io_cfg[i].direction);
+ return -EINVAL;
+ }
+ }
+ prepare->num_in_map_entries = num_in_buf;
+ prepare->num_out_map_entries = num_out_buf;
+
+ return 0;
+}
+
+static int cam_lrme_mgr_util_prepare_hw_update_entries(
+ struct cam_lrme_hw_mgr *hw_mgr,
+ struct cam_hw_prepare_update_args *prepare,
+ struct cam_lrme_hw_cmd_config_args *config_args,
+ struct cam_kmd_buf_info *kmd_buf_info)
+{
+ int i, rc = 0;
+ struct cam_lrme_device *hw_device = NULL;
+ uint32_t *kmd_buf_addr;
+ uint32_t num_entry;
+ uint32_t kmd_buf_max_size;
+ uint32_t kmd_buf_used_bytes = 0;
+ struct cam_hw_update_entry *hw_entry;
+ struct cam_cmd_buf_desc *cmd_desc = NULL;
+
+ hw_device = config_args->hw_device;
+ if (!hw_device) {
+ CAM_ERR(CAM_LRME, "Invalid hw_device");
+ return -EINVAL;
+ }
+
+ kmd_buf_addr = (uint32_t *)((uint8_t *)kmd_buf_info->cpu_addr +
+ kmd_buf_info->used_bytes);
+ kmd_buf_max_size = kmd_buf_info->size - kmd_buf_info->used_bytes;
+
+ config_args->cmd_buf_addr = kmd_buf_addr;
+ config_args->size = kmd_buf_max_size;
+ config_args->config_buf_size = 0;
+
+ if (hw_device->hw_intf.hw_ops.process_cmd) {
+ rc = hw_device->hw_intf.hw_ops.process_cmd(
+ hw_device->hw_intf.hw_priv,
+ CAM_LRME_HW_CMD_PREPARE_HW_UPDATE,
+ config_args,
+ sizeof(struct cam_lrme_hw_cmd_config_args));
+ if (rc) {
+ CAM_ERR(CAM_LRME,
+ "Failed in CMD_PREPARE_HW_UPDATE %d", rc);
+ return rc;
+ }
+ } else {
+ CAM_ERR(CAM_LRME, "Can't find handle function");
+ return -EINVAL;
+ }
+
+ kmd_buf_used_bytes += config_args->config_buf_size;
+
+ if (!kmd_buf_used_bytes || (kmd_buf_used_bytes > kmd_buf_max_size)) {
+ CAM_ERR(CAM_LRME, "Invalid kmd used bytes %d (%d)",
+ kmd_buf_used_bytes, kmd_buf_max_size);
+ return -ENOMEM;
+ }
+
+ hw_entry = prepare->hw_update_entries;
+ num_entry = 0;
+
+ if (config_args->config_buf_size) {
+ if ((num_entry + 1) >= prepare->max_hw_update_entries) {
+ CAM_ERR(CAM_LRME, "Insufficient HW entries :%d %d",
+ num_entry, prepare->max_hw_update_entries);
+ return -EINVAL;
+ }
+
+ hw_entry[num_entry].handle = kmd_buf_info->handle;
+ hw_entry[num_entry].len = config_args->config_buf_size;
+ hw_entry[num_entry].offset = kmd_buf_info->offset;
+
+ kmd_buf_info->used_bytes += config_args->config_buf_size;
+ kmd_buf_info->offset += config_args->config_buf_size;
+ num_entry++;
+ }
+
+ cmd_desc = (struct cam_cmd_buf_desc *)((uint8_t *)
+ &prepare->packet->payload + prepare->packet->cmd_buf_offset);
+
+ for (i = 0; i < prepare->packet->num_cmd_buf; i++) {
+ if (!cmd_desc[i].length)
+ continue;
+
+ if ((num_entry + 1) >= prepare->max_hw_update_entries) {
+ CAM_ERR(CAM_LRME, "Exceed max num of entry");
+ return -EINVAL;
+ }
+
+ hw_entry[num_entry].handle = cmd_desc[i].mem_handle;
+ hw_entry[num_entry].len = cmd_desc[i].length;
+ hw_entry[num_entry].offset = cmd_desc[i].offset;
+ num_entry++;
+ }
+ prepare->num_hw_update_entries = num_entry;
+
+ CAM_DBG(CAM_LRME, "FinalConfig : hw_entries=%d, Sync(in=%d, out=%d)",
+ prepare->num_hw_update_entries, prepare->num_in_map_entries,
+ prepare->num_out_map_entries);
+
+ return rc;
+}
+
+static void cam_lrme_mgr_util_put_frame_req(
+ struct list_head *src_list,
+ struct list_head *list,
+ spinlock_t *lock)
+{
+ spin_lock(lock);
+ list_add_tail(list, src_list);
+ spin_unlock(lock);
+}
+
+static int cam_lrme_mgr_util_get_frame_req(
+ struct list_head *src_list,
+ struct cam_lrme_frame_request **frame_req,
+ spinlock_t *lock)
+{
+ int rc = 0;
+ struct cam_lrme_frame_request *req_ptr = NULL;
+
+ spin_lock(lock);
+ if (!list_empty(src_list)) {
+ req_ptr = list_first_entry(src_list,
+ struct cam_lrme_frame_request, frame_list);
+ list_del_init(&req_ptr->frame_list);
+ } else {
+ rc = -ENOENT;
+ }
+ *frame_req = req_ptr;
+ spin_unlock(lock);
+
+ return rc;
+}
+
+
+static int cam_lrme_mgr_util_submit_req(void *priv, void *data)
+{
+ struct cam_lrme_device *hw_device;
+ struct cam_lrme_hw_mgr *hw_mgr;
+ struct cam_lrme_frame_request *frame_req = NULL;
+ struct cam_lrme_hw_submit_args submit_args;
+ struct cam_lrme_mgr_work_data *work_data;
+ int rc;
+ int req_prio = 0;
+
+ if (!priv) {
+ CAM_ERR(CAM_LRME, "worker doesn't have private data");
+ return -EINVAL;
+ }
+
+ hw_mgr = (struct cam_lrme_hw_mgr *)priv;
+ work_data = (struct cam_lrme_mgr_work_data *)data;
+ hw_device = work_data->hw_device;
+
+ rc = cam_lrme_mgr_util_get_frame_req(&hw_device->
+ frame_pending_list_high, &frame_req, &hw_device->high_req_lock);
+
+ if (!frame_req) {
+ rc = cam_lrme_mgr_util_get_frame_req(&hw_device->
+ frame_pending_list_normal, &frame_req,
+ &hw_device->normal_req_lock);
+ if (frame_req)
+ req_prio = 1;
+ }
+
+ if (!frame_req) {
+ CAM_DBG(CAM_LRME, "No pending request");
+ return 0;
+ }
+
+ if (hw_device->hw_intf.hw_ops.process_cmd) {
+ submit_args.hw_update_entries = frame_req->hw_update_entries;
+ submit_args.num_hw_update_entries =
+ frame_req->num_hw_update_entries;
+ submit_args.frame_req = frame_req;
+
+ rc = hw_device->hw_intf.hw_ops.process_cmd(
+ hw_device->hw_intf.hw_priv,
+ CAM_LRME_HW_CMD_SUBMIT,
+ &submit_args, sizeof(struct cam_lrme_hw_submit_args));
+
+ if (rc == -EBUSY)
+ CAM_DBG(CAM_LRME, "device busy");
+ else if (rc)
+ CAM_ERR(CAM_LRME, "submit request failed rc %d", rc);
+ if (rc) {
+ req_prio == 0 ? spin_lock(&hw_device->high_req_lock) :
+ spin_lock(&hw_device->normal_req_lock);
+ list_add(&frame_req->frame_list,
+ (req_prio == 0 ?
+ &hw_device->frame_pending_list_high :
+ &hw_device->frame_pending_list_normal));
+ req_prio == 0 ? spin_unlock(&hw_device->high_req_lock) :
+ spin_unlock(&hw_device->normal_req_lock);
+ }
+ if (rc == -EBUSY)
+ rc = 0;
+ } else {
+ req_prio == 0 ? spin_lock(&hw_device->high_req_lock) :
+ spin_lock(&hw_device->normal_req_lock);
+ list_add(&frame_req->frame_list,
+ (req_prio == 0 ?
+ &hw_device->frame_pending_list_high :
+ &hw_device->frame_pending_list_normal));
+ req_prio == 0 ? spin_unlock(&hw_device->high_req_lock) :
+ spin_unlock(&hw_device->normal_req_lock);
+ rc = -EINVAL;
+ }
+
+ CAM_DBG(CAM_LRME, "End of submit, rc %d", rc);
+
+ return rc;
+}
+
+static int cam_lrme_mgr_util_schedule_frame_req(
+ struct cam_lrme_hw_mgr *hw_mgr, struct cam_lrme_device *hw_device)
+{
+ int rc = 0;
+ struct crm_workq_task *task;
+ struct cam_lrme_mgr_work_data *work_data;
+
+ task = cam_req_mgr_workq_get_task(hw_device->work);
+ if (!task) {
+ CAM_ERR(CAM_LRME, "Can not get task for worker");
+ return -ENOMEM;
+ }
+
+ work_data = (struct cam_lrme_mgr_work_data *)task->payload;
+ work_data->hw_device = hw_device;
+
+ task->process_cb = cam_lrme_mgr_util_submit_req;
+ CAM_DBG(CAM_LRME, "enqueue submit task");
+ rc = cam_req_mgr_workq_enqueue_task(task, hw_mgr, CRM_TASK_PRIORITY_0);
+
+ return rc;
+}
+
+static int cam_lrme_mgr_util_release(struct cam_lrme_hw_mgr *hw_mgr,
+ uint32_t device_index)
+{
+ int rc = 0;
+ struct cam_lrme_device *hw_device;
+
+ rc = cam_lrme_mgr_util_get_device(hw_mgr, device_index, &hw_device);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Error in getting device %d", rc);
+ return rc;
+ }
+
+ mutex_lock(&hw_mgr->hw_mgr_mutex);
+ hw_device->num_context--;
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+ return rc;
+}
+
+static int cam_lrme_mgr_cb(void *data,
+ struct cam_lrme_hw_cb_args *cb_args)
+{
+ struct cam_lrme_hw_mgr *hw_mgr = &g_lrme_hw_mgr;
+ int rc = 0;
+ bool frame_abort = true;
+ struct cam_lrme_frame_request *frame_req;
+ struct cam_lrme_device *hw_device;
+
+ if (!data || !cb_args) {
+ CAM_ERR(CAM_LRME, "Invalid input args");
+ return -EINVAL;
+ }
+
+ hw_device = (struct cam_lrme_device *)data;
+ frame_req = cb_args->frame_req;
+
+ if (cb_args->cb_type & CAM_LRME_CB_PUT_FRAME) {
+ memset(frame_req, 0x0, sizeof(*frame_req));
+ INIT_LIST_HEAD(&frame_req->frame_list);
+ cam_lrme_mgr_util_put_frame_req(&hw_mgr->frame_free_list,
+ &frame_req->frame_list,
+ &hw_mgr->free_req_lock);
+ cb_args->cb_type &= ~CAM_LRME_CB_PUT_FRAME;
+ frame_req = NULL;
+ }
+
+ if (cb_args->cb_type & CAM_LRME_CB_COMP_REG_UPDATE) {
+ cb_args->cb_type &= ~CAM_LRME_CB_COMP_REG_UPDATE;
+ CAM_DBG(CAM_LRME, "Reg update");
+ }
+
+ if (!frame_req)
+ return rc;
+
+ if (cb_args->cb_type & CAM_LRME_CB_BUF_DONE) {
+ cb_args->cb_type &= ~CAM_LRME_CB_BUF_DONE;
+ frame_abort = false;
+ } else if (cb_args->cb_type & CAM_LRME_CB_ERROR) {
+ cb_args->cb_type &= ~CAM_LRME_CB_ERROR;
+ frame_abort = true;
+ } else {
+ CAM_ERR(CAM_LRME, "Wrong cb type %d, req %lld",
+ cb_args->cb_type, frame_req->req_id);
+ return -EINVAL;
+ }
+
+ if (hw_mgr->event_cb) {
+ struct cam_hw_done_event_data buf_data;
+
+ buf_data.request_id = frame_req->req_id;
+ CAM_DBG(CAM_LRME, "frame req %llu, frame_abort %d",
+ frame_req->req_id, frame_abort);
+ rc = hw_mgr->event_cb(frame_req->ctxt_to_hw_map,
+ frame_abort, &buf_data);
+ } else {
+ CAM_ERR(CAM_LRME, "No cb function");
+ }
+ memset(frame_req, 0x0, sizeof(*frame_req));
+ INIT_LIST_HEAD(&frame_req->frame_list);
+ cam_lrme_mgr_util_put_frame_req(&hw_mgr->frame_free_list,
+ &frame_req->frame_list,
+ &hw_mgr->free_req_lock);
+
+ rc = cam_lrme_mgr_util_schedule_frame_req(hw_mgr, hw_device);
+
+ return rc;
+}
+
+static int cam_lrme_mgr_get_caps(void *hw_mgr_priv, void *hw_get_caps_args)
+{
+ int rc = 0;
+ struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv;
+ struct cam_query_cap_cmd *args = hw_get_caps_args;
+
+ if (sizeof(struct cam_lrme_query_cap_cmd) != args->size) {
+		CAM_ERR(CAM_LRME,
+			"sizeof(struct cam_lrme_query_cap_cmd) = %lu, args->size = %d",
+			sizeof(struct cam_lrme_query_cap_cmd), args->size);
+ return -EFAULT;
+ }
+
+ if (copy_to_user((void __user *)args->caps_handle, &(hw_mgr->lrme_caps),
+ sizeof(struct cam_lrme_query_cap_cmd))) {
+ CAM_ERR(CAM_LRME, "copy to user failed");
+ return -EFAULT;
+ }
+
+ return rc;
+}
+
+static int cam_lrme_mgr_hw_acquire(void *hw_mgr_priv, void *hw_acquire_args)
+{
+ struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv;
+ struct cam_hw_acquire_args *args =
+ (struct cam_hw_acquire_args *)hw_acquire_args;
+ struct cam_lrme_acquire_args lrme_acquire_args;
+ uint64_t device_index;
+
+ if (!hw_mgr_priv || !args) {
+ CAM_ERR(CAM_LRME,
+ "Invalid input params hw_mgr_priv %pK, acquire_args %pK",
+ hw_mgr_priv, args);
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&lrme_acquire_args,
+ (void __user *)args->acquire_info,
+ sizeof(struct cam_lrme_acquire_args))) {
+ CAM_ERR(CAM_LRME, "Failed to copy acquire args from user");
+ return -EFAULT;
+ }
+
+ device_index = cam_lrme_mgr_util_reserve_device(hw_mgr,
+ &lrme_acquire_args);
+ CAM_DBG(CAM_LRME, "Get device id %llu", device_index);
+
+ if (device_index >= hw_mgr->device_count) {
+ CAM_ERR(CAM_LRME, "Get wrong device id %llu", device_index);
+ return -EINVAL;
+ }
+
+ /* device_index is the right 4 bit in ctxt_to_hw_map */
+ args->ctxt_to_hw_map = (void *)device_index;
+
+ return 0;
+}
+
+static int cam_lrme_mgr_hw_release(void *hw_mgr_priv, void *hw_release_args)
+{
+ int rc = 0;
+ struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv;
+ struct cam_hw_release_args *args =
+ (struct cam_hw_release_args *)hw_release_args;
+ uint64_t device_index;
+
+ if (!hw_mgr_priv || !hw_release_args) {
+ CAM_ERR(CAM_LRME, "Invalid arguments %pK, %pK",
+ hw_mgr_priv, hw_release_args);
+ return -EINVAL;
+ }
+
+ device_index = CAM_LRME_DECODE_DEVICE_INDEX(args->ctxt_to_hw_map);
+ if (device_index >= hw_mgr->device_count) {
+ CAM_ERR(CAM_LRME, "Invalid device index %llu", device_index);
+ return -EPERM;
+ }
+
+ rc = cam_lrme_mgr_util_release(hw_mgr, device_index);
+ if (rc)
+ CAM_ERR(CAM_LRME, "Failed in release device, rc=%d", rc);
+
+ return rc;
+}
+
+static int cam_lrme_mgr_hw_start(void *hw_mgr_priv, void *hw_start_args)
+{
+ int rc = 0;
+ struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv;
+ struct cam_hw_start_args *args =
+ (struct cam_hw_start_args *)hw_start_args;
+ struct cam_lrme_device *hw_device;
+ uint32_t device_index;
+
+ if (!hw_mgr || !args) {
+		CAM_ERR(CAM_LRME, "Invalid input params");
+ return -EINVAL;
+ }
+
+ device_index = CAM_LRME_DECODE_DEVICE_INDEX(args->ctxt_to_hw_map);
+ if (device_index >= hw_mgr->device_count) {
+ CAM_ERR(CAM_LRME, "Invalid device index %d", device_index);
+ return -EPERM;
+ }
+
+ CAM_DBG(CAM_LRME, "Start device index %d", device_index);
+
+ rc = cam_lrme_mgr_util_get_device(hw_mgr, device_index, &hw_device);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to get hw device");
+ return rc;
+ }
+
+ if (hw_device->hw_intf.hw_ops.start) {
+ rc = hw_device->hw_intf.hw_ops.start(
+ hw_device->hw_intf.hw_priv, NULL, 0);
+ } else {
+		CAM_ERR(CAM_LRME, "Invalid start function");
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static int cam_lrme_mgr_hw_stop(void *hw_mgr_priv, void *stop_args)
+{
+ int rc = 0;
+ struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv;
+ struct cam_hw_stop_args *args =
+ (struct cam_hw_stop_args *)stop_args;
+ struct cam_lrme_device *hw_device;
+ uint32_t device_index;
+
+ if (!hw_mgr_priv || !stop_args) {
+ CAM_ERR(CAM_LRME, "Invalid arguments");
+ return -EINVAL;
+ }
+
+ device_index = CAM_LRME_DECODE_DEVICE_INDEX(args->ctxt_to_hw_map);
+ if (device_index >= hw_mgr->device_count) {
+ CAM_ERR(CAM_LRME, "Invalid device index %d", device_index);
+ return -EPERM;
+ }
+
+ CAM_DBG(CAM_LRME, "Stop device index %d", device_index);
+
+ rc = cam_lrme_mgr_util_get_device(hw_mgr, device_index, &hw_device);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to get hw device");
+ return rc;
+ }
+
+ if (hw_device->hw_intf.hw_ops.stop) {
+ rc = hw_device->hw_intf.hw_ops.stop(
+ hw_device->hw_intf.hw_priv, NULL, 0);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed in HW stop %d", rc);
+ goto end;
+ }
+ }
+
+end:
+ return rc;
+}
+
+static int cam_lrme_mgr_hw_prepare_update(void *hw_mgr_priv,
+ void *hw_prepare_update_args)
+{
+ int rc = 0, i;
+ struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv;
+ struct cam_hw_prepare_update_args *args =
+ (struct cam_hw_prepare_update_args *)hw_prepare_update_args;
+ struct cam_lrme_device *hw_device;
+ struct cam_kmd_buf_info kmd_buf;
+ struct cam_lrme_hw_cmd_config_args config_args;
+ struct cam_lrme_frame_request *frame_req = NULL;
+ uint32_t device_index;
+
+ if (!hw_mgr_priv || !hw_prepare_update_args) {
+ CAM_ERR(CAM_LRME, "Invalid args %pK %pK",
+ hw_mgr_priv, hw_prepare_update_args);
+ return -EINVAL;
+ }
+
+ device_index = CAM_LRME_DECODE_DEVICE_INDEX(args->ctxt_to_hw_map);
+ if (device_index >= hw_mgr->device_count) {
+ CAM_ERR(CAM_LRME, "Invalid device index %d", device_index);
+ return -EPERM;
+ }
+
+ rc = cam_lrme_mgr_util_get_device(hw_mgr, device_index, &hw_device);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Error in getting device %d", rc);
+ goto error;
+ }
+
+ rc = cam_lrme_mgr_util_packet_validate(args->packet);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Error in packet validation %d", rc);
+ goto error;
+ }
+
+ rc = cam_packet_util_get_kmd_buffer(args->packet, &kmd_buf);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Error in get kmd buf buffer %d", rc);
+ goto error;
+ }
+
+ CAM_DBG(CAM_LRME,
+ "KMD Buf : hdl=%d, cpu_addr=%pK, offset=%d, size=%d, used=%d",
+ kmd_buf.handle, kmd_buf.cpu_addr, kmd_buf.offset,
+ kmd_buf.size, kmd_buf.used_bytes);
+
+ rc = cam_packet_util_process_patches(args->packet,
+ hw_mgr->device_iommu.non_secure, hw_mgr->device_iommu.secure);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Patch packet failed, rc=%d", rc);
+ return rc;
+ }
+
+ memset(&config_args, 0, sizeof(config_args));
+ config_args.hw_device = hw_device;
+
+ rc = cam_lrme_mgr_util_prepare_io_buffer(
+ hw_mgr->device_iommu.non_secure, args,
+ config_args.input_buf, config_args.output_buf,
+ CAM_LRME_MAX_IO_BUFFER);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Error in prepare IO Buf %d", rc);
+ goto error;
+ }
+ /* Check port number */
+ if (args->num_in_map_entries == 0 || args->num_out_map_entries == 0) {
+ CAM_ERR(CAM_LRME, "Error in port number in %d, out %d",
+ args->num_in_map_entries, args->num_out_map_entries);
+ goto error;
+ }
+
+ rc = cam_lrme_mgr_util_prepare_hw_update_entries(hw_mgr, args,
+ &config_args, &kmd_buf);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Error in hw update entries %d", rc);
+ goto error;
+ }
+
+ rc = cam_lrme_mgr_util_get_frame_req(&hw_mgr->frame_free_list,
+ &frame_req, &hw_mgr->free_req_lock);
+ if (rc || !frame_req) {
+ CAM_ERR(CAM_LRME, "Can not get free frame request");
+ goto error;
+ }
+
+ frame_req->ctxt_to_hw_map = args->ctxt_to_hw_map;
+ frame_req->req_id = args->packet->header.request_id;
+ frame_req->hw_device = hw_device;
+ frame_req->num_hw_update_entries = args->num_hw_update_entries;
+ for (i = 0; i < args->num_hw_update_entries; i++)
+ frame_req->hw_update_entries[i] = args->hw_update_entries[i];
+
+ args->priv = frame_req;
+
+ CAM_DBG(CAM_LRME, "FramePrepare : Frame[%lld]", frame_req->req_id);
+
+ return 0;
+
+error:
+ return rc;
+}
+
+static int cam_lrme_mgr_hw_config(void *hw_mgr_priv,
+ void *hw_config_args)
+{
+ int rc = 0;
+ struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv;
+ struct cam_hw_config_args *args =
+ (struct cam_hw_config_args *)hw_config_args;
+ struct cam_lrme_frame_request *frame_req;
+ struct cam_lrme_device *hw_device = NULL;
+ enum cam_lrme_hw_mgr_ctx_priority priority;
+
+ if (!hw_mgr_priv || !hw_config_args) {
+ CAM_ERR(CAM_LRME, "Invalid arguments, hw_mgr %pK, config %pK",
+ hw_mgr_priv, hw_config_args);
+ return -EINVAL;
+ }
+
+ if (!args->num_hw_update_entries) {
+ CAM_ERR(CAM_LRME, "No hw update entries");
+ return -EINVAL;
+ }
+
+ frame_req = (struct cam_lrme_frame_request *)args->priv;
+ if (!frame_req) {
+ CAM_ERR(CAM_LRME, "No frame request");
+ return -EINVAL;
+ }
+
+ hw_device = frame_req->hw_device;
+ if (!hw_device)
+ return -EINVAL;
+
+ priority = CAM_LRME_DECODE_PRIORITY(args->ctxt_to_hw_map);
+ if (priority == CAM_LRME_PRIORITY_HIGH) {
+ cam_lrme_mgr_util_put_frame_req(
+ &hw_device->frame_pending_list_high,
+ &frame_req->frame_list, &hw_device->high_req_lock);
+ } else {
+ cam_lrme_mgr_util_put_frame_req(
+ &hw_device->frame_pending_list_normal,
+ &frame_req->frame_list, &hw_device->normal_req_lock);
+ }
+
+ CAM_DBG(CAM_LRME, "schedule req %llu", frame_req->req_id);
+ rc = cam_lrme_mgr_util_schedule_frame_req(hw_mgr, hw_device);
+
+ return rc;
+}
+
+int cam_lrme_mgr_register_device(
+ struct cam_hw_intf *lrme_hw_intf,
+ struct cam_iommu_handle *device_iommu,
+ struct cam_iommu_handle *cdm_iommu)
+{
+ struct cam_lrme_device *hw_device;
+ char buf[128];
+ int i, rc;
+
+ hw_device = &g_lrme_hw_mgr.hw_device[lrme_hw_intf->hw_idx];
+
+ g_lrme_hw_mgr.device_iommu = *device_iommu;
+ g_lrme_hw_mgr.cdm_iommu = *cdm_iommu;
+
+ memcpy(&hw_device->hw_intf, lrme_hw_intf, sizeof(struct cam_hw_intf));
+
+ spin_lock_init(&hw_device->high_req_lock);
+ spin_lock_init(&hw_device->normal_req_lock);
+ INIT_LIST_HEAD(&hw_device->frame_pending_list_high);
+ INIT_LIST_HEAD(&hw_device->frame_pending_list_normal);
+
+ rc = snprintf(buf, sizeof(buf), "cam_lrme_device_submit_worker%d",
+ lrme_hw_intf->hw_idx);
+ CAM_DBG(CAM_LRME, "Create submit workq for %s", buf);
+ rc = cam_req_mgr_workq_create(buf,
+ CAM_LRME_WORKQ_NUM_TASK,
+ &hw_device->work, CRM_WORKQ_USAGE_NON_IRQ);
+ if (rc) {
+ CAM_ERR(CAM_LRME,
+ "Unable to create a worker, rc=%d", rc);
+ return rc;
+ }
+
+ for (i = 0; i < CAM_LRME_WORKQ_NUM_TASK; i++)
+ hw_device->work->task.pool[i].payload =
+ &hw_device->work_data[i];
+
+ if (hw_device->hw_intf.hw_ops.process_cmd) {
+ struct cam_lrme_hw_cmd_set_cb cb_args;
+
+ cb_args.cam_lrme_hw_mgr_cb = cam_lrme_mgr_cb;
+ cb_args.data = hw_device;
+
+ rc = hw_device->hw_intf.hw_ops.process_cmd(
+ hw_device->hw_intf.hw_priv,
+ CAM_LRME_HW_CMD_REGISTER_CB,
+ &cb_args, sizeof(cb_args));
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Register cb failed");
+ goto destroy_workqueue;
+ }
+ CAM_DBG(CAM_LRME, "cb registered");
+ }
+
+ if (hw_device->hw_intf.hw_ops.get_hw_caps) {
+ rc = hw_device->hw_intf.hw_ops.get_hw_caps(
+ hw_device->hw_intf.hw_priv, &hw_device->hw_caps,
+ sizeof(hw_device->hw_caps));
+ if (rc)
+ CAM_ERR(CAM_LRME, "Get caps failed");
+	} else {
+		CAM_ERR(CAM_LRME, "No get_hw_caps function");
+		rc = -EINVAL; goto destroy_workqueue;
+ }
+ g_lrme_hw_mgr.lrme_caps.dev_caps[lrme_hw_intf->hw_idx] =
+ hw_device->hw_caps;
+ g_lrme_hw_mgr.device_count++;
+ g_lrme_hw_mgr.lrme_caps.device_iommu = g_lrme_hw_mgr.device_iommu;
+ g_lrme_hw_mgr.lrme_caps.cdm_iommu = g_lrme_hw_mgr.cdm_iommu;
+ g_lrme_hw_mgr.lrme_caps.num_devices = g_lrme_hw_mgr.device_count;
+
+ hw_device->valid = true;
+
+ CAM_DBG(CAM_LRME, "device registration done");
+ return 0;
+
+destroy_workqueue:
+ cam_req_mgr_workq_destroy(&hw_device->work);
+
+ return rc;
+}
+
+int cam_lrme_mgr_deregister_device(int device_index)
+{
+ struct cam_lrme_device *hw_device;
+
+ hw_device = &g_lrme_hw_mgr.hw_device[device_index];
+ cam_req_mgr_workq_destroy(&hw_device->work);
+ memset(hw_device, 0x0, sizeof(struct cam_lrme_device));
+ g_lrme_hw_mgr.device_count--;
+
+ return 0;
+}
+
+int cam_lrme_hw_mgr_deinit(void)
+{
+ mutex_destroy(&g_lrme_hw_mgr.hw_mgr_mutex);
+ memset(&g_lrme_hw_mgr, 0x0, sizeof(g_lrme_hw_mgr));
+
+ return 0;
+}
+
+int cam_lrme_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf,
+ cam_hw_event_cb_func cam_lrme_dev_buf_done_cb)
+{
+ int i, rc = 0;
+ struct cam_lrme_frame_request *frame_req;
+
+ if (!hw_mgr_intf)
+ return -EINVAL;
+
+ CAM_DBG(CAM_LRME, "device count %d", g_lrme_hw_mgr.device_count);
+ if (g_lrme_hw_mgr.device_count > CAM_LRME_HW_MAX) {
+ CAM_ERR(CAM_LRME, "Invalid count of devices");
+ return -EINVAL;
+ }
+
+ memset(hw_mgr_intf, 0, sizeof(*hw_mgr_intf));
+
+ mutex_init(&g_lrme_hw_mgr.hw_mgr_mutex);
+ spin_lock_init(&g_lrme_hw_mgr.free_req_lock);
+ INIT_LIST_HEAD(&g_lrme_hw_mgr.frame_free_list);
+
+ /* Init hw mgr frame requests and add to free list */
+ for (i = 0; i < CAM_CTX_REQ_MAX * CAM_CTX_MAX; i++) {
+ frame_req = &g_lrme_hw_mgr.frame_req[i];
+
+ memset(frame_req, 0x0, sizeof(*frame_req));
+ INIT_LIST_HEAD(&frame_req->frame_list);
+
+ list_add_tail(&frame_req->frame_list,
+ &g_lrme_hw_mgr.frame_free_list);
+ }
+
+ hw_mgr_intf->hw_mgr_priv = &g_lrme_hw_mgr;
+ hw_mgr_intf->hw_get_caps = cam_lrme_mgr_get_caps;
+ hw_mgr_intf->hw_acquire = cam_lrme_mgr_hw_acquire;
+ hw_mgr_intf->hw_release = cam_lrme_mgr_hw_release;
+ hw_mgr_intf->hw_start = cam_lrme_mgr_hw_start;
+ hw_mgr_intf->hw_stop = cam_lrme_mgr_hw_stop;
+ hw_mgr_intf->hw_prepare_update = cam_lrme_mgr_hw_prepare_update;
+ hw_mgr_intf->hw_config = cam_lrme_mgr_hw_config;
+ hw_mgr_intf->hw_read = NULL;
+ hw_mgr_intf->hw_write = NULL;
+ hw_mgr_intf->hw_close = NULL;
+
+ g_lrme_hw_mgr.event_cb = cam_lrme_dev_buf_done_cb;
+
+ CAM_DBG(CAM_LRME, "Hw mgr init done");
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.h b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.h
new file mode 100644
index 0000000..f7ce4d2
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.h
@@ -0,0 +1,120 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_LRME_HW_MGR_H_
+#define _CAM_LRME_HW_MGR_H_
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include <media/cam_lrme.h>
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_req_mgr_workq.h"
+#include "cam_lrme_hw_intf.h"
+#include "cam_context.h"
+
+#define CAM_LRME_HW_MAX 1
+#define CAM_LRME_WORKQ_NUM_TASK 10
+
+#define CAM_LRME_DECODE_DEVICE_INDEX(ctxt_to_hw_map) \
+ ((uint64_t)ctxt_to_hw_map & 0xF)
+
+#define CAM_LRME_DECODE_PRIORITY(ctxt_to_hw_map) \
+ (((uint64_t)ctxt_to_hw_map & 0xF0) >> 4)
+
+#define CAM_LRME_DECODE_CTX_INDEX(ctxt_to_hw_map) \
+ ((uint64_t)ctxt_to_hw_map >> CAM_LRME_CTX_INDEX_SHIFT)
+
+/**
+ * enum cam_lrme_hw_mgr_ctx_priority
+ *
+ * CAM_LRME_PRIORITY_HIGH : High priority client
+ * CAM_LRME_PRIORITY_NORMAL : Normal priority client
+ */
+enum cam_lrme_hw_mgr_ctx_priority {
+ CAM_LRME_PRIORITY_HIGH,
+ CAM_LRME_PRIORITY_NORMAL,
+};
+
+/**
+ * struct cam_lrme_mgr_work_data : HW Mgr work data
+ *
+ * hw_device : Pointer to the hw device
+ */
+struct cam_lrme_mgr_work_data {
+ struct cam_lrme_device *hw_device;
+};
+
+/**
+ * struct cam_lrme_device : LRME HW device
+ *
+ * @hw_caps : HW device's capabilities
+ * @hw_intf : HW device's interface information
+ * @num_context : Number of contexts using this device
+ * @valid : Whether this device is valid
+ * @work : HW device's work queue
+ * @work_data : HW device's work data
+ * @frame_pending_list_high : High priority request queue
+ * @frame_pending_list_normal : Normal priority request queue
+ * @high_req_lock : Spinlock of high priority queue
+ * @normal_req_lock : Spinlock of normal priority queue
+ */
+struct cam_lrme_device {
+ struct cam_lrme_dev_cap hw_caps;
+ struct cam_hw_intf hw_intf;
+ uint32_t num_context;
+ bool valid;
+ struct cam_req_mgr_core_workq *work;
+ struct cam_lrme_mgr_work_data work_data[CAM_LRME_WORKQ_NUM_TASK];
+ struct list_head frame_pending_list_high;
+ struct list_head frame_pending_list_normal;
+ spinlock_t high_req_lock;
+ spinlock_t normal_req_lock;
+};
+
+/**
+ * struct cam_lrme_hw_mgr : LRME HW manager
+ *
+ * @device_count : Number of HW devices
+ * @frame_free_list : List of free frame request
+ * @hw_mgr_mutex : Mutex to protect HW manager data
+ * @free_req_lock : Spinlock to protect frame_free_list
+ * @hw_device : List of HW devices
+ * @device_iommu : Device iommu
+ * @cdm_iommu : cdm iommu
+ * @frame_req : List of frame request to use
+ * @lrme_caps : LRME capabilities
+ * @event_cb : IRQ callback function
+ */
+struct cam_lrme_hw_mgr {
+ uint32_t device_count;
+ struct list_head frame_free_list;
+ struct mutex hw_mgr_mutex;
+ spinlock_t free_req_lock;
+ struct cam_lrme_device hw_device[CAM_LRME_HW_MAX];
+ struct cam_iommu_handle device_iommu;
+ struct cam_iommu_handle cdm_iommu;
+ struct cam_lrme_frame_request frame_req[CAM_CTX_REQ_MAX * CAM_CTX_MAX];
+ struct cam_lrme_query_cap_cmd lrme_caps;
+ cam_hw_event_cb_func event_cb;
+};
+
+int cam_lrme_mgr_register_device(struct cam_hw_intf *lrme_hw_intf,
+ struct cam_iommu_handle *device_iommu,
+ struct cam_iommu_handle *cdm_iommu);
+int cam_lrme_mgr_deregister_device(int device_index);
+
+#endif /* _CAM_LRME_HW_MGR_H_ */
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr_intf.h
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr_intf.h
index 5513c92..8bb609c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr_intf.h
@@ -10,15 +10,16 @@
* GNU General Public License for more details.
*/
+#ifndef _CAM_LRME_HW_MGR_INTF_H_
+#define _CAM_LRME_HW_MGR_INTF_H_
-/dts-v1/;
+#include <linux/of.h>
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "cam_debug_util.h"
+#include "cam_hw_mgr_intf.h"
-/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
-};
+int cam_lrme_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf,
+ cam_hw_event_cb_func cam_lrme_dev_buf_done_cb);
+int cam_lrme_hw_mgr_deinit(void);
+
+#endif /* _CAM_LRME_HW_MGR_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/Makefile b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/Makefile
new file mode 100644
index 0000000..c65d862
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/Makefile
@@ -0,0 +1,13 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cdm
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_lrme
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw
+ccflags-y += -Idrivers/media/platform/msm/camera
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_lrme_hw_dev.o cam_lrme_hw_core.o cam_lrme_hw_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c
new file mode 100644
index 0000000..0318739
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c
@@ -0,0 +1,1022 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_lrme_hw_core.h"
+#include "cam_lrme_hw_soc.h"
+#include "cam_smmu_api.h"
+
+static void cam_lrme_cdm_write_reg_val_pair(uint32_t *buffer,
+ uint32_t *index, uint32_t reg_offset, uint32_t reg_value)
+{
+ buffer[(*index)++] = reg_offset;
+ buffer[(*index)++] = reg_value;
+}
+
+static void cam_lrme_hw_util_fill_fe_reg(struct cam_lrme_hw_io_buffer *io_buf,
+ uint32_t index, uint32_t *reg_val_pair, uint32_t *num_cmd,
+ struct cam_lrme_hw_info *hw_info)
+{
+ uint32_t reg_val;
+
+ /* 1. config buffer size */
+ reg_val = io_buf->io_cfg->planes[0].width;
+ reg_val |= (io_buf->io_cfg->planes[0].height << 16);
+ cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+ hw_info->bus_rd_reg.bus_client_reg[index].rd_buffer_size,
+ reg_val);
+
+ CAM_DBG(CAM_LRME,
+ "width %d", io_buf->io_cfg->planes[0].width);
+ CAM_DBG(CAM_LRME,
+ "height %d", io_buf->io_cfg->planes[0].height);
+
+ /* 2. config image address */
+ cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+ hw_info->bus_rd_reg.bus_client_reg[index].addr_image,
+ io_buf->io_addr[0]);
+
+ CAM_DBG(CAM_LRME, "io addr %llu", io_buf->io_addr[0]);
+
+ /* 3. config stride */
+ reg_val = io_buf->io_cfg->planes[0].plane_stride;
+ cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+ hw_info->bus_rd_reg.bus_client_reg[index].rd_stride,
+ reg_val);
+
+ CAM_DBG(CAM_LRME, "plane_stride %d",
+ io_buf->io_cfg->planes[0].plane_stride);
+
+ /* 4. enable client */
+ cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+ hw_info->bus_rd_reg.bus_client_reg[index].core_cfg, 0x1);
+
+ /* 5. unpack_cfg */
+ cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+ hw_info->bus_rd_reg.bus_client_reg[index].unpack_cfg_0, 0x0);
+}
+
+static void cam_lrme_hw_util_fill_we_reg(struct cam_lrme_hw_io_buffer *io_buf,
+ uint32_t index, uint32_t *reg_val_pair, uint32_t *num_cmd,
+ struct cam_lrme_hw_info *hw_info)
+{
+ /* config client mode */
+ cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+ hw_info->bus_wr_reg.bus_client_reg[index].cfg,
+ 0x1);
+
+ /* image address */
+ cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+ hw_info->bus_wr_reg.bus_client_reg[index].addr_image,
+ io_buf->io_addr[0]);
+ CAM_DBG(CAM_LRME, "io addr %llu", io_buf->io_addr[0]);
+
+ /* buffer width and height */
+ cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+ hw_info->bus_wr_reg.bus_client_reg[index].buffer_width_cfg,
+ io_buf->io_cfg->planes[0].width);
+ CAM_DBG(CAM_LRME, "width %d", io_buf->io_cfg->planes[0].width);
+
+ cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+ hw_info->bus_wr_reg.bus_client_reg[index].buffer_height_cfg,
+ io_buf->io_cfg->planes[0].height);
+ CAM_DBG(CAM_LRME, "height %d", io_buf->io_cfg->planes[0].height);
+
+ /* packer cfg */
+ cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+ hw_info->bus_wr_reg.bus_client_reg[index].packer_cfg,
+ (index == 0) ? 0x1 : 0x5);
+
+ /* client stride */
+ cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+ hw_info->bus_wr_reg.bus_client_reg[index].wr_stride,
+ io_buf->io_cfg->planes[0].meta_stride);
+ CAM_DBG(CAM_LRME, "plane_stride %d",
+ io_buf->io_cfg->planes[0].plane_stride);
+}
+
+
+static int cam_lrme_hw_util_process_config_hw(struct cam_hw_info *lrme_hw,
+ struct cam_lrme_hw_cmd_config_args *config_args)
+{
+ int i;
+ struct cam_hw_soc_info *soc_info = &lrme_hw->soc_info;
+ struct cam_lrme_cdm_info *hw_cdm_info;
+ uint32_t *cmd_buf_addr = config_args->cmd_buf_addr;
+ uint32_t reg_val_pair[CAM_LRME_MAX_REG_PAIR_NUM];
+ struct cam_lrme_hw_io_buffer *io_buf;
+ struct cam_lrme_hw_info *hw_info =
+ ((struct cam_lrme_core *)lrme_hw->core_info)->hw_info;
+ uint32_t num_cmd = 0;
+ uint32_t size;
+ uint32_t mem_base, available_size = config_args->size;
+ uint32_t output_res_mask = 0, input_res_mask = 0;
+
+
+ if (!cmd_buf_addr) {
+ CAM_ERR(CAM_LRME, "Invalid input args");
+ return -EINVAL;
+ }
+
+ hw_cdm_info =
+ ((struct cam_lrme_core *)lrme_hw->core_info)->hw_cdm_info;
+
+ for (i = 0; i < CAM_LRME_MAX_IO_BUFFER; i++) {
+ io_buf = &config_args->input_buf[i];
+
+ if (io_buf->valid == false)
+ break;
+
+ if (io_buf->io_cfg->direction != CAM_BUF_INPUT) {
+ CAM_ERR(CAM_LRME, "Incorrect direction %d %d",
+ io_buf->io_cfg->direction, CAM_BUF_INPUT);
+ return -EINVAL;
+ }
+ CAM_DBG(CAM_LRME,
+ "resource_type %d", io_buf->io_cfg->resource_type);
+
+ switch (io_buf->io_cfg->resource_type) {
+ case CAM_LRME_IO_TYPE_TAR:
+ cam_lrme_hw_util_fill_fe_reg(io_buf, 0, reg_val_pair,
+ &num_cmd, hw_info);
+
+ input_res_mask |= CAM_LRME_INPUT_PORT_TYPE_TAR;
+ break;
+ case CAM_LRME_IO_TYPE_REF:
+ cam_lrme_hw_util_fill_fe_reg(io_buf, 1, reg_val_pair,
+ &num_cmd, hw_info);
+
+ input_res_mask |= CAM_LRME_INPUT_PORT_TYPE_REF;
+ break;
+ default:
+ CAM_ERR(CAM_LRME, "wrong resource_type %d",
+ io_buf->io_cfg->resource_type);
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < CAM_LRME_BUS_RD_MAX_CLIENTS; i++)
+ if (!((input_res_mask >> i) & 0x1))
+ cam_lrme_cdm_write_reg_val_pair(reg_val_pair, &num_cmd,
+ hw_info->bus_rd_reg.bus_client_reg[i].core_cfg,
+ 0x0);
+
+ for (i = 0; i < CAM_LRME_MAX_IO_BUFFER; i++) {
+ io_buf = &config_args->output_buf[i];
+
+ if (io_buf->valid == false)
+ break;
+
+ if (io_buf->io_cfg->direction != CAM_BUF_OUTPUT) {
+			CAM_ERR(CAM_LRME, "Incorrect direction %d %d",
+				io_buf->io_cfg->direction, CAM_BUF_OUTPUT);
+ return -EINVAL;
+ }
+
+ CAM_DBG(CAM_LRME, "resource_type %d",
+ io_buf->io_cfg->resource_type);
+ switch (io_buf->io_cfg->resource_type) {
+ case CAM_LRME_IO_TYPE_DS2:
+ cam_lrme_hw_util_fill_we_reg(io_buf, 0, reg_val_pair,
+ &num_cmd, hw_info);
+
+ output_res_mask |= CAM_LRME_OUTPUT_PORT_TYPE_DS2;
+ break;
+ case CAM_LRME_IO_TYPE_RES:
+ cam_lrme_hw_util_fill_we_reg(io_buf, 1, reg_val_pair,
+ &num_cmd, hw_info);
+
+ output_res_mask |= CAM_LRME_OUTPUT_PORT_TYPE_RES;
+ break;
+
+ default:
+ CAM_ERR(CAM_LRME, "wrong resource_type %d",
+ io_buf->io_cfg->resource_type);
+ return -EINVAL;
+ }
+ }
+
+	for (i = 0; i < CAM_LRME_BUS_WR_MAX_CLIENTS; i++)
+ if (!((output_res_mask >> i) & 0x1))
+ cam_lrme_cdm_write_reg_val_pair(reg_val_pair, &num_cmd,
+ hw_info->bus_wr_reg.bus_client_reg[i].cfg, 0x0);
+
+ if (output_res_mask) {
+ /* write composite mask */
+ cam_lrme_cdm_write_reg_val_pair(reg_val_pair, &num_cmd,
+ hw_info->bus_wr_reg.common_reg.composite_mask_0,
+ output_res_mask);
+ }
+
+ size = hw_cdm_info->cdm_ops->cdm_required_size_changebase();
+ if ((size * 4) > available_size) {
+ CAM_ERR(CAM_LRME, "buf size:%d is not sufficient, expected: %d",
+ available_size, size);
+ return -EINVAL;
+ }
+
+ mem_base = CAM_SOC_GET_REG_MAP_CAM_BASE(soc_info, CAM_LRME_BASE_IDX);
+
+ hw_cdm_info->cdm_ops->cdm_write_changebase(cmd_buf_addr, mem_base);
+ cmd_buf_addr += size;
+ available_size -= (size * 4);
+
+ size = hw_cdm_info->cdm_ops->cdm_required_size_reg_random(
+ num_cmd / 2);
+
+ if ((size * 4) > available_size) {
+ CAM_ERR(CAM_LRME, "buf size:%d is not sufficient, expected: %d",
+ available_size, size);
+ return -ENOMEM;
+ }
+
+ hw_cdm_info->cdm_ops->cdm_write_regrandom(cmd_buf_addr, num_cmd / 2,
+ reg_val_pair);
+ cmd_buf_addr += size;
+ available_size -= (size * 4);
+
+ config_args->config_buf_size =
+ config_args->size - available_size;
+
+ return 0;
+}
+
+static int cam_lrme_hw_util_submit_go(struct cam_hw_info *lrme_hw)
+{
+ struct cam_lrme_core *lrme_core;
+ struct cam_hw_soc_info *soc_info;
+ struct cam_lrme_hw_info *hw_info;
+
+ lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+ hw_info = lrme_core->hw_info;
+ soc_info = &lrme_hw->soc_info;
+
+ cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base +
+ hw_info->bus_rd_reg.common_reg.cmd);
+
+ return 0;
+}
+
+static int cam_lrme_hw_util_reset(struct cam_hw_info *lrme_hw,
+ uint32_t reset_type)
+{
+ struct cam_lrme_core *lrme_core;
+ struct cam_hw_soc_info *soc_info = &lrme_hw->soc_info;
+ struct cam_lrme_hw_info *hw_info;
+ long time_left;
+
+ lrme_core = lrme_hw->core_info;
+ hw_info = lrme_core->hw_info;
+
+ switch (reset_type) {
+ case CAM_LRME_HW_RESET_TYPE_HW_RESET:
+ reinit_completion(&lrme_core->reset_complete);
+ cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base +
+ hw_info->titan_reg.top_rst_cmd);
+ time_left = wait_for_completion_timeout(
+ &lrme_core->reset_complete,
+ msecs_to_jiffies(CAM_LRME_HW_RESET_TIMEOUT));
+ if (time_left <= 0) {
+ CAM_ERR(CAM_LRME,
+ "HW reset wait failed time_left=%ld",
+ time_left);
+ return -ETIMEDOUT;
+ }
+ break;
+ case CAM_LRME_HW_RESET_TYPE_SW_RESET:
+ cam_io_w_mb(0x3, soc_info->reg_map[0].mem_base +
+ hw_info->bus_wr_reg.common_reg.sw_reset);
+ cam_io_w_mb(0x3, soc_info->reg_map[0].mem_base +
+ hw_info->bus_rd_reg.common_reg.sw_reset);
+ reinit_completion(&lrme_core->reset_complete);
+ cam_io_w_mb(0x2, soc_info->reg_map[0].mem_base +
+ hw_info->titan_reg.top_rst_cmd);
+ time_left = wait_for_completion_timeout(
+ &lrme_core->reset_complete,
+ msecs_to_jiffies(CAM_LRME_HW_RESET_TIMEOUT));
+ if (time_left <= 0) {
+ CAM_ERR(CAM_LRME,
+ "SW reset wait failed time_left=%ld",
+ time_left);
+ return -ETIMEDOUT;
+ }
+ break;
+ }
+
+ return 0;
+}
+
+int cam_lrme_hw_util_get_caps(struct cam_hw_info *lrme_hw,
+ struct cam_lrme_dev_cap *hw_caps)
+{
+ struct cam_hw_soc_info *soc_info = &lrme_hw->soc_info;
+ struct cam_lrme_hw_info *hw_info =
+ ((struct cam_lrme_core *)lrme_hw->core_info)->hw_info;
+ uint32_t reg_value;
+
+ if (!hw_info) {
+ CAM_ERR(CAM_LRME, "Invalid hw info data");
+ return -EINVAL;
+ }
+
+ reg_value = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ hw_info->clc_reg.clc_hw_version);
+ hw_caps->clc_hw_version.gen =
+ CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1C);
+ hw_caps->clc_hw_version.rev =
+ CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+ hw_caps->clc_hw_version.step =
+ CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+
+ reg_value = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ hw_info->bus_rd_reg.common_reg.hw_version);
+ hw_caps->bus_rd_hw_version.gen =
+ CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1C);
+ hw_caps->bus_rd_hw_version.rev =
+ CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+ hw_caps->bus_rd_hw_version.step =
+ CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+
+ reg_value = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ hw_info->bus_wr_reg.common_reg.hw_version);
+ hw_caps->bus_wr_hw_version.gen =
+ CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1C);
+ hw_caps->bus_wr_hw_version.rev =
+ CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+ hw_caps->bus_wr_hw_version.step =
+ CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+
+ reg_value = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ hw_info->titan_reg.top_hw_version);
+ hw_caps->top_hw_version.gen =
+ CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1C);
+ hw_caps->top_hw_version.rev =
+ CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+ hw_caps->top_hw_version.step =
+ CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+
+ reg_value = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ hw_info->titan_reg.top_titan_version);
+ hw_caps->top_titan_version.gen =
+ CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1C);
+ hw_caps->top_titan_version.rev =
+ CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+ hw_caps->top_titan_version.step =
+ CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+
+ return 0;
+}
+
+static int cam_lrme_hw_util_submit_req(struct cam_lrme_core *lrme_core,
+ struct cam_lrme_frame_request *frame_req)
+{
+ struct cam_lrme_cdm_info *hw_cdm_info =
+ lrme_core->hw_cdm_info;
+ struct cam_cdm_bl_request *cdm_cmd = hw_cdm_info->cdm_cmd;
+ struct cam_hw_update_entry *cmd;
+ int i, rc = 0;
+
+ if (frame_req->num_hw_update_entries > 0) {
+ cdm_cmd->cmd_arrary_count = frame_req->num_hw_update_entries;
+ cdm_cmd->type = CAM_CDM_BL_CMD_TYPE_MEM_HANDLE;
+ cdm_cmd->flag = false;
+ cdm_cmd->userdata = NULL;
+ cdm_cmd->cookie = 0;
+
+		for (i = 0; i < frame_req->num_hw_update_entries; i++) {
+ cmd = (frame_req->hw_update_entries + i);
+ cdm_cmd->cmd[i].bl_addr.mem_handle = cmd->handle;
+ cdm_cmd->cmd[i].offset = cmd->offset;
+ cdm_cmd->cmd[i].len = cmd->len;
+ }
+
+ rc = cam_cdm_submit_bls(hw_cdm_info->cdm_handle, cdm_cmd);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to submit cdm commands");
+ return -EINVAL;
+ }
+ } else {
+ CAM_ERR(CAM_LRME, "No hw update entry");
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+static int cam_lrme_hw_util_process_err(struct cam_hw_info *lrme_hw)
+{
+ struct cam_lrme_core *lrme_core = lrme_hw->core_info;
+ struct cam_lrme_frame_request *req_proc, *req_submit;
+ struct cam_lrme_hw_cb_args cb_args;
+ int rc;
+
+ req_proc = lrme_core->req_proc;
+ req_submit = lrme_core->req_submit;
+ cb_args.cb_type = CAM_LRME_CB_ERROR;
+
+ if ((lrme_core->state != CAM_LRME_CORE_STATE_PROCESSING) &&
+ (lrme_core->state != CAM_LRME_CORE_STATE_REQ_PENDING) &&
+ (lrme_core->state != CAM_LRME_CORE_STATE_REQ_PROC_PEND)) {
+ CAM_ERR(CAM_LRME, "Get error irq in wrong state %d",
+ lrme_core->state);
+ }
+
+ CAM_ERR_RATE_LIMIT(CAM_LRME, "Start recovery");
+ lrme_core->state = CAM_LRME_CORE_STATE_RECOVERY;
+ rc = cam_lrme_hw_util_reset(lrme_hw, CAM_LRME_HW_RESET_TYPE_HW_RESET);
+ if (rc)
+ CAM_ERR(CAM_LRME, "Failed to reset");
+
+ lrme_core->req_proc = NULL;
+ lrme_core->req_submit = NULL;
+ if (!rc)
+ lrme_core->state = CAM_LRME_CORE_STATE_IDLE;
+
+ cb_args.frame_req = req_proc;
+ lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb(lrme_core->hw_mgr_cb.data,
+ &cb_args);
+
+ cb_args.frame_req = req_submit;
+ lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb(lrme_core->hw_mgr_cb.data,
+ &cb_args);
+
+ return rc;
+}
+
+static int cam_lrme_hw_util_process_reg_update(
+ struct cam_hw_info *lrme_hw, struct cam_lrme_hw_cb_args *cb_args)
+{
+ struct cam_lrme_core *lrme_core = lrme_hw->core_info;
+ int rc = 0;
+
+ cb_args->cb_type |= CAM_LRME_CB_COMP_REG_UPDATE;
+ if (lrme_core->state == CAM_LRME_CORE_STATE_REQ_PENDING) {
+ lrme_core->state = CAM_LRME_CORE_STATE_PROCESSING;
+ } else {
+ CAM_ERR(CAM_LRME, "Reg update in wrong state %d",
+ lrme_core->state);
+ rc = cam_lrme_hw_util_process_err(lrme_hw);
+ if (rc)
+ CAM_ERR(CAM_LRME, "Failed to reset");
+ return -EINVAL;
+ }
+
+ lrme_core->req_proc = lrme_core->req_submit;
+ lrme_core->req_submit = NULL;
+
+ return 0;
+}
+
+static int cam_lrme_hw_util_process_idle(
+ struct cam_hw_info *lrme_hw, struct cam_lrme_hw_cb_args *cb_args)
+{
+ struct cam_lrme_core *lrme_core = lrme_hw->core_info;
+ int rc = 0;
+
+ cb_args->cb_type |= CAM_LRME_CB_BUF_DONE;
+ switch (lrme_core->state) {
+ case CAM_LRME_CORE_STATE_REQ_PROC_PEND:
+ cam_lrme_hw_util_submit_go(lrme_hw);
+ lrme_core->state = CAM_LRME_CORE_STATE_REQ_PENDING;
+ break;
+
+ case CAM_LRME_CORE_STATE_PROCESSING:
+ lrme_core->state = CAM_LRME_CORE_STATE_IDLE;
+ break;
+
+ default:
+ CAM_ERR(CAM_LRME, "Idle in wrong state %d",
+ lrme_core->state);
+ rc = cam_lrme_hw_util_process_err(lrme_hw);
+ return rc;
+ }
+ cb_args->frame_req = lrme_core->req_proc;
+ lrme_core->req_proc = NULL;
+
+ return 0;
+}
+
+void cam_lrme_set_irq(struct cam_hw_info *lrme_hw,
+ enum cam_lrme_irq_set set)
+{
+ struct cam_hw_soc_info *soc_info = &lrme_hw->soc_info;
+ struct cam_lrme_core *lrme_core = lrme_hw->core_info;
+ struct cam_lrme_hw_info *hw_info = lrme_core->hw_info;
+
+ switch (set) {
+ case CAM_LRME_IRQ_ENABLE:
+ cam_io_w_mb(0xFFFF,
+ soc_info->reg_map[0].mem_base +
+ hw_info->titan_reg.top_irq_mask);
+ cam_io_w_mb(0xFFFF,
+ soc_info->reg_map[0].mem_base +
+ hw_info->bus_wr_reg.common_reg.irq_mask_0);
+ cam_io_w_mb(0xFFFF,
+ soc_info->reg_map[0].mem_base +
+ hw_info->bus_wr_reg.common_reg.irq_mask_1);
+ cam_io_w_mb(0xFFFF,
+ soc_info->reg_map[0].mem_base +
+ hw_info->bus_rd_reg.common_reg.irq_mask);
+ break;
+
+ case CAM_LRME_IRQ_DISABLE:
+ cam_io_w_mb(0x0,
+ soc_info->reg_map[0].mem_base +
+ hw_info->titan_reg.top_irq_mask);
+ cam_io_w_mb(0x0,
+ soc_info->reg_map[0].mem_base +
+ hw_info->bus_wr_reg.common_reg.irq_mask_0);
+ cam_io_w_mb(0x0,
+ soc_info->reg_map[0].mem_base +
+ hw_info->bus_wr_reg.common_reg.irq_mask_1);
+ cam_io_w_mb(0x0,
+ soc_info->reg_map[0].mem_base +
+ hw_info->bus_rd_reg.common_reg.irq_mask);
+ break;
+ }
+}
+
+
+int cam_lrme_hw_process_irq(void *priv, void *data)
+{
+ struct cam_lrme_hw_work_data *work_data;
+ struct cam_hw_info *lrme_hw;
+ struct cam_lrme_core *lrme_core;
+ int rc = 0;
+ uint32_t top_irq_status, fe_irq_status;
+ uint32_t *we_irq_status;
+ struct cam_lrme_hw_cb_args cb_args;
+
+ if (!data || !priv) {
+ CAM_ERR(CAM_LRME, "Invalid data %pK %pK", data, priv);
+ return -EINVAL;
+ }
+
+ memset(&cb_args, 0, sizeof(struct cam_lrme_hw_cb_args));
+ lrme_hw = (struct cam_hw_info *)priv;
+ work_data = (struct cam_lrme_hw_work_data *)data;
+ lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+ top_irq_status = work_data->top_irq_status;
+ fe_irq_status = work_data->fe_irq_status;
+ we_irq_status = work_data->we_irq_status;
+
+ CAM_DBG(CAM_LRME,
+ "top status %x, fe status %x, we status0 %x, we status1 %x",
+ top_irq_status, fe_irq_status, we_irq_status[0],
+ we_irq_status[1]);
+ CAM_DBG(CAM_LRME, "Current state %d", lrme_core->state);
+
+ mutex_lock(&lrme_hw->hw_mutex);
+
+ if (top_irq_status & (1 << 3)) {
+ CAM_DBG(CAM_LRME, "Error");
+ rc = cam_lrme_hw_util_process_err(lrme_hw);
+ if (rc)
+ CAM_ERR(CAM_LRME, "Process error failed");
+ goto end;
+ }
+
+ if (we_irq_status[0] & (1 << 1)) {
+ CAM_DBG(CAM_LRME, "reg update");
+ rc = cam_lrme_hw_util_process_reg_update(lrme_hw, &cb_args);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Process reg_update failed");
+ goto end;
+ }
+ }
+
+ if (top_irq_status & (1 << 4)) {
+ CAM_DBG(CAM_LRME, "IDLE");
+
+ rc = cam_lrme_hw_util_process_idle(lrme_hw, &cb_args);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Process idle failed");
+ goto end;
+ }
+ }
+
+ if (lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb) {
+ lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb(lrme_core->
+ hw_mgr_cb.data, &cb_args);
+ } else {
+ CAM_ERR(CAM_LRME, "No hw mgr cb");
+ rc = -EINVAL;
+ }
+
+end:
+ mutex_unlock(&lrme_hw->hw_mutex);
+ return rc;
+}
+
+int cam_lrme_hw_start(void *hw_priv, void *hw_start_args, uint32_t arg_size)
+{
+ struct cam_hw_info *lrme_hw = (struct cam_hw_info *)hw_priv;
+ int rc = 0;
+ struct cam_lrme_core *lrme_core;
+
+ if (!lrme_hw) {
+ CAM_ERR(CAM_LRME,
+ "Invalid input params, lrme_hw %pK",
+ lrme_hw);
+ return -EINVAL;
+ }
+
+ lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+
+ mutex_lock(&lrme_hw->hw_mutex);
+
+ if (lrme_hw->open_count > 0) {
+ CAM_DBG(CAM_LRME, "This device is activated before");
+ goto unlock;
+ }
+
+ rc = cam_lrme_soc_enable_resources(lrme_hw);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to enable soc resources");
+ goto unlock;
+ }
+
+ rc = cam_lrme_hw_util_reset(lrme_hw, CAM_LRME_HW_RESET_TYPE_HW_RESET);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to reset hw");
+ goto disable_soc;
+ }
+
+ if (lrme_core->hw_cdm_info) {
+ struct cam_lrme_cdm_info *hw_cdm_info =
+ lrme_core->hw_cdm_info;
+
+ rc = cam_cdm_stream_on(hw_cdm_info->cdm_handle);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to stream on cdm");
+ goto disable_soc;
+ }
+ }
+
+ lrme_hw->hw_state = CAM_HW_STATE_POWER_UP;
+ lrme_hw->open_count++;
+ lrme_core->state = CAM_LRME_CORE_STATE_IDLE;
+
+ mutex_unlock(&lrme_hw->hw_mutex);
+ return rc;
+
+disable_soc:
+ if (cam_lrme_soc_disable_resources(lrme_hw))
+ CAM_ERR(CAM_LRME, "Error in disable soc resources");
+unlock:
+ mutex_unlock(&lrme_hw->hw_mutex);
+ return rc;
+}
+
+int cam_lrme_hw_stop(void *hw_priv, void *hw_stop_args, uint32_t arg_size)
+{
+ struct cam_hw_info *lrme_hw = (struct cam_hw_info *)hw_priv;
+ int rc = 0;
+ struct cam_lrme_core *lrme_core;
+
+ if (!lrme_hw) {
+ CAM_ERR(CAM_LRME, "Invalid argument");
+ return -EINVAL;
+ }
+
+ lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+
+ mutex_lock(&lrme_hw->hw_mutex);
+
+ if (lrme_hw->open_count == 0) {
+ mutex_unlock(&lrme_hw->hw_mutex);
+ CAM_ERR(CAM_LRME, "Error Unbalanced stop");
+ return -EINVAL;
+ }
+ lrme_hw->open_count--;
+
+ if (lrme_hw->open_count)
+ goto unlock;
+
+ lrme_core->req_proc = NULL;
+ lrme_core->req_submit = NULL;
+
+ if (lrme_core->hw_cdm_info) {
+ struct cam_lrme_cdm_info *hw_cdm_info =
+ lrme_core->hw_cdm_info;
+
+ rc = cam_cdm_stream_off(hw_cdm_info->cdm_handle);
+ if (rc) {
+ CAM_ERR(CAM_LRME,
+ "Failed in CDM StreamOff, handle=0x%x, rc=%d",
+ hw_cdm_info->cdm_handle, rc);
+ goto unlock;
+ }
+ }
+
+ rc = cam_lrme_soc_disable_resources(lrme_hw);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed in Disable SOC, rc=%d", rc);
+ goto unlock;
+ }
+
+ lrme_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+ if (lrme_core->state == CAM_LRME_CORE_STATE_IDLE) {
+ lrme_core->state = CAM_LRME_CORE_STATE_INIT;
+ } else {
+ CAM_ERR(CAM_LRME, "HW in wrong state %d", lrme_core->state);
+		rc = -EINVAL;
+ }
+
+unlock:
+ mutex_unlock(&lrme_hw->hw_mutex);
+ return rc;
+}
+
+int cam_lrme_hw_submit_req(void *hw_priv, void *hw_submit_args,
+	uint32_t arg_size)
+{
+	struct cam_hw_info *lrme_hw = (struct cam_hw_info *)hw_priv;
+	struct cam_lrme_core *lrme_core;
+	struct cam_lrme_hw_submit_args *args =
+		(struct cam_lrme_hw_submit_args *)hw_submit_args;
+	int rc = 0;
+	struct cam_lrme_frame_request *frame_req;
+
+	if (!hw_priv || !hw_submit_args) {
+		CAM_ERR(CAM_LRME, "Invalid input");
+		return -EINVAL;
+	}
+
+	if (sizeof(struct cam_lrme_hw_submit_args) != arg_size) {
+		CAM_ERR(CAM_LRME,
+			"size of args %lu, arg_size %d",
+			sizeof(struct cam_lrme_hw_submit_args), arg_size);
+		return -EINVAL;
+	}
+
+	frame_req = args->frame_req;
+
+	mutex_lock(&lrme_hw->hw_mutex);
+
+	if (lrme_hw->open_count == 0) {
+		CAM_ERR(CAM_LRME, "HW is not open");
+		mutex_unlock(&lrme_hw->hw_mutex);
+		return -EINVAL;
+	}
+
+	lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+	if (lrme_core->state != CAM_LRME_CORE_STATE_IDLE &&
+		lrme_core->state != CAM_LRME_CORE_STATE_PROCESSING) {
+		mutex_unlock(&lrme_hw->hw_mutex);
+		CAM_DBG(CAM_LRME, "device busy, can not submit, state %d",
+			lrme_core->state);
+		return -EBUSY;
+	}
+
+	if (lrme_core->req_submit != NULL) {
+		CAM_ERR(CAM_LRME, "req_submit is not NULL");
+		rc = -EBUSY;
+		goto error;
+	}
+
+	rc = cam_lrme_hw_util_submit_req(lrme_core, frame_req);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Submit req failed");
+		goto error;
+	}
+
+	switch (lrme_core->state) {
+	case CAM_LRME_CORE_STATE_PROCESSING:
+		lrme_core->state = CAM_LRME_CORE_STATE_REQ_PROC_PEND;
+		break;
+
+	case CAM_LRME_CORE_STATE_IDLE:
+		cam_lrme_hw_util_submit_go(lrme_hw);
+		lrme_core->state = CAM_LRME_CORE_STATE_REQ_PENDING;
+		break;
+
+	default:
+		CAM_ERR(CAM_LRME, "Wrong hw state");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	lrme_core->req_submit = frame_req;
+	mutex_unlock(&lrme_hw->hw_mutex);
+	CAM_DBG(CAM_LRME, "Release lock, submit done for req %llu",
+		frame_req->req_id);
+
+	return 0;
+
+error:
+	mutex_unlock(&lrme_hw->hw_mutex);
+
+	return rc;
+
+}
+
+int cam_lrme_hw_reset(void *hw_priv, void *reset_core_args, uint32_t arg_size)
+{
+	struct cam_hw_info *lrme_hw = hw_priv;
+	struct cam_lrme_core *lrme_core;
+	struct cam_lrme_hw_reset_args *lrme_reset_args = reset_core_args;
+	int rc;
+
+	if (!hw_priv) {
+		CAM_ERR(CAM_LRME, "Invalid input args");
+		return -EINVAL;
+	}
+
+	if (!reset_core_args ||
+		sizeof(struct cam_lrme_hw_reset_args) != arg_size) {
+		CAM_ERR(CAM_LRME, "Invalid reset args");
+		return -EINVAL;
+	}
+
+	lrme_core = lrme_hw->core_info;
+
+	mutex_lock(&lrme_hw->hw_mutex);
+	if (lrme_core->state == CAM_LRME_CORE_STATE_RECOVERY) {
+		mutex_unlock(&lrme_hw->hw_mutex);
+		CAM_ERR(CAM_LRME, "Reset not allowed in %d state",
+			lrme_core->state);
+		return -EINVAL;
+	}
+
+	lrme_core->state = CAM_LRME_CORE_STATE_RECOVERY;
+
+	rc = cam_lrme_hw_util_reset(lrme_hw, lrme_reset_args->reset_type);
+	if (rc) {
+		mutex_unlock(&lrme_hw->hw_mutex);
+		CAM_ERR(CAM_LRME, "Failed to reset");
+		return rc;
+	}
+
+	lrme_core->state = CAM_LRME_CORE_STATE_IDLE;
+
+	mutex_unlock(&lrme_hw->hw_mutex);
+
+	return 0;
+}
+
+int cam_lrme_hw_get_caps(void *hw_priv, void *get_hw_cap_args,
+ uint32_t arg_size)
+{
+ struct cam_hw_info *lrme_hw;
+ struct cam_lrme_core *lrme_core;
+ struct cam_lrme_dev_cap *lrme_hw_caps =
+ (struct cam_lrme_dev_cap *)get_hw_cap_args;
+
+ if (!hw_priv || !get_hw_cap_args) {
+ CAM_ERR(CAM_LRME, "Invalid input pointers %pK %pK",
+ hw_priv, get_hw_cap_args);
+ return -EINVAL;
+ }
+
+ lrme_hw = (struct cam_hw_info *)hw_priv;
+ lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+ *lrme_hw_caps = lrme_core->hw_caps;
+
+ return 0;
+}
+
+irqreturn_t cam_lrme_hw_irq(int irq_num, void *data)
+{
+	struct cam_hw_info *lrme_hw;
+	struct cam_lrme_core *lrme_core;
+	struct cam_hw_soc_info *soc_info;
+	struct cam_lrme_hw_info *hw_info;
+	struct crm_workq_task *task;
+	struct cam_lrme_hw_work_data *work_data;
+	uint32_t top_irq_status, fe_irq_status, we_irq_status0, we_irq_status1;
+	int rc;
+
+	if (!data) {
+		CAM_ERR(CAM_LRME, "Invalid data in IRQ callback");
+		return IRQ_NONE;
+	}
+
+	lrme_hw = (struct cam_hw_info *)data;
+	lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+	soc_info = &lrme_hw->soc_info;
+	hw_info = lrme_core->hw_info;
+
+	top_irq_status = cam_io_r_mb(
+		soc_info->reg_map[0].mem_base +
+		hw_info->titan_reg.top_irq_status);
+	CAM_DBG(CAM_LRME, "top_irq_status %x", top_irq_status);
+	cam_io_w_mb(top_irq_status,
+		soc_info->reg_map[0].mem_base +
+		hw_info->titan_reg.top_irq_clear);
+	top_irq_status &= CAM_LRME_TOP_IRQ_MASK;
+
+	fe_irq_status = cam_io_r_mb(
+		soc_info->reg_map[0].mem_base +
+		hw_info->bus_rd_reg.common_reg.irq_status);
+	CAM_DBG(CAM_LRME, "fe_irq_status %x", fe_irq_status);
+	cam_io_w_mb(fe_irq_status,
+		soc_info->reg_map[0].mem_base +
+		hw_info->bus_rd_reg.common_reg.irq_clear);
+	fe_irq_status &= CAM_LRME_FE_IRQ_MASK;
+
+	we_irq_status0 = cam_io_r_mb(
+		soc_info->reg_map[0].mem_base +
+		hw_info->bus_wr_reg.common_reg.irq_status_0);
+	CAM_DBG(CAM_LRME, "we_irq_status[0] %x", we_irq_status0);
+	cam_io_w_mb(we_irq_status0,
+		soc_info->reg_map[0].mem_base +
+		hw_info->bus_wr_reg.common_reg.irq_clear_0);
+	we_irq_status0 &= CAM_LRME_WE_IRQ_MASK_0;
+
+	we_irq_status1 = cam_io_r_mb(
+		soc_info->reg_map[0].mem_base +
+		hw_info->bus_wr_reg.common_reg.irq_status_1);
+	CAM_DBG(CAM_LRME, "we_irq_status[1] %x", we_irq_status1);
+	cam_io_w_mb(we_irq_status1,
+		soc_info->reg_map[0].mem_base +
+		hw_info->bus_wr_reg.common_reg.irq_clear_1);
+	we_irq_status1 &= CAM_LRME_WE_IRQ_MASK_1;
+
+	cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base +
+		hw_info->titan_reg.top_irq_cmd);
+	cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base +
+		hw_info->bus_wr_reg.common_reg.irq_cmd);
+	cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base +
+		hw_info->bus_rd_reg.common_reg.irq_cmd);
+
+	if (top_irq_status & 0x1) {
+		complete(&lrme_core->reset_complete);
+		top_irq_status &= (~0x1);
+	}
+
+	if (top_irq_status || fe_irq_status ||
+		we_irq_status0 || we_irq_status1) {
+		task = cam_req_mgr_workq_get_task(lrme_core->work);
+		if (!task) {
+			CAM_ERR(CAM_LRME, "no empty task available");
+			return IRQ_HANDLED;
+		}
+		work_data = (struct cam_lrme_hw_work_data *)task->payload;
+		work_data->top_irq_status = top_irq_status;
+		work_data->fe_irq_status = fe_irq_status;
+		work_data->we_irq_status[0] = we_irq_status0;
+		work_data->we_irq_status[1] = we_irq_status1;
+		task->process_cb = cam_lrme_hw_process_irq;
+		rc = cam_req_mgr_workq_enqueue_task(task, data,
+			CRM_TASK_PRIORITY_0);
+		if (rc)
+			CAM_ERR(CAM_LRME,
+				"Failed in enqueue work task, rc=%d", rc);
+	}
+
+	return IRQ_HANDLED;
+}
+
+int cam_lrme_hw_process_cmd(void *hw_priv, uint32_t cmd_type,
+ void *cmd_args, uint32_t arg_size)
+{
+ struct cam_hw_info *lrme_hw = (struct cam_hw_info *)hw_priv;
+ int rc = 0;
+
+ switch (cmd_type) {
+ case CAM_LRME_HW_CMD_PREPARE_HW_UPDATE: {
+ struct cam_lrme_hw_cmd_config_args *config_args;
+
+ config_args = (struct cam_lrme_hw_cmd_config_args *)cmd_args;
+ rc = cam_lrme_hw_util_process_config_hw(lrme_hw, config_args);
+ break;
+ }
+
+ case CAM_LRME_HW_CMD_REGISTER_CB: {
+ struct cam_lrme_hw_cmd_set_cb *cb_args;
+ struct cam_lrme_device *hw_device;
+ struct cam_lrme_core *lrme_core =
+ (struct cam_lrme_core *)lrme_hw->core_info;
+ cb_args = (struct cam_lrme_hw_cmd_set_cb *)cmd_args;
+ lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb =
+ cb_args->cam_lrme_hw_mgr_cb;
+ lrme_core->hw_mgr_cb.data = cb_args->data;
+ hw_device = cb_args->data;
+ rc = 0;
+ break;
+ }
+
+ case CAM_LRME_HW_CMD_SUBMIT: {
+ struct cam_lrme_hw_submit_args *submit_args;
+
+ submit_args = (struct cam_lrme_hw_submit_args *)cmd_args;
+ rc = cam_lrme_hw_submit_req(hw_priv,
+ submit_args, arg_size);
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.h b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.h
new file mode 100644
index 0000000..bf2f370
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.h
@@ -0,0 +1,457 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_LRME_HW_CORE_H_
+#define _CAM_LRME_HW_CORE_H_
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <media/cam_defs.h>
+#include <media/cam_lrme.h>
+
+#include "cam_common_util.h"
+#include "cam_debug_util.h"
+#include "cam_io_util.h"
+#include "cam_cpas_api.h"
+#include "cam_cdm_intf_api.h"
+#include "cam_lrme_hw_intf.h"
+#include "cam_lrme_hw_soc.h"
+#include "cam_req_mgr_workq.h"
+
+#define CAM_LRME_HW_RESET_TIMEOUT 3000
+
+#define CAM_LRME_BUS_RD_MAX_CLIENTS 2
+#define CAM_LRME_BUS_WR_MAX_CLIENTS 2
+
+#define CAM_LRME_HW_WORKQ_NUM_TASK 30
+
+#define CAM_LRME_TOP_IRQ_MASK 0x19
+#define CAM_LRME_WE_IRQ_MASK_0 0x2
+#define CAM_LRME_WE_IRQ_MASK_1 0x0
+#define CAM_LRME_FE_IRQ_MASK 0x0
+
+#define CAM_LRME_MAX_REG_PAIR_NUM 60
+
+/**
+ * enum cam_lrme_irq_set
+ *
+ * @CAM_LRME_IRQ_ENABLE : Enable irqs
+ * @CAM_LRME_IRQ_DISABLE : Disable irqs
+ */
+enum cam_lrme_irq_set {
+ CAM_LRME_IRQ_ENABLE,
+ CAM_LRME_IRQ_DISABLE,
+};
+
+/**
+ * struct cam_lrme_cdm_info : information used to submit cdm command
+ *
+ * @cdm_handle : CDM handle for this device
+ * @cdm_ops : CDM ops
+ * @cdm_cmd : CDM command pointer
+ */
+struct cam_lrme_cdm_info {
+ uint32_t cdm_handle;
+ struct cam_cdm_utils_ops *cdm_ops;
+ struct cam_cdm_bl_request *cdm_cmd;
+};
+
+/**
+ * struct cam_lrme_hw_work_data : Work data for HW work queue
+ *
+ * @top_irq_status : Top registers irq status
+ * @fe_irq_status : FE engine irq status
+ * @we_irq_status : WE engine irq status
+ */
+struct cam_lrme_hw_work_data {
+ uint32_t top_irq_status;
+ uint32_t fe_irq_status;
+ uint32_t we_irq_status[2];
+};
+
+/**
+ * enum cam_lrme_core_state : LRME core states
+ *
+ * @CAM_LRME_CORE_STATE_UNINIT : LRME is in uninit state
+ * @CAM_LRME_CORE_STATE_INIT : LRME is in init state after probe
+ * @CAM_LRME_CORE_STATE_IDLE : LRME is in idle state. Hardware is in
+ * this state when no frame is processing
+ * or waiting for this core.
+ * @CAM_LRME_CORE_STATE_REQ_PENDING : LRME is in pending state. One frame is
+ * waiting for processing
+ * @CAM_LRME_CORE_STATE_PROCESSING : LRME is in processing state. HW manager
+ * can submit one more frame to HW
+ * @CAM_LRME_CORE_STATE_REQ_PROC_PEND : Indicate two frames are inside HW.
+ * @CAM_LRME_CORE_STATE_RECOVERY : Indicate core is in the process of reset
+ * @CAM_LRME_CORE_STATE_MAX : upper limit of states
+ */
+enum cam_lrme_core_state {
+ CAM_LRME_CORE_STATE_UNINIT,
+ CAM_LRME_CORE_STATE_INIT,
+ CAM_LRME_CORE_STATE_IDLE,
+ CAM_LRME_CORE_STATE_REQ_PENDING,
+ CAM_LRME_CORE_STATE_PROCESSING,
+ CAM_LRME_CORE_STATE_REQ_PROC_PEND,
+ CAM_LRME_CORE_STATE_RECOVERY,
+ CAM_LRME_CORE_STATE_MAX,
+};
+
+/**
+ * struct cam_lrme_core : LRME HW core information
+ *
+ * @hw_info : Pointer to base HW information structure
+ * @device_iommu : Device iommu handle
+ * @cdm_iommu : CDM iommu handle
+ * @hw_caps : Hardware capabilities
+ * @state : Hardware state
+ * @reset_complete : Reset completion
+ * @work : Hardware workqueue to handle irq events
+ * @work_data : Work data used by hardware workqueue
+ * @hw_mgr_cb : Hw manager callback
+ * @req_proc : Pointer to the processing frame request
+ * @req_submit : Pointer to the frame request waiting for processing
+ * @hw_cdm_info : CDM information used by this device
+ * @hw_idx : Hardware index
+ */
+struct cam_lrme_core {
+ struct cam_lrme_hw_info *hw_info;
+ struct cam_iommu_handle device_iommu;
+ struct cam_iommu_handle cdm_iommu;
+ struct cam_lrme_dev_cap hw_caps;
+ enum cam_lrme_core_state state;
+ struct completion reset_complete;
+ struct cam_req_mgr_core_workq *work;
+ struct cam_lrme_hw_work_data work_data[CAM_LRME_HW_WORKQ_NUM_TASK];
+ struct cam_lrme_hw_cmd_set_cb hw_mgr_cb;
+ struct cam_lrme_frame_request *req_proc;
+ struct cam_lrme_frame_request *req_submit;
+ struct cam_lrme_cdm_info *hw_cdm_info;
+ uint32_t hw_idx;
+};
+
+/**
+ * struct cam_lrme_bus_rd_reg_common : Offsets of FE common registers
+ *
+ * @hw_version : Offset of hw_version register
+ * @hw_capability : Offset of hw_capability register
+ * @sw_reset : Offset of sw_reset register
+ * @cgc_override : Offset of cgc_override register
+ * @irq_mask : Offset of irq_mask register
+ * @irq_clear : Offset of irq_clear register
+ * @irq_cmd : Offset of irq_cmd register
+ * @irq_status : Offset of irq_status register
+ * @cmd : Offset of cmd register
+ * @irq_set : Offset of irq_set register
+ * @misr_reset : Offset of misr_reset register
+ * @security_cfg : Offset of security_cfg register
+ * @pwr_iso_cfg : Offset of pwr_iso_cfg register
+ * @pwr_iso_seed : Offset of pwr_iso_seed register
+ * @test_bus_ctrl : Offset of test_bus_ctrl register
+ * @spare : Offset of spare register
+ */
+struct cam_lrme_bus_rd_reg_common {
+ uint32_t hw_version;
+ uint32_t hw_capability;
+ uint32_t sw_reset;
+ uint32_t cgc_override;
+ uint32_t irq_mask;
+ uint32_t irq_clear;
+ uint32_t irq_cmd;
+ uint32_t irq_status;
+ uint32_t cmd;
+ uint32_t irq_set;
+ uint32_t misr_reset;
+ uint32_t security_cfg;
+ uint32_t pwr_iso_cfg;
+ uint32_t pwr_iso_seed;
+ uint32_t test_bus_ctrl;
+ uint32_t spare;
+};
+
+/**
+ * struct cam_lrme_bus_wr_reg_common : Offset of WE common registers
+ * @hw_version : Offset of hw_version register
+ * @hw_capability : Offset of hw_capability register
+ * @sw_reset : Offset of sw_reset register
+ * @cgc_override : Offset of cgc_override register
+ * @misr_reset : Offset of misr_reset register
+ * @pwr_iso_cfg : Offset of pwr_iso_cfg register
+ * @test_bus_ctrl : Offset of test_bus_ctrl register
+ * @composite_mask_0 : Offset of composite_mask_0 register
+ * @irq_mask_0 : Offset of irq_mask_0 register
+ * @irq_mask_1 : Offset of irq_mask_1 register
+ * @irq_clear_0 : Offset of irq_clear_0 register
+ * @irq_clear_1 : Offset of irq_clear_1 register
+ * @irq_status_0 : Offset of irq_status_0 register
+ * @irq_status_1 : Offset of irq_status_1 register
+ * @irq_cmd : Offset of irq_cmd register
+ * @irq_set_0 : Offset of irq_set_0 register
+ * @irq_set_1 : Offset of irq_set_1 register
+ * @addr_fifo_status : Offset of addr_fifo_status register
+ * @frame_header_cfg0 : Offset of frame_header_cfg0 register
+ * @frame_header_cfg1 : Offset of frame_header_cfg1 register
+ * @spare : Offset of spare register
+ */
+struct cam_lrme_bus_wr_reg_common {
+ uint32_t hw_version;
+ uint32_t hw_capability;
+ uint32_t sw_reset;
+ uint32_t cgc_override;
+ uint32_t misr_reset;
+ uint32_t pwr_iso_cfg;
+ uint32_t test_bus_ctrl;
+ uint32_t composite_mask_0;
+ uint32_t irq_mask_0;
+ uint32_t irq_mask_1;
+ uint32_t irq_clear_0;
+ uint32_t irq_clear_1;
+ uint32_t irq_status_0;
+ uint32_t irq_status_1;
+ uint32_t irq_cmd;
+ uint32_t irq_set_0;
+ uint32_t irq_set_1;
+ uint32_t addr_fifo_status;
+ uint32_t frame_header_cfg0;
+ uint32_t frame_header_cfg1;
+ uint32_t spare;
+};
+
+/**
+ * struct cam_lrme_bus_rd_bus_client : Offset of FE registers
+ *
+ * @core_cfg : Offset of core_cfg register
+ * @ccif_meta_data : Offset of ccif_meta_data register
+ * @addr_image : Offset of addr_image register
+ * @rd_buffer_size : Offset of rd_buffer_size register
+ * @rd_stride : Offset of rd_stride register
+ * @unpack_cfg_0 : Offset of unpack_cfg_0 register
+ * @latency_buff_allocation : Offset of latency_buff_allocation register
+ * @burst_limit_cfg : Offset of burst_limit_cfg register
+ * @misr_cfg_0 : Offset of misr_cfg_0 register
+ * @misr_cfg_1 : Offset of misr_cfg_1 register
+ * @misr_rd_val : Offset of misr_rd_val register
+ * @debug_status_cfg : Offset of debug_status_cfg register
+ * @debug_status_0 : Offset of debug_status_0 register
+ * @debug_status_1 : Offset of debug_status_1 register
+ */
+struct cam_lrme_bus_rd_bus_client {
+ uint32_t core_cfg;
+ uint32_t ccif_meta_data;
+ uint32_t addr_image;
+ uint32_t rd_buffer_size;
+ uint32_t rd_stride;
+ uint32_t unpack_cfg_0;
+ uint32_t latency_buff_allocation;
+ uint32_t burst_limit_cfg;
+ uint32_t misr_cfg_0;
+ uint32_t misr_cfg_1;
+ uint32_t misr_rd_val;
+ uint32_t debug_status_cfg;
+ uint32_t debug_status_0;
+ uint32_t debug_status_1;
+};
+
+/**
+ * struct cam_lrme_bus_wr_bus_client : Offset of WE registers
+ *
+ * @status_0 : Offset of status_0 register
+ * @status_1 : Offset of status_1 register
+ * @cfg : Offset of cfg register
+ * @addr_frame_header : Offset of addr_frame_header register
+ * @frame_header_cfg : Offset of frame_header_cfg register
+ * @addr_image : Offset of addr_image register
+ * @addr_image_offset : Offset of addr_image_offset register
+ * @buffer_width_cfg : Offset of buffer_width_cfg register
+ * @buffer_height_cfg : Offset of buffer_height_cfg register
+ * @packer_cfg : Offset of packer_cfg register
+ * @wr_stride : Offset of wr_stride register
+ * @irq_subsample_cfg_period : Offset of irq_subsample_cfg_period register
+ * @irq_subsample_cfg_pattern : Offset of irq_subsample_cfg_pattern register
+ * @burst_limit_cfg : Offset of burst_limit_cfg register
+ * @misr_cfg : Offset of misr_cfg register
+ * @misr_rd_word_sel : Offset of misr_rd_word_sel register
+ * @misr_val : Offset of misr_val register
+ * @debug_status_cfg : Offset of debug_status_cfg register
+ * @debug_status_0 : Offset of debug_status_0 register
+ * @debug_status_1 : Offset of debug_status_1 register
+ */
+struct cam_lrme_bus_wr_bus_client {
+ uint32_t status_0;
+ uint32_t status_1;
+ uint32_t cfg;
+ uint32_t addr_frame_header;
+ uint32_t frame_header_cfg;
+ uint32_t addr_image;
+ uint32_t addr_image_offset;
+ uint32_t buffer_width_cfg;
+ uint32_t buffer_height_cfg;
+ uint32_t packer_cfg;
+ uint32_t wr_stride;
+ uint32_t irq_subsample_cfg_period;
+ uint32_t irq_subsample_cfg_pattern;
+ uint32_t burst_limit_cfg;
+ uint32_t misr_cfg;
+ uint32_t misr_rd_word_sel;
+ uint32_t misr_val;
+ uint32_t debug_status_cfg;
+ uint32_t debug_status_0;
+ uint32_t debug_status_1;
+};
+
+/**
+ * struct cam_lrme_bus_rd_hw_info : FE registers information
+ *
+ * @common_reg : FE common register
+ * @bus_client_reg : List of FE bus registers information
+ */
+struct cam_lrme_bus_rd_hw_info {
+ struct cam_lrme_bus_rd_reg_common common_reg;
+ struct cam_lrme_bus_rd_bus_client
+ bus_client_reg[CAM_LRME_BUS_RD_MAX_CLIENTS];
+};
+
+/**
+ * struct cam_lrme_bus_wr_hw_info : WE engine registers information
+ *
+ * @common_reg : WE common register
+ * @bus_client_reg : List of WE bus registers information
+ */
+struct cam_lrme_bus_wr_hw_info {
+ struct cam_lrme_bus_wr_reg_common common_reg;
+ struct cam_lrme_bus_wr_bus_client
+ bus_client_reg[CAM_LRME_BUS_WR_MAX_CLIENTS];
+};
+
+/**
+ * struct cam_lrme_clc_reg : Offset of clc registers
+ *
+ * @clc_hw_version : Offset of clc_hw_version register
+ * @clc_hw_status : Offset of clc_hw_status register
+ * @clc_hw_status_dbg : Offset of clc_hw_status_dbg register
+ * @clc_module_cfg : Offset of clc_module_cfg register
+ * @clc_moduleformat : Offset of clc_moduleformat register
+ * @clc_rangestep : Offset of clc_rangestep register
+ * @clc_offset : Offset of clc_offset register
+ * @clc_maxallowedsad : Offset of clc_maxallowedsad register
+ * @clc_minallowedtarmad : Offset of clc_minallowedtarmad register
+ * @clc_meaningfulsaddiff : Offset of clc_meaningfulsaddiff register
+ * @clc_minsaddiffdenom : Offset of clc_minsaddiffdenom register
+ * @clc_robustnessmeasuredistmap_0 : Offset of measuredistmap_0 register
+ * @clc_robustnessmeasuredistmap_1 : Offset of measuredistmap_1 register
+ * @clc_robustnessmeasuredistmap_2 : Offset of measuredistmap_2 register
+ * @clc_robustnessmeasuredistmap_3 : Offset of measuredistmap_3 register
+ * @clc_robustnessmeasuredistmap_4 : Offset of measuredistmap_4 register
+ * @clc_robustnessmeasuredistmap_5 : Offset of measuredistmap_5 register
+ * @clc_robustnessmeasuredistmap_6 : Offset of measuredistmap_6 register
+ * @clc_robustnessmeasuredistmap_7 : Offset of measuredistmap_7 register
+ * @clc_ds_crop_horizontal : Offset of clc_ds_crop_horizontal register
+ * @clc_ds_crop_vertical : Offset of clc_ds_crop_vertical register
+ * @clc_tar_pd_unpacker : Offset of clc_tar_pd_unpacker register
+ * @clc_ref_pd_unpacker : Offset of clc_ref_pd_unpacker register
+ * @clc_sw_override : Offset of clc_sw_override register
+ * @clc_tar_height : Offset of clc_tar_height register
+ * @clc_test_bus_ctrl : Offset of clc_test_bus_ctrl register
+ * @clc_spare : Offset of clc_spare register
+ */
+struct cam_lrme_clc_reg {
+ uint32_t clc_hw_version;
+ uint32_t clc_hw_status;
+ uint32_t clc_hw_status_dbg;
+ uint32_t clc_module_cfg;
+ uint32_t clc_moduleformat;
+ uint32_t clc_rangestep;
+ uint32_t clc_offset;
+ uint32_t clc_maxallowedsad;
+ uint32_t clc_minallowedtarmad;
+ uint32_t clc_meaningfulsaddiff;
+ uint32_t clc_minsaddiffdenom;
+ uint32_t clc_robustnessmeasuredistmap_0;
+ uint32_t clc_robustnessmeasuredistmap_1;
+ uint32_t clc_robustnessmeasuredistmap_2;
+ uint32_t clc_robustnessmeasuredistmap_3;
+ uint32_t clc_robustnessmeasuredistmap_4;
+ uint32_t clc_robustnessmeasuredistmap_5;
+ uint32_t clc_robustnessmeasuredistmap_6;
+ uint32_t clc_robustnessmeasuredistmap_7;
+ uint32_t clc_ds_crop_horizontal;
+ uint32_t clc_ds_crop_vertical;
+ uint32_t clc_tar_pd_unpacker;
+ uint32_t clc_ref_pd_unpacker;
+ uint32_t clc_sw_override;
+ uint32_t clc_tar_height;
+ uint32_t clc_ref_height;
+ uint32_t clc_test_bus_ctrl;
+ uint32_t clc_spare;
+};
+
+/**
+ * struct cam_lrme_titan_reg : Offset of LRME top registers
+ *
+ * @top_hw_version : Offset of top_hw_version register
+ * @top_titan_version : Offset of top_titan_version register
+ * @top_rst_cmd : Offset of top_rst_cmd register
+ * @top_core_clk_cfg : Offset of top_core_clk_cfg register
+ * @top_irq_status : Offset of top_irq_status register
+ * @top_irq_mask : Offset of top_irq_mask register
+ * @top_irq_clear : Offset of top_irq_clear register
+ * @top_irq_set : Offset of top_irq_set register
+ * @top_irq_cmd : Offset of top_irq_cmd register
+ * @top_violation_status : Offset of top_violation_status register
+ * @top_spare : Offset of top_spare register
+ */
+struct cam_lrme_titan_reg {
+ uint32_t top_hw_version;
+ uint32_t top_titan_version;
+ uint32_t top_rst_cmd;
+ uint32_t top_core_clk_cfg;
+ uint32_t top_irq_status;
+ uint32_t top_irq_mask;
+ uint32_t top_irq_clear;
+ uint32_t top_irq_set;
+ uint32_t top_irq_cmd;
+ uint32_t top_violation_status;
+ uint32_t top_spare;
+};
+
+/**
+ * struct cam_lrme_hw_info : LRME registers information
+ *
+ * @clc_reg : LRME CLC registers
+ * @bus_rd_reg : LRME FE registers
+ * @bus_wr_reg : LRME WE registers
+ * @titan_reg : LRME top registers
+ */
+struct cam_lrme_hw_info {
+ struct cam_lrme_clc_reg clc_reg;
+ struct cam_lrme_bus_rd_hw_info bus_rd_reg;
+ struct cam_lrme_bus_wr_hw_info bus_wr_reg;
+ struct cam_lrme_titan_reg titan_reg;
+};
+
+int cam_lrme_hw_process_irq(void *priv, void *data);
+int cam_lrme_hw_submit_req(void *hw_priv, void *hw_submit_args,
+ uint32_t arg_size);
+int cam_lrme_hw_reset(void *hw_priv, void *reset_core_args, uint32_t arg_size);
+int cam_lrme_hw_stop(void *hw_priv, void *stop_args, uint32_t arg_size);
+int cam_lrme_hw_get_caps(void *hw_priv, void *get_hw_cap_args,
+ uint32_t arg_size);
+irqreturn_t cam_lrme_hw_irq(int irq_num, void *data);
+int cam_lrme_hw_process_cmd(void *hw_priv, uint32_t cmd_type,
+ void *cmd_args, uint32_t arg_size);
+int cam_lrme_hw_util_get_caps(struct cam_hw_info *lrme_hw,
+ struct cam_lrme_dev_cap *hw_caps);
+int cam_lrme_hw_start(void *hw_priv, void *hw_init_args, uint32_t arg_size);
+int cam_lrme_hw_flush(void *hw_priv, void *hw_flush_args, uint32_t arg_size);
+void cam_lrme_set_irq(struct cam_hw_info *lrme_hw, enum cam_lrme_irq_set set);
+
+#endif /* _CAM_LRME_HW_CORE_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c
new file mode 100644
index 0000000..2e63752
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c
@@ -0,0 +1,320 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <media/cam_req_mgr.h>
+
+#include "cam_subdev.h"
+#include "cam_lrme_hw_intf.h"
+#include "cam_lrme_hw_core.h"
+#include "cam_lrme_hw_soc.h"
+#include "cam_lrme_hw_reg.h"
+#include "cam_req_mgr_workq.h"
+#include "cam_lrme_hw_mgr.h"
+#include "cam_mem_mgr_api.h"
+#include "cam_smmu_api.h"
+
+#define CAM_LRME_HW_WORKQ_NUM_TASK 30
+
+static int cam_lrme_hw_dev_util_cdm_acquire(struct cam_lrme_core *lrme_core,
+ struct cam_hw_info *lrme_hw)
+{
+ int rc, i;
+ struct cam_cdm_bl_request *cdm_cmd;
+ struct cam_cdm_acquire_data cdm_acquire;
+ struct cam_lrme_cdm_info *hw_cdm_info;
+
+ hw_cdm_info = kzalloc(sizeof(struct cam_lrme_cdm_info),
+ GFP_KERNEL);
+ if (!hw_cdm_info) {
+ CAM_ERR(CAM_LRME, "No memory for hw_cdm_info");
+ return -ENOMEM;
+ }
+
+ cdm_cmd = kzalloc((sizeof(struct cam_cdm_bl_request) +
+ ((CAM_LRME_MAX_HW_ENTRIES - 1) *
+ sizeof(struct cam_cdm_bl_cmd))), GFP_KERNEL);
+ if (!cdm_cmd) {
+ CAM_ERR(CAM_LRME, "No memory for cdm_cmd");
+ kfree(hw_cdm_info);
+ return -ENOMEM;
+ }
+
+ memset(&cdm_acquire, 0, sizeof(cdm_acquire));
+ strlcpy(cdm_acquire.identifier, "lrmecdm", sizeof("lrmecdm"));
+ cdm_acquire.cell_index = lrme_hw->soc_info.index;
+ cdm_acquire.handle = 0;
+ cdm_acquire.userdata = hw_cdm_info;
+ cdm_acquire.cam_cdm_callback = NULL;
+ cdm_acquire.id = CAM_CDM_VIRTUAL;
+ cdm_acquire.base_array_cnt = lrme_hw->soc_info.num_reg_map;
+ for (i = 0; i < lrme_hw->soc_info.num_reg_map; i++)
+ cdm_acquire.base_array[i] = &lrme_hw->soc_info.reg_map[i];
+
+ rc = cam_cdm_acquire(&cdm_acquire);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Can't acquire cdm");
+ goto error;
+ }
+
+ hw_cdm_info->cdm_cmd = cdm_cmd;
+ hw_cdm_info->cdm_ops = cdm_acquire.ops;
+ hw_cdm_info->cdm_handle = cdm_acquire.handle;
+
+ lrme_core->hw_cdm_info = hw_cdm_info;
+ CAM_DBG(CAM_LRME, "cdm acquire done");
+
+ return 0;
+error:
+ kfree(cdm_cmd);
+ kfree(hw_cdm_info);
+ return rc;
+}
+
+static int cam_lrme_hw_dev_probe(struct platform_device *pdev)
+{
+	struct cam_hw_info *lrme_hw;
+	struct cam_hw_intf lrme_hw_intf;
+	struct cam_lrme_core *lrme_core;
+	const struct of_device_id *match_dev = NULL;
+	struct cam_lrme_hw_info *hw_info;
+	int rc, i;
+
+	lrme_hw = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!lrme_hw) {
+		CAM_ERR(CAM_LRME, "No memory to create lrme_hw");
+		return -ENOMEM;
+	}
+
+	lrme_core = kzalloc(sizeof(struct cam_lrme_core), GFP_KERNEL);
+	if (!lrme_core) {
+		CAM_ERR(CAM_LRME, "No memory to create lrme_core");
+		kfree(lrme_hw);
+		return -ENOMEM;
+	}
+
+	lrme_hw->core_info = lrme_core;
+	lrme_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	lrme_hw->soc_info.pdev = pdev;
+	lrme_hw->soc_info.dev = &pdev->dev;
+	lrme_hw->soc_info.dev_name = pdev->name;
+	lrme_hw->open_count = 0;
+	lrme_core->state = CAM_LRME_CORE_STATE_INIT;
+
+	mutex_init(&lrme_hw->hw_mutex);
+	spin_lock_init(&lrme_hw->hw_lock);
+	init_completion(&lrme_hw->hw_complete);
+	init_completion(&lrme_core->reset_complete);
+
+	rc = cam_req_mgr_workq_create("cam_lrme_hw_worker",
+		CAM_LRME_HW_WORKQ_NUM_TASK,
+		&lrme_core->work, CRM_WORKQ_USAGE_IRQ);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Unable to create a workq, rc=%d", rc);
+		goto free_memory;
+	}
+
+	for (i = 0; i < CAM_LRME_HW_WORKQ_NUM_TASK; i++)
+		lrme_core->work->task.pool[i].payload =
+			&lrme_core->work_data[i];
+
+	match_dev = of_match_device(pdev->dev.driver->of_match_table,
+		&pdev->dev);
+	if (!match_dev || !match_dev->data) {
+		CAM_ERR(CAM_LRME, "No Of_match data, %pK", match_dev);
+		rc = -EINVAL;
+		goto destroy_workqueue;
+	}
+	hw_info = (struct cam_lrme_hw_info *)match_dev->data;
+	lrme_core->hw_info = hw_info;
+
+	rc = cam_lrme_soc_init_resources(&lrme_hw->soc_info,
+		cam_lrme_hw_irq, lrme_hw);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to init soc, rc=%d", rc);
+		goto destroy_workqueue;
+	}
+
+	rc = cam_lrme_hw_dev_util_cdm_acquire(lrme_core, lrme_hw);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to acquire cdm");
+		goto deinit_platform_res;
+	}
+
+	rc = cam_smmu_get_handle("lrme", &lrme_core->device_iommu.non_secure);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Get iommu handle failed");
+		goto release_cdm;
+	}
+
+	rc = cam_smmu_ops(lrme_core->device_iommu.non_secure, CAM_SMMU_ATTACH);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "LRME attach iommu handle failed, rc=%d", rc);
+		goto destroy_smmu;
+	}
+
+	rc = cam_lrme_hw_start(lrme_hw, NULL, 0);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to hw init, rc=%d", rc);
+		goto detach_smmu;
+	}
+
+	rc = cam_lrme_hw_util_get_caps(lrme_hw, &lrme_core->hw_caps);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to get hw caps, rc=%d", rc);
+		if (cam_lrme_hw_stop(lrme_hw, NULL, 0))
+			CAM_ERR(CAM_LRME, "Failed in hw deinit");
+		goto detach_smmu;
+	}
+
+	rc = cam_lrme_hw_stop(lrme_hw, NULL, 0);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to deinit hw, rc=%d", rc);
+		goto detach_smmu;
+	}
+
+	lrme_core->hw_idx = lrme_hw->soc_info.index;
+	lrme_hw_intf.hw_priv = lrme_hw;
+	lrme_hw_intf.hw_idx = lrme_hw->soc_info.index;
+	lrme_hw_intf.hw_ops.get_hw_caps = cam_lrme_hw_get_caps;
+	lrme_hw_intf.hw_ops.init = NULL;
+	lrme_hw_intf.hw_ops.deinit = NULL;
+	lrme_hw_intf.hw_ops.reset = cam_lrme_hw_reset;
+	lrme_hw_intf.hw_ops.reserve = NULL;
+	lrme_hw_intf.hw_ops.release = NULL;
+	lrme_hw_intf.hw_ops.start = cam_lrme_hw_start;
+	lrme_hw_intf.hw_ops.stop = cam_lrme_hw_stop;
+	lrme_hw_intf.hw_ops.read = NULL;
+	lrme_hw_intf.hw_ops.write = NULL;
+	lrme_hw_intf.hw_ops.process_cmd = cam_lrme_hw_process_cmd;
+	lrme_hw_intf.hw_type = CAM_HW_LRME;
+
+	rc = cam_cdm_get_iommu_handle("lrmecdm", &lrme_core->cdm_iommu);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to acquire the CDM iommu handles");
+		goto detach_smmu;
+	}
+
+	rc = cam_lrme_mgr_register_device(&lrme_hw_intf,
+		&lrme_core->device_iommu,
+		&lrme_core->cdm_iommu);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to register device");
+		goto detach_smmu;
+	}
+
+	platform_set_drvdata(pdev, lrme_hw);
+	CAM_DBG(CAM_LRME, "LRME-%d probe successful", lrme_hw_intf.hw_idx);
+
+	return rc;
+
+detach_smmu:
+	cam_smmu_ops(lrme_core->device_iommu.non_secure, CAM_SMMU_DETACH);
+destroy_smmu:
+	cam_smmu_destroy_handle(lrme_core->device_iommu.non_secure);
+release_cdm:
+	cam_cdm_release(lrme_core->hw_cdm_info->cdm_handle);
+	kfree(lrme_core->hw_cdm_info->cdm_cmd);
+	kfree(lrme_core->hw_cdm_info);
+deinit_platform_res:
+	if (cam_lrme_soc_deinit_resources(&lrme_hw->soc_info))
+		CAM_ERR(CAM_LRME, "Failed in soc deinit");
+
+destroy_workqueue:
+	cam_req_mgr_workq_destroy(&lrme_core->work);
+free_memory:
+	mutex_destroy(&lrme_hw->hw_mutex);
+	kfree(lrme_hw);
+	kfree(lrme_core);
+
+	return rc;
+}
+
+static int cam_lrme_hw_dev_remove(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct cam_hw_info *lrme_hw;
+	struct cam_lrme_core *lrme_core;
+
+	lrme_hw = platform_get_drvdata(pdev);
+	if (!lrme_hw) {
+		CAM_ERR(CAM_LRME, "Invalid lrme_hw from platform data");
+		rc = -ENODEV;
+		return rc;
+	}
+
+	lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+	if (!lrme_core) {
+		CAM_ERR(CAM_LRME, "Invalid lrme_core in lrme_hw");
+		rc = -EINVAL;
+		goto deinit_platform_res;
+	}
+
+	cam_smmu_ops(lrme_core->device_iommu.non_secure, CAM_SMMU_DETACH);
+	cam_smmu_destroy_handle(lrme_core->device_iommu.non_secure);
+	cam_cdm_release(lrme_core->hw_cdm_info->cdm_handle);
+	cam_lrme_mgr_deregister_device(lrme_core->hw_idx);
+
+	kfree(lrme_core->hw_cdm_info->cdm_cmd);
+	kfree(lrme_core->hw_cdm_info);
+	kfree(lrme_core);
+
+deinit_platform_res:
+	rc = cam_lrme_soc_deinit_resources(&lrme_hw->soc_info);
+	if (rc)
+		CAM_ERR(CAM_LRME, "Error in LRME soc deinit, rc=%d", rc);
+
+	mutex_destroy(&lrme_hw->hw_mutex);
+	kfree(lrme_hw);
+
+	return rc;
+}
+
+static const struct of_device_id cam_lrme_hw_dt_match[] = {
+ {
+ .compatible = "qcom,lrme",
+ .data = &cam_lrme10_hw_info,
+ },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, cam_lrme_hw_dt_match);
+
+static struct platform_driver cam_lrme_hw_driver = {
+ .probe = cam_lrme_hw_dev_probe,
+ .remove = cam_lrme_hw_dev_remove,
+ .driver = {
+ .name = "cam_lrme_hw",
+ .owner = THIS_MODULE,
+ .of_match_table = cam_lrme_hw_dt_match,
+ },
+};
+
+static int __init cam_lrme_hw_init_module(void)
+{
+ return platform_driver_register(&cam_lrme_hw_driver);
+}
+
+static void __exit cam_lrme_hw_exit_module(void)
+{
+ platform_driver_unregister(&cam_lrme_hw_driver);
+}
+
+module_init(cam_lrme_hw_init_module);
+module_exit(cam_lrme_hw_exit_module);
+MODULE_DESCRIPTION("CAM LRME HW driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_intf.h b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_intf.h
new file mode 100644
index 0000000..d16b174
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_intf.h
@@ -0,0 +1,200 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_LRME_HW_INTF_H_
+#define _CAM_LRME_HW_INTF_H_
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <media/cam_cpas.h>
+#include <media/cam_req_mgr.h>
+#include <media/cam_lrme.h>
+
+#include "cam_io_util.h"
+#include "cam_soc_util.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_subdev.h"
+#include "cam_cpas_api.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_debug_util.h"
+
+
+#define CAM_LRME_MAX_IO_BUFFER 2
+#define CAM_LRME_MAX_HW_ENTRIES 5
+
+#define CAM_LRME_BASE_IDX 0
+
+/**
+ * enum cam_lrme_hw_type : Enum for LRME HW type
+ *
+ * @CAM_HW_LRME : LRME HW type
+ */
+enum cam_lrme_hw_type {
+ CAM_HW_LRME,
+};
+
+/**
+ * enum cam_lrme_cb_type : HW manager call back type
+ *
+ * @CAM_LRME_CB_BUF_DONE : Indicate buf done has been generated
+ * @CAM_LRME_CB_COMP_REG_UPDATE : Indicate receiving WE comp reg update
+ * @CAM_LRME_CB_PUT_FRAME : Request HW manager to put back the frame
+ * @CAM_LRME_CB_ERROR : Indicate error irq has been generated
+ */
+enum cam_lrme_cb_type {
+ CAM_LRME_CB_BUF_DONE = 1,
+ CAM_LRME_CB_COMP_REG_UPDATE = 1 << 1,
+ CAM_LRME_CB_PUT_FRAME = 1 << 2,
+ CAM_LRME_CB_ERROR = 1 << 3,
+};
+
+/**
+ * enum cam_lrme_hw_cmd_type : HW CMD type
+ *
+ * @CAM_LRME_HW_CMD_PREPARE_HW_UPDATE : Prepare HW update
+ * @CAM_LRME_HW_CMD_REGISTER_CB : register HW manager callback
+ * @CAM_LRME_HW_CMD_SUBMIT : Submit frame to HW
+ */
+enum cam_lrme_hw_cmd_type {
+ CAM_LRME_HW_CMD_PREPARE_HW_UPDATE,
+ CAM_LRME_HW_CMD_REGISTER_CB,
+ CAM_LRME_HW_CMD_SUBMIT,
+};
+
+/**
+ * enum cam_lrme_hw_reset_type : Type of reset
+ *
+ * @CAM_LRME_HW_RESET_TYPE_HW_RESET : HW reset
+ * @CAM_LRME_HW_RESET_TYPE_SW_RESET : SW reset
+ */
+enum cam_lrme_hw_reset_type {
+ CAM_LRME_HW_RESET_TYPE_HW_RESET,
+ CAM_LRME_HW_RESET_TYPE_SW_RESET,
+};
+
+/**
+ * struct cam_lrme_frame_request : LRME frame request
+ *
+ * @frame_list : List head
+ * @req_id : Request ID
+ * @ctxt_to_hw_map : Information about context id, priority and device id
+ * @hw_device : Pointer to HW device
+ * @hw_update_entries : List of hw_update_entries
+ * @num_hw_update_entries : number of hw_update_entries
+ */
+struct cam_lrme_frame_request {
+ struct list_head frame_list;
+ uint64_t req_id;
+ void *ctxt_to_hw_map;
+ struct cam_lrme_device *hw_device;
+ struct cam_hw_update_entry hw_update_entries[CAM_LRME_MAX_HW_ENTRIES];
+ uint32_t num_hw_update_entries;
+};
+
+/**
+ * struct cam_lrme_hw_io_buffer : IO buffer information
+ *
+ * @valid : Indicate whether this IO config is valid
+ * @io_cfg : Pointer to IO configuration
+ * @num_buf : Number of buffers
+ * @num_plane : Number of planes
+ * @io_addr : List of IO address
+ */
+struct cam_lrme_hw_io_buffer {
+ bool valid;
+ struct cam_buf_io_cfg *io_cfg;
+ uint32_t num_buf;
+ uint32_t num_plane;
+ uint64_t io_addr[CAM_PACKET_MAX_PLANES];
+};
+
+/**
+ * struct cam_lrme_hw_cmd_config_args : Args for prepare HW update
+ *
+ * @hw_device : Pointer to HW device
+ * @input_buf : List of input buffers
+ * @output_buf : List of output buffers
+ * @cmd_buf_addr : Pointer to available KMD buffer
+ * @size : Available KMD buffer size
+ * @config_buf_size : Size used to prepare update
+ */
+struct cam_lrme_hw_cmd_config_args {
+ struct cam_lrme_device *hw_device;
+ struct cam_lrme_hw_io_buffer input_buf[CAM_LRME_MAX_IO_BUFFER];
+ struct cam_lrme_hw_io_buffer output_buf[CAM_LRME_MAX_IO_BUFFER];
+ uint32_t *cmd_buf_addr;
+ uint32_t size;
+ uint32_t config_buf_size;
+};
+
+/**
+ * struct cam_lrme_hw_flush_args : Args for flush HW
+ *
+ * @ctxt_to_hw_map : Identity of context
+ * @req_to_flush : Pointer to the frame that needs to be flushed,
+ * in case of a single frame flush
+ * @flush_type : Flush type
+ */
+struct cam_lrme_hw_flush_args {
+ void *ctxt_to_hw_map;
+ struct cam_lrme_frame_request *req_to_flush;
+ uint32_t flush_type;
+};
+
+/**
+ * struct cam_lrme_hw_reset_args : Args for reset HW
+ *
+ * @reset_type : Enum cam_lrme_hw_reset_type
+ */
+struct cam_lrme_hw_reset_args {
+ uint32_t reset_type;
+};
+
+/**
+ * struct cam_lrme_hw_cb_args : HW manager callback args
+ *
+ * @cb_type : Callback event type
+ * @frame_req : Pointer to the frame associated with the cb
+ */
+struct cam_lrme_hw_cb_args {
+ uint32_t cb_type;
+ struct cam_lrme_frame_request *frame_req;
+};
+
+/**
+ * struct cam_lrme_hw_cmd_set_cb : Args for set callback function
+ *
+ * @cam_lrme_hw_mgr_cb : Callback function pointer
+ * @data : Data sent along with callback function
+ */
+struct cam_lrme_hw_cmd_set_cb {
+ int (*cam_lrme_hw_mgr_cb)(void *data,
+ struct cam_lrme_hw_cb_args *args);
+ void *data;
+};
+
+/**
+ * struct cam_lrme_hw_submit_args : Args for submit request
+ *
+ * @hw_update_entries : List of hw update entries used to program registers
+ * @num_hw_update_entries : Number of hw update entries
+ * @frame_req : Pointer to the frame request
+ */
+struct cam_lrme_hw_submit_args {
+ struct cam_hw_update_entry *hw_update_entries;
+ uint32_t num_hw_update_entries;
+ struct cam_lrme_frame_request *frame_req;
+};
+
+#endif /* _CAM_LRME_HW_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_reg.h b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_reg.h
new file mode 100644
index 0000000..39cfde7
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_reg.h
@@ -0,0 +1,193 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_LRME_HW_REG_H_
+#define _CAM_LRME_HW_REG_H_
+
+#include "cam_lrme_hw_core.h"
+
+static struct cam_lrme_hw_info cam_lrme10_hw_info = {
+ .clc_reg = {
+ .clc_hw_version = 0x00000000,
+ .clc_hw_status = 0x00000004,
+ .clc_hw_status_dbg = 0x00000008,
+ .clc_module_cfg = 0x00000060,
+ .clc_moduleformat = 0x000000A8,
+ .clc_rangestep = 0x00000068,
+ .clc_offset = 0x0000006C,
+ .clc_maxallowedsad = 0x00000070,
+ .clc_minallowedtarmad = 0x00000074,
+ .clc_meaningfulsaddiff = 0x00000078,
+ .clc_minsaddiffdenom = 0x0000007C,
+ .clc_robustnessmeasuredistmap_0 = 0x00000080,
+ .clc_robustnessmeasuredistmap_1 = 0x00000084,
+ .clc_robustnessmeasuredistmap_2 = 0x00000088,
+ .clc_robustnessmeasuredistmap_3 = 0x0000008C,
+ .clc_robustnessmeasuredistmap_4 = 0x00000090,
+ .clc_robustnessmeasuredistmap_5 = 0x00000094,
+ .clc_robustnessmeasuredistmap_6 = 0x00000098,
+ .clc_robustnessmeasuredistmap_7 = 0x0000009C,
+ .clc_ds_crop_horizontal = 0x000000A0,
+ .clc_ds_crop_vertical = 0x000000A4,
+ .clc_tar_pd_unpacker = 0x000000AC,
+ .clc_ref_pd_unpacker = 0x000000B0,
+ .clc_sw_override = 0x000000B4,
+ .clc_tar_height = 0x000000B8,
+ .clc_ref_height = 0x000000BC,
+ .clc_test_bus_ctrl = 0x000001F8,
+ .clc_spare = 0x000001FC,
+ },
+ .bus_rd_reg = {
+ .common_reg = {
+ .hw_version = 0x00000200,
+ .hw_capability = 0x00000204,
+ .sw_reset = 0x00000208,
+ .cgc_override = 0x0000020C,
+ .irq_mask = 0x00000210,
+ .irq_clear = 0x00000214,
+ .irq_cmd = 0x00000218,
+ .irq_status = 0x0000021C,
+ .cmd = 0x00000220,
+ .irq_set = 0x00000224,
+ .misr_reset = 0x0000022C,
+ .security_cfg = 0x00000230,
+ .pwr_iso_cfg = 0x00000234,
+ .pwr_iso_seed = 0x00000238,
+ .test_bus_ctrl = 0x00000248,
+ .spare = 0x0000024C,
+ },
+ .bus_client_reg = {
+ /* bus client 0 */
+ {
+ .core_cfg = 0x00000250,
+ .ccif_meta_data = 0x00000254,
+ .addr_image = 0x00000258,
+ .rd_buffer_size = 0x0000025C,
+ .rd_stride = 0x00000260,
+ .unpack_cfg_0 = 0x00000264,
+ .latency_buff_allocation = 0x00000278,
+ .burst_limit_cfg = 0x00000280,
+ .misr_cfg_0 = 0x00000284,
+ .misr_cfg_1 = 0x00000288,
+ .misr_rd_val = 0x0000028C,
+ .debug_status_cfg = 0x00000290,
+ .debug_status_0 = 0x00000294,
+ .debug_status_1 = 0x00000298,
+ },
+ /* bus client 1 */
+ {
+ .core_cfg = 0x000002F0,
+ .ccif_meta_data = 0x000002F4,
+ .addr_image = 0x000002F8,
+ .rd_buffer_size = 0x000002FC,
+ .rd_stride = 0x00000300,
+ .unpack_cfg_0 = 0x00000304,
+ .latency_buff_allocation = 0x00000318,
+ .burst_limit_cfg = 0x00000320,
+ .misr_cfg_0 = 0x00000324,
+ .misr_cfg_1 = 0x00000328,
+ .misr_rd_val = 0x0000032C,
+ .debug_status_cfg = 0x00000330,
+ .debug_status_0 = 0x00000334,
+ .debug_status_1 = 0x00000338,
+ },
+ },
+ },
+ .bus_wr_reg = {
+ .common_reg = {
+ .hw_version = 0x00000500,
+ .hw_capability = 0x00000504,
+ .sw_reset = 0x00000508,
+ .cgc_override = 0x0000050C,
+ .misr_reset = 0x000005C8,
+ .pwr_iso_cfg = 0x000005CC,
+ .test_bus_ctrl = 0x0000061C,
+ .composite_mask_0 = 0x00000510,
+ .irq_mask_0 = 0x00000544,
+ .irq_mask_1 = 0x00000548,
+ .irq_clear_0 = 0x00000550,
+ .irq_clear_1 = 0x00000554,
+ .irq_status_0 = 0x0000055C,
+ .irq_status_1 = 0x00000560,
+ .irq_cmd = 0x00000568,
+ .irq_set_0 = 0x000005BC,
+ .irq_set_1 = 0x000005C0,
+ .addr_fifo_status = 0x000005A8,
+ .frame_header_cfg0 = 0x000005AC,
+ .frame_header_cfg1 = 0x000005B0,
+ .spare = 0x00000620,
+ },
+ .bus_client_reg = {
+ /* bus client 0 */
+ {
+ .status_0 = 0x00000700,
+ .status_1 = 0x00000704,
+ .cfg = 0x00000708,
+ .addr_frame_header = 0x0000070C,
+ .frame_header_cfg = 0x00000710,
+ .addr_image = 0x00000714,
+ .addr_image_offset = 0x00000718,
+ .buffer_width_cfg = 0x0000071C,
+ .buffer_height_cfg = 0x00000720,
+ .packer_cfg = 0x00000724,
+ .wr_stride = 0x00000728,
+ .irq_subsample_cfg_period = 0x00000748,
+ .irq_subsample_cfg_pattern = 0x0000074C,
+ .burst_limit_cfg = 0x0000075C,
+ .misr_cfg = 0x00000760,
+ .misr_rd_word_sel = 0x00000764,
+ .misr_val = 0x00000768,
+ .debug_status_cfg = 0x0000076C,
+ .debug_status_0 = 0x00000770,
+ .debug_status_1 = 0x00000774,
+ },
+ /* bus client 1 */
+ {
+ .status_0 = 0x00000800,
+ .status_1 = 0x00000804,
+ .cfg = 0x00000808,
+ .addr_frame_header = 0x0000080C,
+ .frame_header_cfg = 0x00000810,
+ .addr_image = 0x00000814,
+ .addr_image_offset = 0x00000818,
+ .buffer_width_cfg = 0x0000081C,
+ .buffer_height_cfg = 0x00000820,
+ .packer_cfg = 0x00000824,
+ .wr_stride = 0x00000828,
+ .irq_subsample_cfg_period = 0x00000848,
+ .irq_subsample_cfg_pattern = 0x0000084C,
+ .burst_limit_cfg = 0x0000085C,
+ .misr_cfg = 0x00000860,
+ .misr_rd_word_sel = 0x00000864,
+ .misr_val = 0x00000868,
+ .debug_status_cfg = 0x0000086C,
+ .debug_status_0 = 0x00000870,
+ .debug_status_1 = 0x00000874,
+ },
+ },
+ },
+ .titan_reg = {
+ .top_hw_version = 0x00000900,
+ .top_titan_version = 0x00000904,
+ .top_rst_cmd = 0x00000908,
+ .top_core_clk_cfg = 0x00000920,
+ .top_irq_status = 0x0000090C,
+ .top_irq_mask = 0x00000910,
+ .top_irq_clear = 0x00000914,
+ .top_irq_set = 0x00000918,
+ .top_irq_cmd = 0x0000091C,
+ .top_violation_status = 0x00000924,
+ .top_spare = 0x000009FC,
+ },
+};
+
+#endif /* _CAM_LRME_HW_REG_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_soc.c b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_soc.c
new file mode 100644
index 0000000..75de0dd
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_soc.c
@@ -0,0 +1,158 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "cam_lrme_hw_core.h"
+#include "cam_lrme_hw_soc.h"
+
+
+int cam_lrme_soc_enable_resources(struct cam_hw_info *lrme_hw)
+{
+ struct cam_hw_soc_info *soc_info = &lrme_hw->soc_info;
+ struct cam_lrme_soc_private *soc_private =
+ (struct cam_lrme_soc_private *)soc_info->soc_private;
+ struct cam_ahb_vote ahb_vote;
+ struct cam_axi_vote axi_vote;
+ int rc = 0;
+
+ ahb_vote.type = CAM_VOTE_ABSOLUTE;
+ ahb_vote.vote.level = CAM_SVS_VOTE;
+ axi_vote.compressed_bw = 7200000;
+ axi_vote.uncompressed_bw = 7200000;
+ rc = cam_cpas_start(soc_private->cpas_handle, &ahb_vote, &axi_vote);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to start cpas, rc %d", rc);
+ return -EFAULT;
+ }
+
+ rc = cam_soc_util_enable_platform_resource(soc_info, true, CAM_SVS_VOTE,
+ true);
+ if (rc) {
+ CAM_ERR(CAM_LRME,
+ "Failed to enable platform resource, rc %d", rc);
+ goto stop_cpas;
+ }
+
+ cam_lrme_set_irq(lrme_hw, CAM_LRME_IRQ_ENABLE);
+
+ return rc;
+
+stop_cpas:
+ if (cam_cpas_stop(soc_private->cpas_handle))
+ CAM_ERR(CAM_LRME, "Failed to stop cpas");
+
+ return rc;
+}
+
+int cam_lrme_soc_disable_resources(struct cam_hw_info *lrme_hw)
+{
+ struct cam_hw_soc_info *soc_info = &lrme_hw->soc_info;
+ struct cam_lrme_soc_private *soc_private;
+ int rc = 0;
+
+ soc_private = soc_info->soc_private;
+
+ cam_lrme_set_irq(lrme_hw, CAM_LRME_IRQ_DISABLE);
+
+ rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to disable platform resource");
+ return rc;
+ }
+ rc = cam_cpas_stop(soc_private->cpas_handle);
+ if (rc)
+ CAM_ERR(CAM_LRME, "Failed to stop cpas");
+
+ return rc;
+}
+
+int cam_lrme_soc_init_resources(struct cam_hw_soc_info *soc_info,
+ irq_handler_t irq_handler, void *private_data)
+{
+ struct cam_lrme_soc_private *soc_private;
+ struct cam_cpas_register_params cpas_register_param;
+ int rc;
+
+ rc = cam_soc_util_get_dt_properties(soc_info);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed in get_dt_properties, rc=%d", rc);
+ return rc;
+ }
+
+ rc = cam_soc_util_request_platform_resource(soc_info, irq_handler,
+ private_data);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed in request_platform_resource rc=%d",
+ rc);
+ return rc;
+ }
+
+ soc_private = kzalloc(sizeof(struct cam_lrme_soc_private), GFP_KERNEL);
+ if (!soc_private) {
+ rc = -ENOMEM;
+ goto release_res;
+ }
+ soc_info->soc_private = soc_private;
+
+ memset(&cpas_register_param, 0, sizeof(cpas_register_param));
+ strlcpy(cpas_register_param.identifier,
+ "lrmecpas", CAM_HW_IDENTIFIER_LENGTH);
+ cpas_register_param.cell_index = soc_info->index;
+ cpas_register_param.dev = &soc_info->pdev->dev;
+ cpas_register_param.userdata = private_data;
+ cpas_register_param.cam_cpas_client_cb = NULL;
+
+ rc = cam_cpas_register_client(&cpas_register_param);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "CPAS registration failed");
+ goto free_soc_private;
+ }
+ soc_private->cpas_handle = cpas_register_param.client_handle;
+ CAM_DBG(CAM_LRME, "CPAS handle=%d", soc_private->cpas_handle);
+
+ return rc;
+
+free_soc_private:
+ kfree(soc_info->soc_private);
+ soc_info->soc_private = NULL;
+release_res:
+ cam_soc_util_release_platform_resource(soc_info);
+
+ return rc;
+}
+
+int cam_lrme_soc_deinit_resources(struct cam_hw_soc_info *soc_info)
+{
+ struct cam_lrme_soc_private *soc_private =
+ (struct cam_lrme_soc_private *)soc_info->soc_private;
+ int rc;
+
+ rc = cam_cpas_unregister_client(soc_private->cpas_handle);
+ if (rc)
+ CAM_ERR(CAM_LRME, "Unregister cpas failed, handle=%d, rc=%d",
+ soc_private->cpas_handle, rc);
+
+ rc = cam_soc_util_release_platform_resource(soc_info);
+ if (rc)
+ CAM_ERR(CAM_LRME, "release platform failed, rc=%d", rc);
+
+ kfree(soc_info->soc_private);
+ soc_info->soc_private = NULL;
+
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_soc.h b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_soc.h
new file mode 100644
index 0000000..44e8486
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_soc.h
@@ -0,0 +1,28 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_LRME_HW_SOC_H_
+#define _CAM_LRME_HW_SOC_H_
+
+#include "cam_soc_util.h"
+
+struct cam_lrme_soc_private {
+ uint32_t cpas_handle;
+};
+
+int cam_lrme_soc_enable_resources(struct cam_hw_info *lrme_hw);
+int cam_lrme_soc_disable_resources(struct cam_hw_info *lrme_hw);
+int cam_lrme_soc_init_resources(struct cam_hw_soc_info *soc_info,
+ irq_handler_t irq_handler, void *private_data);
+int cam_lrme_soc_deinit_resources(struct cam_hw_soc_info *soc_info);
+
+#endif /* _CAM_LRME_HW_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
index f38af7d..244746b 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
@@ -810,15 +810,34 @@
*/
static void __cam_req_mgr_sof_freeze(unsigned long data)
{
- struct cam_req_mgr_timer *timer = (struct cam_req_mgr_timer *)data;
- struct cam_req_mgr_core_link *link = NULL;
+ struct cam_req_mgr_timer *timer = (struct cam_req_mgr_timer *)data;
+ struct cam_req_mgr_core_link *link = NULL;
+ struct cam_req_mgr_core_session *session = NULL;
+ struct cam_req_mgr_message msg;
if (!timer) {
CAM_ERR(CAM_CRM, "NULL timer");
return;
}
link = (struct cam_req_mgr_core_link *)timer->parent;
- CAM_ERR(CAM_CRM, "SOF freeze for link %x", link->link_hdl);
+ session = (struct cam_req_mgr_core_session *)link->parent;
+
+ CAM_ERR(CAM_CRM, "SOF freeze for session %d link 0x%x",
+ session->session_hdl, link->link_hdl);
+
+ memset(&msg, 0, sizeof(msg));
+
+ msg.session_hdl = session->session_hdl;
+ msg.u.err_msg.error_type = CAM_REQ_MGR_ERROR_TYPE_DEVICE;
+ msg.u.err_msg.request_id = 0;
+ msg.u.err_msg.link_hdl = link->link_hdl;
+
+
+ if (cam_req_mgr_notify_message(&msg,
+ V4L_EVENT_CAM_REQ_MGR_ERROR, V4L_EVENT_CAM_REQ_MGR_EVENT))
+ CAM_ERR(CAM_CRM,
+ "Error notifying SOF freeze for session %d link 0x%x",
+ session->session_hdl, link->link_hdl);
}
/**
@@ -863,12 +882,14 @@
* @brief : Cleans up the mem allocated while linking
* @link : pointer to link, mem associated with this link is freed
*
+ * @return : returns whether unlink succeeded or failed for any device
*/
-static void __cam_req_mgr_destroy_link_info(struct cam_req_mgr_core_link *link)
+static int __cam_req_mgr_destroy_link_info(struct cam_req_mgr_core_link *link)
{
int32_t i = 0;
struct cam_req_mgr_connected_device *dev;
struct cam_req_mgr_core_dev_link_setup link_data;
+ int rc = 0;
link_data.link_enable = 0;
link_data.link_hdl = link->link_hdl;
@@ -881,7 +902,11 @@
if (dev != NULL) {
link_data.dev_hdl = dev->dev_hdl;
if (dev->ops && dev->ops->link_setup)
- dev->ops->link_setup(&link_data);
+ rc = dev->ops->link_setup(&link_data);
+ if (rc)
+ CAM_ERR(CAM_CRM,
+ "Unlink failed dev_hdl %d",
+ dev->dev_hdl);
dev->dev_hdl = 0;
dev->parent = NULL;
dev->ops = NULL;
@@ -896,6 +921,7 @@
link->num_devs = 0;
link->max_delay = 0;
+ return rc;
}
/**
@@ -2024,8 +2050,12 @@
cam_req_mgr_workq_destroy(&link->workq);
- /* Cleanuprequest tables */
- __cam_req_mgr_destroy_link_info(link);
+ /* Cleanup request tables and unlink devices */
+ rc = __cam_req_mgr_destroy_link_info(link);
+ if (rc) {
+ CAM_ERR(CAM_CORE, "Unlink failed. Cannot proceed");
+ return rc;
+ }
/* Free memory holding data of linked devs */
__cam_req_mgr_destroy_subdev(link->l_dev);
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
index c316dbb..49c3c56e 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
@@ -462,7 +462,7 @@
return rc;
}
-int cam_req_mgr_notify_frame_message(struct cam_req_mgr_message *msg,
+int cam_req_mgr_notify_message(struct cam_req_mgr_message *msg,
uint32_t id,
uint32_t type)
{
@@ -481,7 +481,7 @@
return 0;
}
-EXPORT_SYMBOL(cam_req_mgr_notify_frame_message);
+EXPORT_SYMBOL(cam_req_mgr_notify_message);
void cam_video_device_cleanup(void)
{
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.h
index 77faed9..93278b8 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.h
@@ -43,7 +43,7 @@
#define CAM_REQ_MGR_GET_PAYLOAD_PTR(ev, type) \
(type *)((char *)ev.u.data)
-int cam_req_mgr_notify_frame_message(struct cam_req_mgr_message *msg,
+int cam_req_mgr_notify_message(struct cam_req_mgr_message *msg,
uint32_t id,
uint32_t type);
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c
index 1d2169b..f357941 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c
@@ -317,6 +317,8 @@
}
hdl_tbl->hdl[idx].state = HDL_FREE;
+ hdl_tbl->hdl[idx].ops = NULL;
+ hdl_tbl->hdl[idx].priv = NULL;
clear_bit(idx, hdl_tbl->bitmap);
spin_unlock_bh(&hdl_tbl_lock);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
index 64acea7..c5c9b0a 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
@@ -186,13 +186,6 @@
goto free_ctrl;
}
- soc_private = (struct cam_actuator_soc_private *)(id->driver_data);
- if (!soc_private) {
- CAM_ERR(CAM_EEPROM, "board info NULL");
- rc = -EINVAL;
- goto free_ctrl;
- }
-
rc = cam_actuator_init_subdev(a_ctrl);
if (rc)
goto free_soc;
@@ -249,8 +242,10 @@
static int32_t cam_actuator_platform_remove(struct platform_device *pdev)
{
- struct cam_actuator_ctrl_t *a_ctrl;
int32_t rc = 0;
+ struct cam_actuator_ctrl_t *a_ctrl;
+ struct cam_actuator_soc_private *soc_private;
+ struct cam_sensor_power_ctrl_t *power_info;
a_ctrl = platform_get_drvdata(pdev);
if (!a_ctrl) {
@@ -258,8 +253,15 @@
return 0;
}
+ soc_private =
+ (struct cam_actuator_soc_private *)a_ctrl->soc_info.soc_private;
+ power_info = &soc_private->power_info;
+
kfree(a_ctrl->io_master_info.cci_client);
a_ctrl->io_master_info.cci_client = NULL;
+ kfree(power_info->power_setting);
+ kfree(power_info->power_down_setting);
+ kfree(a_ctrl->soc_info.soc_private);
kfree(a_ctrl->i2c_data.per_frame);
a_ctrl->i2c_data.per_frame = NULL;
devm_kfree(&pdev->dev, a_ctrl);
@@ -269,17 +271,29 @@
static int32_t cam_actuator_driver_i2c_remove(struct i2c_client *client)
{
- struct cam_actuator_ctrl_t *a_ctrl = i2c_get_clientdata(client);
int32_t rc = 0;
+ struct cam_actuator_ctrl_t *a_ctrl =
+ i2c_get_clientdata(client);
+ struct cam_actuator_soc_private *soc_private;
+ struct cam_sensor_power_ctrl_t *power_info;
/* Handle I2C Devices */
if (!a_ctrl) {
CAM_ERR(CAM_ACTUATOR, "Actuator device is NULL");
return -EINVAL;
}
+
+ soc_private =
+ (struct cam_actuator_soc_private *)a_ctrl->soc_info.soc_private;
+ power_info = &soc_private->power_info;
+
/*Free Allocated Mem */
kfree(a_ctrl->i2c_data.per_frame);
a_ctrl->i2c_data.per_frame = NULL;
+ kfree(power_info->power_setting);
+ kfree(power_info->power_down_setting);
+ kfree(a_ctrl->soc_info.soc_private);
+ a_ctrl->soc_info.soc_private = NULL;
kfree(a_ctrl);
return rc;
}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c
index f151b9b..d7a6504 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c
@@ -730,17 +730,30 @@
reg_addr++;
} else {
if ((i + 1) <= cci_dev->payload_size) {
- if (i2c_msg->data_type ==
- CAMERA_SENSOR_I2C_TYPE_DWORD) {
+ switch (i2c_msg->data_type) {
+ case CAMERA_SENSOR_I2C_TYPE_DWORD:
data[i++] = (i2c_cmd->reg_data &
0xFF000000) >> 24;
+ /* fallthrough */
+ case CAMERA_SENSOR_I2C_TYPE_3B:
data[i++] = (i2c_cmd->reg_data &
0x00FF0000) >> 16;
+ /* fallthrough */
+ case CAMERA_SENSOR_I2C_TYPE_WORD:
+ data[i++] = (i2c_cmd->reg_data &
+ 0x0000FF00) >> 8;
+ /* fallthrough */
+ case CAMERA_SENSOR_I2C_TYPE_BYTE:
+ data[i++] = i2c_cmd->reg_data &
+ 0x000000FF;
+ break;
+ default:
+ CAM_ERR(CAM_CCI,
+ "invalid data type: %d",
+ i2c_msg->data_type);
+ return -EINVAL;
}
- data[i++] = (i2c_cmd->reg_data &
- 0x0000FF00) >> 8; /* MSB */
- data[i++] = i2c_cmd->reg_data &
- 0x000000FF; /* LSB */
+
if (c_ctrl->cmd ==
MSM_CCI_I2C_WRITE_SEQ)
reg_addr++;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
index 4c69afb..72b1779 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
@@ -657,6 +657,7 @@
struct cam_packet *csl_packet = NULL;
struct cam_eeprom_soc_private *soc_private =
(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+ struct cam_sensor_power_ctrl_t *power_info = &soc_private->power_info;
ioctl_ctrl = (struct cam_control *)arg;
@@ -701,7 +702,7 @@
e_ctrl->cal_data.num_map = 0;
CAM_DBG(CAM_EEPROM,
"Returning the data using kernel probe");
- break;
+ break;
}
rc = cam_eeprom_init_pkt_parser(e_ctrl, csl_packet);
if (rc) {
@@ -750,16 +751,21 @@
memdata_free:
kfree(e_ctrl->cal_data.mapdata);
error:
+ kfree(power_info->power_setting);
+ kfree(power_info->power_down_setting);
kfree(e_ctrl->cal_data.map);
e_ctrl->cal_data.num_data = 0;
e_ctrl->cal_data.num_map = 0;
- e_ctrl->cam_eeprom_state = CAM_EEPROM_ACQUIRE;
+ e_ctrl->cam_eeprom_state = CAM_EEPROM_INIT;
return rc;
}
void cam_eeprom_shutdown(struct cam_eeprom_ctrl_t *e_ctrl)
{
int rc;
+ struct cam_eeprom_soc_private *soc_private =
+ (struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+ struct cam_sensor_power_ctrl_t *power_info = &soc_private->power_info;
if (e_ctrl->cam_eeprom_state == CAM_EEPROM_INIT)
return;
@@ -779,6 +785,9 @@
e_ctrl->bridge_intf.device_hdl = -1;
e_ctrl->bridge_intf.link_hdl = -1;
e_ctrl->bridge_intf.session_hdl = -1;
+
+ kfree(power_info->power_setting);
+ kfree(power_info->power_down_setting);
}
e_ctrl->cam_eeprom_state = CAM_EEPROM_INIT;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c
index d667cf4..5eb29c3 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c
@@ -201,13 +201,6 @@
goto free_soc;
}
- soc_private = (struct cam_eeprom_soc_private *)(id->driver_data);
- if (!soc_private) {
- CAM_ERR(CAM_EEPROM, "board info NULL");
- rc = -EINVAL;
- goto ectrl_free;
- }
-
rc = cam_eeprom_init_subdev(e_ctrl);
if (rc)
goto free_soc;
@@ -260,10 +253,9 @@
return -EINVAL;
}
- if (soc_private) {
- kfree(soc_private->power_info.gpio_num_info);
+ if (soc_private)
kfree(soc_private);
- }
+
kfree(e_ctrl);
return 0;
@@ -451,6 +443,9 @@
platform_set_drvdata(pdev, e_ctrl);
v4l2_set_subdevdata(&e_ctrl->v4l2_dev_str.sd, e_ctrl);
+
+ e_ctrl->cam_eeprom_state = CAM_EEPROM_INIT;
+
return rc;
free_soc:
kfree(soc_private);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.c
index 97fede2..d9b43a4 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.c
@@ -226,13 +226,21 @@
static int cam_ois_i2c_driver_remove(struct i2c_client *client)
{
- struct cam_ois_ctrl_t *o_ctrl = i2c_get_clientdata(client);
+ struct cam_ois_ctrl_t *o_ctrl = i2c_get_clientdata(client);
+ struct cam_ois_soc_private *soc_private;
+ struct cam_sensor_power_ctrl_t *power_info;
if (!o_ctrl) {
CAM_ERR(CAM_OIS, "ois device is NULL");
return -EINVAL;
}
+ soc_private =
+ (struct cam_ois_soc_private *)o_ctrl->soc_info.soc_private;
+ power_info = &soc_private->power_info;
+
+ kfree(power_info->power_setting);
+ kfree(power_info->power_down_setting);
kfree(o_ctrl->soc_info.soc_private);
kfree(o_ctrl);
@@ -251,6 +259,7 @@
return -ENOMEM;
o_ctrl->soc_info.pdev = pdev;
+ o_ctrl->pdev = pdev;
o_ctrl->soc_info.dev = &pdev->dev;
o_ctrl->soc_info.dev_name = pdev->name;
@@ -302,6 +311,8 @@
platform_set_drvdata(pdev, o_ctrl);
v4l2_set_subdevdata(&o_ctrl->v4l2_dev_str.sd, o_ctrl);
+ o_ctrl->cam_ois_state = CAM_OIS_INIT;
+
return rc;
unreg_subdev:
cam_unregister_subdev(&(o_ctrl->v4l2_dev_str));
@@ -316,7 +327,9 @@
static int cam_ois_platform_driver_remove(struct platform_device *pdev)
{
- struct cam_ois_ctrl_t *o_ctrl;
+ struct cam_ois_ctrl_t *o_ctrl;
+ struct cam_ois_soc_private *soc_private;
+ struct cam_sensor_power_ctrl_t *power_info;
o_ctrl = platform_get_drvdata(pdev);
if (!o_ctrl) {
@@ -324,6 +337,12 @@
return -EINVAL;
}
+ soc_private =
+ (struct cam_ois_soc_private *)o_ctrl->soc_info.soc_private;
+ power_info = &soc_private->power_info;
+
+ kfree(power_info->power_setting);
+ kfree(power_info->power_down_setting);
kfree(o_ctrl->soc_info.soc_private);
kfree(o_ctrl->io_master_info.cci_client);
kfree(o_ctrl);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr.c
index 949f902..bb3789b 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr.c
@@ -51,6 +51,10 @@
kfree(flash_res);
}
mutex_unlock(&cam_res->flash_res_lock);
+
+ mutex_lock(&cam_res->clk_res_lock);
+ cam_res->shared_clk_ref_count = 0;
+ mutex_unlock(&cam_res->clk_res_lock);
}
void cam_res_mgr_led_trigger_register(const char *name, struct led_trigger **tp)
@@ -243,6 +247,9 @@
}
}
+ if (cam_res->shared_clk_ref_count > 1)
+ hold = true;
+
return hold;
}
@@ -258,11 +265,13 @@
mutex_lock(&cam_res->gpio_res_lock);
if (cam_res->pstatus == PINCTRL_STATUS_PUT) {
CAM_DBG(CAM_RES, "The shared pinctrl already been put");
+ mutex_unlock(&cam_res->gpio_res_lock);
return;
}
if (cam_res_mgr_shared_pinctrl_check_hold()) {
CAM_INFO(CAM_RES, "Need hold put this pinctrl");
+ mutex_unlock(&cam_res->gpio_res_lock);
return;
}
@@ -330,10 +339,12 @@
pinctrl_info = &cam_res->dt.pinctrl_info;
/*
- * If no gpio resource in gpio_res_list, it means
- * this device don't have shared gpio
+ * If there is no gpio resource in gpio_res_list and
+ * no shared clk in use now, it means this device
+ * doesn't have a shared gpio.
*/
- if (list_empty(&cam_res->gpio_res_list)) {
+ if (list_empty(&cam_res->gpio_res_list) &&
+ cam_res->shared_clk_ref_count < 1) {
ret = pinctrl_select_state(pinctrl_info->pinctrl,
pinctrl_info->gpio_state_suspend);
devm_pinctrl_put(pinctrl_info->pinctrl);
@@ -555,16 +566,20 @@
if (!found) {
gpio_set_value_cansleep(gpio, value);
} else {
- if (value)
+ if (value) {
gpio_res->power_on_count++;
- else
- gpio_res->power_on_count--;
-
- if (gpio_res->power_on_count > 0) {
- gpio_set_value_cansleep(gpio, value);
+ if (gpio_res->power_on_count < 2) {
+ gpio_set_value_cansleep(gpio, value);
+ CAM_DBG(CAM_RES,
+ "Shared GPIO(%d) : HIGH", gpio);
+ }
} else {
- gpio_res->power_on_count = 0;
- gpio_set_value_cansleep(gpio, 0);
+ gpio_res->power_on_count--;
+ if (gpio_res->power_on_count < 1) {
+ gpio_set_value_cansleep(gpio, value);
+ CAM_DBG(CAM_RES,
+ "Shared GPIO(%d) : LOW", gpio);
+ }
}
}
@@ -572,6 +587,20 @@
}
EXPORT_SYMBOL(cam_res_mgr_gpio_set_value);
+void cam_res_mgr_shared_clk_config(bool value)
+{
+ if (!cam_res)
+ return;
+
+ mutex_lock(&cam_res->clk_res_lock);
+ if (value)
+ cam_res->shared_clk_ref_count++;
+ else
+ cam_res->shared_clk_ref_count--;
+ mutex_unlock(&cam_res->clk_res_lock);
+}
+EXPORT_SYMBOL(cam_res_mgr_shared_clk_config);
+
static int cam_res_mgr_parse_dt(struct device *dev)
{
int rc = 0;
@@ -645,6 +674,7 @@
cam_res->dev = &pdev->dev;
mutex_init(&cam_res->flash_res_lock);
mutex_init(&cam_res->gpio_res_lock);
+ mutex_init(&cam_res->clk_res_lock);
rc = cam_res_mgr_parse_dt(&pdev->dev);
if (rc) {
@@ -655,6 +685,7 @@
cam_res->shared_gpio_enabled = true;
}
+ cam_res->shared_clk_ref_count = 0;
cam_res->pstatus = PINCTRL_STATUS_PUT;
INIT_LIST_HEAD(&cam_res->gpio_res_list);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr_api.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr_api.h
index 1c4c6c8..7fb13ba 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr_api.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr_api.h
@@ -134,4 +134,15 @@
*/
int cam_res_mgr_gpio_set_value(unsigned int gpio, int value);
+/**
+ * @brief: Configure the shared clk ref count
+ *
+ * Increment or decrement the shared clk reference count.
+ *
+ * @value : true to get (increment), false to put (decrement) the shared clk.
+ *
+ * @return None
+ */
+void cam_res_mgr_shared_clk_config(bool value);
+
#endif /* __CAM_RES_MGR_API_H__ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr_private.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr_private.h
index 4d46c8e..53a8778 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr_private.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr_private.h
@@ -96,6 +96,7 @@
* @flash_res_list : List head of the flash resource
* @gpio_res_lock : GPIO resource lock
* @flash_res_lock : Flash resource lock
+ * @clk_res_lock : Clk resource lock
*/
struct cam_res_mgr {
struct device *dev;
@@ -104,10 +105,13 @@
bool shared_gpio_enabled;
enum pinctrl_status pstatus;
+ uint shared_clk_ref_count;
+
struct list_head gpio_res_list;
struct list_head flash_res_list;
struct mutex gpio_res_lock;
struct mutex flash_res_lock;
+ struct mutex clk_res_lock;
};
#endif /* __CAM_RES_MGR_PRIVATE_H__ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
index bc92d7e..97158e4 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
@@ -182,12 +182,7 @@
return rc;
}
}
-
- i2c_reg_settings->request_id =
- csl_packet->header.request_id;
- i2c_reg_settings->is_settings_valid = 1;
- cam_sensor_update_req_mgr(s_ctrl, csl_packet);
- break;
+ break;
}
case CAM_SENSOR_PACKET_OPCODE_SENSOR_NOP: {
cam_sensor_update_req_mgr(s_ctrl, csl_packet);
@@ -207,6 +202,14 @@
CAM_ERR(CAM_SENSOR, "Fail parsing I2C Pkt: %d", rc);
return rc;
}
+
+ if ((csl_packet->header.op_code & 0xFFFFFF) ==
+ CAM_SENSOR_PACKET_OPCODE_SENSOR_UPDATE) {
+ i2c_reg_settings->request_id =
+ csl_packet->header.request_id;
+ cam_sensor_update_req_mgr(s_ctrl, csl_packet);
+ }
+
return rc;
}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
index b3de092..0a3878e 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
@@ -1235,6 +1235,9 @@
return -EINVAL;
}
+ if (soc_info->use_shared_clk)
+ cam_res_mgr_shared_clk_config(true);
+
ret = msm_camera_pinctrl_init(&(ctrl->pinctrl_info), ctrl->dev);
if (ret < 0) {
/* Some sensor subdev no pinctrl. */
@@ -1492,6 +1495,7 @@
(power_setting->delay * 1000) + 1000);
}
}
+
if (ctrl->cam_pinctrl_status) {
ret = pinctrl_select_state(
ctrl->pinctrl_info.pinctrl,
@@ -1502,6 +1506,10 @@
pinctrl_put(ctrl->pinctrl_info.pinctrl);
cam_res_mgr_shared_pinctrl_put();
}
+
+ if (soc_info->use_shared_clk)
+ cam_res_mgr_shared_clk_config(false);
+
ctrl->cam_pinctrl_status = 0;
cam_sensor_util_request_gpio_table(soc_info, 0);
@@ -1698,6 +1706,9 @@
cam_res_mgr_shared_pinctrl_put();
}
+ if (soc_info->use_shared_clk)
+ cam_res_mgr_shared_clk_config(false);
+
ctrl->cam_pinctrl_status = 0;
cam_sensor_util_request_gpio_table(soc_info, 0);
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
index 2422016..e7dcbe7 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
@@ -230,6 +230,8 @@
spin_unlock_bh(
&sync_dev->row_spinlocks[
parent_info->sync_id]);
+ spin_unlock_bh(
+ &sync_dev->row_spinlocks[sync_obj]);
return rc;
}
}
@@ -344,24 +346,8 @@
int cam_sync_destroy(int32_t sync_obj)
{
- struct sync_table_row *row = NULL;
-
- if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
- return -EINVAL;
-
- spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
- row = sync_dev->sync_table + sync_obj;
- if (row->state == CAM_SYNC_STATE_INVALID) {
- CAM_ERR(CAM_SYNC,
- "Error: accessing an uninitialized sync obj: idx = %d",
- sync_obj);
- spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
- return -EINVAL;
- }
cam_sync_deinit_object(sync_dev->sync_table, sync_obj);
- spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
-
return 0;
}
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync_private.h b/drivers/media/platform/msm/camera/cam_sync/cam_sync_private.h
index ba9bef4..e2a7fcb 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync_private.h
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync_private.h
@@ -55,6 +55,18 @@
};
/**
+ * enum sync_list_clean_type - Enum to indicate the type of list clean action
+ * to be performed, i.e. a specific sync ID or all sync IDs in the list.
+ *
+ * @SYNC_LIST_CLEAN_ID  : Specific object to be cleaned in the list
+ * @SYNC_LIST_CLEAN_ALL : Clean all objects in the list
+ */
+enum sync_list_clean_type {
+ SYNC_LIST_CLEAN_ID,
+ SYNC_LIST_CLEAN_ALL
+};
+
+/**
* struct sync_parent_info - Single node of information about a parent
* of a sync object, usually part of the parents linked list
*
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
index f66b882..6aa7c23 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
@@ -145,7 +145,7 @@
if (!child_info) {
cam_sync_util_cleanup_children_list(
- &row->children_list);
+ &row->children_list, SYNC_LIST_CLEAN_ALL, 0);
return -ENOMEM;
}
@@ -160,9 +160,10 @@
parent_info = kzalloc(sizeof(*parent_info), GFP_ATOMIC);
if (!parent_info) {
cam_sync_util_cleanup_parents_list(
- &child_row->parents_list);
+ &child_row->parents_list,
+ SYNC_LIST_CLEAN_ALL, 0);
cam_sync_util_cleanup_children_list(
- &row->children_list);
+ &row->children_list, SYNC_LIST_CLEAN_ALL, 0);
spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
return -ENOMEM;
}
@@ -197,27 +198,131 @@
int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx)
{
struct sync_table_row *row = table + idx;
- struct sync_child_info *child_info, *temp_child;
+ struct sync_child_info *child_info, *temp_child, *child_copy_info;
struct sync_callback_info *sync_cb, *temp_cb;
- struct sync_parent_info *parent_info, *temp_parent;
+ struct sync_parent_info *parent_info, *temp_parent, *parent_copy_info;
struct sync_user_payload *upayload_info, *temp_upayload;
+ struct sync_table_row *child_row = NULL, *parent_row = NULL;
+ struct list_head child_copy_list, parent_copy_list;
if (!table || idx <= 0 || idx >= CAM_SYNC_MAX_OBJS)
return -EINVAL;
- clear_bit(idx, sync_dev->bitmap);
- list_for_each_entry_safe(child_info, temp_child,
- &row->children_list, list) {
+ spin_lock_bh(&sync_dev->row_spinlocks[idx]);
+ if (row->state == CAM_SYNC_STATE_INVALID) {
+ CAM_ERR(CAM_SYNC,
+ "Error: accessing an uninitialized sync obj: idx = %d",
+ idx);
+ spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
+ return -EINVAL;
+ }
+
+ /* Objects child and parent objects will be added into this list */
+ INIT_LIST_HEAD(&child_copy_list);
+ INIT_LIST_HEAD(&parent_copy_list);
+
+ list_for_each_entry_safe(child_info, temp_child, &row->children_list,
+ list) {
+ if (child_info->sync_id <= 0)
+ continue;
+
+ child_copy_info = kzalloc(sizeof(*child_copy_info), GFP_ATOMIC);
+ if (!child_copy_info) {
+ /* No free memory, clean up the child_copy_list */
+ while (!list_empty(&child_copy_list)) {
+ child_info = list_first_entry(&child_copy_list,
+ struct sync_child_info, list);
+ list_del_init(&child_info->list);
+ kfree(child_info);
+ }
+ spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
+ goto deinit;
+ }
+ child_copy_info->sync_id = child_info->sync_id;
+ list_add_tail(&child_copy_info->list, &child_copy_list);
+ }
+
+ list_for_each_entry_safe(parent_info, temp_parent, &row->parents_list,
+ list) {
+ if (parent_info->sync_id <= 0)
+ continue;
+ parent_copy_info = kzalloc(sizeof(*parent_copy_info),
+ GFP_ATOMIC);
+ if (!parent_copy_info) {
+ /* No free memory, clean up the parent_copy_list */
+ while (!list_empty(&parent_copy_list)) {
+ parent_info = list_first_entry(
+ &parent_copy_list,
+ struct sync_parent_info, list);
+ list_del_init(&parent_info->list);
+ kfree(parent_info);
+ }
+ /* No free memory, clean up the child_copy_list */
+ while (!list_empty(&child_copy_list)) {
+ child_info = list_first_entry(&child_copy_list,
+ struct sync_child_info, list);
+ list_del_init(&child_info->list);
+ kfree(child_info);
+ }
+ spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
+ goto deinit;
+ }
+ parent_copy_info->sync_id = parent_info->sync_id;
+ list_add_tail(&parent_copy_info->list, &parent_copy_list);
+ }
+
+ spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
+ /* Cleanup the child to parent link from child list*/
+ while (!list_empty(&child_copy_list)) {
+ child_info = list_first_entry(&child_copy_list,
+ struct sync_child_info, list);
+ child_row = sync_dev->sync_table + child_info->sync_id;
+ spin_lock_bh(&sync_dev->row_spinlocks[child_info->sync_id]);
+ if (child_row->state == CAM_SYNC_STATE_INVALID) {
+ spin_unlock_bh(&sync_dev->row_spinlocks[
+ child_info->sync_id]);
+ list_del_init(&child_info->list);
+ kfree(child_info);
+ continue;
+ }
+
+ cam_sync_util_cleanup_parents_list(&child_row->parents_list,
+ SYNC_LIST_CLEAN_ID, idx);
+
+ spin_unlock_bh(&sync_dev->row_spinlocks[child_info->sync_id]);
list_del_init(&child_info->list);
kfree(child_info);
}
- list_for_each_entry_safe(parent_info, temp_parent,
- &row->parents_list, list) {
+ /* Cleanup the parent to child link */
+ while (!list_empty(&parent_copy_list)) {
+ parent_info = list_first_entry(&parent_copy_list,
+ struct sync_parent_info, list);
+ parent_row = sync_dev->sync_table + parent_info->sync_id;
+ spin_lock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
+ if (parent_row->state == CAM_SYNC_STATE_INVALID) {
+ spin_unlock_bh(&sync_dev->row_spinlocks[
+ parent_info->sync_id]);
+ list_del_init(&parent_info->list);
+ kfree(parent_info);
+ continue;
+ }
+
+ cam_sync_util_cleanup_children_list(&parent_row->children_list,
+ SYNC_LIST_CLEAN_ID, idx);
+
+ spin_unlock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
list_del_init(&parent_info->list);
kfree(parent_info);
}
+deinit:
+ spin_lock_bh(&sync_dev->row_spinlocks[idx]);
+ cam_sync_util_cleanup_children_list(&row->children_list,
+ SYNC_LIST_CLEAN_ALL, 0);
+ cam_sync_util_cleanup_parents_list(&row->parents_list,
+ SYNC_LIST_CLEAN_ALL, 0);
+
list_for_each_entry_safe(upayload_info, temp_upayload,
&row->user_payload_list, list) {
list_del_init(&upayload_info->list);
@@ -232,6 +337,8 @@
row->state = CAM_SYNC_STATE_INVALID;
memset(row, 0, sizeof(*row));
+ clear_bit(idx, sync_dev->bitmap);
+ spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
return 0;
}
@@ -350,26 +457,48 @@
return result;
}
-void cam_sync_util_cleanup_children_list(struct list_head *list_to_clean)
+void cam_sync_util_cleanup_children_list(struct list_head *list_to_clean,
+ uint32_t list_clean_type, uint32_t sync_obj)
{
struct sync_child_info *child_info = NULL;
struct sync_child_info *temp_child_info = NULL;
+ uint32_t curr_sync_obj;
list_for_each_entry_safe(child_info,
temp_child_info, list_to_clean, list) {
+ if ((list_clean_type == SYNC_LIST_CLEAN_ID) &&
+ (child_info->sync_id != sync_obj))
+ continue;
+
+ curr_sync_obj = child_info->sync_id;
list_del_init(&child_info->list);
kfree(child_info);
+
+ if ((list_clean_type == SYNC_LIST_CLEAN_ID) &&
+ (curr_sync_obj == sync_obj))
+ break;
}
}
-void cam_sync_util_cleanup_parents_list(struct list_head *list_to_clean)
+void cam_sync_util_cleanup_parents_list(struct list_head *list_to_clean,
+ uint32_t list_clean_type, uint32_t sync_obj)
{
struct sync_parent_info *parent_info = NULL;
struct sync_parent_info *temp_parent_info = NULL;
+ uint32_t curr_sync_obj;
list_for_each_entry_safe(parent_info,
temp_parent_info, list_to_clean, list) {
+ if ((list_clean_type == SYNC_LIST_CLEAN_ID) &&
+ (parent_info->sync_id != sync_obj))
+ continue;
+
+ curr_sync_obj = parent_info->sync_id;
list_del_init(&parent_info->list);
kfree(parent_info);
+
+ if ((list_clean_type == SYNC_LIST_CLEAN_ID) &&
+ (curr_sync_obj == sync_obj))
+ break;
}
}
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.h b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.h
index 8b60ce1..1c5c4bf 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.h
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.h
@@ -141,17 +141,25 @@
/**
* @brief: Function to clean up the children of a sync object
* @param list_to_clean : List to clean up
+ * @list_clean_type : Clean a specific object or clean all objects
+ * @sync_obj : Sync object to be cleaned if the list clean type is
+ * SYNC_LIST_CLEAN_ID
*
* @return None
*/
-void cam_sync_util_cleanup_children_list(struct list_head *list_to_clean);
+void cam_sync_util_cleanup_children_list(struct list_head *list_to_clean,
+ uint32_t list_clean_type, uint32_t sync_obj);
/**
* @brief: Function to clean up the parents of a sync object
* @param list_to_clean : List to clean up
+ * @list_clean_type : Clean a specific object or clean all objects
+ * @sync_obj : Sync object to be cleaned if the list clean type is
+ * SYNC_LIST_CLEAN_ID
*
* @return None
*/
-void cam_sync_util_cleanup_parents_list(struct list_head *list_to_clean);
+void cam_sync_util_cleanup_parents_list(struct list_head *list_to_clean,
+ uint32_t list_clean_type, uint32_t sync_obj);
#endif /* __CAM_SYNC_UTIL_H__ */
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
index 611c4e9..07fb944 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
@@ -410,6 +410,13 @@
of_node = soc_info->dev->of_node;
+ if (!of_property_read_bool(of_node, "use-shared-clk")) {
+ CAM_DBG(CAM_UTIL, "No shared clk parameter defined");
+ soc_info->use_shared_clk = false;
+ } else {
+ soc_info->use_shared_clk = true;
+ }
+
count = of_property_count_strings(of_node, "clock-names");
CAM_DBG(CAM_UTIL, "count = %d", count);
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
index 5123ec4..4a87d50 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
@@ -180,6 +180,7 @@
struct regulator *rgltr[CAM_SOC_MAX_REGULATOR];
uint32_t rgltr_delay[CAM_SOC_MAX_REGULATOR];
+ uint32_t use_shared_clk;
uint32_t num_clk;
const char *clk_name[CAM_SOC_MAX_CLK];
struct clk *clk[CAM_SOC_MAX_CLK];
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
index c7d1074..a455357 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
@@ -54,7 +54,7 @@
#define ROT_HW_ACQUIRE_TIMEOUT_IN_MS 100
/* waiting for inline hw start */
-#define ROT_INLINE_START_TIMEOUT_IN_MS 2000
+#define ROT_INLINE_START_TIMEOUT_IN_MS (10000 + 500)
/* default pixel per clock ratio */
#define ROT_PIXEL_PER_CLK_NUMERATOR 36
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
index 13c5098..523ff5b 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
@@ -1440,6 +1440,61 @@
EXPORT_SYMBOL(sde_rotator_inline_get_pixfmt_caps);
/*
+ * _sde_rotator_inline_cleanup - perform inline related request cleanup
+ * This function assumes rot_dev->mgr lock has been taken when called.
+ * @handle: Pointer to rotator context
+ * @request: Pointer to rotation request
+ * return: 0 if success; -EAGAIN if cleanup should be retried
+ */
+static int _sde_rotator_inline_cleanup(void *handle,
+ struct sde_rotator_request *request)
+{
+ struct sde_rotator_ctx *ctx;
+ struct sde_rotator_device *rot_dev;
+ int ret;
+
+ if (!handle || !request) {
+ SDEROT_ERR("invalid rotator handle/request\n");
+ return -EINVAL;
+ }
+
+ ctx = handle;
+ rot_dev = ctx->rot_dev;
+
+ if (!rot_dev || !rot_dev->mgr) {
+ SDEROT_ERR("invalid rotator device\n");
+ return -EINVAL;
+ }
+
+ if (request->committed) {
+ /* wait until request is finished */
+ sde_rot_mgr_unlock(rot_dev->mgr);
+ mutex_unlock(&rot_dev->lock);
+ ret = wait_event_timeout(ctx->wait_queue,
+ sde_rotator_is_request_retired(request),
+ msecs_to_jiffies(rot_dev->streamoff_timeout));
+ mutex_lock(&rot_dev->lock);
+ sde_rot_mgr_lock(rot_dev->mgr);
+
+ if (!ret) {
+ SDEROT_ERR("timeout w/o retire s:%d\n",
+ ctx->session_id);
+ SDEROT_EVTLOG(ctx->session_id, SDE_ROT_EVTLOG_ERROR);
+ sde_rotator_abort_inline_request(rot_dev->mgr,
+ ctx->private, request->req);
+ return -EAGAIN;
+ } else if (ret == 1) {
+ SDEROT_ERR("timeout w/ retire s:%d\n", ctx->session_id);
+ SDEROT_EVTLOG(ctx->session_id, SDE_ROT_EVTLOG_ERROR);
+ }
+ }
+
+ sde_rotator_req_finish(rot_dev->mgr, ctx->private, request->req);
+ sde_rotator_retire_request(request);
+ return 0;
+}
+
+/*
* sde_rotator_inline_commit - commit given rotator command
* @handle: Pointer to rotator context
* @cmd: Pointer to rotator command
@@ -1466,7 +1521,7 @@
ctx = handle;
rot_dev = ctx->rot_dev;
- if (!rot_dev) {
+ if (!rot_dev || !rot_dev->mgr) {
SDEROT_ERR("invalid rotator device\n");
return -EINVAL;
}
@@ -1498,6 +1553,7 @@
(cmd->video_mode << 5) |
(cmd_type << 24));
+ mutex_lock(&rot_dev->lock);
sde_rot_mgr_lock(rot_dev->mgr);
if (cmd_type == SDE_ROTATOR_INLINE_CMD_VALIDATE ||
@@ -1707,30 +1763,11 @@
}
request = cmd->priv_handle;
- req = request->req;
- if (request->committed) {
- /* wait until request is finished */
- sde_rot_mgr_unlock(rot_dev->mgr);
- ret = wait_event_timeout(ctx->wait_queue,
- sde_rotator_is_request_retired(request),
- msecs_to_jiffies(rot_dev->streamoff_timeout));
- if (!ret) {
- SDEROT_ERR("timeout w/o retire s:%d\n",
- ctx->session_id);
- SDEROT_EVTLOG(ctx->session_id,
- SDE_ROT_EVTLOG_ERROR);
- } else if (ret == 1) {
- SDEROT_ERR("timeout w/ retire s:%d\n",
- ctx->session_id);
- SDEROT_EVTLOG(ctx->session_id,
- SDE_ROT_EVTLOG_ERROR);
- }
- sde_rot_mgr_lock(rot_dev->mgr);
- }
+ /* attempt single retry if first cleanup attempt failed */
+ if (_sde_rotator_inline_cleanup(handle, request) == -EAGAIN)
+ _sde_rotator_inline_cleanup(handle, request);
- sde_rotator_req_finish(rot_dev->mgr, ctx->private, req);
- sde_rotator_retire_request(request);
cmd->priv_handle = NULL;
} else if (cmd_type == SDE_ROTATOR_INLINE_CMD_ABORT) {
if (!cmd->priv_handle) {
@@ -1746,6 +1783,7 @@
}
sde_rot_mgr_unlock(rot_dev->mgr);
+ mutex_unlock(&rot_dev->lock);
return 0;
error_handle_request:
@@ -1758,6 +1796,7 @@
error_invalid_handle:
error_init_request:
sde_rot_mgr_unlock(rot_dev->mgr);
+ mutex_unlock(&rot_dev->lock);
return ret;
}
EXPORT_SYMBOL(sde_rotator_inline_commit);
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index c3849a8..6ecec03 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -1634,7 +1634,7 @@
/* use prefill bandwidth instead if specified */
if (cfg->prefill_bw)
- bw = DIV_ROUND_UP(cfg->prefill_bw,
+ bw = DIV_ROUND_UP_SECTOR_T(cfg->prefill_bw,
TRAFFIC_SHAPE_VSYNC_CLK);
if (bw > 0xFF)
diff --git a/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c b/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c
index 83b80d7..cdcfa96 100644
--- a/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c
+++ b/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c
@@ -312,6 +312,7 @@
case HAL_COLOR_FORMAT_NV12_UBWC:
return 8;
case HAL_COLOR_FORMAT_NV12_TP10_UBWC:
+ case HAL_COLOR_FORMAT_P010:
return 10;
default:
dprintk(VIDC_ERR,
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index dd62fb7..2d8fdda 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -1868,7 +1868,7 @@
break;
case V4L2_CID_MPEG_VIDC_VIDEO_USELTRFRAME:
property_id = HAL_CONFIG_VENC_USELTRFRAME;
- use_ltr.ref_ltr = 0x1 << ctrl->val;
+ use_ltr.ref_ltr = ctrl->val;
use_ltr.use_constraint = false;
use_ltr.frames = 0;
pdata = &use_ltr;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
index 32b548a..1d22077 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
@@ -304,11 +304,18 @@
*/
if (inst->session_type == MSM_VIDC_DECODER) {
+ struct vb2_v4l2_buffer *vbuf = NULL;
+
q = &inst->bufq[CAPTURE_PORT].vb2_bufq;
for (i = 0; i < q->num_buffers; i++) {
vb = q->bufs[i];
- if (vb && vb->state != VB2_BUF_STATE_ACTIVE &&
- vb->planes[0].bytesused)
+ if (!vb)
+ continue;
+ vbuf = to_vb2_v4l2_buffer(vb);
+ if (vbuf &&
+ vb->state != VB2_BUF_STATE_ACTIVE &&
+ !(vbuf->flags &
+ V4L2_QCOM_BUF_FLAG_DECODEONLY))
fw_out_qsize++;
}
} else {
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 9dce3f9..7ca2fd6 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -5761,8 +5761,9 @@
skip = true;
} else if (b->type ==
V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
- if (!i) /* yuv */
- skip = true;
+ if (!i) { /* yuv */
+ /* all values are correct */
+ }
}
} else if (inst->session_type == MSM_VIDC_ENCODER) {
if (b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_platform.c b/drivers/media/platform/msm/vidc/msm_vidc_platform.c
index 5e5d030..d7641c3 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_platform.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_platform.c
@@ -170,6 +170,10 @@
.value = 1,
},
{
+ .key = "qcom,domain-attr-cache-pagetables",
+ .value = 1,
+ },
+ {
.key = "qcom,max-secure-instances",
.value = 5,
},
@@ -217,6 +221,10 @@
.value = 1,
},
{
+ .key = "qcom,domain-attr-cache-pagetables",
+ .value = 1,
+ },
+ {
.key = "qcom,max-secure-instances",
.value = 5,
},
diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c
index 8b099fe..71b65ab 100644
--- a/drivers/media/usb/cx231xx/cx231xx-core.c
+++ b/drivers/media/usb/cx231xx/cx231xx-core.c
@@ -356,7 +356,12 @@
*/
if ((ven_req->wLength > 4) && ((ven_req->bRequest == 0x4) ||
(ven_req->bRequest == 0x5) ||
- (ven_req->bRequest == 0x6))) {
+ (ven_req->bRequest == 0x6) ||
+
+ /* Internal Master 3 Bus can send
+ * and receive only 4 bytes at a time
+ */
+ (ven_req->bRequest == 0x2))) {
unsend_size = 0;
pdata = ven_req->pBuff;
diff --git a/drivers/mfd/ab8500-sysctrl.c b/drivers/mfd/ab8500-sysctrl.c
index 207cc49..8062d37 100644
--- a/drivers/mfd/ab8500-sysctrl.c
+++ b/drivers/mfd/ab8500-sysctrl.c
@@ -98,7 +98,7 @@
u8 bank;
if (sysctrl_dev == NULL)
- return -EINVAL;
+ return -EPROBE_DEFER;
bank = (reg >> 8);
if (!valid_bank(bank))
@@ -114,11 +114,13 @@
u8 bank;
if (sysctrl_dev == NULL)
- return -EINVAL;
+ return -EPROBE_DEFER;
bank = (reg >> 8);
- if (!valid_bank(bank))
+ if (!valid_bank(bank)) {
+ pr_err("invalid bank\n");
return -EINVAL;
+ }
return abx500_mask_and_set_register_interruptible(sysctrl_dev, bank,
(u8)(reg & 0xFF), mask, value);
@@ -145,9 +147,15 @@
return 0;
}
+static const struct of_device_id ab8500_sysctrl_match[] = {
+ { .compatible = "stericsson,ab8500-sysctrl", },
+ {}
+};
+
static struct platform_driver ab8500_sysctrl_driver = {
.driver = {
.name = "ab8500-sysctrl",
+ .of_match_table = ab8500_sysctrl_match,
},
.probe = ab8500_sysctrl_probe,
.remove = ab8500_sysctrl_remove,
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index ba130be..9617fc3 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -205,14 +205,14 @@
static struct resource axp288_power_button_resources[] = {
{
.name = "PEK_DBR",
- .start = AXP288_IRQ_POKN,
- .end = AXP288_IRQ_POKN,
+ .start = AXP288_IRQ_POKP,
+ .end = AXP288_IRQ_POKP,
.flags = IORESOURCE_IRQ,
},
{
.name = "PEK_DBF",
- .start = AXP288_IRQ_POKP,
- .end = AXP288_IRQ_POKP,
+ .start = AXP288_IRQ_POKN,
+ .end = AXP288_IRQ_POKN,
.flags = IORESOURCE_IRQ,
},
};
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index fa4fe02..eef202d 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -1620,6 +1620,9 @@
cxl_sysfs_adapter_remove(adapter);
cxl_debugfs_adapter_remove(adapter);
+ /* Flush adapter datacache as its about to be removed */
+ cxl_data_cache_flush(adapter);
+
cxl_deconfigure_adapter(adapter);
device_unregister(&adapter->dev);
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index e2af61f..451d417 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -1320,6 +1320,9 @@
return -EOPNOTSUPP;
}
+ if (!mei_cl_is_connected(cl))
+ return -ENODEV;
+
rets = pm_runtime_get(dev->dev);
if (rets < 0 && rets != -EINPROGRESS) {
pm_runtime_put_noidle(dev->dev);
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index afb8d72..4c4835d 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -1900,20 +1900,22 @@
ptr_app->blocked_on_listener_id = resp->data;
/* sleep until listener is available */
- qseecom.app_block_ref_cnt++;
- ptr_app->app_blocked = true;
- mutex_unlock(&app_access_lock);
- if (wait_event_freezable(
+ do {
+ qseecom.app_block_ref_cnt++;
+ ptr_app->app_blocked = true;
+ mutex_unlock(&app_access_lock);
+ if (wait_event_freezable(
list_ptr->listener_block_app_wq,
!list_ptr->listener_in_use)) {
- pr_err("Interrupted: listener_id %d, app_id %d\n",
+ pr_err("Interrupted: listener_id %d, app_id %d\n",
resp->data, ptr_app->app_id);
- ret = -ERESTARTSYS;
- goto exit;
- }
- mutex_lock(&app_access_lock);
- ptr_app->app_blocked = false;
- qseecom.app_block_ref_cnt--;
+ ret = -ERESTARTSYS;
+ goto exit;
+ }
+ mutex_lock(&app_access_lock);
+ ptr_app->app_blocked = false;
+ qseecom.app_block_ref_cnt--;
+ } while (list_ptr->listener_in_use);
ptr_app->blocked_on_listener_id = 0;
/* notify the blocked app that listener is available */
@@ -1964,18 +1966,20 @@
pr_debug("lsntr %d in_use = %d\n",
resp->data, list_ptr->listener_in_use);
/* sleep until listener is available */
- qseecom.app_block_ref_cnt++;
- mutex_unlock(&app_access_lock);
- if (wait_event_freezable(
+ do {
+ qseecom.app_block_ref_cnt++;
+ mutex_unlock(&app_access_lock);
+ if (wait_event_freezable(
list_ptr->listener_block_app_wq,
!list_ptr->listener_in_use)) {
- pr_err("Interrupted: listener_id %d, session_id %d\n",
+ pr_err("Interrupted: listener_id %d, session_id %d\n",
resp->data, session_id);
- ret = -ERESTARTSYS;
- goto exit;
- }
- mutex_lock(&app_access_lock);
- qseecom.app_block_ref_cnt--;
+ ret = -ERESTARTSYS;
+ goto exit;
+ }
+ mutex_lock(&app_access_lock);
+ qseecom.app_block_ref_cnt--;
+ } while (list_ptr->listener_in_use);
/* notify TZ that listener is available */
pr_warn("Lsntr %d is available, unblock session(%d) in TZ\n",
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 538a8d9..e4af5c3 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -3184,6 +3184,16 @@
return &mqrq->cmdq_req;
}
+static void mmc_blk_cmdq_requeue_rw_rq(struct mmc_queue *mq,
+ struct request *req)
+{
+ struct mmc_card *card = mq->card;
+ struct mmc_host *host = card->host;
+
+ blk_requeue_request(req->q, req);
+ mmc_put_card(host->card);
+}
+
static int mmc_blk_cmdq_issue_rw_rq(struct mmc_queue *mq, struct request *req)
{
struct mmc_queue_req *active_mqrq;
@@ -3231,6 +3241,15 @@
wait_event_interruptible(ctx->queue_empty_wq,
(!ctx->active_reqs));
+ if (ret) {
+ /* clear pending request */
+ WARN_ON(!test_and_clear_bit(req->tag,
+ &host->cmdq_ctx.data_active_reqs));
+ WARN_ON(!test_and_clear_bit(req->tag,
+ &host->cmdq_ctx.active_reqs));
+ mmc_cmdq_clk_scaling_stop_busy(host, true, false);
+ }
+
return ret;
}
@@ -4058,6 +4077,13 @@
ret = mmc_blk_cmdq_issue_flush_rq(mq, req);
} else {
ret = mmc_blk_cmdq_issue_rw_rq(mq, req);
+ /*
+ * If issuing of the request fails with either an EBUSY or
+ * EAGAIN error, re-queue the request.
+ * This case would occur with ICE calls.
+ */
+ if (ret == -EBUSY || ret == -EAGAIN)
+ mmc_blk_cmdq_requeue_rw_rq(mq, req);
}
}
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 300e9e1c..c172be9 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1209,9 +1209,51 @@
return 0;
}
-static void mmc_start_cmdq_request(struct mmc_host *host,
+static int mmc_cmdq_check_retune(struct mmc_host *host)
+{
+ bool cmdq_mode;
+ int err = 0;
+
+ if (!host->need_retune || host->doing_retune || !host->card ||
+ mmc_card_hs400es(host->card) ||
+ (host->ios.clock <= MMC_HIGH_DDR_MAX_DTR))
+ return 0;
+
+ cmdq_mode = mmc_card_cmdq(host->card);
+ if (cmdq_mode) {
+ err = mmc_cmdq_halt(host, true);
+ if (err) {
+ pr_err("%s: %s: failed halting queue (%d)\n",
+ mmc_hostname(host), __func__, err);
+ host->cmdq_ops->dumpstate(host);
+ goto halt_failed;
+ }
+ }
+
+ mmc_retune_hold(host);
+ err = mmc_retune(host);
+ mmc_retune_release(host);
+
+ if (cmdq_mode) {
+ if (mmc_cmdq_halt(host, false)) {
+ pr_err("%s: %s: cmdq unhalt failed\n",
+ mmc_hostname(host), __func__);
+ host->cmdq_ops->dumpstate(host);
+ }
+ }
+
+halt_failed:
+ pr_debug("%s: %s: Retuning done err: %d\n",
+ mmc_hostname(host), __func__, err);
+
+ return err;
+}
+
+static int mmc_start_cmdq_request(struct mmc_host *host,
struct mmc_request *mrq)
{
+ int ret = 0;
+
if (mrq->data) {
pr_debug("%s: blksz %d blocks %d flags %08x tsac %lu ms nsac %d\n",
mmc_hostname(host), mrq->data->blksz,
@@ -1233,11 +1275,22 @@
}
mmc_host_clk_hold(host);
- if (likely(host->cmdq_ops->request))
- host->cmdq_ops->request(host, mrq);
- else
- pr_err("%s: %s: issue request failed\n", mmc_hostname(host),
- __func__);
+ mmc_cmdq_check_retune(host);
+ if (likely(host->cmdq_ops->request)) {
+ ret = host->cmdq_ops->request(host, mrq);
+ } else {
+ ret = -ENOENT;
+ pr_err("%s: %s: cmdq request host op is not available\n",
+ mmc_hostname(host), __func__);
+ }
+
+ if (ret) {
+ mmc_host_clk_release(host);
+ pr_err("%s: %s: issue request failed, err=%d\n",
+ mmc_hostname(host), __func__, ret);
+ }
+
+ return ret;
}
/**
@@ -1769,8 +1822,7 @@
mrq->cmd->error = -ENOMEDIUM;
return -ENOMEDIUM;
}
- mmc_start_cmdq_request(host, mrq);
- return 0;
+ return mmc_start_cmdq_request(host, mrq);
}
EXPORT_SYMBOL(mmc_cmdq_start_req);
@@ -3420,6 +3472,9 @@
if (cd_irq && mmc_bus_manual_resume(host))
host->ignore_bus_resume_flags = true;
+ if (delayed_work_pending(&host->detect))
+ cancel_delayed_work(&host->detect);
+
mmc_schedule_delayed_work(&host->detect, delay);
}
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 64c8743..8a503b2 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -434,7 +434,8 @@
else
return 0;
- if (!host->need_retune || host->doing_retune || !host->card)
+ if (!host->need_retune || host->doing_retune || !host->card ||
+ mmc_card_hs400es(host->card))
return 0;
host->need_retune = 0;
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 10d55b8..e3bbc2c 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -1309,7 +1309,7 @@
while (retries) {
err = mmc_sd_init_card(host, host->card->ocr, host->card);
- if (err) {
+ if (err && err != -ENOENT) {
printk(KERN_ERR "%s: Re-init card rc = %d (retries = %d)\n",
mmc_hostname(host), err, retries);
retries--;
@@ -1324,6 +1324,12 @@
#else
err = mmc_sd_init_card(host, host->card->ocr, host->card);
#endif
+ if (err == -ENOENT) {
+ pr_debug("%s: %s: found a different card(%d), do detect change\n",
+ mmc_hostname(host), __func__, err);
+ mmc_card_set_removed(host->card);
+ mmc_detect_change(host, msecs_to_jiffies(200));
+ }
mmc_card_clr_suspended(host->card);
if (host->card->sdr104_blocked)
diff --git a/drivers/mmc/host/cmdq_hci.c b/drivers/mmc/host/cmdq_hci.c
index f16a999..55ce946 100644
--- a/drivers/mmc/host/cmdq_hci.c
+++ b/drivers/mmc/host/cmdq_hci.c
@@ -806,7 +806,7 @@
mmc->err_stats[MMC_ERR_ICE_CFG]++;
pr_err("%s: failed to configure crypto: err %d tag %d\n",
mmc_hostname(mmc), err, tag);
- goto out;
+ goto ice_err;
}
}
@@ -824,7 +824,7 @@
if (err) {
pr_err("%s: %s: failed to setup tx desc: %d\n",
mmc_hostname(mmc), __func__, err);
- goto out;
+ goto desc_err;
}
cq_host->mrq_slot[tag] = mrq;
@@ -844,6 +844,22 @@
/* Commit the doorbell write immediately */
wmb();
+ return err;
+
+desc_err:
+ if (cq_host->ops->crypto_cfg_end) {
+ err = cq_host->ops->crypto_cfg_end(mmc, mrq);
+ if (err) {
+ pr_err("%s: failed to end ice config: err %d tag %d\n",
+ mmc_hostname(mmc), err, tag);
+ }
+ }
+ if (!(cq_host->caps & CMDQ_CAP_CRYPTO_SUPPORT) &&
+ cq_host->ops->crypto_cfg_reset)
+ cq_host->ops->crypto_cfg_reset(mmc, tag);
+ice_err:
+ if (err)
+ cmdq_runtime_pm_put(cq_host);
out:
return err;
}
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index c531dee..8f27fe3 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -21,6 +21,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/gpio.h>
+#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
index 8b8470c..f9b2a77 100644
--- a/drivers/mtd/nand/sunxi_nand.c
+++ b/drivers/mtd/nand/sunxi_nand.c
@@ -320,6 +320,10 @@
ret = wait_for_completion_timeout(&nfc->complete,
msecs_to_jiffies(timeout_ms));
+ if (!ret)
+ ret = -ETIMEDOUT;
+ else
+ ret = 0;
writel(0, nfc->regs + NFC_REG_INT);
} else {
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index cf7c189..d065c0e 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -178,7 +178,6 @@
break;
case BOSCH_D_CAN:
priv->regs = reg_map_d_can;
- priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
break;
default:
ret = -EINVAL;
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index e36d105..717530e 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -320,7 +320,6 @@
break;
case BOSCH_D_CAN:
priv->regs = reg_map_d_can;
- priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
priv->read_reg32 = d_can_plat_read_reg32;
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index 481895b..c06ef43 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -670,9 +670,9 @@
priv->base + IFI_CANFD_FTIME);
/* Configure transmitter delay */
- tdc = (dbt->brp * (dbt->phase_seg1 + 1)) & IFI_CANFD_TDELAY_MASK;
- writel(IFI_CANFD_TDELAY_EN | IFI_CANFD_TDELAY_ABS | tdc,
- priv->base + IFI_CANFD_TDELAY);
+ tdc = dbt->brp * (dbt->prop_seg + dbt->phase_seg1);
+ tdc &= IFI_CANFD_TDELAY_MASK;
+ writel(IFI_CANFD_TDELAY_EN | tdc, priv->base + IFI_CANFD_TDELAY);
}
static void ifi_canfd_set_filter(struct net_device *ndev, const u32 id,
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index b0c8085..1ac2090 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -539,6 +539,13 @@
}
stats->rx_over_errors++;
stats->rx_errors++;
+
+ /* reset the CAN IP by entering reset mode
+ * ignoring timeout error
+ */
+ set_reset_mode(dev);
+ set_normal_mode(dev);
+
/* clear bit */
sun4i_can_write_cmdreg(priv, SUN4I_CMD_CLEAR_OR_FLAG);
}
@@ -653,8 +660,9 @@
netif_wake_queue(dev);
can_led_event(dev, CAN_LED_EVENT_TX);
}
- if (isrc & SUN4I_INT_RBUF_VLD) {
- /* receive interrupt */
+ if ((isrc & SUN4I_INT_RBUF_VLD) &&
+ !(isrc & SUN4I_INT_DATA_OR)) {
+ /* receive interrupt - don't read if overrun occurred */
while (status & SUN4I_STA_RBUF_RDY) {
/* RX buffer is not empty */
sun4i_can_rx(dev);
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 3066d9c..e2512ab 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -36,9 +36,9 @@
/*****************************************************************************/
/* Timeout in micro-sec */
-#define ADMIN_CMD_TIMEOUT_US (1000000)
+#define ADMIN_CMD_TIMEOUT_US (3000000)
-#define ENA_ASYNC_QUEUE_DEPTH 4
+#define ENA_ASYNC_QUEUE_DEPTH 16
#define ENA_ADMIN_QUEUE_DEPTH 32
#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 69d7e9e..c5eaf76 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -100,7 +100,7 @@
/* Number of queues to check for missing queues per timer service */
#define ENA_MONITORED_TX_QUEUES 4
/* Max timeout packets before device reset */
-#define MAX_NUM_OF_TIMEOUTED_PACKETS 32
+#define MAX_NUM_OF_TIMEOUTED_PACKETS 128
#define ENA_TX_RING_IDX_NEXT(idx, ring_size) (((idx) + 1) & ((ring_size) - 1))
@@ -116,9 +116,9 @@
#define ENA_IO_IRQ_IDX(q) (ENA_IO_IRQ_FIRST_IDX + (q))
/* ENA device should send keep alive msg every 1 sec.
- * We wait for 3 sec just to be on the safe side.
+ * We wait for 6 sec just to be on the safe side.
*/
-#define ENA_DEVICE_KALIVE_TIMEOUT (3 * HZ)
+#define ENA_DEVICE_KALIVE_TIMEOUT (6 * HZ)
#define ENA_MMIO_DISABLE_REG_READ BIT(0)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 20e569b..333df54 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -97,6 +97,8 @@
BCM57407_NPAR,
BCM57414_NPAR,
BCM57416_NPAR,
+ BCM57452,
+ BCM57454,
NETXTREME_E_VF,
NETXTREME_C_VF,
};
@@ -131,6 +133,8 @@
{ "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
{ "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
{ "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
+ { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
+ { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
{ "Broadcom NetXtreme-E Ethernet Virtual Function" },
{ "Broadcom NetXtreme-C Ethernet Virtual Function" },
};
@@ -166,6 +170,8 @@
{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
+ { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
#ifdef CONFIG_BNXT_SRIOV
{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 707bc46..6ea10a9 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -28,6 +28,7 @@
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy.h>
+#include <linux/phy_fixed.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <net/hwbm.h>
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 01cf094..8f84961 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -33,6 +33,7 @@
/* Extended Registers */
#define DP83867_RGMIICTL 0x0032
+#define DP83867_STRAP_STS1 0x006E
#define DP83867_RGMIIDCTL 0x0086
#define DP83867_SW_RESET BIT(15)
@@ -56,9 +57,13 @@
#define DP83867_RGMII_TX_CLK_DELAY_EN BIT(1)
#define DP83867_RGMII_RX_CLK_DELAY_EN BIT(0)
+/* STRAP_STS1 bits */
+#define DP83867_STRAP_STS1_RESERVED BIT(11)
+
/* PHY CTRL bits */
#define DP83867_PHYCR_FIFO_DEPTH_SHIFT 14
#define DP83867_PHYCR_FIFO_DEPTH_MASK (3 << 14)
+#define DP83867_PHYCR_RESERVED_MASK BIT(11)
/* RGMIIDCTL bits */
#define DP83867_RGMII_TX_CLK_DELAY_SHIFT 4
@@ -141,7 +146,7 @@
static int dp83867_config_init(struct phy_device *phydev)
{
struct dp83867_private *dp83867;
- int ret, val;
+ int ret, val, bs;
u16 delay;
if (!phydev->priv) {
@@ -164,6 +169,22 @@
return val;
val &= ~DP83867_PHYCR_FIFO_DEPTH_MASK;
val |= (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT);
+
+ /* The code below checks if "port mirroring" N/A MODE4 has been
+ * enabled during power on bootstrap.
+ *
+ * Such N/A mode enabled by mistake can put PHY IC in some
+ * internal testing mode and disable RGMII transmission.
+ *
+ * In this particular case one needs to check STRAP_STS1
+ * register's bit 11 (marked as RESERVED).
+ */
+
+ bs = phy_read_mmd_indirect(phydev, DP83867_STRAP_STS1,
+ DP83867_DEVADDR);
+ if (bs & DP83867_STRAP_STS1_RESERVED)
+ val &= ~DP83867_PHYCR_RESERVED_MASK;
+
ret = phy_write(phydev, MII_DP83867_PHYCTRL, val);
if (ret)
return ret;
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index afbfc0f..dc6d3b0 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -769,8 +769,10 @@
u8 *buf;
int len;
int temp;
+ int err;
u8 iface_no;
struct usb_cdc_parsed_header hdr;
+ u16 curr_ntb_format;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -875,6 +877,32 @@
goto error2;
}
+ /*
+ * Some Huawei devices have been observed to come out of reset in NDP32 mode.
+ * Let's check if this is the case, and set the device to NDP16 mode again if
+ * needed.
+ */
+ if (ctx->drvflags & CDC_NCM_FLAG_RESET_NTB16) {
+ err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_FORMAT,
+ USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
+ 0, iface_no, &curr_ntb_format, 2);
+ if (err < 0) {
+ goto error2;
+ }
+
+ if (curr_ntb_format == USB_CDC_NCM_NTB32_FORMAT) {
+ dev_info(&intf->dev, "resetting NTB format to 16-bit");
+ err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
+ USB_TYPE_CLASS | USB_DIR_OUT
+ | USB_RECIP_INTERFACE,
+ USB_CDC_NCM_NTB16_FORMAT,
+ iface_no, NULL, 0);
+
+ if (err < 0)
+ goto error2;
+ }
+ }
+
cdc_ncm_find_endpoints(dev, ctx->data);
cdc_ncm_find_endpoints(dev, ctx->control);
if (!dev->in || !dev->out || !dev->status) {
diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c
index 2680a65..63f28908 100644
--- a/drivers/net/usb/huawei_cdc_ncm.c
+++ b/drivers/net/usb/huawei_cdc_ncm.c
@@ -80,6 +80,12 @@
* be at the end of the frame.
*/
drvflags |= CDC_NCM_FLAG_NDP_TO_END;
+
+ /* Additionally, it has been reported that some Huawei E3372H devices, with
+ * firmware version 21.318.01.00.541, come out of reset in NTB32 format mode, hence
+ * needing to be set to the NTB16 one again.
+ */
+ drvflags |= CDC_NCM_FLAG_RESET_NTB16;
ret = cdc_ncm_bind_common(usbnet_dev, intf, 1, drvflags);
if (ret)
goto err;
diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
index 766c63b..45226db 100644
--- a/drivers/net/wireless/ath/ath10k/ahb.c
+++ b/drivers/net/wireless/ath/ath10k/ahb.c
@@ -33,6 +33,9 @@
MODULE_DEVICE_TABLE(of, ath10k_ahb_of_match);
+#define QCA4019_SRAM_ADDR 0x000C0000
+#define QCA4019_SRAM_LEN 0x00040000 /* 256 kb */
+
static inline struct ath10k_ahb *ath10k_ahb_priv(struct ath10k *ar)
{
return &((struct ath10k_pci *)ar->drv_priv)->ahb[0];
@@ -699,6 +702,25 @@
return ret;
}
+static u32 ath10k_ahb_qca4019_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+ u32 val = 0, region = addr & 0xfffff;
+
+ val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
+
+ if (region >= QCA4019_SRAM_ADDR && region <=
+ (QCA4019_SRAM_ADDR + QCA4019_SRAM_LEN)) {
+ /* SRAM contents for QCA4019 can be directly accessed and
+ * no conversions are required
+ */
+ val |= region;
+ } else {
+ val |= 0x100000 | region;
+ }
+
+ return val;
+}
+
static const struct ath10k_hif_ops ath10k_ahb_hif_ops = {
.tx_sg = ath10k_pci_hif_tx_sg,
.diag_read = ath10k_pci_hif_diag_read,
@@ -766,6 +788,7 @@
ar_pci->mem_len = ar_ahb->mem_len;
ar_pci->ar = ar;
ar_pci->bus_ops = &ath10k_ahb_bus_ops;
+ ar_pci->targ_cpu_to_ce_addr = ath10k_ahb_qca4019_targ_cpu_to_ce_addr;
ret = ath10k_pci_setup_resource(ar);
if (ret) {
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 410bcda..25b8d50 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -840,29 +840,33 @@
ath10k_pci_rx_post(ar);
}
+static u32 ath10k_pci_qca988x_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+ u32 val = 0, region = addr & 0xfffff;
+
+ val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
+ & 0x7ff) << 21;
+ val |= 0x100000 | region;
+ return val;
+}
+
+static u32 ath10k_pci_qca99x0_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+ u32 val = 0, region = addr & 0xfffff;
+
+ val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
+ val |= 0x100000 | region;
+ return val;
+}
+
static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
- u32 val = 0;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- switch (ar->hw_rev) {
- case ATH10K_HW_QCA988X:
- case ATH10K_HW_QCA9887:
- case ATH10K_HW_QCA6174:
- case ATH10K_HW_QCA9377:
- val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
- CORE_CTRL_ADDRESS) &
- 0x7ff) << 21;
- break;
- case ATH10K_HW_QCA9888:
- case ATH10K_HW_QCA99X0:
- case ATH10K_HW_QCA9984:
- case ATH10K_HW_QCA4019:
- val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
- break;
- }
+ if (WARN_ON_ONCE(!ar_pci->targ_cpu_to_ce_addr))
+ return -ENOTSUPP;
- val |= 0x100000 | (addr & 0xfffff);
- return val;
+ return ar_pci->targ_cpu_to_ce_addr(ar, addr);
}
/*
@@ -3171,6 +3175,7 @@
bool pci_ps;
int (*pci_soft_reset)(struct ath10k *ar);
int (*pci_hard_reset)(struct ath10k *ar);
+ u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);
switch (pci_dev->device) {
case QCA988X_2_0_DEVICE_ID:
@@ -3178,12 +3183,14 @@
pci_ps = false;
pci_soft_reset = ath10k_pci_warm_reset;
pci_hard_reset = ath10k_pci_qca988x_chip_reset;
+ targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
break;
case QCA9887_1_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA9887;
pci_ps = false;
pci_soft_reset = ath10k_pci_warm_reset;
pci_hard_reset = ath10k_pci_qca988x_chip_reset;
+ targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
break;
case QCA6164_2_1_DEVICE_ID:
case QCA6174_2_1_DEVICE_ID:
@@ -3191,30 +3198,35 @@
pci_ps = true;
pci_soft_reset = ath10k_pci_warm_reset;
pci_hard_reset = ath10k_pci_qca6174_chip_reset;
+ targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
break;
case QCA99X0_2_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA99X0;
pci_ps = false;
pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
+ targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
break;
case QCA9984_1_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA9984;
pci_ps = false;
pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
+ targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
break;
case QCA9888_2_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA9888;
pci_ps = false;
pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
+ targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
break;
case QCA9377_1_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA9377;
pci_ps = true;
pci_soft_reset = NULL;
pci_hard_reset = ath10k_pci_qca6174_chip_reset;
+ targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
break;
default:
WARN_ON(1);
@@ -3241,6 +3253,7 @@
ar_pci->bus_ops = &ath10k_pci_bus_ops;
ar_pci->pci_soft_reset = pci_soft_reset;
ar_pci->pci_hard_reset = pci_hard_reset;
+ ar_pci->targ_cpu_to_ce_addr = targ_cpu_to_ce_addr;
ar->id.vendor = pdev->vendor;
ar->id.device = pdev->device;
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index 9854ad5..577bb87 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -238,6 +238,11 @@
/* Chip specific pci full reset function */
int (*pci_hard_reset)(struct ath10k *ar);
+ /* chip specific methods for converting target CPU virtual address
+ * space to CE address space
+ */
+ u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);
+
/* Keep this entry in the last, memory for struct ath10k_ahb is
* allocated (ahb support enabled case) in the continuation of
* this struct.
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index e1d59da..ca8797c 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -1165,11 +1165,12 @@
wcn36xx_dbg(WCN36XX_DBG_MAC, "platform remove\n");
release_firmware(wcn->nv);
- mutex_destroy(&wcn->hal_mutex);
ieee80211_unregister_hw(hw);
iounmap(wcn->dxe_base);
iounmap(wcn->ccu_base);
+
+ mutex_destroy(&wcn->hal_mutex);
ieee80211_free_hw(hw);
return 0;
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index cadb36a..ae5a1b6 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -1143,6 +1143,10 @@
if (wil->tt_data_set)
wmi_set_tt_cfg(wil, &wil->tt_data);
+ if (wil->snr_thresh.enabled)
+ wmi_set_snr_thresh(wil, wil->snr_thresh.omni,
+ wil->snr_thresh.direct);
+
if (wil->platform_ops.notify) {
rc = wil->platform_ops.notify(wil->platform_handle,
WIL_PLATFORM_EVT_FW_RDY);
diff --git a/drivers/net/wireless/ath/wil6210/sysfs.c b/drivers/net/wireless/ath/wil6210/sysfs.c
index b91bf51..7c9a790 100644
--- a/drivers/net/wireless/ath/wil6210/sysfs.c
+++ b/drivers/net/wireless/ath/wil6210/sysfs.c
@@ -268,10 +268,49 @@
wil_fst_link_loss_sysfs_show,
wil_fst_link_loss_sysfs_store);
+static ssize_t
+wil_snr_thresh_sysfs_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct wil6210_priv *wil = dev_get_drvdata(dev);
+ ssize_t len = 0;
+
+ if (wil->snr_thresh.enabled)
+ len = snprintf(buf, PAGE_SIZE, "omni=%d, direct=%d\n",
+ wil->snr_thresh.omni, wil->snr_thresh.direct);
+
+ return len;
+}
+
+static ssize_t
+wil_snr_thresh_sysfs_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct wil6210_priv *wil = dev_get_drvdata(dev);
+ int rc;
+ short omni, direct;
+
+ /* to disable snr threshold, set both omni and direct to 0 */
+ if (sscanf(buf, "%hd %hd", &omni, &direct) != 2)
+ return -EINVAL;
+
+ rc = wmi_set_snr_thresh(wil, omni, direct);
+ if (!rc)
+ rc = count;
+
+ return rc;
+}
+
+static DEVICE_ATTR(snr_thresh, 0644,
+ wil_snr_thresh_sysfs_show,
+ wil_snr_thresh_sysfs_store);
+
static struct attribute *wil6210_sysfs_entries[] = {
&dev_attr_ftm_txrx_offset.attr,
&dev_attr_thermal_throttling.attr,
&dev_attr_fst_link_loss.attr,
+ &dev_attr_snr_thresh.attr,
NULL
};
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 52321f4..bb43f3f 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -751,6 +751,11 @@
struct wil_ftm_priv ftm;
bool tt_data_set;
struct wmi_tt_data tt_data;
+ struct {
+ bool enabled;
+ short omni;
+ short direct;
+ } snr_thresh;
int fw_calib_result;
@@ -1070,4 +1075,5 @@
const u8 *addr,
bool fst_link_loss);
+int wmi_set_snr_thresh(struct wil6210_priv *wil, short omni, short direct);
#endif /* __WIL6210_H__ */
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 205c3ab..9520c39 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -378,7 +378,7 @@
s32 signal;
__le16 fc;
u32 d_len;
- u16 d_status;
+ s16 snr;
if (flen < 0) {
wil_err(wil, "MGMT Rx: short event, len %d\n", len);
@@ -400,13 +400,13 @@
signal = 100 * data->info.rssi;
else
signal = data->info.sqi;
- d_status = le16_to_cpu(data->info.status);
+ snr = le16_to_cpu(data->info.snr); /* 1/4 dB units */
fc = rx_mgmt_frame->frame_control;
wil_dbg_wmi(wil, "MGMT Rx: channel %d MCS %d RSSI %d SQI %d%%\n",
data->info.channel, data->info.mcs, data->info.rssi,
data->info.sqi);
- wil_dbg_wmi(wil, "status 0x%04x len %d fc 0x%04x\n", d_status, d_len,
+ wil_dbg_wmi(wil, "snr %ddB len %d fc 0x%04x\n", snr / 4, d_len,
le16_to_cpu(fc));
wil_dbg_wmi(wil, "qid %d mid %d cid %d\n",
data->info.qid, data->info.mid, data->info.cid);
@@ -434,6 +434,11 @@
wil_dbg_wmi(wil, "Capability info : 0x%04x\n", cap);
+ if (wil->snr_thresh.enabled && snr < wil->snr_thresh.omni) {
+ wil_dbg_wmi(wil, "snr below threshold. dropping\n");
+ return;
+ }
+
bss = cfg80211_inform_bss_frame(wiphy, channel, rx_mgmt_frame,
d_len, signal, GFP_KERNEL);
if (bss) {
@@ -2165,3 +2170,32 @@
spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
return rc;
}
+
+int wmi_set_snr_thresh(struct wil6210_priv *wil, short omni, short direct)
+{
+ int rc;
+ struct wmi_set_connect_snr_thr_cmd cmd = {
+ .enable = true,
+ .omni_snr_thr = cpu_to_le16(omni),
+ .direct_snr_thr = cpu_to_le16(direct),
+ };
+
+ if (!test_bit(WMI_FW_CAPABILITY_CONNECT_SNR_THR, wil->fw_capabilities))
+ return -ENOTSUPP;
+
+ if (omni == 0 && direct == 0)
+ cmd.enable = false;
+
+ wil_dbg_wmi(wil, "%s snr thresh omni=%d, direct=%d (1/4 dB units)\n",
+ cmd.enable ? "enable" : "disable", omni, direct);
+
+ rc = wmi_send(wil, WMI_SET_CONNECT_SNR_THR_CMDID, &cmd, sizeof(cmd));
+ if (rc)
+ return rc;
+
+ wil->snr_thresh.enabled = cmd.enable;
+ wil->snr_thresh.omni = omni;
+ wil->snr_thresh.direct = direct;
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index fcefdd1..809e320 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -71,6 +71,7 @@
WMI_FW_CAPABILITY_RSSI_REPORTING = 12,
WMI_FW_CAPABILITY_SET_SILENT_RSSI_TABLE = 13,
WMI_FW_CAPABILITY_LO_POWER_CALIB_FROM_OTP = 14,
+ WMI_FW_CAPABILITY_CONNECT_SNR_THR = 16,
WMI_FW_CAPABILITY_REF_CLOCK_CONTROL = 18,
WMI_FW_CAPABILITY_MAX,
};
@@ -1822,7 +1823,7 @@
u8 range;
u8 sqi;
__le16 stype;
- __le16 status;
+ __le16 snr;
__le32 len;
/* Not resolved when == 0xFFFFFFFF == > Broadcast to all MIDS */
u8 qid;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 1082f66..bc59aa2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -6581,8 +6581,7 @@
wiphy->bands[NL80211_BAND_5GHZ] = band;
}
}
- err = brcmf_setup_wiphybands(wiphy);
- return err;
+ return 0;
}
static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
@@ -6947,6 +6946,12 @@
goto priv_out;
}
+ err = brcmf_setup_wiphybands(wiphy);
+ if (err) {
+ brcmf_err("Setting wiphy bands failed (%d)\n", err);
+ goto wiphy_unreg_out;
+ }
+
/* If cfg80211 didn't disable 40MHz HT CAP in wiphy_register(),
* setup 40MHz in 2GHz band and enable OBSS scanning.
*/
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c
index e64557c..6f8a4b0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c
@@ -32,16 +32,25 @@
{
void *dump;
size_t ramsize;
+ int err;
ramsize = brcmf_bus_get_ramsize(bus);
- if (ramsize) {
- dump = vzalloc(len + ramsize);
- if (!dump)
- return -ENOMEM;
- memcpy(dump, data, len);
- brcmf_bus_get_memdump(bus, dump + len, ramsize);
- dev_coredumpv(bus->dev, dump, len + ramsize, GFP_KERNEL);
+ if (!ramsize)
+ return -ENOTSUPP;
+
+ dump = vzalloc(len + ramsize);
+ if (!dump)
+ return -ENOMEM;
+
+ memcpy(dump, data, len);
+ err = brcmf_bus_get_memdump(bus, dump + len, ramsize);
+ if (err) {
+ vfree(dump);
+ return err;
}
+
+ dev_coredumpv(bus->dev, dump, len + ramsize, GFP_KERNEL);
+
return 0;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 0556d13..092ae00 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -499,15 +499,17 @@
switch (info->control.vif->type) {
case NL80211_IFTYPE_AP:
/*
- * handle legacy hostapd as well, where station may be added
- * only after assoc.
+ * Handle legacy hostapd as well, where station may be added
+ * only after assoc. Take care of the case where we send a
+ * deauth to a station that we don't have.
*/
- if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc))
+ if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc) ||
+ ieee80211_is_deauth(fc))
return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
if (info->hw_queue == info->control.vif->cab_queue)
return info->hw_queue;
- WARN_ON_ONCE(1);
+ WARN_ONCE(1, "fc=0x%02x", le16_to_cpu(fc));
return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
case NL80211_IFTYPE_P2P_DEVICE:
if (ieee80211_is_mgmt(fc))
diff --git a/drivers/net/wireless/marvell/libertas/cmd.c b/drivers/net/wireless/marvell/libertas/cmd.c
index 301170c..033ff88 100644
--- a/drivers/net/wireless/marvell/libertas/cmd.c
+++ b/drivers/net/wireless/marvell/libertas/cmd.c
@@ -305,7 +305,7 @@
}
lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
- return 0;
+ return ret;
}
static int lbs_wait_for_ds_awake(struct lbs_private *priv)
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
index 4b0bb6b..c636e60 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
@@ -646,10 +646,9 @@
!test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
break;
- if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
+ if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags) ||
+ rt2800usb_entry_txstatus_timeout(entry))
rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);
- else if (rt2800usb_entry_txstatus_timeout(entry))
- rt2x00lib_txdone_noinfo(entry, TXDONE_UNKNOWN);
else
break;
}
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index d9b5b73..a7bdb1f 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -67,6 +67,7 @@
unsigned int rx_stall_timeout_msecs = 60000;
module_param(rx_stall_timeout_msecs, uint, 0444);
+#define MAX_QUEUES_DEFAULT 8
unsigned int xenvif_max_queues;
module_param_named(max_queues, xenvif_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
@@ -1626,11 +1627,12 @@
if (!xen_domain())
return -ENODEV;
- /* Allow as many queues as there are CPUs if user has not
+ /* Allow as many queues as there are CPUs but max. 8 if user has not
* specified a value.
*/
if (xenvif_max_queues == 0)
- xenvif_max_queues = num_online_cpus();
+ xenvif_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
+ num_online_cpus());
if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index d11cdbb..7b5cf6d 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -672,8 +672,9 @@
WARN_ON(!dev->block_cfg_access);
dev->block_cfg_access = 0;
- wake_up_all(&pci_cfg_wait);
raw_spin_unlock_irqrestore(&pci_lock, flags);
+
+ wake_up_all(&pci_cfg_wait);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index 45a89d9..90e0b6f 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -133,6 +133,12 @@
int nports;
};
+struct mvebu_pcie_window {
+ phys_addr_t base;
+ phys_addr_t remap;
+ size_t size;
+};
+
/* Structure representing one PCIe interface */
struct mvebu_pcie_port {
char *name;
@@ -150,10 +156,8 @@
struct mvebu_sw_pci_bridge bridge;
struct device_node *dn;
struct mvebu_pcie *pcie;
- phys_addr_t memwin_base;
- size_t memwin_size;
- phys_addr_t iowin_base;
- size_t iowin_size;
+ struct mvebu_pcie_window memwin;
+ struct mvebu_pcie_window iowin;
u32 saved_pcie_stat;
};
@@ -379,23 +383,45 @@
}
}
+static void mvebu_pcie_set_window(struct mvebu_pcie_port *port,
+ unsigned int target, unsigned int attribute,
+ const struct mvebu_pcie_window *desired,
+ struct mvebu_pcie_window *cur)
+{
+ if (desired->base == cur->base && desired->remap == cur->remap &&
+ desired->size == cur->size)
+ return;
+
+ if (cur->size != 0) {
+ mvebu_pcie_del_windows(port, cur->base, cur->size);
+ cur->size = 0;
+ cur->base = 0;
+
+ /*
+ * If something tries to change the window while it is enabled
+ * the change will not be done atomically. That would be
+ * difficult to do in the general case.
+ */
+ }
+
+ if (desired->size == 0)
+ return;
+
+ mvebu_pcie_add_windows(port, target, attribute, desired->base,
+ desired->size, desired->remap);
+ *cur = *desired;
+}
+
static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
{
- phys_addr_t iobase;
+ struct mvebu_pcie_window desired = {};
/* Are the new iobase/iolimit values invalid? */
if (port->bridge.iolimit < port->bridge.iobase ||
port->bridge.iolimitupper < port->bridge.iobaseupper ||
!(port->bridge.command & PCI_COMMAND_IO)) {
-
- /* If a window was configured, remove it */
- if (port->iowin_base) {
- mvebu_pcie_del_windows(port, port->iowin_base,
- port->iowin_size);
- port->iowin_base = 0;
- port->iowin_size = 0;
- }
-
+ mvebu_pcie_set_window(port, port->io_target, port->io_attr,
+ &desired, &port->iowin);
return;
}
@@ -412,32 +438,27 @@
* specifications. iobase is the bus address, port->iowin_base
* is the CPU address.
*/
- iobase = ((port->bridge.iobase & 0xF0) << 8) |
- (port->bridge.iobaseupper << 16);
- port->iowin_base = port->pcie->io.start + iobase;
- port->iowin_size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) |
- (port->bridge.iolimitupper << 16)) -
- iobase) + 1;
+ desired.remap = ((port->bridge.iobase & 0xF0) << 8) |
+ (port->bridge.iobaseupper << 16);
+ desired.base = port->pcie->io.start + desired.remap;
+ desired.size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) |
+ (port->bridge.iolimitupper << 16)) -
+ desired.remap) +
+ 1;
- mvebu_pcie_add_windows(port, port->io_target, port->io_attr,
- port->iowin_base, port->iowin_size,
- iobase);
+ mvebu_pcie_set_window(port, port->io_target, port->io_attr, &desired,
+ &port->iowin);
}
static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
{
+ struct mvebu_pcie_window desired = {.remap = MVEBU_MBUS_NO_REMAP};
+
/* Are the new membase/memlimit values invalid? */
if (port->bridge.memlimit < port->bridge.membase ||
!(port->bridge.command & PCI_COMMAND_MEMORY)) {
-
- /* If a window was configured, remove it */
- if (port->memwin_base) {
- mvebu_pcie_del_windows(port, port->memwin_base,
- port->memwin_size);
- port->memwin_base = 0;
- port->memwin_size = 0;
- }
-
+ mvebu_pcie_set_window(port, port->mem_target, port->mem_attr,
+ &desired, &port->memwin);
return;
}
@@ -447,14 +468,12 @@
* window to setup, according to the PCI-to-PCI bridge
* specifications.
*/
- port->memwin_base = ((port->bridge.membase & 0xFFF0) << 16);
- port->memwin_size =
- (((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) -
- port->memwin_base + 1;
+ desired.base = ((port->bridge.membase & 0xFFF0) << 16);
+ desired.size = (((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) -
+ desired.base + 1;
- mvebu_pcie_add_windows(port, port->mem_target, port->mem_attr,
- port->memwin_base, port->memwin_size,
- MVEBU_MBUS_NO_REMAP);
+ mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired,
+ &port->memwin);
}
/*
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 3455f75..0e9a9db 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -730,7 +730,7 @@
ret = 0;
out:
kfree(masks);
- return 0;
+ return ret;
}
static void msix_program_entries(struct pci_dev *dev,
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 5419de8..0a96502 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -1466,7 +1466,7 @@
val & BYT_INPUT_EN ? " " : "in",
val & BYT_OUTPUT_EN ? " " : "out",
val & BYT_LEVEL ? "hi" : "lo",
- comm->pad_map[i], comm->pad_map[i] * 32,
+ comm->pad_map[i], comm->pad_map[i] * 16,
conf0 & 0x7,
conf0 & BYT_TRIG_NEG ? " fall" : " ",
conf0 & BYT_TRIG_POS ? " rise" : " ",
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm670.c b/drivers/pinctrl/qcom/pinctrl-sdm670.c
index 6145c75..f7af6da 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm670.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm670.c
@@ -54,6 +54,8 @@
.intr_cfg_reg = base + 0x8 + REG_SIZE * id, \
.intr_status_reg = base + 0xc + REG_SIZE * id, \
.intr_target_reg = base + 0x8 + REG_SIZE * id, \
+ .dir_conn_reg = (base == NORTH) ? base + 0xa3000 : \
+ ((base == SOUTH) ? base + 0xa6000 : base + 0xa4000), \
.mux_bit = 2, \
.pull_bit = 0, \
.drv_bit = 6, \
@@ -68,6 +70,7 @@
.intr_polarity_bit = 1, \
.intr_detection_bit = 2, \
.intr_detection_width = 2, \
+ .dir_conn_en_bit = 8, \
}
#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \
@@ -1651,6 +1654,14 @@
{132, 621},
{133, 622},
{145, 623},
+ {0, 216},
+ {0, 215},
+ {0, 214},
+ {0, 213},
+ {0, 212},
+ {0, 211},
+ {0, 210},
+ {0, 209},
};
static const struct msm_pinctrl_soc_data sdm670_pinctrl = {
@@ -1663,6 +1674,7 @@
.ngpios = 150,
.dir_conn = sdm670_dir_conn,
.n_dir_conns = ARRAY_SIZE(sdm670_dir_conn),
+ .dir_conn_irq_base = 216,
};
static int sdm670_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig
index aef0db2..6117d4d 100644
--- a/drivers/platform/msm/Kconfig
+++ b/drivers/platform/msm/Kconfig
@@ -112,6 +112,27 @@
help
No-Data-Path BAM is used to improve BAM performance.
+config EP_PCIE
+ bool "PCIe Endpoint mode support"
+ select GENERIC_ALLOCATOR
+ help
+ PCIe controller is in endpoint mode.
+ It supports the APIs to clients as a service layer, and allows
+ clients to enable/disable PCIe link, configure the address
+ mapping for the access to host memory, trigger wake interrupt
+ on host side to wake up host, and trigger MSI to host side.
+
+config EP_PCIE_HW
+ bool "PCIe Endpoint HW driver"
+ depends on EP_PCIE
+ help
+ PCIe endpoint HW specific implementation.
+ It supports:
+ 1. link training with Root Complex.
+ 2. Address mapping.
+ 3. Sideband signaling.
+ 4. Power management.
+
config QPNP_COINCELL
tristate "QPNP coincell charger support"
depends on SPMI
diff --git a/drivers/platform/msm/Makefile b/drivers/platform/msm/Makefile
index 27179b9..bee32c2 100644
--- a/drivers/platform/msm/Makefile
+++ b/drivers/platform/msm/Makefile
@@ -7,6 +7,7 @@
obj-$(CONFIG_SPS) += sps/
obj-$(CONFIG_QPNP_COINCELL) += qpnp-coincell.o
obj-$(CONFIG_QPNP_REVID) += qpnp-revid.o
+obj-$(CONFIG_EP_PCIE) += ep_pcie/
obj-$(CONFIG_MSM_MHI_DEV) += mhi_dev/
obj-$(CONFIG_USB_BAM) += usb_bam.o
obj-$(CONFIG_MSM_11AD) += msm_11ad/
diff --git a/drivers/platform/msm/ep_pcie/Makefile b/drivers/platform/msm/ep_pcie/Makefile
new file mode 100644
index 0000000..0567e15
--- /dev/null
+++ b/drivers/platform/msm/ep_pcie/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_EP_PCIE) += ep_pcie.o
+obj-$(CONFIG_EP_PCIE_HW) += ep_pcie_core.o ep_pcie_phy.o ep_pcie_dbg.o
diff --git a/drivers/platform/msm/ep_pcie/ep_pcie.c b/drivers/platform/msm/ep_pcie/ep_pcie.c
new file mode 100644
index 0000000..ecff4c4
--- /dev/null
+++ b/drivers/platform/msm/ep_pcie/ep_pcie.c
@@ -0,0 +1,230 @@
+/* Copyright (c) 2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * MSM PCIe endpoint service layer.
+ */
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include "ep_pcie_com.h"
+
+LIST_HEAD(head);
+
+int ep_pcie_register_drv(struct ep_pcie_hw *handle)
+{
+ struct ep_pcie_hw *present;
+ bool new = true;
+
+ if (!handle) {
+ pr_err("ep_pcie:%s: the input handle is NULL.",
+ __func__);
+ return -EINVAL;
+ }
+
+ list_for_each_entry(present, &head, node) {
+ if (present->device_id == handle->device_id) {
+ new = false;
+ break;
+ }
+ }
+
+ if (new) {
+ list_add(&handle->node, &head);
+ pr_debug("ep_pcie:%s: register a new driver for device 0x%x.",
+ __func__, handle->device_id);
+ return 0;
+ }
+ pr_debug(
+ "ep_pcie:%s: driver to register for device 0x%x has already existed.",
+ __func__, handle->device_id);
+ return -EEXIST;
+}
+EXPORT_SYMBOL(ep_pcie_register_drv);
+
+int ep_pcie_deregister_drv(struct ep_pcie_hw *handle)
+{
+ struct ep_pcie_hw *present;
+ bool found = false;
+
+ if (!handle) {
+ pr_err("ep_pcie:%s: the input handle is NULL.",
+ __func__);
+ return -EINVAL;
+ }
+
+ list_for_each_entry(present, &head, node) {
+ if (present->device_id == handle->device_id) {
+ found = true;
+ list_del(&handle->node);
+ break;
+ }
+ }
+
+ if (found) {
+ pr_debug("ep_pcie:%s: deregistered driver for device 0x%x.",
+ __func__, handle->device_id);
+ return 0;
+ }
+ pr_err("ep_pcie:%s: driver for device 0x%x does not exist.",
+ __func__, handle->device_id);
+ return -EEXIST;
+}
+EXPORT_SYMBOL(ep_pcie_deregister_drv);
+
+struct ep_pcie_hw *ep_pcie_get_phandle(u32 id)
+{
+ struct ep_pcie_hw *present;
+
+ list_for_each_entry(present, &head, node) {
+ if (present->device_id == id) {
+ pr_debug("ep_pcie:%s: found driver for device 0x%x.",
+ __func__, id);
+ return present;
+ }
+ }
+
+ pr_debug("ep_pcie:%s: driver for device 0x%x does not exist.",
+ __func__, id);
+ return NULL;
+}
+EXPORT_SYMBOL(ep_pcie_get_phandle);
+
+int ep_pcie_register_event(struct ep_pcie_hw *phandle,
+ struct ep_pcie_register_event *reg)
+{
+ if (phandle)
+ return phandle->register_event(reg);
+
+ return ep_pcie_core_register_event(reg);
+}
+EXPORT_SYMBOL(ep_pcie_register_event);
+
+int ep_pcie_deregister_event(struct ep_pcie_hw *phandle)
+{
+ if (phandle)
+ return phandle->deregister_event();
+
+ pr_err("ep_pcie:%s: the input driver handle is NULL.",
+ __func__);
+ return -EINVAL;
+}
+EXPORT_SYMBOL(ep_pcie_deregister_event);
+
+enum ep_pcie_link_status ep_pcie_get_linkstatus(struct ep_pcie_hw *phandle)
+{
+ if (phandle)
+ return phandle->get_linkstatus();
+
+ pr_err("ep_pcie:%s: the input driver handle is NULL.",
+ __func__);
+ return -EINVAL;
+}
+EXPORT_SYMBOL(ep_pcie_get_linkstatus);
+
+int ep_pcie_config_outbound_iatu(struct ep_pcie_hw *phandle,
+ struct ep_pcie_iatu entries[],
+ u32 num_entries)
+{
+ if (phandle)
+ return phandle->config_outbound_iatu(entries, num_entries);
+
+ pr_err("ep_pcie:%s: the input driver handle is NULL.",
+ __func__);
+ return -EINVAL;
+}
+EXPORT_SYMBOL(ep_pcie_config_outbound_iatu);
+
+int ep_pcie_get_msi_config(struct ep_pcie_hw *phandle,
+ struct ep_pcie_msi_config *cfg)
+{
+ if (phandle)
+ return phandle->get_msi_config(cfg);
+
+ pr_err("ep_pcie:%s: the input driver handle is NULL.",
+ __func__);
+ return -EINVAL;
+}
+EXPORT_SYMBOL(ep_pcie_get_msi_config);
+
+int ep_pcie_trigger_msi(struct ep_pcie_hw *phandle, u32 idx)
+{
+ if (phandle)
+ return phandle->trigger_msi(idx);
+
+ pr_err("ep_pcie:%s: the input driver handle is NULL.",
+ __func__);
+ return -EINVAL;
+}
+EXPORT_SYMBOL(ep_pcie_trigger_msi);
+
+int ep_pcie_wakeup_host(struct ep_pcie_hw *phandle)
+{
+ if (phandle)
+ return phandle->wakeup_host();
+
+ pr_err("ep_pcie:%s: the input driver handle is NULL.",
+ __func__);
+ return -EINVAL;
+}
+EXPORT_SYMBOL(ep_pcie_wakeup_host);
+
+int ep_pcie_config_db_routing(struct ep_pcie_hw *phandle,
+ struct ep_pcie_db_config chdb_cfg,
+ struct ep_pcie_db_config erdb_cfg)
+{
+ if (phandle)
+ return phandle->config_db_routing(chdb_cfg, erdb_cfg);
+
+ pr_err("ep_pcie:%s: the input driver handle is NULL.",
+ __func__);
+ return -EINVAL;
+}
+EXPORT_SYMBOL(ep_pcie_config_db_routing);
+
+int ep_pcie_enable_endpoint(struct ep_pcie_hw *phandle,
+ enum ep_pcie_options opt)
+{
+ if (phandle)
+ return phandle->enable_endpoint(opt);
+
+ pr_err("ep_pcie:%s: the input driver handle is NULL.",
+ __func__);
+ return -EINVAL;
+}
+EXPORT_SYMBOL(ep_pcie_enable_endpoint);
+
+int ep_pcie_disable_endpoint(struct ep_pcie_hw *phandle)
+{
+ if (phandle)
+ return phandle->disable_endpoint();
+
+ pr_err("ep_pcie:%s: the input driver handle is NULL.",
+ __func__);
+ return -EINVAL;
+}
+EXPORT_SYMBOL(ep_pcie_disable_endpoint);
+
+int ep_pcie_mask_irq_event(struct ep_pcie_hw *phandle,
+ enum ep_pcie_irq_event event,
+ bool enable)
+{
+ if (phandle)
+ return phandle->mask_irq_event(event, enable);
+
+ pr_err("ep_pcie:%s: the input driver handle is NULL.", __func__);
+ return -EINVAL;
+}
+EXPORT_SYMBOL(ep_pcie_mask_irq_event);
diff --git a/drivers/platform/msm/ep_pcie/ep_pcie_com.h b/drivers/platform/msm/ep_pcie/ep_pcie_com.h
new file mode 100644
index 0000000..7553a24
--- /dev/null
+++ b/drivers/platform/msm/ep_pcie/ep_pcie_com.h
@@ -0,0 +1,391 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __EP_PCIE_COM_H
+#define __EP_PCIE_COM_H
+
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/compiler.h>
+#include <linux/ipc_logging.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/msm_ep_pcie.h>
+
+#define PCIE20_PARF_SYS_CTRL 0x00
+#define PCIE20_PARF_DB_CTRL 0x10
+#define PCIE20_PARF_PM_CTRL 0x20
+#define PCIE20_PARF_PM_STTS 0x24
+#define PCIE20_PARF_PHY_CTRL 0x40
+#define PCIE20_PARF_PHY_REFCLK 0x4C
+#define PCIE20_PARF_CONFIG_BITS 0x50
+#define PCIE20_PARF_TEST_BUS 0xE4
+#define PCIE20_PARF_MHI_BASE_ADDR_LOWER 0x178
+#define PCIE20_PARF_MHI_BASE_ADDR_UPPER 0x17c
+#define PCIE20_PARF_MSI_GEN 0x188
+#define PCIE20_PARF_DEBUG_INT_EN 0x190
+#define PCIE20_PARF_MHI_IPA_DBS 0x198
+#define PCIE20_PARF_MHI_IPA_CDB_TARGET_LOWER 0x19C
+#define PCIE20_PARF_MHI_IPA_EDB_TARGET_LOWER 0x1A0
+#define PCIE20_PARF_AXI_MSTR_RD_HALT_NO_WRITES 0x1A4
+#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x1A8
+#define PCIE20_PARF_Q2A_FLUSH 0x1AC
+#define PCIE20_PARF_LTSSM 0x1B0
+#define PCIE20_PARF_CFG_BITS 0x210
+#define PCIE20_PARF_LTR_MSI_EXIT_L1SS 0x214
+#define PCIE20_PARF_INT_ALL_STATUS 0x224
+#define PCIE20_PARF_INT_ALL_CLEAR 0x228
+#define PCIE20_PARF_INT_ALL_MASK 0x22C
+#define PCIE20_PARF_SLV_ADDR_MSB_CTRL 0x2C0
+#define PCIE20_PARF_DBI_BASE_ADDR 0x350
+#define PCIE20_PARF_DBI_BASE_ADDR_HI 0x354
+#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x358
+#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE_HI 0x35C
+#define PCIE20_PARF_DEVICE_TYPE 0x1000
+
+#define PCIE20_ELBI_VERSION 0x00
+#define PCIE20_ELBI_SYS_CTRL 0x04
+#define PCIE20_ELBI_SYS_STTS 0x08
+#define PCIE20_ELBI_CS2_ENABLE 0xA4
+
+#define PCIE20_DEVICE_ID_VENDOR_ID 0x00
+#define PCIE20_COMMAND_STATUS 0x04
+#define PCIE20_CLASS_CODE_REVISION_ID 0x08
+#define PCIE20_BIST_HDR_TYPE 0x0C
+#define PCIE20_BAR0 0x10
+#define PCIE20_SUBSYSTEM 0x2c
+#define PCIE20_CAP_ID_NXT_PTR 0x40
+#define PCIE20_CON_STATUS 0x44
+#define PCIE20_MSI_CAP_ID_NEXT_CTRL 0x50
+#define PCIE20_MSI_LOWER 0x54
+#define PCIE20_MSI_UPPER 0x58
+#define PCIE20_MSI_DATA 0x5C
+#define PCIE20_MSI_MASK 0x60
+#define PCIE20_DEVICE_CAPABILITIES 0x74
+#define PCIE20_MASK_EP_L1_ACCPT_LATENCY 0xE00
+#define PCIE20_MASK_EP_L0S_ACCPT_LATENCY 0x1C0
+#define PCIE20_LINK_CAPABILITIES 0x7C
+#define PCIE20_MASK_CLOCK_POWER_MAN 0x40000
+#define PCIE20_MASK_L1_EXIT_LATENCY 0x38000
+#define PCIE20_MASK_L0S_EXIT_LATENCY 0x7000
+#define PCIE20_CAP_LINKCTRLSTATUS 0x80
+#define PCIE20_DEVICE_CONTROL2_STATUS2 0x98
+#define PCIE20_LINK_CONTROL2_LINK_STATUS2 0xA0
+#define PCIE20_L1SUB_CAPABILITY 0x154
+#define PCIE20_L1SUB_CONTROL1 0x158
+#define PCIE20_ACK_F_ASPM_CTRL_REG 0x70C
+#define PCIE20_MASK_ACK_N_FTS 0xff00
+#define PCIE20_MISC_CONTROL_1 0x8BC
+
+#define PCIE20_PLR_IATU_VIEWPORT 0x900
+#define PCIE20_PLR_IATU_CTRL1 0x904
+#define PCIE20_PLR_IATU_CTRL2 0x908
+#define PCIE20_PLR_IATU_LBAR 0x90C
+#define PCIE20_PLR_IATU_UBAR 0x910
+#define PCIE20_PLR_IATU_LAR 0x914
+#define PCIE20_PLR_IATU_LTAR 0x918
+#define PCIE20_PLR_IATU_UTAR 0x91c
+
+#define PCIE20_MHICFG 0x110
+#define PCIE20_BHI_EXECENV 0x228
+
+#define PCIE20_AUX_CLK_FREQ_REG 0xB40
+
+#define PERST_TIMEOUT_US_MIN 1000
+#define PERST_TIMEOUT_US_MAX 1000
+#define PERST_CHECK_MAX_COUNT 30000
+#define LINK_UP_TIMEOUT_US_MIN 1000
+#define LINK_UP_TIMEOUT_US_MAX 1000
+#define LINK_UP_CHECK_MAX_COUNT 30000
+#define BME_TIMEOUT_US_MIN 1000
+#define BME_TIMEOUT_US_MAX 1000
+#define BME_CHECK_MAX_COUNT 30000
+#define PHY_STABILIZATION_DELAY_US_MIN 1000
+#define PHY_STABILIZATION_DELAY_US_MAX 1000
+#define REFCLK_STABILIZATION_DELAY_US_MIN 1000
+#define REFCLK_STABILIZATION_DELAY_US_MAX 1000
+#define PHY_READY_TIMEOUT_COUNT 30000
+#define MSI_EXIT_L1SS_WAIT 10
+#define MSI_EXIT_L1SS_WAIT_MAX_COUNT 100
+#define XMLH_LINK_UP 0x400
+#define PARF_XMLH_LINK_UP 0x40000000
+
+#define MAX_PROP_SIZE 32
+#define MAX_MSG_LEN 80
+#define MAX_NAME_LEN 80
+#define MAX_IATU_ENTRY_NUM 2
+
+#define EP_PCIE_LOG_PAGES 50
+#define EP_PCIE_MAX_VREG 2
+#define EP_PCIE_MAX_CLK 5
+#define EP_PCIE_MAX_PIPE_CLK 1
+#define EP_PCIE_MAX_RESET 2
+
+#define EP_PCIE_ERROR -30655
+#define EP_PCIE_LINK_DOWN 0xFFFFFFFF
+
+#define EP_PCIE_OATU_INDEX_MSI 1
+#define EP_PCIE_OATU_INDEX_CTRL 2
+#define EP_PCIE_OATU_INDEX_DATA 3
+
+#define EP_PCIE_OATU_UPPER 0x100
+
+#define EP_PCIE_GEN_DBG(x...) do { \
+ if (ep_pcie_get_debug_mask()) \
+ pr_alert(x); \
+ else \
+ pr_debug(x); \
+ } while (0)
+
+#define EP_PCIE_DBG(dev, fmt, arg...) do { \
+ if ((dev)->ipc_log_ful) \
+ ipc_log_string((dev)->ipc_log_ful, "%s: " fmt, __func__, arg); \
+ if (ep_pcie_get_debug_mask()) \
+ pr_alert("%s: " fmt, __func__, arg); \
+ } while (0)
+
+#define EP_PCIE_DBG2(dev, fmt, arg...) do { \
+ if ((dev)->ipc_log_sel) \
+ ipc_log_string((dev)->ipc_log_sel, \
+ "DBG1:%s: " fmt, __func__, arg); \
+ if ((dev)->ipc_log_ful) \
+ ipc_log_string((dev)->ipc_log_ful, \
+ "DBG2:%s: " fmt, __func__, arg); \
+ if (ep_pcie_get_debug_mask()) \
+ pr_alert("%s: " fmt, __func__, arg); \
+ } while (0)
+
+#define EP_PCIE_DBG_FS(fmt, arg...) pr_alert("%s: " fmt, __func__, arg)
+
+#define EP_PCIE_DUMP(dev, fmt, arg...) do { \
+ if ((dev)->ipc_log_dump) \
+ ipc_log_string((dev)->ipc_log_dump, \
+ "DUMP:%s: " fmt, __func__, arg); \
+ if (ep_pcie_get_debug_mask()) \
+ pr_alert("%s: " fmt, __func__, arg); \
+ } while (0)
+
+#define EP_PCIE_INFO(dev, fmt, arg...) do { \
+ if ((dev)->ipc_log_sel) \
+ ipc_log_string((dev)->ipc_log_sel, \
+ "INFO:%s: " fmt, __func__, arg); \
+ if ((dev)->ipc_log_ful) \
+ ipc_log_string((dev)->ipc_log_ful, "%s: " fmt, __func__, arg); \
+ pr_info("%s: " fmt, __func__, arg); \
+ } while (0)
+
+#define EP_PCIE_ERR(dev, fmt, arg...) do { \
+ if ((dev)->ipc_log_sel) \
+ ipc_log_string((dev)->ipc_log_sel, \
+ "ERR:%s: " fmt, __func__, arg); \
+ if ((dev)->ipc_log_ful) \
+ ipc_log_string((dev)->ipc_log_ful, "%s: " fmt, __func__, arg); \
+ pr_err("%s: " fmt, __func__, arg); \
+ } while (0)
+
+enum ep_pcie_res {
+ EP_PCIE_RES_PARF,
+ EP_PCIE_RES_PHY,
+ EP_PCIE_RES_MMIO,
+ EP_PCIE_RES_MSI,
+ EP_PCIE_RES_DM_CORE,
+ EP_PCIE_RES_ELBI,
+ EP_PCIE_MAX_RES,
+};
+
+enum ep_pcie_irq {
+ EP_PCIE_INT_PM_TURNOFF,
+ EP_PCIE_INT_DSTATE_CHANGE,
+ EP_PCIE_INT_L1SUB_TIMEOUT,
+ EP_PCIE_INT_LINK_UP,
+ EP_PCIE_INT_LINK_DOWN,
+ EP_PCIE_INT_BRIDGE_FLUSH_N,
+ EP_PCIE_INT_BME,
+ EP_PCIE_INT_GLOBAL,
+ EP_PCIE_MAX_IRQ,
+};
+
+enum ep_pcie_gpio {
+ EP_PCIE_GPIO_PERST,
+ EP_PCIE_GPIO_WAKE,
+ EP_PCIE_GPIO_CLKREQ,
+ EP_PCIE_GPIO_MDM2AP,
+ EP_PCIE_MAX_GPIO,
+};
+
+struct ep_pcie_gpio_info_t {
+ char *name;
+ u32 num;
+ bool out;
+ u32 on;
+ u32 init;
+};
+
+struct ep_pcie_vreg_info_t {
+ struct regulator *hdl;
+ char *name;
+ u32 max_v;
+ u32 min_v;
+ u32 opt_mode;
+ bool required;
+};
+
+struct ep_pcie_clk_info_t {
+ struct clk *hdl;
+ char *name;
+ u32 freq;
+ bool required;
+};
+
+struct ep_pcie_reset_info_t {
+ struct reset_control *hdl;
+ char *name;
+ bool required;
+};
+
+struct ep_pcie_res_info_t {
+ char *name;
+ struct resource *resource;
+ void __iomem *base;
+};
+
+struct ep_pcie_irq_info_t {
+ char *name;
+ u32 num;
+};
+
+/* phy info structure */
+struct ep_pcie_phy_info_t {
+ u32 offset;
+ u32 val;
+ u32 delay;
+ u32 direction;
+};
+
+/* pcie endpoint device structure */
+struct ep_pcie_dev_t {
+ struct platform_device *pdev;
+ struct regulator *gdsc;
+ struct ep_pcie_vreg_info_t vreg[EP_PCIE_MAX_VREG];
+ struct ep_pcie_gpio_info_t gpio[EP_PCIE_MAX_GPIO];
+ struct ep_pcie_clk_info_t clk[EP_PCIE_MAX_CLK];
+ struct ep_pcie_clk_info_t pipeclk[EP_PCIE_MAX_PIPE_CLK];
+ struct ep_pcie_reset_info_t reset[EP_PCIE_MAX_RESET];
+ struct ep_pcie_irq_info_t irq[EP_PCIE_MAX_IRQ];
+ struct ep_pcie_res_info_t res[EP_PCIE_MAX_RES];
+
+ void __iomem *parf;
+ void __iomem *phy;
+ void __iomem *mmio;
+ void __iomem *msi;
+ void __iomem *dm_core;
+ void __iomem *elbi;
+
+ struct msm_bus_scale_pdata *bus_scale_table;
+ u32 bus_client;
+ u32 link_speed;
+ bool active_config;
+ bool aggregated_irq;
+ bool mhi_a7_irq;
+ u32 dbi_base_reg;
+ u32 slv_space_reg;
+ u32 phy_status_reg;
+ u32 phy_init_len;
+ struct ep_pcie_phy_info_t *phy_init;
+ bool perst_enum;
+
+ u32 rev;
+ u32 phy_rev;
+ void *ipc_log_sel;
+ void *ipc_log_ful;
+ void *ipc_log_dump;
+ struct mutex setup_mtx;
+ struct mutex ext_mtx;
+ spinlock_t ext_lock;
+ unsigned long ext_save_flags;
+
+ spinlock_t isr_lock;
+ unsigned long isr_save_flags;
+ ulong linkdown_counter;
+ ulong linkup_counter;
+ ulong bme_counter;
+ ulong pm_to_counter;
+ ulong d0_counter;
+ ulong d3_counter;
+ ulong perst_ast_counter;
+ ulong perst_deast_counter;
+ ulong wake_counter;
+ ulong msi_counter;
+ ulong global_irq_counter;
+
+ bool dump_conf;
+
+ bool enumerated;
+ enum ep_pcie_link_status link_status;
+ bool perst_deast;
+ bool power_on;
+ bool suspending;
+ bool l23_ready;
+ bool l1ss_enabled;
+ struct ep_pcie_msi_config msi_cfg;
+
+ struct ep_pcie_register_event *event_reg;
+ struct work_struct handle_perst_work;
+ struct work_struct handle_bme_work;
+};
+
+extern struct ep_pcie_dev_t ep_pcie_dev;
+extern struct ep_pcie_hw hw_drv;
+
+static inline void ep_pcie_write_mask(void __iomem *addr,
+ u32 clear_mask, u32 set_mask)
+{
+ u32 val;
+
+ val = (readl_relaxed(addr) & ~clear_mask) | set_mask;
+ writel_relaxed(val, addr);
+ /* ensure register write goes through before next register operation */
+ wmb();
+}
+
+static inline void ep_pcie_write_reg(void __iomem *base, u32 offset, u32 value)
+{
+ writel_relaxed(value, base + offset);
+ /* ensure register write goes through before next register operation */
+ wmb();
+}
+
+static inline void ep_pcie_write_reg_field(void __iomem *base, u32 offset,
+ const u32 mask, u32 val)
+{
+ u32 shift = find_first_bit((void *)&mask, 32);
+ u32 tmp = readl_relaxed(base + offset);
+
+ tmp &= ~mask; /* clear written bits */
+ val = tmp | (val << shift);
+ writel_relaxed(val, base + offset);
+ /* ensure register write goes through before next register operation */
+ wmb();
+}
+
+extern int ep_pcie_core_register_event(struct ep_pcie_register_event *reg);
+extern int ep_pcie_get_debug_mask(void);
+extern void ep_pcie_phy_init(struct ep_pcie_dev_t *dev);
+extern bool ep_pcie_phy_is_ready(struct ep_pcie_dev_t *dev);
+extern void ep_pcie_reg_dump(struct ep_pcie_dev_t *dev, u32 sel, bool linkdown);
+extern void ep_pcie_debugfs_init(struct ep_pcie_dev_t *ep_dev);
+extern void ep_pcie_debugfs_exit(void);
+
+#endif
diff --git a/drivers/platform/msm/ep_pcie/ep_pcie_core.c b/drivers/platform/msm/ep_pcie/ep_pcie_core.c
new file mode 100644
index 0000000..88c03fc
--- /dev/null
+++ b/drivers/platform/msm/ep_pcie/ep_pcie_core.c
@@ -0,0 +1,2554 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * MSM PCIe endpoint core driver.
+ */
+
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/of_gpio.h>
+#include <linux/clk/qcom.h>
+#include <linux/reset.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+
+#include "ep_pcie_com.h"
+
+/* debug mask sys interface */
+static int ep_pcie_debug_mask;
+static int ep_pcie_debug_keep_resource;
+static u32 ep_pcie_bar0_address;
+module_param_named(debug_mask, ep_pcie_debug_mask,
+ int, 0664);
+module_param_named(debug_keep_resource, ep_pcie_debug_keep_resource,
+ int, 0664);
+module_param_named(bar0_address, ep_pcie_bar0_address,
+ int, 0664);
+
+struct ep_pcie_dev_t ep_pcie_dev = {0};
+
+static struct ep_pcie_vreg_info_t ep_pcie_vreg_info[EP_PCIE_MAX_VREG] = {
+ {NULL, "vreg-1.8", 1800000, 1800000, 14000, true},
+ {NULL, "vreg-0.9", 1000000, 1000000, 40000, true}
+};
+
+static struct ep_pcie_gpio_info_t ep_pcie_gpio_info[EP_PCIE_MAX_GPIO] = {
+ {"perst-gpio", 0, 0, 0, 1},
+ {"wake-gpio", 0, 1, 0, 1},
+ {"clkreq-gpio", 0, 1, 0, 0},
+ {"mdm2apstatus-gpio", 0, 1, 1, 0}
+};
+
+static struct ep_pcie_clk_info_t
+ ep_pcie_clk_info[EP_PCIE_MAX_CLK] = {
+ {NULL, "pcie_0_cfg_ahb_clk", 0, true},
+ {NULL, "pcie_0_mstr_axi_clk", 0, true},
+ {NULL, "pcie_0_slv_axi_clk", 0, true},
+ {NULL, "pcie_0_aux_clk", 1000000, true},
+ {NULL, "pcie_0_ldo", 0, true},
+};
+
+static struct ep_pcie_clk_info_t
+ ep_pcie_pipe_clk_info[EP_PCIE_MAX_PIPE_CLK] = {
+ {NULL, "pcie_0_pipe_clk", 62500000, true}
+};
+
+static struct ep_pcie_reset_info_t
+ ep_pcie_reset_info[EP_PCIE_MAX_RESET] = {
+ {NULL, "pcie_0_core_reset", false},
+ {NULL, "pcie_0_phy_reset", false},
+};
+
+static const struct ep_pcie_res_info_t ep_pcie_res_info[EP_PCIE_MAX_RES] = {
+ {"parf", 0, 0},
+ {"phy", 0, 0},
+ {"mmio", 0, 0},
+ {"msi", 0, 0},
+ {"dm_core", 0, 0},
+ {"elbi", 0, 0}
+};
+
+static const struct ep_pcie_irq_info_t ep_pcie_irq_info[EP_PCIE_MAX_IRQ] = {
+ {"int_pm_turnoff", 0},
+ {"int_dstate_change", 0},
+ {"int_l1sub_timeout", 0},
+ {"int_link_up", 0},
+ {"int_link_down", 0},
+ {"int_bridge_flush_n", 0},
+ {"int_bme", 0},
+ {"int_global", 0}
+};
+
+int ep_pcie_get_debug_mask(void)
+{
+ return ep_pcie_debug_mask;
+}
+
+static bool ep_pcie_confirm_linkup(struct ep_pcie_dev_t *dev,
+ bool check_sw_stts)
+{
+ u32 val;
+
+ if (check_sw_stts && (dev->link_status != EP_PCIE_LINK_ENABLED)) {
+ EP_PCIE_DBG(dev, "PCIe V%d: The link is not enabled.\n",
+ dev->rev);
+ return false;
+ }
+
+ val = readl_relaxed(dev->dm_core);
+ EP_PCIE_DBG(dev, "PCIe V%d: device ID and vender ID are 0x%x.\n",
+ dev->rev, val);
+ if (val == EP_PCIE_LINK_DOWN) {
+ EP_PCIE_ERR(dev,
+ "PCIe V%d: The link is not really up; device ID and vender ID are 0x%x.\n",
+ dev->rev, val);
+ return false;
+ }
+
+ return true;
+}
+
+static int ep_pcie_gpio_init(struct ep_pcie_dev_t *dev)
+{
+ int i, rc = 0;
+ struct ep_pcie_gpio_info_t *info;
+
+ EP_PCIE_DBG(dev, "PCIe V%d\n", dev->rev);
+
+ for (i = 0; i < EP_PCIE_MAX_GPIO; i++) {
+ info = &dev->gpio[i];
+
+ if (!info->num) {
+ if (i == EP_PCIE_GPIO_MDM2AP) {
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: gpio %s does not exist.\n",
+ dev->rev, info->name);
+ continue;
+ } else {
+ EP_PCIE_ERR(dev,
+ "PCIe V%d: the number of gpio %s is invalid\n",
+ dev->rev, info->name);
+ rc = -EINVAL;
+ break;
+ }
+ }
+
+ rc = gpio_request(info->num, info->name);
+ if (rc) {
+ EP_PCIE_ERR(dev, "PCIe V%d: can't get gpio %s; %d\n",
+ dev->rev, info->name, rc);
+ break;
+ }
+
+ if (info->out)
+ rc = gpio_direction_output(info->num, info->init);
+ else
+ rc = gpio_direction_input(info->num);
+ if (rc) {
+ EP_PCIE_ERR(dev,
+ "PCIe V%d: can't set direction for GPIO %s:%d\n",
+ dev->rev, info->name, rc);
+ gpio_free(info->num);
+ break;
+ }
+ }
+
+ if (rc)
+ while (i--)
+ gpio_free(dev->gpio[i].num);
+
+ return rc;
+}
+
+static void ep_pcie_gpio_deinit(struct ep_pcie_dev_t *dev)
+{
+ int i;
+
+ EP_PCIE_DBG(dev, "PCIe V%d\n", dev->rev);
+
+ for (i = 0; i < EP_PCIE_MAX_GPIO; i++)
+ gpio_free(dev->gpio[i].num);
+}
+
+static int ep_pcie_vreg_init(struct ep_pcie_dev_t *dev)
+{
+ int i, rc = 0;
+ struct regulator *vreg;
+ struct ep_pcie_vreg_info_t *info;
+
+ EP_PCIE_DBG(dev, "PCIe V%d\n", dev->rev);
+
+ for (i = 0; i < EP_PCIE_MAX_VREG; i++) {
+ info = &dev->vreg[i];
+ vreg = info->hdl;
+
+ if (!vreg) {
+ EP_PCIE_ERR(dev,
+ "PCIe V%d: handle of Vreg %s is NULL\n",
+ dev->rev, info->name);
+ rc = -EINVAL;
+ break;
+ }
+
+ EP_PCIE_DBG(dev, "PCIe V%d: Vreg %s is being enabled\n",
+ dev->rev, info->name);
+ if (info->max_v) {
+ rc = regulator_set_voltage(vreg,
+ info->min_v, info->max_v);
+ if (rc) {
+ EP_PCIE_ERR(dev,
+ "PCIe V%d: can't set voltage for %s: %d\n",
+ dev->rev, info->name, rc);
+ break;
+ }
+ }
+
+ if (info->opt_mode) {
+ rc = regulator_set_load(vreg, info->opt_mode);
+ if (rc < 0) {
+ EP_PCIE_ERR(dev,
+ "PCIe V%d: can't set mode for %s: %d\n",
+ dev->rev, info->name, rc);
+ break;
+ }
+ }
+
+ rc = regulator_enable(vreg);
+ if (rc) {
+ EP_PCIE_ERR(dev,
+ "PCIe V%d: can't enable regulator %s: %d\n",
+ dev->rev, info->name, rc);
+ break;
+ }
+ }
+
+ if (rc)
+ while (i--) {
+ struct regulator *hdl = dev->vreg[i].hdl;
+
+ if (hdl)
+ regulator_disable(hdl);
+ }
+
+ return rc;
+}
+
+static void ep_pcie_vreg_deinit(struct ep_pcie_dev_t *dev)
+{
+ int i;
+
+ EP_PCIE_DBG(dev, "PCIe V%d\n", dev->rev);
+
+ for (i = EP_PCIE_MAX_VREG - 1; i >= 0; i--) {
+ if (dev->vreg[i].hdl) {
+ EP_PCIE_DBG(dev, "Vreg %s is being disabled\n",
+ dev->vreg[i].name);
+ regulator_disable(dev->vreg[i].hdl);
+ }
+ }
+}
+
+static int ep_pcie_clk_init(struct ep_pcie_dev_t *dev)
+{
+ int i, rc = 0;
+ struct ep_pcie_clk_info_t *info;
+ struct ep_pcie_reset_info_t *reset_info;
+
+ EP_PCIE_DBG(dev, "PCIe V%d\n", dev->rev);
+
+ rc = regulator_enable(dev->gdsc);
+
+ if (rc) {
+ EP_PCIE_ERR(dev, "PCIe V%d: fail to enable GDSC for %s\n",
+ dev->rev, dev->pdev->name);
+ return rc;
+ }
+
+ if (dev->bus_client) {
+ rc = msm_bus_scale_client_update_request(dev->bus_client, 1);
+ if (rc) {
+ EP_PCIE_ERR(dev,
+ "PCIe V%d: fail to set bus bandwidth:%d.\n",
+ dev->rev, rc);
+ return rc;
+ }
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: set bus bandwidth.\n",
+ dev->rev);
+ }
+
+ for (i = 0; i < EP_PCIE_MAX_CLK; i++) {
+ info = &dev->clk[i];
+
+ if (!info->hdl) {
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: handle of Clock %s is NULL\n",
+ dev->rev, info->name);
+ continue;
+ }
+
+ if (info->freq) {
+ rc = clk_set_rate(info->hdl, info->freq);
+ if (rc) {
+ EP_PCIE_ERR(dev,
+ "PCIe V%d: can't set rate for clk %s: %d.\n",
+ dev->rev, info->name, rc);
+ break;
+ }
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: set rate for clk %s.\n",
+ dev->rev, info->name);
+ }
+
+ rc = clk_prepare_enable(info->hdl);
+
+ if (rc)
+ EP_PCIE_ERR(dev, "PCIe V%d: failed to enable clk %s\n",
+ dev->rev, info->name);
+ else
+ EP_PCIE_DBG(dev, "PCIe V%d: enable clk %s.\n",
+ dev->rev, info->name);
+ }
+
+ if (rc) {
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: disable clocks for error handling.\n",
+ dev->rev);
+ while (i--) {
+ struct clk *hdl = dev->clk[i].hdl;
+
+ if (hdl)
+ clk_disable_unprepare(hdl);
+ }
+
+ regulator_disable(dev->gdsc);
+ }
+
+ for (i = 0; i < EP_PCIE_MAX_RESET; i++) {
+ reset_info = &dev->reset[i];
+ if (reset_info->hdl) {
+ rc = reset_control_assert(reset_info->hdl);
+ if (rc)
+ EP_PCIE_ERR(dev,
+ "PCIe V%d: failed to assert reset for %s.\n",
+ dev->rev, reset_info->name);
+ else
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: successfully asserted reset for %s.\n",
+ dev->rev, reset_info->name);
+
+ /* add a 1ms delay to ensure the reset is asserted */
+ usleep_range(1000, 1005);
+
+ rc = reset_control_deassert(reset_info->hdl);
+ if (rc)
+ EP_PCIE_ERR(dev,
+ "PCIe V%d: failed to deassert reset for %s.\n",
+ dev->rev, reset_info->name);
+ else
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: successfully deasserted reset for %s.\n",
+ dev->rev, reset_info->name);
+ }
+ }
+
+ return rc;
+}
+
+static void ep_pcie_clk_deinit(struct ep_pcie_dev_t *dev)
+{
+ int i;
+ int rc;
+
+ EP_PCIE_DBG(dev, "PCIe V%d\n", dev->rev);
+
+ for (i = EP_PCIE_MAX_CLK - 1; i >= 0; i--)
+ if (dev->clk[i].hdl)
+ clk_disable_unprepare(dev->clk[i].hdl);
+
+ if (dev->bus_client) {
+ rc = msm_bus_scale_client_update_request(dev->bus_client, 0);
+ if (rc)
+ EP_PCIE_ERR(dev,
+ "PCIe V%d: fail to relinquish bus bandwidth:%d.\n",
+ dev->rev, rc);
+ else
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: relinquish bus bandwidth.\n",
+ dev->rev);
+ }
+
+ regulator_disable(dev->gdsc);
+}
+
+/*
+ * ep_pcie_pipe_clk_init - set rates for and enable all PIPE clocks.
+ *
+ * Every pipe clock handle is required here (a NULL handle is an error,
+ * unlike the optional core clocks).  If any step fails, the clocks that
+ * were already enabled by this call are unwound before returning.
+ *
+ * Return: 0 on success, -EINVAL for a missing handle, or the error from
+ * clk_set_rate()/clk_prepare_enable().
+ */
+static int ep_pcie_pipe_clk_init(struct ep_pcie_dev_t *dev)
+{
+ int i, rc = 0;
+ struct ep_pcie_clk_info_t *info;
+
+ EP_PCIE_DBG(dev, "PCIe V%d\n", dev->rev);
+
+ for (i = 0; i < EP_PCIE_MAX_PIPE_CLK; i++) {
+ info = &dev->pipeclk[i];
+
+ if (!info->hdl) {
+ EP_PCIE_ERR(dev,
+ "PCIe V%d: handle of Pipe Clock %s is NULL\n",
+ dev->rev, info->name);
+ rc = -EINVAL;
+ break;
+ }
+
+ /* freq == 0 means no rate was given in DT; just enable */
+ if (info->freq) {
+ rc = clk_set_rate(info->hdl, info->freq);
+ if (rc) {
+ EP_PCIE_ERR(dev,
+ "PCIe V%d: can't set rate for clk %s: %d.\n",
+ dev->rev, info->name, rc);
+ break;
+ }
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: set rate for clk %s\n",
+ dev->rev, info->name);
+ }
+
+ rc = clk_prepare_enable(info->hdl);
+
+ if (rc)
+ EP_PCIE_ERR(dev, "PCIe V%d: failed to enable clk %s.\n",
+ dev->rev, info->name);
+ else
+ EP_PCIE_DBG(dev, "PCIe V%d: enabled pipe clk %s.\n",
+ dev->rev, info->name);
+ }
+
+ if (rc) {
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: disable pipe clocks for error handling.\n",
+ dev->rev);
+ /* unwind only the clocks enabled before the failure (i counts down) */
+ while (i--)
+ if (dev->pipeclk[i].hdl)
+ clk_disable_unprepare(dev->pipeclk[i].hdl);
+ }
+
+ return rc;
+}
+
+/*
+ * ep_pcie_pipe_clk_deinit - disable/unprepare every PIPE clock that has
+ * a valid handle.  Counterpart of ep_pcie_pipe_clk_init().
+ */
+static void ep_pcie_pipe_clk_deinit(struct ep_pcie_dev_t *dev)
+{
+ int i;
+
+ EP_PCIE_DBG(dev, "PCIe V%d\n", dev->rev);
+
+ for (i = 0; i < EP_PCIE_MAX_PIPE_CLK; i++)
+ if (dev->pipeclk[i].hdl)
+ clk_disable_unprepare(
+ dev->pipeclk[i].hdl);
+}
+
<parameter>+/*
+ * ep_pcie_bar_init - program BAR0/BAR2 size masks and properties.
+ *
+ * The mask is the MMIO resource span (end - start, i.e. size - 1) and is
+ * written through the ELBI CS2 window, which exposes the BAR mask
+ * registers instead of the BARs themselves; BARs at +0x10/+0x14 are
+ * masked to zero (disabled).  The properties value 0x4 is then written
+ * through the MISC_CONTROL_1 read-only-write enable; per the PCI BAR
+ * encoding this sets type bits [2:1] = 10b (64-bit memory BAR) — see
+ * PCI spec for the low-bit layout.
+ */
+static void ep_pcie_bar_init(struct ep_pcie_dev_t *dev)
+{
+ struct resource *res = dev->res[EP_PCIE_RES_MMIO].resource;
+ u32 mask = res->end - res->start;
+ u32 properties = 0x4;
+
+ EP_PCIE_DBG(dev, "PCIe V%d: BAR mask to program is 0x%x\n",
+ dev->rev, mask);
+
+ /* Configure BAR mask via CS2 */
+ ep_pcie_write_mask(dev->elbi + PCIE20_ELBI_CS2_ENABLE, 0, BIT(0));
+ ep_pcie_write_reg(dev->dm_core, PCIE20_BAR0, mask);
+ ep_pcie_write_reg(dev->dm_core, PCIE20_BAR0 + 0x4, 0);
+ ep_pcie_write_reg(dev->dm_core, PCIE20_BAR0 + 0x8, mask);
+ ep_pcie_write_reg(dev->dm_core, PCIE20_BAR0 + 0xc, 0);
+ ep_pcie_write_reg(dev->dm_core, PCIE20_BAR0 + 0x10, 0);
+ ep_pcie_write_reg(dev->dm_core, PCIE20_BAR0 + 0x14, 0);
+ ep_pcie_write_mask(dev->elbi + PCIE20_ELBI_CS2_ENABLE, BIT(0), 0);
+
+ /* Configure BAR properties via CS */
+ ep_pcie_write_mask(dev->dm_core + PCIE20_MISC_CONTROL_1, 0, BIT(0));
+ ep_pcie_write_reg(dev->dm_core, PCIE20_BAR0, properties);
+ ep_pcie_write_reg(dev->dm_core, PCIE20_BAR0 + 0x8, properties);
+ ep_pcie_write_mask(dev->dm_core + PCIE20_MISC_CONTROL_1, BIT(0), 0);
+}</parameter>
+
+/*
+ * ep_pcie_core_init - one-time PARF/DBI configuration of the EP core.
+ * @configured: true when the bootloader already trained the link (the
+ *              caller detected XMLH link-up), in which case the
+ *              endpoint-mode, address-aperture, link-speed and config-
+ *              space identity programming is skipped and only the
+ *              always-required PARF tweaks and IRQ masks are applied.
+ *
+ * The register sequence below is order-sensitive; do not reorder.
+ */
+static void ep_pcie_core_init(struct ep_pcie_dev_t *dev, bool configured)
+{
+ EP_PCIE_DBG(dev, "PCIe V%d\n", dev->rev);
+
+ /* enable debug IRQ */
+ ep_pcie_write_mask(dev->parf + PCIE20_PARF_DEBUG_INT_EN,
+ 0, BIT(3) | BIT(2) | BIT(1));
+
+ if (!configured) {
+ /* Configure PCIe to endpoint mode */
+ ep_pcie_write_reg(dev->parf, PCIE20_PARF_DEVICE_TYPE, 0x0);
+
+ /* adjust DBI base address */
+ if (dev->dbi_base_reg)
+ writel_relaxed(0x3FFFE000,
+ dev->parf + dev->dbi_base_reg);
+ else
+ writel_relaxed(0x3FFFE000,
+ dev->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+ /* Configure PCIe core to support 1GB aperture */
+ if (dev->slv_space_reg)
+ ep_pcie_write_reg(dev->parf, dev->slv_space_reg,
+ 0x40000000);
+ else
+ ep_pcie_write_reg(dev->parf,
+ PCIE20_PARF_SLV_ADDR_SPACE_SIZE, 0x40000000);
+
+ /* Configure link speed */
+ ep_pcie_write_mask(dev->dm_core +
+ PCIE20_LINK_CONTROL2_LINK_STATUS2,
+ 0xf, dev->link_speed);
+ }
+
+ /* Read halts write */
+ ep_pcie_write_mask(dev->parf + PCIE20_PARF_AXI_MSTR_RD_HALT_NO_WRITES,
+ 0, BIT(0));
+
+ /* Write after write halt */
+ ep_pcie_write_mask(dev->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT,
+ 0, BIT(31));
+
+ /* Q2A flush disable */
+ writel_relaxed(0, dev->parf + PCIE20_PARF_Q2A_FLUSH);
+
+ /* Disable the DBI Wakeup */
+ ep_pcie_write_mask(dev->parf + PCIE20_PARF_SYS_CTRL, BIT(11), 0);
+
+ /* Disable the debouncers */
+ ep_pcie_write_reg(dev->parf, PCIE20_PARF_DB_CTRL, 0x73);
+
+ /* Disable core clock CGC */
+ ep_pcie_write_mask(dev->parf + PCIE20_PARF_SYS_CTRL, 0, BIT(6));
+
+ /* Set AUX power to be on */
+ ep_pcie_write_mask(dev->parf + PCIE20_PARF_SYS_CTRL, 0, BIT(4));
+
+ /* Request to exit from L1SS for MSI and LTR MSG */
+ ep_pcie_write_mask(dev->parf + PCIE20_PARF_CFG_BITS, 0, BIT(1));
+
+ EP_PCIE_DBG(dev,
+ "Initial: CLASS_CODE_REVISION_ID:0x%x; HDR_TYPE:0x%x\n",
+ readl_relaxed(dev->dm_core + PCIE20_CLASS_CODE_REVISION_ID),
+ readl_relaxed(dev->dm_core + PCIE20_BIST_HDR_TYPE));
+
+ if (!configured) {
+ /* Enable CS for RO(CS) register writes */
+ ep_pcie_write_mask(dev->dm_core + PCIE20_MISC_CONTROL_1, 0,
+ BIT(0));
+
+ /* Set class code and revision ID */
+ ep_pcie_write_reg(dev->dm_core, PCIE20_CLASS_CODE_REVISION_ID,
+ 0xff000000);
+
+ /* Set header type */
+ ep_pcie_write_reg(dev->dm_core, PCIE20_BIST_HDR_TYPE, 0x10);
+
+ /* Set Subsystem ID and Subsystem Vendor ID */
+ ep_pcie_write_reg(dev->dm_core, PCIE20_SUBSYSTEM, 0xa01f17cb);
+
+ /* Set the PMC Register - to support PME in D0/D3hot/D3cold */
+ ep_pcie_write_mask(dev->dm_core + PCIE20_CAP_ID_NXT_PTR, 0,
+ BIT(31)|BIT(30)|BIT(27));
+
+ /* Set the Endpoint L0s Acceptable Latency to 1us (max) */
+ ep_pcie_write_reg_field(dev->dm_core,
+ PCIE20_DEVICE_CAPABILITIES,
+ PCIE20_MASK_EP_L0S_ACCPT_LATENCY, 0x7);
+
+ /* Set the Endpoint L1 Acceptable Latency to 2 us (max) */
+ ep_pcie_write_reg_field(dev->dm_core,
+ PCIE20_DEVICE_CAPABILITIES,
+ PCIE20_MASK_EP_L1_ACCPT_LATENCY, 0x7);
+
+ /* Set the L0s Exit Latency to 2us-4us = 0x6 */
+ ep_pcie_write_reg_field(dev->dm_core, PCIE20_LINK_CAPABILITIES,
+ PCIE20_MASK_L1_EXIT_LATENCY, 0x6);
+
+ /* Set the L1 Exit Latency to be 32us-64 us = 0x6 */
+ ep_pcie_write_reg_field(dev->dm_core, PCIE20_LINK_CAPABILITIES,
+ PCIE20_MASK_L0S_EXIT_LATENCY, 0x6);
+
+ /* L1ss is supported */
+ ep_pcie_write_mask(dev->dm_core + PCIE20_L1SUB_CAPABILITY, 0,
+ 0x1f);
+
+ /* Enable Clock Power Management */
+ ep_pcie_write_reg_field(dev->dm_core, PCIE20_LINK_CAPABILITIES,
+ PCIE20_MASK_CLOCK_POWER_MAN, 0x1);
+
+ /* Disable CS for RO(CS) register writes */
+ ep_pcie_write_mask(dev->dm_core + PCIE20_MISC_CONTROL_1, BIT(0),
+ 0);
+
+ /* Set FTS value to match the PHY setting */
+ ep_pcie_write_reg_field(dev->dm_core,
+ PCIE20_ACK_F_ASPM_CTRL_REG,
+ PCIE20_MASK_ACK_N_FTS, 0x80);
+
+ EP_PCIE_DBG(dev,
+ "After program: CLASS_CODE_REVISION_ID:0x%x; HDR_TYPE:0x%x; L1SUB_CAPABILITY:0x%x; PARF_SYS_CTRL:0x%x\n",
+ readl_relaxed(dev->dm_core +
+ PCIE20_CLASS_CODE_REVISION_ID),
+ readl_relaxed(dev->dm_core + PCIE20_BIST_HDR_TYPE),
+ readl_relaxed(dev->dm_core + PCIE20_L1SUB_CAPABILITY),
+ readl_relaxed(dev->parf + PCIE20_PARF_SYS_CTRL));
+
+ /* Configure BARs */
+ ep_pcie_bar_init(dev);
+
+ /* MHI config and execution-environment registers in MMIO space */
+ ep_pcie_write_reg(dev->mmio, PCIE20_MHICFG, 0x02800880);
+ ep_pcie_write_reg(dev->mmio, PCIE20_BHI_EXECENV, 0x2);
+ }
+
+ /* Configure IRQ events */
+ if (dev->aggregated_irq) {
+ /* clear all mask bits, then unmask the events handled here */
+ ep_pcie_write_reg(dev->parf, PCIE20_PARF_INT_ALL_MASK, 0);
+ ep_pcie_write_mask(dev->parf + PCIE20_PARF_INT_ALL_MASK, 0,
+ BIT(EP_PCIE_INT_EVT_LINK_DOWN) |
+ BIT(EP_PCIE_INT_EVT_BME) |
+ BIT(EP_PCIE_INT_EVT_PM_TURNOFF) |
+ BIT(EP_PCIE_INT_EVT_DSTATE_CHANGE) |
+ BIT(EP_PCIE_INT_EVT_LINK_UP));
+ if (!dev->mhi_a7_irq)
+ ep_pcie_write_mask(dev->parf +
+ PCIE20_PARF_INT_ALL_MASK, 0,
+ BIT(EP_PCIE_INT_EVT_MHI_A7));
+
+ EP_PCIE_DBG(dev, "PCIe V%d: PCIE20_PARF_INT_ALL_MASK:0x%x\n",
+ dev->rev,
+ readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_MASK));
+ }
+
+ if (dev->active_config) {
+ ep_pcie_write_reg(dev->dm_core, PCIE20_AUX_CLK_FREQ_REG, 0x14);
+
+ EP_PCIE_DBG2(dev, "PCIe V%d: Enable L1.\n", dev->rev);
+ /* clearing PM_CTRL BIT(5) enables L1 entry */
+ ep_pcie_write_mask(dev->parf + PCIE20_PARF_PM_CTRL, BIT(5), 0);
+ }
+}
+
+/*
+ * ep_pcie_config_inbound_iatu - map host accesses to BAR0 onto the
+ * local MMIO region.
+ *
+ * Programs iATU region 0 in BAR-match mode so inbound TLPs hitting BAR0
+ * are translated to the physical MMIO resource start, and mirrors the
+ * MMIO base into the PARF MHI base-address registers.  The final DBG
+ * reads log the programmed viewport for debugging.
+ */
+static void ep_pcie_config_inbound_iatu(struct ep_pcie_dev_t *dev)
+{
+ struct resource *mmio = dev->res[EP_PCIE_RES_MMIO].resource;
+ u32 lower, limit, bar;
+
+ lower = mmio->start;
+ limit = mmio->end;
+ bar = readl_relaxed(dev->dm_core + PCIE20_BAR0);
+
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: BAR0 is 0x%x; MMIO[0x%x-0x%x]\n",
+ dev->rev, bar, lower, limit);
+
+ ep_pcie_write_reg(dev->parf, PCIE20_PARF_MHI_BASE_ADDR_LOWER, lower);
+ ep_pcie_write_reg(dev->parf, PCIE20_PARF_MHI_BASE_ADDR_UPPER, 0x0);
+
+ /* program inbound address translation using region 0 */
+ ep_pcie_write_reg(dev->dm_core, PCIE20_PLR_IATU_VIEWPORT, 0x80000000);
+ /* set region to mem type */
+ ep_pcie_write_reg(dev->dm_core, PCIE20_PLR_IATU_CTRL1, 0x0);
+ /* setup target address registers */
+ ep_pcie_write_reg(dev->dm_core, PCIE20_PLR_IATU_LTAR, lower);
+ ep_pcie_write_reg(dev->dm_core, PCIE20_PLR_IATU_UTAR, 0x0);
+ /* use BAR match mode for BAR0 and enable region 0 */
+ ep_pcie_write_reg(dev->dm_core, PCIE20_PLR_IATU_CTRL2, 0xc0000000);
+
+ EP_PCIE_DBG(dev, "PCIE20_PLR_IATU_VIEWPORT:0x%x\n",
+ readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_VIEWPORT));
+ EP_PCIE_DBG(dev, "PCIE20_PLR_IATU_CTRL1:0x%x\n",
+ readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_CTRL1));
+ EP_PCIE_DBG(dev, "PCIE20_PLR_IATU_LTAR:0x%x\n",
+ readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LTAR));
+ EP_PCIE_DBG(dev, "PCIE20_PLR_IATU_UTAR:0x%x\n",
+ readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_UTAR));
+ EP_PCIE_DBG(dev, "PCIE20_PLR_IATU_CTRL2:0x%x\n",
+ readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_CTRL2));
+}
+
+/*
+ * ep_pcie_config_outbound_iatu_entry - program one outbound iATU region.
+ * @region:    iATU region index (written to the viewport register).
+ * @lower/@upper: 64-bit local source base of the window.
+ * @limit:     end of the window (lower 32 bits only).
+ * @tgt_lower/@tgt_upper: 64-bit PCIe target address.
+ *
+ * The region is enabled with BIT(31) (region enable) and BIT(27)
+ * (DMA bypass mode) in CTRL2.  Order of the viewport write first, then
+ * the region registers, is required by the iATU viewport scheme.
+ */
+static void ep_pcie_config_outbound_iatu_entry(struct ep_pcie_dev_t *dev,
+ u32 region, u32 lower, u32 upper,
+ u32 limit, u32 tgt_lower, u32 tgt_upper)
+{
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: region:%d; lower:0x%x; limit:0x%x; target_lower:0x%x; target_upper:0x%x\n",
+ dev->rev, region, lower, limit, tgt_lower, tgt_upper);
+
+ /* program outbound address translation using an input region */
+ ep_pcie_write_reg(dev->dm_core, PCIE20_PLR_IATU_VIEWPORT, region);
+ /* set region to mem type */
+ ep_pcie_write_reg(dev->dm_core, PCIE20_PLR_IATU_CTRL1, 0x0);
+ /* setup source address registers */
+ ep_pcie_write_reg(dev->dm_core, PCIE20_PLR_IATU_LBAR, lower);
+ ep_pcie_write_reg(dev->dm_core, PCIE20_PLR_IATU_UBAR, upper);
+ ep_pcie_write_reg(dev->dm_core, PCIE20_PLR_IATU_LAR, limit);
+ /* setup target address registers */
+ ep_pcie_write_reg(dev->dm_core, PCIE20_PLR_IATU_LTAR, tgt_lower);
+ ep_pcie_write_reg(dev->dm_core, PCIE20_PLR_IATU_UTAR, tgt_upper);
+ /* use DMA bypass mode and enable the region */
+ ep_pcie_write_mask(dev->dm_core + PCIE20_PLR_IATU_CTRL2, 0,
+ BIT(31) | BIT(27));
+
+ EP_PCIE_DBG(dev, "PCIE20_PLR_IATU_VIEWPORT:0x%x\n",
+ readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_VIEWPORT));
+ EP_PCIE_DBG(dev, "PCIE20_PLR_IATU_CTRL1:0x%x\n",
+ readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_CTRL1));
+ EP_PCIE_DBG(dev, "PCIE20_PLR_IATU_LBAR:0x%x\n",
+ readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LBAR));
+ EP_PCIE_DBG(dev, "PCIE20_PLR_IATU_UBAR:0x%x\n",
+ readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_UBAR));
+ EP_PCIE_DBG(dev, "PCIE20_PLR_IATU_LAR:0x%x\n",
+ readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LAR));
+ EP_PCIE_DBG(dev, "PCIE20_PLR_IATU_LTAR:0x%x\n",
+ readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LTAR));
+ EP_PCIE_DBG(dev, "PCIE20_PLR_IATU_UTAR:0x%x\n",
+ readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_UTAR));
+ EP_PCIE_DBG(dev, "PCIE20_PLR_IATU_CTRL2:0x%x\n",
+ readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_CTRL2));
+}
+
+/*
+ * ep_pcie_notify_event - invoke the client callback for @event.
+ *
+ * The callback fires only when a client has registered (event_reg and
+ * its callback are non-NULL) and has subscribed to this event in its
+ * event mask; otherwise the event is just logged and dropped.
+ */
+static void ep_pcie_notify_event(struct ep_pcie_dev_t *dev,
+ enum ep_pcie_event event)
+{
+ if (dev->event_reg && dev->event_reg->callback &&
+ (dev->event_reg->events & event)) {
+ struct ep_pcie_notify *notify = &dev->event_reg->notify;
+
+ notify->event = event;
+ notify->user = dev->event_reg->user;
+ EP_PCIE_DBG(&ep_pcie_dev,
+ "PCIe V%d: Callback client for event %d.\n",
+ dev->rev, event);
+ dev->event_reg->callback(notify);
+ } else {
+ EP_PCIE_DBG(&ep_pcie_dev,
+ "PCIe V%d: Client does not register for event %d.\n",
+ dev->rev, event);
+ }
+}
+
+/*
+ * ep_pcie_get_resources - acquire all DT-described resources.
+ *
+ * Parses the optional PHY init sequence, clock frequencies, regulators
+ * (plus the mandatory GDSC), GPIOs, core/pipe clocks, resets, the bus
+ * scale table, memory regions and IRQ numbers, then caches the ioremapped
+ * register bases (parf/phy/mmio/msi/dm_core/elbi).
+ *
+ * Return: 0 on success, or a negative errno (including -EPROBE_DEFER
+ * propagated from regulator lookups).  All failure paths free clkfreq
+ * via the out label; devm-managed resources are released by the core.
+ */
+static int ep_pcie_get_resources(struct ep_pcie_dev_t *dev,
+ struct platform_device *pdev)
+{
+ int i, len, cnt, ret = 0, size = 0;
+ struct ep_pcie_vreg_info_t *vreg_info;
+ struct ep_pcie_gpio_info_t *gpio_info;
+ struct ep_pcie_clk_info_t *clk_info;
+ struct ep_pcie_reset_info_t *reset_info;
+ struct resource *res;
+ struct ep_pcie_res_info_t *res_info;
+ struct ep_pcie_irq_info_t *irq_info;
+ char prop_name[MAX_PROP_SIZE];
+ const __be32 *prop;
+ u32 *clkfreq = NULL;
+
+ EP_PCIE_DBG(dev, "PCIe V%d\n", dev->rev);
+
+ /* optional register/value table used later by ep_pcie_phy_init() */
+ of_get_property(pdev->dev.of_node, "qcom,phy-init", &size);
+ if (size) {
+ dev->phy_init = (struct ep_pcie_phy_info_t *)
+ devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+
+ if (dev->phy_init) {
+ dev->phy_init_len =
+ size / sizeof(*dev->phy_init);
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: phy init length is 0x%x.\n",
+ dev->rev, dev->phy_init_len);
+
+ of_property_read_u32_array(pdev->dev.of_node,
+ "qcom,phy-init",
+ (unsigned int *)dev->phy_init,
+ size / sizeof(dev->phy_init->offset));
+ } else {
+ EP_PCIE_ERR(dev,
+ "PCIe V%d: Could not allocate memory for phy init sequence.\n",
+ dev->rev);
+ return -ENOMEM;
+ }
+ } else {
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: PHY V%d: phy init sequence is not present in DT.\n",
+ dev->rev, dev->phy_rev);
+ }
+
+ cnt = of_property_count_strings((&pdev->dev)->of_node,
+ "clock-names");
+ if (cnt > 0) {
+ /* NOTE(review): this local 'size' shadows the function-level
+ * 'size' used for the phy-init property above.
+ */
+ size_t size = cnt * sizeof(*clkfreq);
+
+ clkfreq = kzalloc(size, GFP_KERNEL);
+ if (!clkfreq) {
+ EP_PCIE_ERR(dev, "PCIe V%d: memory alloc failed\n",
+ dev->rev);
+ return -ENOMEM;
+ }
+ ret = of_property_read_u32_array(
+ (&pdev->dev)->of_node,
+ "max-clock-frequency-hz", clkfreq, cnt);
+ if (ret) {
+ EP_PCIE_ERR(dev,
+ "PCIe V%d: invalid max-clock-frequency-hz property:%d\n",
+ dev->rev, ret);
+ goto out;
+ }
+ }
+
+ /* regulators: required ones fail the probe, optional ones are NULLed */
+ for (i = 0; i < EP_PCIE_MAX_VREG; i++) {
+ vreg_info = &dev->vreg[i];
+ vreg_info->hdl =
+ devm_regulator_get(&pdev->dev, vreg_info->name);
+
+ if (PTR_ERR(vreg_info->hdl) == -EPROBE_DEFER) {
+ EP_PCIE_DBG(dev, "EPROBE_DEFER for VReg:%s\n",
+ vreg_info->name);
+ ret = PTR_ERR(vreg_info->hdl);
+ goto out;
+ }
+
+ if (IS_ERR(vreg_info->hdl)) {
+ if (vreg_info->required) {
+ EP_PCIE_ERR(dev, "Vreg %s doesn't exist\n",
+ vreg_info->name);
+ ret = PTR_ERR(vreg_info->hdl);
+ goto out;
+ } else {
+ EP_PCIE_DBG(dev,
+ "Optional Vreg %s doesn't exist\n",
+ vreg_info->name);
+ vreg_info->hdl = NULL;
+ }
+ } else {
+ /* voltage levels come from qcom,<name>-voltage-level:
+ * <max_v min_v opt_mode>
+ */
+ snprintf(prop_name, MAX_PROP_SIZE,
+ "qcom,%s-voltage-level", vreg_info->name);
+ prop = of_get_property((&pdev->dev)->of_node,
+ prop_name, &len);
+ if (!prop || (len != (3 * sizeof(__be32)))) {
+ EP_PCIE_DBG(dev, "%s %s property\n",
+ prop ? "invalid format" :
+ "no", prop_name);
+ } else {
+ vreg_info->max_v = be32_to_cpup(&prop[0]);
+ vreg_info->min_v = be32_to_cpup(&prop[1]);
+ vreg_info->opt_mode =
+ be32_to_cpup(&prop[2]);
+ }
+ }
+ }
+
+ dev->gdsc = devm_regulator_get(&pdev->dev, "gdsc-vdd");
+
+ if (IS_ERR(dev->gdsc)) {
+ EP_PCIE_ERR(dev, "PCIe V%d: Failed to get %s GDSC:%ld\n",
+ dev->rev, dev->pdev->name, PTR_ERR(dev->gdsc));
+ if (PTR_ERR(dev->gdsc) == -EPROBE_DEFER)
+ EP_PCIE_DBG(dev, "PCIe V%d: EPROBE_DEFER for %s GDSC\n",
+ dev->rev, dev->pdev->name);
+ ret = PTR_ERR(dev->gdsc);
+ goto out;
+ }
+
+ /* GPIOs are optional; a lookup failure is logged but not fatal */
+ for (i = 0; i < EP_PCIE_MAX_GPIO; i++) {
+ gpio_info = &dev->gpio[i];
+ ret = of_get_named_gpio((&pdev->dev)->of_node,
+ gpio_info->name, 0);
+ if (ret >= 0) {
+ gpio_info->num = ret;
+ ret = 0;
+ EP_PCIE_DBG(dev, "GPIO num for %s is %d\n",
+ gpio_info->name, gpio_info->num);
+ } else {
+ EP_PCIE_DBG(dev,
+ "GPIO %s is not supported in this configuration.\n",
+ gpio_info->name);
+ ret = 0;
+ }
+ }
+
+ for (i = 0; i < EP_PCIE_MAX_CLK; i++) {
+ clk_info = &dev->clk[i];
+
+ clk_info->hdl = devm_clk_get(&pdev->dev, clk_info->name);
+
+ if (IS_ERR(clk_info->hdl)) {
+ if (clk_info->required) {
+ EP_PCIE_ERR(dev,
+ "Clock %s isn't available:%ld\n",
+ clk_info->name, PTR_ERR(clk_info->hdl));
+ ret = PTR_ERR(clk_info->hdl);
+ goto out;
+ } else {
+ EP_PCIE_DBG(dev, "Ignoring Clock %s\n",
+ clk_info->name);
+ clk_info->hdl = NULL;
+ }
+ } else {
+ if (clkfreq != NULL) {
+ /* clkfreq lists pipe-clock rates first, then
+ * core-clock rates, hence the offset
+ */
+ clk_info->freq = clkfreq[i +
+ EP_PCIE_MAX_PIPE_CLK];
+ EP_PCIE_DBG(dev, "Freq of Clock %s is:%d\n",
+ clk_info->name, clk_info->freq);
+ }
+ }
+ }
+
+ for (i = 0; i < EP_PCIE_MAX_PIPE_CLK; i++) {
+ clk_info = &dev->pipeclk[i];
+
+ clk_info->hdl = devm_clk_get(&pdev->dev, clk_info->name);
+
+ if (IS_ERR(clk_info->hdl)) {
+ if (clk_info->required) {
+ EP_PCIE_ERR(dev,
+ "Clock %s isn't available:%ld\n",
+ clk_info->name, PTR_ERR(clk_info->hdl));
+ ret = PTR_ERR(clk_info->hdl);
+ goto out;
+ } else {
+ EP_PCIE_DBG(dev, "Ignoring Clock %s\n",
+ clk_info->name);
+ clk_info->hdl = NULL;
+ }
+ } else {
+ if (clkfreq != NULL) {
+ clk_info->freq = clkfreq[i];
+ EP_PCIE_DBG(dev, "Freq of Clock %s is:%d\n",
+ clk_info->name, clk_info->freq);
+ }
+ }
+ }
+
+ for (i = 0; i < EP_PCIE_MAX_RESET; i++) {
+ reset_info = &dev->reset[i];
+
+ reset_info->hdl = devm_reset_control_get(&pdev->dev,
+ reset_info->name);
+
+ if (IS_ERR(reset_info->hdl)) {
+ if (reset_info->required) {
+ EP_PCIE_ERR(dev,
+ "Reset %s isn't available:%ld\n",
+ reset_info->name,
+ PTR_ERR(reset_info->hdl));
+
+ ret = PTR_ERR(reset_info->hdl);
+ reset_info->hdl = NULL;
+ goto out;
+ } else {
+ EP_PCIE_DBG(dev, "Ignoring Reset %s\n",
+ reset_info->name);
+ reset_info->hdl = NULL;
+ }
+ }
+ }
+
+ dev->bus_scale_table = msm_bus_cl_get_pdata(pdev);
+ if (!dev->bus_scale_table) {
+ EP_PCIE_DBG(dev, "PCIe V%d: No bus scale table for %s\n",
+ dev->rev, dev->pdev->name);
+ dev->bus_client = 0;
+ } else {
+ dev->bus_client =
+ msm_bus_scale_register_client(dev->bus_scale_table);
+ if (!dev->bus_client) {
+ EP_PCIE_ERR(dev,
+ "PCIe V%d: Failed to register bus client for %s\n",
+ dev->rev, dev->pdev->name);
+ msm_bus_cl_clear_pdata(dev->bus_scale_table);
+ ret = -ENODEV;
+ goto out;
+ }
+ }
+
+ /* every named memory region is mandatory and gets ioremapped */
+ for (i = 0; i < EP_PCIE_MAX_RES; i++) {
+ res_info = &dev->res[i];
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ res_info->name);
+
+ if (!res) {
+ EP_PCIE_ERR(dev,
+ "PCIe V%d: can't get resource for %s.\n",
+ dev->rev, res_info->name);
+ ret = -ENOMEM;
+ goto out;
+ } else {
+ EP_PCIE_DBG(dev, "start addr for %s is %pa.\n",
+ res_info->name, &res->start);
+ }
+
+ res_info->base = devm_ioremap(&pdev->dev,
+ res->start, resource_size(res));
+ if (!res_info->base) {
+ EP_PCIE_ERR(dev, "PCIe V%d: can't remap %s.\n",
+ dev->rev, res_info->name);
+ ret = -ENOMEM;
+ goto out;
+ }
+ res_info->resource = res;
+ }
+
+ /* IRQs are optional; a missing one just leaves irq_info->num unset */
+ for (i = 0; i < EP_PCIE_MAX_IRQ; i++) {
+ irq_info = &dev->irq[i];
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ irq_info->name);
+
+ if (!res) {
+ EP_PCIE_DBG2(dev, "PCIe V%d: can't find IRQ # for %s\n",
+ dev->rev, irq_info->name);
+ } else {
+ irq_info->num = res->start;
+ EP_PCIE_DBG2(dev, "IRQ # for %s is %d.\n",
+ irq_info->name, irq_info->num);
+ }
+ }
+
+ dev->parf = dev->res[EP_PCIE_RES_PARF].base;
+ dev->phy = dev->res[EP_PCIE_RES_PHY].base;
+ dev->mmio = dev->res[EP_PCIE_RES_MMIO].base;
+ dev->msi = dev->res[EP_PCIE_RES_MSI].base;
+ dev->dm_core = dev->res[EP_PCIE_RES_DM_CORE].base;
+ dev->elbi = dev->res[EP_PCIE_RES_ELBI].base;
+
+out:
+ kfree(clkfreq);
+ return ret;
+}
+
+/*
+ * ep_pcie_release_resources - drop cached register bases and the bus
+ * client.  The ioremapped regions themselves are devm-managed, so only
+ * the cached pointers are cleared here.
+ */
+static void ep_pcie_release_resources(struct ep_pcie_dev_t *dev)
+{
+ dev->parf = NULL;
+ dev->elbi = NULL;
+ dev->dm_core = NULL;
+ dev->phy = NULL;
+ dev->mmio = NULL;
+ dev->msi = NULL;
+
+ if (dev->bus_client) {
+ msm_bus_scale_unregister_client(dev->bus_client);
+ dev->bus_client = 0;
+ }
+}
+
+/*
+ * ep_pcie_enumeration_complete - finalize a successful host enumeration.
+ *
+ * Marks the link enabled, asserts the MDM2AP status GPIO (when one was
+ * found in DT), registers the hw driver with the device ID read from
+ * config space offset 0, and notifies the client of EP_PCIE_EVENT_LINKUP.
+ */
+static void ep_pcie_enumeration_complete(struct ep_pcie_dev_t *dev)
+{
+ dev->enumerated = true;
+ dev->link_status = EP_PCIE_LINK_ENABLED;
+
+ if (dev->gpio[EP_PCIE_GPIO_MDM2AP].num) {
+ /* assert MDM2AP Status GPIO */
+ EP_PCIE_DBG2(dev, "PCIe V%d: assert MDM2AP Status.\n",
+ dev->rev);
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: MDM2APStatus GPIO initial:%d.\n",
+ dev->rev,
+ gpio_get_value(
+ dev->gpio[EP_PCIE_GPIO_MDM2AP].num));
+ gpio_set_value(dev->gpio[EP_PCIE_GPIO_MDM2AP].num,
+ dev->gpio[EP_PCIE_GPIO_MDM2AP].on);
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: MDM2APStatus GPIO after assertion:%d.\n",
+ dev->rev,
+ gpio_get_value(
+ dev->gpio[EP_PCIE_GPIO_MDM2AP].num));
+ }
+
+ /* device/vendor ID live at config-space offset 0 */
+ hw_drv.device_id = readl_relaxed(dev->dm_core);
+ EP_PCIE_DBG(&ep_pcie_dev,
+ "PCIe V%d: register driver for device 0x%x.\n",
+ ep_pcie_dev.rev, hw_drv.device_id);
+ ep_pcie_register_drv(&hw_drv);
+ ep_pcie_notify_event(dev, EP_PCIE_EVENT_LINKUP);
+}
+
+/*
+ * ep_pcie_core_enable_endpoint - full endpoint bring-up state machine.
+ * @opt: bitmask of EP_PCIE_OPT_* flags:
+ *       POWER_ON   - enable regulators, core clocks and pipe clocks;
+ *       ENUM       - run link training / enumeration (skipped otherwise);
+ *       AST_WAKE   - toggle the WAKE# GPIO to wake the host;
+ *       ENUM_ASYNC - do not block waiting for the host to set Bus
+ *                    Master Enable (BME); BME is sampled once instead.
+ *
+ * Serialized by dev->setup_mtx.  On failure the link_fail /
+ * pipe_clk_fail / clk_fail labels unwind in reverse order of what was
+ * enabled, unless ep_pcie_debug_keep_resource keeps resources alive
+ * for debugging (in which case 0 is returned from clk_fail).
+ *
+ * Return: 0 on success, EP_PCIE_ERROR or a negative errno on failure.
+ */
+int ep_pcie_core_enable_endpoint(enum ep_pcie_options opt)
+{
+ int ret = 0;
+ u32 val = 0;
+ u32 retries = 0;
+ u32 bme = 0;
+ bool ltssm_en = false;
+ struct ep_pcie_dev_t *dev = &ep_pcie_dev;
+
+ EP_PCIE_DBG(dev, "PCIe V%d: options input are 0x%x.\n", dev->rev, opt);
+
+ mutex_lock(&dev->setup_mtx);
+
+ if (dev->link_status == EP_PCIE_LINK_ENABLED) {
+ EP_PCIE_ERR(dev,
+ "PCIe V%d: link is already enabled.\n",
+ dev->rev);
+ goto out;
+ }
+
+ if (dev->link_status == EP_PCIE_LINK_UP)
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: link is already up, let's proceed with the voting for the resources.\n",
+ dev->rev);
+
+ if (dev->power_on && (opt & EP_PCIE_OPT_POWER_ON)) {
+ EP_PCIE_ERR(dev,
+ "PCIe V%d: request to turn on the power when link is already powered on.\n",
+ dev->rev);
+ goto out;
+ }
+
+ if (opt & EP_PCIE_OPT_POWER_ON) {
+ /* enable power */
+ ret = ep_pcie_vreg_init(dev);
+ if (ret) {
+ EP_PCIE_ERR(dev, "PCIe V%d: failed to enable Vreg\n",
+ dev->rev);
+ goto out;
+ }
+
+ /* enable clocks */
+ ret = ep_pcie_clk_init(dev);
+ if (ret) {
+ EP_PCIE_ERR(dev, "PCIe V%d: failed to enable clocks\n",
+ dev->rev);
+ goto clk_fail;
+ }
+
+ /* enable pipe clock */
+ ret = ep_pcie_pipe_clk_init(dev);
+ if (ret) {
+ EP_PCIE_ERR(dev,
+ "PCIe V%d: failed to enable pipe clock\n",
+ dev->rev);
+ goto pipe_clk_fail;
+ }
+
+ dev->power_on = true;
+ }
+
+ if (!(opt & EP_PCIE_OPT_ENUM))
+ goto out;
+
+ /* check link status during initial bootup */
+ if (!dev->enumerated) {
+ val = readl_relaxed(dev->parf + PCIE20_PARF_PM_STTS);
+ val = val & PARF_XMLH_LINK_UP;
+ EP_PCIE_DBG(dev, "PCIe V%d: Link status is 0x%x.\n", dev->rev,
+ val);
+ if (val) {
+ /* bootloader already trained the link: init with
+ * configured=true and skip training below
+ */
+ EP_PCIE_INFO(dev,
+ "PCIe V%d: link initialized by bootloader for LE PCIe endpoint; skip link training in HLOS.\n",
+ dev->rev);
+ ep_pcie_core_init(dev, true);
+ dev->link_status = EP_PCIE_LINK_UP;
+ dev->l23_ready = false;
+ goto checkbme;
+ } else {
+ ltssm_en = readl_relaxed(dev->parf
+ + PCIE20_PARF_LTSSM) & BIT(8);
+
+ if (ltssm_en) {
+ EP_PCIE_ERR(dev,
+ "PCIe V%d: link is not up when LTSSM has already enabled by bootloader.\n",
+ dev->rev);
+ ret = EP_PCIE_ERROR;
+ goto link_fail;
+ } else {
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: Proceed with regular link training.\n",
+ dev->rev);
+ }
+ }
+ }
+
+ if (opt & EP_PCIE_OPT_AST_WAKE) {
+ /* assert PCIe WAKE# */
+ EP_PCIE_INFO(dev, "PCIe V%d: assert PCIe WAKE#.\n",
+ dev->rev);
+ EP_PCIE_DBG(dev, "PCIe V%d: WAKE GPIO initial:%d.\n",
+ dev->rev,
+ gpio_get_value(dev->gpio[EP_PCIE_GPIO_WAKE].num));
+ gpio_set_value(dev->gpio[EP_PCIE_GPIO_WAKE].num,
+ 1 - dev->gpio[EP_PCIE_GPIO_WAKE].on);
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: WAKE GPIO after deassertion:%d.\n",
+ dev->rev,
+ gpio_get_value(dev->gpio[EP_PCIE_GPIO_WAKE].num));
+ gpio_set_value(dev->gpio[EP_PCIE_GPIO_WAKE].num,
+ dev->gpio[EP_PCIE_GPIO_WAKE].on);
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: WAKE GPIO after assertion:%d.\n",
+ dev->rev,
+ gpio_get_value(dev->gpio[EP_PCIE_GPIO_WAKE].num));
+ }
+
+ /* wait for host side to deassert PERST */
+ retries = 0;
+ do {
+ if (gpio_get_value(dev->gpio[EP_PCIE_GPIO_PERST].num) == 1)
+ break;
+ retries++;
+ usleep_range(PERST_TIMEOUT_US_MIN, PERST_TIMEOUT_US_MAX);
+ } while (retries < PERST_CHECK_MAX_COUNT);
+
+ EP_PCIE_DBG(dev, "PCIe V%d: number of PERST retries:%d.\n",
+ dev->rev, retries);
+
+ if (retries == PERST_CHECK_MAX_COUNT) {
+ EP_PCIE_ERR(dev,
+ "PCIe V%d: PERST is not de-asserted by host\n",
+ dev->rev);
+ ret = EP_PCIE_ERROR;
+ goto link_fail;
+ } else {
+ dev->perst_deast = true;
+ if (opt & EP_PCIE_OPT_AST_WAKE) {
+ /* deassert PCIe WAKE# */
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: deassert PCIe WAKE# after PERST# is deasserted.\n",
+ dev->rev);
+ gpio_set_value(dev->gpio[EP_PCIE_GPIO_WAKE].num,
+ 1 - dev->gpio[EP_PCIE_GPIO_WAKE].on);
+ }
+ }
+
+ /* init PCIe PHY */
+ ep_pcie_phy_init(dev);
+
+ EP_PCIE_DBG(dev, "PCIe V%d: waiting for phy ready...\n", dev->rev);
+ retries = 0;
+ do {
+ if (ep_pcie_phy_is_ready(dev))
+ break;
+ retries++;
+ if (retries % 100 == 0)
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: current number of PHY retries:%d.\n",
+ dev->rev, retries);
+ usleep_range(REFCLK_STABILIZATION_DELAY_US_MIN,
+ REFCLK_STABILIZATION_DELAY_US_MAX);
+ } while (retries < PHY_READY_TIMEOUT_COUNT);
+
+ EP_PCIE_DBG(dev, "PCIe V%d: number of PHY retries:%d.\n",
+ dev->rev, retries);
+
+ if (retries == PHY_READY_TIMEOUT_COUNT) {
+ EP_PCIE_ERR(dev, "PCIe V%d: PCIe PHY failed to come up!\n",
+ dev->rev);
+ ret = EP_PCIE_ERROR;
+ ep_pcie_reg_dump(dev, BIT(EP_PCIE_RES_PHY), false);
+ goto link_fail;
+ } else {
+ EP_PCIE_INFO(dev, "PCIe V%d: PCIe PHY is ready!\n", dev->rev);
+ }
+
+ ep_pcie_core_init(dev, false);
+ ep_pcie_config_inbound_iatu(dev);
+
+ /* enable link training */
+ if (dev->phy_rev >= 3)
+ ep_pcie_write_mask(dev->parf + PCIE20_PARF_LTSSM, 0, BIT(8));
+ else
+ ep_pcie_write_mask(dev->elbi + PCIE20_ELBI_SYS_CTRL, 0, BIT(0));
+
+ EP_PCIE_DBG(dev, "PCIe V%d: check if link is up\n", dev->rev);
+
+ /* Wait for up to 100ms for the link to come up */
+ retries = 0;
+ do {
+ usleep_range(LINK_UP_TIMEOUT_US_MIN, LINK_UP_TIMEOUT_US_MAX);
+ val = readl_relaxed(dev->elbi + PCIE20_ELBI_SYS_STTS);
+ retries++;
+ if (retries % 100 == 0)
+ EP_PCIE_DBG(dev, "PCIe V%d: LTSSM_STATE:0x%x.\n",
+ dev->rev, (val >> 0xC) & 0x3f);
+ } while ((!(val & XMLH_LINK_UP) ||
+ !ep_pcie_confirm_linkup(dev, false))
+ && (retries < LINK_UP_CHECK_MAX_COUNT));
+
+ if (retries == LINK_UP_CHECK_MAX_COUNT) {
+ EP_PCIE_ERR(dev, "PCIe V%d: link initialization failed\n",
+ dev->rev);
+ ret = EP_PCIE_ERROR;
+ goto link_fail;
+ } else {
+ dev->link_status = EP_PCIE_LINK_UP;
+ dev->l23_ready = false;
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: link is up after %d checkings (%d ms)\n",
+ dev->rev, retries,
+ LINK_UP_TIMEOUT_US_MIN * retries / 1000);
+ EP_PCIE_INFO(dev,
+ "PCIe V%d: link initialized for LE PCIe endpoint\n",
+ dev->rev);
+ }
+
+checkbme:
+ if (dev->active_config) {
+ ep_pcie_write_mask(dev->parf + PCIE20_PARF_SLV_ADDR_MSB_CTRL,
+ 0, BIT(0));
+ ep_pcie_write_reg(dev->parf, PCIE20_PARF_SLV_ADDR_SPACE_SIZE_HI,
+ 0x200);
+ ep_pcie_write_reg(dev->parf, PCIE20_PARF_SLV_ADDR_SPACE_SIZE,
+ 0x0);
+ ep_pcie_write_reg(dev->parf, PCIE20_PARF_DBI_BASE_ADDR_HI,
+ 0x100);
+ ep_pcie_write_reg(dev->parf, PCIE20_PARF_DBI_BASE_ADDR,
+ 0x7FFFE000);
+ }
+
+ if (!(opt & EP_PCIE_OPT_ENUM_ASYNC)) {
+ /* Wait for up to 1000ms for BME to be set */
+ retries = 0;
+
+ /* COMMAND_STATUS BIT(2) is the Bus Master Enable bit */
+ bme = readl_relaxed(dev->dm_core +
+ PCIE20_COMMAND_STATUS) & BIT(2);
+ while (!bme && (retries < BME_CHECK_MAX_COUNT)) {
+ retries++;
+ usleep_range(BME_TIMEOUT_US_MIN, BME_TIMEOUT_US_MAX);
+ bme = readl_relaxed(dev->dm_core +
+ PCIE20_COMMAND_STATUS) & BIT(2);
+ }
+ } else {
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: EP_PCIE_OPT_ENUM_ASYNC is true.\n",
+ dev->rev);
+ bme = readl_relaxed(dev->dm_core +
+ PCIE20_COMMAND_STATUS) & BIT(2);
+ }
+
+ if (bme) {
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: PCIe link is up and BME is enabled after %d checkings (%d ms).\n",
+ dev->rev, retries,
+ BME_TIMEOUT_US_MIN * retries / 1000);
+ ep_pcie_enumeration_complete(dev);
+ /* expose BAR to user space to identify modem */
+ ep_pcie_bar0_address =
+ readl_relaxed(dev->dm_core + PCIE20_BAR0);
+ } else {
+ if (!(opt & EP_PCIE_OPT_ENUM_ASYNC))
+ EP_PCIE_ERR(dev,
+ "PCIe V%d: PCIe link is up but BME is still disabled after max waiting time.\n",
+ dev->rev);
+ if (!ep_pcie_debug_keep_resource &&
+ !(opt&EP_PCIE_OPT_ENUM_ASYNC)) {
+ ret = EP_PCIE_ERROR;
+ dev->link_status = EP_PCIE_LINK_DISABLED;
+ goto link_fail;
+ }
+ }
+
+ dev->suspending = false;
+ goto out;
+
+link_fail:
+ dev->power_on = false;
+ if (!ep_pcie_debug_keep_resource)
+ ep_pcie_pipe_clk_deinit(dev);
+pipe_clk_fail:
+ if (!ep_pcie_debug_keep_resource)
+ ep_pcie_clk_deinit(dev);
+clk_fail:
+ if (!ep_pcie_debug_keep_resource)
+ ep_pcie_vreg_deinit(dev);
+ else
+ ret = 0;
+out:
+ mutex_unlock(&dev->setup_mtx);
+
+ return ret;
+}
+
+/*
+ * ep_pcie_core_disable_endpoint - power down the endpoint link.
+ *
+ * Marks the link disabled and releases pipe clocks, core clocks and
+ * regulators in reverse enable order.  A no-op (returning 0) when the
+ * link is already powered off.  Serialized by dev->setup_mtx.
+ */
+int ep_pcie_core_disable_endpoint(void)
+{
+ int rc = 0;
+ struct ep_pcie_dev_t *dev = &ep_pcie_dev;
+
+ EP_PCIE_DBG(dev, "PCIe V%d\n", dev->rev);
+
+ mutex_lock(&dev->setup_mtx);
+
+ if (!dev->power_on) {
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: the link is already power down.\n",
+ dev->rev);
+ goto out;
+ }
+
+ dev->link_status = EP_PCIE_LINK_DISABLED;
+ dev->power_on = false;
+
+ EP_PCIE_DBG(dev, "PCIe V%d: shut down the link.\n",
+ dev->rev);
+
+ ep_pcie_pipe_clk_deinit(dev);
+ ep_pcie_clk_deinit(dev);
+ ep_pcie_vreg_deinit(dev);
+out:
+ mutex_unlock(&dev->setup_mtx);
+ return rc;
+}
+
+/*
+ * ep_pcie_core_mask_irq_event - (un)mask one aggregated IRQ event.
+ * @event:  event bit index within PCIE20_PARF_INT_ALL_MASK.
+ * @enable: true to unmask (set the bit), false to mask (clear it).
+ *
+ * Only valid when the controller uses a single aggregated IRQ line;
+ * otherwise returns EP_PCIE_ERROR.  Protected by dev->ext_lock.
+ */
+int ep_pcie_core_mask_irq_event(enum ep_pcie_irq_event event,
+ bool enable)
+{
+ int rc = 0;
+ struct ep_pcie_dev_t *dev = &ep_pcie_dev;
+ unsigned long irqsave_flags;
+ u32 mask = 0;
+
+ EP_PCIE_DUMP(dev,
+ "PCIe V%d: Client askes to %s IRQ event 0x%x.\n",
+ dev->rev,
+ enable ? "enable" : "disable",
+ event);
+
+ spin_lock_irqsave(&dev->ext_lock, irqsave_flags);
+
+ if (dev->aggregated_irq) {
+ mask = readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_MASK);
+ EP_PCIE_DUMP(dev,
+ "PCIe V%d: current PCIE20_PARF_INT_ALL_MASK:0x%x\n",
+ dev->rev, mask);
+ if (enable)
+ ep_pcie_write_mask(dev->parf + PCIE20_PARF_INT_ALL_MASK,
+ 0, BIT(event));
+ else
+ ep_pcie_write_mask(dev->parf + PCIE20_PARF_INT_ALL_MASK,
+ BIT(event), 0);
+ EP_PCIE_DUMP(dev,
+ "PCIe V%d: new PCIE20_PARF_INT_ALL_MASK:0x%x\n",
+ dev->rev,
+ readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_MASK));
+ } else {
+ EP_PCIE_ERR(dev,
+ "PCIe V%d: Client askes to %s IRQ event 0x%x when aggregated IRQ is not supported.\n",
+ dev->rev,
+ enable ? "enable" : "disable",
+ event);
+ rc = EP_PCIE_ERROR;
+ }
+
+ spin_unlock_irqrestore(&dev->ext_lock, irqsave_flags);
+ return rc;
+}
+
+/*
+ * ep_pcie_handle_bme_irq - IRQ handler for Bus Master Enable changes.
+ *
+ * If BME (COMMAND_STATUS BIT(2)) is now set: on first-time enumeration
+ * the heavier completion work is deferred to handle_bme_work; if already
+ * enumerated the client is notified of link-up directly.  Spurious
+ * interrupts with BME still clear are only logged.
+ */
+static irqreturn_t ep_pcie_handle_bme_irq(int irq, void *data)
+{
+ struct ep_pcie_dev_t *dev = data;
+ unsigned long irqsave_flags;
+
+ spin_lock_irqsave(&dev->isr_lock, irqsave_flags);
+
+ dev->bme_counter++;
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: No. %ld BME IRQ.\n", dev->rev, dev->bme_counter);
+
+ if (readl_relaxed(dev->dm_core + PCIE20_COMMAND_STATUS) & BIT(2)) {
+ /* BME has been enabled */
+ if (!dev->enumerated) {
+ EP_PCIE_DBG(dev,
+ "PCIe V%d:BME is set. Enumeration is complete\n",
+ dev->rev);
+ schedule_work(&dev->handle_bme_work);
+ } else {
+ EP_PCIE_DBG(dev,
+ "PCIe V%d:BME is set again after the enumeration has completed; callback client for link ready.\n",
+ dev->rev);
+ ep_pcie_notify_event(dev, EP_PCIE_EVENT_LINKUP);
+ }
+ } else {
+ EP_PCIE_DBG(dev,
+ "PCIe V%d:BME is still disabled\n", dev->rev);
+ }
+
+ spin_unlock_irqrestore(&dev->isr_lock, irqsave_flags);
+ return IRQ_HANDLED;
+}
+
+/*
+ * ep_pcie_handle_linkdown_irq - IRQ handler for link-down events.
+ *
+ * Ignored (logged only) when the link is not yet enumerated, already
+ * disabled, or in the middle of a suspend; otherwise the link is marked
+ * disabled, PHY/PARF registers are dumped for postmortem, and the client
+ * is notified with EP_PCIE_EVENT_LINKDOWN.
+ */
+static irqreturn_t ep_pcie_handle_linkdown_irq(int irq, void *data)
+{
+ struct ep_pcie_dev_t *dev = data;
+ unsigned long irqsave_flags;
+
+ spin_lock_irqsave(&dev->isr_lock, irqsave_flags);
+
+ dev->linkdown_counter++;
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: No. %ld linkdown IRQ.\n",
+ dev->rev, dev->linkdown_counter);
+
+ if (!dev->enumerated || dev->link_status == EP_PCIE_LINK_DISABLED) {
+ EP_PCIE_DBG(dev,
+ "PCIe V%d:Linkdown IRQ happened when the link is disabled.\n",
+ dev->rev);
+ } else if (dev->suspending) {
+ EP_PCIE_DBG(dev,
+ "PCIe V%d:Linkdown IRQ happened when the link is suspending.\n",
+ dev->rev);
+ } else {
+ dev->link_status = EP_PCIE_LINK_DISABLED;
+ EP_PCIE_ERR(dev, "PCIe V%d:PCIe link is down for %ld times\n",
+ dev->rev, dev->linkdown_counter);
+ ep_pcie_reg_dump(dev, BIT(EP_PCIE_RES_PHY) |
+ BIT(EP_PCIE_RES_PARF), true);
+ ep_pcie_notify_event(dev, EP_PCIE_EVENT_LINKDOWN);
+ }
+
+ spin_unlock_irqrestore(&dev->isr_lock, irqsave_flags);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * ep_pcie_handle_linkup_irq - IRQ handler for link-up: bumps the
+ * counter and records EP_PCIE_LINK_UP; no client notification here
+ * (that happens on BME).
+ */
+static irqreturn_t ep_pcie_handle_linkup_irq(int irq, void *data)
+{
+ struct ep_pcie_dev_t *dev = data;
+ unsigned long irqsave_flags;
+
+ spin_lock_irqsave(&dev->isr_lock, irqsave_flags);
+
+ dev->linkup_counter++;
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: No. %ld linkup IRQ.\n",
+ dev->rev, dev->linkup_counter);
+
+ dev->link_status = EP_PCIE_LINK_UP;
+
+ spin_unlock_irqrestore(&dev->isr_lock, irqsave_flags);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * ep_pcie_handle_pm_turnoff_irq - IRQ handler for PME_Turn_Off from the
+ * host: acknowledges by requesting L23 entry via PARF_PM_CTRL BIT(2).
+ */
+static irqreturn_t ep_pcie_handle_pm_turnoff_irq(int irq, void *data)
+{
+ struct ep_pcie_dev_t *dev = data;
+ unsigned long irqsave_flags;
+
+ spin_lock_irqsave(&dev->isr_lock, irqsave_flags);
+
+ dev->pm_to_counter++;
+ EP_PCIE_DBG2(dev,
+ "PCIe V%d: No. %ld PM_TURNOFF is received.\n",
+ dev->rev, dev->pm_to_counter);
+ EP_PCIE_DBG2(dev, "PCIe V%d: Put the link into L23.\n", dev->rev);
+ ep_pcie_write_mask(dev->parf + PCIE20_PARF_PM_CTRL, 0, BIT(2));
+
+ spin_unlock_irqrestore(&dev->isr_lock, irqsave_flags);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * ep_pcie_handle_dstate_change_irq - IRQ handler for D-state changes.
+ *
+ * Reads the power state field (CON_STATUS & 0x3): 3 means the host put
+ * the device in D3hot (mark l23_ready, set PM_CTRL BIT(1), notify
+ * PM_D3_HOT); 0 means back to D0 (clear l23_ready, notify PM_D0).  Any
+ * other value is logged as invalid.
+ */
+static irqreturn_t ep_pcie_handle_dstate_change_irq(int irq, void *data)
+{
+ struct ep_pcie_dev_t *dev = data;
+ unsigned long irqsave_flags;
+ u32 dstate;
+
+ spin_lock_irqsave(&dev->isr_lock, irqsave_flags);
+
+ dstate = readl_relaxed(dev->dm_core +
+ PCIE20_CON_STATUS) & 0x3;
+
+ if (dev->dump_conf)
+ ep_pcie_reg_dump(dev, BIT(EP_PCIE_RES_DM_CORE), false);
+
+ if (dstate == 3) {
+ dev->l23_ready = true;
+ dev->d3_counter++;
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: No. %ld change to D3 state.\n",
+ dev->rev, dev->d3_counter);
+ ep_pcie_write_mask(dev->parf + PCIE20_PARF_PM_CTRL, 0, BIT(1));
+ ep_pcie_notify_event(dev, EP_PCIE_EVENT_PM_D3_HOT);
+ } else if (dstate == 0) {
+ dev->l23_ready = false;
+ dev->d0_counter++;
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: No. %ld change to D0 state.\n",
+ dev->rev, dev->d0_counter);
+ ep_pcie_notify_event(dev, EP_PCIE_EVENT_PM_D0);
+ } else {
+ EP_PCIE_ERR(dev,
+ "PCIe V%d:invalid D state change to 0x%x.\n",
+ dev->rev, dstate);
+ }
+
+ spin_unlock_irqrestore(&dev->isr_lock, irqsave_flags);
+
+ return IRQ_HANDLED;
+}
+
+static int ep_pcie_enumeration(struct ep_pcie_dev_t *dev)
+{
+ int ret = 0;
+
+ if (!dev) {
+ EP_PCIE_ERR(&ep_pcie_dev,
+ "PCIe V%d: the input handler is NULL.\n",
+ ep_pcie_dev.rev);
+ return EP_PCIE_ERROR;
+ }
+
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: start PCIe link enumeration per host side.\n",
+ dev->rev);
+
+ ret = ep_pcie_core_enable_endpoint(EP_PCIE_OPT_ALL);
+
+ if (ret) {
+ EP_PCIE_ERR(&ep_pcie_dev,
+ "PCIe V%d: PCIe link enumeration failed.\n",
+ ep_pcie_dev.rev);
+ } else {
+ if (dev->link_status == EP_PCIE_LINK_ENABLED) {
+ EP_PCIE_INFO(&ep_pcie_dev,
+ "PCIe V%d: PCIe link enumeration is successful with host side.\n",
+ ep_pcie_dev.rev);
+ } else if (dev->link_status == EP_PCIE_LINK_UP) {
+ EP_PCIE_INFO(&ep_pcie_dev,
+ "PCIe V%d: PCIe link training is successful with host side. Waiting for enumeration to complete.\n",
+ ep_pcie_dev.rev);
+ } else {
+ EP_PCIE_ERR(&ep_pcie_dev,
+ "PCIe V%d: PCIe link is in the unexpected status: %d\n",
+ ep_pcie_dev.rev, dev->link_status);
+ }
+ }
+
+ return ret;
+}
+
+static void handle_perst_func(struct work_struct *work)
+{
+ struct ep_pcie_dev_t *dev = container_of(work, struct ep_pcie_dev_t,
+ handle_perst_work);
+
+ ep_pcie_enumeration(dev);
+}
+
+static void handle_bme_func(struct work_struct *work)
+{
+ struct ep_pcie_dev_t *dev = container_of(work,
+ struct ep_pcie_dev_t, handle_bme_work);
+
+ ep_pcie_enumeration_complete(dev);
+}
+
+static irqreturn_t ep_pcie_handle_perst_irq(int irq, void *data)
+{
+ struct ep_pcie_dev_t *dev = data;
+ unsigned long irqsave_flags;
+ u32 perst;
+
+ spin_lock_irqsave(&dev->isr_lock, irqsave_flags);
+
+ perst = gpio_get_value(dev->gpio[EP_PCIE_GPIO_PERST].num);
+
+ if (!dev->enumerated) {
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: PCIe is not enumerated yet; PERST is %sasserted.\n",
+ dev->rev, perst ? "de" : "");
+ if ((!dev->perst_enum) || !perst)
+ goto out;
+ /* start work for link enumeration with the host side */
+ schedule_work(&dev->handle_perst_work);
+
+ goto out;
+ }
+
+ if (perst) {
+ dev->perst_deast = true;
+ dev->perst_deast_counter++;
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: No. %ld PERST deassertion.\n",
+ dev->rev, dev->perst_deast_counter);
+ ep_pcie_notify_event(dev, EP_PCIE_EVENT_PM_RST_DEAST);
+ } else {
+ dev->perst_deast = false;
+ dev->perst_ast_counter++;
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: No. %ld PERST assertion.\n",
+ dev->rev, dev->perst_ast_counter);
+ ep_pcie_notify_event(dev, EP_PCIE_EVENT_PM_D3_COLD);
+ }
+
+out:
+ spin_unlock_irqrestore(&dev->isr_lock, irqsave_flags);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ep_pcie_handle_global_irq(int irq, void *data)
+{
+ struct ep_pcie_dev_t *dev = data;
+ int i;
+ u32 status = readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_STATUS);
+ u32 mask = readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_MASK);
+
+ ep_pcie_write_mask(dev->parf + PCIE20_PARF_INT_ALL_CLEAR, 0, status);
+
+ dev->global_irq_counter++;
+ EP_PCIE_DUMP(dev,
+ "PCIe V%d: No. %ld Global IRQ %d received; status:0x%x; mask:0x%x.\n",
+ dev->rev, dev->global_irq_counter, irq, status, mask);
+ status &= mask;
+
+ for (i = 1; i <= EP_PCIE_INT_EVT_MAX; i++) {
+ if (status & BIT(i)) {
+ switch (i) {
+ case EP_PCIE_INT_EVT_LINK_DOWN:
+ EP_PCIE_DUMP(dev,
+ "PCIe V%d: handle linkdown event.\n",
+ dev->rev);
+ ep_pcie_handle_linkdown_irq(irq, data);
+ break;
+ case EP_PCIE_INT_EVT_BME:
+ EP_PCIE_DUMP(dev,
+ "PCIe V%d: handle BME event.\n",
+ dev->rev);
+ ep_pcie_handle_bme_irq(irq, data);
+ break;
+ case EP_PCIE_INT_EVT_PM_TURNOFF:
+ EP_PCIE_DUMP(dev,
+ "PCIe V%d: handle PM Turn-off event.\n",
+ dev->rev);
+ ep_pcie_handle_pm_turnoff_irq(irq, data);
+ break;
+ case EP_PCIE_INT_EVT_MHI_A7:
+ EP_PCIE_DUMP(dev,
+ "PCIe V%d: handle MHI A7 event.\n",
+ dev->rev);
+ ep_pcie_notify_event(dev, EP_PCIE_EVENT_MHI_A7);
+ break;
+ case EP_PCIE_INT_EVT_DSTATE_CHANGE:
+ EP_PCIE_DUMP(dev,
+ "PCIe V%d: handle D state chagge event.\n",
+ dev->rev);
+ ep_pcie_handle_dstate_change_irq(irq, data);
+ break;
+ case EP_PCIE_INT_EVT_LINK_UP:
+ EP_PCIE_DUMP(dev,
+ "PCIe V%d: handle linkup event.\n",
+ dev->rev);
+ ep_pcie_handle_linkup_irq(irq, data);
+ break;
+ default:
+ EP_PCIE_ERR(dev,
+ "PCIe V%d: Unexpected event %d is caught!\n",
+ dev->rev, i);
+ }
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+int32_t ep_pcie_irq_init(struct ep_pcie_dev_t *dev)
+{
+ int ret;
+ struct device *pdev = &dev->pdev->dev;
+ u32 perst_irq;
+
+ EP_PCIE_DBG(dev, "PCIe V%d\n", dev->rev);
+
+ /* Initialize all works to be performed before registering for IRQs*/
+ INIT_WORK(&dev->handle_perst_work, handle_perst_func);
+ INIT_WORK(&dev->handle_bme_work, handle_bme_func);
+
+ if (dev->aggregated_irq) {
+ ret = devm_request_irq(pdev,
+ dev->irq[EP_PCIE_INT_GLOBAL].num,
+ ep_pcie_handle_global_irq,
+ IRQF_TRIGGER_HIGH, dev->irq[EP_PCIE_INT_GLOBAL].name,
+ dev);
+ if (ret) {
+ EP_PCIE_ERR(dev,
+ "PCIe V%d: Unable to request global interrupt %d\n",
+ dev->rev, dev->irq[EP_PCIE_INT_GLOBAL].num);
+ return ret;
+ }
+
+ ret = enable_irq_wake(dev->irq[EP_PCIE_INT_GLOBAL].num);
+ if (ret) {
+ EP_PCIE_ERR(dev,
+ "PCIe V%d: Unable to enable wake for Global interrupt\n",
+ dev->rev);
+ return ret;
+ }
+
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: request global interrupt %d\n",
+ dev->rev, dev->irq[EP_PCIE_INT_GLOBAL].num);
+ goto perst_irq;
+ }
+
+ /* register handler for BME interrupt */
+ ret = devm_request_irq(pdev,
+ dev->irq[EP_PCIE_INT_BME].num,
+ ep_pcie_handle_bme_irq,
+ IRQF_TRIGGER_RISING, dev->irq[EP_PCIE_INT_BME].name,
+ dev);
+ if (ret) {
+ EP_PCIE_ERR(dev,
+ "PCIe V%d: Unable to request BME interrupt %d\n",
+ dev->rev, dev->irq[EP_PCIE_INT_BME].num);
+ return ret;
+ }
+
+ ret = enable_irq_wake(dev->irq[EP_PCIE_INT_BME].num);
+ if (ret) {
+ EP_PCIE_ERR(dev,
+ "PCIe V%d: Unable to enable wake for BME interrupt\n",
+ dev->rev);
+ return ret;
+ }
+
+ /* register handler for linkdown interrupt */
+ ret = devm_request_irq(pdev,
+ dev->irq[EP_PCIE_INT_LINK_DOWN].num,
+ ep_pcie_handle_linkdown_irq,
+ IRQF_TRIGGER_RISING, dev->irq[EP_PCIE_INT_LINK_DOWN].name,
+ dev);
+ if (ret) {
+ EP_PCIE_ERR(dev,
+ "PCIe V%d: Unable to request linkdown interrupt %d\n",
+ dev->rev, dev->irq[EP_PCIE_INT_LINK_DOWN].num);
+ return ret;
+ }
+
+ /* register handler for linkup interrupt */
+ ret = devm_request_irq(pdev,
+ dev->irq[EP_PCIE_INT_LINK_UP].num, ep_pcie_handle_linkup_irq,
+ IRQF_TRIGGER_RISING, dev->irq[EP_PCIE_INT_LINK_UP].name,
+ dev);
+ if (ret) {
+ EP_PCIE_ERR(dev,
+ "PCIe V%d: Unable to request linkup interrupt %d\n",
+ dev->rev, dev->irq[EP_PCIE_INT_LINK_UP].num);
+ return ret;
+ }
+
+ /* register handler for PM_TURNOFF interrupt */
+ ret = devm_request_irq(pdev,
+ dev->irq[EP_PCIE_INT_PM_TURNOFF].num,
+ ep_pcie_handle_pm_turnoff_irq,
+ IRQF_TRIGGER_RISING, dev->irq[EP_PCIE_INT_PM_TURNOFF].name,
+ dev);
+ if (ret) {
+ EP_PCIE_ERR(dev,
+ "PCIe V%d: Unable to request PM_TURNOFF interrupt %d\n",
+ dev->rev, dev->irq[EP_PCIE_INT_PM_TURNOFF].num);
+ return ret;
+ }
+
+ /* register handler for D state change interrupt */
+ ret = devm_request_irq(pdev,
+ dev->irq[EP_PCIE_INT_DSTATE_CHANGE].num,
+ ep_pcie_handle_dstate_change_irq,
+ IRQF_TRIGGER_RISING, dev->irq[EP_PCIE_INT_DSTATE_CHANGE].name,
+ dev);
+ if (ret) {
+ EP_PCIE_ERR(dev,
+ "PCIe V%d: Unable to request D state change interrupt %d\n",
+ dev->rev, dev->irq[EP_PCIE_INT_DSTATE_CHANGE].num);
+ return ret;
+ }
+
+perst_irq:
+ /* register handler for PERST interrupt */
+ perst_irq = gpio_to_irq(dev->gpio[EP_PCIE_GPIO_PERST].num);
+ ret = devm_request_irq(pdev, perst_irq,
+ ep_pcie_handle_perst_irq,
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+ "ep_pcie_perst", dev);
+ if (ret) {
+ EP_PCIE_ERR(dev,
+ "PCIe V%d: Unable to request PERST interrupt %d\n",
+ dev->rev, perst_irq);
+ return ret;
+ }
+
+ ret = enable_irq_wake(perst_irq);
+ if (ret) {
+ EP_PCIE_ERR(dev,
+ "PCIe V%d: Unable to enable PERST interrupt %d\n",
+ dev->rev, perst_irq);
+ return ret;
+ }
+
+ return 0;
+}
+
+void ep_pcie_irq_deinit(struct ep_pcie_dev_t *dev)
+{
+ EP_PCIE_DBG(dev, "PCIe V%d\n", dev->rev);
+
+ disable_irq(gpio_to_irq(dev->gpio[EP_PCIE_GPIO_PERST].num));
+}
+
+int ep_pcie_core_register_event(struct ep_pcie_register_event *reg)
+{
+ if (!reg) {
+ EP_PCIE_ERR(&ep_pcie_dev,
+ "PCIe V%d: Event registration is NULL\n",
+ ep_pcie_dev.rev);
+ return -ENODEV;
+ }
+
+ if (!reg->user) {
+ EP_PCIE_ERR(&ep_pcie_dev,
+ "PCIe V%d: User of event registration is NULL\n",
+ ep_pcie_dev.rev);
+ return -ENODEV;
+ }
+
+ ep_pcie_dev.event_reg = reg;
+ EP_PCIE_DBG(&ep_pcie_dev,
+ "PCIe V%d: Event 0x%x is registered\n",
+ ep_pcie_dev.rev, reg->events);
+
+ return 0;
+}
+
+int ep_pcie_core_deregister_event(void)
+{
+ if (ep_pcie_dev.event_reg) {
+ EP_PCIE_DBG(&ep_pcie_dev,
+ "PCIe V%d: current registered events:0x%x; events are deregistered.\n",
+ ep_pcie_dev.rev, ep_pcie_dev.event_reg->events);
+ ep_pcie_dev.event_reg = NULL;
+ } else {
+ EP_PCIE_ERR(&ep_pcie_dev,
+ "PCIe V%d: Event registration is NULL\n",
+ ep_pcie_dev.rev);
+ }
+
+ return 0;
+}
+
+enum ep_pcie_link_status ep_pcie_core_get_linkstatus(void)
+{
+ struct ep_pcie_dev_t *dev = &ep_pcie_dev;
+ u32 bme;
+
+ if (!dev->power_on || (dev->link_status == EP_PCIE_LINK_DISABLED)) {
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: PCIe endpoint is not powered on.\n",
+ dev->rev);
+ return EP_PCIE_LINK_DISABLED;
+ }
+
+ bme = readl_relaxed(dev->dm_core +
+ PCIE20_COMMAND_STATUS) & BIT(2);
+ if (bme) {
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: PCIe link is up and BME is enabled; current SW link status:%d.\n",
+ dev->rev, dev->link_status);
+ dev->link_status = EP_PCIE_LINK_ENABLED;
+ } else {
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: PCIe link is up but BME is disabled; current SW link status:%d.\n",
+ dev->rev, dev->link_status);
+ dev->link_status = EP_PCIE_LINK_UP;
+ }
+ return dev->link_status;
+}
+
+int ep_pcie_core_config_outbound_iatu(struct ep_pcie_iatu entries[],
+ u32 num_entries)
+{
+ u32 data_start = 0;
+ u32 data_end = 0;
+ u32 data_tgt_lower = 0;
+ u32 data_tgt_upper = 0;
+ u32 ctrl_start = 0;
+ u32 ctrl_end = 0;
+ u32 ctrl_tgt_lower = 0;
+ u32 ctrl_tgt_upper = 0;
+ u32 upper = 0;
+ bool once = true;
+
+ if (ep_pcie_dev.active_config) {
+ upper = EP_PCIE_OATU_UPPER;
+ if (once) {
+ once = false;
+ EP_PCIE_DBG2(&ep_pcie_dev,
+ "PCIe V%d: No outbound iATU config is needed since active config is enabled.\n",
+ ep_pcie_dev.rev);
+ }
+ }
+
+ if ((num_entries > MAX_IATU_ENTRY_NUM) || !num_entries) {
+ EP_PCIE_ERR(&ep_pcie_dev,
+ "PCIe V%d: Wrong iATU entry number %d.\n",
+ ep_pcie_dev.rev, num_entries);
+ return EP_PCIE_ERROR;
+ }
+
+ data_start = entries[0].start;
+ data_end = entries[0].end;
+ data_tgt_lower = entries[0].tgt_lower;
+ data_tgt_upper = entries[0].tgt_upper;
+
+ if (num_entries > 1) {
+ ctrl_start = entries[1].start;
+ ctrl_end = entries[1].end;
+ ctrl_tgt_lower = entries[1].tgt_lower;
+ ctrl_tgt_upper = entries[1].tgt_upper;
+ }
+
+ EP_PCIE_DBG(&ep_pcie_dev,
+ "PCIe V%d: data_start:0x%x; data_end:0x%x; data_tgt_lower:0x%x; data_tgt_upper:0x%x; ctrl_start:0x%x; ctrl_end:0x%x; ctrl_tgt_lower:0x%x; ctrl_tgt_upper:0x%x.\n",
+ ep_pcie_dev.rev, data_start, data_end, data_tgt_lower,
+ data_tgt_upper, ctrl_start, ctrl_end, ctrl_tgt_lower,
+ ctrl_tgt_upper);
+
+
+ if ((ctrl_end < data_start) || (data_end < ctrl_start)) {
+ EP_PCIE_DBG(&ep_pcie_dev,
+ "PCIe V%d: iATU configuration case No. 1: detached.\n",
+ ep_pcie_dev.rev);
+ ep_pcie_config_outbound_iatu_entry(&ep_pcie_dev,
+ EP_PCIE_OATU_INDEX_DATA,
+ data_start, upper, data_end,
+ data_tgt_lower, data_tgt_upper);
+ ep_pcie_config_outbound_iatu_entry(&ep_pcie_dev,
+ EP_PCIE_OATU_INDEX_CTRL,
+ ctrl_start, upper, ctrl_end,
+ ctrl_tgt_lower, ctrl_tgt_upper);
+ } else if ((data_start <= ctrl_start) && (ctrl_end <= data_end)) {
+ EP_PCIE_DBG(&ep_pcie_dev,
+ "PCIe V%d: iATU configuration case No. 2: included.\n",
+ ep_pcie_dev.rev);
+ ep_pcie_config_outbound_iatu_entry(&ep_pcie_dev,
+ EP_PCIE_OATU_INDEX_DATA,
+ data_start, upper, data_end,
+ data_tgt_lower, data_tgt_upper);
+ } else {
+ EP_PCIE_DBG(&ep_pcie_dev,
+ "PCIe V%d: iATU configuration case No. 3: overlap.\n",
+ ep_pcie_dev.rev);
+ ep_pcie_config_outbound_iatu_entry(&ep_pcie_dev,
+ EP_PCIE_OATU_INDEX_CTRL,
+ ctrl_start, upper, ctrl_end,
+ ctrl_tgt_lower, ctrl_tgt_upper);
+ ep_pcie_config_outbound_iatu_entry(&ep_pcie_dev,
+ EP_PCIE_OATU_INDEX_DATA,
+ data_start, upper, data_end,
+ data_tgt_lower, data_tgt_upper);
+ }
+
+ return 0;
+}
+
+int ep_pcie_core_get_msi_config(struct ep_pcie_msi_config *cfg)
+{
+ u32 cap, lower, upper, data, ctrl_reg;
+ static u32 changes;
+
+ if (ep_pcie_dev.link_status == EP_PCIE_LINK_DISABLED) {
+ EP_PCIE_ERR(&ep_pcie_dev,
+ "PCIe V%d: PCIe link is currently disabled.\n",
+ ep_pcie_dev.rev);
+ return EP_PCIE_ERROR;
+ }
+
+ cap = readl_relaxed(ep_pcie_dev.dm_core + PCIE20_MSI_CAP_ID_NEXT_CTRL);
+ EP_PCIE_DBG(&ep_pcie_dev, "PCIe V%d: MSI CAP:0x%x\n",
+ ep_pcie_dev.rev, cap);
+
+ if (!(cap & BIT(16))) {
+ EP_PCIE_ERR(&ep_pcie_dev,
+ "PCIe V%d: MSI is not enabled yet.\n",
+ ep_pcie_dev.rev);
+ return EP_PCIE_ERROR;
+ }
+
+ lower = readl_relaxed(ep_pcie_dev.dm_core + PCIE20_MSI_LOWER);
+ upper = readl_relaxed(ep_pcie_dev.dm_core + PCIE20_MSI_UPPER);
+ data = readl_relaxed(ep_pcie_dev.dm_core + PCIE20_MSI_DATA);
+ ctrl_reg = readl_relaxed(ep_pcie_dev.dm_core +
+ PCIE20_MSI_CAP_ID_NEXT_CTRL);
+
+ EP_PCIE_DBG(&ep_pcie_dev,
+ "PCIe V%d: MSI info: lower:0x%x; upper:0x%x; data:0x%x.\n",
+ ep_pcie_dev.rev, lower, upper, data);
+
+ if (ctrl_reg & BIT(16)) {
+ struct resource *msi =
+ ep_pcie_dev.res[EP_PCIE_RES_MSI].resource;
+ if (ep_pcie_dev.active_config)
+ ep_pcie_config_outbound_iatu_entry(&ep_pcie_dev,
+ EP_PCIE_OATU_INDEX_MSI,
+ msi->start, EP_PCIE_OATU_UPPER,
+ msi->end, lower, upper);
+ else
+ ep_pcie_config_outbound_iatu_entry(&ep_pcie_dev,
+ EP_PCIE_OATU_INDEX_MSI,
+ msi->start, 0, msi->end,
+ lower, upper);
+
+ if (ep_pcie_dev.active_config) {
+ cfg->lower = lower;
+ cfg->upper = upper;
+ } else {
+ cfg->lower = msi->start + (lower & 0xfff);
+ cfg->upper = 0;
+ }
+ cfg->data = data;
+ cfg->msg_num = (cap >> 20) & 0x7;
+ if ((lower != ep_pcie_dev.msi_cfg.lower)
+ || (upper != ep_pcie_dev.msi_cfg.upper)
+ || (data != ep_pcie_dev.msi_cfg.data)
+ || (cfg->msg_num != ep_pcie_dev.msi_cfg.msg_num)) {
+ changes++;
+ EP_PCIE_DBG(&ep_pcie_dev,
+ "PCIe V%d: MSI config has been changed by host side for %d time(s).\n",
+ ep_pcie_dev.rev, changes);
+ EP_PCIE_DBG(&ep_pcie_dev,
+ "PCIe V%d: old MSI cfg: lower:0x%x; upper:0x%x; data:0x%x; msg_num:0x%x.\n",
+ ep_pcie_dev.rev, ep_pcie_dev.msi_cfg.lower,
+ ep_pcie_dev.msi_cfg.upper,
+ ep_pcie_dev.msi_cfg.data,
+ ep_pcie_dev.msi_cfg.msg_num);
+ ep_pcie_dev.msi_cfg.lower = lower;
+ ep_pcie_dev.msi_cfg.upper = upper;
+ ep_pcie_dev.msi_cfg.data = data;
+ ep_pcie_dev.msi_cfg.msg_num = cfg->msg_num;
+ }
+ return 0;
+ }
+
+ EP_PCIE_ERR(&ep_pcie_dev,
+ "PCIe V%d: Wrong MSI info found when MSI is enabled: lower:0x%x; data:0x%x.\n",
+ ep_pcie_dev.rev, lower, data);
+ return EP_PCIE_ERROR;
+}
+
+int ep_pcie_core_trigger_msi(u32 idx)
+{
+ u32 addr, data, ctrl_reg;
+ int max_poll = MSI_EXIT_L1SS_WAIT_MAX_COUNT;
+
+ if (ep_pcie_dev.link_status == EP_PCIE_LINK_DISABLED) {
+ EP_PCIE_ERR(&ep_pcie_dev,
+ "PCIe V%d: PCIe link is currently disabled.\n",
+ ep_pcie_dev.rev);
+ return EP_PCIE_ERROR;
+ }
+
+ addr = readl_relaxed(ep_pcie_dev.dm_core + PCIE20_MSI_LOWER);
+ data = readl_relaxed(ep_pcie_dev.dm_core + PCIE20_MSI_DATA);
+ ctrl_reg = readl_relaxed(ep_pcie_dev.dm_core +
+ PCIE20_MSI_CAP_ID_NEXT_CTRL);
+
+ if (ctrl_reg & BIT(16)) {
+ ep_pcie_dev.msi_counter++;
+ EP_PCIE_DUMP(&ep_pcie_dev,
+ "PCIe V%d: No. %ld MSI fired for IRQ %d; index from client:%d; active-config is %s enabled.\n",
+ ep_pcie_dev.rev, ep_pcie_dev.msi_counter,
+ data + idx, idx,
+ ep_pcie_dev.active_config ? "" : "not");
+
+ if (ep_pcie_dev.active_config) {
+ u32 status;
+
+ if (ep_pcie_dev.msi_counter % 2) {
+ EP_PCIE_DBG2(&ep_pcie_dev,
+ "PCIe V%d: try to trigger MSI by PARF_MSI_GEN.\n",
+ ep_pcie_dev.rev);
+ ep_pcie_write_reg(ep_pcie_dev.parf,
+ PCIE20_PARF_MSI_GEN, idx);
+ status = readl_relaxed(ep_pcie_dev.parf +
+ PCIE20_PARF_LTR_MSI_EXIT_L1SS);
+ while ((status & BIT(1)) && (max_poll-- > 0)) {
+ udelay(MSI_EXIT_L1SS_WAIT);
+ status = readl_relaxed(ep_pcie_dev.parf
+ +
+ PCIE20_PARF_LTR_MSI_EXIT_L1SS);
+ }
+				if (max_poll < 0)
+ EP_PCIE_DBG2(&ep_pcie_dev,
+ "PCIe V%d: MSI_EXIT_L1SS is not cleared yet.\n",
+ ep_pcie_dev.rev);
+ else
+ EP_PCIE_DBG2(&ep_pcie_dev,
+ "PCIe V%d: MSI_EXIT_L1SS has been cleared.\n",
+ ep_pcie_dev.rev);
+ } else {
+ EP_PCIE_DBG2(&ep_pcie_dev,
+ "PCIe V%d: try to trigger MSI by direct address write as well.\n",
+ ep_pcie_dev.rev);
+ ep_pcie_write_reg(ep_pcie_dev.msi, addr & 0xfff,
+ data + idx);
+ }
+ } else {
+ ep_pcie_write_reg(ep_pcie_dev.msi, addr & 0xfff, data
+ + idx);
+ }
+ return 0;
+ }
+
+ EP_PCIE_ERR(&ep_pcie_dev,
+ "PCIe V%d: MSI is not enabled yet. MSI addr:0x%x; data:0x%x; index from client:%d.\n",
+ ep_pcie_dev.rev, addr, data, idx);
+ return EP_PCIE_ERROR;
+}
+
+int ep_pcie_core_wakeup_host(void)
+{
+ struct ep_pcie_dev_t *dev = &ep_pcie_dev;
+
+ if (dev->perst_deast && !dev->l23_ready) {
+ EP_PCIE_ERR(dev,
+ "PCIe V%d: request to assert WAKE# when PERST is de-asserted and D3hot is not received.\n",
+ dev->rev);
+ return EP_PCIE_ERROR;
+ }
+
+ dev->wake_counter++;
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: No. %ld to assert PCIe WAKE#; perst is %s de-asserted; D3hot is %s received.\n",
+ dev->rev, dev->wake_counter,
+ dev->perst_deast ? "" : "not",
+ dev->l23_ready ? "" : "not");
+ gpio_set_value(dev->gpio[EP_PCIE_GPIO_WAKE].num,
+ 1 - dev->gpio[EP_PCIE_GPIO_WAKE].on);
+ gpio_set_value(dev->gpio[EP_PCIE_GPIO_WAKE].num,
+ dev->gpio[EP_PCIE_GPIO_WAKE].on);
+ return 0;
+}
+
+int ep_pcie_core_config_db_routing(struct ep_pcie_db_config chdb_cfg,
+ struct ep_pcie_db_config erdb_cfg)
+{
+ u32 dbs = (erdb_cfg.end << 24) | (erdb_cfg.base << 16) |
+ (chdb_cfg.end << 8) | chdb_cfg.base;
+
+ ep_pcie_write_reg(ep_pcie_dev.parf, PCIE20_PARF_MHI_IPA_DBS, dbs);
+ ep_pcie_write_reg(ep_pcie_dev.parf,
+ PCIE20_PARF_MHI_IPA_CDB_TARGET_LOWER,
+ chdb_cfg.tgt_addr);
+ ep_pcie_write_reg(ep_pcie_dev.parf,
+ PCIE20_PARF_MHI_IPA_EDB_TARGET_LOWER,
+ erdb_cfg.tgt_addr);
+
+ EP_PCIE_DBG(&ep_pcie_dev,
+ "PCIe V%d: DB routing info: chdb_cfg.base:0x%x; chdb_cfg.end:0x%x; erdb_cfg.base:0x%x; erdb_cfg.end:0x%x; chdb_cfg.tgt_addr:0x%x; erdb_cfg.tgt_addr:0x%x.\n",
+ ep_pcie_dev.rev, chdb_cfg.base, chdb_cfg.end, erdb_cfg.base,
+ erdb_cfg.end, chdb_cfg.tgt_addr, erdb_cfg.tgt_addr);
+
+ return 0;
+}
+
+struct ep_pcie_hw hw_drv = {
+ .register_event = ep_pcie_core_register_event,
+ .deregister_event = ep_pcie_core_deregister_event,
+ .get_linkstatus = ep_pcie_core_get_linkstatus,
+ .config_outbound_iatu = ep_pcie_core_config_outbound_iatu,
+ .get_msi_config = ep_pcie_core_get_msi_config,
+ .trigger_msi = ep_pcie_core_trigger_msi,
+ .wakeup_host = ep_pcie_core_wakeup_host,
+ .config_db_routing = ep_pcie_core_config_db_routing,
+ .enable_endpoint = ep_pcie_core_enable_endpoint,
+ .disable_endpoint = ep_pcie_core_disable_endpoint,
+ .mask_irq_event = ep_pcie_core_mask_irq_event,
+};
+
+static int ep_pcie_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ pr_debug("%s\n", __func__);
+
+ ep_pcie_dev.link_speed = 1;
+ ret = of_property_read_u32((&pdev->dev)->of_node,
+ "qcom,pcie-link-speed",
+ &ep_pcie_dev.link_speed);
+ if (ret)
+ EP_PCIE_DBG(&ep_pcie_dev,
+ "PCIe V%d: pcie-link-speed does not exist.\n",
+ ep_pcie_dev.rev);
+ else
+ EP_PCIE_DBG(&ep_pcie_dev, "PCIe V%d: pcie-link-speed:%d.\n",
+ ep_pcie_dev.rev, ep_pcie_dev.link_speed);
+
+ ret = of_property_read_u32((&pdev->dev)->of_node,
+ "qcom,dbi-base-reg",
+ &ep_pcie_dev.dbi_base_reg);
+ if (ret)
+ EP_PCIE_DBG(&ep_pcie_dev,
+ "PCIe V%d: dbi-base-reg does not exist.\n",
+ ep_pcie_dev.rev);
+ else
+ EP_PCIE_DBG(&ep_pcie_dev, "PCIe V%d: dbi-base-reg:0x%x.\n",
+ ep_pcie_dev.rev, ep_pcie_dev.dbi_base_reg);
+
+ ret = of_property_read_u32((&pdev->dev)->of_node,
+ "qcom,slv-space-reg",
+ &ep_pcie_dev.slv_space_reg);
+ if (ret)
+ EP_PCIE_DBG(&ep_pcie_dev,
+ "PCIe V%d: slv-space-reg does not exist.\n",
+ ep_pcie_dev.rev);
+ else
+ EP_PCIE_DBG(&ep_pcie_dev, "PCIe V%d: slv-space-reg:0x%x.\n",
+ ep_pcie_dev.rev, ep_pcie_dev.slv_space_reg);
+
+ ret = of_property_read_u32((&pdev->dev)->of_node,
+ "qcom,phy-status-reg",
+ &ep_pcie_dev.phy_status_reg);
+ if (ret)
+ EP_PCIE_DBG(&ep_pcie_dev,
+ "PCIe V%d: phy-status-reg does not exist.\n",
+ ep_pcie_dev.rev);
+ else
+ EP_PCIE_DBG(&ep_pcie_dev, "PCIe V%d: phy-status-reg:0x%x.\n",
+ ep_pcie_dev.rev, ep_pcie_dev.phy_status_reg);
+
+ ep_pcie_dev.phy_rev = 1;
+ ret = of_property_read_u32((&pdev->dev)->of_node,
+ "qcom,pcie-phy-ver",
+ &ep_pcie_dev.phy_rev);
+ if (ret)
+ EP_PCIE_DBG(&ep_pcie_dev,
+ "PCIe V%d: pcie-phy-ver does not exist.\n",
+ ep_pcie_dev.rev);
+ else
+ EP_PCIE_DBG(&ep_pcie_dev, "PCIe V%d: pcie-phy-ver:%d.\n",
+ ep_pcie_dev.rev, ep_pcie_dev.phy_rev);
+
+ ep_pcie_dev.active_config = of_property_read_bool((&pdev->dev)->of_node,
+ "qcom,pcie-active-config");
+ EP_PCIE_DBG(&ep_pcie_dev,
+ "PCIe V%d: active config is %s enabled.\n",
+ ep_pcie_dev.rev, ep_pcie_dev.active_config ? "" : "not");
+
+ ep_pcie_dev.aggregated_irq =
+ of_property_read_bool((&pdev->dev)->of_node,
+ "qcom,pcie-aggregated-irq");
+ EP_PCIE_DBG(&ep_pcie_dev,
+ "PCIe V%d: aggregated IRQ is %s enabled.\n",
+ ep_pcie_dev.rev, ep_pcie_dev.aggregated_irq ? "" : "not");
+
+ ep_pcie_dev.mhi_a7_irq =
+ of_property_read_bool((&pdev->dev)->of_node,
+ "qcom,pcie-mhi-a7-irq");
+ EP_PCIE_DBG(&ep_pcie_dev,
+ "PCIe V%d: Mhi a7 IRQ is %s enabled.\n",
+ ep_pcie_dev.rev, ep_pcie_dev.mhi_a7_irq ? "" : "not");
+
+ ep_pcie_dev.perst_enum = of_property_read_bool((&pdev->dev)->of_node,
+ "qcom,pcie-perst-enum");
+ EP_PCIE_DBG(&ep_pcie_dev,
+ "PCIe V%d: enum by PERST is %s enabled.\n",
+ ep_pcie_dev.rev, ep_pcie_dev.perst_enum ? "" : "not");
+
+ ep_pcie_dev.rev = 1711211;
+ ep_pcie_dev.pdev = pdev;
+ memcpy(ep_pcie_dev.vreg, ep_pcie_vreg_info,
+ sizeof(ep_pcie_vreg_info));
+ memcpy(ep_pcie_dev.gpio, ep_pcie_gpio_info,
+ sizeof(ep_pcie_gpio_info));
+ memcpy(ep_pcie_dev.clk, ep_pcie_clk_info,
+ sizeof(ep_pcie_clk_info));
+ memcpy(ep_pcie_dev.pipeclk, ep_pcie_pipe_clk_info,
+ sizeof(ep_pcie_pipe_clk_info));
+ memcpy(ep_pcie_dev.reset, ep_pcie_reset_info,
+ sizeof(ep_pcie_reset_info));
+ memcpy(ep_pcie_dev.res, ep_pcie_res_info,
+ sizeof(ep_pcie_res_info));
+ memcpy(ep_pcie_dev.irq, ep_pcie_irq_info,
+ sizeof(ep_pcie_irq_info));
+
+ ret = ep_pcie_get_resources(&ep_pcie_dev,
+ ep_pcie_dev.pdev);
+ if (ret) {
+ EP_PCIE_ERR(&ep_pcie_dev,
+ "PCIe V%d: failed to get resources.\n",
+ ep_pcie_dev.rev);
+ goto res_failure;
+ }
+
+ ret = ep_pcie_gpio_init(&ep_pcie_dev);
+ if (ret) {
+ EP_PCIE_ERR(&ep_pcie_dev,
+ "PCIe V%d: failed to init GPIO.\n",
+ ep_pcie_dev.rev);
+ ep_pcie_release_resources(&ep_pcie_dev);
+ goto gpio_failure;
+ }
+
+ ret = ep_pcie_irq_init(&ep_pcie_dev);
+ if (ret) {
+ EP_PCIE_ERR(&ep_pcie_dev,
+ "PCIe V%d: failed to init IRQ.\n",
+ ep_pcie_dev.rev);
+ ep_pcie_release_resources(&ep_pcie_dev);
+ ep_pcie_gpio_deinit(&ep_pcie_dev);
+ goto irq_failure;
+ }
+
+ if (ep_pcie_dev.perst_enum &&
+ !gpio_get_value(ep_pcie_dev.gpio[EP_PCIE_GPIO_PERST].num)) {
+ EP_PCIE_DBG2(&ep_pcie_dev,
+ "PCIe V%d: %s probe is done; link will be trained when PERST is deasserted.\n",
+ ep_pcie_dev.rev, dev_name(&(pdev->dev)));
+ return 0;
+ }
+
+ EP_PCIE_DBG(&ep_pcie_dev,
+ "PCIe V%d: %s got resources successfully; start turning on the link.\n",
+ ep_pcie_dev.rev, dev_name(&(pdev->dev)));
+
+ ret = ep_pcie_enumeration(&ep_pcie_dev);
+
+ if (!ret || ep_pcie_debug_keep_resource)
+ return 0;
+
+ ep_pcie_irq_deinit(&ep_pcie_dev);
+irq_failure:
+ ep_pcie_gpio_deinit(&ep_pcie_dev);
+gpio_failure:
+ ep_pcie_release_resources(&ep_pcie_dev);
+res_failure:
+ EP_PCIE_ERR(&ep_pcie_dev, "PCIe V%d: Driver probe failed:%d\n",
+ ep_pcie_dev.rev, ret);
+
+ return ret;
+}
+
+static int __exit ep_pcie_remove(struct platform_device *pdev)
+{
+ pr_debug("%s\n", __func__);
+
+ ep_pcie_irq_deinit(&ep_pcie_dev);
+ ep_pcie_vreg_deinit(&ep_pcie_dev);
+ ep_pcie_pipe_clk_deinit(&ep_pcie_dev);
+ ep_pcie_clk_deinit(&ep_pcie_dev);
+ ep_pcie_gpio_deinit(&ep_pcie_dev);
+ ep_pcie_release_resources(&ep_pcie_dev);
+ ep_pcie_deregister_drv(&hw_drv);
+
+ return 0;
+}
+
+static const struct of_device_id ep_pcie_match[] = {
+ { .compatible = "qcom,pcie-ep",
+ },
+ {}
+};
+
+static struct platform_driver ep_pcie_driver = {
+ .probe = ep_pcie_probe,
+ .remove = ep_pcie_remove,
+ .driver = {
+ .name = "pcie-ep",
+ .owner = THIS_MODULE,
+ .of_match_table = ep_pcie_match,
+ },
+};
+
+static int __init ep_pcie_init(void)
+{
+ int ret;
+ char logname[MAX_NAME_LEN];
+
+ pr_debug("%s\n", __func__);
+
+ snprintf(logname, MAX_NAME_LEN, "ep-pcie-long");
+ ep_pcie_dev.ipc_log_sel =
+ ipc_log_context_create(EP_PCIE_LOG_PAGES, logname, 0);
+ if (ep_pcie_dev.ipc_log_sel == NULL)
+ pr_err("%s: unable to create IPC selected log for %s\n",
+ __func__, logname);
+ else
+ EP_PCIE_DBG(&ep_pcie_dev,
+ "PCIe V%d: IPC selected logging is enable for %s\n",
+ ep_pcie_dev.rev, logname);
+
+ snprintf(logname, MAX_NAME_LEN, "ep-pcie-short");
+ ep_pcie_dev.ipc_log_ful =
+ ipc_log_context_create(EP_PCIE_LOG_PAGES * 2, logname, 0);
+ if (ep_pcie_dev.ipc_log_ful == NULL)
+ pr_err("%s: unable to create IPC detailed log for %s\n",
+ __func__, logname);
+ else
+ EP_PCIE_DBG(&ep_pcie_dev,
+ "PCIe V%d: IPC detailed logging is enable for %s\n",
+ ep_pcie_dev.rev, logname);
+
+ snprintf(logname, MAX_NAME_LEN, "ep-pcie-dump");
+ ep_pcie_dev.ipc_log_dump =
+ ipc_log_context_create(EP_PCIE_LOG_PAGES, logname, 0);
+ if (ep_pcie_dev.ipc_log_dump == NULL)
+ pr_err("%s: unable to create IPC dump log for %s\n",
+ __func__, logname);
+ else
+ EP_PCIE_DBG(&ep_pcie_dev,
+ "PCIe V%d: IPC dump logging is enable for %s\n",
+ ep_pcie_dev.rev, logname);
+
+ mutex_init(&ep_pcie_dev.setup_mtx);
+ mutex_init(&ep_pcie_dev.ext_mtx);
+ spin_lock_init(&ep_pcie_dev.ext_lock);
+ spin_lock_init(&ep_pcie_dev.isr_lock);
+
+ ep_pcie_debugfs_init(&ep_pcie_dev);
+
+ ret = platform_driver_register(&ep_pcie_driver);
+
+ if (ret)
+ EP_PCIE_ERR(&ep_pcie_dev,
+ "PCIe V%d: failed register platform driver:%d\n",
+ ep_pcie_dev.rev, ret);
+ else
+ EP_PCIE_DBG(&ep_pcie_dev,
+ "PCIe V%d: platform driver is registered.\n",
+ ep_pcie_dev.rev);
+
+ return ret;
+}
+
+static void __exit ep_pcie_exit(void)
+{
+ pr_debug("%s\n", __func__);
+
+ ipc_log_context_destroy(ep_pcie_dev.ipc_log_sel);
+ ipc_log_context_destroy(ep_pcie_dev.ipc_log_ful);
+ ipc_log_context_destroy(ep_pcie_dev.ipc_log_dump);
+
+ ep_pcie_debugfs_exit();
+
+ platform_driver_unregister(&ep_pcie_driver);
+}
+
+module_init(ep_pcie_init);
+module_exit(ep_pcie_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM PCIe Endpoint Driver");
diff --git a/drivers/platform/msm/ep_pcie/ep_pcie_dbg.c b/drivers/platform/msm/ep_pcie/ep_pcie_dbg.c
new file mode 100644
index 0000000..1f09a88
--- /dev/null
+++ b/drivers/platform/msm/ep_pcie/ep_pcie_dbg.c
@@ -0,0 +1,459 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Debugging enhancement in MSM PCIe endpoint driver.
+ */
+
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include "ep_pcie_com.h"
+#include "ep_pcie_phy.h"
+
+/* debugfs directory ("pcie-ep") and its single "case" control file */
+static struct dentry *dent_ep_pcie;
+static struct dentry *dfile_case;
+/* device handle cached by ep_pcie_debugfs_init() for the debugfs ops */
+static struct ep_pcie_dev_t *dev;
+
+/*
+ * Program the four PCS TEST_CONTROL selector registers and dump the
+ * resulting PCS DEBUG_BUS_[0-3]_STATUS values via EP_PCIE_DUMP.
+ *
+ * When all four selectors are zero this only clears the test control
+ * registers and returns without dumping status.
+ */
+static void ep_ep_pcie_phy_dump_pcs_debug_bus(struct ep_pcie_dev_t *dev,
+					u32 cntrl4, u32 cntrl5,
+					u32 cntrl6, u32 cntrl7)
+{
+	ep_pcie_write_reg(dev->phy, PCIE_PHY_TEST_CONTROL4, cntrl4);
+	ep_pcie_write_reg(dev->phy, PCIE_PHY_TEST_CONTROL5, cntrl5);
+	ep_pcie_write_reg(dev->phy, PCIE_PHY_TEST_CONTROL6, cntrl6);
+	ep_pcie_write_reg(dev->phy, PCIE_PHY_TEST_CONTROL7, cntrl7);
+
+	if (!cntrl4 && !cntrl5 && !cntrl6 && !cntrl7) {
+		EP_PCIE_DUMP(dev,
+			"PCIe V%d: zero out test control registers.\n\n",
+			dev->rev);
+		return;
+	}
+
+	/* Read the selectors back so the dump shows what was latched. */
+	EP_PCIE_DUMP(dev,
+		"PCIe V%d: PCIE_PHY_TEST_CONTROL4: 0x%x\n", dev->rev,
+		readl_relaxed(dev->phy + PCIE_PHY_TEST_CONTROL4));
+	EP_PCIE_DUMP(dev,
+		"PCIe V%d: PCIE_PHY_TEST_CONTROL5: 0x%x\n", dev->rev,
+		readl_relaxed(dev->phy + PCIE_PHY_TEST_CONTROL5));
+	EP_PCIE_DUMP(dev,
+		"PCIe V%d: PCIE_PHY_TEST_CONTROL6: 0x%x\n", dev->rev,
+		readl_relaxed(dev->phy + PCIE_PHY_TEST_CONTROL6));
+	EP_PCIE_DUMP(dev,
+		"PCIe V%d: PCIE_PHY_TEST_CONTROL7: 0x%x\n", dev->rev,
+		readl_relaxed(dev->phy + PCIE_PHY_TEST_CONTROL7));
+
+	EP_PCIE_DUMP(dev,
+		"PCIe V%d: PCIE_PHY_DEBUG_BUS_0_STATUS: 0x%x\n", dev->rev,
+		readl_relaxed(dev->phy + PCIE_PHY_DEBUG_BUS_0_STATUS));
+	EP_PCIE_DUMP(dev,
+		"PCIe V%d: PCIE_PHY_DEBUG_BUS_1_STATUS: 0x%x\n", dev->rev,
+		readl_relaxed(dev->phy + PCIE_PHY_DEBUG_BUS_1_STATUS));
+	EP_PCIE_DUMP(dev,
+		"PCIe V%d: PCIE_PHY_DEBUG_BUS_2_STATUS: 0x%x\n", dev->rev,
+		readl_relaxed(dev->phy + PCIE_PHY_DEBUG_BUS_2_STATUS));
+	EP_PCIE_DUMP(dev,
+		"PCIe V%d: PCIE_PHY_DEBUG_BUS_3_STATUS: 0x%x\n\n", dev->rev,
+		readl_relaxed(dev->phy + PCIE_PHY_DEBUG_BUS_3_STATUS));
+}
+
+/*
+ * Program the four PCS MISC debug-bus byte-index registers and dump the
+ * resulting MISC_DEBUG_BUS_[0-3]_STATUS values via EP_PCIE_DUMP.
+ *
+ * When all four indices are zero this only clears the index registers
+ * and returns without dumping status.
+ */
+static void ep_ep_pcie_phy_dump_pcs_misc_debug_bus(struct ep_pcie_dev_t *dev,
+					u32 b0, u32 b1, u32 b2, u32 b3)
+{
+	ep_pcie_write_reg(dev->phy, PCIE_PHY_MISC_DEBUG_BUS_BYTE0_INDEX, b0);
+	ep_pcie_write_reg(dev->phy, PCIE_PHY_MISC_DEBUG_BUS_BYTE1_INDEX, b1);
+	ep_pcie_write_reg(dev->phy, PCIE_PHY_MISC_DEBUG_BUS_BYTE2_INDEX, b2);
+	ep_pcie_write_reg(dev->phy, PCIE_PHY_MISC_DEBUG_BUS_BYTE3_INDEX, b3);
+
+	if (!b0 && !b1 && !b2 && !b3) {
+		EP_PCIE_DUMP(dev,
+			"PCIe V%d: zero out misc debug bus byte index registers.\n\n",
+			dev->rev);
+		return;
+	}
+
+	/* Read the index registers back so the dump shows what was latched. */
+	EP_PCIE_DUMP(dev,
+		"PCIe V%d: PCIE_PHY_MISC_DEBUG_BUS_BYTE0_INDEX: 0x%x\n",
+		dev->rev,
+		readl_relaxed(dev->phy + PCIE_PHY_MISC_DEBUG_BUS_BYTE0_INDEX));
+	EP_PCIE_DUMP(dev,
+		"PCIe V%d: PCIE_PHY_MISC_DEBUG_BUS_BYTE1_INDEX: 0x%x\n",
+		dev->rev,
+		readl_relaxed(dev->phy + PCIE_PHY_MISC_DEBUG_BUS_BYTE1_INDEX));
+	EP_PCIE_DUMP(dev,
+		"PCIe V%d: PCIE_PHY_MISC_DEBUG_BUS_BYTE2_INDEX: 0x%x\n",
+		dev->rev,
+		readl_relaxed(dev->phy + PCIE_PHY_MISC_DEBUG_BUS_BYTE2_INDEX));
+	EP_PCIE_DUMP(dev,
+		"PCIe V%d: PCIE_PHY_MISC_DEBUG_BUS_BYTE3_INDEX: 0x%x\n",
+		dev->rev,
+		readl_relaxed(dev->phy + PCIE_PHY_MISC_DEBUG_BUS_BYTE3_INDEX));
+
+	EP_PCIE_DUMP(dev,
+		"PCIe V%d: PCIE_PHY_MISC_DEBUG_BUS_0_STATUS: 0x%x\n", dev->rev,
+		readl_relaxed(dev->phy + PCIE_PHY_MISC_DEBUG_BUS_0_STATUS));
+	EP_PCIE_DUMP(dev,
+		"PCIe V%d: PCIE_PHY_MISC_DEBUG_BUS_1_STATUS: 0x%x\n", dev->rev,
+		readl_relaxed(dev->phy + PCIE_PHY_MISC_DEBUG_BUS_1_STATUS));
+	EP_PCIE_DUMP(dev,
+		"PCIe V%d: PCIE_PHY_MISC_DEBUG_BUS_2_STATUS: 0x%x\n", dev->rev,
+		readl_relaxed(dev->phy + PCIE_PHY_MISC_DEBUG_BUS_2_STATUS));
+	EP_PCIE_DUMP(dev,
+		"PCIe V%d: PCIE_PHY_MISC_DEBUG_BUS_3_STATUS: 0x%x\n\n",
+		dev->rev,
+		readl_relaxed(dev->phy + PCIE_PHY_MISC_DEBUG_BUS_3_STATUS));
+}
+
+/*
+ * Emit a full PHY debug dump through EP_PCIE_DUMP, walking in turn:
+ * the PCS debug bus (several selector sets), the PCS misc debug bus,
+ * the QSERDES common block debug bus (selectors 0x2-0x3) and the
+ * QSERDES lane/TX debug bus (selectors 0x1-0x3).
+ *
+ * NOTE(review): the PCS, misc and COM selector registers are zeroed
+ * again before returning, but QSERDES_TX_DEBUG_BUS_SEL is left at its
+ * last value (0x3) — confirm whether that is intentional.
+ */
+static void ep_pcie_phy_dump(struct ep_pcie_dev_t *dev)
+{
+	int i;
+	u32 write_val;
+
+	EP_PCIE_DUMP(dev, "PCIe V%d: Beginning of PHY debug dump.\n\n",
+			dev->rev);
+
+	EP_PCIE_DUMP(dev, "PCIe V%d: PCS Debug Signals.\n\n", dev->rev);
+
+	ep_ep_pcie_phy_dump_pcs_debug_bus(dev, 0x01, 0x02, 0x03, 0x0A);
+	ep_ep_pcie_phy_dump_pcs_debug_bus(dev, 0x0E, 0x0F, 0x12, 0x13);
+	ep_ep_pcie_phy_dump_pcs_debug_bus(dev, 0x18, 0x19, 0x1A, 0x1B);
+	ep_ep_pcie_phy_dump_pcs_debug_bus(dev, 0x1C, 0x1D, 0x1E, 0x1F);
+	ep_ep_pcie_phy_dump_pcs_debug_bus(dev, 0x20, 0x21, 0x22, 0x23);
+	/* all-zero call clears the PCS test control registers */
+	ep_ep_pcie_phy_dump_pcs_debug_bus(dev, 0, 0, 0, 0);
+
+	EP_PCIE_DUMP(dev, "PCIe V%d: PCS Misc Debug Signals.\n\n", dev->rev);
+
+	ep_ep_pcie_phy_dump_pcs_misc_debug_bus(dev, 0x1, 0x2, 0x3, 0x4);
+	ep_ep_pcie_phy_dump_pcs_misc_debug_bus(dev, 0x5, 0x6, 0x7, 0x8);
+	/* all-zero call clears the misc byte-index registers */
+	ep_ep_pcie_phy_dump_pcs_misc_debug_bus(dev, 0, 0, 0, 0);
+
+	EP_PCIE_DUMP(dev, "PCIe V%d: QSERDES COM Debug Signals.\n\n", dev->rev);
+
+	/* walk COM debug bus selectors 0x2 and 0x3 */
+	for (i = 0; i < 2; i++) {
+		write_val = 0x2 + i;
+
+		ep_pcie_write_reg(dev->phy, QSERDES_COM_DEBUG_BUS_SEL,
+					write_val);
+
+		EP_PCIE_DUMP(dev,
+			"PCIe V%d: to QSERDES_COM_DEBUG_BUS_SEL: 0x%x\n",
+			dev->rev,
+			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS_SEL));
+		EP_PCIE_DUMP(dev,
+			"PCIe V%d: QSERDES_COM_DEBUG_BUS0: 0x%x\n",
+			dev->rev,
+			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS0));
+		EP_PCIE_DUMP(dev,
+			"PCIe V%d: QSERDES_COM_DEBUG_BUS1: 0x%x\n",
+			dev->rev,
+			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS1));
+		EP_PCIE_DUMP(dev,
+			"PCIe V%d: QSERDES_COM_DEBUG_BUS2: 0x%x\n",
+			dev->rev,
+			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS2));
+		EP_PCIE_DUMP(dev,
+			"PCIe V%d: QSERDES_COM_DEBUG_BUS3: 0x%x\n\n",
+			dev->rev,
+			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS3));
+	}
+
+	ep_pcie_write_reg(dev->phy, QSERDES_COM_DEBUG_BUS_SEL, 0);
+
+	EP_PCIE_DUMP(dev, "PCIe V%d: QSERDES LANE Debug Signals.\n\n",
+			dev->rev);
+
+	/* walk TX debug bus selectors 0x1..0x3, dumping via the PCS bus */
+	for (i = 0; i < 3; i++) {
+		write_val = 0x1 + i;
+		ep_pcie_write_reg(dev->phy,
+			QSERDES_TX_DEBUG_BUS_SEL, write_val);
+		EP_PCIE_DUMP(dev,
+			"PCIe V%d: QSERDES_TX_DEBUG_BUS_SEL: 0x%x\n",
+			dev->rev,
+			readl_relaxed(dev->phy + QSERDES_TX_DEBUG_BUS_SEL));
+
+		ep_ep_pcie_phy_dump_pcs_debug_bus(dev, 0x30, 0x31, 0x32, 0x33);
+	}
+
+	ep_ep_pcie_phy_dump_pcs_debug_bus(dev, 0, 0, 0, 0);
+
+	EP_PCIE_DUMP(dev, "PCIe V%d: End of PHY debug dump.\n\n", dev->rev);
+
+}
+
+/*
+ * Dump the registers of every resource selected in @sel (a bitmask over
+ * the EP_PCIE_RES_* indices), 32 bytes (8 words) per output line.
+ *
+ * @linkdown: when true, additionally walk PARF test-bus selectors
+ * 1..0x1A through PARF_SYS_CTRL bits [23:16] and dump PARF_TEST_BUS for
+ * each, restoring the original PARF_SYS_CTRL value afterwards.
+ *
+ * Bails out early (with an error log) when the device power is off,
+ * since register reads would be invalid.  For the PHY resource on
+ * phy_rev > 3 the structured PHY debug dump is emitted first.
+ */
+void ep_pcie_reg_dump(struct ep_pcie_dev_t *dev, u32 sel, bool linkdown)
+{
+	int r, i;
+	u32 original;
+	u32 size;
+
+	EP_PCIE_DBG(dev,
+		"PCIe V%d: Dump PCIe reg for 0x%x %s linkdown.\n",
+		dev->rev, sel, linkdown ? "with" : "without");
+
+	if (!dev->power_on) {
+		EP_PCIE_ERR(dev,
+			"PCIe V%d: the power is already down; can't dump registers.\n",
+			dev->rev);
+		return;
+	}
+
+	if (linkdown) {
+		EP_PCIE_DUMP(dev,
+			"PCIe V%d: dump PARF registers for linkdown case.\n",
+			dev->rev);
+
+		original = readl_relaxed(dev->parf + PCIE20_PARF_SYS_CTRL);
+		for (i = 1; i <= 0x1A; i++) {
+			ep_pcie_write_mask(dev->parf + PCIE20_PARF_SYS_CTRL,
+				0xFF0000, i << 16);
+			EP_PCIE_DUMP(dev,
+				"PCIe V%d: PARF_SYS_CTRL:0x%x PARF_TEST_BUS:0x%x\n",
+				dev->rev,
+				readl_relaxed(dev->parf + PCIE20_PARF_SYS_CTRL),
+				readl_relaxed(dev->parf +
+					PCIE20_PARF_TEST_BUS));
+		}
+		ep_pcie_write_reg(dev->parf, PCIE20_PARF_SYS_CTRL, original);
+	}
+
+	for (r = 0; r < EP_PCIE_MAX_RES; r++) {
+		if (!(sel & BIT(r)))
+			continue;
+
+		if ((r == EP_PCIE_RES_PHY) && (dev->phy_rev > 3))
+			ep_pcie_phy_dump(dev);
+
+		size = resource_size(dev->res[r].resource);
+		EP_PCIE_DUMP(dev,
+			"\nPCIe V%d: dump registers of %s.\n\n",
+			dev->rev, dev->res[r].name);
+
+		/* 8 consecutive 32-bit reads per line, offsets in hex */
+		for (i = 0; i < size; i += 32) {
+			EP_PCIE_DUMP(dev,
+				"0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
+				i, readl_relaxed(dev->res[r].base + i),
+				readl_relaxed(dev->res[r].base + (i + 4)),
+				readl_relaxed(dev->res[r].base + (i + 8)),
+				readl_relaxed(dev->res[r].base + (i + 12)),
+				readl_relaxed(dev->res[r].base + (i + 16)),
+				readl_relaxed(dev->res[r].base + (i + 20)),
+				readl_relaxed(dev->res[r].base + (i + 24)),
+				readl_relaxed(dev->res[r].base + (i + 28)));
+		}
+	}
+}
+
+/*
+ * Print a one-shot summary of driver state and event counters to the
+ * debugfs log (testcase 0 of the "case" file).
+ */
+static void ep_pcie_show_status(struct ep_pcie_dev_t *dev)
+{
+	EP_PCIE_DBG_FS("PCIe: is %s enumerated\n",
+		dev->enumerated ? "" : "not");
+	EP_PCIE_DBG_FS("PCIe: link is %s\n",
+		(dev->link_status == EP_PCIE_LINK_ENABLED)
+		? "enabled" : "disabled");
+	EP_PCIE_DBG_FS("the link is %s suspending\n",
+		dev->suspending ? "" : "not");
+	EP_PCIE_DBG_FS("the power is %s on\n",
+		dev->power_on ? "" : "not");
+	EP_PCIE_DBG_FS("bus_client: %d\n",
+		dev->bus_client);
+	EP_PCIE_DBG_FS("linkdown_counter: %lu\n",
+		dev->linkdown_counter);
+	EP_PCIE_DBG_FS("linkup_counter: %lu\n",
+		dev->linkup_counter);
+	EP_PCIE_DBG_FS("wake_counter: %lu\n",
+		dev->wake_counter);
+	EP_PCIE_DBG_FS("d0_counter: %lu\n",
+		dev->d0_counter);
+	EP_PCIE_DBG_FS("d3_counter: %lu\n",
+		dev->d3_counter);
+	EP_PCIE_DBG_FS("perst_ast_counter: %lu\n",
+		dev->perst_ast_counter);
+	EP_PCIE_DBG_FS("perst_deast_counter: %lu\n",
+		dev->perst_deast_counter);
+}
+
+/*
+ * Write handler for the debugfs "case" file.
+ *
+ * Parses a leading decimal testcase number from the user buffer and
+ * runs the matching debug action (status dump, register dumps, link
+ * enable/disable, MSI checks, D-state writes, WAKE#/PERST# control).
+ *
+ * Returns the number of bytes consumed, or -EFAULT if the copy from
+ * user space fails.
+ */
+static ssize_t ep_pcie_cmd_debug(struct file *file,
+				const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	unsigned long ret;
+	char str[MAX_MSG_LEN];
+	size_t len;
+	unsigned int testcase = 0;
+	struct ep_pcie_msi_config msi_cfg;
+	int i;
+	struct ep_pcie_hw *phandle = NULL;
+	struct ep_pcie_iatu entries[2] = {
+		{0x80000000, 0xbe7fffff, 0, 0},
+		{0xb1440000, 0xb144ae1e, 0x31440000, 0}
+	};
+	struct ep_pcie_db_config chdb_cfg = {0x64, 0x6b, 0xfd4fa000};
+	struct ep_pcie_db_config erdb_cfg = {0x64, 0x6b, 0xfd4fa080};
+
+	phandle = ep_pcie_get_phandle(hw_drv.device_id);
+
+	/*
+	 * Copy only what the user actually wrote: copying sizeof(str)
+	 * unconditionally can fault past the end of a short user buffer
+	 * and spuriously return -EFAULT.  Reserve the last byte so the
+	 * string stays NUL-terminated.
+	 */
+	memset(str, 0, sizeof(str));
+	len = min(count, sizeof(str) - 1);
+	ret = copy_from_user(str, buf, len);
+	if (ret)
+		return -EFAULT;
+
+	/* parse a leading run of decimal digits as the testcase number */
+	for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
+		testcase = (testcase * 10) + (str[i] - '0');
+
+	EP_PCIE_DBG_FS("PCIe: TEST: %d\n", testcase);
+
+	switch (testcase) {
+	case 0: /* output status */
+		ep_pcie_show_status(dev);
+		break;
+	case 1: /* output PHY and PARF registers */
+		ep_pcie_reg_dump(dev, BIT(EP_PCIE_RES_PHY) |
+				BIT(EP_PCIE_RES_PARF), true);
+		break;
+	case 2: /* output core registers */
+		ep_pcie_reg_dump(dev, BIT(EP_PCIE_RES_DM_CORE), false);
+		break;
+	case 3: /* output MMIO registers */
+		ep_pcie_reg_dump(dev, BIT(EP_PCIE_RES_MMIO), false);
+		break;
+	case 4: /* output ELBI registers */
+		ep_pcie_reg_dump(dev, BIT(EP_PCIE_RES_ELBI), false);
+		break;
+	case 5: /* output MSI registers */
+		ep_pcie_reg_dump(dev, BIT(EP_PCIE_RES_MSI), false);
+		break;
+	case 6: /* turn on link */
+		ep_pcie_enable_endpoint(phandle, EP_PCIE_OPT_ALL);
+		break;
+	case 7: /* enumeration */
+		ep_pcie_enable_endpoint(phandle, EP_PCIE_OPT_ENUM);
+		break;
+	case 8: /* turn off link */
+		ep_pcie_disable_endpoint(phandle);
+		break;
+	case 9: /* check MSI */
+		ep_pcie_get_msi_config(phandle, &msi_cfg);
+		break;
+	case 10: /* trigger MSI */
+		ep_pcie_trigger_msi(phandle, 0);
+		break;
+	case 11: /* indicate the status of PCIe link */
+		EP_PCIE_DBG_FS("\nPCIe: link status is %d.\n\n",
+			ep_pcie_get_linkstatus(phandle));
+		break;
+	case 12: /* configure outbound iATU */
+		ep_pcie_config_outbound_iatu(phandle, entries, 2);
+		break;
+	case 13: /* wake up the host */
+		ep_pcie_wakeup_host(phandle);
+		break;
+	case 14: /* Configure routing of doorbells */
+		ep_pcie_config_db_routing(phandle, chdb_cfg, erdb_cfg);
+		break;
+	case 21: /* write D3 */
+		EP_PCIE_DBG_FS("\nPCIe Testcase %d: write D3 to EP\n\n",
+			testcase);
+		EP_PCIE_DBG_FS("\nPCIe: 0x44 of EP is 0x%x before change\n\n",
+			readl_relaxed(dev->dm_core + 0x44));
+		ep_pcie_write_mask(dev->dm_core + 0x44, 0, 0x3);
+		EP_PCIE_DBG_FS("\nPCIe: 0x44 of EP is 0x%x now\n\n",
+			readl_relaxed(dev->dm_core + 0x44));
+		break;
+	case 22: /* write D0 */
+		EP_PCIE_DBG_FS("\nPCIe Testcase %d: write D0 to EP\n\n",
+			testcase);
+		EP_PCIE_DBG_FS("\nPCIe: 0x44 of EP is 0x%x before change\n\n",
+			readl_relaxed(dev->dm_core + 0x44));
+		ep_pcie_write_mask(dev->dm_core + 0x44, 0x3, 0);
+		EP_PCIE_DBG_FS("\nPCIe: 0x44 of EP is 0x%x now\n\n",
+			readl_relaxed(dev->dm_core + 0x44));
+		break;
+	case 23: /* assert wake */
+		EP_PCIE_DBG_FS("\nPCIe Testcase %d: assert wake\n\n",
+			testcase);
+		gpio_set_value(dev->gpio[EP_PCIE_GPIO_WAKE].num,
+				dev->gpio[EP_PCIE_GPIO_WAKE].on);
+		break;
+	case 24: /* deassert wake */
+		EP_PCIE_DBG_FS("\nPCIe Testcase %d: deassert wake\n\n",
+			testcase);
+		gpio_set_value(dev->gpio[EP_PCIE_GPIO_WAKE].num,
+				1 - dev->gpio[EP_PCIE_GPIO_WAKE].on);
+		break;
+	case 25: /* output PERST# status */
+		EP_PCIE_DBG_FS("\nPCIe: PERST# is %d.\n\n",
+			gpio_get_value(dev->gpio[EP_PCIE_GPIO_PERST].num));
+		break;
+	case 26: /* output WAKE# status */
+		EP_PCIE_DBG_FS("\nPCIe: WAKE# is %d.\n\n",
+			gpio_get_value(dev->gpio[EP_PCIE_GPIO_WAKE].num));
+		break;
+	case 31: /* output core registers when D3 hot is set by host*/
+		dev->dump_conf = true;
+		break;
+	case 32: /* do not output core registers when D3 hot is set by host*/
+		dev->dump_conf = false;
+		break;
+	default:
+		EP_PCIE_DBG_FS("PCIe: Invalid testcase: %d.\n", testcase);
+		break;
+	}
+
+	/* the copy already succeeded; report all input consumed */
+	return count;
+}
+
+/* debugfs "case" file ops: write-only interface driving the testcases */
+const struct file_operations ep_pcie_cmd_debug_ops = {
+	.write = ep_pcie_cmd_debug,
+};
+
+/*
+ * Create the debugfs directory "pcie-ep" and its "case" control file,
+ * caching @ep_dev for use by the file operations.
+ *
+ * debugfs_create_dir()/debugfs_create_file() return NULL on allocation
+ * failure and an ERR_PTR when debugfs is not available, so both
+ * conditions must be checked — the original IS_ERR()-only test let a
+ * NULL directory through, silently creating "case" at the debugfs root.
+ */
+void ep_pcie_debugfs_init(struct ep_pcie_dev_t *ep_dev)
+{
+	dev = ep_dev;
+	dent_ep_pcie = debugfs_create_dir("pcie-ep", NULL);
+	if (IS_ERR_OR_NULL(dent_ep_pcie)) {
+		EP_PCIE_ERR(dev,
+			"PCIe V%d: fail to create the folder for debug_fs.\n",
+			dev->rev);
+		return;
+	}
+
+	dfile_case = debugfs_create_file("case", 0664,
+					dent_ep_pcie, NULL,
+					&ep_pcie_cmd_debug_ops);
+	if (IS_ERR_OR_NULL(dfile_case)) {
+		EP_PCIE_ERR(dev,
+			"PCIe V%d: fail to create the file for case.\n",
+			dev->rev);
+		goto case_error;
+	}
+
+	EP_PCIE_DBG2(dev,
+		"PCIe V%d: debugfs is enabled.\n",
+		dev->rev);
+
+	return;
+
+case_error:
+	debugfs_remove(dent_ep_pcie);
+}
+
+/* Remove the "case" file and the "pcie-ep" debugfs directory. */
+void ep_pcie_debugfs_exit(void)
+{
+	debugfs_remove(dfile_case);
+	debugfs_remove(dent_ep_pcie);
+}
diff --git a/drivers/platform/msm/ep_pcie/ep_pcie_phy.c b/drivers/platform/msm/ep_pcie/ep_pcie_phy.c
new file mode 100644
index 0000000..776ef08
--- /dev/null
+++ b/drivers/platform/msm/ep_pcie/ep_pcie_phy.c
@@ -0,0 +1,160 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * MSM PCIe PHY endpoint mode
+ */
+
+#include "ep_pcie_com.h"
+#include "ep_pcie_phy.h"
+
+/*
+ * Initialize the PCIe endpoint QMP PHY.
+ *
+ * If the device tree supplied an init sequence (phy_init/phy_init_len),
+ * that sequence of {offset, val, delay} writes is applied verbatim and
+ * we return.  Otherwise the built-in register sequence below is used.
+ * The write order is hardware-mandated; do not reorder.
+ */
+void ep_pcie_phy_init(struct ep_pcie_dev_t *dev)
+{
+	switch (dev->phy_rev) {
+	case 3:
+		EP_PCIE_DBG(dev,
+			"PCIe V%d: PHY V%d: Initializing 20nm QMP phy - 100MHz\n",
+			dev->rev, dev->phy_rev);
+		break;
+	case 4:
+		EP_PCIE_DBG(dev,
+			"PCIe V%d: PHY V%d: Initializing 14nm QMP phy - 100MHz\n",
+			dev->rev, dev->phy_rev);
+		break;
+	case 5:
+		EP_PCIE_DBG(dev,
+			"PCIe V%d: PHY V%d: Initializing 10nm QMP phy - 100MHz\n",
+			dev->rev, dev->phy_rev);
+		break;
+	default:
+		/* unknown revision: log and fall through to the default
+		 * sequence anyway (no early return here)
+		 */
+		EP_PCIE_ERR(dev,
+			"PCIe V%d: Unexpected phy version %d is caught!\n",
+			dev->rev, dev->phy_rev);
+	}
+
+	if (dev->phy_init_len && dev->phy_init) {
+		int i;
+		struct ep_pcie_phy_info_t *phy_init;
+
+		EP_PCIE_DBG(dev,
+			"PCIe V%d: PHY V%d: process the sequence specified by DT.\n",
+			dev->rev, dev->phy_rev);
+
+		i = dev->phy_init_len;
+		phy_init = dev->phy_init;
+		while (i--) {
+			ep_pcie_write_reg(dev->phy,
+				phy_init->offset,
+				phy_init->val);
+			if (phy_init->delay)
+				usleep_range(phy_init->delay,
+					phy_init->delay + 1);
+			phy_init++;
+		}
+		return;
+	}
+
+	/* hold the PHY in reset and power it up before programming */
+	ep_pcie_write_reg(dev->phy, PCIE_PHY_SW_RESET, 0x01);
+	ep_pcie_write_reg(dev->phy, PCIE_PHY_POWER_DOWN_CONTROL, 0x01);
+
+	/* Common block settings */
+	ep_pcie_write_reg(dev->phy, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x18);
+	ep_pcie_write_reg(dev->phy, QSERDES_COM_CLK_ENABLE1, 0x00);
+	ep_pcie_write_reg(dev->phy, QSERDES_COM_BG_TRIM, 0x0F);
+	ep_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP_EN, 0x01);
+	ep_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_MAP, 0x00);
+	ep_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_TIMER1, 0xFF);
+	ep_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_TIMER2, 0x1F);
+	ep_pcie_write_reg(dev->phy, QSERDES_COM_CMN_CONFIG, 0x06);
+	ep_pcie_write_reg(dev->phy, QSERDES_COM_PLL_IVCO, 0x0F);
+	ep_pcie_write_reg(dev->phy, QSERDES_COM_HSCLK_SEL, 0x00);
+	ep_pcie_write_reg(dev->phy, QSERDES_COM_SVS_MODE_CLK_SEL, 0x01);
+	ep_pcie_write_reg(dev->phy, QSERDES_COM_CORE_CLK_EN, 0x20);
+	ep_pcie_write_reg(dev->phy, QSERDES_COM_CORECLK_DIV, 0x0A);
+	ep_pcie_write_reg(dev->phy, QSERDES_COM_RESETSM_CNTRL, 0x20);
+	ep_pcie_write_reg(dev->phy, QSERDES_COM_BG_TIMER, 0x01);
+
+	/* PLL Config Settings */
+	ep_pcie_write_reg(dev->phy, QSERDES_COM_SYSCLK_EN_SEL, 0x00);
+	ep_pcie_write_reg(dev->phy, QSERDES_COM_DEC_START_MODE0, 0x19);
+	ep_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START3_MODE0, 0x00);
+	ep_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START2_MODE0, 0x00);
+	ep_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00);
+	ep_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP3_MODE0, 0x00);
+	ep_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP2_MODE0, 0x02);
+	ep_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP1_MODE0, 0x7F);
+	ep_pcie_write_reg(dev->phy, QSERDES_COM_CLK_SELECT, 0x30);
+	ep_pcie_write_reg(dev->phy, QSERDES_COM_SYS_CLK_CTRL, 0x06);
+	ep_pcie_write_reg(dev->phy, QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1E);
+	ep_pcie_write_reg(dev->phy, QSERDES_COM_CP_CTRL_MODE0, 0x3F);
+	ep_pcie_write_reg(dev->phy, QSERDES_COM_PLL_RCTRL_MODE0, 0x1A);
+	ep_pcie_write_reg(dev->phy, QSERDES_COM_PLL_CCTRL_MODE0, 0x00);
+	ep_pcie_write_reg(dev->phy, QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x03);
+	ep_pcie_write_reg(dev->phy, QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0xFF);
+
+	/* TX settings */
+	ep_pcie_write_reg(dev->phy, QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN,
+				0x45);
+	ep_pcie_write_reg(dev->phy, QSERDES_TX_LANE_MODE, 0x06);
+	ep_pcie_write_reg(dev->phy, QSERDES_TX_RES_CODE_LANE_OFFSET, 0x02);
+	ep_pcie_write_reg(dev->phy, QSERDES_TX_RCV_DETECT_LVL_2, 0x12);
+
+	/* RX settings */
+	ep_pcie_write_reg(dev->phy, QSERDES_RX_SIGDET_ENABLES, 0x1C);
+	ep_pcie_write_reg(dev->phy, QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x14);
+	ep_pcie_write_reg(dev->phy, QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x01);
+	ep_pcie_write_reg(dev->phy, QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3, 0x00);
+	ep_pcie_write_reg(dev->phy, QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xDB);
+	ep_pcie_write_reg(dev->phy, QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE,
+				0x4B);
+	ep_pcie_write_reg(dev->phy, QSERDES_RX_UCDR_SO_GAIN, 0x04);
+	ep_pcie_write_reg(dev->phy, QSERDES_RX_UCDR_SO_GAIN_HALF, 0x04);
+
+	/* EP_REF_CLK settings */
+	ep_pcie_write_reg(dev->phy, QSERDES_COM_CLK_EP_DIV, 0x19);
+	ep_pcie_write_reg(dev->phy, PCIE_PHY_ENDPOINT_REFCLK_DRIVE, 0x00);
+
+	/* PCIE L1SS settings */
+	ep_pcie_write_reg(dev->phy, PCIE_PHY_PWRUP_RESET_DLY_TIME_AUXCLK, 0x40);
+	ep_pcie_write_reg(dev->phy, PCIE_PHY_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB,
+				0x00);
+	ep_pcie_write_reg(dev->phy, PCIE_PHY_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB,
+				0x40);
+	ep_pcie_write_reg(dev->phy, PCIE_PHY_LP_WAKEUP_DLY_TIME_AUXCLK_MSB,
+				0x00);
+	ep_pcie_write_reg(dev->phy, PCIE_PHY_LP_WAKEUP_DLY_TIME_AUXCLK, 0x40);
+	ep_pcie_write_reg(dev->phy, PCIE_PHY_PLL_LOCK_CHK_DLY_TIME, 0x73);
+
+	/* PCS settings */
+	ep_pcie_write_reg(dev->phy, PCIE_PHY_SIGDET_CNTRL, 0x07);
+	ep_pcie_write_reg(dev->phy, PCIE_PHY_RX_SIGDET_LVL, 0x99);
+	ep_pcie_write_reg(dev->phy, PCIE_PHY_TXDEEMPH_M6DB_V0, 0x15);
+	ep_pcie_write_reg(dev->phy, PCIE_PHY_TXDEEMPH_M3P5DB_V0, 0x0E);
+
+	/* release reset and start the PHY */
+	ep_pcie_write_reg(dev->phy, PCIE_PHY_SW_RESET, 0x00);
+	ep_pcie_write_reg(dev->phy, PCIE_PHY_START_CONTROL, 0x03);
+}
+
+/*
+ * Report whether the PHY is ready: true when bit 6 of the PCS status
+ * register is deasserted.  A DT-provided status register offset
+ * (dev->phy_status_reg) takes precedence over the default
+ * PCIE_PHY_PCS_STATUS offset.
+ */
+bool ep_pcie_phy_is_ready(struct ep_pcie_dev_t *dev)
+{
+	u32 offset = dev->phy_status_reg ?
+			dev->phy_status_reg : PCIE_PHY_PCS_STATUS;
+
+	return !(readl_relaxed(dev->phy + offset) & BIT(6));
+}
diff --git a/drivers/platform/msm/ep_pcie/ep_pcie_phy.h b/drivers/platform/msm/ep_pcie/ep_pcie_phy.h
new file mode 100644
index 0000000..c8f01de
--- /dev/null
+++ b/drivers/platform/msm/ep_pcie/ep_pcie_phy.h
@@ -0,0 +1,463 @@
+/* Copyright (c) 2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __EP_PCIE_PHY_H
+#define __EP_PCIE_PHY_H
+
+#define QSERDES_COM_ATB_SEL1 0x000
+#define QSERDES_COM_ATB_SEL2 0x004
+#define QSERDES_COM_FREQ_UPDATE 0x008
+#define QSERDES_COM_BG_TIMER 0x00C
+#define QSERDES_COM_SSC_EN_CENTER 0x010
+#define QSERDES_COM_SSC_ADJ_PER1 0x014
+#define QSERDES_COM_SSC_ADJ_PER2 0x018
+#define QSERDES_COM_SSC_PER1 0x01C
+#define QSERDES_COM_SSC_PER2 0x020
+#define QSERDES_COM_SSC_STEP_SIZE1 0x024
+#define QSERDES_COM_SSC_STEP_SIZE2 0x028
+#define QSERDES_COM_POST_DIV 0x02C
+#define QSERDES_COM_POST_DIV_MUX 0x030
+#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN 0x034
+#define QSERDES_COM_CLK_ENABLE1 0x038
+#define QSERDES_COM_SYS_CLK_CTRL 0x03C
+#define QSERDES_COM_SYSCLK_BUF_ENABLE 0x040
+#define QSERDES_COM_PLL_EN 0x044
+#define QSERDES_COM_PLL_IVCO 0x048
+#define QSERDES_COM_LOCK_CMP1_MODE0 0x04C
+#define QSERDES_COM_LOCK_CMP2_MODE0 0x050
+#define QSERDES_COM_LOCK_CMP3_MODE0 0x054
+#define QSERDES_COM_LOCK_CMP1_MODE1 0x058
+#define QSERDES_COM_LOCK_CMP2_MODE1 0x05C
+#define QSERDES_COM_LOCK_CMP3_MODE1 0x060
+#define QSERDES_COM_CMN_RSVD0 0x064
+#define QSERDES_COM_EP_CLOCK_DETECT_CTRL 0x068
+#define QSERDES_COM_SYSCLK_DET_COMP_STATUS 0x06C
+#define QSERDES_COM_BG_TRIM 0x070
+#define QSERDES_COM_CLK_EP_DIV 0x074
+#define QSERDES_COM_CP_CTRL_MODE0 0x078
+#define QSERDES_COM_CP_CTRL_MODE1 0x07C
+#define QSERDES_COM_CMN_RSVD1 0x080
+#define QSERDES_COM_PLL_RCTRL_MODE0 0x084
+#define QSERDES_COM_PLL_RCTRL_MODE1 0x088
+#define QSERDES_COM_CMN_RSVD2 0x08C
+#define QSERDES_COM_PLL_CCTRL_MODE0 0x090
+#define QSERDES_COM_PLL_CCTRL_MODE1 0x094
+#define QSERDES_COM_CMN_RSVD3 0x098
+#define QSERDES_COM_PLL_CNTRL 0x09C
+#define QSERDES_COM_PHASE_SEL_CTRL 0x0A0
+#define QSERDES_COM_PHASE_SEL_DC 0x0A4
+#define QSERDES_COM_BIAS_EN_CTRL_BY_PSM 0x0A8
+#define QSERDES_COM_SYSCLK_EN_SEL 0x0AC
+#define QSERDES_COM_CML_SYSCLK_SEL 0x0B0
+#define QSERDES_COM_RESETSM_CNTRL 0x0B4
+#define QSERDES_COM_RESETSM_CNTRL2 0x0B8
+#define QSERDES_COM_RESTRIM_CTRL 0x0BC
+#define QSERDES_COM_RESTRIM_CTRL2 0x0C0
+#define QSERDES_COM_RESCODE_DIV_NUM 0x0C4
+#define QSERDES_COM_LOCK_CMP_EN 0x0C8
+#define QSERDES_COM_LOCK_CMP_CFG 0x0CC
+#define QSERDES_COM_DEC_START_MODE0 0x0D0
+#define QSERDES_COM_DEC_START_MODE1 0x0D4
+#define QSERDES_COM_VCOCAL_DEADMAN_CTRL 0x0D8
+#define QSERDES_COM_DIV_FRAC_START1_MODE0 0x0DC
+#define QSERDES_COM_DIV_FRAC_START2_MODE0 0x0E0
+#define QSERDES_COM_DIV_FRAC_START3_MODE0 0x0E4
+#define QSERDES_COM_DIV_FRAC_START1_MODE1 0x0E8
+#define QSERDES_COM_DIV_FRAC_START2_MODE1 0x0EC
+#define QSERDES_COM_DIV_FRAC_START3_MODE1 0x0F0
+#define QSERDES_COM_VCO_TUNE_MINVAL1 0x0F4
+#define QSERDES_COM_VCO_TUNE_MINVAL2 0x0F8
+#define QSERDES_COM_CMN_RSVD4 0x0FC
+#define QSERDES_COM_INTEGLOOP_INITVAL 0x100
+#define QSERDES_COM_INTEGLOOP_EN 0x104
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0 0x108
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0 0x10C
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE1 0x110
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE1 0x114
+#define QSERDES_COM_VCO_TUNE_MAXVAL1 0x118
+#define QSERDES_COM_VCO_TUNE_MAXVAL2 0x11C
+#define QSERDES_COM_RES_TRIM_CONTROL2 0x120
+#define QSERDES_COM_VCO_TUNE_CTRL 0x124
+#define QSERDES_COM_VCO_TUNE_MAP 0x128
+#define QSERDES_COM_VCO_TUNE1_MODE0 0x12C
+#define QSERDES_COM_VCO_TUNE2_MODE0 0x130
+#define QSERDES_COM_VCO_TUNE1_MODE1 0x134
+#define QSERDES_COM_VCO_TUNE2_MODE1 0x138
+#define QSERDES_COM_VCO_TUNE_INITVAL1 0x13C
+#define QSERDES_COM_VCO_TUNE_INITVAL2 0x140
+#define QSERDES_COM_VCO_TUNE_TIMER1 0x144
+#define QSERDES_COM_VCO_TUNE_TIMER2 0x148
+#define QSERDES_COM_SAR 0x14C
+#define QSERDES_COM_SAR_CLK 0x150
+#define QSERDES_COM_SAR_CODE_OUT_STATUS 0x154
+#define QSERDES_COM_SAR_CODE_READY_STATUS 0x158
+#define QSERDES_COM_CMN_STATUS 0x15C
+#define QSERDES_COM_RESET_SM_STATUS 0x160
+#define QSERDES_COM_RESTRIM_CODE_STATUS 0x164
+#define QSERDES_COM_PLLCAL_CODE1_STATUS 0x168
+#define QSERDES_COM_PLLCAL_CODE2_STATUS 0x16C
+#define QSERDES_COM_BG_CTRL 0x170
+#define QSERDES_COM_CLK_SELECT 0x174
+#define QSERDES_COM_HSCLK_SEL 0x178
+#define QSERDES_COM_PLL_ANALOG 0x180
+#define QSERDES_COM_CORECLK_DIV 0x184
+#define QSERDES_COM_SW_RESET 0x188
+#define QSERDES_COM_CORE_CLK_EN 0x18C
+#define QSERDES_COM_C_READY_STATUS 0x190
+#define QSERDES_COM_CMN_CONFIG 0x194
+#define QSERDES_COM_CMN_RATE_OVERRIDE 0x198
+#define QSERDES_COM_SVS_MODE_CLK_SEL 0x19C
+#define QSERDES_COM_DEBUG_BUS0 0x1A0
+#define QSERDES_COM_DEBUG_BUS1 0x1A4
+#define QSERDES_COM_DEBUG_BUS2 0x1A8
+#define QSERDES_COM_DEBUG_BUS3 0x1AC
+#define QSERDES_COM_DEBUG_BUS_SEL 0x1B0
+#define QSERDES_COM_CMN_MISC1 0x1B4
+#define QSERDES_COM_CMN_MISC2 0x1B8
+#define QSERDES_COM_CORECLK_DIV_MODE1 0x1BC
+#define QSERDES_COM_CMN_RSVD5 0x1C0
+#define QSERDES_TX_BIST_MODE_LANENO 0x200
+#define QSERDES_TX_BIST_INVERT 0x204
+#define QSERDES_TX_CLKBUF_ENABLE 0x208
+#define QSERDES_TX_CMN_CONTROL_ONE 0x20C
+#define QSERDES_TX_CMN_CONTROL_TWO 0x210
+#define QSERDES_TX_CMN_CONTROL_THREE 0x214
+#define QSERDES_TX_TX_EMP_POST1_LVL 0x218
+#define QSERDES_TX_TX_POST2_EMPH 0x21C
+#define QSERDES_TX_TX_BOOST_LVL_UP_DN 0x220
+#define QSERDES_TX_HP_PD_ENABLES 0x224
+#define QSERDES_TX_TX_IDLE_LVL_LARGE_AMP 0x228
+#define QSERDES_TX_TX_DRV_LVL 0x22C
+#define QSERDES_TX_TX_DRV_LVL_OFFSET 0x230
+#define QSERDES_TX_RESET_TSYNC_EN 0x234
+#define QSERDES_TX_PRE_STALL_LDO_BOOST_EN 0x238
+#define QSERDES_TX_TX_BAND 0x23C
+#define QSERDES_TX_SLEW_CNTL 0x240
+#define QSERDES_TX_INTERFACE_SELECT 0x244
+#define QSERDES_TX_LPB_EN 0x248
+#define QSERDES_TX_RES_CODE_LANE_TX 0x24C
+#define QSERDES_TX_RES_CODE_LANE_RX 0x250
+#define QSERDES_TX_RES_CODE_LANE_OFFSET 0x254
+#define QSERDES_TX_PERL_LENGTH1 0x258
+#define QSERDES_TX_PERL_LENGTH2 0x25C
+#define QSERDES_TX_SERDES_BYP_EN_OUT 0x260
+#define QSERDES_TX_DEBUG_BUS_SEL 0x264
+#define QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN 0x268
+#define QSERDES_TX_TX_POL_INV 0x26C
+#define QSERDES_TX_PARRATE_REC_DETECT_IDLE_EN 0x270
+#define QSERDES_TX_BIST_PATTERN1 0x274
+#define QSERDES_TX_BIST_PATTERN2 0x278
+#define QSERDES_TX_BIST_PATTERN3 0x27C
+#define QSERDES_TX_BIST_PATTERN4 0x280
+#define QSERDES_TX_BIST_PATTERN5 0x284
+#define QSERDES_TX_BIST_PATTERN6 0x288
+#define QSERDES_TX_BIST_PATTERN7 0x28C
+#define QSERDES_TX_BIST_PATTERN8 0x290
+#define QSERDES_TX_LANE_MODE 0x294
+#define QSERDES_TX_IDAC_CAL_LANE_MODE 0x298
+#define QSERDES_TX_IDAC_CAL_LANE_MODE_CONFIGURATION 0x29C
+#define QSERDES_TX_ATB_SEL1 0x2A0
+#define QSERDES_TX_ATB_SEL2 0x2A4
+#define QSERDES_TX_RCV_DETECT_LVL 0x2A8
+#define QSERDES_TX_RCV_DETECT_LVL_2 0x2AC
+#define QSERDES_TX_PRBS_SEED1 0x2B0
+#define QSERDES_TX_PRBS_SEED2 0x2B4
+#define QSERDES_TX_PRBS_SEED3 0x2B8
+#define QSERDES_TX_PRBS_SEED4 0x2BC
+#define QSERDES_TX_RESET_GEN 0x2C0
+#define QSERDES_TX_RESET_GEN_MUXES 0x2C4
+#define QSERDES_TX_TRAN_DRVR_EMP_EN 0x2C8
+#define QSERDES_TX_TX_INTERFACE_MODE 0x2CC
+#define QSERDES_TX_PWM_CTRL 0x2D0
+#define QSERDES_TX_PWM_ENCODED_OR_DATA 0x2D4
+#define QSERDES_TX_PWM_GEAR_1_DIVIDER_BAND2 0x2D8
+#define QSERDES_TX_PWM_GEAR_2_DIVIDER_BAND2 0x2DC
+#define QSERDES_TX_PWM_GEAR_3_DIVIDER_BAND2 0x2E0
+#define QSERDES_TX_PWM_GEAR_4_DIVIDER_BAND2 0x2E4
+#define QSERDES_TX_PWM_GEAR_1_DIVIDER_BAND0_1 0x2E8
+#define QSERDES_TX_PWM_GEAR_2_DIVIDER_BAND0_1 0x2EC
+#define QSERDES_TX_PWM_GEAR_3_DIVIDER_BAND0_1 0x2F0
+#define QSERDES_TX_PWM_GEAR_4_DIVIDER_BAND0_1 0x2F4
+#define QSERDES_TX_VMODE_CTRL1 0x2F8
+#define QSERDES_TX_VMODE_CTRL2 0x2FC
+#define QSERDES_TX_TX_ALOG_INTF_OBSV_CNTL 0x300
+#define QSERDES_TX_BIST_STATUS 0x304
+#define QSERDES_TX_BIST_ERROR_COUNT1 0x308
+#define QSERDES_TX_BIST_ERROR_COUNT2 0x30C
+#define QSERDES_TX_TX_ALOG_INTF_OBSV 0x310
+#define QSERDES_RX_UCDR_FO_GAIN_HALF 0x400
+#define QSERDES_RX_UCDR_FO_GAIN_QUARTER 0x404
+#define QSERDES_RX_UCDR_FO_GAIN_EIGHTH 0x408
+#define QSERDES_RX_UCDR_FO_GAIN 0x40C
+#define QSERDES_RX_UCDR_SO_GAIN_HALF 0x410
+#define QSERDES_RX_UCDR_SO_GAIN_QUARTER 0x414
+#define QSERDES_RX_UCDR_SO_GAIN_EIGHTH 0x418
+#define QSERDES_RX_UCDR_SO_GAIN 0x41C
+#define QSERDES_RX_UCDR_SVS_FO_GAIN_HALF 0x420
+#define QSERDES_RX_UCDR_SVS_FO_GAIN_QUARTER 0x424
+#define QSERDES_RX_UCDR_SVS_FO_GAIN_EIGHTH 0x428
+#define QSERDES_RX_UCDR_SVS_FO_GAIN 0x42C
+#define QSERDES_RX_UCDR_SVS_SO_GAIN_HALF 0x430
+#define QSERDES_RX_UCDR_SVS_SO_GAIN_QUARTER 0x434
+#define QSERDES_RX_UCDR_SVS_SO_GAIN_EIGHTH 0x438
+#define QSERDES_RX_UCDR_SVS_SO_GAIN 0x43C
+#define QSERDES_RX_UCDR_FASTLOCK_FO_GAIN 0x440
+#define QSERDES_RX_UCDR_FD_GAIN 0x444
+#define QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE 0x448
+#define QSERDES_RX_UCDR_FO_TO_SO_DELAY 0x44C
+#define QSERDES_RX_UCDR_FASTLOCK_COUNT_LOW 0x450
+#define QSERDES_RX_UCDR_FASTLOCK_COUNT_HIGH 0x454
+#define QSERDES_RX_UCDR_MODULATE 0x458
+#define QSERDES_RX_UCDR_PI_CONTROLS 0x45C
+#define QSERDES_RX_RBIST_CONTROL 0x460
+#define QSERDES_RX_AUX_CONTROL 0x464
+#define QSERDES_RX_AUX_DATA_TCOARSE 0x468
+#define QSERDES_RX_AUX_DATA_TFINE_LSB 0x46C
+#define QSERDES_RX_AUX_DATA_TFINE_MSB 0x470
+#define QSERDES_RX_RCLK_AUXDATA_SEL 0x474
+#define QSERDES_RX_AC_JTAG_ENABLE 0x478
+#define QSERDES_RX_AC_JTAG_INITP 0x47C
+#define QSERDES_RX_AC_JTAG_INITN 0x480
+#define QSERDES_RX_AC_JTAG_LVL 0x484
+#define QSERDES_RX_AC_JTAG_MODE 0x488
+#define QSERDES_RX_AC_JTAG_RESET 0x48C
+#define QSERDES_RX_RX_TERM_BW 0x490
+#define QSERDES_RX_RX_RCVR_IQ_EN 0x494
+#define QSERDES_RX_RX_IDAC_I_DC_OFFSETS 0x498
+#define QSERDES_RX_RX_IDAC_IBAR_DC_OFFSETS 0x49C
+#define QSERDES_RX_RX_IDAC_Q_DC_OFFSETS 0x4A0
+#define QSERDES_RX_RX_IDAC_QBAR_DC_OFFSETS 0x4A4
+#define QSERDES_RX_RX_IDAC_A_DC_OFFSETS 0x4A8
+#define QSERDES_RX_RX_IDAC_ABAR_DC_OFFSETS 0x4AC
+#define QSERDES_RX_RX_IDAC_EN 0x4B0
+#define QSERDES_RX_RX_IDAC_ENABLES 0x4B4
+#define QSERDES_RX_RX_IDAC_SIGN 0x4B8
+#define QSERDES_RX_RX_HIGHZ_HIGHRATE 0x4BC
+#define QSERDES_RX_RX_TERM_AC_BYPASS_DC_COUPLE_OFFSET 0x4C0
+#define QSERDES_RX_RX_EQ_GAIN1_LSB 0x4C4
+#define QSERDES_RX_RX_EQ_GAIN1_MSB 0x4C8
+#define QSERDES_RX_RX_EQ_GAIN2_LSB 0x4CC
+#define QSERDES_RX_RX_EQ_GAIN2_MSB 0x4D0
+#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL1 0x4D4
+#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2 0x4D8
+#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3 0x4DC
+#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4 0x4E0
+#define QSERDES_RX_RX_IDAC_CAL_CONFIGURATION 0x4E4
+#define QSERDES_RX_RX_IDAC_TSETTLE_LOW 0x4E8
+#define QSERDES_RX_RX_IDAC_TSETTLE_HIGH 0x4EC
+#define QSERDES_RX_RX_IDAC_ENDSAMP_LOW 0x4F0
+#define QSERDES_RX_RX_IDAC_ENDSAMP_HIGH 0x4F4
+#define QSERDES_RX_RX_IDAC_MIDPOINT_LOW 0x4F8
+#define QSERDES_RX_RX_IDAC_MIDPOINT_HIGH 0x4FC
+#define QSERDES_RX_RX_EQ_OFFSET_LSB 0x500
+#define QSERDES_RX_RX_EQ_OFFSET_MSB 0x504
+#define QSERDES_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x508
+#define QSERDES_RX_RX_OFFSET_ADAPTOR_CNTRL2 0x50C
+#define QSERDES_RX_SIGDET_ENABLES 0x510
+#define QSERDES_RX_SIGDET_CNTRL 0x514
+#define QSERDES_RX_SIGDET_LVL 0x518
+#define QSERDES_RX_SIGDET_DEGLITCH_CNTRL 0x51C
+#define QSERDES_RX_RX_BAND 0x520
+#define QSERDES_RX_CDR_FREEZE_UP_DN 0x524
+#define QSERDES_RX_CDR_RESET_OVERRIDE 0x528
+#define QSERDES_RX_RX_INTERFACE_MODE 0x52C
+#define QSERDES_RX_JITTER_GEN_MODE 0x530
+#define QSERDES_RX_BUJ_AMP 0x534
+#define QSERDES_RX_SJ_AMP1 0x538
+#define QSERDES_RX_SJ_AMP2 0x53C
+#define QSERDES_RX_SJ_PER1 0x540
+#define QSERDES_RX_SJ_PER2 0x544
+#define QSERDES_RX_BUJ_STEP_FREQ1 0x548
+#define QSERDES_RX_BUJ_STEP_FREQ2 0x54C
+#define QSERDES_RX_PPM_OFFSET1 0x550
+#define QSERDES_RX_PPM_OFFSET2 0x554
+#define QSERDES_RX_SIGN_PPM_PERIOD1 0x558
+#define QSERDES_RX_SIGN_PPM_PERIOD2 0x55C
+#define QSERDES_RX_SSC_CTRL 0x560
+#define QSERDES_RX_SSC_COUNT1 0x564
+#define QSERDES_RX_SSC_COUNT2 0x568
+#define QSERDES_RX_RX_ALOG_INTF_OBSV_CNTL 0x56C
+#define QSERDES_RX_RX_PWM_ENABLE_AND_DATA 0x570
+#define QSERDES_RX_RX_PWM_GEAR1_TIMEOUT_COUNT 0x574
+#define QSERDES_RX_RX_PWM_GEAR2_TIMEOUT_COUNT 0x578
+#define QSERDES_RX_RX_PWM_GEAR3_TIMEOUT_COUNT 0x57C
+#define QSERDES_RX_RX_PWM_GEAR4_TIMEOUT_COUNT 0x580
+#define QSERDES_RX_PI_CTRL1 0x584
+#define QSERDES_RX_PI_CTRL2 0x588
+#define QSERDES_RX_PI_QUAD 0x58C
+#define QSERDES_RX_IDATA1 0x590
+#define QSERDES_RX_IDATA2 0x594
+#define QSERDES_RX_AUX_DATA1 0x598
+#define QSERDES_RX_AUX_DATA2 0x59C
+#define QSERDES_RX_AC_JTAG_OUTP 0x5A0
+#define QSERDES_RX_AC_JTAG_OUTN 0x5A4
+#define QSERDES_RX_RX_SIGDET 0x5A8
+#define QSERDES_RX_RX_VDCOFF 0x5AC
+#define QSERDES_RX_IDAC_CAL_ON 0x5B0
+#define QSERDES_RX_IDAC_STATUS_I 0x5B4
+#define QSERDES_RX_IDAC_STATUS_IBAR 0x5B8
+#define QSERDES_RX_IDAC_STATUS_Q 0x5BC
+#define QSERDES_RX_IDAC_STATUS_QBAR 0x5C0
+#define QSERDES_RX_IDAC_STATUS_A 0x5C4
+#define QSERDES_RX_IDAC_STATUS_ABAR 0x5C8
+#define QSERDES_RX_CALST_STATUS_I 0x5CC
+#define QSERDES_RX_CALST_STATUS_Q 0x5D0
+#define QSERDES_RX_CALST_STATUS_A 0x5D4
+#define QSERDES_RX_RX_ALOG_INTF_OBSV 0x5D8
+#define QSERDES_RX_READ_EQCODE 0x5DC
+#define QSERDES_RX_READ_OFFSETCODE 0x5E0
+#define QSERDES_RX_IA_ERROR_COUNTER_LOW 0x5E4
+#define QSERDES_RX_IA_ERROR_COUNTER_HIGH 0x5E8
+#define PCIE_PHY_MISC_DEBUG_BUS_BYTE0_INDEX 0x600
+#define PCIE_PHY_MISC_DEBUG_BUS_BYTE1_INDEX 0x604
+#define PCIE_PHY_MISC_DEBUG_BUS_BYTE2_INDEX 0x608
+#define PCIE_PHY_MISC_DEBUG_BUS_BYTE3_INDEX 0x60C
+#define PCIE_PHY_MISC_PLACEHOLDER_STATUS 0x610
+#define PCIE_PHY_MISC_DEBUG_BUS_0_STATUS 0x614
+#define PCIE_PHY_MISC_DEBUG_BUS_1_STATUS 0x618
+#define PCIE_PHY_MISC_DEBUG_BUS_2_STATUS 0x61C
+#define PCIE_PHY_MISC_DEBUG_BUS_3_STATUS 0x620
+#define PCIE_PHY_MISC_OSC_DTCT_STATUS 0x624
+#define PCIE_PHY_MISC_OSC_DTCT_CONFIG1 0x628
+#define PCIE_PHY_MISC_OSC_DTCT_CONFIG2 0x62C
+#define PCIE_PHY_MISC_OSC_DTCT_CONFIG3 0x630
+#define PCIE_PHY_MISC_OSC_DTCT_CONFIG4 0x634
+#define PCIE_PHY_MISC_OSC_DTCT_CONFIG5 0x638
+#define PCIE_PHY_MISC_OSC_DTCT_CONFIG6 0x63C
+#define PCIE_PHY_MISC_OSC_DTCT_CONFIG7 0x640
+#define PCIE_PHY_SW_RESET 0x800
+#define PCIE_PHY_POWER_DOWN_CONTROL 0x804
+#define PCIE_PHY_START_CONTROL 0x808
+#define PCIE_PHY_TXMGN_V0 0x80C
+#define PCIE_PHY_TXMGN_V1 0x810
+#define PCIE_PHY_TXMGN_V2 0x814
+#define PCIE_PHY_TXMGN_V3 0x818
+#define PCIE_PHY_TXMGN_V4 0x81C
+#define PCIE_PHY_TXMGN_LS 0x820
+#define PCIE_PHY_TXDEEMPH_M6DB_V0 0x824
+#define PCIE_PHY_TXDEEMPH_M3P5DB_V0 0x828
+#define PCIE_PHY_TXDEEMPH_M6DB_V1 0x82C
+#define PCIE_PHY_TXDEEMPH_M3P5DB_V1 0x830
+#define PCIE_PHY_TXDEEMPH_M6DB_V2 0x834
+#define PCIE_PHY_TXDEEMPH_M3P5DB_V2 0x838
+#define PCIE_PHY_TXDEEMPH_M6DB_V3 0x83C
+#define PCIE_PHY_TXDEEMPH_M3P5DB_V3 0x840
+#define PCIE_PHY_TXDEEMPH_M6DB_V4 0x844
+#define PCIE_PHY_TXDEEMPH_M3P5DB_V4 0x848
+#define PCIE_PHY_TXDEEMPH_M6DB_LS 0x84C
+#define PCIE_PHY_TXDEEMPH_M3P5DB_LS 0x850
+#define PCIE_PHY_ENDPOINT_REFCLK_DRIVE 0x854
+#define PCIE_PHY_RX_IDLE_DTCT_CNTRL 0x858
+#define PCIE_PHY_RATE_SLEW_CNTRL 0x85C
+#define PCIE_PHY_POWER_STATE_CONFIG1 0x860
+#define PCIE_PHY_POWER_STATE_CONFIG2 0x864
+#define PCIE_PHY_POWER_STATE_CONFIG3 0x868
+#define PCIE_PHY_POWER_STATE_CONFIG4 0x86C
+#define PCIE_PHY_RCVR_DTCT_DLY_P1U2_L 0x870
+#define PCIE_PHY_RCVR_DTCT_DLY_P1U2_H 0x874
+#define PCIE_PHY_RCVR_DTCT_DLY_U3_L 0x878
+#define PCIE_PHY_RCVR_DTCT_DLY_U3_H 0x87C
+#define PCIE_PHY_LOCK_DETECT_CONFIG1 0x880
+#define PCIE_PHY_LOCK_DETECT_CONFIG2 0x884
+#define PCIE_PHY_LOCK_DETECT_CONFIG3 0x888
+#define PCIE_PHY_TSYNC_RSYNC_TIME 0x88C
+#define PCIE_PHY_SIGDET_LOW_2_IDLE_TIME 0x890
+#define PCIE_PHY_BEACON_2_IDLE_TIME_L 0x894
+#define PCIE_PHY_BEACON_2_IDLE_TIME_H 0x898
+#define PCIE_PHY_PWRUP_RESET_DLY_TIME_SYSCLK 0x89C
+#define PCIE_PHY_PWRUP_RESET_DLY_TIME_AUXCLK 0x8A0
+#define PCIE_PHY_LP_WAKEUP_DLY_TIME_AUXCLK 0x8A4
+#define PCIE_PHY_PLL_LOCK_CHK_DLY_TIME 0x8A8
+#define PCIE_PHY_LFPS_DET_HIGH_COUNT_VAL 0x8AC
+#define PCIE_PHY_LFPS_TX_ECSTART_EQTLOCK 0x8B0
+#define PCIE_PHY_LFPS_TX_END_CNT_P2U3_START 0x8B4
+#define PCIE_PHY_RXEQTRAINING_WAIT_TIME 0x8B8
+#define PCIE_PHY_RXEQTRAINING_RUN_TIME 0x8BC
+#define PCIE_PHY_TXONESZEROS_RUN_LENGTH 0x8C0
+#define PCIE_PHY_FLL_CNTRL1 0x8C4
+#define PCIE_PHY_FLL_CNTRL2 0x8C8
+#define PCIE_PHY_FLL_CNT_VAL_L 0x8CC
+#define PCIE_PHY_FLL_CNT_VAL_H_TOL 0x8D0
+#define PCIE_PHY_FLL_MAN_CODE 0x8D4
+#define PCIE_PHY_AUTONOMOUS_MODE_CTRL 0x8D8
+#define PCIE_PHY_LFPS_RXTERM_IRQ_CLEAR 0x8DC
+#define PCIE_PHY_ARCVR_DTCT_EN_PERIOD 0x8E0
+#define PCIE_PHY_ARCVR_DTCT_CM_DLY 0x8E4
+#define PCIE_PHY_ALFPS_DEGLITCH_VAL 0x8E8
+#define PCIE_PHY_INSIG_SW_CTRL1 0x8EC
+#define PCIE_PHY_INSIG_SW_CTRL2 0x8F0
+#define PCIE_PHY_INSIG_SW_CTRL3 0x8F4
+#define PCIE_PHY_INSIG_MX_CTRL1 0x8F8
+#define PCIE_PHY_INSIG_MX_CTRL2 0x8FC
+#define PCIE_PHY_INSIG_MX_CTRL3 0x900
+#define PCIE_PHY_OUTSIG_SW_CTRL1 0x904
+#define PCIE_PHY_OUTSIG_MX_CTRL1 0x908
+#define PCIE_PHY_CLK_DEBUG_BYPASS_CTRL 0x90C
+#define PCIE_PHY_TEST_CONTROL 0x910
+#define PCIE_PHY_TEST_CONTROL2 0x914
+#define PCIE_PHY_TEST_CONTROL3 0x918
+#define PCIE_PHY_TEST_CONTROL4 0x91C
+#define PCIE_PHY_TEST_CONTROL5 0x920
+#define PCIE_PHY_TEST_CONTROL6 0x924
+#define PCIE_PHY_TEST_CONTROL7 0x928
+#define PCIE_PHY_COM_RESET_CONTROL 0x92C
+#define PCIE_PHY_BIST_CTRL 0x930
+#define PCIE_PHY_PRBS_POLY0 0x934
+#define PCIE_PHY_PRBS_POLY1 0x938
+#define PCIE_PHY_PRBS_SEED0 0x93C
+#define PCIE_PHY_PRBS_SEED1 0x940
+#define PCIE_PHY_FIXED_PAT_CTRL 0x944
+#define PCIE_PHY_FIXED_PAT0 0x948
+#define PCIE_PHY_FIXED_PAT1 0x94C
+#define PCIE_PHY_FIXED_PAT2 0x950
+#define PCIE_PHY_FIXED_PAT3 0x954
+#define PCIE_PHY_COM_CLK_SWITCH_CTRL 0x958
+#define PCIE_PHY_ELECIDLE_DLY_SEL 0x95C
+#define PCIE_PHY_SPARE1 0x960
+#define PCIE_PHY_BIST_CHK_ERR_CNT_L_STATUS 0x964
+#define PCIE_PHY_BIST_CHK_ERR_CNT_H_STATUS 0x968
+#define PCIE_PHY_BIST_CHK_STATUS 0x96C
+#define PCIE_PHY_LFPS_RXTERM_IRQ_SOURCE_STATUS 0x970
+#define PCIE_PHY_PCS_STATUS 0x974
+#define PCIE_PHY_PCS_STATUS2 0x978
+#define PCIE_PHY_PCS_STATUS3 0x97C
+#define PCIE_PHY_COM_RESET_STATUS 0x980
+#define PCIE_PHY_OSC_DTCT_STATUS 0x984
+#define PCIE_PHY_REVISION_ID0 0x988
+#define PCIE_PHY_REVISION_ID1 0x98C
+#define PCIE_PHY_REVISION_ID2 0x990
+#define PCIE_PHY_REVISION_ID3 0x994
+#define PCIE_PHY_DEBUG_BUS_0_STATUS 0x998
+#define PCIE_PHY_DEBUG_BUS_1_STATUS 0x99C
+#define PCIE_PHY_DEBUG_BUS_2_STATUS 0x9A0
+#define PCIE_PHY_DEBUG_BUS_3_STATUS 0x9A4
+#define PCIE_PHY_LP_WAKEUP_DLY_TIME_AUXCLK_MSB 0x9A8
+#define PCIE_PHY_OSC_DTCT_ACTIONS 0x9AC
+#define PCIE_PHY_SIGDET_CNTRL 0x9B0
+#define PCIE_PHY_IDAC_CAL_CNTRL 0x9B4
+#define PCIE_PHY_CMN_ACK_OUT_SEL 0x9B8
+#define PCIE_PHY_PLL_LOCK_CHK_DLY_TIME_SYSCLK 0x9BC
+#define PCIE_PHY_AUTONOMOUS_MODE_STATUS 0x9C0
+#define PCIE_PHY_ENDPOINT_REFCLK_CNTRL 0x9C4
+#define PCIE_PHY_EPCLK_PRE_PLL_LOCK_DLY_SYSCLK 0x9C8
+#define PCIE_PHY_EPCLK_PRE_PLL_LOCK_DLY_AUXCLK 0x9CC
+#define PCIE_PHY_EPCLK_DLY_COUNT_VAL_L 0x9D0
+#define PCIE_PHY_EPCLK_DLY_COUNT_VAL_H 0x9D4
+#define PCIE_PHY_RX_SIGDET_LVL 0x9D8
+#define PCIE_PHY_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB 0x9DC
+#define PCIE_PHY_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB 0x9E0
+#define PCIE_PHY_AUTONOMOUS_MODE_CTRL2 0x9E4
+#define PCIE_PHY_RXTERMINATION_DLY_SEL 0x9E8
+#define PCIE_PHY_LFPS_PER_TIMER_VAL 0x9EC
+#define PCIE_PHY_SIGDET_STARTUP_TIMER_VAL 0x9F0
+#define PCIE_PHY_LOCK_DETECT_CONFIG4 0x9F4
+#endif
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index 96b9bd6..7df312e 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -3135,6 +3135,17 @@
}
EXPORT_SYMBOL(ipa_ntn_uc_dereg_rdyCB);
+int ipa_get_smmu_params(struct ipa_smmu_in_params *in,
+ struct ipa_smmu_out_params *out)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_get_smmu_params, in, out);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_get_smmu_params);
+
/**
* ipa_conn_wdi3_pipes() - connect wdi3 pipes
*/
diff --git a/drivers/platform/msm/ipa/ipa_api.h b/drivers/platform/msm/ipa/ipa_api.h
index b526711..0779f34 100644
--- a/drivers/platform/msm/ipa/ipa_api.h
+++ b/drivers/platform/msm/ipa/ipa_api.h
@@ -417,6 +417,9 @@
int (*ipa_tz_unlock_reg)(struct ipa_tz_unlock_reg_info *reg_info,
u16 num_regs);
+
+ int (*ipa_get_smmu_params)(struct ipa_smmu_in_params *in,
+ struct ipa_smmu_out_params *out);
};
#ifdef CONFIG_IPA
diff --git a/drivers/platform/msm/ipa/ipa_common_i.h b/drivers/platform/msm/ipa/ipa_common_i.h
index 0a406d2..98a1cf9 100644
--- a/drivers/platform/msm/ipa/ipa_common_i.h
+++ b/drivers/platform/msm/ipa/ipa_common_i.h
@@ -19,6 +19,10 @@
#include <linux/ipa.h>
#include <linux/ipa_uc_offload.h>
#include <linux/ipa_wdi3.h>
+#include <linux/ratelimit.h>
+
+#define WARNON_RATELIMIT_BURST 1
+#define IPA_RATELIMIT_BURST 1
#define __FILENAME__ \
(strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
@@ -104,6 +108,39 @@
ipa_dec_client_disable_clks(&log_info); \
} while (0)
+/*
+ * Printing one warning message in 5 seconds if multiple warning messages
+ * are coming back to back.
+ */
+
+#define WARN_ON_RATELIMIT_IPA(condition) \
+({ \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ DEFAULT_RATELIMIT_INTERVAL, \
+ WARNON_RATELIMIT_BURST); \
+ int rtn = !!(condition); \
+ \
+ if (unlikely(rtn && __ratelimit(&_rs))) \
+ WARN_ON(rtn); \
+})
+
+/*
+ * Printing one error message in 5 seconds if multiple error messages
+ * are coming back to back.
+ */
+
+#define pr_err_ratelimited_ipa(fmt, ...) \
+ printk_ratelimited_ipa(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
+#define printk_ratelimited_ipa(fmt, ...) \
+({ \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ DEFAULT_RATELIMIT_INTERVAL, \
+ IPA_RATELIMIT_BURST); \
+ \
+ if (__ratelimit(&_rs)) \
+ printk(fmt, ##__VA_ARGS__); \
+})
+
#define ipa_assert_on(condition)\
do {\
if (unlikely(condition))\
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index d3c2ca3..b615ec8 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -2116,6 +2116,12 @@
if (ep_idx == -1)
continue;
+ /* from IPA 4.0 pipe suspend is not supported */
+ if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0)
+ ipahal_write_reg_n_fields(
+ IPA_ENDP_INIT_CTRL_n,
+ ep_idx, &ep_suspend);
+
/*
* ipa3_cfg_ep_holb is not used here because we are
* setting HOLB on Q6 pipes, and from APPS perspective
@@ -2128,12 +2134,6 @@
ipahal_write_reg_n_fields(
IPA_ENDP_INIT_HOL_BLOCK_EN_n,
ep_idx, &ep_holb);
-
- /* from IPA 4.0 pipe suspend is not supported */
- if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0)
- ipahal_write_reg_n_fields(
- IPA_ENDP_INIT_CTRL_n,
- ep_idx, &ep_suspend);
}
}
}
@@ -4518,6 +4518,7 @@
ipa3_register_panic_hdlr();
ipa3_ctx->q6_proxy_clk_vote_valid = true;
+ ipa3_ctx->q6_proxy_clk_vote_cnt++;
mutex_lock(&ipa3_ctx->lock);
ipa3_ctx->ipa_initialization_complete = true;
@@ -5138,6 +5139,7 @@
mutex_init(&ipa3_ctx->lock);
mutex_init(&ipa3_ctx->q6_proxy_clk_vote_mutex);
mutex_init(&ipa3_ctx->ipa_cne_evt_lock);
+ ipa3_ctx->q6_proxy_clk_vote_cnt = 0;
idr_init(&ipa3_ctx->ipa_idr);
spin_lock_init(&ipa3_ctx->idr_lock);
@@ -6403,5 +6405,39 @@
return iommu_map(domain, iova, paddr, size, prot);
}
+/**
+ * ipa3_get_smmu_params()- Return the ipa3 smmu related params.
+ */
+int ipa3_get_smmu_params(struct ipa_smmu_in_params *in,
+ struct ipa_smmu_out_params *out)
+{
+ bool is_smmu_enable = 0;
+
+ if (out == NULL || in == NULL) {
+ IPAERR("bad parms for Client SMMU out params\n");
+ return -EINVAL;
+ }
+
+ if (!ipa3_ctx) {
+ IPAERR("IPA not yet initialized\n");
+ return -EINVAL;
+ }
+
+ switch (in->smmu_client) {
+ case IPA_SMMU_WLAN_CLIENT:
+ is_smmu_enable = !(ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC] |
+ ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_WLAN]);
+ break;
+ default:
+ is_smmu_enable = 0;
+ IPAERR("Trying to get illegal clients SMMU status");
+ return -EINVAL;
+ }
+
+ out->smmu_enable = is_smmu_enable;
+
+ return 0;
+}
+
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("IPA HW device driver");
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
index 6a89f49..0f3940f 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
@@ -62,7 +62,7 @@
res = ipahal_flt_generate_hw_rule(&gen_params, &entry->hw_len, buf);
if (res)
- IPAERR("failed to generate flt h/w rule\n");
+ IPAERR_RL("failed to generate flt h/w rule\n");
return 0;
}
@@ -311,7 +311,7 @@
}
if (ipahal_fltrt_allocate_hw_tbl_imgs(alloc_params)) {
- IPAERR("fail to allocate FLT HW TBL images. IP %d\n", ip);
+ IPAERR_RL("fail to allocate FLT HW TBL images. IP %d\n", ip);
rc = -ENOMEM;
goto allocate_failed;
}
@@ -319,14 +319,14 @@
if (ipa_translate_flt_tbl_to_hw_fmt(ip, IPA_RULE_HASHABLE,
alloc_params->hash_bdy.base, alloc_params->hash_hdr.base,
hash_bdy_start_ofst)) {
- IPAERR("fail to translate hashable flt tbls to hw format\n");
+ IPAERR_RL("fail to translate hashable flt tbls to hw format\n");
rc = -EPERM;
goto translate_fail;
}
if (ipa_translate_flt_tbl_to_hw_fmt(ip, IPA_RULE_NON_HASHABLE,
alloc_params->nhash_bdy.base, alloc_params->nhash_hdr.base,
nhash_bdy_start_ofst)) {
- IPAERR("fail to translate non-hash flt tbls to hw format\n");
+ IPAERR_RL("fail to translate non-hash flt tbls to hw format\n");
rc = -EPERM;
goto translate_fail;
}
@@ -530,7 +530,7 @@
}
if (ipa_generate_flt_hw_tbl_img(ip, &alloc_params)) {
- IPAERR("fail to generate FLT HW TBL image. IP %d\n", ip);
+ IPAERR_RL("fail to generate FLT HW TBL image. IP %d\n", ip);
rc = -EFAULT;
goto prep_failed;
}
@@ -745,25 +745,25 @@
if (rule->action != IPA_PASS_TO_EXCEPTION) {
if (!rule->eq_attrib_type) {
if (!rule->rt_tbl_hdl) {
- IPAERR("invalid RT tbl\n");
+ IPAERR_RL("invalid RT tbl\n");
goto error;
}
*rt_tbl = ipa3_id_find(rule->rt_tbl_hdl);
if (*rt_tbl == NULL) {
- IPAERR("RT tbl not found\n");
+ IPAERR_RL("RT tbl not found\n");
goto error;
}
if ((*rt_tbl)->cookie != IPA_RT_TBL_COOKIE) {
- IPAERR("RT table cookie is invalid\n");
+ IPAERR_RL("RT table cookie is invalid\n");
goto error;
}
} else {
if (rule->rt_tbl_idx > ((ip == IPA_IP_v4) ?
IPA_MEM_PART(v4_modem_rt_index_hi) :
IPA_MEM_PART(v6_modem_rt_index_hi))) {
- IPAERR("invalid RT tbl\n");
+ IPAERR_RL("invalid RT tbl\n");
goto error;
}
}
@@ -778,12 +778,12 @@
if (rule->pdn_idx) {
if (rule->action == IPA_PASS_TO_EXCEPTION ||
rule->action == IPA_PASS_TO_ROUTING) {
- IPAERR(
+ IPAERR_RL(
"PDN index should be 0 when action is not pass to NAT\n");
goto error;
} else {
if (rule->pdn_idx >= IPA_MAX_PDN_NUM) {
- IPAERR("PDN index %d is too large\n",
+ IPAERR_RL("PDN index %d is too large\n",
rule->pdn_idx);
goto error;
}
@@ -794,7 +794,7 @@
if (rule->rule_id) {
if ((rule->rule_id < ipahal_get_rule_id_hi_bit()) ||
(rule->rule_id >= ((ipahal_get_rule_id_hi_bit()<<1)-1))) {
- IPAERR("invalid rule_id provided 0x%x\n"
+ IPAERR_RL("invalid rule_id provided 0x%x\n"
"rule_id with bit 0x%x are auto generated\n",
rule->rule_id, ipahal_get_rule_id_hi_bit());
goto error;
@@ -828,8 +828,8 @@
} else {
id = ipa3_alloc_rule_id(tbl->rule_ids);
if (id < 0) {
- IPAERR("failed to allocate rule id\n");
- WARN_ON(1);
+ IPAERR_RL("failed to allocate rule id\n");
+ WARN_ON_RATELIMIT_IPA(1);
goto rule_id_fail;
}
}
@@ -853,8 +853,8 @@
entry->rt_tbl->ref_cnt++;
id = ipa3_id_alloc(entry);
if (id < 0) {
- IPAERR("failed to add to tree\n");
- WARN_ON(1);
+ IPAERR_RL("failed to add to tree\n");
+ WARN_ON_RATELIMIT_IPA(1);
goto ipa_insert_failed;
}
*rule_hdl = id;
@@ -1399,7 +1399,7 @@
list_for_each_entry_safe(entry, next, &tbl->head_flt_rule_list,
link) {
if (ipa3_id_find(entry->id) == NULL) {
- WARN_ON(1);
+ WARN_ON_RATELIMIT_IPA(1);
mutex_unlock(&ipa3_ctx->lock);
return -EFAULT;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
index a89bd78..a37df7e 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
@@ -343,7 +343,7 @@
}
if (hdr_entry->cookie != IPA_HDR_COOKIE) {
IPAERR_RL("Invalid header cookie %u\n", hdr_entry->cookie);
- WARN_ON(1);
+ WARN_ON_RATELIMIT_IPA(1);
return -EINVAL;
}
IPADBG("Associated header is name=%s is_hdr_proc_ctx=%d\n",
@@ -373,7 +373,7 @@
bin = IPA_HDR_PROC_CTX_BIN1;
} else {
IPAERR_RL("unexpected needed len %d\n", needed_len);
- WARN_ON(1);
+ WARN_ON_RATELIMIT_IPA(1);
goto bad_len;
}
@@ -418,8 +418,8 @@
id = ipa3_id_alloc(entry);
if (id < 0) {
- IPAERR("failed to alloc id\n");
- WARN_ON(1);
+ IPAERR_RL("failed to alloc id\n");
+ WARN_ON_RATELIMIT_IPA(1);
goto ipa_insert_failed;
}
entry->id = id;
@@ -555,8 +555,8 @@
id = ipa3_id_alloc(entry);
if (id < 0) {
- IPAERR("failed to alloc id\n");
- WARN_ON(1);
+ IPAERR_RL("failed to alloc id\n");
+ WARN_ON_RATELIMIT_IPA(1);
goto ipa_insert_failed;
}
entry->id = id;
@@ -984,7 +984,7 @@
if (entry->is_hdr_proc_ctx) {
IPAERR("default header is proc ctx\n");
mutex_unlock(&ipa3_ctx->lock);
- WARN_ON(1);
+ WARN_ON_RATELIMIT_IPA(1);
return -EFAULT;
}
continue;
@@ -992,7 +992,7 @@
if (ipa3_id_find(entry->id) == NULL) {
mutex_unlock(&ipa3_ctx->lock);
- WARN_ON(1);
+ WARN_ON_RATELIMIT_IPA(1);
return -EFAULT;
}
if (entry->is_hdr_proc_ctx) {
@@ -1046,7 +1046,7 @@
if (ipa3_id_find(ctx_entry->id) == NULL) {
mutex_unlock(&ipa3_ctx->lock);
- WARN_ON(1);
+ WARN_ON_RATELIMIT_IPA(1);
return -EFAULT;
}
list_del(&ctx_entry->link);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index ad925c5..3754aa8 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -106,7 +106,7 @@
#define IPAERR_RL(fmt, args...) \
do { \
- pr_err_ratelimited(DRV_NAME " %s:%d " fmt, __func__,\
+ pr_err_ratelimited_ipa(DRV_NAME " %s:%d " fmt, __func__,\
__LINE__, ## args);\
if (ipa3_ctx) { \
IPA_IPC_LOGGING(ipa3_ctx->logbuf, \
@@ -1331,6 +1331,7 @@
u32 curr_ipa_clk_rate;
bool q6_proxy_clk_vote_valid;
struct mutex q6_proxy_clk_vote_mutex;
+ u32 q6_proxy_clk_vote_cnt;
u32 ipa_num_pipes;
dma_addr_t pkt_init_imm[IPA3_MAX_NUM_PIPES];
u32 pkt_init_imm_opcode;
@@ -2020,6 +2021,9 @@
u8 ipa3_get_qmb_master_sel(enum ipa_client_type client);
+int ipa3_get_smmu_params(struct ipa_smmu_in_params *in,
+ struct ipa_smmu_out_params *out);
+
/* internal functions */
int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
index 4ada018..40ef59a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
@@ -221,7 +221,7 @@
int result = -EINVAL;
if (lookup == NULL) {
- IPAERR("invalid param lookup=%p\n", lookup);
+ IPAERR_RL("invalid param lookup=%p\n", lookup);
return result;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c
index 3bf0327..fea9b3b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c
@@ -1029,9 +1029,10 @@
IPA_PM_DBG_STATE(client->hdl, client->name,
client->state);
spin_unlock_irqrestore(&client->state_lock, flags);
- } else if (client->state ==
- IPA_PM_ACTIVATED_PENDING_DEACTIVATION ||
- IPA_PM_ACTIVATED_PENDING_RESCHEDULE) {
+ } else if ((client->state ==
+ IPA_PM_ACTIVATED_PENDING_DEACTIVATION) ||
+ (client->state ==
+ IPA_PM_ACTIVATED_PENDING_RESCHEDULE)) {
run_algorithm = true;
client->state = IPA_PM_DEACTIVATED;
IPA_PM_DBG_STATE(client->hdl, client->name,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
index 2536bf4..fc76604 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
@@ -59,15 +59,15 @@
gen_params.ipt = ip;
gen_params.dst_pipe_idx = ipa3_get_ep_mapping(entry->rule.dst);
if (gen_params.dst_pipe_idx == -1) {
- IPAERR("Wrong destination pipe specified in RT rule\n");
- WARN_ON(1);
+ IPAERR_RL("Wrong destination pipe specified in RT rule\n");
+ WARN_ON_RATELIMIT_IPA(1);
return -EPERM;
}
if (!IPA_CLIENT_IS_CONS(entry->rule.dst)) {
- IPAERR("No RT rule on IPA_client_producer pipe.\n");
- IPAERR("pipe_idx: %d dst_pipe: %d\n",
+ IPAERR_RL("No RT rule on IPA_client_producer pipe.\n");
+ IPAERR_RL("pipe_idx: %d dst_pipe: %d\n",
gen_params.dst_pipe_idx, entry->rule.dst);
- WARN_ON(1);
+ WARN_ON_RATELIMIT_IPA(1);
return -EPERM;
}
@@ -145,14 +145,14 @@
tbl_mem.size = tbl->sz[rlt] -
ipahal_get_hw_tbl_hdr_width();
if (ipahal_fltrt_allocate_hw_sys_tbl(&tbl_mem)) {
- IPAERR("fail to alloc sys tbl of size %d\n",
+ IPAERR_RL("fail to alloc sys tbl of size %d\n",
tbl_mem.size);
goto err;
}
if (ipahal_fltrt_write_addr_to_hdr(tbl_mem.phys_base,
hdr, tbl->idx - apps_start_idx, true)) {
- IPAERR("fail to wrt sys tbl addr to hdr\n");
+ IPAERR_RL("fail to wrt sys tbl addr to hdr\n");
goto hdr_update_fail;
}
@@ -166,7 +166,7 @@
res = ipa_generate_rt_hw_rule(ip, entry,
tbl_mem_buf);
if (res) {
- IPAERR("failed to gen HW RT rule\n");
+ IPAERR_RL("failed to gen HW RT rule\n");
goto hdr_update_fail;
}
tbl_mem_buf += entry->hw_len;
@@ -183,7 +183,7 @@
/* update the hdr at the right index */
if (ipahal_fltrt_write_addr_to_hdr(offset, hdr,
tbl->idx - apps_start_idx, true)) {
- IPAERR("fail to wrt lcl tbl ofst to hdr\n");
+ IPAERR_RL("fail to wrt lcl tbl ofst to hdr\n");
goto hdr_update_fail;
}
@@ -195,7 +195,7 @@
res = ipa_generate_rt_hw_rule(ip, entry,
body_i);
if (res) {
- IPAERR("failed to gen HW RT rule\n");
+ IPAERR_RL("failed to gen HW RT rule\n");
goto err;
}
body_i += entry->hw_len;
@@ -296,7 +296,7 @@
res = ipa_generate_rt_hw_rule(ip, entry, NULL);
if (res) {
- IPAERR("failed to calculate HW RT rule size\n");
+ IPAERR_RL("failed to calculate HW RT rule size\n");
return -EPERM;
}
@@ -311,8 +311,8 @@
if ((tbl->sz[IPA_RULE_HASHABLE] +
tbl->sz[IPA_RULE_NON_HASHABLE]) == 0) {
- WARN_ON(1);
- IPAERR("rt tbl %s is with zero total size\n", tbl->name);
+ WARN_ON_RATELIMIT_IPA(1);
+ IPAERR_RL("rt tbl %s is with zero total size\n", tbl->name);
}
hdr_width = ipahal_get_hw_tbl_hdr_width();
@@ -819,8 +819,8 @@
id = ipa3_id_alloc(entry);
if (id < 0) {
- IPAERR("failed to add to tree\n");
- WARN_ON(1);
+ IPAERR_RL("failed to add to tree\n");
+ WARN_ON_RATELIMIT_IPA(1);
goto ipa_insert_failed;
}
entry->id = id;
@@ -859,7 +859,7 @@
else if (entry->set == &ipa3_ctx->rt_tbl_set[IPA_IP_v6])
ip = IPA_IP_v6;
else {
- WARN_ON(1);
+ WARN_ON_RATELIMIT_IPA(1);
return -EPERM;
}
@@ -892,14 +892,14 @@
struct ipa3_hdr_proc_ctx_entry **proc_ctx)
{
if (rule->hdr_hdl && rule->hdr_proc_ctx_hdl) {
- IPAERR("rule contains both hdr_hdl and hdr_proc_ctx_hdl\n");
+ IPAERR_RL("rule contains both hdr_hdl and hdr_proc_ctx_hdl\n");
return -EPERM;
}
if (rule->hdr_hdl) {
*hdr = ipa3_id_find(rule->hdr_hdl);
if ((*hdr == NULL) || ((*hdr)->cookie != IPA_HDR_COOKIE)) {
- IPAERR("rt rule does not point to valid hdr\n");
+ IPAERR_RL("rt rule does not point to valid hdr\n");
return -EPERM;
}
} else if (rule->hdr_proc_ctx_hdl) {
@@ -907,7 +907,7 @@
if ((*proc_ctx == NULL) ||
((*proc_ctx)->cookie != IPA_PROC_HDR_COOKIE)) {
- IPAERR("rt rule does not point to valid proc ctx\n");
+ IPAERR_RL("rt rule does not point to valid proc ctx\n");
return -EPERM;
}
}
@@ -940,8 +940,8 @@
} else {
id = ipa3_alloc_rule_id(tbl->rule_ids);
if (id < 0) {
- IPAERR("failed to allocate rule id\n");
- WARN_ON(1);
+ IPAERR_RL("failed to allocate rule id\n");
+ WARN_ON_RATELIMIT_IPA(1);
goto alloc_rule_id_fail;
}
}
@@ -967,8 +967,8 @@
entry->proc_ctx->ref_cnt++;
id = ipa3_id_alloc(entry);
if (id < 0) {
- IPAERR("failed to add to tree\n");
- WARN_ON(1);
+ IPAERR_RL("failed to add to tree\n");
+ WARN_ON_RATELIMIT_IPA(1);
goto ipa_insert_failed;
}
IPADBG("add rt rule tbl_idx=%d rule_cnt=%d rule_id=%d\n",
@@ -1433,7 +1433,7 @@
list_for_each_entry_safe(rule, rule_next,
&tbl->head_rt_rule_list, link) {
if (ipa3_id_find(rule->id) == NULL) {
- WARN_ON(1);
+ WARN_ON_RATELIMIT_IPA(1);
mutex_unlock(&ipa3_ctx->lock);
return -EFAULT;
}
@@ -1461,7 +1461,7 @@
}
if (ipa3_id_find(tbl->id) == NULL) {
- WARN_ON(1);
+ WARN_ON_RATELIMIT_IPA(1);
mutex_unlock(&ipa3_ctx->lock);
return -EFAULT;
}
@@ -1520,7 +1520,7 @@
entry = __ipa3_find_rt_tbl(lookup->ip, lookup->name);
if (entry && entry->cookie == IPA_RT_TBL_COOKIE) {
if (entry->ref_cnt == U32_MAX) {
- IPAERR("fail: ref count crossed limit\n");
+ IPAERR_RL("fail: ref count crossed limit\n");
goto ret;
}
entry->ref_cnt++;
@@ -1572,7 +1572,7 @@
else if (entry->set == &ipa3_ctx->rt_tbl_set[IPA_IP_v6])
ip = IPA_IP_v6;
else {
- WARN_ON(1);
+ WARN_ON_RATELIMIT_IPA(1);
result = -EINVAL;
goto ret;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
index b8928da..941e489 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
@@ -620,8 +620,9 @@
unsigned long iova, size_t len)
{
IPADBG("--res_idx=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", res_idx,
- &pa, iova, len);
- wdi_res[res_idx].res = kzalloc(sizeof(struct ipa_wdi_res), GFP_KERNEL);
+ &pa, iova, len);
+ wdi_res[res_idx].res = kzalloc(sizeof(*wdi_res[res_idx].res),
+ GFP_KERNEL);
if (!wdi_res[res_idx].res)
BUG();
wdi_res[res_idx].nents = 1;
@@ -647,7 +648,8 @@
return;
}
- wdi_res[res_idx].res = kcalloc(sgt->nents, sizeof(struct ipa_wdi_res),
+ wdi_res[res_idx].res = kcalloc(sgt->nents,
+ sizeof(*wdi_res[res_idx].res),
GFP_KERNEL);
if (!wdi_res[res_idx].res)
BUG();
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 979369a..fb29d00 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -1113,12 +1113,6 @@
{ 31, 31, 8, 8, IPA_EE_AP } },
/* IPA_4_0 */
- [IPA_4_0][IPA_CLIENT_WLAN1_PROD] = {
- true, IPA_v4_0_GROUP_UL_DL,
- true,
- IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
- QMB_MASTER_SELECT_DDR,
- { 7, 9, 8, 16, IPA_EE_AP } },
[IPA_4_0][IPA_CLIENT_USB_PROD] = {
true, IPA_v4_0_GROUP_UL_DL,
true,
@@ -1348,13 +1342,13 @@
true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
QMB_MASTER_SELECT_DDR,
- { 3, 0, 16, 32, IPA_EE_Q6 } },
+ { 6, 2, 12, 24, IPA_EE_Q6 } },
[IPA_4_0_MHI][IPA_CLIENT_Q6_WAN_PROD] = {
true, IPA_v4_0_GROUP_UL_DL,
true,
IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
QMB_MASTER_SELECT_DDR,
- { 6, 2, 12, 24, IPA_EE_Q6 } },
+ { 3, 0, 16, 32, IPA_EE_Q6 } },
[IPA_4_0_MHI][IPA_CLIENT_Q6_CMD_PROD] = {
true, IPA_v4_0_MHI_GROUP_PCIE,
false,
@@ -4194,7 +4188,9 @@
mutex_lock(&ipa3_ctx->q6_proxy_clk_vote_mutex);
if (ipa3_ctx->q6_proxy_clk_vote_valid) {
IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PROXY_CLK_VOTE");
- ipa3_ctx->q6_proxy_clk_vote_valid = false;
+ ipa3_ctx->q6_proxy_clk_vote_cnt--;
+ if (ipa3_ctx->q6_proxy_clk_vote_cnt == 0)
+ ipa3_ctx->q6_proxy_clk_vote_valid = false;
}
mutex_unlock(&ipa3_ctx->q6_proxy_clk_vote_mutex);
}
@@ -4210,8 +4206,10 @@
return;
mutex_lock(&ipa3_ctx->q6_proxy_clk_vote_mutex);
- if (!ipa3_ctx->q6_proxy_clk_vote_valid) {
+ if (!ipa3_ctx->q6_proxy_clk_vote_valid ||
+ (ipa3_ctx->q6_proxy_clk_vote_cnt > 0)) {
IPA_ACTIVE_CLIENTS_INC_SPECIAL("PROXY_CLK_VOTE");
+ ipa3_ctx->q6_proxy_clk_vote_cnt++;
ipa3_ctx->q6_proxy_clk_vote_valid = true;
}
mutex_unlock(&ipa3_ctx->q6_proxy_clk_vote_mutex);
@@ -4505,6 +4503,7 @@
api_ctrl->ipa_enable_wdi3_pipes = ipa3_enable_wdi3_pipes;
api_ctrl->ipa_disable_wdi3_pipes = ipa3_disable_wdi3_pipes;
api_ctrl->ipa_tz_unlock_reg = ipa3_tz_unlock_reg;
+ api_ctrl->ipa_get_smmu_params = ipa3_get_smmu_params;
return 0;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
index d6dbc85..a677046 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
@@ -187,17 +187,17 @@
if (attrib->attrib_mask & IPA_FLT_NEXT_HDR ||
attrib->attrib_mask & IPA_FLT_TC ||
attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
- IPAHAL_ERR("v6 attrib's specified for v4 rule\n");
+ IPAHAL_ERR_RL("v6 attrib's specified for v4 rule\n");
return -EPERM;
}
} else if (ipt == IPA_IP_v6) {
if (attrib->attrib_mask & IPA_FLT_TOS ||
attrib->attrib_mask & IPA_FLT_PROTOCOL) {
- IPAHAL_ERR("v4 attrib's specified for v6 rule\n");
+ IPAHAL_ERR_RL("v4 attrib's specified for v6 rule\n");
return -EPERM;
}
} else {
- IPAHAL_ERR("unsupported ip %d\n", ipt);
+ IPAHAL_ERR_RL("unsupported ip %d\n", ipt);
return -EPERM;
}
@@ -236,7 +236,7 @@
break;
default:
IPAHAL_ERR("Invalid HDR type %d\n", params->hdr_type);
- WARN_ON(1);
+ WARN_ON_RATELIMIT_IPA(1);
return -EINVAL;
};
@@ -294,8 +294,8 @@
rule_hdr->u.hdr.action = 0x3;
break;
default:
- IPAHAL_ERR("Invalid Rule Action %d\n", params->rule->action);
- WARN_ON(1);
+ IPAHAL_ERR_RL("Invalid Rule Action %d\n", params->rule->action);
+ WARN_ON_RATELIMIT_IPA(1);
return -EINVAL;
}
ipa_assert_on(params->rt_tbl_idx & ~0x1F);
@@ -316,14 +316,14 @@
if (params->rule->eq_attrib_type) {
if (ipa_fltrt_generate_hw_rule_bdy_from_eq(
¶ms->rule->eq_attrib, &buf)) {
- IPAHAL_ERR("fail to generate hw rule from eq\n");
+ IPAHAL_ERR_RL("fail to generate hw rule from eq\n");
return -EPERM;
}
en_rule = params->rule->eq_attrib.rule_eq_bitmap;
} else {
if (ipa_fltrt_generate_hw_rule_bdy(params->ipt,
¶ms->rule->attrib, &buf, &en_rule)) {
- IPAHAL_ERR("fail to generate hw rule\n");
+ IPAHAL_ERR_RL("fail to generate hw rule\n");
return -EPERM;
}
}
@@ -343,7 +343,7 @@
if (*hw_len == 0) {
*hw_len = buf - start;
} else if (*hw_len != (buf - start)) {
- IPAHAL_ERR("hw_len differs b/w passed=0x%x calc=%td\n",
+ IPAHAL_ERR_RL("hw_len differs b/w passed=0x%x calc=%td\n",
*hw_len, (buf - start));
return -EPERM;
}
@@ -376,7 +376,7 @@
break;
default:
IPAHAL_ERR("Invalid Rule Action %d\n", params->rule->action);
- WARN_ON(1);
+ WARN_ON_RATELIMIT_IPA(1);
return -EINVAL;
}
@@ -1381,7 +1381,7 @@
sz = IPA3_0_HW_TBL_WIDTH * 2 + IPA3_0_HW_RULE_START_ALIGNMENT;
extra_wrd_buf = kzalloc(sz, GFP_KERNEL);
if (!extra_wrd_buf) {
- IPAHAL_ERR("failed to allocate %d bytes\n", sz);
+ IPAHAL_ERR_RL("failed to allocate %d bytes\n", sz);
rc = -ENOMEM;
goto fail_extra_alloc;
}
@@ -1389,7 +1389,7 @@
sz = IPA3_0_HW_RULE_BUF_SIZE + IPA3_0_HW_RULE_START_ALIGNMENT;
rest_wrd_buf = kzalloc(sz, GFP_KERNEL);
if (!rest_wrd_buf) {
- IPAHAL_ERR("failed to allocate %d bytes\n", sz);
+ IPAHAL_ERR_RL("failed to allocate %d bytes\n", sz);
rc = -ENOMEM;
goto fail_rest_alloc;
}
@@ -1407,14 +1407,14 @@
rc = ipa_fltrt_rule_generation_err_check(ipt, attrib);
if (rc) {
- IPAHAL_ERR("rule generation err check failed\n");
+ IPAHAL_ERR_RL("rule generation err check failed\n");
goto fail_err_check;
}
if (ipt == IPA_IP_v4) {
if (ipa_fltrt_generate_hw_rule_bdy_ip4(en_rule, attrib,
&extra_wrd_i, &rest_wrd_i)) {
- IPAHAL_ERR("failed to build ipv4 hw rule\n");
+ IPAHAL_ERR_RL("failed to build ipv4 hw rule\n");
rc = -EPERM;
goto fail_err_check;
}
@@ -1422,12 +1422,12 @@
} else if (ipt == IPA_IP_v6) {
if (ipa_fltrt_generate_hw_rule_bdy_ip6(en_rule, attrib,
&extra_wrd_i, &rest_wrd_i)) {
- IPAHAL_ERR("failed to build ipv6 hw rule\n");
+ IPAHAL_ERR_RL("failed to build ipv6 hw rule\n");
rc = -EPERM;
goto fail_err_check;
}
} else {
- IPAHAL_ERR("unsupported ip %d\n", ipt);
+ IPAHAL_ERR_RL("unsupported ip %d\n", ipt);
goto fail_err_check;
}
@@ -1514,7 +1514,7 @@
* of equations that needs extra word param
*/
if (extra_bytes > 13) {
- IPAHAL_ERR("too much extra bytes\n");
+ IPAHAL_ERR_RL("too much extra bytes\n");
return -EPERM;
} else if (extra_bytes > IPA3_0_HW_TBL_HDR_WIDTH) {
/* two extra words */
@@ -2041,7 +2041,7 @@
if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
- IPAHAL_ERR("ran out of meq128 eq\n");
+ IPAHAL_ERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
@@ -2069,7 +2069,7 @@
if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
- IPAHAL_ERR("ran out of meq128 eq\n");
+ IPAHAL_ERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
@@ -2097,7 +2097,7 @@
if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
- IPAHAL_ERR("ran out of meq128 eq\n");
+ IPAHAL_ERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
@@ -2114,7 +2114,7 @@
if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
- IPAHAL_ERR("ran out of meq128 eq\n");
+ IPAHAL_ERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
@@ -2130,7 +2130,7 @@
if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
- IPAHAL_ERR("ran out of meq128 eq\n");
+ IPAHAL_ERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
@@ -2146,7 +2146,7 @@
if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
- IPAHAL_ERR("ran out of meq128 eq\n");
+ IPAHAL_ERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
@@ -2162,7 +2162,7 @@
if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
- IPAHAL_ERR("ran out of meq128 eq\n");
+ IPAHAL_ERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
@@ -2180,7 +2180,7 @@
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
ihl_ofst_meq32) || IPA_IS_RAN_OUT_OF_EQ(
ipa3_0_ihl_ofst_meq32, ihl_ofst_meq32 + 1)) {
- IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ IPAHAL_ERR_RL("ran out of ihl_meq32 eq\n");
return -EPERM;
}
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
@@ -2213,7 +2213,7 @@
if (attrib->attrib_mask & IPA_FLT_TCP_SYN) {
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
ihl_ofst_meq32)) {
- IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ IPAHAL_ERR_RL("ran out of ihl_meq32 eq\n");
return -EPERM;
}
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
@@ -2229,7 +2229,7 @@
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
ihl_ofst_meq32) || IPA_IS_RAN_OUT_OF_EQ(
ipa3_0_ihl_ofst_meq32, ihl_ofst_meq32 + 1)) {
- IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ IPAHAL_ERR_RL("ran out of ihl_meq32 eq\n");
return -EPERM;
}
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
@@ -2271,7 +2271,7 @@
if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
- IPAHAL_ERR("ran out of meq32 eq\n");
+ IPAHAL_ERR_RL("ran out of meq32 eq\n");
return -EPERM;
}
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
@@ -2287,7 +2287,7 @@
if (attrib->attrib_mask & IPA_FLT_TYPE) {
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
ihl_ofst_meq32)) {
- IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ IPAHAL_ERR_RL("ran out of ihl_meq32 eq\n");
return -EPERM;
}
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
@@ -2302,7 +2302,7 @@
if (attrib->attrib_mask & IPA_FLT_CODE) {
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
ihl_ofst_meq32)) {
- IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ IPAHAL_ERR_RL("ran out of ihl_meq32 eq\n");
return -EPERM;
}
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
@@ -2317,7 +2317,7 @@
if (attrib->attrib_mask & IPA_FLT_SPI) {
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
ihl_ofst_meq32)) {
- IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ IPAHAL_ERR_RL("ran out of ihl_meq32 eq\n");
return -EPERM;
}
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
@@ -2342,7 +2342,7 @@
if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
ihl_ofst_rng16)) {
- IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+ IPAHAL_ERR_RL("ran out of ihl_rng16 eq\n");
return -EPERM;
}
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
@@ -2358,7 +2358,7 @@
if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
ihl_ofst_rng16)) {
- IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+ IPAHAL_ERR_RL("ran out of ihl_rng16 eq\n");
return -EPERM;
}
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
@@ -2374,11 +2374,11 @@
if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
ihl_ofst_rng16)) {
- IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+ IPAHAL_ERR_RL("ran out of ihl_rng16 eq\n");
return -EPERM;
}
if (attrib->src_port_hi < attrib->src_port_lo) {
- IPAHAL_ERR("bad src port range param\n");
+ IPAHAL_ERR_RL("bad src port range param\n");
return -EPERM;
}
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
@@ -2394,11 +2394,11 @@
if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
ihl_ofst_rng16)) {
- IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+ IPAHAL_ERR_RL("ran out of ihl_rng16 eq\n");
return -EPERM;
}
if (attrib->dst_port_hi < attrib->dst_port_lo) {
- IPAHAL_ERR("bad dst port range param\n");
+ IPAHAL_ERR_RL("bad dst port range param\n");
return -EPERM;
}
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
@@ -2414,7 +2414,7 @@
if (attrib->attrib_mask & IPA_FLT_TCP_SYN_L2TP) {
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
ihl_ofst_rng16)) {
- IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+ IPAHAL_ERR_RL("ran out of ihl_rng16 eq\n");
return -EPERM;
}
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
@@ -2713,7 +2713,7 @@
break;
default:
IPAHAL_ERR("Invalid Rule Action %d\n", rule_hdr->u.hdr.action);
- WARN_ON(1);
+ WARN_ON_RATELIMIT_IPA(1);
rule->rule.action = rule_hdr->u.hdr.action;
}
@@ -2760,7 +2760,7 @@
break;
default:
IPAHAL_ERR("Invalid Rule Action %d\n", rule_hdr->u.hdr.action);
- WARN_ON(1);
+ WARN_ON_RATELIMIT_IPA(1);
rule->rule.action = rule_hdr->u.hdr.action;
}
@@ -3221,7 +3221,7 @@
obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
if (!params) {
- IPAHAL_ERR("Input error: params=%p\n", params);
+ IPAHAL_ERR_RL("Input error: params=%p\n", params);
return -EINVAL;
}
@@ -3230,7 +3230,7 @@
params->nhash_hdr.size,
¶ms->nhash_hdr.phys_base, GFP_KERNEL);
if (!params->nhash_hdr.base) {
- IPAHAL_ERR("fail to alloc DMA buff of size %d\n",
+ IPAHAL_ERR_RL("fail to alloc DMA buff of size %d\n",
params->nhash_hdr.size);
goto nhash_alloc_fail;
}
@@ -3241,7 +3241,7 @@
params->hash_hdr.size, ¶ms->hash_hdr.phys_base,
GFP_KERNEL);
if (!params->hash_hdr.base) {
- IPAHAL_ERR("fail to alloc DMA buff of size %d\n",
+ IPAHAL_ERR_RL("fail to alloc DMA buff of size %d\n",
params->hash_hdr.size);
goto hash_alloc_fail;
}
@@ -3374,21 +3374,21 @@
/* Input validation */
if (!params) {
- IPAHAL_ERR("Input err: no params\n");
+ IPAHAL_ERR_RL("Input err: no params\n");
return -EINVAL;
}
if (params->ipt >= IPA_IP_MAX) {
- IPAHAL_ERR("Input err: Invalid ip type %d\n", params->ipt);
+ IPAHAL_ERR_RL("Input err: Invalid ip type %d\n", params->ipt);
return -EINVAL;
}
if (ipa_fltrt_alloc_init_tbl_hdr(params)) {
- IPAHAL_ERR("fail to alloc and init tbl hdr\n");
+ IPAHAL_ERR_RL("fail to alloc and init tbl hdr\n");
return -ENOMEM;
}
if (ipa_fltrt_alloc_lcl_bdy(params)) {
- IPAHAL_ERR("fail to alloc tbl bodies\n");
+ IPAHAL_ERR_RL("fail to alloc tbl bodies\n");
goto bdy_alloc_fail;
}
@@ -3649,12 +3649,12 @@
IPAHAL_DBG_LOW("Entry\n");
if (ipt >= IPA_IP_MAX) {
- IPAHAL_ERR("Input err: Invalid ip type %d\n", ipt);
+ IPAHAL_ERR_RL("Input err: Invalid ip type %d\n", ipt);
return -EINVAL;
}
if (!attrib || !eq_atrb) {
- IPAHAL_ERR("Input err: attrib=%p eq_atrb=%p\n",
+ IPAHAL_ERR_RL("Input err: attrib=%p eq_atrb=%p\n",
attrib, eq_atrb);
return -EINVAL;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
index 4ccb7e0..8f78d56 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
@@ -46,6 +46,16 @@
IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \
} while (0)
+#define IPAHAL_ERR_RL(fmt, args...) \
+ do { \
+ pr_err_ratelimited_ipa(IPAHAL_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+ IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \
+ } while (0)
+
#define IPAHAL_MEM_ALLOC(__size, __is_atomic_ctx) \
(kzalloc((__size), ((__is_atomic_ctx) ? GFP_ATOMIC : GFP_KERNEL)))
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
index 74f5bbd..1d8eb13 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -1910,6 +1910,8 @@
return;
}
+ memset(valmask, 0, sizeof(struct ipahal_reg_valmask));
+
if (ipahal_ctx->hw_type <= IPA_HW_v3_1) {
shft = IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT;
bmsk = IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK;
diff --git a/drivers/platform/msm/msm_11ad/msm_11ad.c b/drivers/platform/msm/msm_11ad/msm_11ad.c
index d55e655..f64e9de 100644
--- a/drivers/platform/msm/msm_11ad/msm_11ad.c
+++ b/drivers/platform/msm/msm_11ad/msm_11ad.c
@@ -1086,6 +1086,10 @@
ctx->keep_radio_on_during_sleep = of_property_read_bool(of_node,
"qcom,keep-radio-on-during-sleep");
ctx->bus_scale = msm_bus_cl_get_pdata(pdev);
+ if (!ctx->bus_scale) {
+ dev_err(ctx->dev, "Unable to read bus-scaling from DT\n");
+ return -EINVAL;
+ }
ctx->smmu_s1_en = of_property_read_bool(of_node, "qcom,smmu-s1-en");
if (ctx->smmu_s1_en) {
@@ -1114,7 +1118,7 @@
rc = msm_11ad_init_vregs(ctx);
if (rc) {
dev_err(ctx->dev, "msm_11ad_init_vregs failed: %d\n", rc);
- return rc;
+ goto out_bus_scale;
}
rc = msm_11ad_enable_vregs(ctx);
if (rc) {
@@ -1173,6 +1177,18 @@
}
ctx->pcidev = pcidev;
+ rc = msm_pcie_pm_control(MSM_PCIE_RESUME, pcidev->bus->number,
+ pcidev, NULL, 0);
+ if (rc) {
+ dev_err(ctx->dev, "msm_pcie_pm_control(RESUME) failed:%d\n",
+ rc);
+ goto out_rc;
+ }
+
+ pci_set_power_state(pcidev, PCI_D0);
+
+ pci_restore_state(ctx->pcidev);
+
/* Read current state */
rc = pci_read_config_dword(pcidev,
PCIE20_CAP_LINKCTRLSTATUS, &val);
@@ -1180,7 +1196,7 @@
dev_err(ctx->dev,
"reading PCIE20_CAP_LINKCTRLSTATUS failed:%d\n",
rc);
- goto out_rc;
+ goto out_suspend;
}
ctx->l1_enabled_in_enum = val & PCI_EXP_LNKCTL_ASPM_L1;
@@ -1193,7 +1209,7 @@
if (rc) {
dev_err(ctx->dev,
"failed to disable L1, rc %d\n", rc);
- goto out_rc;
+ goto out_suspend;
}
}
@@ -1213,7 +1229,7 @@
rc = msm_11ad_ssr_init(ctx);
if (rc) {
dev_err(ctx->dev, "msm_11ad_ssr_init failed: %d\n", rc);
- goto out_rc;
+ goto out_suspend;
}
msm_11ad_init_cpu_boost(ctx);
@@ -1235,6 +1251,9 @@
msm_11ad_suspend_power_off(ctx);
return 0;
+out_suspend:
+ msm_pcie_pm_control(MSM_PCIE_SUSPEND, pcidev->bus->number,
+ pcidev, NULL, 0);
out_rc:
if (ctx->gpio_en >= 0)
gpio_direction_output(ctx->gpio_en, 0);
@@ -1248,6 +1267,8 @@
msm_11ad_release_clocks(ctx);
msm_11ad_disable_vregs(ctx);
msm_11ad_release_vregs(ctx);
+out_bus_scale:
+ msm_bus_cl_clear_pdata(ctx->bus_scale);
return rc;
}
@@ -1262,7 +1283,6 @@
ctx->pcidev);
kfree(ctx->pristine_state);
- msm_bus_cl_clear_pdata(ctx->bus_scale);
pci_dev_put(ctx->pcidev);
if (ctx->gpio_en >= 0) {
gpio_direction_output(ctx->gpio_en, 0);
@@ -1423,6 +1443,7 @@
dev_info(ctx->dev, "SSR requested\n");
(void)msm_11ad_ssr_copy_ramdump(ctx);
ctx->recovery_in_progress = true;
+ subsys_set_crash_status(ctx->subsys, CRASH_STATUS_ERR_FATAL);
rc = subsystem_restart_dev(ctx->subsys);
if (rc) {
dev_err(ctx->dev,
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 96ffda4..454cb2e 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -248,7 +248,7 @@
int ret = hp_wmi_perform_query(HPWMI_DISPLAY_QUERY, 0, &state,
sizeof(state), sizeof(state));
if (ret)
- return -EINVAL;
+ return ret < 0 ? ret : -EINVAL;
return state;
}
@@ -258,7 +258,7 @@
int ret = hp_wmi_perform_query(HPWMI_HDDTEMP_QUERY, 0, &state,
sizeof(state), sizeof(state));
if (ret)
- return -EINVAL;
+ return ret < 0 ? ret : -EINVAL;
return state;
}
@@ -268,7 +268,7 @@
int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 0, &state,
sizeof(state), sizeof(state));
if (ret)
- return -EINVAL;
+ return ret < 0 ? ret : -EINVAL;
return state;
}
@@ -279,7 +279,7 @@
sizeof(state), sizeof(state));
if (ret)
- return -EINVAL;
+ return ret < 0 ? ret : -EINVAL;
return state & 0x1;
}
@@ -290,7 +290,7 @@
int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state,
sizeof(state), sizeof(state));
if (ret)
- return ret;
+ return ret < 0 ? ret : -EINVAL;
return (state & 0x4) ? 1 : 0;
}
@@ -323,7 +323,7 @@
int ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, 1, &value,
sizeof(value), 0);
if (ret)
- return -EINVAL;
+ return ret < 0 ? ret : -EINVAL;
return 0;
}
@@ -336,7 +336,7 @@
ret = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1,
&query, sizeof(query), 0);
if (ret)
- return -EINVAL;
+ return ret < 0 ? ret : -EINVAL;
return 0;
}
@@ -428,7 +428,7 @@
int ret = hp_wmi_perform_query(HPWMI_POSTCODEERROR_QUERY, 0, &state,
sizeof(state), sizeof(state));
if (ret)
- return -EINVAL;
+ return ret < 0 ? ret : -EINVAL;
return state;
}
@@ -494,7 +494,7 @@
int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 1, &tmp,
sizeof(tmp), sizeof(tmp));
if (ret)
- return -EINVAL;
+ return ret < 0 ? ret : -EINVAL;
return count;
}
@@ -515,7 +515,7 @@
ret = hp_wmi_perform_query(HPWMI_POSTCODEERROR_QUERY, 1, &tmp,
sizeof(tmp), sizeof(tmp));
if (ret)
- return -EINVAL;
+ return ret < 0 ? ret : -EINVAL;
return count;
}
@@ -572,10 +572,12 @@
switch (event_id) {
case HPWMI_DOCK_EVENT:
- input_report_switch(hp_wmi_input_dev, SW_DOCK,
- hp_wmi_dock_state());
- input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
- hp_wmi_tablet_state());
+ if (test_bit(SW_DOCK, hp_wmi_input_dev->swbit))
+ input_report_switch(hp_wmi_input_dev, SW_DOCK,
+ hp_wmi_dock_state());
+ if (test_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit))
+ input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
+ hp_wmi_tablet_state());
input_sync(hp_wmi_input_dev);
break;
case HPWMI_PARK_HDD:
@@ -644,6 +646,7 @@
{
acpi_status status;
int err;
+ int val;
hp_wmi_input_dev = input_allocate_device();
if (!hp_wmi_input_dev)
@@ -654,17 +657,26 @@
hp_wmi_input_dev->id.bustype = BUS_HOST;
__set_bit(EV_SW, hp_wmi_input_dev->evbit);
- __set_bit(SW_DOCK, hp_wmi_input_dev->swbit);
- __set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
+
+ /* Dock */
+ val = hp_wmi_dock_state();
+ if (!(val < 0)) {
+ __set_bit(SW_DOCK, hp_wmi_input_dev->swbit);
+ input_report_switch(hp_wmi_input_dev, SW_DOCK, val);
+ }
+
+ /* Tablet mode */
+ val = hp_wmi_tablet_state();
+ if (!(val < 0)) {
+ __set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
+ input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val);
+ }
err = sparse_keymap_setup(hp_wmi_input_dev, hp_wmi_keymap, NULL);
if (err)
goto err_free_dev;
/* Set initial hardware state */
- input_report_switch(hp_wmi_input_dev, SW_DOCK, hp_wmi_dock_state());
- input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
- hp_wmi_tablet_state());
input_sync(hp_wmi_input_dev);
if (!hp_wmi_bios_2009_later() && hp_wmi_bios_2008_later())
@@ -950,10 +962,12 @@
* changed.
*/
if (hp_wmi_input_dev) {
- input_report_switch(hp_wmi_input_dev, SW_DOCK,
- hp_wmi_dock_state());
- input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
- hp_wmi_tablet_state());
+ if (test_bit(SW_DOCK, hp_wmi_input_dev->swbit))
+ input_report_switch(hp_wmi_input_dev, SW_DOCK,
+ hp_wmi_dock_state());
+ if (test_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit))
+ input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
+ hp_wmi_tablet_state());
input_sync(hp_wmi_input_dev);
}
diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c
index 9f713b8..5c768c4 100644
--- a/drivers/platform/x86/intel_mid_thermal.c
+++ b/drivers/platform/x86/intel_mid_thermal.c
@@ -550,6 +550,7 @@
{ "msic_thermal", 1 },
{ }
};
+MODULE_DEVICE_TABLE(platform, therm_id_table);
static struct platform_driver mid_thermal_driver = {
.driver = {
diff --git a/drivers/power/reset/msm-poweroff.c b/drivers/power/reset/msm-poweroff.c
index c090b2a..bfc401a 100644
--- a/drivers/power/reset/msm-poweroff.c
+++ b/drivers/power/reset/msm-poweroff.c
@@ -33,6 +33,7 @@
#include <soc/qcom/scm.h>
#include <soc/qcom/restart.h>
#include <soc/qcom/watchdog.h>
+#include <soc/qcom/minidump.h>
#define EMERGENCY_DLOAD_MAGIC1 0x322A4F99
#define EMERGENCY_DLOAD_MAGIC2 0xC67E4350
@@ -42,10 +43,11 @@
#define SCM_IO_DISABLE_PMIC_ARBITER 1
#define SCM_IO_DEASSERT_PS_HOLD 2
#define SCM_WDOG_DEBUG_BOOT_PART 0x9
-#define SCM_DLOAD_MODE 0X10
+#define SCM_DLOAD_FULLDUMP 0X10
#define SCM_EDLOAD_MODE 0X01
#define SCM_DLOAD_CMD 0x10
-
+#define SCM_DLOAD_MINIDUMP 0X20
+#define SCM_DLOAD_BOTHDUMPS (SCM_DLOAD_MINIDUMP | SCM_DLOAD_FULLDUMP)
static int restart_mode;
static void __iomem *restart_reason, *dload_type_addr;
@@ -65,6 +67,7 @@
#endif
static int in_panic;
+static int dload_type = SCM_DLOAD_FULLDUMP;
static void *dload_mode_addr;
static bool dload_mode_enabled;
static void *emergency_dload_mode_addr;
@@ -137,7 +140,7 @@
mb();
}
- ret = scm_set_dload_mode(on ? SCM_DLOAD_MODE : 0, 0);
+ ret = scm_set_dload_mode(on ? dload_type : 0, 0);
if (ret)
pr_err("Failed to set secure DLOAD mode: %d\n", ret);
@@ -452,6 +455,9 @@
{
uint32_t read_val, show_val;
+ if (!dload_type_addr)
+ return -ENODEV;
+
read_val = __raw_readl(dload_type_addr);
if (read_val == EMMC_DLOAD_TYPE)
show_val = 1;
@@ -467,6 +473,9 @@
uint32_t enabled;
int ret;
+ if (!dload_type_addr)
+ return -ENODEV;
+
ret = kstrtouint(buf, 0, &enabled);
if (ret < 0)
return ret;
@@ -481,10 +490,57 @@
return count;
}
+
+#ifdef CONFIG_QCOM_MINIDUMP
+static DEFINE_MUTEX(tcsr_lock);
+
+static ssize_t show_dload_mode(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "DLOAD dump type: %s\n",
+ (dload_type == SCM_DLOAD_BOTHDUMPS) ? "both" :
+ ((dload_type == SCM_DLOAD_MINIDUMP) ? "mini" : "full"));
+}
+
+static size_t store_dload_mode(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ if (sysfs_streq(buf, "full")) {
+ dload_type = SCM_DLOAD_FULLDUMP;
+ } else if (sysfs_streq(buf, "mini")) {
+ if (!msm_minidump_enabled()) {
+ pr_err("Minidump is not enabled\n");
+ return -ENODEV;
+ }
+ dload_type = SCM_DLOAD_MINIDUMP;
+ } else if (sysfs_streq(buf, "both")) {
+ if (!msm_minidump_enabled()) {
+ pr_err("Minidump not enabled, setting fulldump only\n");
+ dload_type = SCM_DLOAD_FULLDUMP;
+ return count;
+ }
+ dload_type = SCM_DLOAD_BOTHDUMPS;
+ } else{
+ pr_err("Invalid Dump setup request..\n");
+ pr_err("Supported dumps:'full', 'mini', or 'both'\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&tcsr_lock);
+ /*Overwrite TCSR reg*/
+ set_dload_mode(dload_type);
+ mutex_unlock(&tcsr_lock);
+ return count;
+}
+RESET_ATTR(dload_mode, 0644, show_dload_mode, store_dload_mode);
+#endif
RESET_ATTR(emmc_dload, 0644, show_emmc_dload, store_emmc_dload);
static struct attribute *reset_attrs[] = {
&reset_attr_emmc_dload.attr,
+#ifdef CONFIG_QCOM_MINIDUMP
+ &reset_attr_dload_mode.attr,
+#endif
NULL
};
diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h
index 9179325..99120f4 100644
--- a/drivers/power/supply/qcom/fg-core.h
+++ b/drivers/power/supply/qcom/fg-core.h
@@ -458,7 +458,6 @@
bool qnovo_enable;
struct completion soc_update;
struct completion soc_ready;
- struct completion mem_grant;
struct delayed_work profile_load_work;
struct work_struct status_change_work;
struct delayed_work ttf_work;
diff --git a/drivers/power/supply/qcom/fg-memif.c b/drivers/power/supply/qcom/fg-memif.c
index 279b097..d9b5ad7 100644
--- a/drivers/power/supply/qcom/fg-memif.c
+++ b/drivers/power/supply/qcom/fg-memif.c
@@ -746,15 +746,12 @@
return rc;
}
-#define MEM_GRANT_WAIT_MS 200
+#define MEM_GNT_WAIT_TIME_US 10000
+#define MEM_GNT_RETRIES 20
static int fg_direct_mem_request(struct fg_chip *chip, bool request)
{
- int rc, ret;
+ int rc, ret, i = 0;
u8 val, mask;
- bool tried_again = false;
-
- if (request)
- reinit_completion(&chip->mem_grant);
mask = MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT;
val = request ? MEM_ACCESS_REQ_BIT : 0;
@@ -769,7 +766,7 @@
rc = fg_masked_write(chip, MEM_IF_MEM_ARB_CFG(chip), mask, val);
if (rc < 0) {
pr_err("failed to configure mem_if_mem_arb_cfg rc:%d\n", rc);
- return rc;
+ goto release;
}
if (request)
@@ -780,43 +777,39 @@
if (!request)
return 0;
-wait:
- ret = wait_for_completion_interruptible_timeout(
- &chip->mem_grant, msecs_to_jiffies(MEM_GRANT_WAIT_MS));
- /* If we were interrupted wait again one more time. */
- if (ret <= 0) {
- if ((ret == -ERESTARTSYS || ret == 0) && !tried_again) {
- pr_debug("trying again, ret=%d\n", ret);
- tried_again = true;
- goto wait;
- } else {
- pr_err("wait for mem_grant timed out ret=%d\n",
- ret);
- fg_dump_regs(chip);
+ while (i < MEM_GNT_RETRIES) {
+ rc = fg_read(chip, MEM_IF_INT_RT_STS(chip), &val, 1);
+ if (rc < 0) {
+ pr_err("Error in reading MEM_IF_INT_RT_STS, rc=%d\n",
+ rc);
+ goto release;
}
+
+ if (val & MEM_GNT_BIT)
+ return 0;
+
+ usleep_range(MEM_GNT_WAIT_TIME_US, MEM_GNT_WAIT_TIME_US + 1);
+ i++;
}
- if (ret <= 0) {
- val = 0;
- mask = MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT;
- rc = fg_masked_write(chip, MEM_IF_MEM_INTF_CFG(chip), mask,
- val);
- if (rc < 0) {
- pr_err("failed to configure mem_if_mem_intf_cfg rc=%d\n",
- rc);
- return rc;
- }
+ rc = -ETIMEDOUT;
+ pr_err("wait for mem_grant timed out, val=0x%x\n", val);
+ fg_dump_regs(chip);
- mask = MEM_ARB_LO_LATENCY_EN_BIT | MEM_ARB_REQ_BIT;
- rc = fg_masked_write(chip, MEM_IF_MEM_ARB_CFG(chip), mask,
- val);
- if (rc < 0) {
- pr_err("failed to configure mem_if_mem_arb_cfg rc:%d\n",
- rc);
- return rc;
- }
+release:
+ val = 0;
+ mask = MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT;
+ ret = fg_masked_write(chip, MEM_IF_MEM_INTF_CFG(chip), mask, val);
+ if (ret < 0) {
+ pr_err("failed to configure mem_if_mem_intf_cfg rc=%d\n", rc);
+ return ret;
+ }
- return -ETIMEDOUT;
+ mask = MEM_ARB_LO_LATENCY_EN_BIT | MEM_ARB_REQ_BIT;
+ ret = fg_masked_write(chip, MEM_IF_MEM_ARB_CFG(chip), mask, val);
+ if (ret < 0) {
+ pr_err("failed to configure mem_if_mem_arb_cfg rc:%d\n", rc);
+ return ret;
}
return rc;
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index 2044657..8c53b2e 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -881,7 +881,7 @@
return 0;
}
- if (chip->battery_missing) {
+ if (chip->battery_missing || !chip->soc_reporting_ready) {
*val = BATT_MISS_SOC;
return 0;
}
@@ -2567,6 +2567,11 @@
goto out;
}
+ if (!chip->soc_reporting_ready) {
+ fg_dbg(chip, FG_STATUS, "Profile load is not complete yet\n");
+ goto out;
+ }
+
rc = power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_STATUS,
&prop);
if (rc < 0) {
@@ -2630,7 +2635,7 @@
fg_ttf_update(chip);
chip->prev_charge_status = chip->charge_status;
out:
- fg_dbg(chip, FG_POWER_SUPPLY, "charge_status:%d charge_type:%d charge_done:%d\n",
+ fg_dbg(chip, FG_STATUS, "charge_status:%d charge_type:%d charge_done:%d\n",
chip->charge_status, chip->charge_type, chip->charge_done);
pm_relax(chip->dev);
}
@@ -2733,6 +2738,49 @@
return true;
}
+static void fg_update_batt_profile(struct fg_chip *chip)
+{
+ int rc, offset;
+ u8 val;
+
+ rc = fg_sram_read(chip, PROFILE_INTEGRITY_WORD,
+ SW_CONFIG_OFFSET, &val, 1, FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in reading SW_CONFIG_OFFSET, rc=%d\n", rc);
+ return;
+ }
+
+ /*
+ * If the RCONN had not been updated, no need to update battery
+ * profile. Else, update the battery profile so that the profile
+ * modified by bootloader or HLOS matches with the profile read
+ * from device tree.
+ */
+
+ if (!(val & RCONN_CONFIG_BIT))
+ return;
+
+ rc = fg_sram_read(chip, ESR_RSLOW_CHG_WORD,
+ ESR_RSLOW_CHG_OFFSET, &val, 1, FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in reading ESR_RSLOW_CHG_OFFSET, rc=%d\n", rc);
+ return;
+ }
+ offset = (ESR_RSLOW_CHG_WORD - PROFILE_LOAD_WORD) * 4
+ + ESR_RSLOW_CHG_OFFSET;
+ chip->batt_profile[offset] = val;
+
+ rc = fg_sram_read(chip, ESR_RSLOW_DISCHG_WORD,
+ ESR_RSLOW_DISCHG_OFFSET, &val, 1, FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in reading ESR_RSLOW_DISCHG_OFFSET, rc=%d\n", rc);
+ return;
+ }
+ offset = (ESR_RSLOW_DISCHG_WORD - PROFILE_LOAD_WORD) * 4
+ + ESR_RSLOW_DISCHG_OFFSET;
+ chip->batt_profile[offset] = val;
+}
+
static void clear_battery_profile(struct fg_chip *chip)
{
u8 val = 0;
@@ -2826,6 +2874,8 @@
if (!chip->profile_available)
goto out;
+ fg_update_batt_profile(chip);
+
if (!is_profile_load_required(chip))
goto done;
@@ -2887,6 +2937,10 @@
rc);
}
+ rc = fg_rconn_config(chip);
+ if (rc < 0)
+ pr_err("Error in configuring Rconn, rc=%d\n", rc);
+
batt_psy_initialized(chip);
fg_notify_charger(chip);
chip->profile_loaded = true;
@@ -2896,6 +2950,10 @@
vote(chip->awake_votable, ESR_FCC_VOTER, true, 0);
schedule_delayed_work(&chip->pl_enable_work, msecs_to_jiffies(5000));
vote(chip->awake_votable, PROFILE_LOAD, false, 0);
+ if (!work_pending(&chip->status_change_work)) {
+ pm_stay_awake(chip->dev);
+ schedule_work(&chip->status_change_work);
+ }
}
static void sram_dump_work(struct work_struct *work)
@@ -4083,12 +4141,6 @@
return rc;
}
- rc = fg_rconn_config(chip);
- if (rc < 0) {
- pr_err("Error in configuring Rconn, rc=%d\n", rc);
- return rc;
- }
-
fg_encode(chip->sp, FG_SRAM_ESR_TIGHT_FILTER,
chip->dt.esr_tight_flt_upct, buf);
rc = fg_sram_write(chip, chip->sp[FG_SRAM_ESR_TIGHT_FILTER].addr_word,
@@ -4184,25 +4236,6 @@
/* INTERRUPT HANDLERS STAY HERE */
-static irqreturn_t fg_dma_grant_irq_handler(int irq, void *data)
-{
- struct fg_chip *chip = data;
- u8 status;
- int rc;
-
- rc = fg_read(chip, MEM_IF_INT_RT_STS(chip), &status, 1);
- if (rc < 0) {
- pr_err("failed to read addr=0x%04x, rc=%d\n",
- MEM_IF_INT_RT_STS(chip), rc);
- return IRQ_HANDLED;
- }
-
- fg_dbg(chip, FG_IRQ, "irq %d triggered, status:%d\n", irq, status);
- complete_all(&chip->mem_grant);
-
- return IRQ_HANDLED;
-}
-
static irqreturn_t fg_mem_xcp_irq_handler(int irq, void *data)
{
struct fg_chip *chip = data;
@@ -4490,7 +4523,7 @@
/* MEM_IF irqs */
[DMA_GRANT_IRQ] = {
.name = "dma-grant",
- .handler = fg_dma_grant_irq_handler,
+ .handler = fg_dummy_irq_handler,
.wakeable = true,
},
[MEM_XCP_IRQ] = {
@@ -5167,7 +5200,6 @@
mutex_init(&chip->qnovo_esr_ctrl_lock);
init_completion(&chip->soc_update);
init_completion(&chip->soc_ready);
- init_completion(&chip->mem_grant);
INIT_DELAYED_WORK(&chip->profile_load_work, profile_load_work);
INIT_DELAYED_WORK(&chip->pl_enable_work, pl_enable_work);
INIT_WORK(&chip->status_change_work, status_change_work);
@@ -5183,23 +5215,6 @@
platform_set_drvdata(pdev, chip);
- rc = fg_register_interrupts(chip);
- if (rc < 0) {
- dev_err(chip->dev, "Error in registering interrupts, rc:%d\n",
- rc);
- goto exit;
- }
-
- /* Keep SOC_UPDATE irq disabled until we require it */
- if (fg_irqs[SOC_UPDATE_IRQ].irq)
- disable_irq_nosync(fg_irqs[SOC_UPDATE_IRQ].irq);
-
- /* Keep BSOC_DELTA_IRQ disabled until we require it */
- vote(chip->delta_bsoc_irq_en_votable, DELTA_BSOC_IRQ_VOTER, false, 0);
-
- /* Keep BATT_MISSING_IRQ disabled until we require it */
- vote(chip->batt_miss_irq_en_votable, BATT_MISS_IRQ_VOTER, false, 0);
-
rc = fg_hw_init(chip);
if (rc < 0) {
dev_err(chip->dev, "Error in initializing FG hardware, rc:%d\n",
@@ -5227,6 +5242,23 @@
goto exit;
}
+ rc = fg_register_interrupts(chip);
+ if (rc < 0) {
+ dev_err(chip->dev, "Error in registering interrupts, rc:%d\n",
+ rc);
+ goto exit;
+ }
+
+ /* Keep SOC_UPDATE irq disabled until we require it */
+ if (fg_irqs[SOC_UPDATE_IRQ].irq)
+ disable_irq_nosync(fg_irqs[SOC_UPDATE_IRQ].irq);
+
+ /* Keep BSOC_DELTA_IRQ disabled until we require it */
+ vote(chip->delta_bsoc_irq_en_votable, DELTA_BSOC_IRQ_VOTER, false, 0);
+
+ /* Keep BATT_MISSING_IRQ disabled until we require it */
+ vote(chip->batt_miss_irq_en_votable, BATT_MISS_IRQ_VOTER, false, 0);
+
rc = fg_debugfs_create(chip);
if (rc < 0) {
dev_err(chip->dev, "Error in creating debugfs entries, rc:%d\n",
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index 1cd3652..0012a92 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -2054,6 +2054,18 @@
return rc;
}
+static int smblib_force_vbus_voltage(struct smb_charger *chg, u8 val)
+{
+ int rc;
+
+ rc = smblib_masked_write(chg, CMD_HVDCP_2_REG, val, val);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't write to CMD_HVDCP_2_REG rc=%d\n",
+ rc);
+
+ return rc;
+}
+
int smblib_dp_dm(struct smb_charger *chg, int val)
{
int target_icl_ua, rc = 0;
@@ -2105,6 +2117,21 @@
smblib_dbg(chg, PR_PARALLEL, "ICL DOWN ICL=%d reduction=%d\n",
target_icl_ua, chg->usb_icl_delta_ua);
break;
+ case POWER_SUPPLY_DP_DM_FORCE_5V:
+ rc = smblib_force_vbus_voltage(chg, FORCE_5V_BIT);
+ if (rc < 0)
+ pr_err("Failed to force 5V\n");
+ break;
+ case POWER_SUPPLY_DP_DM_FORCE_9V:
+ rc = smblib_force_vbus_voltage(chg, FORCE_9V_BIT);
+ if (rc < 0)
+ pr_err("Failed to force 9V\n");
+ break;
+ case POWER_SUPPLY_DP_DM_FORCE_12V:
+ rc = smblib_force_vbus_voltage(chg, FORCE_12V_BIT);
+ if (rc < 0)
+ pr_err("Failed to force 12V\n");
+ break;
case POWER_SUPPLY_DP_DM_ICL_UP:
default:
break;
@@ -2512,23 +2539,16 @@
return rc;
}
- /* TEMP_RANGE bits are mutually exclusive */
- switch (stat & TEMP_RANGE_MASK) {
- case TEMP_BELOW_RANGE_BIT:
- val->intval = POWER_SUPPLY_HEALTH_COOL;
- break;
- case TEMP_WITHIN_RANGE_BIT:
- val->intval = POWER_SUPPLY_HEALTH_WARM;
- break;
- case TEMP_ABOVE_RANGE_BIT:
- val->intval = POWER_SUPPLY_HEALTH_HOT;
- break;
- case ALERT_LEVEL_BIT:
+ if (stat & ALERT_LEVEL_BIT)
val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
- break;
- default:
+ else if (stat & TEMP_ABOVE_RANGE_BIT)
+ val->intval = POWER_SUPPLY_HEALTH_HOT;
+ else if (stat & TEMP_WITHIN_RANGE_BIT)
+ val->intval = POWER_SUPPLY_HEALTH_WARM;
+ else if (stat & TEMP_BELOW_RANGE_BIT)
+ val->intval = POWER_SUPPLY_HEALTH_COOL;
+ else
val->intval = POWER_SUPPLY_HEALTH_UNKNOWN;
- }
return 0;
}
@@ -3566,16 +3586,6 @@
/* the APSD done handler will set the USB supply type */
apsd_result = smblib_get_apsd_result(chg);
- if (get_effective_result(chg->hvdcp_hw_inov_dis_votable)) {
- if (apsd_result->pst == POWER_SUPPLY_TYPE_USB_HVDCP) {
- /* force HVDCP2 to 9V if INOV is disabled */
- rc = smblib_masked_write(chg, CMD_HVDCP_2_REG,
- FORCE_9V_BIT, FORCE_9V_BIT);
- if (rc < 0)
- smblib_err(chg,
- "Couldn't force 9V HVDCP rc=%d\n", rc);
- }
- }
smblib_dbg(chg, PR_INTERRUPT, "IRQ: hvdcp-3p0-auth-done rising; %s detected\n",
apsd_result->name);
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index ec492af..e463117 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -4197,6 +4197,10 @@
const struct regulator_ops *ops;
mode_t mode;
+ /* Check if debugfs directory already exists */
+ if (rdev->debugfs)
+ return;
+
/* Avoid duplicate debugfs directory names */
if (parent && rname == rdev->desc->name) {
snprintf(name, sizeof(name), "%s-%s", dev_name(parent),
@@ -4221,6 +4225,7 @@
regulator = regulator_get(NULL, rdev_get_name(rdev));
if (IS_ERR(regulator)) {
+ debugfs_remove_recursive(rdev->debugfs);
rdev_err(rdev, "regulator get failed, ret=%ld\n",
PTR_ERR(regulator));
return;
@@ -4291,6 +4296,8 @@
if (regulator_resolve_supply(rdev))
rdev_dbg(rdev, "unable to resolve supply\n");
+ else
+ rdev_init_debugfs(rdev);
return 0;
}
diff --git a/drivers/regulator/refgen.c b/drivers/regulator/refgen.c
index 629fee0..830e1b0 100644
--- a/drivers/regulator/refgen.c
+++ b/drivers/regulator/refgen.c
@@ -31,7 +31,7 @@
#define REFGEN_BIAS_EN_DISABLE 0x6
#define REFGEN_REG_BG_CTRL 0x14
-#define REFGEN_BG_CTRL_MASK GENMASK(2, 0)
+#define REFGEN_BG_CTRL_MASK GENMASK(2, 1)
#define REFGEN_BG_CTRL_ENABLE 0x6
#define REFGEN_BG_CTRL_DISABLE 0x4
@@ -41,11 +41,21 @@
void __iomem *addr;
};
+static void masked_writel(u32 val, u32 mask, void __iomem *addr)
+{
+ u32 reg;
+
+ reg = readl_relaxed(addr);
+ reg = (reg & ~mask) | (val & mask);
+ writel_relaxed(reg, addr);
+}
+
static int refgen_enable(struct regulator_dev *rdev)
{
struct refgen *vreg = rdev_get_drvdata(rdev);
- writel_relaxed(REFGEN_BG_CTRL_ENABLE, vreg->addr + REFGEN_REG_BG_CTRL);
+ masked_writel(REFGEN_BG_CTRL_ENABLE, REFGEN_BG_CTRL_MASK,
+ vreg->addr + REFGEN_REG_BG_CTRL);
writel_relaxed(REFGEN_BIAS_EN_ENABLE, vreg->addr + REFGEN_REG_BIAS_EN);
return 0;
@@ -56,7 +66,8 @@
struct refgen *vreg = rdev_get_drvdata(rdev);
writel_relaxed(REFGEN_BIAS_EN_DISABLE, vreg->addr + REFGEN_REG_BIAS_EN);
- writel_relaxed(REFGEN_BG_CTRL_DISABLE, vreg->addr + REFGEN_REG_BG_CTRL);
+ masked_writel(REFGEN_BG_CTRL_DISABLE, REFGEN_BG_CTRL_MASK,
+ vreg->addr + REFGEN_REG_BG_CTRL);
return 0;
}
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 1de0890..5ecd408 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1704,8 +1704,11 @@
/* check for for attention message */
if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
device = dasd_device_from_cdev_locked(cdev);
- device->discipline->check_attention(device, irb->esw.esw1.lpum);
- dasd_put_device(device);
+ if (!IS_ERR(device)) {
+ device->discipline->check_attention(device,
+ irb->esw.esw1.lpum);
+ dasd_put_device(device);
+ }
}
if (!cqr)
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index f3756ca..d55e643 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -921,7 +921,6 @@
int qeth_core_hardsetup_card(struct qeth_card *);
void qeth_print_status_message(struct qeth_card *);
int qeth_init_qdio_queues(struct qeth_card *);
-int qeth_send_startlan(struct qeth_card *);
int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
int (*reply_cb)
(struct qeth_card *, struct qeth_reply *, unsigned long),
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index e8c4830..21ef802 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -2944,7 +2944,7 @@
}
EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
-int qeth_send_startlan(struct qeth_card *card)
+static int qeth_send_startlan(struct qeth_card *card)
{
int rc;
struct qeth_cmd_buffer *iob;
@@ -2957,7 +2957,6 @@
rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
return rc;
}
-EXPORT_SYMBOL_GPL(qeth_send_startlan);
static int qeth_default_setadapterparms_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
@@ -5091,6 +5090,20 @@
goto out;
}
+ rc = qeth_send_startlan(card);
+ if (rc) {
+ QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+ if (rc == IPA_RC_LAN_OFFLINE) {
+ dev_warn(&card->gdev->dev,
+ "The LAN is offline\n");
+ card->lan_online = 0;
+ } else {
+ rc = -ENODEV;
+ goto out;
+ }
+ } else
+ card->lan_online = 1;
+
card->options.ipa4.supported_funcs = 0;
card->options.ipa6.supported_funcs = 0;
card->options.adp.supported_funcs = 0;
@@ -5102,14 +5115,14 @@
if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
rc = qeth_query_setadapterparms(card);
if (rc < 0) {
- QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+ QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
goto out;
}
}
if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
rc = qeth_query_setdiagass(card);
if (rc < 0) {
- QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
+ QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc);
goto out;
}
}
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 5d010aa..8530477 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1204,21 +1204,6 @@
/* softsetup */
QETH_DBF_TEXT(SETUP, 2, "softsetp");
- rc = qeth_send_startlan(card);
- if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
- if (rc == 0xe080) {
- dev_warn(&card->gdev->dev,
- "The LAN is offline\n");
- card->lan_online = 0;
- goto contin;
- }
- rc = -ENODEV;
- goto out_remove;
- } else
- card->lan_online = 1;
-
-contin:
if ((card->info.type == QETH_CARD_TYPE_OSD) ||
(card->info.type == QETH_CARD_TYPE_OSX)) {
rc = qeth_l2_start_ipassists(card);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 171be5e..03a2619 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3230,21 +3230,6 @@
/* softsetup */
QETH_DBF_TEXT(SETUP, 2, "softsetp");
- rc = qeth_send_startlan(card);
- if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
- if (rc == 0xe080) {
- dev_warn(&card->gdev->dev,
- "The LAN is offline\n");
- card->lan_online = 0;
- goto contin;
- }
- rc = -ENODEV;
- goto out_remove;
- } else
- card->lan_online = 1;
-
-contin:
rc = qeth_l3_setadapter_parms(card);
if (rc)
QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index 0e00a5c..cffe42f 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -692,15 +692,15 @@
enum qeth_prot_versions proto)
{
struct qeth_ipaddr *ipaddr;
- struct hlist_node *tmp;
char addr_str[40];
+ int str_len = 0;
int entry_len; /* length of 1 entry string, differs between v4 and v6 */
- int i = 0;
+ int i;
entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
entry_len += 2; /* \n + terminator */
spin_lock_bh(&card->ip_lock);
- hash_for_each_safe(card->ip_htable, i, tmp, ipaddr, hnode) {
+ hash_for_each(card->ip_htable, i, ipaddr, hnode) {
if (ipaddr->proto != proto)
continue;
if (ipaddr->type != QETH_IP_TYPE_VIPA)
@@ -708,16 +708,17 @@
/* String must not be longer than PAGE_SIZE. So we check if
* string length gets near PAGE_SIZE. Then we can savely display
* the next IPv6 address (worst case, compared to IPv4) */
- if ((PAGE_SIZE - i) <= entry_len)
+ if ((PAGE_SIZE - str_len) <= entry_len)
break;
qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u,
addr_str);
- i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str);
+ str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "%s\n",
+ addr_str);
}
spin_unlock_bh(&card->ip_lock);
- i += snprintf(buf + i, PAGE_SIZE - i, "\n");
+ str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "\n");
- return i;
+ return str_len;
}
static ssize_t qeth_l3_dev_vipa_add4_show(struct device *dev,
@@ -854,15 +855,15 @@
enum qeth_prot_versions proto)
{
struct qeth_ipaddr *ipaddr;
- struct hlist_node *tmp;
char addr_str[40];
+ int str_len = 0;
int entry_len; /* length of 1 entry string, differs between v4 and v6 */
- int i = 0;
+ int i;
entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
entry_len += 2; /* \n + terminator */
spin_lock_bh(&card->ip_lock);
- hash_for_each_safe(card->ip_htable, i, tmp, ipaddr, hnode) {
+ hash_for_each(card->ip_htable, i, ipaddr, hnode) {
if (ipaddr->proto != proto)
continue;
if (ipaddr->type != QETH_IP_TYPE_RXIP)
@@ -870,16 +871,17 @@
/* String must not be longer than PAGE_SIZE. So we check if
* string length gets near PAGE_SIZE. Then we can savely display
* the next IPv6 address (worst case, compared to IPv4) */
- if ((PAGE_SIZE - i) <= entry_len)
+ if ((PAGE_SIZE - str_len) <= entry_len)
break;
qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u,
addr_str);
- i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str);
+ str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "%s\n",
+ addr_str);
}
spin_unlock_bh(&card->ip_lock);
- i += snprintf(buf + i, PAGE_SIZE - i, "\n");
+ str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "\n");
- return i;
+ return str_len;
}
static ssize_t qeth_l3_dev_rxip_add4_show(struct device *dev,
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 6678d1f..065f11a 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -2954,16 +2954,11 @@
return;
BUG_ON(fibptr == NULL);
+
dev = fibptr->dev;
- scsi_dma_unmap(scsicmd);
-
- /* expose physical device if expose_physicald flag is on */
- if (scsicmd->cmnd[0] == INQUIRY && !(scsicmd->cmnd[1] & 0x01)
- && expose_physicals > 0)
- aac_expose_phy_device(scsicmd);
-
srbreply = (struct aac_srb_reply *) fib_data(fibptr);
+
scsicmd->sense_buffer[0] = '\0'; /* Initialize sense valid flag to false */
if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) {
@@ -2976,158 +2971,176 @@
*/
scsi_set_resid(scsicmd, scsi_bufflen(scsicmd)
- le32_to_cpu(srbreply->data_xfer_length));
- /*
- * First check the fib status
- */
+ }
- if (le32_to_cpu(srbreply->status) != ST_OK) {
- int len;
- printk(KERN_WARNING "aac_srb_callback: srb failed, status = %d\n", le32_to_cpu(srbreply->status));
- len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
- SCSI_SENSE_BUFFERSIZE);
+ scsi_dma_unmap(scsicmd);
+
+ /* expose physical device if expose_physicald flag is on */
+ if (scsicmd->cmnd[0] == INQUIRY && !(scsicmd->cmnd[1] & 0x01)
+ && expose_physicals > 0)
+ aac_expose_phy_device(scsicmd);
+
+ /*
+ * First check the fib status
+ */
+
+ if (le32_to_cpu(srbreply->status) != ST_OK) {
+ int len;
+
+ pr_warn("aac_srb_callback: srb failed, status = %d\n",
+ le32_to_cpu(srbreply->status));
+ len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
+ SCSI_SENSE_BUFFERSIZE);
+ scsicmd->result = DID_ERROR << 16
+ | COMMAND_COMPLETE << 8
+ | SAM_STAT_CHECK_CONDITION;
+ memcpy(scsicmd->sense_buffer,
+ srbreply->sense_data, len);
+ }
+
+ /*
+ * Next check the srb status
+ */
+ switch ((le32_to_cpu(srbreply->srb_status))&0x3f) {
+ case SRB_STATUS_ERROR_RECOVERY:
+ case SRB_STATUS_PENDING:
+ case SRB_STATUS_SUCCESS:
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+ break;
+ case SRB_STATUS_DATA_OVERRUN:
+ switch (scsicmd->cmnd[0]) {
+ case READ_6:
+ case WRITE_6:
+ case READ_10:
+ case WRITE_10:
+ case READ_12:
+ case WRITE_12:
+ case READ_16:
+ case WRITE_16:
+ if (le32_to_cpu(srbreply->data_xfer_length)
+ < scsicmd->underflow)
+ pr_warn("aacraid: SCSI CMD underflow\n");
+ else
+ pr_warn("aacraid: SCSI CMD Data Overrun\n");
scsicmd->result = DID_ERROR << 16
- | COMMAND_COMPLETE << 8
- | SAM_STAT_CHECK_CONDITION;
- memcpy(scsicmd->sense_buffer,
- srbreply->sense_data, len);
- }
-
- /*
- * Next check the srb status
- */
- switch ((le32_to_cpu(srbreply->srb_status))&0x3f) {
- case SRB_STATUS_ERROR_RECOVERY:
- case SRB_STATUS_PENDING:
- case SRB_STATUS_SUCCESS:
+ | COMMAND_COMPLETE << 8;
+ break;
+ case INQUIRY:
+ scsicmd->result = DID_OK << 16
+ | COMMAND_COMPLETE << 8;
+ break;
+ default:
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
break;
- case SRB_STATUS_DATA_OVERRUN:
- switch (scsicmd->cmnd[0]) {
- case READ_6:
- case WRITE_6:
- case READ_10:
- case WRITE_10:
- case READ_12:
- case WRITE_12:
- case READ_16:
- case WRITE_16:
- if (le32_to_cpu(srbreply->data_xfer_length)
- < scsicmd->underflow)
- printk(KERN_WARNING"aacraid: SCSI CMD underflow\n");
- else
- printk(KERN_WARNING"aacraid: SCSI CMD Data Overrun\n");
- scsicmd->result = DID_ERROR << 16
- | COMMAND_COMPLETE << 8;
- break;
- case INQUIRY: {
- scsicmd->result = DID_OK << 16
- | COMMAND_COMPLETE << 8;
- break;
- }
- default:
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
- break;
- }
- break;
- case SRB_STATUS_ABORTED:
- scsicmd->result = DID_ABORT << 16 | ABORT << 8;
- break;
- case SRB_STATUS_ABORT_FAILED:
- /*
- * Not sure about this one - but assuming the
- * hba was trying to abort for some reason
- */
- scsicmd->result = DID_ERROR << 16 | ABORT << 8;
- break;
- case SRB_STATUS_PARITY_ERROR:
- scsicmd->result = DID_PARITY << 16
- | MSG_PARITY_ERROR << 8;
- break;
- case SRB_STATUS_NO_DEVICE:
- case SRB_STATUS_INVALID_PATH_ID:
- case SRB_STATUS_INVALID_TARGET_ID:
- case SRB_STATUS_INVALID_LUN:
- case SRB_STATUS_SELECTION_TIMEOUT:
- scsicmd->result = DID_NO_CONNECT << 16
- | COMMAND_COMPLETE << 8;
- break;
+ }
+ break;
+ case SRB_STATUS_ABORTED:
+ scsicmd->result = DID_ABORT << 16 | ABORT << 8;
+ break;
+ case SRB_STATUS_ABORT_FAILED:
+ /*
+ * Not sure about this one - but assuming the
+ * hba was trying to abort for some reason
+ */
+ scsicmd->result = DID_ERROR << 16 | ABORT << 8;
+ break;
+ case SRB_STATUS_PARITY_ERROR:
+ scsicmd->result = DID_PARITY << 16
+ | MSG_PARITY_ERROR << 8;
+ break;
+ case SRB_STATUS_NO_DEVICE:
+ case SRB_STATUS_INVALID_PATH_ID:
+ case SRB_STATUS_INVALID_TARGET_ID:
+ case SRB_STATUS_INVALID_LUN:
+ case SRB_STATUS_SELECTION_TIMEOUT:
+ scsicmd->result = DID_NO_CONNECT << 16
+ | COMMAND_COMPLETE << 8;
+ break;
- case SRB_STATUS_COMMAND_TIMEOUT:
- case SRB_STATUS_TIMEOUT:
- scsicmd->result = DID_TIME_OUT << 16
- | COMMAND_COMPLETE << 8;
- break;
+ case SRB_STATUS_COMMAND_TIMEOUT:
+ case SRB_STATUS_TIMEOUT:
+ scsicmd->result = DID_TIME_OUT << 16
+ | COMMAND_COMPLETE << 8;
+ break;
- case SRB_STATUS_BUSY:
- scsicmd->result = DID_BUS_BUSY << 16
- | COMMAND_COMPLETE << 8;
- break;
+ case SRB_STATUS_BUSY:
+ scsicmd->result = DID_BUS_BUSY << 16
+ | COMMAND_COMPLETE << 8;
+ break;
- case SRB_STATUS_BUS_RESET:
- scsicmd->result = DID_RESET << 16
- | COMMAND_COMPLETE << 8;
- break;
+ case SRB_STATUS_BUS_RESET:
+ scsicmd->result = DID_RESET << 16
+ | COMMAND_COMPLETE << 8;
+ break;
- case SRB_STATUS_MESSAGE_REJECTED:
- scsicmd->result = DID_ERROR << 16
- | MESSAGE_REJECT << 8;
- break;
- case SRB_STATUS_REQUEST_FLUSHED:
- case SRB_STATUS_ERROR:
- case SRB_STATUS_INVALID_REQUEST:
- case SRB_STATUS_REQUEST_SENSE_FAILED:
- case SRB_STATUS_NO_HBA:
- case SRB_STATUS_UNEXPECTED_BUS_FREE:
- case SRB_STATUS_PHASE_SEQUENCE_FAILURE:
- case SRB_STATUS_BAD_SRB_BLOCK_LENGTH:
- case SRB_STATUS_DELAYED_RETRY:
- case SRB_STATUS_BAD_FUNCTION:
- case SRB_STATUS_NOT_STARTED:
- case SRB_STATUS_NOT_IN_USE:
- case SRB_STATUS_FORCE_ABORT:
- case SRB_STATUS_DOMAIN_VALIDATION_FAIL:
- default:
+ case SRB_STATUS_MESSAGE_REJECTED:
+ scsicmd->result = DID_ERROR << 16
+ | MESSAGE_REJECT << 8;
+ break;
+ case SRB_STATUS_REQUEST_FLUSHED:
+ case SRB_STATUS_ERROR:
+ case SRB_STATUS_INVALID_REQUEST:
+ case SRB_STATUS_REQUEST_SENSE_FAILED:
+ case SRB_STATUS_NO_HBA:
+ case SRB_STATUS_UNEXPECTED_BUS_FREE:
+ case SRB_STATUS_PHASE_SEQUENCE_FAILURE:
+ case SRB_STATUS_BAD_SRB_BLOCK_LENGTH:
+ case SRB_STATUS_DELAYED_RETRY:
+ case SRB_STATUS_BAD_FUNCTION:
+ case SRB_STATUS_NOT_STARTED:
+ case SRB_STATUS_NOT_IN_USE:
+ case SRB_STATUS_FORCE_ABORT:
+ case SRB_STATUS_DOMAIN_VALIDATION_FAIL:
+ default:
#ifdef AAC_DETAILED_STATUS_INFO
- printk(KERN_INFO "aacraid: SRB ERROR(%u) %s scsi cmd 0x%x - scsi status 0x%x\n",
- le32_to_cpu(srbreply->srb_status) & 0x3F,
- aac_get_status_string(
- le32_to_cpu(srbreply->srb_status) & 0x3F),
- scsicmd->cmnd[0],
- le32_to_cpu(srbreply->scsi_status));
+ pr_info("aacraid: SRB ERROR(%u) %s scsi cmd 0x%x -scsi status 0x%x\n",
+ le32_to_cpu(srbreply->srb_status) & 0x3F,
+ aac_get_status_string(
+ le32_to_cpu(srbreply->srb_status) & 0x3F),
+ scsicmd->cmnd[0],
+ le32_to_cpu(srbreply->scsi_status));
#endif
- if ((scsicmd->cmnd[0] == ATA_12)
- || (scsicmd->cmnd[0] == ATA_16)) {
- if (scsicmd->cmnd[2] & (0x01 << 5)) {
- scsicmd->result = DID_OK << 16
- | COMMAND_COMPLETE << 8;
- break;
- } else {
- scsicmd->result = DID_ERROR << 16
- | COMMAND_COMPLETE << 8;
- break;
- }
+ /*
+ * When the CC bit is SET by the host in ATA pass thru CDB,
+ * driver is supposed to return DID_OK
+ *
+ * When the CC bit is RESET by the host, driver should
+ * return DID_ERROR
+ */
+ if ((scsicmd->cmnd[0] == ATA_12)
+ || (scsicmd->cmnd[0] == ATA_16)) {
+
+ if (scsicmd->cmnd[2] & (0x01 << 5)) {
+ scsicmd->result = DID_OK << 16
+ | COMMAND_COMPLETE << 8;
+ break;
} else {
scsicmd->result = DID_ERROR << 16
| COMMAND_COMPLETE << 8;
- break;
+ break;
}
- }
- if (le32_to_cpu(srbreply->scsi_status)
- == SAM_STAT_CHECK_CONDITION) {
- int len;
-
- scsicmd->result |= SAM_STAT_CHECK_CONDITION;
- len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
- SCSI_SENSE_BUFFERSIZE);
-#ifdef AAC_DETAILED_STATUS_INFO
- printk(KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n",
- le32_to_cpu(srbreply->status), len);
-#endif
- memcpy(scsicmd->sense_buffer,
- srbreply->sense_data, len);
+ } else {
+ scsicmd->result = DID_ERROR << 16
+ | COMMAND_COMPLETE << 8;
+ break;
}
}
+ if (le32_to_cpu(srbreply->scsi_status)
+ == SAM_STAT_CHECK_CONDITION) {
+ int len;
+
+ scsicmd->result |= SAM_STAT_CHECK_CONDITION;
+ len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
+ SCSI_SENSE_BUFFERSIZE);
+#ifdef AAC_DETAILED_STATUS_INFO
+ pr_warn("aac_srb_callback: check condition, status = %d len=%d\n",
+ le32_to_cpu(srbreply->status), len);
+#endif
+ memcpy(scsicmd->sense_buffer,
+ srbreply->sense_data, len);
+ }
+
/*
* OR in the scsi status (already shifted up a bit)
*/
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index bd04bd0..a156451 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -1960,7 +1960,8 @@
*/
static void
megasas_build_syspd_fusion(struct megasas_instance *instance,
- struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd, u8 fp_possible)
+ struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd,
+ bool fp_possible)
{
u32 device_id;
struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
@@ -2064,6 +2065,8 @@
u16 sge_count;
u8 cmd_type;
struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request;
+ struct MR_PRIV_DEVICE *mr_device_priv_data;
+ mr_device_priv_data = scp->device->hostdata;
/* Zero out some fields so they don't get reused */
memset(io_request->LUN, 0x0, 8);
@@ -2092,12 +2095,14 @@
megasas_build_ld_nonrw_fusion(instance, scp, cmd);
break;
case READ_WRITE_SYSPDIO:
+ megasas_build_syspd_fusion(instance, scp, cmd, true);
+ break;
case NON_READ_WRITE_SYSPDIO:
- if (instance->secure_jbod_support &&
- (cmd_type == NON_READ_WRITE_SYSPDIO))
- megasas_build_syspd_fusion(instance, scp, cmd, 0);
+ if (instance->secure_jbod_support ||
+ mr_device_priv_data->is_tm_capable)
+ megasas_build_syspd_fusion(instance, scp, cmd, false);
else
- megasas_build_syspd_fusion(instance, scp, cmd, 1);
+ megasas_build_syspd_fusion(instance, scp, cmd, true);
break;
default:
break;
diff --git a/drivers/scsi/ufs/ufs-qcom-debugfs.c b/drivers/scsi/ufs/ufs-qcom-debugfs.c
index 494ecd1..db4ecec 100644
--- a/drivers/scsi/ufs/ufs-qcom-debugfs.c
+++ b/drivers/scsi/ufs/ufs-qcom-debugfs.c
@@ -121,7 +121,8 @@
struct ufs_hba *hba = host->hba;
- ret = simple_write_to_buffer(configuration, TESTBUS_CFG_BUFF_LINE_SIZE,
+ ret = simple_write_to_buffer(configuration,
+ TESTBUS_CFG_BUFF_LINE_SIZE - 1,
&buff_pos, ubuf, cnt);
if (ret < 0) {
dev_err(host->hba->dev, "%s: failed to read user data\n",
diff --git a/drivers/scsi/ufs/ufs-qcom-ice.c b/drivers/scsi/ufs/ufs-qcom-ice.c
index 0c86263..84765b1 100644
--- a/drivers/scsi/ufs/ufs-qcom-ice.c
+++ b/drivers/scsi/ufs/ufs-qcom-ice.c
@@ -170,17 +170,15 @@
static void ufs_qcom_ice_cfg_work(struct work_struct *work)
{
unsigned long flags;
- struct ice_data_setting ice_set;
struct ufs_qcom_host *qcom_host =
container_of(work, struct ufs_qcom_host, ice_cfg_work);
- struct request *req_pending = NULL;
if (!qcom_host->ice.vops->config_start)
return;
spin_lock_irqsave(&qcom_host->ice_work_lock, flags);
- req_pending = qcom_host->req_pending;
- if (!req_pending) {
+ if (!qcom_host->req_pending) {
+ qcom_host->work_pending = false;
spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags);
return;
}
@@ -189,24 +187,15 @@
/*
* config_start is called again as previous attempt returned -EAGAIN,
* this call shall now take care of the necessary key setup.
- * 'ice_set' will not actually be used, instead the next call to
- * config_start() for this request, in the normal call flow, will
- * succeed as the key has now been setup.
*/
qcom_host->ice.vops->config_start(qcom_host->ice.pdev,
- qcom_host->req_pending, &ice_set, false);
+ qcom_host->req_pending, NULL, false);
spin_lock_irqsave(&qcom_host->ice_work_lock, flags);
qcom_host->req_pending = NULL;
+ qcom_host->work_pending = false;
spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags);
- /*
- * Resume with requests processing. We assume config_start has been
- * successful, but even if it wasn't we still must resume in order to
- * allow for the request to be retried.
- */
- ufshcd_scsi_unblock_requests(qcom_host->hba);
-
}
/**
@@ -285,18 +274,14 @@
* requires a non-atomic context, this means we should
* call the function again from the worker thread to do
* the configuration. For this request the error will
- * propagate so it will be re-queued and until the
- * configuration is is completed we block further
- * request processing.
+ * propagate so it will be re-queued.
*/
if (err == -EAGAIN) {
dev_dbg(qcom_host->hba->dev,
"%s: scheduling task for ice setup\n",
__func__);
- if (!qcom_host->req_pending) {
- ufshcd_scsi_block_requests(
- qcom_host->hba);
+ if (!qcom_host->work_pending) {
qcom_host->req_pending = cmd->request;
if (!schedule_work(
@@ -307,10 +292,9 @@
&qcom_host->ice_work_lock,
flags);
- ufshcd_scsi_unblock_requests(
- qcom_host->hba);
return err;
}
+ qcom_host->work_pending = true;
}
} else {
@@ -409,9 +393,7 @@
* requires a non-atomic context, this means we should
* call the function again from the worker thread to do
* the configuration. For this request the error will
- * propagate so it will be re-queued and until the
- * configuration is is completed we block further
- * request processing.
+ * propagate so it will be re-queued.
*/
if (err == -EAGAIN) {
@@ -419,9 +401,8 @@
"%s: scheduling task for ice setup\n",
__func__);
- if (!qcom_host->req_pending) {
- ufshcd_scsi_block_requests(
- qcom_host->hba);
+ if (!qcom_host->work_pending) {
+
qcom_host->req_pending = cmd->request;
if (!schedule_work(
&qcom_host->ice_cfg_work)) {
@@ -431,10 +412,9 @@
&qcom_host->ice_work_lock,
flags);
- ufshcd_scsi_unblock_requests(
- qcom_host->hba);
return err;
}
+ qcom_host->work_pending = true;
}
} else {
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index 0ab656e..9da3d19 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -375,6 +375,7 @@
struct work_struct ice_cfg_work;
struct request *req_pending;
struct ufs_vreg *vddp_ref_clk;
+ bool work_pending;
};
static inline u32
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 4a43695..a6bc1da 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -908,7 +908,6 @@
hba->capabilities, hba->caps);
dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
hba->dev_info.quirks);
- ufshcd_print_fsm_state(hba);
}
/**
@@ -7033,6 +7032,7 @@
*/
scsi_print_command(cmd);
if (!hba->req_abort_count) {
+ ufshcd_print_fsm_state(hba);
ufshcd_print_host_regs(hba);
ufshcd_print_host_state(hba);
ufshcd_print_pwr_info(hba);
@@ -9260,7 +9260,6 @@
goto enable_gating;
}
- flush_work(&hba->eeh_work);
ret = ufshcd_link_state_transition(hba, req_link_state, 1);
if (ret)
goto set_dev_active;
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 18aaacc..9750969 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -251,6 +251,23 @@
of deadlocks or cpu hangs these dump regions are captured to
give a snapshot of the system at the time of the crash.
+config QCOM_MINIDUMP
+ bool "QCOM Minidump Support"
+ depends on MSM_SMEM && QCOM_DLOAD_MODE
+ help
+ This enables minidump feature. It allows various clients to
+ register to dump their state at system bad state (panic/WDT,etc.,).
+ Minidump would dump all registered entries, only when DLOAD mode
+ is enabled.
+
+config MINIDUMP_MAX_ENTRIES
+ int "Minidump Maximum num of entries"
+ default 200
+ depends on QCOM_MINIDUMP
+ help
+ This defines maximum number of entries to be allocated for application
+ subsytem in Minidump table.
+
config QCOM_BUS_SCALING
bool "Bus scaling driver"
help
@@ -693,4 +710,4 @@
help
The driver will help route diag traffic from modem side over the QDSS
sub-system to USB on APSS side. The driver acts as a bridge between the
- MHI and USB interface. If unsure, say N.
\ No newline at end of file
+ MHI and USB interface. If unsure, say N.
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index bb08357..768d4d9 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -28,6 +28,7 @@
obj-$(CONFIG_QCOM_EUD) += eud.o
obj-$(CONFIG_QCOM_WATCHDOG_V2) += watchdog_v2.o
obj-$(CONFIG_QCOM_MEMORY_DUMP_V2) += memory_dump_v2.o
+obj-$(CONFIG_QCOM_MINIDUMP) += msm_minidump.o minidump_log.o
obj-$(CONFIG_QCOM_RUN_QUEUE_STATS) += rq_stats.o
obj-$(CONFIG_QCOM_SECURE_BUFFER) += secure_buffer.o
obj-$(CONFIG_MSM_SMEM) += msm_smem.o smem_debug.o
@@ -79,4 +80,4 @@
obj-$(CONFIG_QMP_DEBUGFS_CLIENT) += qmp-debugfs-client.o
obj-$(CONFIG_MSM_REMOTEQDSS) += remoteqdss.o
obj-$(CONFIG_QSEE_IPC_IRQ_BRIDGE) += qsee_ipc_irq_bridge.o
-obj-$(CONFIG_QCOM_QDSS_BRIDGE) += qdss_bridge.o
\ No newline at end of file
+obj-$(CONFIG_QCOM_QDSS_BRIDGE) += qdss_bridge.o
diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c
index 252bd21..72abf50 100644
--- a/drivers/soc/qcom/cmd-db.c
+++ b/drivers/soc/qcom/cmd-db.c
@@ -197,6 +197,7 @@
len);
return len;
}
+EXPORT_SYMBOL(cmd_db_get_aux_data);
int cmd_db_get_aux_data_len(const char *resource_id)
{
@@ -208,6 +209,7 @@
return ret < 0 ? 0 : ent.len;
}
+EXPORT_SYMBOL(cmd_db_get_aux_data_len);
u16 cmd_db_get_version(const char *resource_id)
{
diff --git a/drivers/soc/qcom/cpuss_dump.c b/drivers/soc/qcom/cpuss_dump.c
index 886a32f..eba1128 100644
--- a/drivers/soc/qcom/cpuss_dump.c
+++ b/drivers/soc/qcom/cpuss_dump.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -74,6 +74,8 @@
dump_data->addr = dump_addr;
dump_data->len = size;
+ scnprintf(dump_data->name, sizeof(dump_data->name),
+ "KCPUSS%X", id);
dump_entry.id = id;
dump_entry.addr = virt_to_phys(dump_data);
ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS, &dump_entry);
diff --git a/drivers/soc/qcom/dcc_v2.c b/drivers/soc/qcom/dcc_v2.c
index 457dc5f..cff407e 100644
--- a/drivers/soc/qcom/dcc_v2.c
+++ b/drivers/soc/qcom/dcc_v2.c
@@ -1610,6 +1610,14 @@
static int __init dcc_init(void)
{
+ int ret;
+
+ ret = scm_is_secure_device();
+ if (ret == 0) {
+ pr_info("DCC is not available\n");
+ return -ENODEV;
+ }
+
return platform_driver_register(&dcc_driver);
}
pure_initcall(dcc_init);
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
index ebed4d2..e6fd52e 100644
--- a/drivers/soc/qcom/glink.c
+++ b/drivers/soc/qcom/glink.c
@@ -1669,6 +1669,8 @@
&ctx->local_rx_intent_list, list) {
ctx->notify_rx_abort(ctx, ctx->user_priv,
ptr_intent->pkt_priv);
+ ctx->transport_ptr->ops->deallocate_rx_intent(
+ ctx->transport_ptr->ops, ptr_intent);
list_del(&ptr_intent->list);
kfree(ptr_intent);
}
@@ -3767,6 +3769,8 @@
GLINK_INFO("%s: freeing transport [%s->%s]context\n", __func__,
xprt_ctx->name,
xprt_ctx->edge);
+ kfree(xprt_ctx->ops);
+ xprt_ctx->ops = NULL;
kfree(xprt_ctx);
}
@@ -4158,6 +4162,7 @@
rwref_write_get(&xprt_ptr->xprt_state_lhb0);
xprt_ptr->next_lcid = 1;
xprt_ptr->local_state = GLINK_XPRT_DOWN;
+ xprt_ptr->curr_qos_rate_kBps = 0;
xprt_ptr->local_version_idx = xprt_ptr->versions_entries - 1;
xprt_ptr->remote_version_idx = xprt_ptr->versions_entries - 1;
xprt_ptr->l_features =
@@ -4292,6 +4297,12 @@
rwref_read_get(&xprt_ptr->xprt_state_lhb0);
ctx = get_first_ch_ctx(xprt_ptr);
while (ctx) {
+ spin_lock_irqsave(&xprt_ptr->tx_ready_lock_lhb3, flags);
+ spin_lock(&ctx->tx_lists_lock_lhc3);
+ if (!list_empty(&ctx->tx_active))
+ glink_qos_done_ch_tx(ctx);
+ spin_unlock(&ctx->tx_lists_lock_lhc3);
+ spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb3, flags);
rwref_write_get_atomic(&ctx->ch_state_lhb2, true);
if (ctx->local_open_state == GLINK_CHANNEL_OPENED ||
ctx->local_open_state == GLINK_CHANNEL_OPENING) {
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 9417cde..e391cd1 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -2476,7 +2476,7 @@
icnss_pr_vdbg("Modem-Notify: event %lu\n", code);
if (code == SUBSYS_AFTER_SHUTDOWN &&
- notif->crashed != CRASH_STATUS_WDOG_BITE) {
+ notif->crashed == CRASH_STATUS_ERR_FATAL) {
ret = icnss_assign_msa_perm_all(priv,
ICNSS_MSA_PERM_HLOS_ALL);
if (!ret) {
diff --git a/drivers/soc/qcom/llcc_perfmon.c b/drivers/soc/qcom/llcc_perfmon.c
index 39276a9..8c86e7d 100644
--- a/drivers/soc/qcom/llcc_perfmon.c
+++ b/drivers/soc/qcom/llcc_perfmon.c
@@ -127,8 +127,11 @@
unsigned int i, j;
unsigned long long total;
+ if (!llcc_priv->configured_counters)
+ return;
+
llcc_bcast_write(llcc_priv, PERFMON_DUMP, MONITOR_DUMP);
- for (i = 0; i < llcc_priv->configured_counters - 1; i++) {
+ for (i = 0; i < llcc_priv->configured_counters; i++) {
total = 0;
for (j = 0; j < llcc_priv->num_banks; j++) {
regmap_read(llcc_priv->llcc_map, llcc_priv->bank_off[j]
@@ -138,15 +141,6 @@
llcc_priv->configured[i].counter_dump += total;
}
-
- total = 0;
- for (j = 0; j < llcc_priv->num_banks; j++) {
- regmap_read(llcc_priv->llcc_map, llcc_priv->bank_off[j] +
- LLCC_COUNTER_n_VALUE(i), &val);
- total += val;
- }
-
- llcc_priv->configured[i].counter_dump += total;
}
static ssize_t perfmon_counter_dump_show(struct device *dev,
@@ -288,8 +282,8 @@
llcc_priv->configured[j].port_sel = port_sel;
llcc_priv->configured[j].event_sel = event_sel;
port_ops = llcc_priv->port_ops[port_sel];
- pr_info("configured event %ld counter %d on port %ld\n",
- event_sel, j, port_sel);
+ pr_info("counter %d configured for event %ld from port %ld\n",
+ j, event_sel, port_sel);
port_ops->event_config(llcc_priv, event_sel, j++, true);
if (!(llcc_priv->enables_port & (1 << port_sel)))
if (port_ops->event_enable)
@@ -355,8 +349,8 @@
llcc_priv->configured[j].port_sel = MAX_NUMBER_OF_PORTS;
llcc_priv->configured[j].event_sel = 100;
port_ops = llcc_priv->port_ops[port_sel];
- pr_info("Removed event %ld counter %d from port %ld\n",
- event_sel, j, port_sel);
+ pr_info("removed counter %d for event %ld from port %ld\n",
+ j, event_sel, port_sel);
port_ops->event_config(llcc_priv, event_sel, j++, false);
if (llcc_priv->enables_port & (1 << port_sel))
@@ -531,13 +525,13 @@
val = MANUAL_MODE | MONITOR_EN;
if (llcc_priv->expires.tv64) {
- if (hrtimer_is_queued(&llcc_priv->hrtimer))
- hrtimer_forward_now(&llcc_priv->hrtimer,
- llcc_priv->expires);
- else
- hrtimer_start(&llcc_priv->hrtimer,
- llcc_priv->expires,
- HRTIMER_MODE_REL_PINNED);
+ if (hrtimer_is_queued(&llcc_priv->hrtimer))
+ hrtimer_forward_now(&llcc_priv->hrtimer,
+ llcc_priv->expires);
+ else
+ hrtimer_start(&llcc_priv->hrtimer,
+ llcc_priv->expires,
+ HRTIMER_MODE_REL_PINNED);
}
} else {
diff --git a/drivers/soc/qcom/memory_dump_v2.c b/drivers/soc/qcom/memory_dump_v2.c
index 5873f5c..b76fe86 100644
--- a/drivers/soc/qcom/memory_dump_v2.c
+++ b/drivers/soc/qcom/memory_dump_v2.c
@@ -18,6 +18,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <soc/qcom/memory_dump.h>
+#include <soc/qcom/minidump.h>
#include <soc/qcom/scm.h>
#include <linux/of_device.h>
#include <linux/dma-mapping.h>
@@ -100,6 +101,33 @@
return table;
}
+static int msm_dump_data_add_minidump(struct msm_dump_entry *entry)
+{
+ struct msm_dump_data *data;
+ struct md_region md_entry;
+
+ data = (struct msm_dump_data *)(phys_to_virt(entry->addr));
+
+ if (!data->addr || !data->len)
+ return -EINVAL;
+
+ if (!strcmp(data->name, "")) {
+ pr_debug("Entry name is NULL, Use ID %d for minidump\n",
+ entry->id);
+ snprintf(md_entry.name, sizeof(md_entry.name), "KMDT0x%X",
+ entry->id);
+ } else {
+ strlcpy(md_entry.name, data->name, sizeof(md_entry.name));
+ }
+
+ md_entry.phys_addr = data->addr;
+ md_entry.virt_addr = (uintptr_t)phys_to_virt(data->addr);
+ md_entry.size = data->len;
+ md_entry.id = entry->id;
+
+ return msm_minidump_add_region(&md_entry);
+}
+
int msm_dump_data_register(enum msm_dump_table_ids id,
struct msm_dump_entry *entry)
{
@@ -120,6 +148,10 @@
table->num_entries++;
dmac_flush_range(table, (void *)table + sizeof(struct msm_dump_table));
+
+ if (msm_dump_data_add_minidump(entry))
+ pr_err("Failed to add entry in Minidump table\n");
+
return 0;
}
EXPORT_SYMBOL(msm_dump_data_register);
@@ -286,6 +318,9 @@
dump_data->addr = dump_addr;
dump_data->len = size;
+ strlcpy(dump_data->name, child_node->name,
+ strlen(child_node->name) + 1);
+
dump_entry.id = id;
dump_entry.addr = virt_to_phys(dump_data);
ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS, &dump_entry);
diff --git a/drivers/soc/qcom/minidump_log.c b/drivers/soc/qcom/minidump_log.c
new file mode 100644
index 0000000..c65dfd9
--- /dev/null
+++ b/drivers/soc/qcom/minidump_log.c
@@ -0,0 +1,104 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/kallsyms.h>
+#include <linux/slab.h>
+#include <linux/thread_info.h>
+#include <soc/qcom/minidump.h>
+#include <asm/sections.h>
+
+static void __init register_log_buf(void)
+{
+ char **log_bufp;
+ uint32_t *log_buf_lenp;
+ struct md_region md_entry;
+
+ log_bufp = (char **)kallsyms_lookup_name("log_buf");
+ log_buf_lenp = (uint32_t *)kallsyms_lookup_name("log_buf_len");
+ if (!log_bufp || !log_buf_lenp) {
+ pr_err("Unable to find log_buf by kallsyms!\n");
+ return;
+ }
+ /*Register logbuf to minidump, first idx would be from bss section */
+ strlcpy(md_entry.name, "KLOGBUF", sizeof(md_entry.name));
+ md_entry.virt_addr = (uintptr_t) (*log_bufp);
+ md_entry.phys_addr = virt_to_phys(*log_bufp);
+ md_entry.size = *log_buf_lenp;
+ if (msm_minidump_add_region(&md_entry))
+ pr_err("Failed to add logbuf in Minidump\n");
+}
+
+static void __init register_kernel_sections(void)
+{
+ struct md_region ksec_entry;
+ char *data_name = "KDATABSS";
+ const size_t static_size = __per_cpu_end - __per_cpu_start;
+ void __percpu *base = (void __percpu *)__per_cpu_start;
+ unsigned int cpu;
+
+ strlcpy(ksec_entry.name, data_name, sizeof(ksec_entry.name));
+ ksec_entry.virt_addr = (uintptr_t)_sdata;
+ ksec_entry.phys_addr = virt_to_phys(_sdata);
+ ksec_entry.size = roundup((__bss_stop - _sdata), 4);
+ if (msm_minidump_add_region(&ksec_entry))
+ pr_err("Failed to add data section in Minidump\n");
+
+ /* Add percpu static sections */
+ for_each_possible_cpu(cpu) {
+ void *start = per_cpu_ptr(base, cpu);
+
+ memset(&ksec_entry, 0, sizeof(ksec_entry));
+ scnprintf(ksec_entry.name, sizeof(ksec_entry.name),
+ "KSPERCPU%d", cpu);
+ ksec_entry.virt_addr = (uintptr_t)start;
+ ksec_entry.phys_addr = per_cpu_ptr_to_phys(start);
+ ksec_entry.size = static_size;
+ if (msm_minidump_add_region(&ksec_entry))
+ pr_err("Failed to add percpu sections in Minidump\n");
+ }
+}
+
+void dump_stack_minidump(u64 sp)
+{
+ struct md_region ksp_entry, ktsk_entry;
+ u32 cpu = smp_processor_id();
+
+ if (sp < KIMAGE_VADDR || sp > -256UL)
+ sp = current_stack_pointer;
+
+ sp &= ~(THREAD_SIZE - 1);
+ scnprintf(ksp_entry.name, sizeof(ksp_entry.name), "KSTACK%d", cpu);
+ ksp_entry.virt_addr = sp;
+ ksp_entry.phys_addr = virt_to_phys((uintptr_t *)sp);
+ ksp_entry.size = THREAD_SIZE;
+ if (msm_minidump_add_region(&ksp_entry))
+ pr_err("Failed to add stack of cpu %d in Minidump\n", cpu);
+
+ scnprintf(ktsk_entry.name, sizeof(ktsk_entry.name), "KTASK%d", cpu);
+ ktsk_entry.virt_addr = (u64)current;
+ ktsk_entry.phys_addr = virt_to_phys((uintptr_t *)current);
+ ktsk_entry.size = sizeof(struct task_struct);
+ if (msm_minidump_add_region(&ktsk_entry))
+ pr_err("Failed to add current task %d in Minidump\n", cpu);
+}
+
+static int __init msm_minidump_log_init(void)
+{
+ register_kernel_sections();
+ register_log_buf();
+ return 0;
+}
+late_initcall(msm_minidump_log_init);
diff --git a/drivers/soc/qcom/minidump_private.h b/drivers/soc/qcom/minidump_private.h
new file mode 100644
index 0000000..81ebb1c
--- /dev/null
+++ b/drivers/soc/qcom/minidump_private.h
@@ -0,0 +1,85 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __MINIDUMP_PRIVATE_H
+#define __MINIDUMP_PRIVATE_H
+
+#define MD_REVISION 1
+#define SBL_MINIDUMP_SMEM_ID 602
+#define MAX_NUM_OF_SS 10
+#define MD_SS_HLOS_ID 0
+#define SMEM_ENTRY_SIZE 40
+
+/* Bootloader has 16 byte support, 4 bytes reserved for itself */
+#define MAX_REGION_NAME_LENGTH 16
+
+#define MD_REGION_VALID ('V' << 24 | 'A' << 16 | 'L' << 8 | 'I' << 0)
+#define MD_REGION_INVALID ('I' << 24 | 'N' << 16 | 'V' << 8 | 'A' << 0)
+#define MD_REGION_INIT ('I' << 24 | 'N' << 16 | 'I' << 8 | 'T' << 0)
+#define MD_REGION_NOINIT 0
+
+#define MD_SS_ENCR_REQ (0 << 24 | 'Y' << 16 | 'E' << 8 | 'S' << 0)
+#define MD_SS_ENCR_NOTREQ (0 << 24 | 0 << 16 | 'N' << 8 | 'R' << 0)
+#define MD_SS_ENCR_NONE ('N' << 24 | 'O' << 16 | 'N' << 8 | 'E' << 0)
+#define MD_SS_ENCR_DONE ('D' << 24 | 'O' << 16 | 'N' << 8 | 'E' << 0)
+#define MD_SS_ENCR_START ('S' << 24 | 'T' << 16 | 'R' << 8 | 'T' << 0)
+#define MD_SS_ENABLED ('E' << 24 | 'N' << 16 | 'B' << 8 | 'L' << 0)
+#define MD_SS_DISABLED ('D' << 24 | 'S' << 16 | 'B' << 8 | 'L' << 0)
+
+/**
+ * md_ss_region - Minidump region
+ * @name : Name of the region to be dumped
+ * @seq_num: : Use to differentiate regions with same name.
+ * @md_valid : This entry to be dumped (if set to 1)
+ * @region_base_address : Physical address of region to be dumped
+ * @region_size : Size of the region
+ */
+struct md_ss_region {
+ char name[MAX_REGION_NAME_LENGTH];
+ u32 seq_num;
+ u32 md_valid;
+ u64 region_base_address;
+ u64 region_size;
+};
+
+/**
+ * md_ss_toc: Sub system SMEM Table of content
+ * @md_ss_toc_init : SS toc init status
+ * @md_ss_enable_status : if set to 1, Bootloader would dump this SS regions
+ * @encryption_status: Encryption status for this subsystem
+ * @encryption_required : Decides to encrypt the SS regions or not
+ * @ss_region_count : Number of regions added in this SS toc
+ * @md_ss_smem_regions_baseptr : regions base pointer of the Subsystem
+ */
+struct md_ss_toc {
+ u32 md_ss_toc_init;
+ u32 md_ss_enable_status;
+ u32 encryption_status;
+ u32 encryption_required;
+ u32 ss_region_count;
+ struct md_ss_region *md_ss_smem_regions_baseptr;
+};
+
+/**
+ * md_global_toc: Global Table of Content
+ * @md_toc_init : Global Minidump init status
+ * @md_revision : Minidump revision
+ * @md_enable_status : Minidump enable status
+ * @md_ss_toc : Array of subsystems toc
+ */
+struct md_global_toc {
+ u32 md_toc_init;
+ u32 md_revision;
+ u32 md_enable_status;
+ struct md_ss_toc md_ss_toc[MAX_NUM_OF_SS];
+};
+
+#endif
diff --git a/drivers/soc/qcom/msm_minidump.c b/drivers/soc/qcom/msm_minidump.c
new file mode 100644
index 0000000..3fe62f1
--- /dev/null
+++ b/drivers/soc/qcom/msm_minidump.c
@@ -0,0 +1,380 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "Minidump: " fmt
+
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/elf.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <soc/qcom/smem.h>
+#include <soc/qcom/minidump.h>
+#include "minidump_private.h"
+
+#define MAX_NUM_ENTRIES (CONFIG_MINIDUMP_MAX_ENTRIES + 1)
+#define MAX_STRTBL_SIZE (MAX_NUM_ENTRIES * MAX_REGION_NAME_LENGTH)
+
+/**
+ * md_table : Local Minidump toc holder
+ * @num_regions : Number of regions requested
+ * @md_ss_toc : HLOS toc pointer
+ * @md_gbl_toc : Global toc pointer
+ * @md_regions : HLOS regions base pointer
+ * @entry : array of HLOS regions requested
+ */
+struct md_table {
+ u32 revision;
+ u32 num_regions;
+ struct md_ss_toc *md_ss_toc;
+ struct md_global_toc *md_gbl_toc;
+ struct md_ss_region *md_regions;
+ struct md_region entry[MAX_NUM_ENTRIES];
+};
+
+/**
+ * md_elfhdr: Minidump table elf header
+ * @ehdr: elf main header
+ * @shdr: Section header
+ * @phdr: Program header
+ * @elf_offset: section offset in elf
+ * @strtable_idx: string table current index position
+ */
+struct md_elfhdr {
+ struct elfhdr *ehdr;
+ struct elf_shdr *shdr;
+ struct elf_phdr *phdr;
+ u64 elf_offset;
+ u64 strtable_idx;
+};
+
+/* Protect elfheader and smem table from deferred calls contention */
+static DEFINE_SPINLOCK(mdt_lock);
+static struct md_table minidump_table;
+static struct md_elfhdr minidump_elfheader;
+
+/* Number of pending entries to be added in ToC regions */
+static unsigned int pendings;
+
+static inline char *elf_lookup_string(struct elfhdr *hdr, int offset)
+{
+ char *strtab = elf_str_table(hdr);
+
+ if ((strtab == NULL) || (minidump_elfheader.strtable_idx < offset))
+ return NULL;
+ return strtab + offset;
+}
+
+static inline unsigned int set_section_name(const char *name)
+{
+ char *strtab = elf_str_table(minidump_elfheader.ehdr);
+ int idx = minidump_elfheader.strtable_idx;
+ int ret = 0;
+
+ if ((strtab == NULL) || (name == NULL))
+ return 0;
+
+ ret = idx;
+ idx += strlcpy((strtab + idx), name, MAX_REGION_NAME_LENGTH);
+ minidump_elfheader.strtable_idx = idx + 1;
+
+ return ret;
+}
+
+static inline bool md_check_name(const char *name)
+{
+ struct md_region *mde = minidump_table.entry;
+ int i, regno = minidump_table.num_regions;
+
+ for (i = 0; i < regno; i++, mde++)
+ if (!strcmp(mde->name, name))
+ return true;
+ return false;
+}
+
+/* Return next seq no, if name already exists in the table */
+static inline int md_get_seq_num(const char *name)
+{
+ struct md_ss_region *mde = minidump_table.md_regions;
+ int i, regno = minidump_table.md_ss_toc->ss_region_count;
+ int seqno = 0;
+
+ for (i = 0; i < (regno - 1); i++, mde++) {
+ if (!strcmp(mde->name, name)) {
+ if (mde->seq_num >= seqno)
+ seqno = mde->seq_num + 1;
+ }
+ }
+ return seqno;
+}
+
+/* Update Mini dump table in SMEM */
+static void md_update_ss_toc(const struct md_region *entry)
+{
+ struct md_ss_region *mdr;
+ struct elfhdr *hdr = minidump_elfheader.ehdr;
+ struct elf_shdr *shdr = elf_section(hdr, hdr->e_shnum++);
+ struct elf_phdr *phdr = elf_program(hdr, hdr->e_phnum++);
+ int reg_cnt = minidump_table.md_ss_toc->ss_region_count++;
+
+ mdr = &minidump_table.md_regions[reg_cnt];
+
+ strlcpy(mdr->name, entry->name, sizeof(mdr->name));
+ mdr->region_base_address = entry->phys_addr;
+ mdr->region_size = entry->size;
+ mdr->seq_num = md_get_seq_num(entry->name);
+
+ /* Update elf header */
+ shdr->sh_type = SHT_PROGBITS;
+ shdr->sh_name = set_section_name(mdr->name);
+ shdr->sh_addr = (elf_addr_t)entry->virt_addr;
+ shdr->sh_size = mdr->region_size;
+ shdr->sh_flags = SHF_WRITE;
+ shdr->sh_offset = minidump_elfheader.elf_offset;
+ shdr->sh_entsize = 0;
+
+ phdr->p_type = PT_LOAD;
+ phdr->p_offset = minidump_elfheader.elf_offset;
+ phdr->p_vaddr = entry->virt_addr;
+ phdr->p_paddr = entry->phys_addr;
+ phdr->p_filesz = phdr->p_memsz = mdr->region_size;
+ phdr->p_flags = PF_R | PF_W;
+
+ minidump_elfheader.elf_offset += shdr->sh_size;
+ mdr->md_valid = MD_REGION_VALID;
+}
+
+bool msm_minidump_enabled(void)
+{
+ bool ret = false;
+
+ spin_lock(&mdt_lock);
+ if (minidump_table.md_ss_toc &&
+ (minidump_table.md_ss_toc->md_ss_enable_status ==
+ MD_SS_ENABLED))
+ ret = true;
+ spin_unlock(&mdt_lock);
+ return ret;
+}
+EXPORT_SYMBOL(msm_minidump_enabled);
+
+int msm_minidump_add_region(const struct md_region *entry)
+{
+ u32 entries;
+ struct md_region *mdr;
+ int ret = 0;
+
+ if (!entry)
+ return -EINVAL;
+
+ if ((strlen(entry->name) > MAX_NAME_LENGTH) ||
+ md_check_name(entry->name) || !entry->virt_addr) {
+ pr_err("Invalid entry details\n");
+ return -EINVAL;
+ }
+
+ if (!IS_ALIGNED(entry->size, 4)) {
+ pr_err("size should be 4 byte aligned\n");
+ return -EINVAL;
+ }
+
+ spin_lock(&mdt_lock);
+ entries = minidump_table.num_regions;
+ if (entries >= MAX_NUM_ENTRIES) {
+ pr_err("Maximum entries reached.\n");
+ spin_unlock(&mdt_lock);
+ return -ENOMEM;
+ }
+
+ mdr = &minidump_table.entry[entries];
+ strlcpy(mdr->name, entry->name, sizeof(mdr->name));
+ mdr->virt_addr = entry->virt_addr;
+ mdr->phys_addr = entry->phys_addr;
+ mdr->size = entry->size;
+ mdr->id = entry->id;
+
+ minidump_table.num_regions = entries + 1;
+
+ if (minidump_table.md_ss_toc &&
+ (minidump_table.md_ss_toc->md_ss_enable_status ==
+ MD_SS_ENABLED))
+ md_update_ss_toc(entry);
+ else
+ pendings++;
+
+ spin_unlock(&mdt_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(msm_minidump_add_region);
+
+static int msm_minidump_add_header(void)
+{
+ struct md_ss_region *mdreg = &minidump_table.md_regions[0];
+ struct elfhdr *ehdr;
+ struct elf_shdr *shdr;
+ struct elf_phdr *phdr;
+ unsigned int strtbl_off, elfh_size, phdr_off;
+ char *banner;
+
+ /* Header buffer contains:
+ * elf header, MAX_NUM_ENTRIES+4 of section and program elf headers,
+ * string table section and linux banner.
+ */
+ elfh_size = sizeof(*ehdr) + MAX_STRTBL_SIZE + (strlen(linux_banner) +
+ 1) + ((sizeof(*shdr) + sizeof(*phdr)) * (MAX_NUM_ENTRIES + 4));
+ elfh_size = ALIGN(elfh_size, 4);
+
+ minidump_elfheader.ehdr = kzalloc(elfh_size, GFP_KERNEL);
+ if (!minidump_elfheader.ehdr)
+ return -ENOMEM;
+
+ strlcpy(mdreg->name, "KELF_HEADER", sizeof(mdreg->name));
+ mdreg->region_base_address = virt_to_phys(minidump_elfheader.ehdr);
+ mdreg->region_size = elfh_size;
+
+ ehdr = minidump_elfheader.ehdr;
+ /* Assign section/program headers offset */
+ minidump_elfheader.shdr = shdr = (struct elf_shdr *)(ehdr + 1);
+ minidump_elfheader.phdr = phdr =
+ (struct elf_phdr *)(shdr + MAX_NUM_ENTRIES);
+ phdr_off = sizeof(*ehdr) + (sizeof(*shdr) * MAX_NUM_ENTRIES);
+
+ memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
+ ehdr->e_ident[EI_CLASS] = ELF_CLASS;
+ ehdr->e_ident[EI_DATA] = ELF_DATA;
+ ehdr->e_ident[EI_VERSION] = EV_CURRENT;
+ ehdr->e_ident[EI_OSABI] = ELF_OSABI;
+ ehdr->e_type = ET_CORE;
+ ehdr->e_machine = ELF_ARCH;
+ ehdr->e_version = EV_CURRENT;
+ ehdr->e_ehsize = sizeof(*ehdr);
+ ehdr->e_phoff = phdr_off;
+ ehdr->e_phentsize = sizeof(*phdr);
+ ehdr->e_shoff = sizeof(*ehdr);
+ ehdr->e_shentsize = sizeof(*shdr);
+ ehdr->e_shstrndx = 1;
+
+ minidump_elfheader.elf_offset = elfh_size;
+
+ /*
+ * First section header should be NULL,
+ * 2nd section is string table.
+ */
+ minidump_elfheader.strtable_idx = 1;
+ strtbl_off = sizeof(*ehdr) +
+ ((sizeof(*phdr) + sizeof(*shdr)) * MAX_NUM_ENTRIES);
+ shdr++;
+ shdr->sh_type = SHT_STRTAB;
+ shdr->sh_offset = (elf_addr_t)strtbl_off;
+ shdr->sh_size = MAX_STRTBL_SIZE;
+ shdr->sh_entsize = 0;
+ shdr->sh_flags = 0;
+ shdr->sh_name = set_section_name("STR_TBL");
+ shdr++;
+
+ /* 3rd section is for minidump_table VA, used by parsers */
+ shdr->sh_type = SHT_PROGBITS;
+ shdr->sh_entsize = 0;
+ shdr->sh_flags = 0;
+ shdr->sh_addr = (elf_addr_t)&minidump_table;
+ shdr->sh_name = set_section_name("minidump_table");
+ shdr++;
+
+ /* 4th section is linux banner */
+ banner = (char *)ehdr + strtbl_off + MAX_STRTBL_SIZE;
+ strlcpy(banner, linux_banner, strlen(linux_banner) + 1);
+
+ shdr->sh_type = SHT_PROGBITS;
+ shdr->sh_offset = (elf_addr_t)(strtbl_off + MAX_STRTBL_SIZE);
+ shdr->sh_size = strlen(linux_banner) + 1;
+ shdr->sh_addr = (elf_addr_t)linux_banner;
+ shdr->sh_entsize = 0;
+ shdr->sh_flags = SHF_WRITE;
+ shdr->sh_name = set_section_name("linux_banner");
+
+ phdr->p_type = PT_LOAD;
+ phdr->p_offset = (elf_addr_t)(strtbl_off + MAX_STRTBL_SIZE);
+ phdr->p_vaddr = (elf_addr_t)linux_banner;
+ phdr->p_paddr = virt_to_phys(linux_banner);
+ phdr->p_filesz = phdr->p_memsz = strlen(linux_banner) + 1;
+ phdr->p_flags = PF_R | PF_W;
+
+ /* Update headers count*/
+ ehdr->e_phnum = 1;
+ ehdr->e_shnum = 4;
+
+ mdreg->md_valid = MD_REGION_VALID;
+ return 0;
+}
+
+static int __init msm_minidump_init(void)
+{
+ unsigned int i, size;
+ struct md_region *mdr;
+ struct md_global_toc *md_global_toc;
+ struct md_ss_toc *md_ss_toc;
+
+ /* Get Minidump table */
+ md_global_toc = smem_get_entry(SBL_MINIDUMP_SMEM_ID, &size, 0,
+ SMEM_ANY_HOST_FLAG);
+ if (IS_ERR_OR_NULL(md_global_toc)) {
+ pr_err("SMEM is not initialized.\n");
+ return -ENODEV;
+ }
+
+ /*Check global minidump support initialization */
+ if (!md_global_toc->md_toc_init) {
+ pr_err("System Minidump TOC not initialized\n");
+ return -ENODEV;
+ }
+
+ minidump_table.md_gbl_toc = md_global_toc;
+ minidump_table.revision = md_global_toc->md_revision;
+ md_ss_toc = &md_global_toc->md_ss_toc[MD_SS_HLOS_ID];
+
+ md_ss_toc->encryption_status = MD_SS_ENCR_NONE;
+ md_ss_toc->encryption_required = MD_SS_ENCR_REQ;
+
+ minidump_table.md_ss_toc = md_ss_toc;
+ minidump_table.md_regions = kzalloc((MAX_NUM_ENTRIES *
+ sizeof(struct md_ss_region)), GFP_KERNEL);
+ if (!minidump_table.md_regions)
+ return -ENOMEM;
+
+ md_ss_toc->md_ss_smem_regions_baseptr =
+ (void *)virt_to_phys(minidump_table.md_regions);
+
+ /* First entry would be ELF header */
+ md_ss_toc->ss_region_count = 1;
+ msm_minidump_add_header();
+
+ /* Add pending entries to HLOS TOC */
+ spin_lock(&mdt_lock);
+ md_ss_toc->md_ss_toc_init = 1;
+ md_ss_toc->md_ss_enable_status = MD_SS_ENABLED;
+ for (i = 0; i < pendings; i++) {
+ mdr = &minidump_table.entry[i];
+ md_update_ss_toc(mdr);
+ }
+
+ pendings = 0;
+ spin_unlock(&mdt_lock);
+
+ pr_info("Enabled with max number of regions %d\n",
+ CONFIG_MINIDUMP_MAX_ENTRIES);
+
+ return 0;
+}
+subsys_initcall(msm_minidump_init)
diff --git a/drivers/soc/qcom/pil-q6v5-mss.c b/drivers/soc/qcom/pil-q6v5-mss.c
index 0477064..2ca0615 100644
--- a/drivers/soc/qcom/pil-q6v5-mss.c
+++ b/drivers/soc/qcom/pil-q6v5-mss.c
@@ -38,6 +38,7 @@
#define PROXY_TIMEOUT_MS 10000
#define MAX_SSR_REASON_LEN 256U
#define STOP_ACK_TIMEOUT_MS 1000
+#define QDSP6SS_NMI_STATUS 0x44
#define subsys_to_drv(d) container_of(d, struct modem_data, subsys_desc)
@@ -74,12 +75,17 @@
static irqreturn_t modem_err_fatal_intr_handler(int irq, void *dev_id)
{
struct modem_data *drv = subsys_to_drv(dev_id);
+ u32 nmi_status = readl_relaxed(drv->q6->reg_base + QDSP6SS_NMI_STATUS);
/* Ignore if we're the one that set the force stop GPIO */
if (drv->crash_shutdown)
return IRQ_HANDLED;
- pr_err("Fatal error on the modem.\n");
+ if (nmi_status & 0x04)
+ pr_err("%s: Fatal error on the modem due to TZ NMI\n",
+ __func__);
+ else
+ pr_err("%s: Fatal error on the modem\n", __func__);
subsys_set_crash_status(drv->subsys, CRASH_STATUS_ERR_FATAL);
restart_modem(drv);
return IRQ_HANDLED;
diff --git a/drivers/soc/qcom/ramdump.c b/drivers/soc/qcom/ramdump.c
index e4c1bb8..7758c64 100644
--- a/drivers/soc/qcom/ramdump.c
+++ b/drivers/soc/qcom/ramdump.c
@@ -454,23 +454,6 @@
}
-static inline struct elf_shdr *elf_sheader(struct elfhdr *hdr)
-{
- return (struct elf_shdr *)((size_t)hdr + (size_t)hdr->e_shoff);
-}
-
-static inline struct elf_shdr *elf_section(struct elfhdr *hdr, int idx)
-{
- return &elf_sheader(hdr)[idx];
-}
-
-static inline char *elf_str_table(struct elfhdr *hdr)
-{
- if (hdr->e_shstrndx == SHN_UNDEF)
- return NULL;
- return (char *)hdr + elf_section(hdr, hdr->e_shstrndx)->sh_offset;
-}
-
static inline unsigned int set_section_name(const char *name,
struct elfhdr *ehdr)
{
diff --git a/drivers/soc/qcom/scm.c b/drivers/soc/qcom/scm.c
index ac5cc54..492b68c 100644
--- a/drivers/soc/qcom/scm.c
+++ b/drivers/soc/qcom/scm.c
@@ -764,7 +764,7 @@
return scm_remap_error(ret);
return ret;
}
-
+EXPORT_SYMBOL(scm_call2_atomic);
/**
* scm_call() - Send an SCM command
* @svc_id: service identifier
diff --git a/drivers/soc/qcom/secure_buffer.c b/drivers/soc/qcom/secure_buffer.c
index 6553ac0..5289cd0 100644
--- a/drivers/soc/qcom/secure_buffer.c
+++ b/drivers/soc/qcom/secure_buffer.c
@@ -212,6 +212,7 @@
kfree(source_vm_copy);
return ret;
}
+EXPORT_SYMBOL(hyp_assign_table);
int hyp_assign_phys(phys_addr_t addr, u64 size, u32 *source_vm_list,
int source_nelems, int *dest_vmids,
diff --git a/drivers/soc/qcom/spcom.c b/drivers/soc/qcom/spcom.c
index 68681f9..876e176 100644
--- a/drivers/soc/qcom/spcom.c
+++ b/drivers/soc/qcom/spcom.c
@@ -506,6 +506,7 @@
* We do it here, ASAP, to allow rx data.
*/
+ ch->rx_abort = false; /* cleanup from previouse close */
pr_debug("call glink_queue_rx_intent() ch [%s].\n", ch->name);
ret = glink_queue_rx_intent(handle, ch, ch->rx_buf_size);
if (ret) {
@@ -536,14 +537,15 @@
*/
pr_err("GLINK_REMOTE_DISCONNECTED, ch [%s].\n", ch->name);
- ch->glink_state = event;
-
/*
* Abort any blocking read() operation.
* The glink notification might be after REMOTE_DISCONNECT.
*/
spcom_notify_rx_abort(NULL, ch, NULL);
+ /* set the state to not-connected after notify-rx-abort */
+ ch->glink_state = event;
+
/*
* after glink_close(),
* expecting notify GLINK_LOCAL_DISCONNECTED
@@ -579,7 +581,10 @@
* spcom_notify_rx_abort() - glink callback on aborting rx pending buffer.
*
* Rx abort may happen if channel is closed by remote side, while rx buffer is
- * pending in the queue.
+ * pending in the queue, like upon SP reset (SSR).
+ *
+ * More common scenario, is when rx intent is queud (for next transfer),
+ * and the channel is closed locally.
*/
static void spcom_notify_rx_abort(void *handle, const void *priv,
const void *pkt_priv)
@@ -593,7 +598,8 @@
pr_debug("ch [%s] pending rx aborted.\n", ch->name);
- if (spcom_is_channel_open(ch) && (!ch->rx_abort)) {
+ /* ignore rx-abort after local channel disconected */
+ if (spcom_is_channel_connected(ch) && (!ch->rx_abort)) {
ch->rx_abort = true;
complete_all(&ch->rx_done);
}
@@ -873,14 +879,16 @@
for (retry = 0; retry < TX_MAX_RETRY ; retry++) {
ret = glink_tx(ch->glink_handle, pkt_priv, buf, size, tx_flags);
if (ret == -EAGAIN) {
- pr_err("glink_tx() fail, try again.\n");
+ pr_err("glink_tx() fail, try again, ch [%s].\n",
+ ch->name);
/*
* Delay to allow remote side to queue rx buffer.
* This may happen after the first channel connection.
*/
msleep(TX_RETRY_DELAY_MSEC);
} else if (ret < 0) {
- pr_err("glink_tx() error %d.\n", ret);
+ pr_err("glink_tx() error [%d], ch [%s].\n",
+ ret, ch->name);
goto exit_err;
} else {
break; /* no retry needed */
@@ -953,6 +961,7 @@
return -ETIMEDOUT;
} else if (ch->rx_abort) {
mutex_unlock(&ch->lock);
+ pr_err("rx_abort, probably remote side reset (SSR).\n");
return -ERESTART; /* probably SSR */
} else if (ch->actual_rx_size) {
pr_debug("actual_rx_size is [%zu]\n", ch->actual_rx_size);
@@ -1072,10 +1081,19 @@
for (i = 0 ; i < ARRAY_SIZE(spcom_dev->channels); i++) {
struct spcom_channel *ch = &spcom_dev->channels[i];
- if (ch->is_server) {
- pr_debug("rx-abort server on ch [%s].\n", ch->name);
- spcom_notify_rx_abort(NULL, ch, NULL);
- }
+ /* relevant only for servers */
+ if (!ch->is_server)
+ continue;
+
+ /* The server might not be connected to a client.
+ * Don't check if connected, only if open.
+ */
+ if (!spcom_is_channel_open(ch) || (ch->rx_abort))
+ continue;
+
+ pr_debug("rx-abort server ch [%s].\n", ch->name);
+ ch->rx_abort = true;
+ complete_all(&ch->rx_done);
}
}
diff --git a/drivers/soc/qcom/subsys-pil-tz.c b/drivers/soc/qcom/subsys-pil-tz.c
index d65756c..5b600f6 100644
--- a/drivers/soc/qcom/subsys-pil-tz.c
+++ b/drivers/soc/qcom/subsys-pil-tz.c
@@ -42,6 +42,7 @@
#define ERR_READY 0
#define PBL_DONE 1
+#define QDSP6SS_NMI_STATUS 0x44
#define desc_to_data(d) container_of(d, struct pil_tz_data, desc)
#define subsys_to_data(d) container_of(d, struct pil_tz_data, subsys_desc)
@@ -109,6 +110,7 @@
void __iomem *irq_mask;
void __iomem *err_status;
void __iomem *err_status_spare;
+ void __iomem *reg_base;
u32 bits_arr[2];
};
@@ -925,8 +927,19 @@
static irqreturn_t subsys_err_fatal_intr_handler (int irq, void *dev_id)
{
struct pil_tz_data *d = subsys_to_data(dev_id);
+ u32 nmi_status = 0;
- pr_err("Fatal error on %s!\n", d->subsys_desc.name);
+ if (d->reg_base)
+ nmi_status = readl_relaxed(d->reg_base +
+ QDSP6SS_NMI_STATUS);
+
+ if (nmi_status & 0x04)
+ pr_err("%s: Fatal error on the %s due to TZ NMI\n",
+ __func__, d->subsys_desc.name);
+ else
+ pr_err("%s Fatal error on the %s\n",
+ __func__, d->subsys_desc.name);
+
if (subsys_get_crash_status(d->subsys)) {
pr_err("%s: Ignoring error fatal, restart in progress\n",
d->subsys_desc.name);
@@ -1062,6 +1075,13 @@
d->keep_proxy_regs_on = of_property_read_bool(pdev->dev.of_node,
"qcom,keep-proxy-regs-on");
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base_reg");
+ d->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(d->reg_base)) {
+ dev_err(&pdev->dev, "Failed to ioremap base register\n");
+ d->reg_base = NULL;
+ }
+
rc = of_property_read_string(pdev->dev.of_node, "qcom,firmware-name",
&d->desc.name);
if (rc)
diff --git a/drivers/soc/qcom/subsystem_restart.c b/drivers/soc/qcom/subsystem_restart.c
index 55cb604..110cdf7 100644
--- a/drivers/soc/qcom/subsystem_restart.c
+++ b/drivers/soc/qcom/subsystem_restart.c
@@ -655,13 +655,16 @@
if (ret < 0) {
notify_each_subsys_device(&dev, 1, SUBSYS_POWERUP_FAILURE,
NULL);
- if (!dev->desc->ignore_ssr_failure) {
+ if (system_state == SYSTEM_RESTART
+ || system_state == SYSTEM_POWER_OFF)
+ WARN(1, "SSR aborted: %s, system reboot/shutdown is under way\n",
+ name);
+ else if (!dev->desc->ignore_ssr_failure)
panic("[%s:%d]: Powerup error: %s!",
current->comm, current->pid, name);
- } else {
+ else
pr_err("Powerup failure on %s\n", name);
- return ret;
- }
+ return ret;
}
enable_all_irqs(dev);
@@ -1174,6 +1177,7 @@
{
dev->crashed = crashed;
}
+EXPORT_SYMBOL(subsys_set_crash_status);
enum crash_status subsys_get_crash_status(struct subsys_device *dev)
{
diff --git a/drivers/soc/qcom/watchdog_v2.c b/drivers/soc/qcom/watchdog_v2.c
index 9aea6db..f5e76e0 100644
--- a/drivers/soc/qcom/watchdog_v2.c
+++ b/drivers/soc/qcom/watchdog_v2.c
@@ -29,6 +29,7 @@
#include <linux/wait.h>
#include <soc/qcom/scm.h>
#include <soc/qcom/memory_dump.h>
+#include <soc/qcom/minidump.h>
#include <soc/qcom/watchdog.h>
#include <linux/dma-mapping.h>
@@ -549,6 +550,8 @@
cpu_data[cpu].addr = virt_to_phys(cpu_buf +
cpu * MAX_CPU_CTX_SIZE);
cpu_data[cpu].len = MAX_CPU_CTX_SIZE;
+ snprintf(cpu_data[cpu].name, sizeof(cpu_data[cpu].name),
+ "KCPU_CTX%d", cpu);
dump_entry.id = MSM_DUMP_DATA_CPU_CTX + cpu;
dump_entry.addr = virt_to_phys(&cpu_data[cpu]);
ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS,
@@ -596,6 +599,8 @@
cpu_data->addr = dump_addr;
cpu_data->len = MAX_CPU_SCANDUMP_SIZE;
+ snprintf(cpu_data->name, sizeof(cpu_data->name),
+ "KSCANDUMP%d", cpu);
dump_entry.id = MSM_DUMP_DATA_SCANDUMP_PER_CPU + cpu;
dump_entry.addr = virt_to_phys(cpu_data);
ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS,
@@ -799,6 +804,7 @@
{
int ret;
struct msm_watchdog_data *wdog_dd;
+ struct md_region md_entry;
if (!pdev->dev.of_node || !enable)
return -ENODEV;
@@ -820,6 +826,15 @@
goto err;
}
init_watchdog_data(wdog_dd);
+
+ /* Add wdog info to minidump table */
+ strlcpy(md_entry.name, "KWDOGDATA", sizeof(md_entry.name));
+ md_entry.virt_addr = (uintptr_t)wdog_dd;
+ md_entry.phys_addr = virt_to_phys(wdog_dd);
+ md_entry.size = sizeof(*wdog_dd);
+ if (msm_minidump_add_region(&md_entry))
+ pr_info("Failed to add Watchdog data in Minidump\n");
+
return 0;
err:
kzfree(wdog_dd);
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index b29e60d..d6089aa 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -944,8 +944,8 @@
* multiple EE's to write to a single PPID in arbiter version 5, there
* is more than one APID mapped to each PPID. The owner field for each
* of these mappings specifies the EE which is allowed to write to the
- * APID. The owner of the last (highest) APID for a given PPID will
- * receive interrupts from the PPID.
+ * APID. The owner of the last (highest) APID which has the IRQ owner
+ * bit set for a given PPID will receive interrupts from the PPID.
*/
for (apid = 0; apid < pa->max_periph; apid++) {
offset = pa->ver_ops->channel_map_offset(apid);
@@ -969,7 +969,10 @@
valid = pa->ppid_to_apid[ppid] & PMIC_ARB_CHAN_VALID;
prev_apid = pa->ppid_to_apid[ppid] & ~PMIC_ARB_CHAN_VALID;
- if (valid && is_irq_owner &&
+ if (!valid || pa->apid_data[apid].write_owner == pa->ee) {
+ /* First PPID mapping or one for this EE */
+ pa->ppid_to_apid[ppid] = apid | PMIC_ARB_CHAN_VALID;
+ } else if (valid && is_irq_owner &&
pa->apid_data[prev_apid].write_owner == pa->ee) {
/*
* Duplicate PPID mapping after the one for this EE;
@@ -977,9 +980,6 @@
*/
pa->apid_data[prev_apid].irq_owner
= pa->apid_data[apid].irq_owner;
- } else if (!valid || is_irq_owner) {
- /* First PPID mapping or duplicate for another EE */
- pa->ppid_to_apid[ppid] = apid | PMIC_ARB_CHAN_VALID;
}
pa->apid_data[apid].ppid = ppid;
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index 7c58e19..72f2b6a 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -57,6 +57,7 @@
return ret;
sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+ sg_dma_address(sgt->sgl) = sg_phys(sgt->sgl);
return 0;
}
@@ -97,9 +98,9 @@
&info->handle,
GFP_KERNEL);
else
- info->cpu_addr = dma_alloc_nonconsistent(dev, len,
- &info->handle,
- GFP_KERNEL);
+ info->cpu_addr = dma_alloc_attrs(dev, len, &info->handle,
+ GFP_KERNEL,
+ DMA_ATTR_FORCE_COHERENT);
if (!info->cpu_addr) {
dev_err(dev, "Fail to allocate buffer\n");
@@ -115,6 +116,11 @@
ion_cma_get_sgtable(dev,
info->table, info->cpu_addr, info->handle, len);
+ /* Ensure memory is dma-ready - refer to ion_buffer_create() */
+ if (info->is_cached)
+ dma_sync_sg_for_device(dev, info->table->sgl,
+ info->table->nents, DMA_BIDIRECTIONAL);
+
/* keep this for memory release */
buffer->priv_virt = info;
dev_dbg(dev, "Allocate buffer %pK\n", buffer);
@@ -129,10 +135,13 @@
{
struct device *dev = buffer->heap->priv;
struct ion_cma_buffer_info *info = buffer->priv_virt;
+ unsigned long attrs = 0;
dev_dbg(dev, "Release buffer %pK\n", buffer);
/* release memory */
- dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
+ if (info->is_cached)
+ attrs |= DMA_ATTR_FORCE_COHERENT;
+ dma_free_attrs(dev, buffer->size, info->cpu_addr, info->handle, attrs);
sg_free_table(info->table);
/* release sg table */
kfree(info->table);
@@ -175,8 +184,9 @@
struct ion_cma_buffer_info *info = buffer->priv_virt;
if (info->is_cached)
- return dma_mmap_nonconsistent(dev, vma, info->cpu_addr,
- info->handle, buffer->size);
+ return dma_mmap_attrs(dev, vma, info->cpu_addr,
+ info->handle, buffer->size,
+ DMA_ATTR_FORCE_COHERENT);
else
return dma_mmap_writecombine(dev, vma, info->cpu_addr,
info->handle, buffer->size);
diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-msi.c b/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
index 3d46b1b..7de992c 100644
--- a/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
+++ b/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
@@ -17,6 +17,7 @@
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include "../include/mc-bus.h"
+#include "fsl-mc-private.h"
/*
* Generate a unique ID identifying the interrupt (only used within the MSI
diff --git a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
index 7a6ac64..eaeb3c5 100644
--- a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
+++ b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
@@ -17,6 +17,7 @@
#include <linux/of.h>
#include <linux/of_irq.h>
#include "../include/mc-bus.h"
+#include "fsl-mc-private.h"
static struct irq_chip its_msi_irq_chip = {
.name = "fsl-mc-bus-msi",
diff --git a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
index 38dca69..ce500a5 100644
--- a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
+++ b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
@@ -260,7 +260,7 @@
out1:
iio_trigger_unregister(st->trig);
out:
- iio_trigger_put(st->trig);
+ iio_trigger_free(st->trig);
return ret;
}
@@ -273,7 +273,7 @@
peripheral_free(st->t->pin);
free_irq(st->irq, st);
iio_trigger_unregister(st->trig);
- iio_trigger_put(st->trig);
+ iio_trigger_free(st->trig);
return 0;
}
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
index 6fc9855..e533088 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
@@ -1213,23 +1213,21 @@
* \retval buffer
*/
static inline char *hai_dump_data_field(struct hsm_action_item *hai,
- char *buffer, int len)
+ char *buffer, size_t len)
{
- int i, sz, data_len;
+ int i, data_len;
char *ptr;
ptr = buffer;
- sz = len;
data_len = hai->hai_len - sizeof(*hai);
- for (i = 0 ; (i < data_len) && (sz > 0) ; i++) {
- int cnt;
-
- cnt = snprintf(ptr, sz, "%.2X",
- (unsigned char)hai->hai_data[i]);
- ptr += cnt;
- sz -= cnt;
+ for (i = 0; (i < data_len) && (len > 2); i++) {
+ snprintf(ptr, 3, "%02X", (unsigned char)hai->hai_data[i]);
+ ptr += 2;
+ len -= 2;
}
+
*ptr = '\0';
+
return buffer;
}
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
index 3c48b4f..d18ab3f 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
@@ -546,6 +546,13 @@
if (!lock)
return NULL;
+ if (lock->l_export && lock->l_export->exp_failed) {
+ CDEBUG(D_INFO, "lock export failed: lock %p, exp %p\n",
+ lock, lock->l_export);
+ LDLM_LOCK_PUT(lock);
+ return NULL;
+ }
+
/* It's unlikely but possible that someone marked the lock as
* destroyed after we did handle2object on it
*/
diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
index 26f3a37..0cb70c3 100644
--- a/drivers/staging/lustre/lustre/llite/rw26.c
+++ b/drivers/staging/lustre/lustre/llite/rw26.c
@@ -354,6 +354,10 @@
if (!lli->lli_has_smd)
return -EBADF;
+ /* Check EOF by ourselves */
+ if (iov_iter_rw(iter) == READ && file_offset >= i_size_read(inode))
+ return 0;
+
/* FIXME: io smaller than PAGE_SIZE is broken on ia64 ??? */
if ((file_offset & ~PAGE_MASK) || (count & ~PAGE_MASK))
return -EINVAL;
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
index 7dbb2b9..cd19ce8 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
@@ -744,16 +744,18 @@
/* count how many requests must be sent to the given target */
for (i = 0; i < hur->hur_request.hr_itemcount; i++) {
curr_tgt = lmv_find_target(lmv, &hur->hur_user_item[i].hui_fid);
+ if (IS_ERR(curr_tgt))
+ return PTR_ERR(curr_tgt);
if (obd_uuid_equals(&curr_tgt->ltd_uuid, &tgt_mds->ltd_uuid))
nr++;
}
return nr;
}
-static void lmv_hsm_req_build(struct lmv_obd *lmv,
- struct hsm_user_request *hur_in,
- const struct lmv_tgt_desc *tgt_mds,
- struct hsm_user_request *hur_out)
+static int lmv_hsm_req_build(struct lmv_obd *lmv,
+ struct hsm_user_request *hur_in,
+ const struct lmv_tgt_desc *tgt_mds,
+ struct hsm_user_request *hur_out)
{
int i, nr_out;
struct lmv_tgt_desc *curr_tgt;
@@ -764,6 +766,8 @@
for (i = 0; i < hur_in->hur_request.hr_itemcount; i++) {
curr_tgt = lmv_find_target(lmv,
&hur_in->hur_user_item[i].hui_fid);
+ if (IS_ERR(curr_tgt))
+ return PTR_ERR(curr_tgt);
if (obd_uuid_equals(&curr_tgt->ltd_uuid, &tgt_mds->ltd_uuid)) {
hur_out->hur_user_item[nr_out] =
hur_in->hur_user_item[i];
@@ -773,6 +777,8 @@
hur_out->hur_request.hr_itemcount = nr_out;
memcpy(hur_data(hur_out), hur_data(hur_in),
hur_in->hur_request.hr_data_len);
+
+ return 0;
}
static int lmv_hsm_ct_unregister(struct lmv_obd *lmv, unsigned int cmd, int len,
@@ -1052,15 +1058,17 @@
} else {
/* split fid list to their respective MDS */
for (i = 0; i < count; i++) {
- unsigned int nr, reqlen;
- int rc1;
struct hsm_user_request *req;
+ size_t reqlen;
+ int nr, rc1;
tgt = lmv->tgts[i];
if (!tgt || !tgt->ltd_exp)
continue;
nr = lmv_hsm_req_count(lmv, hur, tgt);
+ if (nr < 0)
+ return nr;
if (nr == 0) /* nothing for this MDS */
continue;
@@ -1072,10 +1080,13 @@
if (!req)
return -ENOMEM;
- lmv_hsm_req_build(lmv, hur, tgt, req);
+ rc1 = lmv_hsm_req_build(lmv, hur, tgt, req);
+ if (rc1 < 0)
+ goto hsm_req_err;
rc1 = obd_iocontrol(cmd, tgt->ltd_exp, reqlen,
req, uarg);
+hsm_req_err:
if (rc1 != 0 && rc == 0)
rc = rc1;
kvfree(req);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c
index 72f3930..9d34848 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/service.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/service.c
@@ -1264,20 +1264,15 @@
*/
if (req->rq_ops->hpreq_check) {
rc = req->rq_ops->hpreq_check(req);
- /**
- * XXX: Out of all current
- * ptlrpc_hpreq_ops::hpreq_check(), only
- * ldlm_cancel_hpreq_check() can return an error code;
- * other functions assert in similar places, which seems
- * odd. What also does not seem right is that handlers
- * for those RPCs do not assert on the same checks, but
- * rather handle the error cases. e.g. see
- * ost_rw_hpreq_check(), and ost_brw_read(),
- * ost_brw_write().
+ if (rc == -ESTALE) {
+ req->rq_status = rc;
+ ptlrpc_error(req);
+ }
+ /** can only return error,
+ * 0 for normal request,
+ * or 1 for high priority request
*/
- if (rc < 0)
- return rc;
- LASSERT(rc == 0 || rc == 1);
+ LASSERT(rc <= 1);
}
spin_lock_bh(&req->rq_export->exp_rpc_lock);
diff --git a/drivers/staging/rtl8712/ieee80211.h b/drivers/staging/rtl8712/ieee80211.h
index 67ab580..68fd65e 100644
--- a/drivers/staging/rtl8712/ieee80211.h
+++ b/drivers/staging/rtl8712/ieee80211.h
@@ -138,51 +138,51 @@
};
struct ieee80211_hdr {
- u16 frame_ctl;
- u16 duration_id;
+ __le16 frame_ctl;
+ __le16 duration_id;
u8 addr1[ETH_ALEN];
u8 addr2[ETH_ALEN];
u8 addr3[ETH_ALEN];
- u16 seq_ctl;
+ __le16 seq_ctl;
u8 addr4[ETH_ALEN];
-} __packed;
+} __packed __aligned(2);
struct ieee80211_hdr_3addr {
- u16 frame_ctl;
- u16 duration_id;
+ __le16 frame_ctl;
+ __le16 duration_id;
u8 addr1[ETH_ALEN];
u8 addr2[ETH_ALEN];
u8 addr3[ETH_ALEN];
- u16 seq_ctl;
-} __packed;
+ __le16 seq_ctl;
+} __packed __aligned(2);
struct ieee80211_hdr_qos {
- u16 frame_ctl;
- u16 duration_id;
+ __le16 frame_ctl;
+ __le16 duration_id;
u8 addr1[ETH_ALEN];
u8 addr2[ETH_ALEN];
u8 addr3[ETH_ALEN];
- u16 seq_ctl;
+ __le16 seq_ctl;
u8 addr4[ETH_ALEN];
- u16 qc;
-} __packed;
+ __le16 qc;
+} __packed __aligned(2);
struct ieee80211_hdr_3addr_qos {
- u16 frame_ctl;
- u16 duration_id;
+ __le16 frame_ctl;
+ __le16 duration_id;
u8 addr1[ETH_ALEN];
u8 addr2[ETH_ALEN];
u8 addr3[ETH_ALEN];
- u16 seq_ctl;
- u16 qc;
+ __le16 seq_ctl;
+ __le16 qc;
} __packed;
struct eapol {
u8 snap[6];
- u16 ethertype;
+ __be16 ethertype;
u8 version;
u8 type;
- u16 length;
+ __le16 length;
} __packed;
enum eap_type {
@@ -514,13 +514,13 @@
*/
struct ieee80211_header_data {
- u16 frame_ctl;
- u16 duration_id;
+ __le16 frame_ctl;
+ __le16 duration_id;
u8 addr1[6];
u8 addr2[6];
u8 addr3[6];
- u16 seq_ctrl;
-};
+ __le16 seq_ctrl;
+} __packed __aligned(2);
#define BEACON_PROBE_SSID_ID_POSITION 12
@@ -552,18 +552,18 @@
/*
* These are the data types that can make up management packets
*
- u16 auth_algorithm;
- u16 auth_sequence;
- u16 beacon_interval;
- u16 capability;
+ __le16 auth_algorithm;
+ __le16 auth_sequence;
+ __le16 beacon_interval;
+ __le16 capability;
u8 current_ap[ETH_ALEN];
- u16 listen_interval;
+ __le16 listen_interval;
struct {
u16 association_id:14, reserved:2;
} __packed;
- u32 time_stamp[2];
- u16 reason;
- u16 status;
+ __le32 time_stamp[2];
+ __le16 reason;
+ __le16 status;
*/
#define IEEE80211_DEFAULT_TX_ESSID "Penguin"
@@ -571,16 +571,16 @@
struct ieee80211_authentication {
struct ieee80211_header_data header;
- u16 algorithm;
- u16 transaction;
- u16 status;
+ __le16 algorithm;
+ __le16 transaction;
+ __le16 status;
} __packed;
struct ieee80211_probe_response {
struct ieee80211_header_data header;
- u32 time_stamp[2];
- u16 beacon_interval;
- u16 capability;
+ __le32 time_stamp[2];
+ __le16 beacon_interval;
+ __le16 capability;
struct ieee80211_info_element info_element;
} __packed;
@@ -590,16 +590,16 @@
struct ieee80211_assoc_request_frame {
struct ieee80211_hdr_3addr header;
- u16 capability;
- u16 listen_interval;
+ __le16 capability;
+ __le16 listen_interval;
struct ieee80211_info_element_hdr info_element;
} __packed;
struct ieee80211_assoc_response_frame {
struct ieee80211_hdr_3addr header;
- u16 capability;
- u16 status;
- u16 aid;
+ __le16 capability;
+ __le16 status;
+ __le16 aid;
} __packed;
struct ieee80211_txb {
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c
index be38364..c478639 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.c
+++ b/drivers/staging/rtl8712/rtl871x_xmit.c
@@ -344,7 +344,8 @@
* some settings above.
*/
if (check_fwstate(pmlmepriv, WIFI_MP_STATE))
- pattrib->priority = (txdesc.txdw1 >> QSEL_SHT) & 0x1f;
+ pattrib->priority =
+ (le32_to_cpu(txdesc.txdw1) >> QSEL_SHT) & 0x1f;
return _SUCCESS;
}
@@ -485,7 +486,7 @@
struct ieee80211_hdr *pwlanhdr = (struct ieee80211_hdr *)hdr;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct qos_priv *pqospriv = &pmlmepriv->qospriv;
- u16 *fctrl = &pwlanhdr->frame_ctl;
+ __le16 *fctrl = &pwlanhdr->frame_ctl;
memset(hdr, 0, WLANHDR_OFFSET);
SetFrameSubType(fctrl, pattrib->subtype);
@@ -574,7 +575,7 @@
snap->oui[0] = oui[0];
snap->oui[1] = oui[1];
snap->oui[2] = oui[2];
- *(u16 *)(data + SNAP_SIZE) = htons(h_proto);
+ *(__be16 *)(data + SNAP_SIZE) = htons(h_proto);
return SNAP_SIZE + sizeof(u16);
}
diff --git a/drivers/thermal/qcom/msm_lmh_dcvs.c b/drivers/thermal/qcom/msm_lmh_dcvs.c
index 4e5546e..94c93b5 100644
--- a/drivers/thermal/qcom/msm_lmh_dcvs.c
+++ b/drivers/thermal/qcom/msm_lmh_dcvs.c
@@ -58,8 +58,7 @@
#define LIMITS_CLUSTER_0 0x6370302D
#define LIMITS_CLUSTER_1 0x6370312D
-#define LIMITS_DOMAIN_MAX 0x444D4158
-#define LIMITS_DOMAIN_MIN 0x444D494E
+#define LIMITS_FREQ_CAP 0x46434150
#define LIMITS_TEMP_DEFAULT 75000
#define LIMITS_TEMP_HIGH_THRESH_MAX 120000
@@ -225,31 +224,36 @@
}
static int limits_dcvs_write(uint32_t node_id, uint32_t fn,
- uint32_t setting, uint32_t val)
+ uint32_t setting, uint32_t val, uint32_t val1,
+ bool enable_val1)
{
int ret;
struct scm_desc desc_arg;
uint32_t *payload = NULL;
+ uint32_t payload_len;
- payload = kzalloc(sizeof(uint32_t) * 5, GFP_KERNEL);
+ payload_len = ((enable_val1) ? 6 : 5) * sizeof(uint32_t);
+ payload = kzalloc(payload_len, GFP_KERNEL);
if (!payload)
return -ENOMEM;
payload[0] = fn; /* algorithm */
payload[1] = 0; /* unused sub-algorithm */
payload[2] = setting;
- payload[3] = 1; /* number of values */
+ payload[3] = enable_val1 ? 2 : 1; /* number of values */
payload[4] = val;
+ if (enable_val1)
+ payload[5] = val1;
desc_arg.args[0] = SCM_BUFFER_PHYS(payload);
- desc_arg.args[1] = sizeof(uint32_t) * 5;
+ desc_arg.args[1] = payload_len;
desc_arg.args[2] = LIMITS_NODE_DCVS;
desc_arg.args[3] = node_id;
desc_arg.args[4] = 0; /* version */
desc_arg.arginfo = SCM_ARGS(5, SCM_RO, SCM_VAL, SCM_VAL,
SCM_VAL, SCM_VAL);
- dmac_flush_range(payload, (void *)payload + 5 * (sizeof(uint32_t)));
+ dmac_flush_range(payload, (void *)payload + payload_len);
ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH, LIMITS_DCVSH), &desc_arg);
kfree(payload);
@@ -288,16 +292,17 @@
hw->temp_limits[LIMITS_TRIP_ARM] = (uint32_t)low;
ret = limits_dcvs_write(hw->affinity, LIMITS_SUB_FN_THERMAL,
- LIMITS_ARM_THRESHOLD, low);
+ LIMITS_ARM_THRESHOLD, low, 0, 0);
if (ret)
return ret;
ret = limits_dcvs_write(hw->affinity, LIMITS_SUB_FN_THERMAL,
- LIMITS_HI_THRESHOLD, high);
+ LIMITS_HI_THRESHOLD, high, 0, 0);
if (ret)
return ret;
ret = limits_dcvs_write(hw->affinity, LIMITS_SUB_FN_THERMAL,
LIMITS_LOW_THRESHOLD,
- high - LIMITS_LOW_THRESHOLD_OFFSET);
+ high - LIMITS_LOW_THRESHOLD_OFFSET,
+ 0, 0);
if (ret)
return ret;
@@ -365,8 +370,9 @@
max_freq = hw->cdev_data[idx].max_freq;
idx++;
}
- ret = limits_dcvs_write(hw->affinity, LIMITS_SUB_FN_GENERAL,
- LIMITS_DOMAIN_MAX, max_freq);
+ ret = limits_dcvs_write(hw->affinity, LIMITS_SUB_FN_THERMAL,
+ LIMITS_FREQ_CAP, max_freq,
+ (max_freq == U32_MAX) ? 0 : 1, 1);
mutex_unlock(&hw->access_lock);
lmh_dcvs_notify(hw);
@@ -556,22 +562,22 @@
/* Enable the thermal algorithm early */
ret = limits_dcvs_write(hw->affinity, LIMITS_SUB_FN_THERMAL,
- LIMITS_ALGO_MODE_ENABLE, 1);
+ LIMITS_ALGO_MODE_ENABLE, 1, 0, 0);
if (ret)
return ret;
/* Enable the LMH outer loop algorithm */
ret = limits_dcvs_write(hw->affinity, LIMITS_SUB_FN_CRNT,
- LIMITS_ALGO_MODE_ENABLE, 1);
+ LIMITS_ALGO_MODE_ENABLE, 1, 0, 0);
if (ret)
return ret;
/* Enable the Reliability algorithm */
ret = limits_dcvs_write(hw->affinity, LIMITS_SUB_FN_REL,
- LIMITS_ALGO_MODE_ENABLE, 1);
+ LIMITS_ALGO_MODE_ENABLE, 1, 0, 0);
if (ret)
return ret;
/* Enable the BCL algorithm */
ret = limits_dcvs_write(hw->affinity, LIMITS_SUB_FN_BCL,
- LIMITS_ALGO_MODE_ENABLE, 1);
+ LIMITS_ALGO_MODE_ENABLE, 1, 0, 0);
if (ret)
return ret;
ret = enable_lmh();
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 7e97a1c..15eaea5 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -193,18 +193,17 @@
},
/*
- * Common definitions for legacy IrDA ports, dependent on
- * regshift value.
+ * Common definitions for legacy IrDA ports.
*/
[SCIx_IRDA_REGTYPE] = {
[SCSMR] = { 0x00, 8 },
- [SCBRR] = { 0x01, 8 },
- [SCSCR] = { 0x02, 8 },
- [SCxTDR] = { 0x03, 8 },
- [SCxSR] = { 0x04, 8 },
- [SCxRDR] = { 0x05, 8 },
- [SCFCR] = { 0x06, 8 },
- [SCFDR] = { 0x07, 16 },
+ [SCBRR] = { 0x02, 8 },
+ [SCSCR] = { 0x04, 8 },
+ [SCxTDR] = { 0x06, 8 },
+ [SCxSR] = { 0x08, 16 },
+ [SCxRDR] = { 0x0a, 8 },
+ [SCFCR] = { 0x0c, 8 },
+ [SCFDR] = { 0x0e, 16 },
[SCTFDR] = sci_reg_invalid,
[SCRFDR] = sci_reg_invalid,
[SCSPTR] = sci_reg_invalid,
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 7b8ca7d..e4b39a7 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2290,6 +2290,14 @@
return hcd->driver->get_core_id(hcd);
}
+int usb_hcd_stop_endpoint(struct usb_device *udev,
+ struct usb_host_endpoint *ep)
+{
+ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+
+ return hcd->driver->stop_endpoint(hcd, udev, ep);
+}
+
#ifdef CONFIG_PM
int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg)
@@ -3093,6 +3101,7 @@
}
usb_put_invalidate_rhdev(hcd);
+ hcd->flags = 0;
}
EXPORT_SYMBOL_GPL(usb_remove_hcd);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 70c90e4..50a6f2f 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -4744,7 +4744,7 @@
static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
u16 portchange)
{
- int status = -ENODEV;
+ int ret, status = -ENODEV;
int i;
unsigned unit_load;
struct usb_device *hdev = hub->hdev;
@@ -4752,6 +4752,7 @@
struct usb_port *port_dev = hub->ports[port1 - 1];
struct usb_device *udev = port_dev->child;
static int unreliable_port = -1;
+ enum usb_device_speed dev_speed = USB_SPEED_UNKNOWN;
/* Disconnect any existing devices under this port */
if (udev) {
@@ -4806,6 +4807,7 @@
else
unit_load = 100;
+retry_enum:
status = 0;
for (i = 0; i < SET_CONFIG_TRIES; i++) {
@@ -4843,6 +4845,13 @@
if (status < 0)
goto loop;
+ dev_speed = udev->speed;
+ if (udev->speed > USB_SPEED_UNKNOWN &&
+ udev->speed <= USB_SPEED_HIGH && hcd->usb_phy
+ && hcd->usb_phy->disable_chirp)
+ hcd->usb_phy->disable_chirp(hcd->usb_phy,
+ false);
+
if (udev->quirks & USB_QUIRK_DELAY_INIT)
msleep(2000);
@@ -4945,6 +4954,19 @@
if (status != -ENOTCONN && status != -ENODEV)
dev_err(&port_dev->dev,
"unable to enumerate USB device\n");
+ if (!hub->hdev->parent && dev_speed == USB_SPEED_UNKNOWN
+ && hcd->usb_phy && hcd->usb_phy->disable_chirp) {
+ ret = hcd->usb_phy->disable_chirp(hcd->usb_phy, true);
+ if (!ret) {
+ dev_dbg(&port_dev->dev,
+ "chirp disabled re-try enum\n");
+ goto retry_enum;
+ } else {
+ /* bail out and re-enable chirping */
+ hcd->usb_phy->disable_chirp(hcd->usb_phy,
+ false);
+ }
+ }
}
done:
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index d745733..bb2a4fe 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -734,6 +734,12 @@
}
EXPORT_SYMBOL(usb_get_controller_id);
+int usb_stop_endpoint(struct usb_device *dev, struct usb_host_endpoint *ep)
+{
+ return usb_hcd_stop_endpoint(dev, ep);
+}
+EXPORT_SYMBOL(usb_stop_endpoint);
+
/*-------------------------------------------------------------------*/
/*
* __usb_get_extra_descriptor() finds a descriptor of specific type in the
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index f511055..68a40f9 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -480,6 +480,8 @@
#define DWC3_DEPCMD_SETTRANSFRESOURCE (0x02 << 0)
#define DWC3_DEPCMD_SETEPCONFIG (0x01 << 0)
+#define DWC3_DEPCMD_CMD(x) ((x) & 0xf)
+
/* The EP number goes 0..31 so ep0 is always out and ep1 is always in */
#define DWC3_DALEPENA_EP(n) (1 << n)
@@ -613,6 +615,7 @@
#define DWC3_EP_BUSY (1 << 4)
#define DWC3_EP_PENDING_REQUEST (1 << 5)
#define DWC3_EP_MISSED_ISOC (1 << 6)
+#define DWC3_EP_TRANSFER_STARTED (1 << 8)
/* This last one is specific to EP0 */
#define DWC3_EP0_DIR_IN (1 << 31)
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 0ee88b4..5571374 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -355,7 +355,7 @@
}
}
- if (cmd == DWC3_DEPCMD_STARTTRANSFER) {
+ if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
int needs_wakeup;
needs_wakeup = (dwc->link_state == DWC3_LINK_STATE_U1 ||
@@ -423,6 +423,20 @@
trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status);
+ if (ret == 0) {
+ switch (DWC3_DEPCMD_CMD(cmd)) {
+ case DWC3_DEPCMD_STARTTRANSFER:
+ dep->flags |= DWC3_EP_TRANSFER_STARTED;
+ break;
+ case DWC3_DEPCMD_ENDTRANSFER:
+ dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
+ break;
+ default:
+ /* nothing */
+ break;
+ }
+ }
+
if (unlikely(susphy)) {
reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
reg |= DWC3_GUSB2PHYCFG_SUSPHY;
@@ -1200,6 +1214,14 @@
return 0;
}
+static int __dwc3_gadget_get_frame(struct dwc3 *dwc)
+{
+ u32 reg;
+
+ reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+ return DWC3_DSTS_SOFFN(reg);
+}
+
static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
struct dwc3_ep *dep, u32 cur_uf)
{
@@ -1214,8 +1236,11 @@
return;
}
- /* 4 micro frames in the future */
- uf = cur_uf + dep->interval * 4;
+ /*
+ * Schedule the first trb for one interval in the future or at
+ * least 4 microframes.
+ */
+ uf = cur_uf + max_t(u32, 4, dep->interval);
ret = __dwc3_gadget_kick_transfer(dep, uf);
if (ret < 0)
@@ -1285,12 +1310,28 @@
* errors which will force us issue EndTransfer command.
*/
if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
- if ((dep->flags & DWC3_EP_PENDING_REQUEST) &&
- list_empty(&dep->started_list)) {
- dwc3_stop_active_transfer(dwc, dep->number, true);
- dep->flags = DWC3_EP_ENABLED;
+ if ((dep->flags & DWC3_EP_PENDING_REQUEST)) {
+ if (dep->flags & DWC3_EP_TRANSFER_STARTED) {
+ dwc3_stop_active_transfer(dwc, dep->number, true);
+ dep->flags = DWC3_EP_ENABLED;
+ } else {
+ u32 cur_uf;
+
+ cur_uf = __dwc3_gadget_get_frame(dwc);
+ __dwc3_gadget_start_isoc(dwc, dep, cur_uf);
+ dep->flags &= ~DWC3_EP_PENDING_REQUEST;
+ }
+ return 0;
}
- return 0;
+
+ if ((dep->flags & DWC3_EP_BUSY) &&
+ !(dep->flags & DWC3_EP_MISSED_ISOC)) {
+ WARN_ON_ONCE(!dep->resource_index);
+ ret = __dwc3_gadget_kick_transfer(dep,
+ dep->resource_index);
+ }
+
+ goto out;
}
if (!dwc3_calc_trbs_left(dep))
@@ -1301,6 +1342,7 @@
dwc3_trace(trace_dwc3_gadget,
"%s: failed to kick transfers",
dep->name);
+out:
if (ret == -EBUSY)
ret = 0;
@@ -1635,10 +1677,8 @@
static int dwc3_gadget_get_frame(struct usb_gadget *g)
{
struct dwc3 *dwc = gadget_to_dwc(g);
- u32 reg;
- reg = dwc3_readl(dwc->regs, DWC3_DSTS);
- return DWC3_DSTS_SOFFN(reg);
+ return __dwc3_gadget_get_frame(dwc);
}
static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
@@ -2831,43 +2871,55 @@
static void dwc3_disconnect_gadget(struct dwc3 *dwc)
{
+ struct usb_gadget_driver *gadget_driver;
+
if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
+ gadget_driver = dwc->gadget_driver;
spin_unlock(&dwc->lock);
dbg_event(0xFF, "DISCONNECT", 0);
- dwc->gadget_driver->disconnect(&dwc->gadget);
+ gadget_driver->disconnect(&dwc->gadget);
spin_lock(&dwc->lock);
}
}
static void dwc3_suspend_gadget(struct dwc3 *dwc)
{
+ struct usb_gadget_driver *gadget_driver;
+
if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
+ gadget_driver = dwc->gadget_driver;
spin_unlock(&dwc->lock);
dbg_event(0xFF, "SUSPEND", 0);
- dwc->gadget_driver->suspend(&dwc->gadget);
+ gadget_driver->suspend(&dwc->gadget);
spin_lock(&dwc->lock);
}
}
static void dwc3_resume_gadget(struct dwc3 *dwc)
{
+ struct usb_gadget_driver *gadget_driver;
+
if (dwc->gadget_driver && dwc->gadget_driver->resume) {
+ gadget_driver = dwc->gadget_driver;
spin_unlock(&dwc->lock);
dbg_event(0xFF, "RESUME", 0);
- dwc->gadget_driver->resume(&dwc->gadget);
+ gadget_driver->resume(&dwc->gadget);
spin_lock(&dwc->lock);
}
}
static void dwc3_reset_gadget(struct dwc3 *dwc)
{
+ struct usb_gadget_driver *gadget_driver;
+
if (!dwc->gadget_driver)
return;
if (dwc->gadget.speed != USB_SPEED_UNKNOWN) {
+ gadget_driver = dwc->gadget_driver;
spin_unlock(&dwc->lock);
dbg_event(0xFF, "UDC RESET", 0);
- usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver);
+ usb_gadget_udc_reset(&dwc->gadget, gadget_driver);
spin_lock(&dwc->lock);
}
}
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 885ed26..f915e55 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -14,11 +14,16 @@
#include <linux/kdev_t.h>
#include <linux/usb/ch9.h>
+#ifdef CONFIG_USB_F_NCM
+#include <function/u_ncm.h>
+#endif
+
#ifdef CONFIG_USB_CONFIGFS_F_ACC
extern int acc_ctrlrequest(struct usb_composite_dev *cdev,
const struct usb_ctrlrequest *ctrl);
void acc_disconnect(void);
#endif
+
static struct class *android_class;
static struct device *android_device;
static int index;
@@ -84,6 +89,7 @@
struct usb_composite_driver composite;
struct usb_composite_dev cdev;
bool use_os_desc;
+ bool unbinding;
char b_vendor_code;
char qw_sign[OS_STRING_QW_SIGN_LEN];
#ifdef CONFIG_USB_CONFIGFS_UEVENT
@@ -281,9 +287,12 @@
if (!gi->composite.gadget_driver.udc_name)
return -ENODEV;
+ gi->unbinding = true;
ret = usb_gadget_unregister_driver(&gi->composite.gadget_driver);
if (ret)
return ret;
+
+ gi->unbinding = false;
kfree(gi->composite.gadget_driver.udc_name);
gi->composite.gadget_driver.udc_name = NULL;
return 0;
@@ -1504,6 +1513,18 @@
}
}
+#ifdef CONFIG_USB_F_NCM
+ if (value < 0)
+ value = ncm_ctrlrequest(cdev, c);
+
+ /*
+ * for mirror link command case, if it already been handled,
+ * do not pass to composite_setup
+ */
+ if (value == 0)
+ return value;
+#endif
+
#ifdef CONFIG_USB_CONFIGFS_F_ACC
if (value < 0)
value = acc_ctrlrequest(cdev, c);
@@ -1555,7 +1576,8 @@
acc_disconnect();
#endif
gi->connected = 0;
- schedule_work(&gi->work);
+ if (!gi->unbinding)
+ schedule_work(&gi->work);
composite_disconnect(gadget);
}
#endif
diff --git a/drivers/usb/gadget/function/f_gsi.h b/drivers/usb/gadget/function/f_gsi.h
index d6bf0f4..bdd0dfa 100644
--- a/drivers/usb/gadget/function/f_gsi.h
+++ b/drivers/usb/gadget/function/f_gsi.h
@@ -586,7 +586,7 @@
};
static struct usb_descriptor_header *gsi_eth_fs_function[] = {
- (struct usb_descriptor_header *) &gsi_eth_fs_function,
+ (struct usb_descriptor_header *) &rndis_gsi_iad_descriptor,
/* control interface matches ACM, not Ethernet */
(struct usb_descriptor_header *) &rndis_gsi_control_intf,
(struct usb_descriptor_header *) &rndis_gsi_header_desc,
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index d2fbed7..98e353d 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -1605,10 +1605,57 @@
.ct_owner = THIS_MODULE,
};
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+
+struct ncm_setup_desc {
+ struct work_struct work;
+ struct device *device;
+ uint8_t major; // Mirror Link major version
+ uint8_t minor; // Mirror Link minor version
+};
+
+static struct ncm_setup_desc *_ncm_setup_desc;
+
+#define MIRROR_LINK_STRING_LENGTH_MAX 32
+static void ncm_setup_work(struct work_struct *data)
+{
+ char mirror_link_string[MIRROR_LINK_STRING_LENGTH_MAX];
+ char *envp[2] = { mirror_link_string, NULL };
+
+ snprintf(mirror_link_string, MIRROR_LINK_STRING_LENGTH_MAX,
+ "MirrorLink=V%d.%d",
+ _ncm_setup_desc->major, _ncm_setup_desc->minor);
+ kobject_uevent_env(&_ncm_setup_desc->device->kobj, KOBJ_CHANGE, envp);
+}
+
+int ncm_ctrlrequest(struct usb_composite_dev *cdev,
+ const struct usb_ctrlrequest *ctrl)
+{
+ int value = -EOPNOTSUPP;
+
+ if (ctrl->bRequestType == 0x40 && ctrl->bRequest == 0xF0) {
+ _ncm_setup_desc->minor = (uint8_t)(ctrl->wValue >> 8);
+ _ncm_setup_desc->major = (uint8_t)(ctrl->wValue & 0xFF);
+ schedule_work(&_ncm_setup_desc->work);
+ value = 0;
+ }
+
+ return value;
+}
+#endif
+
static void ncm_free_inst(struct usb_function_instance *f)
{
struct f_ncm_opts *opts;
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+ /* release _ncm_setup_desc related resource */
+ device_destroy(_ncm_setup_desc->device->class,
+ _ncm_setup_desc->device->devt);
+ cancel_work(&_ncm_setup_desc->work);
+ kfree(_ncm_setup_desc);
+#endif
+
opts = container_of(f, struct f_ncm_opts, func_inst);
if (opts->bound)
gether_cleanup(netdev_priv(opts->net));
@@ -1627,6 +1674,14 @@
config_group_init_type_name(&opts->func_inst.group, "", &ncm_func_type);
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+ _ncm_setup_desc = kzalloc(sizeof(*_ncm_setup_desc), GFP_KERNEL);
+ if (!_ncm_setup_desc)
+ return ERR_PTR(-ENOMEM);
+ INIT_WORK(&_ncm_setup_desc->work, ncm_setup_work);
+ _ncm_setup_desc->device = create_function_device("f_ncm");
+#endif
+
return &opts->func_inst;
}
diff --git a/drivers/usb/gadget/function/u_ncm.h b/drivers/usb/gadget/function/u_ncm.h
index ce0f3a7..b4541e2 100644
--- a/drivers/usb/gadget/function/u_ncm.h
+++ b/drivers/usb/gadget/function/u_ncm.h
@@ -33,4 +33,8 @@
int refcnt;
};
+extern struct device *create_function_device(char *name);
+int ncm_ctrlrequest(struct usb_composite_dev *cdev,
+ const struct usb_ctrlrequest *ctrl);
+
#endif /* U_NCM_H */
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 1332057..ab3633c 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -5045,6 +5045,61 @@
return xhci->core_id;
}
+static int xhci_stop_endpoint(struct usb_hcd *hcd,
+ struct usb_device *udev, struct usb_host_endpoint *ep)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ unsigned int ep_index;
+ struct xhci_virt_device *virt_dev;
+ struct xhci_command *cmd;
+ unsigned long flags;
+ int ret = 0;
+
+ cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
+ if (!cmd)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ virt_dev = xhci->devs[udev->slot_id];
+ if (!virt_dev) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ ep_index = xhci_get_endpoint_index(&ep->desc);
+ if (virt_dev->eps[ep_index].ring &&
+ virt_dev->eps[ep_index].ring->dequeue) {
+ ret = xhci_queue_stop_endpoint(xhci, cmd, udev->slot_id,
+ ep_index, 0);
+ if (ret)
+ goto err;
+
+ xhci_ring_cmd_db(xhci);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+ /* Wait for stop endpoint command to finish */
+ wait_for_completion(cmd->completion);
+
+ if (cmd->status == COMP_CMD_ABORT ||
+ cmd->status == COMP_CMD_STOP) {
+ xhci_warn(xhci,
+ "stop endpoint command timeout for ep%d%s\n",
+ usb_endpoint_num(&ep->desc),
+ usb_endpoint_dir_in(&ep->desc) ? "in" : "out");
+ ret = -ETIME;
+ }
+ goto free_cmd;
+ }
+
+err:
+ spin_unlock_irqrestore(&xhci->lock, flags);
+free_cmd:
+ xhci_free_command(xhci, cmd);
+ return ret;
+}
+
+
+
static const struct hc_driver xhci_hc_driver = {
.description = "xhci-hcd",
.product_desc = "xHCI Host Controller",
@@ -5109,6 +5164,7 @@
.get_sec_event_ring_phys_addr = xhci_get_sec_event_ring_phys_addr,
.get_xfer_ring_phys_addr = xhci_get_xfer_ring_phys_addr,
.get_core_id = xhci_get_core_id,
+ .stop_endpoint = xhci_stop_endpoint,
};
void xhci_init_driver(struct hc_driver *drv,
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index 32d0e52..44ab6d6 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -368,6 +368,7 @@
enum usbpd_state current_state;
bool hard_reset_recvd;
+ ktime_t hard_reset_recvd_time;
struct list_head rx_q;
spinlock_t rx_lock;
struct rx_msg *rx_ext_msg;
@@ -614,6 +615,9 @@
int ret;
u16 hdr;
+ if (pd->hard_reset_recvd)
+ return -EBUSY;
+
hdr = PD_MSG_HDR(msg_type, pd->current_dr, pd->current_pr,
pd->tx_msgid, num_data, pd->spec_rev);
@@ -805,11 +809,13 @@
return;
}
- usbpd_dbg(&pd->dev, "hard reset received\n");
+ pd->hard_reset_recvd = true;
+ pd->hard_reset_recvd_time = ktime_get();
+
+ usbpd_err(&pd->dev, "hard reset received\n");
/* Force CC logic to source/sink to keep Rp/Rd unchanged */
set_power_role(pd, pd->current_pr);
- pd->hard_reset_recvd = true;
power_supply_set_property(pd->usb_psy,
POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);
@@ -1074,6 +1080,9 @@
unsigned long flags;
int ret;
+ if (pd->hard_reset_recvd) /* let usbpd_sm handle it */
+ return;
+
usbpd_dbg(&pd->dev, "%s -> %s\n",
usbpd_state_strings[pd->current_state],
usbpd_state_strings[next_state]);
@@ -2044,8 +2053,13 @@
if (pd->current_pr == PR_SINK) {
usbpd_set_state(pd, PE_SNK_TRANSITION_TO_DEFAULT);
} else {
+ s64 delta = ktime_ms_delta(ktime_get(),
+ pd->hard_reset_recvd_time);
pd->current_state = PE_SRC_TRANSITION_TO_DEFAULT;
- kick_sm(pd, PS_HARD_RESET_TIME);
+ if (delta >= PS_HARD_RESET_TIME)
+ kick_sm(pd, 0);
+ else
+ kick_sm(pd, PS_HARD_RESET_TIME - (int)delta);
}
goto sm_done;
diff --git a/drivers/usb/pd/qpnp-pdphy.c b/drivers/usb/pd/qpnp-pdphy.c
index 6395ca2..8a4f3d4 100644
--- a/drivers/usb/pd/qpnp-pdphy.c
+++ b/drivers/usb/pd/qpnp-pdphy.c
@@ -582,6 +582,10 @@
{
struct usb_pdphy *pdphy = data;
+ /* TX already aborted by received signal */
+ if (pdphy->tx_status != -EINPROGRESS)
+ return IRQ_HANDLED;
+
if (irq == pdphy->msg_tx_irq) {
pdphy->msg_tx_cnt++;
pdphy->tx_status = 0;
@@ -635,6 +639,10 @@
if (pdphy->signal_cb)
pdphy->signal_cb(pdphy->usbpd, frame_type);
+ if (pdphy->tx_status == -EINPROGRESS) {
+ pdphy->tx_status = -EBUSY;
+ wake_up(&pdphy->tx_waitq);
+ }
done:
return IRQ_HANDLED;
}
diff --git a/drivers/usb/phy/phy-msm-qusb-v2.c b/drivers/usb/phy/phy-msm-qusb-v2.c
index bc27c31..81c39a3 100644
--- a/drivers/usb/phy/phy-msm-qusb-v2.c
+++ b/drivers/usb/phy/phy-msm-qusb-v2.c
@@ -26,6 +26,7 @@
#include <linux/regulator/machine.h>
#include <linux/usb/phy.h>
#include <linux/reset.h>
+#include <linux/debugfs.h>
/* QUSB2PHY_PWR_CTRL1 register related bits */
#define PWR_CTRL1_POWR_DOWN BIT(0)
@@ -65,13 +66,12 @@
#define BIAS_CTRL_2_OVERRIDE_VAL 0x28
+#define SQ_CTRL1_CHIRP_DISABLE 0x20
+#define SQ_CTRL2_CHIRP_DISABLE 0x80
+
/* PERIPH_SS_PHY_REFGEN_NORTH_BG_CTRL register bits */
#define BANDGAP_BYPASS BIT(0)
-unsigned int phy_tune1;
-module_param(phy_tune1, uint, 0644);
-MODULE_PARM_DESC(phy_tune1, "QUSB PHY v2 TUNE1");
-
enum qusb_phy_reg {
PORT_TUNE1,
PLL_COMMON_STATUS_ONE,
@@ -80,6 +80,8 @@
PLL_CORE_INPUT_OVERRIDE,
TEST1,
BIAS_CTRL_2,
+ SQ_CTRL1,
+ SQ_CTRL2,
USB2_PHY_REG_MAX,
};
@@ -120,6 +122,10 @@
struct regulator_desc dpdm_rdesc;
struct regulator_dev *dpdm_rdev;
+ u32 sq_ctrl1_default;
+ u32 sq_ctrl2_default;
+ bool chirp_disable;
+
/* emulation targets specific */
void __iomem *emu_phy_base;
bool emulation;
@@ -129,6 +135,10 @@
int phy_pll_reset_seq_len;
int *emu_dcm_reset_seq;
int emu_dcm_reset_seq_len;
+
+ /* override TUNEX registers value */
+ struct dentry *root;
+ u8 tune[5];
};
static void qusb_phy_enable_clocks(struct qusb_phy *qphy, bool on)
@@ -410,7 +420,7 @@
static int qusb_phy_init(struct usb_phy *phy)
{
struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
- int ret;
+ int ret, p_index;
u8 reg;
dev_dbg(phy->dev, "%s\n", __func__);
@@ -465,12 +475,12 @@
qphy->base + qphy->phy_reg[PORT_TUNE1]);
}
- /* If phy_tune1 modparam set, override tune1 value */
- if (phy_tune1) {
- pr_debug("%s(): (modparam) TUNE1 val:0x%02x\n",
- __func__, phy_tune1);
- writel_relaxed(phy_tune1,
- qphy->base + qphy->phy_reg[PORT_TUNE1]);
+ /* if debugfs based tunex params are set, use that value. */
+ for (p_index = 0; p_index < 5; p_index++) {
+ if (qphy->tune[p_index])
+ writel_relaxed(qphy->tune[p_index],
+ qphy->base + qphy->phy_reg[PORT_TUNE1] +
+ (4 * p_index));
}
if (qphy->refgen_north_bg_reg)
@@ -651,6 +661,52 @@
return 0;
}
+static int qusb_phy_disable_chirp(struct usb_phy *phy, bool disable)
+{
+ struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+ int ret = 0;
+
+ dev_dbg(phy->dev, "%s qphy chirp disable %d disable %d\n", __func__,
+ qphy->chirp_disable, disable);
+
+ mutex_lock(&qphy->lock);
+
+ if (qphy->chirp_disable == disable) {
+ ret = -EALREADY;
+ goto done;
+ }
+
+ qphy->chirp_disable = disable;
+
+ if (disable) {
+ qphy->sq_ctrl1_default =
+ readl_relaxed(qphy->base + qphy->phy_reg[SQ_CTRL1]);
+ qphy->sq_ctrl2_default =
+ readl_relaxed(qphy->base + qphy->phy_reg[SQ_CTRL2]);
+
+ writel_relaxed(SQ_CTRL1_CHIRP_DISABLE,
+ qphy->base + qphy->phy_reg[SQ_CTRL1]);
+ readl_relaxed(qphy->base + qphy->phy_reg[SQ_CTRL1]);
+
+ /* use the SQ_CTRL2 disable value here, not the SQ_CTRL1 one */
+ writel_relaxed(SQ_CTRL2_CHIRP_DISABLE,
+ qphy->base + qphy->phy_reg[SQ_CTRL2]);
+ readl_relaxed(qphy->base + qphy->phy_reg[SQ_CTRL2]);
+
+ goto done;
+ }
+
+ writel_relaxed(qphy->sq_ctrl1_default,
+ qphy->base + qphy->phy_reg[SQ_CTRL1]);
+ readl_relaxed(qphy->base + qphy->phy_reg[SQ_CTRL1]);
+
+ writel_relaxed(qphy->sq_ctrl2_default,
+ qphy->base + qphy->phy_reg[SQ_CTRL2]);
+ readl_relaxed(qphy->base + qphy->phy_reg[SQ_CTRL2]);
+done:
+ mutex_unlock(&qphy->lock);
+ return ret;
+}
+
static int qusb_phy_dpdm_regulator_enable(struct regulator_dev *rdev)
{
int ret = 0;
@@ -736,6 +792,38 @@
return 0;
}
+static int qusb_phy_create_debugfs(struct qusb_phy *qphy)
+{
+ struct dentry *file;
+ int ret = 0, i;
+ char name[6];
+
+ qphy->root = debugfs_create_dir(dev_name(qphy->phy.dev), NULL);
+ if (IS_ERR_OR_NULL(qphy->root)) {
+ dev_err(qphy->phy.dev,
+ "can't create debugfs root for %s\n",
+ dev_name(qphy->phy.dev));
+ ret = -ENOMEM;
+ goto create_err;
+ }
+
+ for (i = 0; i < 5; i++) {
+ snprintf(name, sizeof(name), "tune%d", (i + 1));
+ file = debugfs_create_x8(name, 0644, qphy->root,
+ &qphy->tune[i]);
+ if (IS_ERR_OR_NULL(file)) {
+ dev_err(qphy->phy.dev,
+ "can't create debugfs entry for %s\n", name);
+ debugfs_remove_recursive(qphy->root);
+ ret = -ENOMEM;
+ goto create_err;
+ }
+ }
+
+create_err:
+ return ret;
+}
+
static int qusb_phy_probe(struct platform_device *pdev)
{
struct qusb_phy *qphy;
@@ -1004,6 +1092,7 @@
qphy->phy.type = USB_PHY_TYPE_USB2;
qphy->phy.notify_connect = qusb_phy_notify_connect;
qphy->phy.notify_disconnect = qusb_phy_notify_disconnect;
+ qphy->phy.disable_chirp = qusb_phy_disable_chirp;
ret = usb_add_phy_dev(&qphy->phy);
if (ret)
@@ -1013,6 +1102,8 @@
if (ret)
usb_remove_phy(&qphy->phy);
+ qusb_phy_create_debugfs(qphy);
+
return ret;
}
@@ -1023,6 +1114,7 @@
usb_remove_phy(&qphy->phy);
qusb_phy_enable_clocks(qphy, false);
qusb_phy_enable_power(qphy, false);
+ debugfs_remove_recursive(qphy->root);
return 0;
}
diff --git a/drivers/video/fbdev/pmag-ba-fb.c b/drivers/video/fbdev/pmag-ba-fb.c
index 5872bc4..df02fb4 100644
--- a/drivers/video/fbdev/pmag-ba-fb.c
+++ b/drivers/video/fbdev/pmag-ba-fb.c
@@ -129,7 +129,7 @@
/*
* Turn the hardware cursor off.
*/
-static void __init pmagbafb_erase_cursor(struct fb_info *info)
+static void pmagbafb_erase_cursor(struct fb_info *info)
{
struct pmagbafb_par *par = info->par;
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 26e5e85..9122ba2 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -277,8 +277,16 @@
err = xenbus_transaction_start(&xbt);
if (err)
return;
- if (!xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key)) {
- pr_err("Unable to read sysrq code in control/sysrq\n");
+ err = xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key);
+ if (err < 0) {
+ /*
+ * The Xenstore watch fires directly after registering it and
+ * after a suspend/resume cycle. So ENOENT is no error but
+ * might happen in those cases.
+ */
+ if (err != -ENOENT)
+ pr_err("Error %d reading sysrq code in control/sysrq\n",
+ err);
xenbus_transaction_end(xbt, 1);
return;
}
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index dd3e236..d9cbda2 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -193,7 +193,8 @@
struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
int i;
- if (unlikely(direntry->d_name.len >
+ if (unlikely(tcon->fsAttrInfo.MaxPathNameComponentLength &&
+ direntry->d_name.len >
le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength)))
return -ENAMETOOLONG;
@@ -509,7 +510,7 @@
rc = check_name(direntry, tcon);
if (rc)
- goto out_free_xid;
+ goto out;
server = tcon->ses->server;
diff --git a/fs/crypto/Makefile b/fs/crypto/Makefile
index f17684c..facf63c 100644
--- a/fs/crypto/Makefile
+++ b/fs/crypto/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_FS_ENCRYPTION) += fscrypto.o
+ccflags-y += -Ifs/ext4
fscrypto-y := crypto.o fname.o policy.o keyinfo.o
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 61cfcce..5c24071 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -28,6 +28,7 @@
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/fscrypto.h>
+#include "ext4_ice.h"
static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;
@@ -406,6 +407,9 @@
bio_for_each_segment_all(bv, bio, i) {
struct page *page = bv->bv_page;
+ if (ext4_is_ice_enabled())
+ SetPageUptodate(page);
+ else {
int ret = fscrypt_decrypt_page(page);
if (ret) {
@@ -414,6 +418,7 @@
} else {
SetPageUptodate(page);
}
+ }
unlock_page(page);
}
fscrypt_release_ctx(ctx);
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index a755fa1..106e55c 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -11,6 +11,7 @@
#include <keys/user-type.h>
#include <linux/scatterlist.h>
#include <linux/fscrypto.h>
+#include "ext4_ice.h"
static void derive_crypt_complete(struct crypto_async_request *req, int rc)
{
@@ -135,13 +136,17 @@
}
static int determine_cipher_type(struct fscrypt_info *ci, struct inode *inode,
- const char **cipher_str_ret, int *keysize_ret)
+ const char **cipher_str_ret, int *keysize_ret, int *fname)
{
if (S_ISREG(inode->i_mode)) {
if (ci->ci_data_mode == FS_ENCRYPTION_MODE_AES_256_XTS) {
*cipher_str_ret = "xts(aes)";
*keysize_ret = FS_AES_256_XTS_KEY_SIZE;
return 0;
+ } else if (ci->ci_data_mode == FS_ENCRYPTION_MODE_PRIVATE) {
+ *cipher_str_ret = "bugon";
+ *keysize_ret = FS_AES_256_XTS_KEY_SIZE;
+ return 0;
}
pr_warn_once("fscrypto: unsupported contents encryption mode "
"%d for inode %lu\n",
@@ -153,6 +158,7 @@
if (ci->ci_filename_mode == FS_ENCRYPTION_MODE_AES_256_CTS) {
*cipher_str_ret = "cts(cbc(aes))";
*keysize_ret = FS_AES_256_CTS_KEY_SIZE;
+ *fname = 1;
return 0;
}
pr_warn_once("fscrypto: unsupported filenames encryption mode "
@@ -172,9 +178,26 @@
return;
crypto_free_skcipher(ci->ci_ctfm);
+ memzero_explicit(ci->ci_raw_key,
+ sizeof(ci->ci_raw_key));
kmem_cache_free(fscrypt_info_cachep, ci);
}
+static int fs_data_encryption_mode(void)
+{
+ return ext4_is_ice_enabled() ? FS_ENCRYPTION_MODE_PRIVATE :
+ FS_ENCRYPTION_MODE_AES_256_XTS;
+}
+
+int fs_using_hardware_encryption(struct inode *inode)
+{
+ struct fscrypt_info *ci = inode->i_crypt_info;
+
+ return S_ISREG(inode->i_mode) && ci &&
+ ci->ci_data_mode == FS_ENCRYPTION_MODE_PRIVATE;
+}
+EXPORT_SYMBOL(fs_using_hardware_encryption);
+
int fscrypt_get_encryption_info(struct inode *inode)
{
struct fscrypt_info *crypt_info;
@@ -182,8 +205,8 @@
struct crypto_skcipher *ctfm;
const char *cipher_str;
int keysize;
- u8 *raw_key = NULL;
int res;
+ int fname = 0;
if (inode->i_crypt_info)
return 0;
@@ -200,7 +223,7 @@
if (!fscrypt_dummy_context_enabled(inode))
return res;
ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1;
- ctx.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS;
+ ctx.contents_encryption_mode = fs_data_encryption_mode();
ctx.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS;
ctx.flags = 0;
} else if (res != sizeof(ctx)) {
@@ -224,7 +247,8 @@
memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor,
sizeof(crypt_info->ci_master_key));
- res = determine_cipher_type(crypt_info, inode, &cipher_str, &keysize);
+ res = determine_cipher_type(crypt_info, inode, &cipher_str, &keysize,
+ &fname);
if (res)
goto out;
@@ -233,24 +257,21 @@
* crypto API as part of key derivation.
*/
res = -ENOMEM;
- raw_key = kmalloc(FS_MAX_KEY_SIZE, GFP_NOFS);
- if (!raw_key)
- goto out;
if (fscrypt_dummy_context_enabled(inode)) {
- memset(raw_key, 0x42, FS_AES_256_XTS_KEY_SIZE);
+ memset(crypt_info->ci_raw_key, 0x42, FS_AES_256_XTS_KEY_SIZE);
goto got_key;
}
- res = validate_user_key(crypt_info, &ctx, raw_key,
+ res = validate_user_key(crypt_info, &ctx, crypt_info->ci_raw_key,
FS_KEY_DESC_PREFIX, FS_KEY_DESC_PREFIX_SIZE);
if (res && inode->i_sb->s_cop->key_prefix) {
u8 *prefix = NULL;
int prefix_size, res2;
prefix_size = inode->i_sb->s_cop->key_prefix(inode, &prefix);
- res2 = validate_user_key(crypt_info, &ctx, raw_key,
- prefix, prefix_size);
+ res2 = validate_user_key(crypt_info, &ctx,
+ crypt_info->ci_raw_key, prefix, prefix_size);
if (res2) {
if (res2 == -ENOKEY)
res = -ENOKEY;
@@ -260,28 +281,33 @@
goto out;
}
got_key:
- ctfm = crypto_alloc_skcipher(cipher_str, 0, 0);
- if (!ctfm || IS_ERR(ctfm)) {
- res = ctfm ? PTR_ERR(ctfm) : -ENOMEM;
- printk(KERN_DEBUG
- "%s: error %d (inode %u) allocating crypto tfm\n",
- __func__, res, (unsigned) inode->i_ino);
+ if (crypt_info->ci_data_mode != FS_ENCRYPTION_MODE_PRIVATE || fname) {
+ ctfm = crypto_alloc_skcipher(cipher_str, 0, 0);
+ if (!ctfm || IS_ERR(ctfm)) {
+ res = ctfm ? PTR_ERR(ctfm) : -ENOMEM;
+ pr_err("%s: error %d inode %u allocating crypto tfm\n",
+ __func__, res, (unsigned int) inode->i_ino);
+ goto out;
+ }
+ crypt_info->ci_ctfm = ctfm;
+ crypto_skcipher_clear_flags(ctfm, ~0);
+ crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_REQ_WEAK_KEY);
+ res = crypto_skcipher_setkey(ctfm, crypt_info->ci_raw_key,
+ keysize);
+ if (res)
+ goto out;
+ } else if (!ext4_is_ice_enabled()) {
+ pr_warn("%s: ICE support not available\n",
+ __func__);
+ res = -EINVAL;
goto out;
}
- crypt_info->ci_ctfm = ctfm;
- crypto_skcipher_clear_flags(ctfm, ~0);
- crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_REQ_WEAK_KEY);
- res = crypto_skcipher_setkey(ctfm, raw_key, keysize);
- if (res)
- goto out;
-
if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) == NULL)
crypt_info = NULL;
out:
if (res == -ENOKEY)
res = 0;
put_crypt_info(crypt_info);
- kzfree(raw_key);
return res;
}
EXPORT_SYMBOL(fscrypt_get_encryption_info);
diff --git a/fs/direct-io.c b/fs/direct-io.c
index c6220a2..bf03a92 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -411,6 +411,7 @@
if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty)
bio_set_pages_dirty(bio);
+ bio->bi_dio_inode = dio->inode;
dio->bio_bdev = bio->bi_bdev;
if (sdio->submit_io) {
@@ -424,6 +425,18 @@
sdio->logical_offset_in_bio = 0;
}
+struct inode *dio_bio_get_inode(struct bio *bio)
+{
+ struct inode *inode = NULL;
+
+ if (bio == NULL)
+ return NULL;
+
+ inode = bio->bi_dio_inode;
+
+ return inode;
+}
+EXPORT_SYMBOL(dio_bio_get_inode);
/*
* Release any resources in case of a failure
*/
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index e38039f..e9232a0 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -109,10 +109,16 @@
decrypted pages in the page cache.
config EXT4_FS_ENCRYPTION
- bool
- default y
+ bool "Ext4 FS Encryption"
+ default n
depends on EXT4_ENCRYPTION
+config EXT4_FS_ICE_ENCRYPTION
+ bool "Ext4 Encryption with ICE support"
+ default n
+ depends on EXT4_FS_ENCRYPTION
+ depends on PFK
+
config EXT4_DEBUG
bool "EXT4 debugging support"
depends on EXT4_FS
diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile
index 354103f..d9e563a 100644
--- a/fs/ext4/Makefile
+++ b/fs/ext4/Makefile
@@ -1,6 +1,7 @@
#
# Makefile for the linux ext4-filesystem routines.
#
+ccflags-y += -Ifs/crypto
obj-$(CONFIG_EXT4_FS) += ext4.o
@@ -12,3 +13,4 @@
ext4-$(CONFIG_EXT4_FS_POSIX_ACL) += acl.o
ext4-$(CONFIG_EXT4_FS_SECURITY) += xattr_security.o
+ext4-$(CONFIG_EXT4_FS_ICE_ENCRYPTION) += ext4_ice.o
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 20ee0e4..9b67de7 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2352,6 +2352,7 @@
#define fscrypt_fname_free_buffer fscrypt_notsupp_fname_free_buffer
#define fscrypt_fname_disk_to_usr fscrypt_notsupp_fname_disk_to_usr
#define fscrypt_fname_usr_to_disk fscrypt_notsupp_fname_usr_to_disk
+#define fs_using_hardware_encryption fs_notsupp_using_hardware_encryption
#endif
/* dir.c */
diff --git a/fs/ext4/ext4_ice.c b/fs/ext4/ext4_ice.c
new file mode 100644
index 0000000..25f79ae
--- /dev/null
+++ b/fs/ext4/ext4_ice.c
@@ -0,0 +1,107 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "ext4_ice.h"
+
+/*
+ * Retrieves encryption key from the inode
+ */
+char *ext4_get_ice_encryption_key(const struct inode *inode)
+{
+ struct fscrypt_info *ci = NULL;
+
+ if (!inode)
+ return NULL;
+
+ ci = inode->i_crypt_info;
+ if (!ci)
+ return NULL;
+
+ return &(ci->ci_raw_key[0]);
+}
+
+/*
+ * Retrieves encryption salt from the inode
+ */
+char *ext4_get_ice_encryption_salt(const struct inode *inode)
+{
+ struct fscrypt_info *ci = NULL;
+
+ if (!inode)
+ return NULL;
+
+ ci = inode->i_crypt_info;
+ if (!ci)
+ return NULL;
+
+ return &(ci->ci_raw_key[ext4_get_ice_encryption_key_size(inode)]);
+}
+
+/*
+ * returns true if the cipher mode in inode is AES XTS
+ */
+int ext4_is_aes_xts_cipher(const struct inode *inode)
+{
+ struct fscrypt_info *ci = NULL;
+
+ ci = inode->i_crypt_info;
+ if (!ci)
+ return 0;
+
+ return (ci->ci_data_mode == FS_ENCRYPTION_MODE_PRIVATE);
+}
+
+/*
+ * returns true if encryption info in both inodes is equal
+ */
+int ext4_is_ice_encryption_info_equal(const struct inode *inode1,
+ const struct inode *inode2)
+{
+ char *key1 = NULL;
+ char *key2 = NULL;
+ char *salt1 = NULL;
+ char *salt2 = NULL;
+
+ if (!inode1 || !inode2)
+ return 0;
+
+ if (inode1 == inode2)
+ return 1;
+
+ /* both do not belong to ice, so we don't care, they are equal for us */
+ if (!ext4_should_be_processed_by_ice(inode1) &&
+ !ext4_should_be_processed_by_ice(inode2))
+ return 1;
+
+ /* one belongs to ice, the other does not -> not equal */
+ if (ext4_should_be_processed_by_ice(inode1) ^
+ ext4_should_be_processed_by_ice(inode2))
+ return 0;
+
+ key1 = ext4_get_ice_encryption_key(inode1);
+ key2 = ext4_get_ice_encryption_key(inode2);
+ salt1 = ext4_get_ice_encryption_salt(inode1);
+ salt2 = ext4_get_ice_encryption_salt(inode2);
+
+ /* key and salt should not be null by this point */
+ if (!key1 || !key2 || !salt1 || !salt2 ||
+ (ext4_get_ice_encryption_key_size(inode1) !=
+ ext4_get_ice_encryption_key_size(inode2)) ||
+ (ext4_get_ice_encryption_salt_size(inode1) !=
+ ext4_get_ice_encryption_salt_size(inode2)))
+ return 0;
+
+ return ((memcmp(key1, key2,
+ ext4_get_ice_encryption_key_size(inode1)) == 0) &&
+ (memcmp(salt1, salt2,
+ ext4_get_ice_encryption_salt_size(inode1)) == 0));
+}
diff --git a/fs/ext4/ext4_ice.h b/fs/ext4/ext4_ice.h
new file mode 100644
index 0000000..04e09bf
--- /dev/null
+++ b/fs/ext4/ext4_ice.h
@@ -0,0 +1,104 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _EXT4_ICE_H
+#define _EXT4_ICE_H
+
+#include "ext4.h"
+#include <linux/fscrypto.h>
+
+#ifdef CONFIG_EXT4_FS_ICE_ENCRYPTION
+static inline int ext4_should_be_processed_by_ice(const struct inode *inode)
+{
+ if (!ext4_encrypted_inode((struct inode *)inode))
+ return 0;
+
+ return fs_using_hardware_encryption((struct inode *)inode);
+}
+
+static inline int ext4_is_ice_enabled(void)
+{
+ return 1;
+}
+
+int ext4_is_aes_xts_cipher(const struct inode *inode);
+
+char *ext4_get_ice_encryption_key(const struct inode *inode);
+char *ext4_get_ice_encryption_salt(const struct inode *inode);
+
+int ext4_is_ice_encryption_info_equal(const struct inode *inode1,
+ const struct inode *inode2);
+
+static inline size_t ext4_get_ice_encryption_key_size(
+ const struct inode *inode)
+{
+ return FS_AES_256_XTS_KEY_SIZE / 2;
+}
+
+static inline size_t ext4_get_ice_encryption_salt_size(
+ const struct inode *inode)
+{
+ return FS_AES_256_XTS_KEY_SIZE / 2;
+}
+
+#else
+static inline int ext4_should_be_processed_by_ice(const struct inode *inode)
+{
+ return 0;
+}
+static inline int ext4_is_ice_enabled(void)
+{
+ return 0;
+}
+
+static inline char *ext4_get_ice_encryption_key(const struct inode *inode)
+{
+ return NULL;
+}
+
+static inline char *ext4_get_ice_encryption_salt(const struct inode *inode)
+{
+ return NULL;
+}
+
+static inline size_t ext4_get_ice_encryption_key_size(
+ const struct inode *inode)
+{
+ return 0;
+}
+
+static inline size_t ext4_get_ice_encryption_salt_size(
+ const struct inode *inode)
+{
+ return 0;
+}
+
+static inline int ext4_is_xts_cipher(const struct inode *inode)
+{
+ return 0;
+}
+
+static inline int ext4_is_ice_encryption_info_equal(
+ const struct inode *inode1,
+ const struct inode *inode2)
+{
+ return 0;
+}
+
+static inline int ext4_is_aes_xts_cipher(const struct inode *inode)
+{
+ return 0;
+}
+
+#endif
+
+#endif /* _EXT4_ICE_H */
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 496c9b5..dcb9669 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -42,6 +42,7 @@
#include "xattr.h"
#include "acl.h"
#include "truncate.h"
+#include "ext4_ice.h"
#include <trace/events/ext4.h>
#include <trace/events/android_fs.h>
@@ -1152,7 +1153,8 @@
ll_rw_block(REQ_OP_READ, 0, 1, &bh);
*wait_bh++ = bh;
decrypt = ext4_encrypted_inode(inode) &&
- S_ISREG(inode->i_mode);
+ S_ISREG(inode->i_mode) &&
+ !ext4_is_ice_enabled();
}
}
/*
@@ -3509,7 +3511,8 @@
get_block_func = ext4_dio_get_block_unwritten_async;
dio_flags = DIO_LOCKING;
}
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#if defined(CONFIG_EXT4_FS_ENCRYPTION) && \
+!defined(CONFIG_EXT4_FS_ICE_ENCRYPTION)
BUG_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode));
#endif
if (IS_DAX(inode)) {
@@ -3623,7 +3626,8 @@
ssize_t ret;
int rw = iov_iter_rw(iter);
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#if defined(CONFIG_EXT4_FS_ENCRYPTION) && \
+!defined(CONFIG_EXT4_FS_ICE_ENCRYPTION)
if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode))
return 0;
#endif
@@ -3820,7 +3824,8 @@
if (!buffer_uptodate(bh))
goto unlock;
if (S_ISREG(inode->i_mode) &&
- ext4_encrypted_inode(inode)) {
+ ext4_encrypted_inode(inode) &&
+ !fs_using_hardware_encryption(inode)) {
/* We expect the key to be set. */
BUG_ON(!fscrypt_has_encryption_key(inode));
BUG_ON(blocksize != PAGE_SIZE);
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index cec9280..1ddceb6 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -773,10 +773,6 @@
case EXT4_IOC_SET_ENCRYPTION_POLICY: {
#ifdef CONFIG_EXT4_FS_ENCRYPTION
struct fscrypt_policy policy;
-
- if (!ext4_has_feature_encrypt(sb))
- return -EOPNOTSUPP;
-
if (copy_from_user(&policy,
(struct fscrypt_policy __user *)arg,
sizeof(policy)))
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index df8168f..e5e99a7 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2136,8 +2136,10 @@
* We search using buddy data only if the order of the request
* is greater than equal to the sbi_s_mb_order2_reqs
* You can tune it via /sys/fs/ext4/<partition>/mb_order2_req
+ * We also support searching for power-of-two requests only for
+ * requests up to the maximum buddy size we have constructed.
*/
- if (i >= sbi->s_mb_order2_reqs) {
+ if (i >= sbi->s_mb_order2_reqs && i <= sb->s_blocksize_bits + 2) {
/*
* This should tell if fe_len is exactly power of 2
*/
@@ -2207,7 +2209,7 @@
}
ac->ac_groups_scanned++;
- if (cr == 0 && ac->ac_2order < sb->s_blocksize_bits+2)
+ if (cr == 0)
ext4_mb_simple_scan_group(ac, &e4b);
else if (cr == 1 && sbi->s_stripe &&
!(ac->ac_g_ex.fe_len % sbi->s_stripe))
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 0094923..d8a0770 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -29,6 +29,7 @@
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
+#include "ext4_ice.h"
static struct kmem_cache *io_end_cachep;
@@ -470,6 +471,7 @@
gfp_t gfp_flags = GFP_NOFS;
retry_encrypt:
+ if (!fs_using_hardware_encryption(inode))
data_page = fscrypt_encrypt_page(inode, page, gfp_flags);
if (IS_ERR(data_page)) {
ret = PTR_ERR(data_page);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index f72535e..1f58179 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2628,9 +2628,9 @@
if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group)
ret = sbi->s_stripe;
- else if (stripe_width <= sbi->s_blocks_per_group)
+ else if (stripe_width && stripe_width <= sbi->s_blocks_per_group)
ret = stripe_width;
- else if (stride <= sbi->s_blocks_per_group)
+ else if (stride && stride <= sbi->s_blocks_per_group)
ret = stride;
else
ret = 0;
diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
index 1d9a8c4..57b0902 100644
--- a/fs/fat/fatent.c
+++ b/fs/fat/fatent.c
@@ -92,7 +92,8 @@
err_brelse:
brelse(bhs[0]);
err:
- fat_msg(sb, KERN_ERR, "FAT read failed (blocknr %llu)", (llu)blocknr);
+ fat_msg_ratelimit(sb, KERN_ERR,
+ "FAT read failed (blocknr %llu)", (llu)blocknr);
return -EIO;
}
@@ -105,8 +106,8 @@
fatent->fat_inode = MSDOS_SB(sb)->fat_inode;
fatent->bhs[0] = sb_bread(sb, blocknr);
if (!fatent->bhs[0]) {
- fat_msg(sb, KERN_ERR, "FAT read failed (blocknr %llu)",
- (llu)blocknr);
+ fat_msg_ratelimit(sb, KERN_ERR,
+ "FAT read failed (blocknr %llu)", (llu)blocknr);
return -EIO;
}
fatent->nr_bhs = 1;
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index a2c05f2..0b6ba8c 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -843,8 +843,9 @@
fat_get_blknr_offset(sbi, i_pos, &blocknr, &offset);
bh = sb_bread(sb, blocknr);
if (!bh) {
- fat_msg(sb, KERN_ERR, "unable to read inode block "
- "for updating (i_pos %lld)", i_pos);
+ fat_msg_ratelimit(sb, KERN_ERR,
+ "unable to read inode block for updating (i_pos %lld)",
+ i_pos);
return -EIO;
}
spin_lock(&sbi->inode_hash_lock);
diff --git a/fs/namei.c b/fs/namei.c
index e10895c..2af3818 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2903,6 +2903,11 @@
if (error)
return error;
error = dir->i_op->create(dir, dentry, mode, want_excl);
+ if (error)
+ return error;
+ error = security_inode_post_create(dir, dentry, mode);
+ if (error)
+ return error;
if (!error)
fsnotify_create(dir, dentry);
return error;
@@ -3002,10 +3007,16 @@
static int may_o_create(const struct path *dir, struct dentry *dentry, umode_t mode)
{
+ struct user_namespace *s_user_ns;
int error = security_path_mknod(dir, dentry, mode, 0);
if (error)
return error;
+ s_user_ns = dir->dentry->d_sb->s_user_ns;
+ if (!kuid_has_mapping(s_user_ns, current_fsuid()) ||
+ !kgid_has_mapping(s_user_ns, current_fsgid()))
+ return -EOVERFLOW;
+
error = inode_permission2(dir->mnt, dir->dentry->d_inode, MAY_WRITE | MAY_EXEC);
if (error)
return error;
@@ -3712,6 +3723,13 @@
return error;
error = dir->i_op->mknod(dir, dentry, mode, dev);
+ if (error)
+ return error;
+
+ error = security_inode_post_create(dir, dentry, mode);
+ if (error)
+ return error;
+
if (!error)
fsnotify_create(dir, dentry);
return error;
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index f72712f..06089be 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -7310,13 +7310,24 @@
static int ocfs2_trim_extent(struct super_block *sb,
struct ocfs2_group_desc *gd,
- u32 start, u32 count)
+ u64 group, u32 start, u32 count)
{
u64 discard, bcount;
+ struct ocfs2_super *osb = OCFS2_SB(sb);
bcount = ocfs2_clusters_to_blocks(sb, count);
- discard = le64_to_cpu(gd->bg_blkno) +
- ocfs2_clusters_to_blocks(sb, start);
+ discard = ocfs2_clusters_to_blocks(sb, start);
+
+ /*
+ * For the first cluster group, the gd->bg_blkno is not at the start
+ * of the group, but at an offset from the start. If we add it while
+ * calculating discard for first group, we will wrongly start fstrim a
+ * few blocks after the desired start block and the range can cross
+ * over into the next cluster group. So, add it only if this is not
+ * the first cluster group.
+ */
+ if (group != osb->first_cluster_group_blkno)
+ discard += le64_to_cpu(gd->bg_blkno);
trace_ocfs2_trim_extent(sb, (unsigned long long)discard, bcount);
@@ -7324,7 +7335,7 @@
}
static int ocfs2_trim_group(struct super_block *sb,
- struct ocfs2_group_desc *gd,
+ struct ocfs2_group_desc *gd, u64 group,
u32 start, u32 max, u32 minbits)
{
int ret = 0, count = 0, next;
@@ -7343,7 +7354,7 @@
next = ocfs2_find_next_bit(bitmap, max, start);
if ((next - start) >= minbits) {
- ret = ocfs2_trim_extent(sb, gd,
+ ret = ocfs2_trim_extent(sb, gd, group,
start, next - start);
if (ret < 0) {
mlog_errno(ret);
@@ -7441,7 +7452,8 @@
}
gd = (struct ocfs2_group_desc *)gd_bh->b_data;
- cnt = ocfs2_trim_group(sb, gd, first_bit, last_bit, minlen);
+ cnt = ocfs2_trim_group(sb, gd, group,
+ first_bit, last_bit, minlen);
brelse(gd_bh);
gd_bh = NULL;
if (cnt < 0) {
diff --git a/include/dt-bindings/clock/exynos5433.h b/include/dt-bindings/clock/exynos5433.h
index 4fa6bb2..be39d23 100644
--- a/include/dt-bindings/clock/exynos5433.h
+++ b/include/dt-bindings/clock/exynos5433.h
@@ -771,7 +771,10 @@
#define CLK_PCLK_DECON 113
-#define DISP_NR_CLK 114
+#define CLK_PHYCLK_MIPIDPHY0_BITCLKDIV8_PHY 114
+#define CLK_PHYCLK_MIPIDPHY0_RXCLKESC0_PHY 115
+
+#define DISP_NR_CLK 116
/* CMU_AUD */
#define CLK_MOUT_AUD_PLL_USER 1
diff --git a/include/dt-bindings/clock/qcom,cpu-a7.h b/include/dt-bindings/clock/qcom,cpu-a7.h
new file mode 100644
index 0000000..9b89030
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,cpu-a7.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MSM_CPU_A7_H
+#define _DT_BINDINGS_CLK_MSM_CPU_A7_H
+
+#define SYS_APC0_AUX_CLK 0
+#define APCS_CPU_PLL 1
+#define APCS_CLK 2
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sdxpoorwills.h b/include/dt-bindings/clock/qcom,gcc-sdxpoorwills.h
index e773848..950811f 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdxpoorwills.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdxpoorwills.h
@@ -48,59 +48,60 @@
#define GCC_CPUSS_AHB_CLK 30
#define GCC_CPUSS_AHB_CLK_SRC 31
#define GCC_CPUSS_GNOC_CLK 32
-#define GCC_CPUSS_GPLL0_CLK_SRC 33
-#define GCC_CPUSS_RBCPR_CLK 34
-#define GCC_CPUSS_RBCPR_CLK_SRC 35
-#define GCC_GP1_CLK 36
-#define GCC_GP1_CLK_SRC 37
-#define GCC_GP2_CLK 38
-#define GCC_GP2_CLK_SRC 39
-#define GCC_GP3_CLK 40
-#define GCC_GP3_CLK_SRC 41
-#define GCC_MSS_CFG_AHB_CLK 42
-#define GCC_MSS_GPLL0_DIV_CLK_SRC 43
-#define GCC_MSS_SNOC_AXI_CLK 44
-#define GCC_PCIE_AUX_CLK 45
-#define GCC_PCIE_AUX_PHY_CLK_SRC 46
-#define GCC_PCIE_CFG_AHB_CLK 47
-#define GCC_PCIE_0_CLKREF_EN 48
-#define GCC_PCIE_MSTR_AXI_CLK 49
-#define GCC_PCIE_PHY_REFGEN_CLK 50
-#define GCC_PCIE_PHY_REFGEN_CLK_SRC 51
-#define GCC_PCIE_PIPE_CLK 52
-#define GCC_PCIE_SLEEP_CLK 53
-#define GCC_PCIE_SLV_AXI_CLK 54
-#define GCC_PCIE_SLV_Q2A_AXI_CLK 55
-#define GCC_PDM2_CLK 56
-#define GCC_PDM2_CLK_SRC 57
-#define GCC_PDM_AHB_CLK 58
-#define GCC_PDM_XO4_CLK 59
-#define GCC_PRNG_AHB_CLK 60
-#define GCC_SDCC1_AHB_CLK 61
-#define GCC_SDCC1_APPS_CLK 62
-#define GCC_SDCC1_APPS_CLK_SRC 63
-#define GCC_SPMI_FETCHER_AHB_CLK 64
-#define GCC_SPMI_FETCHER_CLK 65
-#define GCC_SPMI_FETCHER_CLK_SRC 66
-#define GCC_SYS_NOC_CPUSS_AHB_CLK 67
-#define GCC_SYS_NOC_USB3_CLK 68
-#define GCC_USB30_MASTER_CLK 69
-#define GCC_USB30_MASTER_CLK_SRC 70
-#define GCC_USB30_MOCK_UTMI_CLK 71
-#define GCC_USB30_MOCK_UTMI_CLK_SRC 72
-#define GCC_USB30_SLEEP_CLK 73
-#define GCC_USB3_PRIM_CLKREF_CLK 74
-#define GCC_USB3_PHY_AUX_CLK 75
-#define GCC_USB3_PHY_AUX_CLK_SRC 76
-#define GCC_USB3_PHY_PIPE_CLK 77
-#define GCC_USB_PHY_CFG_AHB2PHY_CLK 78
-#define GCC_XO_DIV4_CLK 79
-#define GPLL0 80
-#define GPLL0_OUT_EVEN 81
-
-/* GDSCs */
-#define PCIE_GDSC 0
-#define USB30_GDSC 1
+#define GCC_CPUSS_RBCPR_CLK 33
+#define GCC_CPUSS_RBCPR_CLK_SRC 34
+#define GCC_EMAC_CLK_SRC 35
+#define GCC_EMAC_PTP_CLK_SRC 36
+#define GCC_ETH_AXI_CLK 37
+#define GCC_ETH_PTP_CLK 38
+#define GCC_ETH_RGMII_CLK 39
+#define GCC_ETH_SLAVE_AHB_CLK 40
+#define GCC_GP1_CLK 41
+#define GCC_GP1_CLK_SRC 42
+#define GCC_GP2_CLK 43
+#define GCC_GP2_CLK_SRC 44
+#define GCC_GP3_CLK 45
+#define GCC_GP3_CLK_SRC 46
+#define GCC_MSS_CFG_AHB_CLK 47
+#define GCC_MSS_GPLL0_DIV_CLK_SRC 48
+#define GCC_MSS_SNOC_AXI_CLK 49
+#define GCC_PCIE_AUX_CLK 50
+#define GCC_PCIE_AUX_PHY_CLK_SRC 51
+#define GCC_PCIE_CFG_AHB_CLK 52
+#define GCC_PCIE_MSTR_AXI_CLK 53
+#define GCC_PCIE_PHY_REFGEN_CLK 54
+#define GCC_PCIE_PHY_REFGEN_CLK_SRC 55
+#define GCC_PCIE_PIPE_CLK 56
+#define GCC_PCIE_SLEEP_CLK 57
+#define GCC_PCIE_SLV_AXI_CLK 58
+#define GCC_PCIE_SLV_Q2A_AXI_CLK 59
+#define GCC_PDM2_CLK 60
+#define GCC_PDM2_CLK_SRC 61
+#define GCC_PDM_AHB_CLK 62
+#define GCC_PDM_XO4_CLK 63
+#define GCC_PRNG_AHB_CLK 64
+#define GCC_SDCC1_AHB_CLK 65
+#define GCC_SDCC1_APPS_CLK 66
+#define GCC_SDCC1_APPS_CLK_SRC 67
+#define GCC_SPMI_FETCHER_AHB_CLK 68
+#define GCC_SPMI_FETCHER_CLK 69
+#define GCC_SPMI_FETCHER_CLK_SRC 70
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 71
+#define GCC_SYS_NOC_USB3_CLK 72
+#define GCC_USB30_MASTER_CLK 73
+#define GCC_USB30_MASTER_CLK_SRC 74
+#define GCC_USB30_MOCK_UTMI_CLK 75
+#define GCC_USB30_MOCK_UTMI_CLK_SRC 76
+#define GCC_USB30_SLEEP_CLK 77
+#define GCC_USB3_PHY_AUX_CLK 78
+#define GCC_USB3_PHY_AUX_CLK_SRC 79
+#define GCC_USB3_PHY_PIPE_CLK 80
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK 81
+#define GPLL0 82
+#define GPLL0_OUT_EVEN 83
+#define GPLL4 84
+#define GPLL4_OUT_EVEN 85
+#define GCC_USB3_PRIM_CLKREF_CLK 86
/* CPU clocks */
#define CLOCK_A7SS 0
@@ -125,5 +126,6 @@
#define GCC_USB3PHY_PHY_BCR 16
#define GCC_QUSB2PHY_BCR 17
#define GCC_USB_PHY_CFG_AHB2PHY_BCR 18
+#define GCC_EMAC_BCR 19
#endif
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 2b8b6e0..8a7a15c 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -81,6 +81,12 @@
struct bio_set *bi_pool;
/*
+ * When using direct-io (O_DIRECT), we can't get the inode from a bio
+ * by walking bio->bi_io_vec->bv_page->mapping->host
+ * since the page is anon.
+ */
+ struct inode *bi_dio_inode;
+ /*
* We can inline a number of vecs at the end of the bio, to avoid
* double allocations for a small number of bio_vecs. This member
* MUST obviously be kept at the very end of the bio.
diff --git a/include/linux/elf.h b/include/linux/elf.h
index 20fa8d8..611e3ae 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -42,6 +42,39 @@
#endif
+/* Generic helpers for ELF use */
+/* Return first section header */
+static inline struct elf_shdr *elf_sheader(struct elfhdr *hdr)
+{
+ return (struct elf_shdr *)((size_t)hdr + (size_t)hdr->e_shoff);
+}
+
+/* Return idx section header */
+static inline struct elf_shdr *elf_section(struct elfhdr *hdr, int idx)
+{
+ return &elf_sheader(hdr)[idx];
+}
+
+/* Return first program header */
+static inline struct elf_phdr *elf_pheader(struct elfhdr *hdr)
+{
+ return (struct elf_phdr *)((size_t)hdr + (size_t)hdr->e_phoff);
+}
+
+/* Return idx program header */
+static inline struct elf_phdr *elf_program(struct elfhdr *hdr, int idx)
+{
+ return &elf_pheader(hdr)[idx];
+}
+
+/* Return section's string table header */
+static inline char *elf_str_table(struct elfhdr *hdr)
+{
+ if (hdr->e_shstrndx == SHN_UNDEF)
+ return NULL;
+ return (char *)hdr + elf_section(hdr, hdr->e_shstrndx)->sh_offset;
+}
+
/* Optional callbacks to write extra ELF notes. */
struct file;
struct coredump_params;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 18bd249..4f6ec47 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2925,6 +2925,8 @@
wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
}
+struct inode *dio_bio_get_inode(struct bio *bio);
+
extern void inode_set_flags(struct inode *inode, unsigned int flags,
unsigned int mask);
diff --git a/include/linux/fscrypto.h b/include/linux/fscrypto.h
index f6dfc29..9b57c19 100644
--- a/include/linux/fscrypto.h
+++ b/include/linux/fscrypto.h
@@ -34,6 +34,7 @@
#define FS_ENCRYPTION_MODE_AES_256_GCM 2
#define FS_ENCRYPTION_MODE_AES_256_CBC 3
#define FS_ENCRYPTION_MODE_AES_256_CTS 4
+#define FS_ENCRYPTION_MODE_PRIVATE 127
/**
* Encryption context for inode
@@ -80,6 +81,7 @@
u8 ci_flags;
struct crypto_skcipher *ci_ctfm;
u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE];
+ u8 ci_raw_key[FS_MAX_KEY_SIZE];
};
#define FS_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001
@@ -176,7 +178,8 @@
static inline bool fscrypt_valid_contents_enc_mode(u32 mode)
{
- return (mode == FS_ENCRYPTION_MODE_AES_256_XTS);
+ return (mode == FS_ENCRYPTION_MODE_AES_256_XTS ||
+ mode == FS_ENCRYPTION_MODE_PRIVATE);
}
static inline bool fscrypt_valid_filenames_enc_mode(u32 mode)
@@ -257,6 +260,7 @@
/* keyinfo.c */
extern int fscrypt_get_encryption_info(struct inode *);
extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *);
+extern int fs_using_hardware_encryption(struct inode *inode);
/* fname.c */
extern int fscrypt_setup_filename(struct inode *, const struct qstr *,
@@ -354,6 +358,11 @@
return;
}
+static inline int fs_notsupp_using_hardware_encryption(struct inode *inode)
+{
+ return -EOPNOTSUPP;
+}
+
/* fname.c */
static inline int fscrypt_notsupp_setup_filename(struct inode *dir,
const struct qstr *iname,
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 46cd745..16ef407 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -189,7 +189,7 @@
#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE)
/* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT 26
+#define __GFP_BITS_SHIFT 27
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
/*
diff --git a/include/linux/ipa.h b/include/linux/ipa.h
index dd6849d..405aed5 100644
--- a/include/linux/ipa.h
+++ b/include/linux/ipa.h
@@ -1175,6 +1175,28 @@
u64 size;
};
+/**
+ * struct ipa_smmu_in_params - information provided from client
+ * @ipa_smmu_client_type: client requesting the SMMU info.
+ */
+
+enum ipa_smmu_client_type {
+ IPA_SMMU_WLAN_CLIENT,
+ IPA_SMMU_CLIENT_MAX
+};
+
+struct ipa_smmu_in_params {
+ enum ipa_smmu_client_type smmu_client;
+};
+
+/**
+ * struct ipa_smmu_out_params - information provided to IPA client
+ * @smmu_enable: IPA S1 SMMU enable/disable status
+ */
+struct ipa_smmu_out_params {
+ bool smmu_enable;
+};
+
#if defined CONFIG_IPA || defined CONFIG_IPA3
/*
@@ -1564,6 +1586,9 @@
*/
int ipa_tz_unlock_reg(struct ipa_tz_unlock_reg_info *reg_info, u16 num_regs);
+int ipa_get_smmu_params(struct ipa_smmu_in_params *in,
+ struct ipa_smmu_out_params *out);
+
#else /* (CONFIG_IPA || CONFIG_IPA3) */
/*
@@ -2351,6 +2376,12 @@
return -EPERM;
}
+
+static inline int ipa_get_smmu_params(struct ipa_smmu_in_params *in,
+ struct ipa_smmu_out_params *out)
+{
+ return -EPERM;
+}
#endif /* (CONFIG_IPA || CONFIG_IPA3) */
#endif /* _IPA_H_ */
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 8f5af30..580cc10 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -1419,6 +1419,8 @@
size_t *len);
int (*inode_create)(struct inode *dir, struct dentry *dentry,
umode_t mode);
+ int (*inode_post_create)(struct inode *dir, struct dentry *dentry,
+ umode_t mode);
int (*inode_link)(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry);
int (*inode_unlink)(struct inode *dir, struct dentry *dentry);
@@ -1706,6 +1708,7 @@
struct list_head inode_free_security;
struct list_head inode_init_security;
struct list_head inode_create;
+ struct list_head inode_post_create;
struct list_head inode_link;
struct list_head inode_unlink;
struct list_head inode_symlink;
diff --git a/include/linux/msm_ep_pcie.h b/include/linux/msm_ep_pcie.h
new file mode 100644
index 0000000..a1d2a17
--- /dev/null
+++ b/include/linux/msm_ep_pcie.h
@@ -0,0 +1,290 @@
+/* Copyright (c) 2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_EP_PCIE_H
+#define __MSM_EP_PCIE_H
+
+#include <linux/types.h>
+
+enum ep_pcie_link_status {
+ EP_PCIE_LINK_DISABLED,
+ EP_PCIE_LINK_UP,
+ EP_PCIE_LINK_ENABLED,
+};
+
+enum ep_pcie_event {
+ EP_PCIE_EVENT_INVALID = 0,
+ EP_PCIE_EVENT_PM_D0 = 0x1,
+ EP_PCIE_EVENT_PM_D3_HOT = 0x2,
+ EP_PCIE_EVENT_PM_D3_COLD = 0x4,
+ EP_PCIE_EVENT_PM_RST_DEAST = 0x8,
+ EP_PCIE_EVENT_LINKDOWN = 0x10,
+ EP_PCIE_EVENT_LINKUP = 0x20,
+ EP_PCIE_EVENT_MHI_A7 = 0x40,
+ EP_PCIE_EVENT_MMIO_WRITE = 0x80,
+};
+
+enum ep_pcie_irq_event {
+ EP_PCIE_INT_EVT_LINK_DOWN = 1,
+ EP_PCIE_INT_EVT_BME,
+ EP_PCIE_INT_EVT_PM_TURNOFF,
+ EP_PCIE_INT_EVT_DEBUG,
+ EP_PCIE_INT_EVT_LTR,
+ EP_PCIE_INT_EVT_MHI_Q6,
+ EP_PCIE_INT_EVT_MHI_A7,
+ EP_PCIE_INT_EVT_DSTATE_CHANGE,
+ EP_PCIE_INT_EVT_L1SUB_TIMEOUT,
+ EP_PCIE_INT_EVT_MMIO_WRITE,
+ EP_PCIE_INT_EVT_CFG_WRITE,
+ EP_PCIE_INT_EVT_BRIDGE_FLUSH_N,
+ EP_PCIE_INT_EVT_LINK_UP,
+ EP_PCIE_INT_EVT_MAX = 13,
+};
+
+enum ep_pcie_trigger {
+ EP_PCIE_TRIGGER_CALLBACK,
+ EP_PCIE_TRIGGER_COMPLETION,
+};
+
+enum ep_pcie_options {
+ EP_PCIE_OPT_NULL = 0,
+ EP_PCIE_OPT_AST_WAKE = 0x1,
+ EP_PCIE_OPT_POWER_ON = 0x2,
+ EP_PCIE_OPT_ENUM = 0x4,
+ EP_PCIE_OPT_ENUM_ASYNC = 0x8,
+ EP_PCIE_OPT_ALL = 0xFFFFFFFF,
+};
+
+struct ep_pcie_notify {
+ enum ep_pcie_event event;
+ void *user;
+ void *data;
+ u32 options;
+};
+
+struct ep_pcie_register_event {
+ u32 events;
+ void *user;
+ enum ep_pcie_trigger mode;
+ void (*callback)(struct ep_pcie_notify *notify);
+ struct ep_pcie_notify notify;
+ struct completion *completion;
+ u32 options;
+};
+
+struct ep_pcie_iatu {
+ u32 start;
+ u32 end;
+ u32 tgt_lower;
+ u32 tgt_upper;
+};
+
+struct ep_pcie_msi_config {
+ u32 lower;
+ u32 upper;
+ u32 data;
+ u32 msg_num;
+};
+
+struct ep_pcie_db_config {
+ u8 base;
+ u8 end;
+ u32 tgt_addr;
+};
+
+struct ep_pcie_hw {
+ struct list_head node;
+ u32 device_id;
+ void **private_data;
+ int (*register_event)(struct ep_pcie_register_event *reg);
+ int (*deregister_event)(void);
+ enum ep_pcie_link_status (*get_linkstatus)(void);
+ int (*config_outbound_iatu)(struct ep_pcie_iatu entries[],
+ u32 num_entries);
+ int (*get_msi_config)(struct ep_pcie_msi_config *cfg);
+ int (*trigger_msi)(u32 idx);
+ int (*wakeup_host)(void);
+ int (*enable_endpoint)(enum ep_pcie_options opt);
+ int (*disable_endpoint)(void);
+ int (*config_db_routing)(struct ep_pcie_db_config chdb_cfg,
+ struct ep_pcie_db_config erdb_cfg);
+ int (*mask_irq_event)(enum ep_pcie_irq_event event,
+ bool enable);
+};
+
+/*
+ * ep_pcie_register_drv - register HW driver.
+ * @phandle: PCIe endpoint HW driver handle
+ *
+ * This function registers PCIe HW driver to PCIe endpoint service
+ * layer.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int ep_pcie_register_drv(struct ep_pcie_hw *phandle);
+
+/*
+ * ep_pcie_deregister_drv - deregister HW driver.
+ * @phandle: PCIe endpoint HW driver handle
+ *
+ * This function deregisters PCIe HW driver to PCIe endpoint service
+ * layer.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int ep_pcie_deregister_drv(struct ep_pcie_hw *phandle);
+
+/*
+ * ep_pcie_get_phandle - get PCIe endpoint HW driver handle.
+ * @id: PCIe endpoint device ID
+ *
+ * This function deregisters PCIe HW driver from PCIe endpoint service
+ * layer.
+ *
+ * Return: PCIe endpoint HW driver handle
+ */
+struct ep_pcie_hw *ep_pcie_get_phandle(u32 id);
+
+/*
+ * ep_pcie_register_event - register event with PCIe driver.
+ * @phandle: PCIe endpoint HW driver handle
+ * @reg: event structure
+ *
+ * This function gives PCIe client driver an option to register
+ * event with PCIe driver.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int ep_pcie_register_event(struct ep_pcie_hw *phandle,
+ struct ep_pcie_register_event *reg);
+
+/*
+ * ep_pcie_deregister_event - deregister event with PCIe driver.
+ * @phandle: PCIe endpoint HW driver handle
+ *
+ * This function gives PCIe client driver an option to deregister
+ * existing event with PCIe driver.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int ep_pcie_deregister_event(struct ep_pcie_hw *phandle);
+
+/*
+ * ep_pcie_get_linkstatus - indicate the status of PCIe link.
+ * @phandle: PCIe endpoint HW driver handle
+ *
+ * This function tells PCIe client about the status of PCIe link.
+ *
+ * Return: status of PCIe link
+ */
+enum ep_pcie_link_status ep_pcie_get_linkstatus(struct ep_pcie_hw *phandle);
+
+/*
+ * ep_pcie_config_outbound_iatu - configure outbound iATU.
+ * @entries: iatu entries
+ * @num_entries: number of iatu entries
+ *
+ * This function configures the outbound iATU for PCIe
+ * client's access to the regions in the host memory which
+ * are specified by the SW on host side.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int ep_pcie_config_outbound_iatu(struct ep_pcie_hw *phandle,
+ struct ep_pcie_iatu entries[],
+ u32 num_entries);
+
+/*
+ * ep_pcie_get_msi_config - get MSI config info.
+ * @phandle: PCIe endpoint HW driver handle
+ * @cfg: pointer to MSI config
+ *
+ * This function returns MSI config info.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int ep_pcie_get_msi_config(struct ep_pcie_hw *phandle,
+ struct ep_pcie_msi_config *cfg);
+
+/*
+ * ep_pcie_trigger_msi - trigger an MSI.
+ * @phandle: PCIe endpoint HW driver handle
+ * @idx: MSI index number
+ *
+ * This function allows PCIe client to trigger an MSI
+ * on host side.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int ep_pcie_trigger_msi(struct ep_pcie_hw *phandle, u32 idx);
+
+/*
+ * ep_pcie_wakeup_host - wake up the host.
+ * @phandle: PCIe endpoint HW driver handle
+ *
+ * This function asserts WAKE GPIO to wake up the host.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int ep_pcie_wakeup_host(struct ep_pcie_hw *phandle);
+
+/*
+ * ep_pcie_enable_endpoint - enable PCIe endpoint.
+ * @phandle: PCIe endpoint HW driver handle
+ * @opt: endpoint enable options
+ *
+ * This function is to enable the PCIe endpoint device.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int ep_pcie_enable_endpoint(struct ep_pcie_hw *phandle,
+ enum ep_pcie_options opt);
+
+/*
+ * ep_pcie_disable_endpoint - disable PCIe endpoint.
+ * @phandle: PCIe endpoint HW driver handle
+ *
+ * This function is to disable the PCIe endpoint device.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int ep_pcie_disable_endpoint(struct ep_pcie_hw *phandle);
+
+/*
+ * ep_pcie_config_db_routing - Configure routing of doorbells to another block.
+ * @phandle: PCIe endpoint HW driver handle
+ * @chdb_cfg: channel doorbell config
+ * @erdb_cfg: event ring doorbell config
+ *
+ * This function allows PCIe core to route the doorbells intended
+ * for another entity via a target address.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int ep_pcie_config_db_routing(struct ep_pcie_hw *phandle,
+ struct ep_pcie_db_config chdb_cfg,
+ struct ep_pcie_db_config erdb_cfg);
+
+/*
+ * ep_pcie_mask_irq_event - enable and disable IRQ event.
+ * @phandle: PCIe endpoint HW driver handle
+ * @event: IRQ event
+ * @enable: true to enable that IRQ event and false to disable
+ *
+ * This function is to enable and disable IRQ event.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int ep_pcie_mask_irq_event(struct ep_pcie_hw *phandle,
+ enum ep_pcie_irq_event event,
+ bool enable);
+#endif
diff --git a/include/linux/netfilter/nf_conntrack_sip.h b/include/linux/netfilter/nf_conntrack_sip.h
index d5af3c2..220380b 100644
--- a/include/linux/netfilter/nf_conntrack_sip.h
+++ b/include/linux/netfilter/nf_conntrack_sip.h
@@ -166,6 +166,11 @@
};
extern const struct nf_nat_sip_hooks *nf_nat_sip_hooks;
+extern void (*nf_nat_sip_seq_adjust_hook)
+ (struct sk_buff *skb,
+ unsigned int protoff,
+ s16 off);
+
int ct_sip_parse_request(const struct nf_conn *ct, const char *dptr,
unsigned int datalen, unsigned int *matchoff,
unsigned int *matchlen, union nf_inet_addr *addr,
diff --git a/include/linux/pfk.h b/include/linux/pfk.h
new file mode 100644
index 0000000..82ee741
--- /dev/null
+++ b/include/linux/pfk.h
@@ -0,0 +1,57 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef PFK_H_
+#define PFK_H_
+
+#include <linux/bio.h>
+
+struct ice_crypto_setting;
+
+#ifdef CONFIG_PFK
+
+int pfk_load_key_start(const struct bio *bio,
+ struct ice_crypto_setting *ice_setting, bool *is_pfe, bool);
+int pfk_load_key_end(const struct bio *bio, bool *is_pfe);
+int pfk_remove_key(const unsigned char *key, size_t key_size);
+bool pfk_allow_merge_bio(const struct bio *bio1, const struct bio *bio2);
+void pfk_clear_on_reset(void);
+
+#else
+static inline int pfk_load_key_start(const struct bio *bio,
+ struct ice_crypto_setting *ice_setting, bool *is_pfe, bool async)
+{
+ return -ENODEV;
+}
+
+static inline int pfk_load_key_end(const struct bio *bio, bool *is_pfe)
+{
+ return -ENODEV;
+}
+
+static inline int pfk_remove_key(const unsigned char *key, size_t key_size)
+{
+ return -ENODEV;
+}
+
+static inline bool pfk_allow_merge_bio(const struct bio *bio1,
+ const struct bio *bio2)
+{
+ return true;
+}
+
+static inline void pfk_clear_on_reset(void)
+{}
+
+#endif /* CONFIG_PFK */
+
+#endif /* PFK_H */
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 8431c8c..a04d69a 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -142,11 +142,7 @@
/* Used when trying to connect to a specific phy (mii bus id:phy device id) */
#define PHY_ID_FMT "%s:%02x"
-/*
- * Need to be a little smaller than phydev->dev.bus_id to leave room
- * for the ":%02x"
- */
-#define MII_BUS_ID_SIZE (20 - 3)
+#define MII_BUS_ID_SIZE 61
/* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit
IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips. */
@@ -602,7 +598,7 @@
/* A Structure for boards to register fixups with the PHY Lib */
struct phy_fixup {
struct list_head list;
- char bus_id[20];
+ char bus_id[MII_BUS_ID_SIZE + 3];
u32 phy_uid;
u32 phy_uid_mask;
int (*run)(struct phy_device *phydev);
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index ba99b33..d253ca6 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -103,6 +103,9 @@
POWER_SUPPLY_DP_DM_HVDCP3_SUPPORTED = 10,
POWER_SUPPLY_DP_DM_ICL_DOWN = 11,
POWER_SUPPLY_DP_DM_ICL_UP = 12,
+ POWER_SUPPLY_DP_DM_FORCE_5V = 13,
+ POWER_SUPPLY_DP_DM_FORCE_9V = 14,
+ POWER_SUPPLY_DP_DM_FORCE_12V = 15,
};
enum {
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 75e4e30..7eeceac 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -65,19 +65,24 @@
/*
* Are we doing bottom half or hardware interrupt processing?
- * Are we in a softirq context? Interrupt context?
- * in_softirq - Are we currently processing softirq or have bh disabled?
- * in_serving_softirq - Are we currently processing softirq?
+ *
+ * in_irq() - We're in (hard) IRQ context
+ * in_softirq() - We have BH disabled, or are processing softirqs
+ * in_interrupt() - We're in NMI,IRQ,SoftIRQ context or have BH disabled
+ * in_serving_softirq() - We're in softirq context
+ * in_nmi() - We're in NMI context
+ * in_task() - We're in task context
+ *
+ * Note: due to the BH disabled confusion: in_softirq(),in_interrupt() really
+ * should not be used in new code.
*/
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
-
-/*
- * Are we in NMI context?
- */
-#define in_nmi() (preempt_count() & NMI_MASK)
+#define in_nmi() (preempt_count() & NMI_MASK)
+#define in_task() (!(preempt_count() & \
+ (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
/*
* The preempt_count offset after preempt_disable();
diff --git a/include/linux/security.h b/include/linux/security.h
index c2125e9..02e05de 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -30,6 +30,7 @@
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fs.h>
+#include <linux/bio.h>
struct linux_binprm;
struct cred;
@@ -256,6 +257,8 @@
const struct qstr *qstr, const char **name,
void **value, size_t *len);
int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode);
+int security_inode_post_create(struct inode *dir, struct dentry *dentry,
+ umode_t mode);
int security_inode_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry);
int security_inode_unlink(struct inode *dir, struct dentry *dentry);
@@ -304,6 +307,7 @@
struct fown_struct *fown, int sig);
int security_file_receive(struct file *file);
int security_file_open(struct file *file, const struct cred *cred);
+
int security_task_create(unsigned long clone_flags);
void security_task_free(struct task_struct *task);
int security_cred_alloc_blank(struct cred *cred, gfp_t gfp);
@@ -637,6 +641,13 @@
return 0;
}
+static inline int security_inode_post_create(struct inode *dir,
+ struct dentry *dentry,
+ umode_t mode)
+{
+ return 0;
+}
+
static inline int security_inode_link(struct dentry *old_dentry,
struct inode *dir,
struct dentry *new_dentry)
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 232c3e0..81e8469 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -757,6 +757,9 @@
struct usb_host_endpoint *ep, dma_addr_t *dma);
extern int usb_get_controller_id(struct usb_device *dev);
+extern int usb_stop_endpoint(struct usb_device *dev,
+ struct usb_host_endpoint *ep);
+
/* Sets up a group of bulk endpoints to support multiple stream IDs. */
extern int usb_alloc_streams(struct usb_interface *interface,
struct usb_host_endpoint **eps, unsigned int num_eps,
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
index 00d2324..b0fad11 100644
--- a/include/linux/usb/cdc_ncm.h
+++ b/include/linux/usb/cdc_ncm.h
@@ -83,6 +83,7 @@
/* Driver flags */
#define CDC_NCM_FLAG_NDP_TO_END 0x02 /* NDP is placed at end of frame */
#define CDC_MBIM_FLAG_AVOID_ALTSETTING_TOGGLE 0x04 /* Avoid altsetting toggle during init */
+#define CDC_NCM_FLAG_RESET_NTB16 0x08 /* set NDP16 one more time after altsetting switch */
#define cdc_ncm_comm_intf_is_mbim(x) ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \
(x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE)
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 1699d2b..d070109 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -407,6 +407,8 @@
struct usb_device *udev, struct usb_host_endpoint *ep,
dma_addr_t *dma);
int (*get_core_id)(struct usb_hcd *hcd);
+ int (*stop_endpoint)(struct usb_hcd *hcd, struct usb_device *udev,
+ struct usb_host_endpoint *ep);
};
static inline int hcd_giveback_urb_in_bh(struct usb_hcd *hcd)
@@ -454,6 +456,8 @@
extern phys_addr_t usb_hcd_get_xfer_ring_phys_addr(
struct usb_device *udev, struct usb_host_endpoint *ep, dma_addr_t *dma);
extern int usb_hcd_get_controller_id(struct usb_device *udev);
+extern int usb_hcd_stop_endpoint(struct usb_device *udev,
+ struct usb_host_endpoint *ep);
struct usb_hcd *__usb_create_hcd(const struct hc_driver *driver,
struct device *sysdev, struct device *dev, const char *bus_name,
diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h
index ffb6393..092c32e 100644
--- a/include/linux/usb/phy.h
+++ b/include/linux/usb/phy.h
@@ -138,6 +138,7 @@
/* reset the PHY clocks */
int (*reset)(struct usb_phy *x);
+ int (*disable_chirp)(struct usb_phy *x, bool disable);
};
/**
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 38a02fd..1ba16f7 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -18,6 +18,7 @@
#include <linux/compiler.h>
#include <linux/atomic.h>
#include <linux/rhashtable.h>
+#include <linux/list.h>
#include <linux/netfilter/nf_conntrack_tcp.h>
#include <linux/netfilter/nf_conntrack_dccp.h>
@@ -27,6 +28,14 @@
#include <net/netfilter/nf_conntrack_tuple.h>
+#define SIP_LIST_ELEMENTS 2
+
+struct sip_length {
+ int msg_length[SIP_LIST_ELEMENTS];
+ int skb_len[SIP_LIST_ELEMENTS];
+ int data_len[SIP_LIST_ELEMENTS];
+};
+
/* per conntrack: protocol private data */
union nf_conntrack_proto {
/* insert conntrack proto private data here */
@@ -71,6 +80,11 @@
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
+/* Handle NATTYPE Stuff,only if NATTYPE module was defined */
+#ifdef CONFIG_IP_NF_TARGET_NATTYPE_MODULE
+#include <linux/netfilter_ipv4/ipt_NATTYPE.h>
+#endif
+
struct nf_conn {
/* Usage count in here is 1 for hash table, 1 per skb,
* plus 1 for any connection(s) we are `master' for
@@ -122,6 +136,15 @@
void *sfe_entry;
+#ifdef CONFIG_IP_NF_TARGET_NATTYPE_MODULE
+ unsigned long nattype_entry;
+#endif
+ struct list_head sip_segment_list;
+ const char *dptr_prev;
+ struct sip_length segment;
+ bool sip_original_dir;
+ bool sip_reply_dir;
+
/* Storage reserved for other modules, must be the last member */
union nf_conntrack_proto proto;
};
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index af67969..abc090c 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -20,6 +20,9 @@
/* This header is used to share core functionality between the
standalone connection tracking module, and the compatibility layer's use
of connection tracking. */
+
+extern unsigned int nf_conntrack_hash_rnd;
+
unsigned int nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
struct sk_buff *skb);
@@ -51,6 +54,9 @@
const struct nf_conntrack_l3proto *l3proto,
const struct nf_conntrack_l4proto *l4proto);
extern void (*delete_sfe_entry)(struct nf_conn *ct);
+extern bool (*nattype_refresh_timer)
+ (unsigned long nattype,
+ unsigned long timeout_value);
/* Find a connection corresponding to a tuple. */
struct nf_conntrack_tuple_hash *
@@ -87,4 +93,9 @@
extern spinlock_t nf_conntrack_expect_lock;
+struct sip_list {
+ struct nf_queue_entry *entry;
+ struct list_head list;
+};
+
#endif /* _NF_CONNTRACK_CORE_H */
diff --git a/include/soc/qcom/cmd-db.h b/include/soc/qcom/cmd-db.h
index e2c72d1..3c2aff3 100644
--- a/include/soc/qcom/cmd-db.h
+++ b/include/soc/qcom/cmd-db.h
@@ -110,17 +110,18 @@
return 0;
}
-bool cmd_db_get_priority(u32 addr, u8 drv_id)
+static inline bool cmd_db_get_priority(u32 addr, u8 drv_id)
{
return false;
}
-int cmd_db_get_aux_data(const char *resource_id, u8 *data, int len)
+static inline int cmd_db_get_aux_data(const char *resource_id,
+ u8 *data, int len)
{
return -ENODEV;
}
-int cmd_db_get_aux_data_len(const char *resource_id)
+static inline int cmd_db_get_aux_data_len(const char *resource_id)
{
return -ENODEV;
}
diff --git a/include/soc/qcom/minidump.h b/include/soc/qcom/minidump.h
new file mode 100644
index 0000000..5c751e8
--- /dev/null
+++ b/include/soc/qcom/minidump.h
@@ -0,0 +1,51 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MINIDUMP_H
+#define __MINIDUMP_H
+
+#define MAX_NAME_LENGTH 12
+/* md_region - Minidump table entry
+ * @name: Entry name, Minidump will dump binary with this name.
+ * @id: Entry ID, used only for SDI dumps.
+ * @virt_addr: Address of the entry.
+ * @phys_addr: Physical address of the entry to dump.
+ * @size: Number of byte to dump from @address location
+ * it should be 4 byte aligned.
+ */
+struct md_region {
+ char name[MAX_NAME_LENGTH];
+ u32 id;
+ u64 virt_addr;
+ u64 phys_addr;
+ u64 size;
+};
+
+/* Register an entry in Minidump table
+ * Returns:
+ * Zero: on successful addition
+ * Negative error number on failures
+ */
+#ifdef CONFIG_QCOM_MINIDUMP
+extern int msm_minidump_add_region(const struct md_region *entry);
+extern bool msm_minidump_enabled(void);
+extern void dump_stack_minidump(u64 sp);
+#else
+static inline int msm_minidump_add_region(const struct md_region *entry)
+{
+ /* Return quietly, if minidump is not supported */
+ return 0;
+}
+static inline bool msm_minidump_enabled(void) { return false; }
+static inline void dump_stack_minidump(u64 sp) {}
+#endif
+#endif
diff --git a/include/sound/seq_kernel.h b/include/sound/seq_kernel.h
index feb58d4..4b9ee30 100644
--- a/include/sound/seq_kernel.h
+++ b/include/sound/seq_kernel.h
@@ -49,7 +49,8 @@
#define SNDRV_SEQ_DEFAULT_CLIENT_EVENTS 200
/* max delivery path length */
-#define SNDRV_SEQ_MAX_HOPS 10
+/* NOTE: this shouldn't be greater than MAX_LOCKDEP_SUBCLASSES */
+#define SNDRV_SEQ_MAX_HOPS 8
/* max size of event size */
#define SNDRV_SEQ_MAX_EVENT_LEN 0x3fffffff
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index b4bcedf..a3b01c6 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -262,9 +262,9 @@
TRACE_EVENT(sched_get_task_cpu_cycles,
- TP_PROTO(int cpu, int event, u64 cycles, u64 exec_time),
+ TP_PROTO(int cpu, int event, u64 cycles, u64 exec_time, struct task_struct *p),
- TP_ARGS(cpu, event, cycles, exec_time),
+ TP_ARGS(cpu, event, cycles, exec_time, p),
TP_STRUCT__entry(
__field(int, cpu )
@@ -273,6 +273,8 @@
__field(u64, exec_time )
__field(u32, freq )
__field(u32, legacy_freq )
+ __field(pid_t, pid )
+ __array(char, comm, TASK_COMM_LEN )
),
TP_fast_assign(
@@ -282,11 +284,13 @@
__entry->exec_time = exec_time;
__entry->freq = cpu_cycles_to_freq(cycles, exec_time);
__entry->legacy_freq = cpu_cur_freq(cpu);
+ __entry->pid = p->pid;
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
),
- TP_printk("cpu=%d event=%d cycles=%llu exec_time=%llu freq=%u legacy_freq=%u",
+ TP_printk("cpu=%d event=%d cycles=%llu exec_time=%llu freq=%u legacy_freq=%u task=%d (%s)",
__entry->cpu, __entry->event, __entry->cycles,
- __entry->exec_time, __entry->freq, __entry->legacy_freq)
+ __entry->exec_time, __entry->freq, __entry->legacy_freq, __entry->pid, __entry->comm)
);
TRACE_EVENT(sched_update_task_ravg,
diff --git a/include/trace/events/trace_msm_low_power.h b/include/trace/events/trace_msm_low_power.h
index 97eefc6..c25da0e 100644
--- a/include/trace/events/trace_msm_low_power.h
+++ b/include/trace/events/trace_msm_low_power.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -250,24 +250,6 @@
__entry->sample, __entry->tmr)
);
-TRACE_EVENT(pre_pc_cb,
-
- TP_PROTO(int tzflag),
-
- TP_ARGS(tzflag),
-
- TP_STRUCT__entry(
- __field(int, tzflag)
- ),
-
- TP_fast_assign(
- __entry->tzflag = tzflag;
- ),
-
- TP_printk("tzflag:%d",
- __entry->tzflag
- )
-);
#endif
#define TRACE_INCLUDE_FILE trace_msm_low_power
#include <trace/define_trace.h>
diff --git a/include/uapi/linux/msm_kgsl.h b/include/uapi/linux/msm_kgsl.h
index f05155b..9ee2a8b 100644
--- a/include/uapi/linux/msm_kgsl.h
+++ b/include/uapi/linux/msm_kgsl.h
@@ -142,6 +142,7 @@
#define KGSL_MEMFLAGS_USE_CPU_MAP 0x10000000ULL
#define KGSL_MEMFLAGS_SPARSE_PHYS 0x20000000ULL
#define KGSL_MEMFLAGS_SPARSE_VIRT 0x40000000ULL
+#define KGSL_MEMFLAGS_IOCOHERENT 0x80000000ULL
/* Memory types for which allocations are made */
#define KGSL_MEMTYPE_MASK 0x0000FF00
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 85b7e87..229dd25 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -718,6 +718,8 @@
v4l2_fourcc('T', 'P', '1', '0') /* Y/CbCr 4:2:0 TP10 */
#define V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_P010 \
v4l2_fourcc('P', '0', '1', '0') /* Y/CbCr 4:2:0 P10 */
+#define V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_P010_VENUS \
+ v4l2_fourcc('Q', 'P', '1', '0') /* Y/CbCr 4:2:0 P10 Venus*/
/* SDR formats - used only for Software Defined Radio devices */
#define V4L2_SDR_FMT_CU8 v4l2_fourcc('C', 'U', '0', '8') /* IQ u8 */
diff --git a/include/uapi/media/Kbuild b/include/uapi/media/Kbuild
index e72a1f0..1e087a1 100644
--- a/include/uapi/media/Kbuild
+++ b/include/uapi/media/Kbuild
@@ -14,3 +14,4 @@
header-y += msm_sde_rotator.h
header-y += radio-iris.h
header-y += radio-iris-commands.h
+header-y += cam_lrme.h
diff --git a/include/uapi/media/cam_isp.h b/include/uapi/media/cam_isp.h
index 4a63292..afd109f 100644
--- a/include/uapi/media/cam_isp.h
+++ b/include/uapi/media/cam_isp.h
@@ -84,7 +84,9 @@
#define CAM_ISP_DSP_MODE_ROUND 2
/* ISP Generic Cmd Buffer Blob types */
-#define CAM_ISP_GENERIC_BLOB_TYPE_HFR_CONFIG 0
+#define CAM_ISP_GENERIC_BLOB_TYPE_HFR_CONFIG 0
+#define CAM_ISP_GENERIC_BLOB_TYPE_CLOCK_CONFIG 1
+#define CAM_ISP_GENERIC_BLOB_TYPE_BW_CONFIG 2
/* Query devices */
/**
@@ -248,7 +250,7 @@
uint32_t framedrop_pattern;
uint32_t framedrop_period;
uint32_t reserved;
-};
+} __attribute__((packed));
/**
* struct cam_isp_resource_hfr_config - Resource HFR configuration
@@ -261,7 +263,7 @@
uint32_t num_ports;
uint32_t reserved;
struct cam_isp_port_hfr_config port_hfr_config[1];
-};
+} __attribute__((packed));
/**
* struct cam_isp_dual_split_params - dual isp spilt parameters
@@ -317,6 +319,60 @@
uint32_t reserved;
struct cam_isp_dual_split_params split_params;
struct cam_isp_dual_stripe_config stripes[1];
-};
+} __attribute__((packed));
+
+/**
+ * struct cam_isp_clock_config - Clock configuration
+ *
+ * @usage_type: Usage type (Single/Dual)
+ * @num_rdi: Number of RDI votes
+ * @left_pix_hz: Pixel Clock for Left ISP
+ * @right_pix_hz: Pixel Clock for Right ISP, valid only if Dual
+ * @rdi_hz: RDI Clock. ISP clock will be max of RDI and
+ * PIX clocks. For a particular context which ISP
+ * HW the RDI is allocated to is not known to UMD.
+ * Hence pass the clock and let KMD decide.
+ */
+struct cam_isp_clock_config {
+ uint32_t usage_type;
+ uint32_t num_rdi;
+ uint64_t left_pix_hz;
+ uint64_t right_pix_hz;
+ uint64_t rdi_hz[1];
+} __attribute__((packed));
+
+/**
+ * struct cam_isp_bw_vote - Bandwidth vote information
+ *
+ * @resource_id: Resource ID
+ * @reserved: Reserved field for alignment
+ * @cam_bw_bps: Bandwidth vote for CAMNOC
+ * @ext_bw_bps: Bandwidth vote for path-to-DDR after CAMNOC
+ */
+
+struct cam_isp_bw_vote {
+ uint32_t resource_id;
+ uint32_t reserved;
+ uint64_t cam_bw_bps;
+ uint64_t ext_bw_bps;
+} __attribute__((packed));
+
+/**
+ * struct cam_isp_bw_config - Bandwidth configuration
+ *
+ * @usage_type: Usage type (Single/Dual)
+ * @num_rdi: Number of RDI votes
+ * @left_pix_vote: Bandwidth vote for left ISP
+ * @right_pix_vote: Bandwidth vote for right ISP
+ * @rdi_vote: RDI bandwidth requirements
+ */
+
+struct cam_isp_bw_config {
+ uint32_t usage_type;
+ uint32_t num_rdi;
+ struct cam_isp_bw_vote left_pix_vote;
+ struct cam_isp_bw_vote right_pix_vote;
+ struct cam_isp_bw_vote rdi_vote[1];
+} __attribute__((packed));
#endif /* __UAPI_CAM_ISP_H__ */
diff --git a/include/uapi/media/cam_lrme.h b/include/uapi/media/cam_lrme.h
new file mode 100644
index 0000000..97d9578
--- /dev/null
+++ b/include/uapi/media/cam_lrme.h
@@ -0,0 +1,65 @@
+#ifndef __UAPI_CAM_LRME_H__
+#define __UAPI_CAM_LRME_H__
+
+#include "cam_defs.h"
+
+/* LRME Resource Types */
+
+enum CAM_LRME_IO_TYPE {
+ CAM_LRME_IO_TYPE_TAR,
+ CAM_LRME_IO_TYPE_REF,
+ CAM_LRME_IO_TYPE_RES,
+ CAM_LRME_IO_TYPE_DS2,
+};
+
+#define CAM_LRME_INPUT_PORT_TYPE_TAR (1 << 0)
+#define CAM_LRME_INPUT_PORT_TYPE_REF (1 << 1)
+
+#define CAM_LRME_OUTPUT_PORT_TYPE_DS2 (1 << 0)
+#define CAM_LRME_OUTPUT_PORT_TYPE_RES (1 << 1)
+
+#define CAM_LRME_DEV_MAX 1
+
+
+struct cam_lrme_hw_version {
+ uint32_t gen;
+ uint32_t rev;
+ uint32_t step;
+};
+
+struct cam_lrme_dev_cap {
+ struct cam_lrme_hw_version clc_hw_version;
+ struct cam_lrme_hw_version bus_rd_hw_version;
+ struct cam_lrme_hw_version bus_wr_hw_version;
+ struct cam_lrme_hw_version top_hw_version;
+ struct cam_lrme_hw_version top_titan_version;
+};
+
+/**
+ * struct cam_lrme_query_cap_cmd - LRME query device capability payload
+ *
+ * @dev_iommu_handle: LRME iommu handles for secure/non secure
+ * modes
+ * @cdm_iommu_handle: Iommu handles for secure/non secure modes
+ * @num_devices: number of hardware devices
+ * @dev_caps: Returned device capability array
+ */
+struct cam_lrme_query_cap_cmd {
+ struct cam_iommu_handle device_iommu;
+ struct cam_iommu_handle cdm_iommu;
+ uint32_t num_devices;
+ struct cam_lrme_dev_cap dev_caps[CAM_LRME_DEV_MAX];
+};
+
+struct cam_lrme_soc_info {
+ uint64_t clock_rate;
+ uint64_t bandwidth;
+ uint64_t reserved[4];
+};
+
+struct cam_lrme_acquire_args {
+ struct cam_lrme_soc_info lrme_soc_info;
+};
+
+#endif /* __UAPI_CAM_LRME_H__ */
+
diff --git a/include/uapi/media/cam_req_mgr.h b/include/uapi/media/cam_req_mgr.h
index 9b7d055..233d84e 100644
--- a/include/uapi/media/cam_req_mgr.h
+++ b/include/uapi/media/cam_req_mgr.h
@@ -355,14 +355,14 @@
* @error_type: type of error
* @request_id: request id of frame
* @device_hdl: device handle
- * @reserved: reserved field
+ * @link_hdl: link handle
* @resource_size: size of the resource
*/
struct cam_req_mgr_error_msg {
uint32_t error_type;
uint32_t request_id;
int32_t device_hdl;
- int32_t reserved;
+ int32_t link_hdl;
uint64_t resource_size;
};
diff --git a/kernel/panic.c b/kernel/panic.c
index fcc8786..d797170 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -27,6 +27,7 @@
#include <linux/bug.h>
#define CREATE_TRACE_POINTS
#include <trace/events/exception.h>
+#include <soc/qcom/minidump.h>
#define PANIC_TIMER_STEP 100
#define PANIC_BLINK_SPD 18
@@ -174,6 +175,7 @@
va_start(args, fmt);
vsnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
+ dump_stack_minidump(0);
pr_emerg("Kernel panic - not syncing: %s\n", buf);
#ifdef CONFIG_DEBUG_BUGVERBOSE
/*
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 01a589c..bbe783e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8660,6 +8660,7 @@
struct rq *rq;
rq = task_rq_lock(tsk, &rf);
+ update_rq_clock(rq);
running = task_current(rq, tsk);
queued = task_on_rq_queued(tsk);
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 3192612..32b67eb 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -235,7 +235,9 @@
/* Track cycles in current window */
delta_ns = upto - sg_policy->last_cyc_update_time;
- cycles = (prev_freq * delta_ns) / (NSEC_PER_SEC / KHZ);
+ delta_ns *= prev_freq;
+ do_div(delta_ns, (NSEC_PER_SEC / KHZ));
+ cycles = delta_ns;
sg_policy->curr_cycles += cycles;
sg_policy->last_cyc_update_time = upto;
}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0782ea74..42be34a 100755
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6928,7 +6928,7 @@
int target_cpu, targeted_cpus = 0;
unsigned long task_util_boosted = 0, curr_util = 0;
long new_util, new_util_cum;
- int i = -1;
+ int i;
int ediff = -1;
int cpu = smp_processor_id();
int min_util_cpu = -1;
@@ -6949,13 +6949,8 @@
struct related_thread_group *grp;
cpumask_t search_cpus;
int prev_cpu = task_cpu(p);
-#ifdef CONFIG_SCHED_CORE_ROTATE
bool do_rotate = false;
bool avoid_prev_cpu = false;
-#else
-#define do_rotate false
-#define avoid_prev_cpu false
-#endif
sd = rcu_dereference(per_cpu(sd_ea, prev_cpu));
@@ -7044,13 +7039,11 @@
cpumask_and(&search_cpus, &search_cpus,
sched_group_cpus(sg_target));
-#ifdef CONFIG_SCHED_CORE_ROTATE
i = find_first_cpu_bit(p, &search_cpus, sg_target,
&avoid_prev_cpu, &do_rotate,
&first_cpu_bit_env);
retry:
-#endif
/* Find cpu with sufficient capacity */
while ((i = cpumask_next(i, &search_cpus)) < nr_cpu_ids) {
cpumask_clear_cpu(i, &search_cpus);
@@ -7146,9 +7139,7 @@
}
} else if (cpu_rq(i)->nr_running) {
target_cpu = i;
-#ifdef CONFIG_SCHED_CORE_ROTATE
do_rotate = false;
-#endif
break;
}
} else if (!need_idle) {
@@ -7188,7 +7179,6 @@
}
}
-#ifdef CONFIG_SCHED_CORE_ROTATE
if (do_rotate) {
/*
* We started iteration somewhere in the middle of
@@ -7199,7 +7189,6 @@
i = -1;
goto retry;
}
-#endif
if (target_cpu == -1 ||
(target_cpu != min_util_cpu && !safe_to_pack &&
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index b2b26e5..c97b779 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1758,13 +1758,8 @@
int best_cpu_idle_idx = INT_MAX;
int cpu_idle_idx = -1;
bool placement_boost;
-#ifdef CONFIG_SCHED_CORE_ROTATE
bool do_rotate = false;
bool avoid_prev_cpu = false;
-#else
-#define do_rotate false
-#define avoid_prev_cpu false
-#endif
/* Make sure the mask is initialized first */
if (unlikely(!lowest_mask))
@@ -1892,7 +1887,6 @@
best_cpu = cpu;
}
-#ifdef CONFIG_SCHED_CORE_ROTATE
if (do_rotate) {
/*
* We started iteration somewhere in the middle of
@@ -1903,7 +1897,6 @@
cpu = -1;
goto retry;
}
-#endif
if (best_cpu != -1) {
return best_cpu;
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index 32c7f32..da7c0f0 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -1867,7 +1867,7 @@
p->cpu_cycles = cur_cycles;
- trace_sched_get_task_cpu_cycles(cpu, event, rq->cc.cycles, rq->cc.time);
+ trace_sched_get_task_cpu_cycles(cpu, event, rq->cc.cycles, rq->cc.time, p);
}
static inline void run_walt_irq_work(u64 old_window_start, struct rq *rq)
diff --git a/kernel/sched/walt.h b/kernel/sched/walt.h
index 86d5bfd..10f3e84 100644
--- a/kernel/sched/walt.h
+++ b/kernel/sched/walt.h
@@ -219,7 +219,7 @@
return sched_ravg_window;
}
-static inline u32 cpu_cycles_to_freq(u64 cycles, u32 period)
+static inline u32 cpu_cycles_to_freq(u64 cycles, u64 period)
{
return div64_u64(cycles, period);
}
diff --git a/kernel/trace/msm_rtb.c b/kernel/trace/msm_rtb.c
index 9d9f0bf..d3bcd5c 100644
--- a/kernel/trace/msm_rtb.c
+++ b/kernel/trace/msm_rtb.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -28,6 +28,7 @@
#include <asm-generic/sizes.h>
#include <linux/msm_rtb.h>
#include <asm/timex.h>
+#include <soc/qcom/minidump.h>
#define SENTINEL_BYTE_1 0xFF
#define SENTINEL_BYTE_2 0xAA
@@ -242,6 +243,7 @@
static int msm_rtb_probe(struct platform_device *pdev)
{
struct msm_rtb_platform_data *d = pdev->dev.platform_data;
+ struct md_region md_entry;
#if defined(CONFIG_QCOM_RTB_SEPARATE_CPUS)
unsigned int cpu;
#endif
@@ -293,6 +295,12 @@
memset(msm_rtb.rtb, 0, msm_rtb.size);
+ strlcpy(md_entry.name, "KRTB_BUF", sizeof(md_entry.name));
+ md_entry.virt_addr = (uintptr_t)msm_rtb.rtb;
+ md_entry.phys_addr = msm_rtb.phys;
+ md_entry.size = msm_rtb.size;
+ if (msm_minidump_add_region(&md_entry))
+ pr_info("Failed to add RTB in Minidump\n");
#if defined(CONFIG_QCOM_RTB_SEPARATE_CPUS)
for_each_possible_cpu(cpu) {
diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
index 8635417..29fa81f 100644
--- a/kernel/workqueue_internal.h
+++ b/kernel/workqueue_internal.h
@@ -9,6 +9,7 @@
#include <linux/workqueue.h>
#include <linux/kthread.h>
+#include <linux/preempt.h>
struct worker_pool;
@@ -59,7 +60,7 @@
*/
static inline struct worker *current_wq_worker(void)
{
- if (current->flags & PF_WQ_WORKER)
+ if (in_task() && (current->flags & PF_WQ_WORKER))
return kthread_data(current);
return NULL;
}
diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
index 0bd8a61..1ef0cec 100644
--- a/lib/asn1_decoder.c
+++ b/lib/asn1_decoder.c
@@ -228,7 +228,7 @@
hdr = 2;
/* Extract a tag from the data */
- if (unlikely(dp >= datalen - 1))
+ if (unlikely(datalen - dp < 2))
goto data_overrun_error;
tag = data[dp++];
if (unlikely((tag & 0x1f) == ASN1_LONG_TAG))
@@ -274,7 +274,7 @@
int n = len - 0x80;
if (unlikely(n > 2))
goto length_too_long;
- if (unlikely(dp >= datalen - n))
+ if (unlikely(n > datalen - dp))
goto data_overrun_error;
hdr += n;
for (len = 0; n > 0; n--) {
@@ -284,6 +284,9 @@
if (unlikely(len > datalen - dp))
goto data_overrun_error;
}
+ } else {
+ if (unlikely(len > datalen - dp))
+ goto data_overrun_error;
}
if (flags & FLAG_CONS) {
diff --git a/mm/page_owner.c b/mm/page_owner.c
index fe850b9..c4381d93 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -554,11 +554,17 @@
continue;
/*
- * We are safe to check buddy flag and order, because
- * this is init stage and only single thread runs.
+ * To avoid having to grab zone->lock, be a little
+ * careful when reading buddy page order. The only
+ * danger is that we skip too much and potentially miss
+ * some early allocated pages, which is better than
+ * heavy lock contention.
*/
if (PageBuddy(page)) {
- pfn += (1UL << page_order(page)) - 1;
+ unsigned long order = page_order_unsafe(page);
+
+ if (order > 0 && order < MAX_ORDER)
+ pfn += (1UL << order) - 1;
continue;
}
@@ -577,6 +583,7 @@
set_page_owner(page, 0, 0);
count++;
}
+ cond_resched();
}
pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
@@ -587,15 +594,12 @@
{
struct zone *zone;
struct zone *node_zones = pgdat->node_zones;
- unsigned long flags;
for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
if (!populated_zone(zone))
continue;
- spin_lock_irqsave(&zone->lock, flags);
init_pages_in_zone(pgdat, zone);
- spin_unlock_irqrestore(&zone->lock, flags);
}
}
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index 96e47c5..39bb5b3 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -1,12 +1,13 @@
config HAVE_NET_DSA
def_bool y
- depends on NETDEVICES && !S390
+ depends on INET && NETDEVICES && !S390
# Drivers must select NET_DSA and the appropriate tagging format
config NET_DSA
tristate "Distributed Switch Architecture"
- depends on HAVE_NET_DSA && NET_SWITCHDEV
+ depends on HAVE_NET_DSA
+ select NET_SWITCHDEV
select PHYLIB
---help---
Say Y if you want to enable support for the hardware switches supported
diff --git a/net/embms_kernel/embms_kernel.c b/net/embms_kernel/embms_kernel.c
index 7b79574..3bbe51b 100644
--- a/net/embms_kernel/embms_kernel.c
+++ b/net/embms_kernel/embms_kernel.c
@@ -62,7 +62,6 @@
{
struct iphdr *iph;
struct udphdr *udph;
- struct in_device *in_dev;
unsigned char *tmp_ptr = NULL;
struct sk_buff *skb_new = NULL;
struct sk_buff *skb_cpy = NULL;
@@ -396,12 +395,9 @@
int delete_tmgi_entry_from_table(char *buffer)
{
- int i;
struct tmgi_to_clnt_info_update *info_update;
- char message_buffer[sizeof(struct tmgi_to_clnt_info_update)];
struct clnt_info *temp_client = NULL;
struct tmgi_to_clnt_info *temp_tmgi = NULL;
- struct list_head *tmgi_entry_ptr, *prev_tmgi_entry_ptr;
struct list_head *clnt_ptr, *prev_clnt_ptr;
embms_debug("delete_tmgi_entry_from_table: Enter\n");
@@ -477,13 +473,10 @@
*/
int delete_client_entry_from_all_tmgi(char *buffer)
{
- int i;
struct tmgi_to_clnt_info_update *info_update;
- char message_buffer[sizeof(struct tmgi_to_clnt_info_update)];
struct clnt_info *temp_client = NULL;
struct tmgi_to_clnt_info *tmgi = NULL;
struct list_head *tmgi_entry_ptr, *prev_tmgi_entry_ptr;
- struct list_head *clnt_ptr, *prev_clnt_ptr;
/* We use this function when we want to delete any
* client entry from all TMGI entries. This scenario
@@ -574,18 +567,11 @@
*/
int add_client_entry_to_table(char *buffer)
{
- int i, ret;
+ int ret;
struct tmgi_to_clnt_info_update *info_update;
- char message_buffer[sizeof(struct tmgi_to_clnt_info_update)];
struct clnt_info *new_client = NULL;
- struct clnt_info *temp_client = NULL;
- struct tmgi_to_clnt_info *new_tmgi = NULL;
struct tmgi_to_clnt_info *tmgi = NULL;
- struct list_head *tmgi_entry_ptr, *prev_tmgi_entry_ptr;
- struct list_head *clnt_ptr, *prev_clnt_ptr;
struct neighbour *neigh_entry;
- struct in_device *iface_dev;
- struct in_ifaddr *iface_info;
embms_debug("add_client_entry_to_table: Enter\n");
@@ -699,13 +685,9 @@
*/
int delete_client_entry_from_table(char *buffer)
{
- int i;
struct tmgi_to_clnt_info_update *info_update;
- char message_buffer[sizeof(struct tmgi_to_clnt_info_update)];
struct clnt_info *temp_client = NULL;
struct tmgi_to_clnt_info *temp_tmgi = NULL;
- struct list_head *tmgi_entry_ptr, *prev_tmgi_entry_ptr;
- struct list_head *clnt_ptr, *prev_clnt_ptr;
embms_debug("delete_client_entry_from_table: Enter\n");
@@ -796,11 +778,10 @@
* Return: Success if functoin call returns SUCCESS, error otherwise.
*/
-int embms_device_ioctl(struct file *file, unsigned int ioctl_num,
- unsigned long ioctl_param)
+long embms_device_ioctl(struct file *file, unsigned int ioctl_num,
+ unsigned long ioctl_param)
{
- int i, ret, error;
- char *temp;
+ int ret;
char buffer[BUF_LEN];
struct in_device *iface_dev;
struct in_ifaddr *iface_info;
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index f2a7102..22377c8 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -270,6 +270,9 @@
int ihl = ip_hdrlen(skb);
int ah_hlen = (ah->hdrlen + 2) << 2;
+ if (err)
+ goto out;
+
work_iph = AH_SKB_CB(skb)->tmp;
auth_data = ah_tmp_auth(work_iph, ihl);
icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len);
diff --git a/net/ipv4/netfilter/ipt_NATTYPE.c b/net/ipv4/netfilter/ipt_NATTYPE.c
index b8597d2..bed569f8 100644
--- a/net/ipv4/netfilter/ipt_NATTYPE.c
+++ b/net/ipv4/netfilter/ipt_NATTYPE.c
@@ -24,6 +24,7 @@
* Ubicom32 implementation derived from
* Cameo's implementation(with many thanks):
*/
+
#include <linux/types.h>
#include <linux/ip.h>
#include <linux/udp.h>
@@ -36,21 +37,17 @@
#include <linux/tcp.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
-#include <net/netfilter/nf_nat_rule.h>
+#include <net/netfilter/nf_nat.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ipt_NATTYPE.h>
#include <linux/atomic.h>
-#if !defined(NATTYPE_DEBUG)
-#define DEBUGP(type, args...)
-#else
static const char * const types[] = {"TYPE_PORT_ADDRESS_RESTRICTED",
"TYPE_ENDPOINT_INDEPENDENT",
"TYPE_ADDRESS_RESTRICTED"};
static const char * const modes[] = {"MODE_DNAT", "MODE_FORWARD_IN",
"MODE_FORWARD_OUT"};
#define DEBUGP(args...) pr_debug(args)
-#endif
/* netfilter NATTYPE TODO:
* Add magic value checks to data structure.
@@ -58,13 +55,17 @@
struct ipt_nattype {
struct list_head list;
struct timer_list timeout;
+ unsigned long timeout_value;
+ unsigned int nattype_cookie;
unsigned short proto; /* Protocol: TCP or UDP */
- struct nf_nat_ipv4_range range; /* LAN side src info*/
+ struct nf_nat_range range; /* LAN side source information */
unsigned short nat_port; /* Routed NAT port */
unsigned int dest_addr; /* Original egress packets dst addr */
unsigned short dest_port;/* Original egress packets destination port */
};
+#define NATTYPE_COOKIE 0x11abcdef
+
/* TODO: It might be better to use a hash table for performance in
* heavy traffic.
*/
@@ -77,11 +78,13 @@
static void nattype_nte_debug_print(const struct ipt_nattype *nte,
const char *s)
{
- DEBUGP("%p: %s - proto[%d], src[%pI4:%d], nat[<x>:%d], dest[%pI4:%d]\n",
+ DEBUGP("%p:%s-proto[%d],src[%pI4:%d],nat[%d],dest[%pI4:%d]\n",
nte, s, nte->proto,
- &nte->range.min_ip, ntohs(nte->range.min.all),
+ &nte->range.min_addr.ip, ntohs(nte->range.min_proto.all),
ntohs(nte->nat_port),
&nte->dest_addr, ntohs(nte->dest_port));
+ DEBUGP("Timeout[%lx], Expires[%lx]\n", nte->timeout_value,
+ nte->timeout.expires);
}
/* netfilter NATTYPE nattype_free()
@@ -89,20 +92,31 @@
*/
static void nattype_free(struct ipt_nattype *nte)
{
- nattype_nte_debug_print(nte, "free");
kfree(nte);
}
/* netfilter NATTYPE nattype_refresh_timer()
* Refresh the timer for this object.
*/
-static bool nattype_refresh_timer(struct ipt_nattype *nte)
+bool nattype_refresh_timer(unsigned long nat_type, unsigned long timeout_value)
{
+ struct ipt_nattype *nte = (struct ipt_nattype *)nat_type;
+
+ if (!nte)
+ return false;
+ spin_lock_bh(&nattype_lock);
+ if (nte->nattype_cookie != NATTYPE_COOKIE) {
+ spin_unlock_bh(&nattype_lock);
+ return false;
+ }
if (del_timer(&nte->timeout)) {
- nte->timeout.expires = jiffies + NATTYPE_TIMEOUT * HZ;
+ nte->timeout.expires = timeout_value;
add_timer(&nte->timeout);
+ spin_unlock_bh(&nattype_lock);
+ nattype_nte_debug_print(nte, "refresh");
return true;
}
+ spin_unlock_bh(&nattype_lock);
return false;
}
@@ -121,6 +135,7 @@
nattype_nte_debug_print(nte, "timeout");
spin_lock_bh(&nattype_lock);
list_del(&nte->list);
+ memset(nte, 0, sizeof(struct ipt_nattype));
spin_unlock_bh(&nattype_lock);
nattype_free(nte);
}
@@ -200,7 +215,8 @@
/* netfilter NATTYPE nattype_compare
* Compare two entries, return true if relevant fields are the same.
*/
-static bool nattype_compare(struct ipt_nattype *n1, struct ipt_nattype *n2)
+static bool nattype_compare(struct ipt_nattype *n1, struct ipt_nattype *n2,
+ const struct ipt_nattype_info *info)
{
/* netfilter NATTYPE Protocol
* compare.
@@ -215,16 +231,16 @@
* Since we always keep min/max values the same,
* just compare the min values.
*/
- if (n1->range.min_ip != n2->range.min_ip) {
- DEBUGP("nattype_compare: r.min_ip mismatch: %pI4:%pI4\n",
- &n1->range.min_ip, &n2->range.min_ip);
+ if (n1->range.min_addr.ip != n2->range.min_addr.ip) {
+ DEBUGP("nattype_compare: r.min_addr.ip mismatch: %pI4:%pI4\n",
+ &n1->range.min_addr.ip, &n2->range.min_addr.ip);
return false;
}
- if (n1->range.min.all != n2->range.min.all) {
+ if (n1->range.min_proto.all != n2->range.min_proto.all) {
DEBUGP("nattype_compare: r.min mismatch: %d:%d\n",
- ntohs(n1->range.min.all),
- ntohs(n2->range.min.all));
+ ntohs(n1->range.min_proto.all),
+ ntohs(n2->range.min_proto.all));
return false;
}
@@ -237,20 +253,16 @@
return false;
}
- /* netfilter NATTYPE
- * Destination compare
+ /* netfilter NATTYPE Destination compare
+ * Destination compare for Address Restricted Cone NAT.
*/
- if (n1->dest_addr != n2->dest_addr) {
+ if ((info->type == TYPE_ADDRESS_RESTRICTED) &&
+ (n1->dest_addr != n2->dest_addr)) {
DEBUGP("nattype_compare: dest_addr mismatch: %pI4:%pI4\n",
&n1->dest_addr, &n2->dest_addr);
return false;
}
- if (n1->dest_port != n2->dest_port) {
- DEBUGP("nattype_compare: dest_port mismatch: %d:%d\n",
- ntohs(n1->dest_port), ntohs(n2->dest_port));
- return false;
- }
return true;
}
@@ -270,7 +282,7 @@
list_for_each_entry(nte, &nattype_list, list) {
struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
- struct nf_nat_ipv4_range newrange;
+ struct nf_nat_range newrange;
unsigned int ret;
if (!nattype_packet_in_match(nte, skb, par->targinfo))
@@ -291,11 +303,22 @@
return XT_CONTINUE;
}
- /* Expand the ingress conntrack
- * to include the reply as source
+ /* netfilter
+ * Refresh the timer, if we fail, break
+ * out and forward fail as though we never
+ * found the entry.
+ */
+ if (!nattype_refresh_timer((unsigned long)nte,
+ jiffies + nte->timeout_value))
+ break;
+
+ /* netfilter
+ * Expand the ingress conntrack to include the reply as source
*/
DEBUGP("Expand ingress conntrack=%p, type=%d, src[%pI4:%d]\n",
- ct, ctinfo, &newrange.min_ip, ntohs(newrange.min.all));
+ ct, ctinfo, &newrange.min_addr.ip,
+ ntohs(newrange.min_proto.all));
+ ct->nattype_entry = (unsigned long)nte;
ret = nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST);
DEBUGP("Expand returned: %d\n", ret);
return ret;
@@ -318,12 +341,22 @@
enum ip_conntrack_info ctinfo;
const struct ipt_nattype_info *info = par->targinfo;
u16 nat_port;
+ enum ip_conntrack_dir dir;
- if (par->hooknum != NF_INET_FORWARD)
+
+ if (par->hooknum != NF_INET_POST_ROUTING)
return XT_CONTINUE;
- /* Ingress packet,
- * refresh the timer if we find an entry.
+ /* netfilter
+ * Egress packet, create a new rule in our list. If conntrack does
+ * not have an entry, skip this packet.
+ */
+ ct = nf_ct_get(skb, &ctinfo);
+ if (!ct)
+ return XT_CONTINUE;
+
+ /* netfilter
+ * Ingress packet, refresh the timer if we find an entry.
*/
if (info->mode == MODE_FORWARD_IN) {
spin_lock_bh(&nattype_lock);
@@ -335,12 +368,14 @@
if (!nattype_packet_in_match(nte, skb, info))
continue;
+ spin_unlock_bh(&nattype_lock);
/* netfilter NATTYPE
* Refresh the timer, if we fail, break
* out and forward fail as though we never
* found the entry.
*/
- if (!nattype_refresh_timer(nte))
+ if (!nattype_refresh_timer((unsigned long)nte,
+ ct->timeout.expires))
break;
/* netfilter NATTYPE
@@ -348,7 +383,6 @@
* entry values should not change so print
* them outside the lock.
*/
- spin_unlock_bh(&nattype_lock);
nattype_nte_debug_print(nte, "refresh");
DEBUGP("FORWARD_IN_ACCEPT\n");
return NF_ACCEPT;
@@ -358,15 +392,9 @@
return XT_CONTINUE;
}
- /* netfilter NATTYPE
- * Egress packet, create a new rule in our list. If conntrack does
- * not have an entry, skip this packet.
- */
- ct = nf_ct_get(skb, &ctinfo);
- if (!ct || (ctinfo == IP_CT_NEW && ctinfo == IP_CT_RELATED))
- return XT_CONTINUE;
+ dir = CTINFO2DIR(ctinfo);
- nat_port = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.all;
+ nat_port = ct->tuplehash[!dir].tuple.dst.u.all;
/* netfilter NATTYPE
* Allocate a new entry
@@ -382,20 +410,22 @@
nte->proto = iph->protocol;
nte->nat_port = nat_port;
nte->dest_addr = iph->daddr;
- nte->range.min_ip = iph->saddr;
- nte->range.max_ip = nte->range.min_ip;
+ nte->range.min_addr.ip = iph->saddr;
+ nte->range.max_addr.ip = nte->range.min_addr.ip;
/* netfilter NATTYPE
* TOOD: Would it be better to get this information from the
* conntrack instead of the headers.
*/
if (iph->protocol == IPPROTO_TCP) {
- nte->range.min.tcp.port = ((struct tcphdr *)protoh)->source;
- nte->range.max.tcp.port = nte->range.min.tcp.port;
+ nte->range.min_proto.tcp.port =
+ ((struct tcphdr *)protoh)->source;
+ nte->range.max_proto.tcp.port = nte->range.min_proto.tcp.port;
nte->dest_port = ((struct tcphdr *)protoh)->dest;
} else if (iph->protocol == IPPROTO_UDP) {
- nte->range.min.udp.port = ((struct udphdr *)protoh)->source;
- nte->range.max.udp.port = nte->range.min.udp.port;
+ nte->range.min_proto.udp.port =
+ ((struct udphdr *)protoh)->source;
+ nte->range.max_proto.udp.port = nte->range.min_proto.udp.port;
nte->dest_port = ((struct udphdr *)protoh)->dest;
}
nte->range.flags = (NF_NAT_RANGE_MAP_IPS |
@@ -416,15 +446,17 @@
*/
spin_lock_bh(&nattype_lock);
list_for_each_entry(nte2, &nattype_list, list) {
- if (!nattype_compare(nte, nte2))
+ if (!nattype_compare(nte, nte2, info))
continue;
-
+ spin_unlock_bh(&nattype_lock);
/* netfilter NATTYPE
* If we can not refresh this entry, insert our new
* entry as this one is timed out and will be removed
* from the list shortly.
*/
- if (!nattype_refresh_timer(nte2))
+ if (!nattype_refresh_timer(
+ (unsigned long)nte2,
+ jiffies + nte2->timeout_value))
break;
/* netfilter NATTYPE
@@ -433,7 +465,6 @@
*
* Free up the new entry.
*/
- spin_unlock_bh(&nattype_lock);
nattype_nte_debug_print(nte2, "refresh");
nattype_free(nte);
return XT_CONTINUE;
@@ -442,9 +473,12 @@
/* netfilter NATTYPE
* Add the new entry to the list.
*/
- nte->timeout.expires = jiffies + (NATTYPE_TIMEOUT * HZ);
+ nte->timeout_value = ct->timeout.expires;
+ nte->timeout.expires = ct->timeout.expires + jiffies;
add_timer(&nte->timeout);
list_add(&nte->list, &nattype_list);
+ ct->nattype_entry = (unsigned long)nte;
+ nte->nattype_cookie = NATTYPE_COOKIE;
spin_unlock_bh(&nattype_lock);
nattype_nte_debug_print(nte, "ADD");
return XT_CONTINUE;
@@ -534,7 +568,7 @@
types[info->type], modes[info->mode]);
if (par->hook_mask & ~((1 << NF_INET_PRE_ROUTING) |
- (1 << NF_INET_FORWARD))) {
+ (1 << NF_INET_POST_ROUTING))) {
DEBUGP("nattype_check: bad hooks %x.\n", par->hook_mask);
return -EINVAL;
}
@@ -575,12 +609,14 @@
.checkentry = nattype_check,
.targetsize = sizeof(struct ipt_nattype_info),
.hooks = ((1 << NF_INET_PRE_ROUTING) |
- (1 << NF_INET_FORWARD)),
+ (1 << NF_INET_POST_ROUTING)),
.me = THIS_MODULE,
};
static int __init init(void)
{
+ WARN_ON(nattype_refresh_timer);
+ RCU_INIT_POINTER(nattype_refresh_timer, nattype_refresh_timer_impl);
return xt_register_target(&nattype);
}
diff --git a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
index ea91058..1eda519 100644
--- a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
+++ b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
@@ -68,7 +68,13 @@
newrange.max_proto = range->max_proto;
/* Hand modified range to generic setup. */
+#if defined(CONFIG_IP_NF_TARGET_NATTYPE_MODULE)
+ nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC);
+ return XT_CONTINUE;
+#else
return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC);
+#endif
+
}
EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 072e80a..5b9cb3c 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -72,6 +72,12 @@
struct hlist_nulls_head *nf_conntrack_hash __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_hash);
+bool (*nattype_refresh_timer)
+ (unsigned long nattype,
+ unsigned long timeout_value)
+ __rcu __read_mostly;
+EXPORT_SYMBOL(nattype_refresh_timer);
+
struct conntrack_gc_work {
struct delayed_work dwork;
u32 last_bucket;
@@ -185,6 +191,7 @@
EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
unsigned int nf_conntrack_max __read_mostly;
+
seqcount_t nf_conntrack_generation __read_mostly;
unsigned int nf_conntrack_pkt_threshold __read_mostly;
@@ -193,7 +200,8 @@
DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
-static unsigned int nf_conntrack_hash_rnd __read_mostly;
+unsigned int nf_conntrack_hash_rnd __read_mostly;
+EXPORT_SYMBOL(nf_conntrack_hash_rnd);
static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
const struct net *net)
@@ -396,6 +404,9 @@
struct nf_conn *ct = (struct nf_conn *)nfct;
struct nf_conntrack_l4proto *l4proto;
void (*delete_entry)(struct nf_conn *ct);
+ struct sip_list *sip_node = NULL;
+ struct list_head *sip_node_list;
+ struct list_head *sip_node_save_list;
pr_debug("destroy_conntrack(%pK)\n", ct);
NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
@@ -423,6 +434,14 @@
rcu_read_unlock();
local_bh_disable();
+
+ pr_debug("freeing item in the SIP list\n");
+ list_for_each_safe(sip_node_list, sip_node_save_list,
+ &ct->sip_segment_list) {
+ sip_node = list_entry(sip_node_list, struct sip_list, list);
+ list_del(&sip_node->list);
+ kfree(sip_node);
+ }
/* Expectations will have been removed in clean_from_lists,
* except TFTP can create an expectation on the first packet,
* before connection is in the list, so we need to clean here,
@@ -1094,6 +1113,9 @@
nf_ct_zone_add(ct, zone);
+#if defined(CONFIG_IP_NF_TARGET_NATTYPE_MODULE)
+ ct->nattype_entry = 0;
+#endif
/* Because we use RCU lookups, we set ct_general.use to zero before
* this is inserted in any list.
*/
@@ -1197,6 +1219,7 @@
GFP_ATOMIC);
local_bh_disable();
+ INIT_LIST_HEAD(&ct->sip_segment_list);
if (net->ct.expect_count) {
spin_lock(&nf_conntrack_expect_lock);
exp = nf_ct_find_expectation(net, zone, tuple);
@@ -1220,6 +1243,10 @@
#ifdef CONFIG_NF_CONNTRACK_SECMARK
ct->secmark = exp->master->secmark;
#endif
+/* Initialize the NAT type entry. */
+#if defined(CONFIG_IP_NF_TARGET_NATTYPE_MODULE)
+ ct->nattype_entry = 0;
+#endif
NF_CT_STAT_INC(net, expect_new);
}
spin_unlock(&nf_conntrack_expect_lock);
@@ -1460,6 +1487,11 @@
{
struct nf_conn_acct *acct;
u64 pkts;
+#if defined(CONFIG_IP_NF_TARGET_NATTYPE_MODULE)
+ bool (*nattype_ref_timer)
+ (unsigned long nattype,
+ unsigned long timeout_value);
+#endif
NF_CT_ASSERT(skb);
@@ -1472,6 +1504,13 @@
extra_jiffies += nfct_time_stamp;
ct->timeout = extra_jiffies;
+/* Refresh the NAT type entry. */
+#if defined(CONFIG_IP_NF_TARGET_NATTYPE_MODULE)
+ nattype_ref_timer = rcu_dereference(nattype_refresh_timer);
+ if (nattype_ref_timer)
+ nattype_ref_timer(ct->nattype_entry, ct->timeout.expires);
+#endif
+
acct:
if (do_acct) {
acct = nf_conn_acct_find(ct);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 6bd58eea..1ce25f5 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1540,12 +1540,23 @@
const struct nlattr * const cda[])
{
u_int32_t timeout = ntohl(nla_get_be32(cda[CTA_TIMEOUT]));
+#if defined(CONFIG_IP_NF_TARGET_NATTYPE_MODULE)
+ bool (*nattype_ref_timer)
+ (unsigned long nattype,
+ unsigned long timeout_value);
+#endif
ct->timeout = nfct_time_stamp + timeout * HZ;
if (test_bit(IPS_DYING_BIT, &ct->status))
return -ETIME;
+/* Refresh the NAT type entry. */
+#if defined(CONFIG_IP_NF_TARGET_NATTYPE_MODULE)
+ nattype_ref_timer = rcu_dereference(nattype_refresh_timer);
+ if (nattype_ref_timer)
+ nattype_ref_timer(ct->nattype_entry, ct->timeout.expires);
+#endif
return 0;
}
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index f132ef9..6d6731f 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1,5 +1,6 @@
/* SIP extension for IP connection tracking.
*
+ * Copyright (c) 2015,2017, The Linux Foundation. All rights reserved.
* (C) 2005 by Christian Hentschel <chentschel@arnet.com.ar>
* based on RR's ip_conntrack_ftp.c and other modules.
* (C) 2007 United Security Providers
@@ -20,13 +21,18 @@
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/netfilter.h>
-
+#include <net/tcp.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <linux/netfilter/nf_conntrack_sip.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_l3proto.h>
+#include <net/netfilter/nf_nat_l4proto.h>
+#include <net/netfilter/nf_queue.h>
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hentschel <chentschel@arnet.com.ar>");
@@ -54,6 +60,12 @@
static struct ctl_table_header *sip_sysctl_header;
static unsigned int nf_ct_disable_sip_alg;
static int sip_direct_media = 1;
+static unsigned int nf_ct_enable_sip_segmentation;
+static int packet_count;
+static
+int proc_sip_segment(struct ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
+
static struct ctl_table sip_sysctl_tbl[] = {
{
.procname = "nf_conntrack_disable_sip_alg",
@@ -69,9 +81,289 @@
.mode = 0644,
.proc_handler = proc_dointvec,
},
+ {
+ .procname = "nf_conntrack_enable_sip_segmentation",
+ .data = &nf_ct_enable_sip_segmentation,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_sip_segment,
+ },
{}
};
+static unsigned int (*nf_nat_sip_hook)
+ (struct sk_buff *skb,
+ unsigned int protoff,
+ unsigned int dataoff,
+ const char **dptr,
+ unsigned int *datalen)
+ __read_mostly;
+EXPORT_SYMBOL(nf_nat_sip_hook);
+static void sip_calculate_parameters(s16 *diff, s16 *tdiff,
+ unsigned int *dataoff, const char **dptr,
+ unsigned int *datalen,
+ unsigned int msglen, unsigned int origlen)
+{
+ *diff = msglen - origlen;
+ *tdiff += *diff;
+ *dataoff += msglen;
+ *dptr += msglen;
+ *datalen = *datalen + *diff - msglen;
+}
+
+static void sip_update_params(enum ip_conntrack_dir dir,
+ unsigned int *msglen, unsigned int *origlen,
+ const char **dptr, unsigned int *datalen,
+ bool skb_is_combined, struct nf_conn *ct)
+{
+ if (skb_is_combined) {
+ /* The msglen of first skb has the total msg length of
+ * the two fragments. hence after combining,we update
+ * the msglen to that of the msglen of first skb
+ */
+ *msglen = (dir == IP_CT_DIR_ORIGINAL) ?
+ ct->segment.msg_length[0] : ct->segment.msg_length[1];
+ *origlen = *msglen;
+ *dptr = ct->dptr_prev;
+ *datalen = *msglen;
+ }
+}
+
+/* This function is to save all the information of the first segment
+ * that will be needed for combining the two segments
+ */
+static bool sip_save_segment_info(struct nf_conn *ct, struct sk_buff *skb,
+ unsigned int msglen, unsigned int datalen,
+ const char *dptr,
+ enum ip_conntrack_info ctinfo)
+{
+ enum ip_conntrack_dir dir = IP_CT_DIR_MAX;
+ bool skip = false;
+
+ /* one set of information is saved per direction ,also only one segment
+ * per direction is queued based on the assumption that after the first
+ * complete message leaves the kernel, only then the next fragmented
+ * segment will reach the kernel
+ */
+ dir = CTINFO2DIR(ctinfo);
+ if (dir == IP_CT_DIR_ORIGINAL) {
+ /* here we check if there is already an element queued for this
+ * direction, in that case we do not queue the next element,we
+ * make skip 1.ideally this scenario should never be hit
+ */
+ if (ct->sip_original_dir == 1) {
+ skip = true;
+ } else {
+ ct->segment.msg_length[0] = msglen;
+ ct->segment.data_len[0] = datalen;
+ ct->segment.skb_len[0] = skb->len;
+ ct->dptr_prev = dptr;
+ ct->sip_original_dir = 1;
+ skip = false;
+ }
+ } else {
+ if (ct->sip_reply_dir == 1) {
+ skip = true;
+ } else {
+ if (ct->sip_reply_dir == 1) {
+ skip = true;
+ } else {
+ ct->segment.msg_length[1] = msglen;
+ ct->segment.data_len[1] = datalen;
+ ct->segment.skb_len[1] = skb->len;
+ ct->dptr_prev = dptr;
+ ct->sip_reply_dir = 1;
+ skip = false;
+ }
+ }
+ }
+ return skip;
+}
+
+static struct sip_list *sip_coalesce_segments(struct nf_conn *ct,
+ struct sk_buff **skb_ref,
+ unsigned int dataoff,
+ struct sk_buff **combined_skb_ref,
+ bool *skip_sip_process,
+ bool do_not_process,
+ enum ip_conntrack_info ctinfo,
+ bool *success)
+
+{
+ struct list_head *list_trav_node;
+ struct list_head *list_backup_node;
+ struct nf_conn *ct_list;
+ enum ip_conntrack_info ctinfo_list;
+ enum ip_conntrack_dir dir_list;
+ enum ip_conntrack_dir dir = IP_CT_DIR_MAX;
+ const struct tcphdr *th_old;
+ unsigned int prev_data_len;
+ unsigned int seq_no, seq_old, exp_seq_no;
+ const struct tcphdr *th_new;
+ bool fragstolen = false;
+ int delta_truesize = 0;
+ struct sip_list *sip_entry = NULL;
+
+ th_new = (struct tcphdr *)(skb_network_header(*skb_ref) +
+ ip_hdrlen(*skb_ref));
+ seq_no = ntohl(th_new->seq);
+
+ if (ct) {
+ dir = CTINFO2DIR(ctinfo);
+ /* traverse the list it would have 1 or 2 elements. 1 element
+ * per direction at max
+ */
+ list_for_each_safe(list_trav_node, list_backup_node,
+ &ct->sip_segment_list){
+ sip_entry = list_entry(list_trav_node, struct sip_list,
+ list);
+ ct_list = nf_ct_get(sip_entry->entry->skb,
+ &ctinfo_list);
+ dir_list = CTINFO2DIR(ctinfo_list);
+ /* take an element and check if its direction matches
+ * with the current one
+ */
+ if (dir_list == dir) {
+ /* once we have the two elements to be combined
+ * we do another check. match the next expected
+ * seq no of the packet in the list with the
+ * seq no of the current packet.this is to be
+ * protected against out of order fragments
+ */
+ th_old = ((struct tcphdr *)(skb_network_header
+ (sip_entry->entry->skb) +
+ ip_hdrlen(sip_entry->entry->skb)));
+
+ prev_data_len = (dir == IP_CT_DIR_ORIGINAL) ?
+ ct->segment.data_len[0] :
+ ct->segment.data_len[1];
+ seq_old = (ntohl(th_old->seq));
+ exp_seq_no = seq_old + prev_data_len;
+
+ if (exp_seq_no == seq_no) {
+ /* Found packets to be combined.Pull
+ * header from second skb when
+ * preparing combined skb.This shifts
+ * the second skb start pointer to its
+ * data that was initially at the start
+ * of its headers.This so that the
+ * combined skb has the tcp ip headerof
+ * the first skb followed by the data
+ * of first skb followed by the data
+ * of second skb.
+ */
+ skb_pull(*skb_ref, dataoff);
+ if (skb_try_coalesce(
+ sip_entry->entry->skb,
+ *skb_ref, &fragstolen,
+ &delta_truesize)) {
+ pr_debug(" Combining segments\n");
+ *combined_skb_ref =
+ sip_entry->entry->skb;
+ *success = true;
+ list_del(list_trav_node);
+ } else{
+ skb_push(*skb_ref, dataoff);
+ }
+ }
+ } else if (do_not_process) {
+ *skip_sip_process = true;
+ }
+ }
+ }
+ return sip_entry;
+}
+
+static void recalc_header(struct sk_buff *skb, unsigned int skblen,
+ unsigned int oldlen, unsigned int protoff)
+{
+ unsigned int datalen;
+ struct tcphdr *tcph;
+ const struct nf_nat_l3proto *l3proto;
+
+ /* here we recalculate ip and tcp headers */
+ if (nf_ct_l3num((struct nf_conn *)skb->nfct) == NFPROTO_IPV4) {
+ /* fix IP hdr checksum information */
+ ip_hdr(skb)->tot_len = htons(skblen);
+ ip_send_check(ip_hdr(skb));
+ } else {
+ ipv6_hdr(skb)->payload_len =
+ htons(skblen - sizeof(struct ipv6hdr));
+ }
+ datalen = skb->len - protoff;
+ tcph = (struct tcphdr *)((void *)skb->data + protoff);
+ l3proto = __nf_nat_l3proto_find(nf_ct_l3num
+ ((struct nf_conn *)skb->nfct));
+ l3proto->csum_recalc(skb, IPPROTO_TCP, tcph, &tcph->check,
+ datalen, oldlen);
+}
+
+void (*nf_nat_sip_seq_adjust_hook)
+ (struct sk_buff *skb,
+ unsigned int protoff,
+ s16 off);
+
+static unsigned int (*nf_nat_sip_expect_hook)
+ (struct sk_buff *skb,
+ unsigned int protoff,
+ unsigned int dataoff,
+ const char **dptr,
+ unsigned int *datalen,
+ struct nf_conntrack_expect *exp,
+ unsigned int matchoff,
+ unsigned int matchlen)
+ __read_mostly;
+EXPORT_SYMBOL(nf_nat_sip_expect_hook);
+
+static unsigned int (*nf_nat_sdp_addr_hook)
+ (struct sk_buff *skb,
+ unsigned int protoff,
+ unsigned int dataoff,
+ const char **dptr,
+ unsigned int *datalen,
+ unsigned int sdpoff,
+ enum sdp_header_types type,
+ enum sdp_header_types term,
+ const union nf_inet_addr *addr)
+ __read_mostly;
+EXPORT_SYMBOL(nf_nat_sdp_addr_hook);
+
+static unsigned int (*nf_nat_sdp_port_hook)
+ (struct sk_buff *skb,
+ unsigned int protoff,
+ unsigned int dataoff,
+ const char **dptr,
+ unsigned int *datalen,
+ unsigned int matchoff,
+ unsigned int matchlen,
+ u_int16_t port) __read_mostly;
+EXPORT_SYMBOL(nf_nat_sdp_port_hook);
+
+static unsigned int (*nf_nat_sdp_session_hook)
+ (struct sk_buff *skb,
+ unsigned int protoff,
+ unsigned int dataoff,
+ const char **dptr,
+ unsigned int *datalen,
+ unsigned int sdpoff,
+ const union nf_inet_addr *addr)
+ __read_mostly;
+EXPORT_SYMBOL(nf_nat_sdp_session_hook);
+
+static unsigned int (*nf_nat_sdp_media_hook)
+ (struct sk_buff *skb,
+ unsigned int protoff,
+ unsigned int dataoff,
+ const char **dptr,
+ unsigned int *datalen,
+ struct nf_conntrack_expect *rtp_exp,
+ struct nf_conntrack_expect *rtcp_exp,
+ unsigned int mediaoff,
+ unsigned int medialen,
+ union nf_inet_addr *rtp_addr)
+ __read_mostly;
+EXPORT_SYMBOL(nf_nat_sdp_media_hook);
+
static int string_len(const struct nf_conn *ct, const char *dptr,
const char *limit, int *shift)
{
@@ -84,6 +376,43 @@
return len;
}
+static int nf_sip_enqueue_packet(struct nf_queue_entry *entry,
+ unsigned int queuenum)
+{
+ enum ip_conntrack_info ctinfo_list;
+ struct nf_conn *ct_temp;
+ struct sip_list *node = kzalloc(sizeof(*node),
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (!node)
+ return XT_CONTINUE;
+
+ ct_temp = nf_ct_get(entry->skb, &ctinfo_list);
+ node->entry = entry;
+ list_add(&node->list, &ct_temp->sip_segment_list);
+ return 0;
+}
+
+static const struct nf_queue_handler nf_sip_qh = {
+ .outfn = &nf_sip_enqueue_packet,
+};
+
+static
+int proc_sip_segment(struct ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ int ret;
+
+ ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+ if (nf_ct_enable_sip_segmentation) {
+ pr_debug("registering queue handler\n");
+ nf_register_queue_handler(&init_net, &nf_sip_qh);
+ } else {
+ pr_debug("de-registering queue handler\n");
+ nf_unregister_queue_handler(&init_net);
+ }
+ return ret;
+}
+
static int digits_len(const struct nf_conn *ct, const char *dptr,
const char *limit, int *shift)
{
@@ -1505,13 +1834,29 @@
struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
struct tcphdr *th, _tcph;
- unsigned int dataoff, datalen;
+ unsigned int dataoff;
unsigned int matchoff, matchlen, clen;
- unsigned int msglen, origlen;
const char *dptr, *end;
s16 diff, tdiff = 0;
int ret = NF_ACCEPT;
bool term;
+ unsigned int datalen = 0, msglen = 0, origlen = 0;
+ unsigned int dataoff_orig = 0;
+ unsigned int splitlen, oldlen, oldlen1;
+ struct sip_list *sip_entry = NULL;
+ bool skip_sip_process = false;
+ bool do_not_process = false;
+ bool skip = false;
+ bool skb_is_combined = false;
+ enum ip_conntrack_dir dir = IP_CT_DIR_MAX;
+ struct sk_buff *combined_skb = NULL;
+ bool content_len_exists = 1;
+
+ packet_count++;
+ pr_debug("packet count %d\n", packet_count);
+
+ if (nf_ct_disable_sip_alg)
+ return NF_ACCEPT;
if (ctinfo != IP_CT_ESTABLISHED &&
ctinfo != IP_CT_ESTABLISHED_REPLY)
@@ -1535,11 +1880,26 @@
if (datalen < strlen("SIP/2.0 200"))
return NF_ACCEPT;
+ /* here we save the original datalength and data offset of the skb, this
+ * is needed later to split combined skbs
+ */
+ oldlen1 = skb->len - protoff;
+ dataoff_orig = dataoff;
+
+ if (!ct)
+ return NF_DROP;
while (1) {
if (ct_sip_get_header(ct, dptr, 0, datalen,
SIP_HDR_CONTENT_LENGTH,
- &matchoff, &matchlen) <= 0)
+ &matchoff, &matchlen) <= 0){
+ if (nf_ct_enable_sip_segmentation) {
+ do_not_process = true;
+ content_len_exists = 0;
+ goto destination;
+ } else {
break;
+ }
+ }
clen = simple_strtoul(dptr + matchoff, (char **)&end, 10);
if (dptr + matchoff == end)
@@ -1555,26 +1915,111 @@
}
if (!term)
break;
+
end += strlen("\r\n\r\n") + clen;
+destination:
- msglen = origlen = end - dptr;
- if (msglen > datalen)
+ if (content_len_exists == 0) {
+ origlen = datalen;
+ msglen = origlen;
+ } else {
+ origlen = end - dptr;
+ msglen = origlen;
+ }
+ pr_debug("mslgen %d datalen %d\n", msglen, datalen);
+ dir = CTINFO2DIR(ctinfo);
+ combined_skb = skb;
+ if (nf_ct_enable_sip_segmentation) {
+ /* Segmented Packet */
+ if (msglen > datalen) {
+ skip = sip_save_segment_info(ct, skb, msglen,
+ datalen, dptr,
+ ctinfo);
+ if (!skip)
+ return NF_QUEUE;
+ }
+ /* Traverse list to find prev segment */
+ /*Traverse the list if list non empty */
+ if (((&ct->sip_segment_list)->next) !=
+ (&ct->sip_segment_list)) {
+ /* Combine segments if they are fragments of
+ * the same message.
+ */
+ sip_entry = sip_coalesce_segments(ct, &skb,
+ dataoff,
+ &combined_skb,
+ &skip_sip_process,
+ do_not_process,
+ ctinfo,
+ &skb_is_combined);
+ sip_update_params(dir, &msglen, &origlen, &dptr,
+ &datalen,
+ skb_is_combined, ct);
+
+ if (skip_sip_process)
+ goto here;
+ } else if (do_not_process) {
+ goto here;
+ }
+ } else if (msglen > datalen) {
return NF_ACCEPT;
-
- ret = process_sip_msg(skb, ct, protoff, dataoff,
+ }
+ /* process the combined skb having the complete SIP message */
+ ret = process_sip_msg(combined_skb, ct, protoff, dataoff,
&dptr, &msglen);
+
/* process_sip_* functions report why this packet is dropped */
if (ret != NF_ACCEPT)
break;
- diff = msglen - origlen;
- tdiff += diff;
-
- dataoff += msglen;
- dptr += msglen;
- datalen = datalen + diff - msglen;
+ sip_calculate_parameters(&diff, &tdiff, &dataoff, &dptr,
+ &datalen, msglen, origlen);
+ if (nf_ct_enable_sip_segmentation && skb_is_combined)
+ break;
+ }
+ if (skb_is_combined) {
+ /* once combined skb is processed, split the skbs again The
+ * length to split at is the same as length of first skb. Any
+ * changes in the combined skb length because of SIP processing
+ * will reflect in the second fragment
+ */
+ splitlen = (dir == IP_CT_DIR_ORIGINAL) ?
+ ct->segment.skb_len[0] : ct->segment.skb_len[1];
+ oldlen = combined_skb->len - protoff;
+ skb_split(combined_skb, skb, splitlen);
+ /* Headers need to be recalculated since during SIP processing
+ * headers are calculated based on the change in length of the
+ * combined message
+ */
+ recalc_header(combined_skb, splitlen, oldlen, protoff);
+ /* Reinject the first skb now that the processing is complete */
+ if (sip_entry) {
+ nf_reinject(sip_entry->entry, NF_ACCEPT);
+ kfree(sip_entry);
+ }
+ skb->len = (oldlen1 + protoff) + tdiff - dataoff_orig;
+ /* After splitting, push the headers back to the first skb which
+ * were removed before combining the skbs.This moves the skb
+ * begin pointer back to the beginning of its headers
+ */
+ skb_push(skb, dataoff_orig);
+ /* Since the length of this second segment willbe affected
+ * because of SIP processing,we need to recalculate its header
+ * as well.
+ */
+ recalc_header(skb, skb->len, oldlen1, protoff);
+ /* Now that the processing is done and the first skb reinjected.
+ * We allow addition of fragmented skbs to the list for this
+ * direction
+ */
+ if (dir == IP_CT_DIR_ORIGINAL)
+ ct->sip_original_dir = 0;
+ else
+ ct->sip_reply_dir = 0;
}
- if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) {
+here:
+
+ if (ret == NF_ACCEPT && ct && ct->status & IPS_NAT_MASK) {
const struct nf_nat_sip_hooks *hooks;
hooks = rcu_dereference(nf_nat_sip_hooks);
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index 6c1e024..7c33955 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -159,8 +159,34 @@
else
*dest = PACKET_BROADCAST;
break;
+ case NFPROTO_NETDEV:
+ switch (skb->protocol) {
+ case htons(ETH_P_IP): {
+ int noff = skb_network_offset(skb);
+ struct iphdr *iph, _iph;
+
+ iph = skb_header_pointer(skb, noff,
+ sizeof(_iph), &_iph);
+ if (!iph)
+ goto err;
+
+ if (ipv4_is_multicast(iph->daddr))
+ *dest = PACKET_MULTICAST;
+ else
+ *dest = PACKET_BROADCAST;
+
+ break;
+ }
+ case htons(ETH_P_IPV6):
+ *dest = PACKET_MULTICAST;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ goto err;
+ }
+ break;
default:
- WARN_ON(1);
+ WARN_ON_ONCE(1);
goto err;
}
break;
diff --git a/net/wireless/db.txt b/net/wireless/db.txt
index f323faf..ff9887f 100644
--- a/net/wireless/db.txt
+++ b/net/wireless/db.txt
@@ -1477,9 +1477,9 @@
country VN: DFS-FCC
(2402 - 2482 @ 40), (20)
- (5170 - 5250 @ 80), (24), AUTO-BW
- (5250 - 5330 @ 80), (24), DFS, AUTO-BW
- (5490 - 5730 @ 160), (24), DFS
+ (5170 - 5250 @ 80), (24)
+ (5250 - 5330 @ 80), (24), DFS
+ (5490 - 5730 @ 80), (24), DFS
(5735 - 5835 @ 80), (30)
# 60 gHz band channels 1-4
(57240 - 65880 @ 2160), (40)
diff --git a/samples/trace_events/trace-events-sample.c b/samples/trace_events/trace-events-sample.c
index 880a7d1..4ccff66 100644
--- a/samples/trace_events/trace-events-sample.c
+++ b/samples/trace_events/trace-events-sample.c
@@ -78,28 +78,36 @@
}
static DEFINE_MUTEX(thread_mutex);
+static int simple_thread_cnt;
void foo_bar_reg(void)
{
+ mutex_lock(&thread_mutex);
+ if (simple_thread_cnt++)
+ goto out;
+
pr_info("Starting thread for foo_bar_fn\n");
/*
* We shouldn't be able to start a trace when the module is
* unloading (there's other locks to prevent that). But
* for consistency sake, we still take the thread_mutex.
*/
- mutex_lock(&thread_mutex);
simple_tsk_fn = kthread_run(simple_thread_fn, NULL, "event-sample-fn");
+ out:
mutex_unlock(&thread_mutex);
}
void foo_bar_unreg(void)
{
- pr_info("Killing thread for foo_bar_fn\n");
- /* protect against module unloading */
mutex_lock(&thread_mutex);
+ if (--simple_thread_cnt)
+ goto out;
+
+ pr_info("Killing thread for foo_bar_fn\n");
if (simple_tsk_fn)
kthread_stop(simple_tsk_fn);
simple_tsk_fn = NULL;
+ out:
mutex_unlock(&thread_mutex);
}
diff --git a/security/Kconfig b/security/Kconfig
index 5693989..4415de2 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -6,6 +6,11 @@
source security/keys/Kconfig
+if ARCH_QCOM
+source security/pfe/Kconfig
+endif
+
+
config SECURITY_DMESG_RESTRICT
bool "Restrict unprivileged access to the kernel syslog"
default n
diff --git a/security/Makefile b/security/Makefile
index f2d71cd..79166ba 100644
--- a/security/Makefile
+++ b/security/Makefile
@@ -9,6 +9,7 @@
subdir-$(CONFIG_SECURITY_APPARMOR) += apparmor
subdir-$(CONFIG_SECURITY_YAMA) += yama
subdir-$(CONFIG_SECURITY_LOADPIN) += loadpin
+subdir-$(CONFIG_ARCH_QCOM) += pfe
# always enable default capabilities
obj-y += commoncap.o
@@ -24,6 +25,7 @@
obj-$(CONFIG_SECURITY_APPARMOR) += apparmor/
obj-$(CONFIG_SECURITY_YAMA) += yama/
obj-$(CONFIG_SECURITY_LOADPIN) += loadpin/
+obj-$(CONFIG_ARCH_QCOM) += pfe/
obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o
# Object integrity file lists
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 57bc405..935752c 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -671,9 +671,9 @@
module_param_call(mode, param_set_mode, param_get_mode,
&aa_g_profile_mode, S_IRUSR | S_IWUSR);
-#ifdef CONFIG_SECURITY_APPARMOR_HASH
/* whether policy verification hashing is enabled */
bool aa_g_hash_policy = IS_ENABLED(CONFIG_SECURITY_APPARMOR_HASH_DEFAULT);
+#ifdef CONFIG_SECURITY_APPARMOR_HASH
module_param_named(hash_policy, aa_g_hash_policy, aabool, S_IRUSR | S_IWUSR);
#endif
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 32969f6..4e9b4d2 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -452,34 +452,33 @@
char __user *buffer, size_t buflen)
{
struct keyring_read_iterator_context ctx;
- unsigned long nr_keys;
- int ret;
+ long ret;
kenter("{%d},,%zu", key_serial(keyring), buflen);
if (buflen & (sizeof(key_serial_t) - 1))
return -EINVAL;
- nr_keys = keyring->keys.nr_leaves_on_tree;
- if (nr_keys == 0)
- return 0;
-
- /* Calculate how much data we could return */
- if (!buffer || !buflen)
- return nr_keys * sizeof(key_serial_t);
-
- /* Copy the IDs of the subscribed keys into the buffer */
- ctx.buffer = (key_serial_t __user *)buffer;
- ctx.buflen = buflen;
- ctx.count = 0;
- ret = assoc_array_iterate(&keyring->keys, keyring_read_iterator, &ctx);
- if (ret < 0) {
- kleave(" = %d [iterate]", ret);
- return ret;
+ /* Copy as many key IDs as fit into the buffer */
+ if (buffer && buflen) {
+ ctx.buffer = (key_serial_t __user *)buffer;
+ ctx.buflen = buflen;
+ ctx.count = 0;
+ ret = assoc_array_iterate(&keyring->keys,
+ keyring_read_iterator, &ctx);
+ if (ret < 0) {
+ kleave(" = %ld [iterate]", ret);
+ return ret;
+ }
}
- kleave(" = %zu [ok]", ctx.count);
- return ctx.count;
+ /* Return the size of the buffer needed */
+ ret = keyring->keys.nr_leaves_on_tree * sizeof(key_serial_t);
+ if (ret <= buflen)
+ kleave("= %ld [ok]", ret);
+ else
+ kleave("= %ld [buffer too small]", ret);
+ return ret;
}
/*
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
index f4db42e..4ba2f6b 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted.c
@@ -70,7 +70,7 @@
}
ret = crypto_shash_digest(&sdesc->shash, data, datalen, digest);
- kfree(sdesc);
+ kzfree(sdesc);
return ret;
}
@@ -114,7 +114,7 @@
if (!ret)
ret = crypto_shash_final(&sdesc->shash, digest);
out:
- kfree(sdesc);
+ kzfree(sdesc);
return ret;
}
@@ -165,7 +165,7 @@
paramdigest, TPM_NONCE_SIZE, h1,
TPM_NONCE_SIZE, h2, 1, &c, 0, 0);
out:
- kfree(sdesc);
+ kzfree(sdesc);
return ret;
}
@@ -246,7 +246,7 @@
if (memcmp(testhmac, authdata, SHA1_DIGEST_SIZE))
ret = -EINVAL;
out:
- kfree(sdesc);
+ kzfree(sdesc);
return ret;
}
@@ -347,7 +347,7 @@
if (memcmp(testhmac2, authdata2, SHA1_DIGEST_SIZE))
ret = -EINVAL;
out:
- kfree(sdesc);
+ kzfree(sdesc);
return ret;
}
@@ -564,7 +564,7 @@
*bloblen = storedsize;
}
out:
- kfree(td);
+ kzfree(td);
return ret;
}
@@ -678,7 +678,7 @@
if (ret < 0)
pr_info("trusted_key: srkseal failed (%d)\n", ret);
- kfree(tb);
+ kzfree(tb);
return ret;
}
@@ -703,7 +703,7 @@
/* pull migratable flag out of sealed key */
p->migratable = p->key[--p->key_len];
- kfree(tb);
+ kzfree(tb);
return ret;
}
@@ -1037,12 +1037,12 @@
if (!ret && options->pcrlock)
ret = pcrlock(options->pcrlock);
out:
- kfree(datablob);
- kfree(options);
+ kzfree(datablob);
+ kzfree(options);
if (!ret)
rcu_assign_keypointer(key, payload);
else
- kfree(payload);
+ kzfree(payload);
return ret;
}
@@ -1051,8 +1051,7 @@
struct trusted_key_payload *p;
p = container_of(rcu, struct trusted_key_payload, rcu);
- memset(p->key, 0, p->key_len);
- kfree(p);
+ kzfree(p);
}
/*
@@ -1094,13 +1093,13 @@
ret = datablob_parse(datablob, new_p, new_o);
if (ret != Opt_update) {
ret = -EINVAL;
- kfree(new_p);
+ kzfree(new_p);
goto out;
}
if (!new_o->keyhandle) {
ret = -EINVAL;
- kfree(new_p);
+ kzfree(new_p);
goto out;
}
@@ -1114,22 +1113,22 @@
ret = key_seal(new_p, new_o);
if (ret < 0) {
pr_info("trusted_key: key_seal failed (%d)\n", ret);
- kfree(new_p);
+ kzfree(new_p);
goto out;
}
if (new_o->pcrlock) {
ret = pcrlock(new_o->pcrlock);
if (ret < 0) {
pr_info("trusted_key: pcrlock failed (%d)\n", ret);
- kfree(new_p);
+ kzfree(new_p);
goto out;
}
}
rcu_assign_keypointer(key, new_p);
call_rcu(&p->rcu, trusted_rcu_free);
out:
- kfree(datablob);
- kfree(new_o);
+ kzfree(datablob);
+ kzfree(new_o);
return ret;
}
@@ -1148,34 +1147,30 @@
p = rcu_dereference_key(key);
if (!p)
return -EINVAL;
- if (!buffer || buflen <= 0)
- return 2 * p->blob_len;
- ascii_buf = kmalloc(2 * p->blob_len, GFP_KERNEL);
- if (!ascii_buf)
- return -ENOMEM;
- bufp = ascii_buf;
- for (i = 0; i < p->blob_len; i++)
- bufp = hex_byte_pack(bufp, p->blob[i]);
- if ((copy_to_user(buffer, ascii_buf, 2 * p->blob_len)) != 0) {
- kfree(ascii_buf);
- return -EFAULT;
+ if (buffer && buflen >= 2 * p->blob_len) {
+ ascii_buf = kmalloc(2 * p->blob_len, GFP_KERNEL);
+ if (!ascii_buf)
+ return -ENOMEM;
+
+ bufp = ascii_buf;
+ for (i = 0; i < p->blob_len; i++)
+ bufp = hex_byte_pack(bufp, p->blob[i]);
+ if (copy_to_user(buffer, ascii_buf, 2 * p->blob_len) != 0) {
+ kzfree(ascii_buf);
+ return -EFAULT;
+ }
+ kzfree(ascii_buf);
}
- kfree(ascii_buf);
return 2 * p->blob_len;
}
/*
- * trusted_destroy - before freeing the key, clear the decrypted data
+ * trusted_destroy - clear and free the key's payload
*/
static void trusted_destroy(struct key *key)
{
- struct trusted_key_payload *p = key->payload.data[0];
-
- if (!p)
- return;
- memset(p->key, 0, p->key_len);
- kfree(key->payload.data[0]);
+ kzfree(key->payload.data[0]);
}
struct key_type key_type_trusted = {
diff --git a/security/pfe/Kconfig b/security/pfe/Kconfig
new file mode 100644
index 0000000..0cd9e81
--- /dev/null
+++ b/security/pfe/Kconfig
@@ -0,0 +1,28 @@
+menu "Qualcomm Technologies, Inc Per File Encryption security device drivers"
+ depends on ARCH_QCOM
+
+config PFT
+ bool "Per-File-Tagger driver"
+ depends on SECURITY
+ default n
+ help
+ This driver is used for tagging enterprise files.
+ It is part of the Per-File-Encryption (PFE) feature.
+ The driver is tagging files when created by
+ registered application.
+ Tagged files are encrypted using the dm-req-crypt driver.
+
+config PFK
+ bool "Per-File-Key driver"
+ depends on SECURITY
+ depends on SECURITY_SELINUX
+ default n
+ help
+ This driver is used for storing eCryptfs information
+ in file node.
+ This is part of eCryptfs hardware enhanced solution
+ provided by Qualcomm Technologies, Inc.
+ Information is used when file is encrypted later using
+ ICE or dm crypto engine
+
+endmenu
diff --git a/security/pfe/Makefile b/security/pfe/Makefile
new file mode 100644
index 0000000..242a216
--- /dev/null
+++ b/security/pfe/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the MSM specific security device drivers.
+#
+
+ccflags-y += -Isecurity/selinux -Isecurity/selinux/include
+ccflags-y += -Ifs/ext4
+ccflags-y += -Ifs/crypto
+
+obj-$(CONFIG_PFT) += pft.o
+obj-$(CONFIG_PFK) += pfk.o pfk_kc.o pfk_ice.o pfk_ext4.o
diff --git a/security/pfe/pfk.c b/security/pfe/pfk.c
new file mode 100644
index 0000000..615353e
--- /dev/null
+++ b/security/pfe/pfk.c
@@ -0,0 +1,483 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Per-File-Key (PFK).
+ *
+ * This driver is responsible for overall management of various
+ * Per File Encryption variants that work on top of or as part of different
+ * file systems.
+ *
+ * The driver has the following purpose :
+ * 1) Define priorities between PFE's if more than one is enabled
+ * 2) Extract key information from inode
+ * 3) Load and manage various keys in ICE HW engine
+ * 4) It should be invoked from various layers in FS/BLOCK/STORAGE DRIVER
+ * that need to take decision on HW encryption management of the data
+ * Some examples:
+ * BLOCK LAYER: when it takes decision on whether 2 chunks can be united
+ * to one encryption / decryption request sent to the HW
+ *
+ * UFS DRIVER: when it need to configure ICE HW with a particular key slot
+ * to be used for encryption / decryption
+ *
+ * PFE variants can differ on particular way of storing the cryptographic info
+ * inside inode, actions to be taken upon file operations, etc., but the common
+ * properties are described above
+ *
+ */
+
+
+/* Uncomment the line below to enable debug messages */
+/* #define DEBUG 1 */
+#define pr_fmt(fmt) "pfk [%s]: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/printk.h>
+#include <linux/bio.h>
+#include <linux/security.h>
+#include <crypto/ice.h>
+
+#include <linux/pfk.h>
+
+#include "pfk_kc.h"
+#include "objsec.h"
+#include "pfk_ice.h"
+#include "pfk_ext4.h"
+#include "pfk_internal.h"
+#include "ext4.h"
+
+static bool pfk_ready;
+
+
+/* might be replaced by a table when more than one cipher is supported */
+#define PFK_SUPPORTED_KEY_SIZE 32
+#define PFK_SUPPORTED_SALT_SIZE 32
+
+/* Various PFE types and function tables to support each one of them */
+enum pfe_type {EXT4_CRYPT_PFE, INVALID_PFE};
+
+typedef int (*pfk_parse_inode_type)(const struct bio *bio,
+ const struct inode *inode,
+ struct pfk_key_info *key_info,
+ enum ice_cryto_algo_mode *algo,
+ bool *is_pfe);
+
+typedef bool (*pfk_allow_merge_bio_type)(const struct bio *bio1,
+ const struct bio *bio2, const struct inode *inode1,
+ const struct inode *inode2);
+
+static const pfk_parse_inode_type pfk_parse_inode_ftable[] = {
+ /* EXT4_CRYPT_PFE */ &pfk_ext4_parse_inode,
+};
+
+static const pfk_allow_merge_bio_type pfk_allow_merge_bio_ftable[] = {
+ /* EXT4_CRYPT_PFE */ &pfk_ext4_allow_merge_bio,
+};
+
+static void __exit pfk_exit(void)
+{
+ pfk_ready = false;
+ pfk_ext4_deinit();
+ pfk_kc_deinit();
+}
+
+static int __init pfk_init(void)
+{
+
+ int ret = 0;
+
+ ret = pfk_ext4_init();
+ if (ret != 0)
+ goto fail;
+
+ ret = pfk_kc_init();
+ if (ret != 0) {
+ pr_err("could init pfk key cache, error %d\n", ret);
+ pfk_ext4_deinit();
+ goto fail;
+ }
+
+ pfk_ready = true;
+ pr_info("Driver initialized successfully\n");
+
+ return 0;
+
+fail:
+ pr_err("Failed to init driver\n");
+ return -ENODEV;
+}
+
+/*
+ * If more than one type is supported simultaneously, this function will also
+ * set the priority between them
+ */
+static enum pfe_type pfk_get_pfe_type(const struct inode *inode)
+{
+ if (!inode)
+ return INVALID_PFE;
+
+ if (pfk_is_ext4_type(inode))
+ return EXT4_CRYPT_PFE;
+
+ return INVALID_PFE;
+}
+
+/**
+ * inode_to_filename() - get the filename from inode pointer.
+ * @inode: inode pointer
+ *
+ * it is used for debug prints.
+ *
+ * Return: filename string or "unknown".
+ */
+char *inode_to_filename(const struct inode *inode)
+{
+ struct dentry *dentry = NULL;
+ char *filename = NULL;
+
+ if (hlist_empty(&inode->i_dentry))
+ return "unknown";
+
+ dentry = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
+ filename = dentry->d_iname;
+
+ return filename;
+}
+
+/**
+ * pfk_is_ready() - driver is initialized and ready.
+ *
+ * Return: true if the driver is ready.
+ */
+static inline bool pfk_is_ready(void)
+{
+ return pfk_ready;
+}
+
+/**
+ * pfk_bio_get_inode() - get the inode from a bio.
+ * @bio: Pointer to BIO structure.
+ *
+ * Walk the bio struct links to get the inode.
+ * Please note, that in general bio may consist of several pages from
+ * several files, but in our case we always assume that all pages come
+ * from the same file, since our logic ensures it. That is why we only
+ * walk through the first page to look for inode.
+ *
+ * Return: pointer to the inode struct if successful, or NULL otherwise.
+ *
+ */
+static struct inode *pfk_bio_get_inode(const struct bio *bio)
+{
+ struct address_space *mapping;
+
+ if (!bio)
+ return NULL;
+ if (!bio->bi_io_vec)
+ return NULL;
+ if (!bio->bi_io_vec->bv_page)
+ return NULL;
+ if (!bio_has_data((struct bio *)bio))
+ return NULL;
+
+ if (PageAnon(bio->bi_io_vec->bv_page)) {
+ struct inode *inode;
+
+ //Using direct-io (O_DIRECT) without page cache
+ inode = dio_bio_get_inode((struct bio *)bio);
+ pr_debug("inode on direct-io, inode = 0x%pK.\n", inode);
+
+ return inode;
+ }
+
+ mapping = page_mapping(bio->bi_io_vec->bv_page);
+ if (!mapping)
+ return NULL;
+
+ if (!mapping->host)
+ return NULL;
+
+ return bio->bi_io_vec->bv_page->mapping->host;
+}
+
+/**
+ * pfk_key_size_to_key_type() - translate key size to key size enum
+ * @key_size: key size in bytes
+ * @key_size_type: pointer to store the output enum (can be null)
+ *
+ * Return: 0 in case of success, error otherwise (i.e not supported key size)
+ */
+int pfk_key_size_to_key_type(size_t key_size,
+		enum ice_crpto_key_size *key_size_type)
+{
+	/*
+	 * currently only a 32 byte (256 bit) key is supported;
+	 * in the future a table of supported key sizes might
+	 * be introduced
+	 */
+
+	if (key_size != PFK_SUPPORTED_KEY_SIZE) {
+		pr_err("not supported key size %zu\n", key_size);
+		return -EINVAL;
+	}
+
+	if (key_size_type)
+		*key_size_type = ICE_CRYPTO_KEY_SIZE_256;
+
+	return 0;
+}
+
+/*
+ * Returns true if @inode's superblock filesystem name equals @fs_type;
+ * returns false for any NULL argument or missing superblock. */
+bool pfe_is_inode_filesystem_type(const struct inode *inode,
+	const char *fs_type)
+{
+	if (!inode || !fs_type)
+		return false;
+
+	if (!inode->i_sb)
+		return false;
+
+	if (!inode->i_sb->s_type)
+		return false;
+
+	return (strcmp(inode->i_sb->s_type->name, fs_type) == 0);
+}
+
+
+/**
+ * pfk_load_key_start() - loads PFE encryption key to the ICE
+ * Can also be invoked from non
+ * PFE context, in this case it
+ * is not relevant and is_pfe
+ * flag is set to false
+ *
+ * @bio: Pointer to the BIO structure
+ * @ice_setting: Pointer to ice setting structure that will be filled with
+ * ice configuration values, including the index to which the key was loaded
+ * @is_pfe: will be false if inode is not relevant to PFE, in such a case
+ * it should be treated as non PFE by the block layer
+ *
+ * Returns the index where the key is stored in encryption hw and additional
+ * information that will be used later for configuration of the encryption hw.
+ *
+ * Must be followed by pfk_load_key_end when key is no longer used by ice
+ *
+ */
+int pfk_load_key_start(const struct bio *bio,
+		struct ice_crypto_setting *ice_setting, bool *is_pfe,
+		bool async)
+{
+	int ret = 0;
+	struct pfk_key_info key_info = {NULL, NULL, 0, 0};
+	enum ice_cryto_algo_mode algo_mode = ICE_CRYPTO_ALGO_MODE_AES_XTS;
+	enum ice_crpto_key_size key_size_type = 0;
+	u32 key_index = 0;
+	struct inode *inode = NULL;
+	enum pfe_type which_pfe = INVALID_PFE;
+
+	if (!is_pfe) {
+		pr_err("is_pfe is NULL\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * only a few errors below can indicate that
+	 * this function was not invoked within PFE context,
+	 * otherwise we will consider it PFE
+	 */
+	*is_pfe = true;
+
+	if (!pfk_is_ready())
+		return -ENODEV;
+
+	if (!ice_setting) {
+		pr_err("ice setting is NULL\n");
+		return -EINVAL;
+	}
+	/* resolve the inode this bio belongs to (page cache or direct-io) */
+	inode = pfk_bio_get_inode(bio);
+	if (!inode) {
+		*is_pfe = false;
+		return -EINVAL;
+	}
+	/* map the inode to one of the registered PFE types */
+	which_pfe = pfk_get_pfe_type(inode);
+	if (which_pfe == INVALID_PFE) {
+		*is_pfe = false;
+		return -EPERM;
+	}
+
+	pr_debug("parsing file %s with PFE %d\n",
+		inode_to_filename(inode), which_pfe);
+	/* let the PFE-specific parser extract key/salt from the inode */
+	ret = (*(pfk_parse_inode_ftable[which_pfe]))
+			(bio, inode, &key_info, &algo_mode, is_pfe);
+	if (ret != 0)
+		return ret;
+	/* validate the key size and translate it to the ICE enum */
+	ret = pfk_key_size_to_key_type(key_info.key_size, &key_size_type);
+	if (ret != 0)
+		return ret;
+	/* load (or look up) the key in the PFK key cache */
+	ret = pfk_kc_load_key_start(key_info.key, key_info.key_size,
+			key_info.salt, key_info.salt_size, &key_index, async);
+	if (ret) {
+		if (ret != -EBUSY && ret != -EAGAIN)
+			pr_err("start: could not load key into pfk key cache, error %d\n",
+					ret);
+
+		return ret;
+	}
+
+	ice_setting->key_size = key_size_type;
+	ice_setting->algo_mode = algo_mode;
+	/* hardcoded for now */
+	ice_setting->key_mode = ICE_CRYPTO_USE_LUT_SW_KEY;
+	ice_setting->key_index = key_index;
+
+	pr_debug("loaded key for file %s key_index %d\n",
+		inode_to_filename(inode), key_index);
+
+	return 0;
+}
+
+/**
+ * pfk_load_key_end() - marks the PFE key as no longer used by ICE
+ * Can also be invoked from non
+ * PFE context, in this case it is not
+ * relevant and is_pfe flag is
+ * set to false
+ *
+ * @bio: Pointer to the BIO structure
+ * @is_pfe: Pointer to is_pfe flag, which will be true if function was invoked
+ * from PFE context
+ */
+int pfk_load_key_end(const struct bio *bio, bool *is_pfe)
+{
+ int ret = 0;
+ struct pfk_key_info key_info = {0};
+ enum pfe_type which_pfe = INVALID_PFE;
+ struct inode *inode = NULL;
+
+ if (!is_pfe) {
+ pr_err("is_pfe is NULL\n");
+ return -EINVAL;
+ }
+
+ /* only a few errors below can indicate that
+ * this function was not invoked within PFE context,
+ * otherwise we will consider it PFE
+ */
+ *is_pfe = true;
+
+ if (!pfk_is_ready())
+ return -ENODEV;
+
+ inode = pfk_bio_get_inode(bio);
+ if (!inode) {
+ *is_pfe = false;
+ return -EINVAL;
+ }
+
+ which_pfe = pfk_get_pfe_type(inode);
+ if (which_pfe == INVALID_PFE) {
+ *is_pfe = false;
+ return -EPERM;
+ }
+
+ ret = (*(pfk_parse_inode_ftable[which_pfe]))
+ (bio, inode, &key_info, NULL, is_pfe);
+ if (ret != 0)
+ return ret;
+
+ pfk_kc_load_key_end(key_info.key, key_info.key_size,
+ key_info.salt, key_info.salt_size);
+
+ pr_debug("finished using key for file %s\n",
+ inode_to_filename(inode));
+
+ return 0;
+}
+
+/**
+ * pfk_allow_merge_bio() - Check if 2 BIOs can be merged.
+ * @bio1: Pointer to first BIO structure.
+ * @bio2: Pointer to second BIO structure.
+ *
+ * Prevent merging of BIOs from encrypted and non-encrypted
+ * files, or files encrypted with different key.
+ * Also prevent non encrypted and encrypted data from the same file
+ * to be merged (ecryptfs header if stored inside file should be non
+ * encrypted)
+ * This API is called by the file system block layer.
+ *
+ * Return: true if the BIOs allowed to be merged, false
+ * otherwise.
+ */
+bool pfk_allow_merge_bio(const struct bio *bio1, const struct bio *bio2)
+{
+	struct inode *inode1 = NULL;
+	struct inode *inode2 = NULL;
+	enum pfe_type which_pfe1 = INVALID_PFE;
+	enum pfe_type which_pfe2 = INVALID_PFE;
+
+	if (!pfk_is_ready())
+		return false;
+
+	if (!bio1 || !bio2)
+		return false;
+
+	if (bio1 == bio2)
+		return true;
+
+	inode1 = pfk_bio_get_inode(bio1);
+	inode2 = pfk_bio_get_inode(bio2);
+
+	/* NOTE(review): presumably pfk_get_pfe_type(NULL) == INVALID_PFE — confirm */
+	which_pfe1 = pfk_get_pfe_type(inode1);
+	which_pfe2 = pfk_get_pfe_type(inode2);
+
+	/* nodes with different encryption, do not merge */
+	if (which_pfe1 != which_pfe2)
+		return false;
+
+	/* both nodes do not have encryption, allow merge */
+	if (which_pfe1 == INVALID_PFE)
+		return true;
+
+	return (*(pfk_allow_merge_bio_ftable[which_pfe1]))(bio1, bio2,
+		inode1, inode2);
+}
+/**
+ * Flush key table on storage core reset. During core reset key configuration
+ * is lost in ICE. We need to flush the cache, so that the keys will be
+ * reconfigured again for every subsequent transaction
+ */
+void pfk_clear_on_reset(void)
+{
+	if (!pfk_is_ready())
+		return;
+	/* delegate to the key cache; keys get reloaded on subsequent I/O */
+	pfk_kc_clear_on_reset();
+}
+
+module_init(pfk_init);
+module_exit(pfk_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Per-File-Key driver");
diff --git a/security/pfe/pfk_ext4.c b/security/pfe/pfk_ext4.c
new file mode 100644
index 0000000..7ce70bc
--- /dev/null
+++ b/security/pfe/pfk_ext4.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Per-File-Key (PFK) - EXT4
+ *
+ * This driver is used for working with EXT4 crypt extension
+ *
+ * The key information is stored in node by EXT4 when file is first opened
+ * and will be later accessed by Block Device Driver to actually load the key
+ * to encryption hw.
+ *
+ * PFK exposes API's for loading and removing keys from encryption hw
+ * and also API to determine whether 2 adjacent blocks can be agregated by
+ * Block Layer in one request to encryption hw.
+ *
+ */
+
+
+/* Uncomment the line below to enable debug messages */
+/* #define DEBUG 1 */
+#define pr_fmt(fmt) "pfk_ext4 [%s]: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/printk.h>
+
+#include "ext4_ice.h"
+#include "pfk_ext4.h"
+
+static bool pfk_ext4_ready;
+
+/*
+ * pfk_ext4_deinit() - marks the ext4 PFE backend as not ready; invoked by PFK
+ */
+void pfk_ext4_deinit(void)
+{
+	pfk_ext4_ready = false;
+}
+
+/*
+ * pfk_ext4_init() - Init function, should be invoked by upper PFK layer
+ */
+int __init pfk_ext4_init(void)
+{
+	pfk_ext4_ready = true;
+	pr_info("PFK EXT4 inited successfully\n");
+
+	return 0;
+}
+
+/**
+ * pfk_ext4_is_ready() - driver is initialized and ready.
+ *
+ * Return: true if the driver is ready.
+ */
+static inline bool pfk_ext4_is_ready(void)
+{
+	return pfk_ext4_ready;
+}
+
+/**
+ * pfk_ext4_dump_inode() - dumps all interesting info about inode to the screen
+ *
+ *
+ */
+/*
+ * static void pfk_ext4_dump_inode(const struct inode* inode)
+ * {
+ * struct ext4_crypt_info *ci = ext4_encryption_info((struct inode*)inode);
+ *
+ * pr_debug("dumping inode with address 0x%p\n", inode);
+ * pr_debug("S_ISREG is %d\n", S_ISREG(inode->i_mode));
+ * pr_debug("EXT4_INODE_ENCRYPT flag is %d\n",
+ * ext4_test_inode_flag((struct inode*)inode, EXT4_INODE_ENCRYPT));
+ * if (ci) {
+ * pr_debug("crypt_info address 0x%p\n", ci);
+ * pr_debug("ci->ci_data_mode %d\n", ci->ci_data_mode);
+ * } else {
+ * pr_debug("crypt_info is NULL\n");
+ * }
+ * }
+ */
+
+/**
+ * pfk_is_ext4_type() - true if @inode is on ext4 and should be handled by ICE
+ * @inode: inode pointer (a NULL inode yields false)
+ */
+bool pfk_is_ext4_type(const struct inode *inode)
+{
+	if (!pfe_is_inode_filesystem_type(inode, "ext4"))
+		return false;
+
+	return ext4_should_be_processed_by_ice(inode);
+}
+
+/**
+ * pfk_ext4_parse_cipher() - parse cipher from inode to enum
+ * @inode: inode
+ * @algo: pointer to store the output enum (can be null)
+ *
+ * return 0 in case of success, error otherwise (i.e not supported cipher)
+ */
+static int pfk_ext4_parse_cipher(const struct inode *inode,
+	enum ice_cryto_algo_mode *algo)
+{
+	/*
+	 * currently only AES XTS algo is supported
+	 * in the future, table with supported ciphers might
+	 * be introduced
+	 */
+
+	if (!inode)
+		return -EINVAL;
+
+	if (!ext4_is_aes_xts_cipher(inode)) {
+		pr_err("ext4 algorithm is not supported by pfk\n");
+		return -EINVAL;
+	}
+
+	if (algo)
+		*algo = ICE_CRYPTO_ALGO_MODE_AES_XTS;
+
+	return 0;
+}
+
+
+int pfk_ext4_parse_inode(const struct bio *bio,
+ const struct inode *inode,
+ struct pfk_key_info *key_info,
+ enum ice_cryto_algo_mode *algo,
+ bool *is_pfe)
+{
+ int ret = 0;
+
+ if (!is_pfe)
+ return -EINVAL;
+
+ /*
+ * only a few errors below can indicate that
+ * this function was not invoked within PFE context,
+ * otherwise we will consider it PFE
+ */
+ *is_pfe = true;
+
+ if (!pfk_ext4_is_ready())
+ return -ENODEV;
+
+ if (!inode)
+ return -EINVAL;
+
+ if (!key_info)
+ return -EINVAL;
+
+ key_info->key = ext4_get_ice_encryption_key(inode);
+ if (!key_info->key) {
+ pr_err("could not parse key from ext4\n");
+ return -EINVAL;
+ }
+
+ key_info->key_size = ext4_get_ice_encryption_key_size(inode);
+ if (!key_info->key_size) {
+ pr_err("could not parse key size from ext4\n");
+ return -EINVAL;
+ }
+
+ key_info->salt = ext4_get_ice_encryption_salt(inode);
+ if (!key_info->salt) {
+ pr_err("could not parse salt from ext4\n");
+ return -EINVAL;
+ }
+
+ key_info->salt_size = ext4_get_ice_encryption_salt_size(inode);
+ if (!key_info->salt_size) {
+ pr_err("could not parse salt size from ext4\n");
+ return -EINVAL;
+ }
+
+ ret = pfk_ext4_parse_cipher(inode, algo);
+ if (ret != 0) {
+ pr_err("not supported cipher\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+bool pfk_ext4_allow_merge_bio(const struct bio *bio1,
+ const struct bio *bio2, const struct inode *inode1,
+ const struct inode *inode2)
+{
+ /* if there is no ext4 pfk, don't disallow merging blocks */
+ if (!pfk_ext4_is_ready())
+ return true;
+
+ if (!inode1 || !inode2)
+ return false;
+
+ return ext4_is_ice_encryption_info_equal(inode1, inode2);
+}
+
diff --git a/security/pfe/pfk_ext4.h b/security/pfe/pfk_ext4.h
new file mode 100644
index 0000000..1f33632
--- /dev/null
+++ b/security/pfe/pfk_ext4.h
@@ -0,0 +1,37 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _PFK_EXT4_H_
+#define _PFK_EXT4_H_
+
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <crypto/ice.h>
+#include "pfk_internal.h"
+
+bool pfk_is_ext4_type(const struct inode *inode);
+
+int pfk_ext4_parse_inode(const struct bio *bio,
+ const struct inode *inode,
+ struct pfk_key_info *key_info,
+ enum ice_cryto_algo_mode *algo,
+ bool *is_pfe);
+
+bool pfk_ext4_allow_merge_bio(const struct bio *bio1,
+ const struct bio *bio2, const struct inode *inode1,
+ const struct inode *inode2);
+
+int __init pfk_ext4_init(void);
+
+void pfk_ext4_deinit(void);
+
+#endif /* _PFK_EXT4_H_ */
diff --git a/security/pfe/pfk_ice.c b/security/pfe/pfk_ice.c
new file mode 100644
index 0000000..f0bbf9c
--- /dev/null
+++ b/security/pfe/pfk_ice.c
@@ -0,0 +1,188 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/async.h>
+#include <linux/mm.h>
+#include <linux/of.h>
+#include <soc/qcom/scm.h>
+#include <linux/device-mapper.h>
+#include <soc/qcom/qseecomi.h>
+#include <crypto/ice.h>
+#include "pfk_ice.h"
+
+
+/**********************************/
+/** global definitions **/
+/**********************************/
+
+#define TZ_ES_SET_ICE_KEY 0x2
+#define TZ_ES_INVALIDATE_ICE_KEY 0x3
+
+/* index 0 and 1 is reserved for FDE */
+#define MIN_ICE_KEY_INDEX 2
+
+#define MAX_ICE_KEY_INDEX 31
+
+
+#define TZ_ES_SET_ICE_KEY_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_ES, TZ_ES_SET_ICE_KEY)
+
+
+#define TZ_ES_INVALIDATE_ICE_KEY_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, \
+ TZ_SVC_ES, TZ_ES_INVALIDATE_ICE_KEY)
+
+
+#define TZ_ES_SET_ICE_KEY_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_5( \
+ TZ_SYSCALL_PARAM_TYPE_VAL, \
+ TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL, \
+ TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_ES_INVALIDATE_ICE_KEY_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_1( \
+ TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define ICE_KEY_SIZE 32
+#define ICE_SALT_SIZE 32
+
+static uint8_t ice_key[ICE_KEY_SIZE];
+/* size with the salt constant so the buffers stay correct if sizes diverge */
+static uint8_t ice_salt[ICE_SALT_SIZE];
+
+int qti_pfk_ice_set_key(uint32_t index, uint8_t *key, uint8_t *salt,
+			char *storage_type)
+{
+	struct scm_desc desc = {0};
+	int ret, ret1;
+	char *tzbuf_key = (char *)ice_key;
+	char *tzbuf_salt = (char *)ice_salt;
+	char *s_type = storage_type;
+
+	uint32_t smc_id = 0;
+	u32 tzbuflen_key = sizeof(ice_key);
+	u32 tzbuflen_salt = sizeof(ice_salt);
+
+	if (index < MIN_ICE_KEY_INDEX || index > MAX_ICE_KEY_INDEX) {
+		pr_err("%s Invalid index %d\n", __func__, index);
+		return -EINVAL;
+	}
+	if (!key || !salt) {
+		pr_err("%s Invalid key/salt\n", __func__);
+		return -EINVAL;
+	}
+
+	/*
+	 * NOTE(review): key/salt are staged in the static ice_key/ice_salt
+	 * buffers with no locking; confirm callers serialize this path.
+	 */
+
+	if (s_type == NULL) {
+		pr_err("%s Invalid Storage type\n", __func__);
+		return -EINVAL;
+	}
+
+	memset(tzbuf_key, 0, tzbuflen_key);
+	memset(tzbuf_salt, 0, tzbuflen_salt);
+
+	/* assumes @key and @salt are at least 32 bytes — TODO confirm callers */
+	memcpy(ice_key, key, tzbuflen_key);
+	memcpy(ice_salt, salt, tzbuflen_salt);
+
+	dmac_flush_range(tzbuf_key, tzbuf_key + tzbuflen_key);
+	dmac_flush_range(tzbuf_salt, tzbuf_salt + tzbuflen_salt);
+
+	smc_id = TZ_ES_SET_ICE_KEY_ID;
+
+	desc.arginfo = TZ_ES_SET_ICE_KEY_PARAM_ID;
+	desc.args[0] = index;
+	desc.args[1] = virt_to_phys(tzbuf_key);
+	desc.args[2] = tzbuflen_key;
+	desc.args[3] = virt_to_phys(tzbuf_salt);
+	desc.args[4] = tzbuflen_salt;
+
+	ret = qcom_ice_setup_ice_hw((const char *)s_type, true);
+
+	if (ret) {
+		pr_err("%s: could not enable clocks: %d\n", __func__, ret);
+		goto out;
+	}
+
+	ret = scm_call2(smc_id, &desc);
+
+	if (ret) {
+		pr_err("%s: Set Key Error: %d\n", __func__, ret);
+		if (ret == -EBUSY) {
+			if (qcom_ice_setup_ice_hw((const char *)s_type, false))
+				pr_err("%s: clock disable failed\n", __func__);
+			goto out;
+		}
+		/* Try to invalidate the key to keep ICE in a proper state */
+		smc_id = TZ_ES_INVALIDATE_ICE_KEY_ID;
+		desc.arginfo = TZ_ES_INVALIDATE_ICE_KEY_PARAM_ID;
+		desc.args[0] = index;
+		ret1 = scm_call2(smc_id, &desc);
+		if (ret1)
+			pr_err("%s: Invalidate Key Error: %d\n", __func__,
+					ret1);
+	}
+	ret1 = qcom_ice_setup_ice_hw((const char *)s_type, false);
+	ret = ret ? ret : ret1; /* don't mask an earlier SCM error */
+
+out:
+	return ret;
+}
+
+int qti_pfk_ice_invalidate_key(uint32_t index, char *storage_type)
+{
+	struct scm_desc desc = {0};
+	int ret;
+
+	uint32_t smc_id = 0;
+
+	if (index < MIN_ICE_KEY_INDEX || index > MAX_ICE_KEY_INDEX) {
+		pr_err("%s Invalid index %d\n", __func__, index);
+		return -EINVAL;
+	}
+
+	if (storage_type == NULL) {
+		pr_err("%s Invalid Storage type\n", __func__);
+		return -EINVAL;
+	}
+
+	/* build the TZ request that drops the key at @index from ICE */
+	smc_id = TZ_ES_INVALIDATE_ICE_KEY_ID;
+
+	desc.arginfo = TZ_ES_INVALIDATE_ICE_KEY_PARAM_ID;
+	desc.args[0] = index;
+
+	ret = qcom_ice_setup_ice_hw((const char *)storage_type, true);
+
+	if (ret) {
+		pr_err("%s: could not enable clocks: 0x%x\n", __func__, ret);
+		return ret;
+	}
+
+	ret = scm_call2(smc_id, &desc);
+
+	if (ret) {
+		pr_err("%s: Error: 0x%x\n", __func__, ret);
+		if (qcom_ice_setup_ice_hw((const char *)storage_type, false))
+			pr_err("%s: could not disable clocks\n", __func__);
+	} else {
+		ret = qcom_ice_setup_ice_hw((const char *)storage_type, false);
+	}
+
+	return ret;
+}
diff --git a/security/pfe/pfk_ice.h b/security/pfe/pfk_ice.h
new file mode 100644
index 0000000..fb7c0d1
--- /dev/null
+++ b/security/pfe/pfk_ice.h
@@ -0,0 +1,33 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef PFK_ICE_H_
+#define PFK_ICE_H_
+
+/*
+ * PFK ICE
+ *
+ * ICE keys configuration through scm calls.
+ *
+ */
+
+#include <linux/types.h>
+
+int pfk_ice_init(void);
+int pfk_ice_deinit(void);
+
+int qti_pfk_ice_set_key(uint32_t index, uint8_t *key, uint8_t *salt,
+ char *storage_type);
+int qti_pfk_ice_invalidate_key(uint32_t index, char *storage_type);
+
+
+#endif /* PFK_ICE_H_ */
diff --git a/security/pfe/pfk_internal.h b/security/pfe/pfk_internal.h
new file mode 100644
index 0000000..86526fa
--- /dev/null
+++ b/security/pfe/pfk_internal.h
@@ -0,0 +1,34 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _PFK_INTERNAL_H_
+#define _PFK_INTERNAL_H_
+
+#include <linux/types.h>
+#include <crypto/ice.h>
+
+struct pfk_key_info {
+ const unsigned char *key;
+ const unsigned char *salt;
+ size_t key_size;
+ size_t salt_size;
+};
+
+int pfk_key_size_to_key_type(size_t key_size,
+ enum ice_crpto_key_size *key_size_type);
+
+bool pfe_is_inode_filesystem_type(const struct inode *inode,
+ const char *fs_type);
+
+char *inode_to_filename(const struct inode *inode);
+
+#endif /* _PFK_INTERNAL_H_ */
diff --git a/security/pfe/pfk_kc.c b/security/pfe/pfk_kc.c
new file mode 100644
index 0000000..da71f80
--- /dev/null
+++ b/security/pfe/pfk_kc.c
@@ -0,0 +1,905 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * PFK Key Cache
+ *
+ * Key Cache used internally in PFK.
+ * The purpose of the cache is to save access time to QSEE when loading keys.
+ * Currently the cache is the same size as the total number of keys that can
+ * be loaded to ICE. Since this number is relatively small, the algorithms for
+ * cache eviction are simple, linear and based on last usage timestamp, i.e
+ * the node that will be evicted is the one with the oldest timestamp.
+ * Empty entries always have the oldest timestamp.
+ */
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <crypto/ice.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+
+#include "pfk_kc.h"
+#include "pfk_ice.h"
+
+
+/** the first available index in ice engine */
+#define PFK_KC_STARTING_INDEX 2
+
+/** currently the only supported key and salt sizes */
+#define PFK_KC_KEY_SIZE 32
+#define PFK_KC_SALT_SIZE 32
+
+/** Table size */
+/* TODO replace by some constant from ice.h */
+#define PFK_KC_TABLE_SIZE ((32) - (PFK_KC_STARTING_INDEX))
+
+/** The maximum key and salt size */
+#define PFK_MAX_KEY_SIZE PFK_KC_KEY_SIZE
+#define PFK_MAX_SALT_SIZE PFK_KC_SALT_SIZE
+#define PFK_UFS "ufs"
+
+static DEFINE_SPINLOCK(kc_lock);
+static unsigned long flags;
+static bool kc_ready;
+static char *s_type = "sdcc";
+
+/**
+ * enum pfk_kc_entry_state - state of the entry inside kc table
+ *
+ * @FREE: entry is free
+ * @ACTIVE_ICE_PRELOAD: entry is actively used by ICE engine
+ and cannot be used by others. SCM call
+ to load key to ICE is pending to be performed
+ * @ACTIVE_ICE_LOADED: entry is actively used by ICE engine and
+ cannot be used by others. SCM call to load the
+ key to ICE was successfully executed and key is
+ now loaded
+ * @INACTIVE_INVALIDATING: entry is being invalidated during file close
+ and cannot be used by others until invalidation
+ is complete
+ * @INACTIVE: entry's key is already loaded, but is not
+ currently being used. It can be re-used for
+ optimization and to avoid SCM call cost or
+ it can be taken by another key if there are
+ no FREE entries
+ * @SCM_ERROR: error occurred while scm call was performed to
+ load the key to ICE
+ */
+enum pfk_kc_entry_state {
+ FREE,
+ ACTIVE_ICE_PRELOAD,
+ ACTIVE_ICE_LOADED,
+ INACTIVE_INVALIDATING,
+ INACTIVE,
+ SCM_ERROR
+};
+
+struct kc_entry {
+ unsigned char key[PFK_MAX_KEY_SIZE];
+ size_t key_size;
+
+ unsigned char salt[PFK_MAX_SALT_SIZE];
+ size_t salt_size;
+
+ u64 time_stamp;
+ u32 key_index;
+
+ struct task_struct *thread_pending;
+
+ enum pfk_kc_entry_state state;
+
+ /* ref count for the number of requests in the HW queue for this key */
+ int loaded_ref_cnt;
+ int scm_error;
+};
+
+static struct kc_entry kc_table[PFK_KC_TABLE_SIZE];
+
+/**
+ * kc_is_ready() - driver is initialized and ready.
+ *
+ * Return: true if the key cache is ready.
+ */
+static inline bool kc_is_ready(void)
+{
+ return kc_ready;
+}
+/* NOTE(review): spin_lock_irqsave() saves IRQ state into the file-scope */
+static inline void kc_spin_lock(void)
+{
+	spin_lock_irqsave(&kc_lock, flags);
+}
+/* 'flags' global — racy if taken concurrently; should be per-caller. */
+static inline void kc_spin_unlock(void)
+{
+	spin_unlock_irqrestore(&kc_lock, flags);
+}
+
+/**
+ * kc_entry_is_available() - checks whether the entry is available
+ *
+ * Return true if the entry is FREE or INACTIVE, false otherwise or if NULL
+ * Should be invoked under spinlock
+ */
+static bool kc_entry_is_available(const struct kc_entry *entry)
+{
+	if (!entry)
+		return false;
+
+	return (entry->state == FREE || entry->state == INACTIVE);
+}
+
+/**
+ * kc_entry_wait_till_available() - block until @entry is FREE or INACTIVE
+ *
+ * Returns 0 in case of success or -ERESTARTSYS if the wait was interrupted
+ * by a signal
+ *
+ * Must be called under the kc spinlock; it is dropped while sleeping
+ */
+static int kc_entry_wait_till_available(struct kc_entry *entry)
+{
+	int res = 0;
+
+	while (!kc_entry_is_available(entry)) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (signal_pending(current)) {
+			res = -ERESTARTSYS;
+			break;
+		}
+		/* assuming only one thread can try to invalidate
+		 * the same entry
+		 */
+		entry->thread_pending = current;
+		kc_spin_unlock();
+		schedule();
+		kc_spin_lock();
+	}
+	set_current_state(TASK_RUNNING);
+
+	return res;
+}
+
+/**
+ * kc_entry_start_invalidating() - moves entry to state
+ * INACTIVE_INVALIDATING
+ * If entry is in use, waits till
+ * it gets available
+ * @entry: pointer to entry
+ *
+ * Return 0 in case of success, otherwise error
+ * Should be invoked under spinlock
+ */
+static int kc_entry_start_invalidating(struct kc_entry *entry)
+{
+ int res;
+
+ res = kc_entry_wait_till_available(entry);
+ if (res)
+ return res;
+
+ entry->state = INACTIVE_INVALIDATING;
+
+ return 0;
+}
+
+/**
+ * kc_entry_finish_invalidating() - moves an entry in state
+ *				INACTIVE_INVALIDATING to state FREE;
+ *				a no-op for entries in any other state
+ *
+ * @entry: pointer to entry
+ *
+ * Returns nothing (void); waiters are not woken up here
+ * Should be invoked under spinlock
+ */
+static void kc_entry_finish_invalidating(struct kc_entry *entry)
+{
+ if (!entry)
+ return;
+
+ if (entry->state != INACTIVE_INVALIDATING)
+ return;
+
+ entry->state = FREE;
+}
+
+/**
+ * kc_min_entry() - compare two entries to find one with minimal time
+ * @a: ptr to the first entry. If NULL the other entry will be returned
+ * @b: pointer to the second entry
+ *
+ * Return the entry which timestamp is the minimal, or b if a is NULL
+ */
+static inline struct kc_entry *kc_min_entry(struct kc_entry *a,
+ struct kc_entry *b)
+{
+ if (!a)
+ return b;
+
+ if (time_before64(b->time_stamp, a->time_stamp))
+ return b;
+
+ return a;
+}
+
+/**
+ * kc_entry_at_index() - return entry at specific index
+ * @index: index of entry to be accessed
+ *
+ * Return entry
+ * Should be invoked under spinlock
+ */
+static struct kc_entry *kc_entry_at_index(int index)
+{
+ return &(kc_table[index]);
+}
+
+/**
+ * kc_find_key_at_index() - find kc entry starting at specific index
+ * @key: key to look for
+ * @key_size: the key size
+ * @salt: salt to look for
+ * @salt_size: the salt size
+ * @starting_index: index to start search with, if entry found, updated with
+ * index of that entry
+ *
+ * Return entry or NULL in case of error
+ * Should be invoked under spinlock
+ */
+static struct kc_entry *kc_find_key_at_index(const unsigned char *key,
+ size_t key_size, const unsigned char *salt, size_t salt_size,
+ int *starting_index)
+{
+ struct kc_entry *entry = NULL;
+ int i = 0;
+
+ for (i = *starting_index; i < PFK_KC_TABLE_SIZE; i++) {
+ entry = kc_entry_at_index(i);
+
+ if (salt != NULL) {
+ if (entry->salt_size != salt_size)
+ continue;
+
+ if (memcmp(entry->salt, salt, salt_size) != 0)
+ continue;
+ }
+
+ if (entry->key_size != key_size)
+ continue;
+
+ if (memcmp(entry->key, key, key_size) == 0) {
+ *starting_index = i;
+ return entry;
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * kc_find_key() - find kc entry
+ * @key: key to look for
+ * @key_size: the key size
+ * @salt: salt to look for
+ * @salt_size: the salt size
+ *
+ * Return entry or NULL in case of error
+ * Should be invoked under spinlock
+ */
+static struct kc_entry *kc_find_key(const unsigned char *key, size_t key_size,
+ const unsigned char *salt, size_t salt_size)
+{
+ int index = 0;
+
+ return kc_find_key_at_index(key, key_size, salt, salt_size, &index);
+}
+
+/**
+ * kc_find_oldest_entry_non_locked() - finds the entry with minimal timestamp
+ * that is not locked
+ *
+ * Returns entry with minimal timestamp. Empty entries have timestamp
+ * of 0, therefore they are returned first.
+ * If all the entries are locked, will return NULL
+ * Should be invoked under spin lock
+ */
+static struct kc_entry *kc_find_oldest_entry_non_locked(void)
+{
+ struct kc_entry *curr_min_entry = NULL;
+ struct kc_entry *entry = NULL;
+ int i = 0;
+
+ for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
+ entry = kc_entry_at_index(i);
+
+ if (entry->state == FREE)
+ return entry;
+
+ if (entry->state == INACTIVE)
+ curr_min_entry = kc_min_entry(curr_min_entry, entry);
+ }
+
+ return curr_min_entry;
+}
+
+/**
+ * kc_update_timestamp() - updates timestamp of entry to current
+ *
+ * @entry: entry to update
+ *
+ */
+static void kc_update_timestamp(struct kc_entry *entry)
+{
+ if (!entry)
+ return;
+
+ entry->time_stamp = get_jiffies_64();
+}
+
+/**
+ * kc_clear_entry() - clear the key from entry and mark entry not in use
+ *
+ * @entry: pointer to entry
+ *
+ * Should be invoked under spinlock
+ */
+static void kc_clear_entry(struct kc_entry *entry)
+{
+	if (!entry)
+		return;
+	/* memzero_explicit() so the key-material wipe cannot be optimized away */
+	memzero_explicit(entry->key, entry->key_size);
+	memzero_explicit(entry->salt, entry->salt_size);
+
+	entry->key_size = 0;
+	entry->salt_size = 0;
+
+	entry->time_stamp = 0;
+	entry->scm_error = 0;
+
+	entry->state = FREE;
+
+	entry->loaded_ref_cnt = 0;
+	entry->thread_pending = NULL;
+}
+
+/**
+ * kc_update_entry() - replaces the key in given entry and
+ *			loads the new key to ICE
+ *
+ * @entry: entry to replace key in
+ * @key: new key material
+ * @key_size: size of @key in bytes
+ * @salt: new salt material
+ * @salt_size: size of @salt in bytes
+ *
+ * The previous key is wiped and the new one is loaded to ICE. The kc
+ * spinlock is dropped around the SCM call and re-acquired before return;
+ * the entry is marked ACTIVE_ICE_PRELOAD first to keep it reserved.
+ */
+static int kc_update_entry(struct kc_entry *entry, const unsigned char *key,
+	size_t key_size, const unsigned char *salt, size_t salt_size)
+{
+	int ret;
+
+	kc_clear_entry(entry);
+
+	memcpy(entry->key, key, key_size);
+	entry->key_size = key_size;
+
+	memcpy(entry->salt, salt, salt_size);
+	entry->salt_size = salt_size;
+
+	/* Mark entry as no longer free before releasing the lock */
+	entry->state = ACTIVE_ICE_PRELOAD;
+	kc_spin_unlock();
+
+	ret = qti_pfk_ice_set_key(entry->key_index, entry->key,
+			entry->salt, s_type);
+
+	kc_spin_lock();
+	return ret;
+}
+
+/**
+ * pfk_kc_init() - init function
+ *
+ * Return 0 in case of success, error otherwise
+ */
+int pfk_kc_init(void)
+{
+ int i = 0;
+ struct kc_entry *entry = NULL;
+
+ kc_spin_lock();
+ for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
+ entry = kc_entry_at_index(i);
+ entry->key_index = PFK_KC_STARTING_INDEX + i;
+ }
+ kc_ready = true;
+ kc_spin_unlock();
+ return 0;
+}
+
+/**
+ * pfk_kc_deinit() - deinit function
+ *
+ * Return 0 in case of success, error otherwise
+ */
+int pfk_kc_deinit(void)
+{
+ int res = pfk_kc_clear();
+
+ kc_ready = false;
+ return res;
+}
+
+/**
+ * pfk_kc_load_key_start() - retrieve the key from cache or add it if
+ * it's not there and return the ICE hw key index in @key_index.
+ * @key: pointer to the key
+ * @key_size: the size of the key
+ * @salt: pointer to the salt
+ * @salt_size: the size of the salt
+ * @key_index: the pointer to key_index where the output will be stored
+ * @async: whether scm calls are allowed in the caller context
+ *
+ * If the key is present in cache, then the key_index will be retrieved from
+ * If it is not present, the oldest entry from kc table will be evicted,
+ * the key will be loaded to ICE via QSEE to the index that is the evicted
+ * entry number and stored in cache.
+ * The entry that is going to be used is marked as being used; it will be
+ * marked as not being used when ICE finishes using it and
+ * pfk_kc_load_key_end is invoked.
+ * As QSEE calls can only be done from a non-atomic context, when @async flag
+ * is set to 'false', it specifies that it is ok to make the calls in the
+ * current context. Otherwise, when @async is set, the caller should retry the
+ * call again from a different context, and -EAGAIN error will be returned.
+ *
+ * Return 0 in case of success, error otherwise
+ */
+int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
+ const unsigned char *salt, size_t salt_size, u32 *key_index,
+ bool async)
+{
+ int ret = 0;
+ struct kc_entry *entry = NULL;
+ bool entry_exists = false;
+
+ if (!kc_is_ready())
+ return -ENODEV;
+
+ if (!key || !salt || !key_index) {
+ pr_err("%s key/salt/key_index NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ if (key_size != PFK_KC_KEY_SIZE) {
+ pr_err("unsupported key size %zu\n", key_size);
+ return -EINVAL;
+ }
+
+ if (salt_size != PFK_KC_SALT_SIZE) {
+ pr_err("unsupported salt size %zu\n", salt_size);
+ return -EINVAL;
+ }
+
+ kc_spin_lock();
+
+ entry = kc_find_key(key, key_size, salt, salt_size);
+ if (!entry) {
+ if (async) {
+ pr_debug("%s task will populate entry\n", __func__);
+ kc_spin_unlock();
+ return -EAGAIN;
+ }
+
+ entry = kc_find_oldest_entry_non_locked();
+ if (!entry) {
+ /* could not find a single non locked entry,
+ * return EBUSY to upper layers so that the
+ * request will be rescheduled
+ */
+ kc_spin_unlock();
+ return -EBUSY;
+ }
+ } else {
+ entry_exists = true;
+ }
+
+ pr_debug("entry with index %d is in state %d\n",
+ entry->key_index, entry->state);
+
+ switch (entry->state) {
+ case (INACTIVE):
+ if (entry_exists) {
+ kc_update_timestamp(entry);
+ entry->state = ACTIVE_ICE_LOADED;
+
+ if (!strcmp(s_type, (char *)PFK_UFS)) {
+ if (async)
+ entry->loaded_ref_cnt++;
+ } else {
+ entry->loaded_ref_cnt++;
+ }
+ break;
+ }
+ case (FREE):
+ ret = kc_update_entry(entry, key, key_size, salt, salt_size);
+ if (ret) {
+ entry->state = SCM_ERROR;
+ entry->scm_error = ret;
+ pr_err("%s: key load error (%d)\n", __func__, ret);
+ } else {
+ kc_update_timestamp(entry);
+ entry->state = ACTIVE_ICE_LOADED;
+
+ /*
+ * In case of UFS only increase ref cnt for async calls,
+ * sync calls from within work thread do not pass
+ * requests further to HW
+ */
+ if (!strcmp(s_type, (char *)PFK_UFS)) {
+ if (async)
+ entry->loaded_ref_cnt++;
+ } else {
+ entry->loaded_ref_cnt++;
+ }
+ }
+ break;
+ case (ACTIVE_ICE_PRELOAD):
+ case (INACTIVE_INVALIDATING):
+ ret = -EAGAIN;
+ break;
+ case (ACTIVE_ICE_LOADED):
+ kc_update_timestamp(entry);
+
+ if (!strcmp(s_type, (char *)PFK_UFS)) {
+ if (async)
+ entry->loaded_ref_cnt++;
+ } else {
+ entry->loaded_ref_cnt++;
+ }
+ break;
+ case(SCM_ERROR):
+ ret = entry->scm_error;
+ kc_clear_entry(entry);
+ entry->state = FREE;
+ break;
+ default:
+ pr_err("invalid state %d for entry with key index %d\n",
+ entry->state, entry->key_index);
+ ret = -EINVAL;
+ }
+
+ *key_index = entry->key_index;
+ kc_spin_unlock();
+
+ return ret;
+}
+
+/**
+ * pfk_kc_load_key_end() - finish the process of key loading that was started
+ * by pfk_kc_load_key_start
+ * by marking the entry as not
+ * being in use
+ * @key: pointer to the key
+ * @key_size: the size of the key
+ * @salt: pointer to the salt
+ * @salt_size: the size of the salt
+ *
+ */
+void pfk_kc_load_key_end(const unsigned char *key, size_t key_size,
+ const unsigned char *salt, size_t salt_size)
+{
+ struct kc_entry *entry = NULL;
+ struct task_struct *tmp_pending = NULL;
+ int ref_cnt = 0;
+
+ if (!kc_is_ready())
+ return;
+
+ if (!key || !salt)
+ return;
+
+ if (key_size != PFK_KC_KEY_SIZE)
+ return;
+
+ if (salt_size != PFK_KC_SALT_SIZE)
+ return;
+
+ kc_spin_lock();
+
+ entry = kc_find_key(key, key_size, salt, salt_size);
+ if (!entry) {
+ kc_spin_unlock();
+ pr_err("internal error, there should an entry to unlock\n");
+
+ return;
+ }
+ ref_cnt = --entry->loaded_ref_cnt;
+
+ if (ref_cnt < 0)
+ pr_err("internal error, ref count should never be negative\n");
+
+ if (!ref_cnt) {
+ entry->state = INACTIVE;
+ /*
+ * wake-up invalidation if it's waiting
+ * for the entry to be released
+ */
+ if (entry->thread_pending) {
+ tmp_pending = entry->thread_pending;
+ entry->thread_pending = NULL;
+
+ kc_spin_unlock();
+ wake_up_process(tmp_pending);
+ return;
+ }
+ }
+
+ kc_spin_unlock();
+}
+
+/**
+ * pfk_kc_remove_key_with_salt() - remove the key from cache and ICE engine
+ * @key: pointer to the key
+ * @key_size: the size of the key
+ * @salt: pointer to the salt
+ * @salt_size: the size of the salt
+ *
+ * Return 0 in case of success, error otherwise (also in case of a
+ * non-existing key)
+ */
+int pfk_kc_remove_key_with_salt(const unsigned char *key, size_t key_size,
+ const unsigned char *salt, size_t salt_size)
+{
+ struct kc_entry *entry = NULL;
+ int res = 0;
+
+ if (!kc_is_ready())
+ return -ENODEV;
+
+ if (!key)
+ return -EINVAL;
+
+ if (!salt)
+ return -EINVAL;
+
+ if (key_size != PFK_KC_KEY_SIZE)
+ return -EINVAL;
+
+ if (salt_size != PFK_KC_SALT_SIZE)
+ return -EINVAL;
+
+ kc_spin_lock();
+
+ entry = kc_find_key(key, key_size, salt, salt_size);
+ if (!entry) {
+ pr_debug("%s: key does not exist\n", __func__);
+ kc_spin_unlock();
+ return -EINVAL;
+ }
+
+ res = kc_entry_start_invalidating(entry);
+ if (res != 0) {
+ kc_spin_unlock();
+ return res;
+ }
+ kc_clear_entry(entry);
+
+ kc_spin_unlock();
+
+ qti_pfk_ice_invalidate_key(entry->key_index, s_type);
+
+ kc_spin_lock();
+ kc_entry_finish_invalidating(entry);
+ kc_spin_unlock();
+
+ return 0;
+}
+
+/**
+ * pfk_kc_remove_key() - remove the key from cache and from ICE engine
+ * when no salt is available. Only the key part is matched; if several
+ * entries match, all of them will be removed.
+ *
+ * @key: pointer to the key
+ * @key_size: the size of the key
+ *
+ * Return 0 in case of success, error otherwise (also for non-existing key)
+ */
+int pfk_kc_remove_key(const unsigned char *key, size_t key_size)
+{
+ struct kc_entry *entry = NULL;
+ int index = 0;
+ int temp_indexes[PFK_KC_TABLE_SIZE] = {0};
+ int temp_indexes_size = 0;
+ int i = 0;
+ int res = 0;
+
+ if (!kc_is_ready())
+ return -ENODEV;
+
+ if (!key)
+ return -EINVAL;
+
+ if (key_size != PFK_KC_KEY_SIZE)
+ return -EINVAL;
+
+ memset(temp_indexes, -1, sizeof(temp_indexes));
+
+ kc_spin_lock();
+
+ entry = kc_find_key_at_index(key, key_size, NULL, 0, &index);
+ if (!entry) {
+ pr_err("%s: key does not exist\n", __func__);
+ kc_spin_unlock();
+ return -EINVAL;
+ }
+
+ res = kc_entry_start_invalidating(entry);
+ if (res != 0) {
+ kc_spin_unlock();
+ return res;
+ }
+
+ temp_indexes[temp_indexes_size++] = index;
+ kc_clear_entry(entry);
+
+ /* let's clean additional entries with the same key if there are any */
+ do {
+ index++;
+ entry = kc_find_key_at_index(key, key_size, NULL, 0, &index);
+ if (!entry)
+ break;
+
+ res = kc_entry_start_invalidating(entry);
+ if (res != 0) {
+ kc_spin_unlock();
+ goto out;
+ }
+
+ temp_indexes[temp_indexes_size++] = index;
+
+ kc_clear_entry(entry);
+
+
+ } while (true);
+
+ kc_spin_unlock();
+
+ temp_indexes_size--;
+ for (i = temp_indexes_size; i >= 0 ; i--)
+ qti_pfk_ice_invalidate_key(
+ kc_entry_at_index(temp_indexes[i])->key_index,
+ s_type);
+
+ /* fall through */
+ res = 0;
+
+out:
+ kc_spin_lock();
+ for (i = temp_indexes_size; i >= 0 ; i--)
+ kc_entry_finish_invalidating(
+ kc_entry_at_index(temp_indexes[i]));
+ kc_spin_unlock();
+
+ return res;
+}
+
+/**
+ * pfk_kc_clear() - clear the table and remove all keys from ICE
+ *
+ * Return 0 on success, error otherwise
+ *
+ */
+int pfk_kc_clear(void)
+{
+ struct kc_entry *entry = NULL;
+ int i = 0;
+ int res = 0;
+
+ if (!kc_is_ready())
+ return -ENODEV;
+
+ kc_spin_lock();
+ for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
+ entry = kc_entry_at_index(i);
+ res = kc_entry_start_invalidating(entry);
+ if (res != 0) {
+ kc_spin_unlock();
+ goto out;
+ }
+ kc_clear_entry(entry);
+ }
+ kc_spin_unlock();
+
+ for (i = 0; i < PFK_KC_TABLE_SIZE; i++)
+ qti_pfk_ice_invalidate_key(kc_entry_at_index(i)->key_index,
+ s_type);
+
+ /* fall through */
+ res = 0;
+out:
+ kc_spin_lock();
+ for (i = 0; i < PFK_KC_TABLE_SIZE; i++)
+ kc_entry_finish_invalidating(kc_entry_at_index(i));
+ kc_spin_unlock();
+
+ return res;
+}
+
+/**
+ * pfk_kc_clear_on_reset() - clear the key cache table on reset
+ * The assumption is that at this point there are no pending transactions
+ * Also, there is no need to clear keys from ICE
+ *
+ * This function returns no value.
+ *
+ */
+void pfk_kc_clear_on_reset(void)
+{
+ struct kc_entry *entry = NULL;
+ int i = 0;
+
+ if (!kc_is_ready())
+ return;
+
+ kc_spin_lock();
+ for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
+ entry = kc_entry_at_index(i);
+ kc_clear_entry(entry);
+ }
+ kc_spin_unlock();
+}
+
+static int pfk_kc_find_storage_type(char **device)
+{
+ char boot[20] = {'\0'};
+ char *match = (char *)strnstr(saved_command_line,
+ "androidboot.bootdevice=",
+ strlen(saved_command_line));
+ if (match) {
+ memcpy(boot, (match + strlen("androidboot.bootdevice=")),
+ sizeof(boot) - 1);
+ if (strnstr(boot, PFK_UFS, strlen(boot)))
+ *device = PFK_UFS;
+
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int __init pfk_kc_pre_init(void)
+{
+ return pfk_kc_find_storage_type(&s_type);
+}
+
+static void __exit pfk_kc_exit(void)
+{
+ s_type = NULL;
+}
+
+module_init(pfk_kc_pre_init);
+module_exit(pfk_kc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Per-File-Key-KC driver");
diff --git a/security/pfe/pfk_kc.h b/security/pfe/pfk_kc.h
new file mode 100644
index 0000000..dc4ad15
--- /dev/null
+++ b/security/pfe/pfk_kc.h
@@ -0,0 +1,33 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef PFK_KC_H_
+#define PFK_KC_H_
+
+#include <linux/types.h>
+
+int pfk_kc_init(void);
+int pfk_kc_deinit(void);
+int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
+ const unsigned char *salt, size_t salt_size, u32 *key_index,
+ bool async);
+void pfk_kc_load_key_end(const unsigned char *key, size_t key_size,
+ const unsigned char *salt, size_t salt_size);
+int pfk_kc_remove_key_with_salt(const unsigned char *key, size_t key_size,
+ const unsigned char *salt, size_t salt_size);
+int pfk_kc_remove_key(const unsigned char *key, size_t key_size);
+int pfk_kc_clear(void);
+void pfk_kc_clear_on_reset(void);
+extern char *saved_command_line;
+
+
+#endif /* PFK_KC_H_ */
diff --git a/security/security.c b/security/security.c
index 6a7b359..e1f9e32 100644
--- a/security/security.c
+++ b/security/security.c
@@ -524,6 +524,14 @@
}
EXPORT_SYMBOL_GPL(security_inode_create);
+int security_inode_post_create(struct inode *dir, struct dentry *dentry,
+ umode_t mode)
+{
+ if (unlikely(IS_PRIVATE(dir)))
+ return 0;
+ return call_int_hook(inode_post_create, 0, dir, dentry, mode);
+}
+
int security_inode_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry)
{
@@ -1668,6 +1676,8 @@
.inode_init_security =
LIST_HEAD_INIT(security_hook_heads.inode_init_security),
.inode_create = LIST_HEAD_INIT(security_hook_heads.inode_create),
+ .inode_post_create =
+ LIST_HEAD_INIT(security_hook_heads.inode_post_create),
.inode_link = LIST_HEAD_INIT(security_hook_heads.inode_link),
.inode_unlink = LIST_HEAD_INIT(security_hook_heads.inode_unlink),
.inode_symlink =
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index c21e135..13011038 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -25,8 +25,9 @@
#include <linux/in.h>
#include <linux/spinlock.h>
#include <net/net_namespace.h>
-#include "flask.h"
-#include "avc.h"
+//#include "flask.h"
+//#include "avc.h"
+#include "security.h"
struct task_security_struct {
u32 osid; /* SID prior to last execve */
@@ -52,6 +53,8 @@
u32 sid; /* SID of this object */
u16 sclass; /* security class of this object */
unsigned char initialized; /* initialization flag */
+ u32 tag; /* Per-File-Encryption tag */
+ void *pfk_data; /* Per-File-Key data from ecryptfs */
struct mutex lock;
};
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 308a286..b8e98c1 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -12,7 +12,6 @@
#include <linux/dcache.h>
#include <linux/magic.h>
#include <linux/types.h>
-#include "flask.h"
#define SECSID_NULL 0x00000000 /* unspecified SID */
#define SECSID_WILD 0xffffffff /* wildcard SID */
diff --git a/sound/core/seq/oss/seq_oss_midi.c b/sound/core/seq/oss/seq_oss_midi.c
index aaff9ee..b30b213 100644
--- a/sound/core/seq/oss/seq_oss_midi.c
+++ b/sound/core/seq/oss/seq_oss_midi.c
@@ -612,9 +612,7 @@
if (!dp->timer->running)
len = snd_seq_oss_timer_start(dp->timer);
if (ev->type == SNDRV_SEQ_EVENT_SYSEX) {
- if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) == SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
- snd_seq_oss_readq_puts(dp->readq, mdev->seq_device,
- ev->data.ext.ptr, ev->data.ext.len);
+ snd_seq_oss_readq_sysex(dp->readq, mdev->seq_device, ev);
} else {
len = snd_midi_event_decode(mdev->coder, msg, sizeof(msg), ev);
if (len > 0)
diff --git a/sound/core/seq/oss/seq_oss_readq.c b/sound/core/seq/oss/seq_oss_readq.c
index 046cb586..06b2122 100644
--- a/sound/core/seq/oss/seq_oss_readq.c
+++ b/sound/core/seq/oss/seq_oss_readq.c
@@ -118,6 +118,35 @@
}
/*
+ * put MIDI sysex bytes; the event buffer may be chained, thus it has
+ * to be expanded via snd_seq_dump_var_event().
+ */
+struct readq_sysex_ctx {
+ struct seq_oss_readq *readq;
+ int dev;
+};
+
+static int readq_dump_sysex(void *ptr, void *buf, int count)
+{
+ struct readq_sysex_ctx *ctx = ptr;
+
+ return snd_seq_oss_readq_puts(ctx->readq, ctx->dev, buf, count);
+}
+
+int snd_seq_oss_readq_sysex(struct seq_oss_readq *q, int dev,
+ struct snd_seq_event *ev)
+{
+ struct readq_sysex_ctx ctx = {
+ .readq = q,
+ .dev = dev
+ };
+
+ if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
+ return 0;
+ return snd_seq_dump_var_event(ev, readq_dump_sysex, &ctx);
+}
+
+/*
* copy an event to input queue:
* return zero if enqueued
*/
diff --git a/sound/core/seq/oss/seq_oss_readq.h b/sound/core/seq/oss/seq_oss_readq.h
index f1463f1..8d033ca 100644
--- a/sound/core/seq/oss/seq_oss_readq.h
+++ b/sound/core/seq/oss/seq_oss_readq.h
@@ -44,6 +44,8 @@
void snd_seq_oss_readq_clear(struct seq_oss_readq *readq);
unsigned int snd_seq_oss_readq_poll(struct seq_oss_readq *readq, struct file *file, poll_table *wait);
int snd_seq_oss_readq_puts(struct seq_oss_readq *readq, int dev, unsigned char *data, int len);
+int snd_seq_oss_readq_sysex(struct seq_oss_readq *q, int dev,
+ struct snd_seq_event *ev);
int snd_seq_oss_readq_put_event(struct seq_oss_readq *readq, union evrec *ev);
int snd_seq_oss_readq_put_timestamp(struct seq_oss_readq *readq, unsigned long curt, int seq_mode);
int snd_seq_oss_readq_pick(struct seq_oss_readq *q, union evrec *rec);
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index c411483..45ef591 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -663,7 +663,7 @@
if (atomic)
read_lock(&grp->list_lock);
else
- down_read(&grp->list_mutex);
+ down_read_nested(&grp->list_mutex, hop);
list_for_each_entry(subs, &grp->list_head, src_list) {
/* both ports ready? */
if (atomic_read(&subs->ref_count) != 2)
diff --git a/sound/core/timer_compat.c b/sound/core/timer_compat.c
index 6a437eb..59127b6 100644
--- a/sound/core/timer_compat.c
+++ b/sound/core/timer_compat.c
@@ -133,7 +133,8 @@
#endif /* CONFIG_X86_X32 */
};
-static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
+static long __snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd,
+ unsigned long arg)
{
void __user *argp = compat_ptr(arg);
@@ -153,7 +154,7 @@
case SNDRV_TIMER_IOCTL_PAUSE:
case SNDRV_TIMER_IOCTL_PAUSE_OLD:
case SNDRV_TIMER_IOCTL_NEXT_DEVICE:
- return snd_timer_user_ioctl(file, cmd, (unsigned long)argp);
+ return __snd_timer_user_ioctl(file, cmd, (unsigned long)argp);
case SNDRV_TIMER_IOCTL_GPARAMS32:
return snd_timer_user_gparams_compat(file, argp);
case SNDRV_TIMER_IOCTL_INFO32:
@@ -167,3 +168,15 @@
}
return -ENOIOCTLCMD;
}
+
+static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct snd_timer_user *tu = file->private_data;
+ long ret;
+
+ mutex_lock(&tu->ioctl_lock);
+ ret = __snd_timer_user_ioctl_compat(file, cmd, arg);
+ mutex_unlock(&tu->ioctl_lock);
+ return ret;
+}
diff --git a/sound/soc/codecs/adau17x1.c b/sound/soc/codecs/adau17x1.c
index 439aa3f..79dcb1e 100644
--- a/sound/soc/codecs/adau17x1.c
+++ b/sound/soc/codecs/adau17x1.c
@@ -91,6 +91,27 @@
return 0;
}
+static int adau17x1_adc_fixup(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct adau *adau = snd_soc_codec_get_drvdata(codec);
+
+ /*
+ * If we are capturing, toggle the ADOSR bit in Converter Control 0 to
+ * avoid losing SNR (workaround from ADI). This must be done after
+ * the ADC(s) have been enabled. According to the data sheet, it is
+ * normally illegal to set this bit when the sampling rate is 96 kHz,
+ * but according to ADI it is acceptable for this workaround.
+ */
+ regmap_update_bits(adau->regmap, ADAU17X1_CONVERTER0,
+ ADAU17X1_CONVERTER0_ADOSR, ADAU17X1_CONVERTER0_ADOSR);
+ regmap_update_bits(adau->regmap, ADAU17X1_CONVERTER0,
+ ADAU17X1_CONVERTER0_ADOSR, 0);
+
+ return 0;
+}
+
static const char * const adau17x1_mono_stereo_text[] = {
"Stereo",
"Mono Left Channel (L+R)",
@@ -122,7 +143,8 @@
SND_SOC_DAPM_MUX("Right DAC Mode Mux", SND_SOC_NOPM, 0, 0,
&adau17x1_dac_mode_mux),
- SND_SOC_DAPM_ADC("Left Decimator", NULL, ADAU17X1_ADC_CONTROL, 0, 0),
+ SND_SOC_DAPM_ADC_E("Left Decimator", NULL, ADAU17X1_ADC_CONTROL, 0, 0,
+ adau17x1_adc_fixup, SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_ADC("Right Decimator", NULL, ADAU17X1_ADC_CONTROL, 1, 0),
SND_SOC_DAPM_DAC("Left DAC", NULL, ADAU17X1_DAC_CONTROL0, 0, 0),
SND_SOC_DAPM_DAC("Right DAC", NULL, ADAU17X1_DAC_CONTROL0, 1, 0),
diff --git a/sound/soc/codecs/adau17x1.h b/sound/soc/codecs/adau17x1.h
index bf04b7ef..db35003 100644
--- a/sound/soc/codecs/adau17x1.h
+++ b/sound/soc/codecs/adau17x1.h
@@ -129,5 +129,7 @@
#define ADAU17X1_CONVERTER0_CONVSR_MASK 0x7
+#define ADAU17X1_CONVERTER0_ADOSR BIT(3)
+
#endif
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index bd19fad..c17f262 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -807,7 +807,6 @@
static struct platform_driver snd_byt_rt5640_mc_driver = {
.driver = {
.name = "bytcr_rt5640",
- .pm = &snd_soc_pm_ops,
},
.probe = snd_byt_rt5640_mc_probe,
};
diff --git a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c
index eabff3a..ae49f81 100644
--- a/sound/soc/intel/boards/bytcr_rt5651.c
+++ b/sound/soc/intel/boards/bytcr_rt5651.c
@@ -317,7 +317,6 @@
static struct platform_driver snd_byt_rt5651_mc_driver = {
.driver = {
.name = "bytcr_rt5651",
- .pm = &snd_soc_pm_ops,
},
.probe = snd_byt_rt5651_mc_probe,
};
diff --git a/sound/soc/sunxi/sun4i-spdif.c b/sound/soc/sunxi/sun4i-spdif.c
index 88fbb3a..048de15 100644
--- a/sound/soc/sunxi/sun4i-spdif.c
+++ b/sound/soc/sunxi/sun4i-spdif.c
@@ -403,14 +403,6 @@
.name = "spdif",
};
-static const struct snd_soc_dapm_widget dit_widgets[] = {
- SND_SOC_DAPM_OUTPUT("spdif-out"),
-};
-
-static const struct snd_soc_dapm_route dit_routes[] = {
- { "spdif-out", NULL, "Playback" },
-};
-
static const struct of_device_id sun4i_spdif_of_match[] = {
{ .compatible = "allwinner,sun4i-a10-spdif", },
{ .compatible = "allwinner,sun6i-a31-spdif", },
diff --git a/sound/usb/usb_audio_qmi_svc.c b/sound/usb/usb_audio_qmi_svc.c
index 0aeabfe..e2cebf15 100644
--- a/sound/usb/usb_audio_qmi_svc.c
+++ b/sound/usb/usb_audio_qmi_svc.c
@@ -68,6 +68,8 @@
unsigned long xfer_buf_va;
size_t xfer_buf_size;
phys_addr_t xfer_buf_pa;
+ unsigned int data_ep_pipe;
+ unsigned int sync_ep_pipe;
u8 *xfer_buf;
u8 intf_num;
u8 pcm_card_num;
@@ -415,6 +417,7 @@
int protocol, card_num, pcm_dev_num;
void *hdr_ptr;
u8 *xfer_buf;
+ unsigned int data_ep_pipe = 0, sync_ep_pipe = 0;
u32 len, mult, remainder, xfer_buf_len, sg_len, i, total_len = 0;
unsigned long va, va_sg, tr_data_va = 0, tr_sync_va = 0;
phys_addr_t xhci_pa, xfer_buf_pa, tr_data_pa = 0, tr_sync_pa = 0;
@@ -531,6 +534,7 @@
subs->data_endpoint->ep_num);
goto err;
}
+ data_ep_pipe = subs->data_endpoint->pipe;
memcpy(&resp->std_as_data_ep_desc, &ep->desc, sizeof(ep->desc));
resp->std_as_data_ep_desc_valid = 1;
@@ -548,6 +552,7 @@
pr_debug("%s: implicit fb on data ep\n", __func__);
goto skip_sync_ep;
}
+ sync_ep_pipe = subs->sync_endpoint->pipe;
memcpy(&resp->std_as_sync_ep_desc, &ep->desc, sizeof(ep->desc));
resp->std_as_sync_ep_desc_valid = 1;
@@ -704,6 +709,8 @@
uadev[card_num].info[info_idx].xfer_buf_va = va;
uadev[card_num].info[info_idx].xfer_buf_pa = xfer_buf_pa;
uadev[card_num].info[info_idx].xfer_buf_size = len;
+ uadev[card_num].info[info_idx].data_ep_pipe = data_ep_pipe;
+ uadev[card_num].info[info_idx].sync_ep_pipe = sync_ep_pipe;
uadev[card_num].info[info_idx].xfer_buf = xfer_buf;
uadev[card_num].info[info_idx].pcm_card_num = card_num;
uadev[card_num].info[info_idx].pcm_dev_num = pcm_dev_num;
@@ -732,6 +739,26 @@
static void uaudio_dev_intf_cleanup(struct usb_device *udev,
struct intf_info *info)
{
+
+ struct usb_host_endpoint *ep;
+
+ if (info->data_ep_pipe) {
+ ep = usb_pipe_endpoint(udev, info->data_ep_pipe);
+ if (!ep)
+ pr_debug("%s: no data ep\n", __func__);
+ else
+ usb_stop_endpoint(udev, ep);
+ info->data_ep_pipe = 0;
+ }
+ if (info->sync_ep_pipe) {
+ ep = usb_pipe_endpoint(udev, info->sync_ep_pipe);
+ if (!ep)
+ pr_debug("%s: no sync ep\n", __func__);
+ else
+ usb_stop_endpoint(udev, ep);
+ info->sync_ep_pipe = 0;
+ }
+
uaudio_iommu_unmap(MEM_XFER_RING, info->data_xfer_ring_va,
info->data_xfer_ring_size);
info->data_xfer_ring_va = 0;
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 4e778ea..415a9c3 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -309,10 +309,11 @@
event_attr_init(attr);
- evsel = perf_evsel__new_idx(attr, (*idx)++);
+ evsel = perf_evsel__new_idx(attr, *idx);
if (!evsel)
return NULL;
+ (*idx)++;
evsel->cpus = cpu_map__get(cpus);
evsel->own_cpus = cpu_map__get(cpus);
diff --git a/tools/testing/selftests/firmware/fw_filesystem.sh b/tools/testing/selftests/firmware/fw_filesystem.sh
index 5c495ad..d8ac9ba 100755
--- a/tools/testing/selftests/firmware/fw_filesystem.sh
+++ b/tools/testing/selftests/firmware/fw_filesystem.sh
@@ -48,18 +48,18 @@
NAME=$(basename "$FW")
-if printf '\000' >"$DIR"/trigger_request; then
+if printf '\000' >"$DIR"/trigger_request 2> /dev/null; then
echo "$0: empty filename should not succeed" >&2
exit 1
fi
-if printf '\000' >"$DIR"/trigger_async_request; then
+if printf '\000' >"$DIR"/trigger_async_request 2> /dev/null; then
echo "$0: empty filename should not succeed (async)" >&2
exit 1
fi
# Request a firmware that doesn't exist, it should fail.
-if echo -n "nope-$NAME" >"$DIR"/trigger_request; then
+if echo -n "nope-$NAME" >"$DIR"/trigger_request 2> /dev/null; then
echo "$0: firmware shouldn't have loaded" >&2
exit 1
fi
diff --git a/tools/testing/selftests/firmware/fw_userhelper.sh b/tools/testing/selftests/firmware/fw_userhelper.sh
index b9983f8..01c626a 100755
--- a/tools/testing/selftests/firmware/fw_userhelper.sh
+++ b/tools/testing/selftests/firmware/fw_userhelper.sh
@@ -64,9 +64,33 @@
echo "ABCD0123" >"$FW"
NAME=$(basename "$FW")
+DEVPATH="$DIR"/"nope-$NAME"/loading
+
# Test failure when doing nothing (timeout works).
-echo 1 >/sys/class/firmware/timeout
-echo -n "$NAME" >"$DIR"/trigger_request
+echo -n 2 >/sys/class/firmware/timeout
+echo -n "nope-$NAME" >"$DIR"/trigger_request 2>/dev/null &
+
+# Give the kernel some time to load the loading file, must be less
+# than the timeout above.
+sleep 1
+if [ ! -f $DEVPATH ]; then
+ echo "$0: fallback mechanism immediately cancelled"
+ echo ""
+ echo "The file never appeared: $DEVPATH"
+ echo ""
+ echo "This might be a distribution udev rule setup by your distribution"
+ echo "to immediately cancel all fallback requests, this must be"
+ echo "removed before running these tests. To confirm look for"
+ echo "a firmware rule like /lib/udev/rules.d/50-firmware.rules"
+ echo "and see if you have something like this:"
+ echo ""
+ echo "SUBSYSTEM==\"firmware\", ACTION==\"add\", ATTR{loading}=\"-1\""
+ echo ""
+ echo "If you do remove this file or comment out this line before"
+ echo "proceeding with these tests."
+ exit 1
+fi
+
if diff -q "$FW" /dev/test_firmware >/dev/null ; then
echo "$0: firmware was not expected to match" >&2
exit 1