Merge "drm/msm/sde: query display driver for display topology" into msm-4.9
diff --git a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
index ffba081..b043a93 100644
--- a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
+++ b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
@@ -193,6 +193,8 @@
"dsi_cmd_mode" = enable command mode.
- qcom,5v-boost-gpio: Specifies the panel gpio for display 5v boost.
- qcom,mdss-dsi-te-check-enable: Boolean to enable Tear Check configuration.
+- qcom,mdss-dsi-te-using-wd: Boolean entry to enable watchdog timer support for generating the vsync signal
+  for command mode panels. By default, the panel TE is used to generate the vsync.
- qcom,mdss-dsi-te-using-te-pin: Boolean to specify whether using hardware vsync.
- qcom,mdss-dsi-te-pin-select: Specifies TE operating mode.
0 = TE through embedded dcs command
@@ -568,6 +570,7 @@
qcom,mdss-dsi-interleave-mode = <0>;
qcom,mdss-dsi-panel-type = "dsi_video_mode";
qcom,mdss-dsi-te-check-enable;
+ qcom,mdss-dsi-te-using-wd;
qcom,mdss-dsi-te-using-te-pin;
qcom,mdss-dsi-te-dcs-command = <1>;
qcom,mdss-dsi-wr-mem-continue = <0x3c>;
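
For illustration, a command-mode panel node that opts into the watchdog-timer vsync could look like the sketch below (node and panel names are made up; only the TE properties come from this binding):

	dsi_example_cmd: qcom,mdss_dsi_example_cmd {
		qcom,mdss-dsi-panel-name = "example command mode dsi panel";
		qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
		qcom,mdss-dsi-te-check-enable;
		/* generate vsync from the MDP watchdog timer instead of the panel TE */
		qcom,mdss-dsi-te-using-wd;
	};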
diff --git a/Documentation/devicetree/bindings/regulator/cprh-kbss-regulator.txt b/Documentation/devicetree/bindings/regulator/cprh-kbss-regulator.txt
index 8efa85d..0c6a9f2 100644
--- a/Documentation/devicetree/bindings/regulator/cprh-kbss-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/cprh-kbss-regulator.txt
@@ -213,6 +213,18 @@
target quotient adjustment due to an ACD up recommendation.
Valid values are 0 through 3.
+- qcom,cpr-acd-notwait-for-cl-settled
+ Usage: optional; meaningful only if qcom,cpr-acd-avg-enable is specified.
+ Value type: <empty>
+ Definition: Boolean flag which indicates ACD down recommendations do not
+ need to wait for CPR closed-loop to settle.
+
+- qcom,cpr-acd-avg-fast-update
+ Usage: optional; meaningful only if qcom,cpr-acd-avg-enable is specified.
+ Value type: <empty>
+ Definition: Boolean flag which indicates CPR should issue immediate
+ voltage updates following ACD requests.
+
- qcom,cpr-acd-avg-enable
Usage: optional
Value type: <empty>
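
As a sketch (the controller label is hypothetical), the two new flags are only meaningful alongside the existing averaging enable:

	&apc0_cpr {
		qcom,cpr-acd-avg-enable;
		/* let ACD down recommendations proceed without waiting for closed-loop settling */
		qcom,cpr-acd-notwait-for-cl-settled;
		/* issue the voltage update immediately after an ACD request */
		qcom,cpr-acd-avg-fast-update;
	};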
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-rumi.dts b/arch/arm/boot/dts/qcom/sdxpoorwills-rumi.dts
index 78a26c2..fc4ff37 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-rumi.dts
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-rumi.dts
@@ -28,3 +28,11 @@
pinctrl-0 = <&uart2_console_active>;
status = "ok";
};
+
+&gdsc_usb30 {
+ compatible = "regulator-fixed";
+};
+
+&gdsc_pcie {
+ compatible = "regulator-fixed";
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
index 0078617..ca6922d 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
@@ -12,6 +12,8 @@
#include "skeleton.dtsi"
+
+#include <dt-bindings/clock/qcom,rpmh.h>
#include <dt-bindings/clock/qcom,gcc-sdxpoorwills.h>
/ {
@@ -148,6 +150,12 @@
#clock-cells = <1>;
};
+ clock_rpmh: qcom,rpmhclk {
+ compatible = "qcom,dummycc";
+ clock-output-names = "rpmh_clocks";
+ #clock-cells = <1>;
+ };
+
blsp1_uart2: serial@831000 {
compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
reg = <0x831000 0x200>;
@@ -157,4 +165,18 @@
<&clock_gcc GCC_BLSP1_AHB_CLK>;
clock-names = "core", "iface";
};
+
+ gdsc_usb30: qcom,gdsc@10b004 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_usb30";
+ reg = <0x0010b004 0x4>;
+ status = "ok";
+ };
+
+ gdsc_pcie: qcom,gdsc@137004 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_pcie";
+ reg = <0x00137004 0x4>;
+ status = "ok";
+ };
};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-cmd.dtsi
index c6dfc8d..c52c18b 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-cmd.dtsi
@@ -221,41 +221,12 @@
qcom,mdss-dsi-te-using-te-pin;
qcom,compression-mode = "dsc";
- qcom,config-select = <&dsi_nt35597_truly_dsc_cmd_config0>;
+ qcom,mdss-dsc-slice-height = <16>;
+ qcom,mdss-dsc-slice-width = <720>;
+ qcom,mdss-dsc-slice-per-pkt = <2>;
+ qcom,mdss-dsc-bit-per-component = <8>;
+ qcom,mdss-dsc-bit-per-pixel = <8>;
+ qcom,mdss-dsc-block-prediction-enable;
- dsi_nt35597_truly_dsc_cmd_config0: config0 {
- qcom,mdss-dsc-encoders = <1>;
- qcom,mdss-dsc-slice-height = <16>;
- qcom,mdss-dsc-slice-width = <720>;
- qcom,mdss-dsc-slice-per-pkt = <2>;
-
- qcom,mdss-dsc-bit-per-component = <8>;
- qcom,mdss-dsc-bit-per-pixel = <8>;
- qcom,mdss-dsc-block-prediction-enable;
- };
-
- dsi_nt35597_truly_dsc_cmd_config1: config1 {
- qcom,lm-split = <720 720>;
- qcom,mdss-dsc-encoders = <1>; /* 3D Mux */
- qcom,mdss-dsc-slice-height = <16>;
- qcom,mdss-dsc-slice-width = <720>;
- qcom,mdss-dsc-slice-per-pkt = <2>;
-
- qcom,mdss-dsc-bit-per-component = <8>;
- qcom,mdss-dsc-bit-per-pixel = <8>;
- qcom,mdss-dsc-block-prediction-enable;
- };
-
- dsi_nt35597_truly_dsc_cmd_config2: config2 {
- qcom,lm-split = <720 720>;
- qcom,mdss-dsc-encoders = <2>; /* DSC Merge */
- qcom,mdss-dsc-slice-height = <16>;
- qcom,mdss-dsc-slice-width = <720>;
- qcom,mdss-dsc-slice-per-pkt = <2>;
-
- qcom,mdss-dsc-bit-per-component = <8>;
- qcom,mdss-dsc-bit-per-pixel = <8>;
- qcom,mdss-dsc-block-prediction-enable;
- };
};
};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-video.dtsi
index 334120a..fe9129c 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-video.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-video.dtsi
@@ -208,41 +208,11 @@
qcom,mdss-pan-physical-height-dimension = <131>;
qcom,compression-mode = "dsc";
- qcom,config-select = <&dsi_nt35597_truly_dsc_video_config0>;
-
- dsi_nt35597_truly_dsc_video_config0: config0 {
- qcom,mdss-dsc-encoders = <1>;
- qcom,mdss-dsc-slice-height = <16>;
- qcom,mdss-dsc-slice-width = <720>;
- qcom,mdss-dsc-slice-per-pkt = <2>;
-
- qcom,mdss-dsc-bit-per-component = <8>;
- qcom,mdss-dsc-bit-per-pixel = <8>;
- qcom,mdss-dsc-block-prediction-enable;
- };
-
- dsi_nt35597_truly_dsc_video_config1: config1 {
- qcom,lm-split = <720 720>;
- qcom,mdss-dsc-encoders = <1>; /* 3D Mux */
- qcom,mdss-dsc-slice-height = <16>;
- qcom,mdss-dsc-slice-width = <720>;
- qcom,mdss-dsc-slice-per-pkt = <2>;
-
- qcom,mdss-dsc-bit-per-component = <8>;
- qcom,mdss-dsc-bit-per-pixel = <8>;
- qcom,mdss-dsc-block-prediction-enable;
- };
-
- dsi_nt35597_truly_dsc_video_config2: config2 {
- qcom,lm-split = <720 720>;
- qcom,mdss-dsc-encoders = <2>; /* DSC Merge */
- qcom,mdss-dsc-slice-height = <16>;
- qcom,mdss-dsc-slice-width = <720>;
- qcom,mdss-dsc-slice-per-pkt = <2>;
-
- qcom,mdss-dsc-bit-per-component = <8>;
- qcom,mdss-dsc-bit-per-pixel = <8>;
- qcom,mdss-dsc-block-prediction-enable;
- };
+ qcom,mdss-dsc-slice-height = <16>;
+ qcom,mdss-dsc-slice-width = <720>;
+ qcom,mdss-dsc-slice-per-pkt = <2>;
+ qcom,mdss-dsc-bit-per-component = <8>;
+ qcom,mdss-dsc-bit-per-pixel = <8>;
+ qcom,mdss-dsc-block-prediction-enable;
};
};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi
index 25c949c..061f1d9 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi
@@ -77,17 +77,11 @@
05 01 00 00 78 00 02 10 00];
qcom,compression-mode = "dsc";
- qcom,config-select = <&dsi_sharp_dsc_cmd_config0>;
-
- dsi_sharp_dsc_cmd_config0: config0 {
- qcom,mdss-dsc-encoders = <1>;
- qcom,mdss-dsc-slice-height = <32>;
- qcom,mdss-dsc-slice-width = <1080>;
- qcom,mdss-dsc-slice-per-pkt = <1>;
-
- qcom,mdss-dsc-bit-per-component = <8>;
- qcom,mdss-dsc-bit-per-pixel = <8>;
- qcom,mdss-dsc-block-prediction-enable;
- };
+ qcom,mdss-dsc-slice-height = <32>;
+ qcom,mdss-dsc-slice-width = <1080>;
+ qcom,mdss-dsc-slice-per-pkt = <1>;
+ qcom,mdss-dsc-bit-per-component = <8>;
+ qcom,mdss-dsc-bit-per-pixel = <8>;
+ qcom,mdss-dsc-block-prediction-enable;
};
};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi
index cc093d6..e43da55 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi
@@ -70,17 +70,11 @@
05 01 00 00 78 00 02 10 00];
qcom,compression-mode = "dsc";
- qcom,config-select = <&dsi_sharp_dsc_video_config0>;
-
- dsi_sharp_dsc_video_config0: config0 {
- qcom,mdss-dsc-encoders = <1>;
- qcom,mdss-dsc-slice-height = <32>;
- qcom,mdss-dsc-slice-width = <1080>;
- qcom,mdss-dsc-slice-per-pkt = <1>;
-
- qcom,mdss-dsc-bit-per-component = <8>;
- qcom,mdss-dsc-bit-per-pixel = <8>;
- qcom,mdss-dsc-block-prediction-enable;
- };
+ qcom,mdss-dsc-slice-height = <32>;
+ qcom,mdss-dsc-slice-width = <1080>;
+ qcom,mdss-dsc-slice-per-pkt = <1>;
+ qcom,mdss-dsc-bit-per-component = <8>;
+ qcom,mdss-dsc-bit-per-pixel = <8>;
+ qcom,mdss-dsc-block-prediction-enable;
};
};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sim-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sim-cmd.dtsi
index 241aa71..1f08294 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sim-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sim-cmd.dtsi
@@ -58,6 +58,7 @@
qcom,mdss-dsi-wr-mem-continue = <0x3c>;
qcom,mdss-dsi-te-dcs-command = <1>;
qcom,mdss-dsi-te-check-enable;
+ qcom,mdss-dsi-te-using-wd;
qcom,mdss-dsi-te-using-te-pin;
qcom,mdss-dsi-panel-hdr-enabled;
qcom,mdss-dsi-panel-hdr-color-primaries = <14500 15500 32000
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-cmd.dtsi
index 509547f..36f36fb 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-cmd.dtsi
@@ -55,6 +55,7 @@
qcom,mdss-dsi-wr-mem-continue = <0x3c>;
qcom,mdss-dsi-te-dcs-command = <1>;
qcom,mdss-dsi-te-check-enable;
+ qcom,mdss-dsi-te-using-wd;
qcom,mdss-dsi-te-using-te-pin;
qcom,mdss-dsi-on-command = [29 01 00 00 00 00 02 b0 03
05 01 00 00 0a 00 01 00
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
index 6569219..122299c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
@@ -27,7 +27,7 @@
};
&mdss_mdp {
- connectors = <&sde_wb &dsi_sharp_4k_dsc_video_display>;
+ connectors = <&sde_rscc &sde_wb &dsi_sharp_4k_dsc_video_display>;
};
&dsi_sharp_4k_dsc_video {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
index 2e893de..55e615c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
@@ -27,7 +27,7 @@
};
&mdss_mdp {
- connectors = <&sde_wb &dsi_sharp_4k_dsc_video_display>;
+ connectors = <&sde_rscc &sde_wb &dsi_sharp_4k_dsc_video_display>;
};
&dsi_sharp_4k_dsc_video {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
index c0556e4..f6493ac 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
@@ -904,7 +904,7 @@
coresight-name = "coresight-tpda-llm-silver";
qcom,tpda-atid = <72>;
- qcom,cmb-elem-size = <0 64>;
+ qcom,cmb-elem-size = <0 32>;
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -959,7 +959,7 @@
coresight-name = "coresight-tpda-llm-gold";
qcom,tpda-atid = <73>;
- qcom,cmb-elem-size = <0 64>;
+ qcom,cmb-elem-size = <0 32>;
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
index 4a8d06d..1ac661d 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
@@ -118,3 +118,32 @@
status = "ok";
};
+
+&labibb {
+ status = "ok";
+ qcom,qpnp-labibb-mode = "lcd";
+};
+
+&pmi8998_wled {
+ status = "okay";
+ qcom,led-strings-list = [01 02];
+};
+
+&mdss_mdp {
+ connectors = <&sde_rscc &sde_wb &dsi_sharp_4k_dsc_video_display>;
+};
+
+&dsi_sharp_4k_dsc_video {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-panel-mode-gpio-state = "dual_port";
+ qcom,panel-mode-gpio = <&tlmm 52 0>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+ qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_sharp_4k_dsc_video_display {
+ qcom,dsi-display-active;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
index f6c1d76..79ac3b1 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
@@ -47,7 +47,7 @@
reg = <0x3800 0x100>;
regulator-name = "pm8998_s13";
regulator-min-microvolt = <568000>;
- regulator-max-microvolt = <928000>;
+ regulator-max-microvolt = <996000>;
qcom,enable-time = <500>;
regulator-always-on;
};
@@ -98,7 +98,7 @@
"APSS_SILVER_CPRH_STATUS_1",
"SILVER_SAW4_PMIC_STS";
- qcom,cpr-aging-ref-voltage = <928000>;
+ qcom,cpr-aging-ref-voltage = <996000>;
vdd-supply = <&pm8998_s13>;
thread@1 {
@@ -111,33 +111,39 @@
apc0_pwrcl_vreg: regulator {
regulator-name = "apc0_pwrcl_corner";
regulator-min-microvolt = <1>;
- regulator-max-microvolt = <17>;
+ regulator-max-microvolt = <19>;
- qcom,cpr-fuse-corners = <3>;
- qcom,cpr-fuse-combos = <8>;
- qcom,cpr-speed-bins = <1>;
- qcom,cpr-speed-bin-corners = <17>;
- qcom,cpr-corners = <17>;
+ qcom,cpr-fuse-corners = <4>;
+ qcom,cpr-fuse-combos = <16>;
+ qcom,cpr-speed-bins = <2>;
+ qcom,cpr-speed-bin-corners = <19 19>;
+ qcom,cpr-corners = <19>;
- qcom,cpr-corner-fmax-map = <6 12 17>;
+ qcom,cpr-corner-fmax-map = <6 12 17 19>;
qcom,cpr-voltage-ceiling =
<872000 872000 872000 872000 872000
872000 872000 872000 872000 872000
872000 872000 872000 872000 872000
- 872000 928000>;
+ 872000 928000 996000 996000>;
qcom,cpr-voltage-floor =
+ /* Speed bin 0 */
<568000 568000 568000 568000 568000
568000 568000 568000 568000 584000
584000 584000 632000 632000 632000
- 632000 672000>;
+ 632000 672000 996000 996000>,
+ /* Speed bin 1 */
+ <568000 568000 568000 568000 568000
+ 568000 568000 568000 568000 584000
+ 584000 584000 632000 632000 632000
+ 632000 672000 712000 712000>;
qcom,cpr-floor-to-ceiling-max-range =
<32000 32000 32000 32000 32000
32000 32000 32000 32000 32000
32000 32000 32000 32000 32000
- 32000 32000>;
+ 32000 32000 40000 40000>;
qcom,corner-frequencies =
<300000000 422400000 499200000
@@ -145,7 +151,8 @@
825600000 902400000 979200000
1056000000 1132800000 1209600000
1286400000 1363200000 1440000000
- 1516800000 1593600000>;
+ 1516800000 1593600000 1651200000
+ 1708800000>;
qcom,cpr-ro-scaling-factor =
<2594 2795 2576 2761 2469 2673 2198
@@ -156,22 +163,28 @@
2043 2947>,
<2259 2389 2387 2531 2294 2464 2218
2476 2525 2855 2817 2836 2740 2490
+ 1950 2632>,
+ <2259 2389 2387 2531 2294 2464 2218
+ 2476 2525 2855 2817 2836 2740 2490
1950 2632>;
qcom,cpr-open-loop-voltage-fuse-adjustment =
- <100000 100000 100000>;
+ <100000 100000 100000 100000>;
qcom,cpr-closed-loop-voltage-fuse-adjustment =
- <100000 100000 100000>;
+ <100000 100000 100000 100000>;
qcom,allow-voltage-interpolation;
qcom,allow-quotient-interpolation;
qcom,cpr-scaled-open-loop-voltage-as-ceiling;
qcom,cpr-aging-max-voltage-adjustment = <15000>;
- qcom,cpr-aging-ref-corner = <17>;
+ qcom,cpr-aging-ref-corner = <19>;
qcom,cpr-aging-ro-scaling-factor = <1620>;
qcom,allow-aging-voltage-adjustment =
+ /* Speed bin 0 */
+ <0 1 1 1 1 1 1 1>,
+ /* Speed bin 1 */
<0 1 1 1 1 1 1 1>;
qcom,allow-aging-open-loop-voltage-adjustment =
<1>;
@@ -188,32 +201,41 @@
apc0_l3_vreg: regulator {
regulator-name = "apc0_l3_corner";
regulator-min-microvolt = <1>;
- regulator-max-microvolt = <9>;
+ regulator-max-microvolt = <11>;
- qcom,cpr-fuse-corners = <3>;
- qcom,cpr-fuse-combos = <8>;
- qcom,cpr-speed-bins = <1>;
- qcom,cpr-speed-bin-corners = <9>;
- qcom,cpr-corners = <9>;
+ qcom,cpr-fuse-corners = <4>;
+ qcom,cpr-fuse-combos = <16>;
+ qcom,cpr-speed-bins = <2>;
+ qcom,cpr-speed-bin-corners = <11 11>;
+ qcom,cpr-corners = <11>;
- qcom,cpr-corner-fmax-map = <4 7 9>;
+ qcom,cpr-corner-fmax-map = <4 7 9 11>;
qcom,cpr-voltage-ceiling =
<872000 872000 872000 872000 872000
- 872000 872000 872000 928000>;
+ 872000 872000 872000 928000 996000
+ 996000>;
qcom,cpr-voltage-floor =
+ /* Speed bin 0 */
<568000 568000 568000 568000 568000
- 584000 584000 632000 672000>;
+ 584000 584000 632000 672000 996000
+ 996000>,
+ /* Speed bin 1 */
+ <568000 568000 568000 568000 568000
+ 584000 584000 632000 672000 712000
+ 712000>;
qcom,cpr-floor-to-ceiling-max-range =
<32000 32000 32000 32000 32000
- 32000 32000 32000 32000>;
+ 32000 32000 32000 32000 40000
+ 40000>;
qcom,corner-frequencies =
<300000000 422400000 499200000
576000000 652800000 729600000
- 806400000 883200000 960000000>;
+ 806400000 883200000 960000000
+ 1036800000 1094400000>;
qcom,cpr-ro-scaling-factor =
<2857 3056 2828 2952 2699 2796 2447
@@ -224,22 +246,28 @@
3164 2656>,
<2439 2577 2552 2667 2461 2577 2394
2536 2132 2307 2191 2903 2838 2912
+ 2501 2095>,
+ <2439 2577 2552 2667 2461 2577 2394
+ 2536 2132 2307 2191 2903 2838 2912
2501 2095>;
qcom,cpr-open-loop-voltage-fuse-adjustment =
- <100000 100000 100000>;
+ <100000 100000 100000 100000>;
qcom,cpr-closed-loop-voltage-fuse-adjustment =
- <100000 100000 100000>;
+ <100000 100000 100000 100000>;
qcom,allow-voltage-interpolation;
qcom,allow-quotient-interpolation;
qcom,cpr-scaled-open-loop-voltage-as-ceiling;
qcom,cpr-aging-max-voltage-adjustment = <15000>;
- qcom,cpr-aging-ref-corner = <9>;
+ qcom,cpr-aging-ref-corner = <11>;
qcom,cpr-aging-ro-scaling-factor = <1620>;
qcom,allow-aging-voltage-adjustment =
+ /* Speed bin 0 */
+ <0 1 1 1 1 1 1 1>,
+ /* Speed bin 1 */
<0 1 1 1 1 1 1 1>;
qcom,allow-aging-open-loop-voltage-adjustment =
<1>;
@@ -305,39 +333,68 @@
apc1_perfcl_vreg: regulator {
regulator-name = "apc1_perfcl_corner";
regulator-min-microvolt = <1>;
- regulator-max-microvolt = <24>;
+ regulator-max-microvolt = <26>;
qcom,cpr-fuse-corners = <3>;
- qcom,cpr-fuse-combos = <8>;
- qcom,cpr-speed-bins = <1>;
- qcom,cpr-speed-bin-corners = <22>;
- qcom,cpr-corners = <22>;
+ qcom,cpr-fuse-combos = <16>;
+ qcom,cpr-speed-bins = <2>;
+ qcom,cpr-speed-bin-corners = <22 24>;
+ qcom,cpr-corners =
+ /* Speed bin 0 */
+ <22 22 22 22 22 22 22 22>,
+ /* Speed bin 1 */
+ <24 24 24 24 24 24 24 24>;
qcom,cpr-corner-fmax-map =
- <10 17 22>;
+ /* Speed bin 0 */
+ <10 17 22>,
+ /* Speed bin 1 */
+ <10 17 24>;
qcom,cpr-voltage-ceiling =
+ /* Speed bin 0 */
<828000 828000 828000 828000 828000
828000 828000 828000 828000 828000
828000 828000 828000 828000 828000
828000 828000 884000 952000 952000
- 1056000 1056000>;
+ 1056000 1056000>,
+ /* Speed bin 1 */
+ <828000 828000 828000 828000 828000
+ 828000 828000 828000 828000 828000
+ 828000 828000 828000 828000 828000
+ 828000 828000 884000 952000 952000
+ 1056000 1056000 1056000 1056000>;
qcom,cpr-voltage-floor =
+ /* Speed bin 0 */
<568000 568000 568000 568000 568000
568000 568000 568000 568000 568000
584000 584000 632000 632000 632000
632000 632000 672000 712000 712000
- 772000 772000>;
+ 772000 772000>,
+ /* Speed bin 1 */
+ <568000 568000 568000 568000 568000
+ 568000 568000 568000 568000 568000
+ 584000 584000 632000 632000 632000
+ 632000 632000 672000 712000 712000
+ 772000 772000 772000 772000>;
qcom,cpr-floor-to-ceiling-max-range =
+ /* Speed bin 0 */
<32000 32000 32000 32000 32000
32000 32000 32000 32000 32000
32000 32000 32000 32000 32000
32000 32000 40000 40000 40000
- 40000 40000>;
+ 40000 40000>,
+ /* Speed bin 1 */
+ <32000 32000 32000 32000 32000
+ 32000 32000 32000 32000 32000
+ 32000 32000 32000 32000 32000
+ 32000 32000 40000 40000 40000
+ 40000 40000 40000 40000>;
qcom,corner-frequencies =
+ /* Speed bin 0 */
<300000000 422400000 499200000
576000000 652800000 729600000
806400000 883200000 960000000
@@ -345,7 +402,16 @@
1267200000 1344000000 1420800000
1497600000 1574400000 1651200000
1728000000 1804800000 1881600000
- 1958400000>;
+ 1958400000>,
+ /* Speed bin 1 */
+ <300000000 422400000 499200000
+ 576000000 652800000 729600000
+ 806400000 883200000 960000000
+ 1036800000 1113600000 1190400000
+ 1267200000 1344000000 1420800000
+ 1497600000 1574400000 1651200000
+ 1728000000 1804800000 1881600000
+ 1958400000 2035200000 2092800000>;
qcom,cpr-ro-scaling-factor =
<2857 3056 2828 2952 2699 2796 2447
@@ -369,9 +435,12 @@
qcom,cpr-scaled-open-loop-voltage-as-ceiling;
qcom,cpr-aging-max-voltage-adjustment = <15000>;
- qcom,cpr-aging-ref-corner = <22>;
+ qcom,cpr-aging-ref-corner = <22 24>;
qcom,cpr-aging-ro-scaling-factor = <1700>;
qcom,allow-aging-voltage-adjustment =
+ /* Speed bin 0 */
+ <0 1 1 1 1 1 1 1>,
+ /* Speed bin 1 */
<0 1 1 1 1 1 1 1>;
qcom,allow-aging-open-loop-voltage-adjustment =
<1>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
index efd8f45..6cafd59 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
@@ -299,47 +299,70 @@
};
&mdss_mdp {
- connectors = <&sde_wb &dsi_dual_nt35597_truly_video_display>;
+ connectors = <&sde_rscc &sde_wb &dsi_dual_nt35597_truly_video_display>;
};
&dsi_dual_nt35597_truly_video {
qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07 07 05 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x0D>;
qcom,mdss-dsi-t-clk-pre = <0x2D>;
+ qcom,display-topology = <2 0 2>,
+ <1 0 2>;
+ qcom,default-topology-index = <0>;
};
&dsi_dual_nt35597_truly_cmd {
qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07 07 05 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x0D>;
qcom,mdss-dsi-t-clk-pre = <0x2D>;
+ qcom,display-topology = <2 0 2>,
+ <1 0 2>;
+ qcom,default-topology-index = <0>;
};
&dsi_nt35597_truly_dsc_cmd {
qcom,mdss-dsi-panel-phy-timings = [00 15 05 05 20 1f 05 05 03 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x0b>;
qcom,mdss-dsi-t-clk-pre = <0x23>;
+ qcom,display-topology = <2 2 2>;
+ qcom,default-topology-index = <0>;
};
&dsi_nt35597_truly_dsc_video {
qcom,mdss-dsi-panel-phy-timings = [00 15 05 05 20 1f 05 05 03 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x0b>;
qcom,mdss-dsi-t-clk-pre = <0x23>;
+ qcom,display-topology = <2 2 2>;
+ qcom,default-topology-index = <0>;
};
&dsi_sharp_4k_dsc_video {
qcom,mdss-dsi-panel-phy-timings = [00 18 06 06 21 20 06 06 04 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x0c>;
qcom,mdss-dsi-t-clk-pre = <0x27>;
+ qcom,display-topology = <2 2 2>;
+ qcom,default-topology-index = <0>;
};
&dsi_sharp_4k_dsc_cmd {
qcom,mdss-dsi-panel-phy-timings = [00 18 06 06 21 20 06 06 04 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x0c>;
qcom,mdss-dsi-t-clk-pre = <0x27>;
+ qcom,display-topology = <2 2 2>;
+ qcom,default-topology-index = <0>;
};
&dsi_dual_sharp_1080_120hz_cmd {
qcom,mdss-dsi-panel-phy-timings = [00 24 09 09 26 24 09 09 06 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x0f>;
qcom,mdss-dsi-t-clk-pre = <0x36>;
+ qcom,display-topology = <2 0 2>,
+ <1 0 2>;
+ qcom,default-topology-index = <0>;
+};
+
+&dsi_sharp_1080_cmd {
+ qcom,display-topology = <2 0 2>,
+ <1 0 2>;
+ qcom,default-topology-index = <0>;
};
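
Each qcom,display-topology entry appears to describe one candidate pipeline as a three-cell tuple, read here as <layer mixers, DSC encoders, interfaces>, with qcom,default-topology-index picking the preferred entry. Treating that ordering as an assumption, a dual-DSI panel that prefers two layer mixers but can fall back to a single mixer split across both interfaces would be described as:

	&dsi_example_dual_panel {
		/* <LM count, DSC encoder count, interface count>, ordering assumed */
		qcom,display-topology = <2 0 2>,	/* preferred: 2 LM, no DSC, 2 DSI */
					<1 0 2>;	/* fallback: 1 LM, no DSC, 2 DSI */
		qcom,default-topology-index = <0>;
	};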
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
index 9c497fa..c7dd883 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
@@ -37,8 +37,8 @@
interrupts = <0 83 0>;
interrupt-controller;
#interrupt-cells = <1>;
- iommus = <&apps_smmu 0x880 0x0>, <&apps_smmu 0x888 0x0>,
- <&apps_smmu 0xc80 0x0>, <&apps_smmu 0xc88 0x0>;
+ iommus = <&apps_smmu 0x880 0x8>,
+ <&apps_smmu 0xc80 0x8>;
#address-cells = <1>;
#size-cells = <0>;
@@ -177,7 +177,6 @@
};
sde_rscc: qcom,sde_rscc@af20000 {
- status = "disabled";
cell-index = <0>;
compatible = "qcom,sde-rsc";
reg = <0xaf20000 0x1c44>,
@@ -186,13 +185,16 @@
qcom,sde-rsc-version = <1>;
vdd-supply = <&mdss_core_gdsc>;
- clocks = <&clock_dispcc DISP_CC_MDSS_RSCC_AHB_CLK>,
- <&clock_dispcc DISP_CC_MDSS_RSCC_VSYNC_CLK>;
- clock-names = "iface_clk", "vsync_clk";
+ clocks = <&clock_dispcc DISP_CC_MDSS_RSCC_VSYNC_CLK>,
+ <&clock_dispcc DISP_CC_MDSS_RSCC_AHB_CLK>;
+ clock-names = "vsync_clk", "iface_clk";
clock-rate = <0 0>;
qcom,sde-dram-channels = <2>;
+ mboxes = <&disp_rsc 0>;
+ mbox-names = "disp_rsc";
+
/* data and reg bus scale settings */
qcom,sde-data-bus {
qcom,msm-bus,name = "disp_rsc";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi b/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
index 6f4b4ca..4fe9282 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
@@ -97,8 +97,7 @@
compatible = "qcom,msm-vidc,context-bank";
label = "venus_ns";
iommus =
- <&apps_smmu 0x10a0 0x0>,
- <&apps_smmu 0x10a8 0x0>,
+ <&apps_smmu 0x10a0 0x8>,
<&apps_smmu 0x10b0 0x0>;
buffer-types = <0xfff>;
virtual-addr-pool = <0x70800000 0x6f800000>;
@@ -108,10 +107,8 @@
compatible = "qcom,msm-vidc,context-bank";
label = "venus_sec_bitstream";
iommus =
- <&apps_smmu 0x10a1 0x0>,
- <&apps_smmu 0x10a9 0x0>,
- <&apps_smmu 0x10a5 0x0>,
- <&apps_smmu 0x10ad 0x0>;
+ <&apps_smmu 0x10a1 0x8>,
+ <&apps_smmu 0x10a5 0x8>;
buffer-types = <0x241>;
virtual-addr-pool = <0x4b000000 0x25800000>;
qcom,secure-context-bank;
@@ -121,8 +118,7 @@
compatible = "qcom,msm-vidc,context-bank";
label = "venus_sec_pixel";
iommus =
- <&apps_smmu 0x10a3 0x0>,
- <&apps_smmu 0x10ab 0x0>;
+ <&apps_smmu 0x10a3 0x8>;
buffer-types = <0x106>;
virtual-addr-pool = <0x25800000 0x25800000>;
qcom,secure-context-bank;
@@ -132,8 +128,7 @@
compatible = "qcom,msm-vidc,context-bank";
label = "venus_sec_non_pixel";
iommus =
- <&apps_smmu 0x10a4 0x0>,
- <&apps_smmu 0x10ac 0x0>,
+ <&apps_smmu 0x10a4 0x8>,
<&apps_smmu 0x10b4 0x0>;
buffer-types = <0x480>;
virtual-addr-pool = <0x1000000 0x24800000>;
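
The iommus rework here (and the similar fastrpc and WLAN changes later in this patch) relies on the second apps_smmu cell acting as a stream-match mask, where set bits are don't-care when matching stream IDs; assuming that convention, one masked entry covers the same streams as the exact pair it replaces:

	/* mask 0x8 makes bit 3 of the SID a don't-care, so this ... */
	iommus = <&apps_smmu 0x10a0 0x8>;
	/* ... matches the same streams as the two exact entries it replaces */
	iommus = <&apps_smmu 0x10a0 0x0>, <&apps_smmu 0x10a8 0x0>;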
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 95c1d65..0b9e118 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -53,6 +53,7 @@
qcom,lmh-dcvs = <&lmh_dcvs0>;
#cooling-cells = <2>;
next-level-cache = <&L2_0>;
+ sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>;
L2_0: l2-cache {
compatible = "arm,arch-cache";
cache-size = <0x20000>;
@@ -86,6 +87,7 @@
qcom,lmh-dcvs = <&lmh_dcvs0>;
#cooling-cells = <2>;
next-level-cache = <&L2_100>;
+ sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>;
L2_100: l2-cache {
compatible = "arm,arch-cache";
cache-size = <0x20000>;
@@ -113,6 +115,7 @@
qcom,lmh-dcvs = <&lmh_dcvs0>;
#cooling-cells = <2>;
next-level-cache = <&L2_200>;
+ sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>;
L2_200: l2-cache {
compatible = "arm,arch-cache";
cache-size = <0x20000>;
@@ -140,6 +143,7 @@
qcom,lmh-dcvs = <&lmh_dcvs0>;
#cooling-cells = <2>;
next-level-cache = <&L2_300>;
+ sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>;
L2_300: l2-cache {
compatible = "arm,arch-cache";
cache-size = <0x20000>;
@@ -167,6 +171,7 @@
qcom,lmh-dcvs = <&lmh_dcvs1>;
#cooling-cells = <2>;
next-level-cache = <&L2_400>;
+ sched-energy-costs = <&CPU_COST_1 &CLUSTER_COST_1>;
L2_400: l2-cache {
compatible = "arm,arch-cache";
cache-size = <0x40000>;
@@ -194,6 +199,7 @@
qcom,lmh-dcvs = <&lmh_dcvs1>;
#cooling-cells = <2>;
next-level-cache = <&L2_500>;
+ sched-energy-costs = <&CPU_COST_1 &CLUSTER_COST_1>;
L2_500: l2-cache {
compatible = "arm,arch-cache";
cache-size = <0x40000>;
@@ -221,6 +227,7 @@
qcom,lmh-dcvs = <&lmh_dcvs1>;
#cooling-cells = <2>;
next-level-cache = <&L2_600>;
+ sched-energy-costs = <&CPU_COST_1 &CLUSTER_COST_1>;
L2_600: l2-cache {
compatible = "arm,arch-cache";
cache-size = <0x40000>;
@@ -248,6 +255,7 @@
qcom,lmh-dcvs = <&lmh_dcvs1>;
#cooling-cells = <2>;
next-level-cache = <&L2_700>;
+ sched-energy-costs = <&CPU_COST_1 &CLUSTER_COST_1>;
L2_700: l2-cache {
compatible = "arm,arch-cache";
cache-size = <0x40000>;
@@ -303,6 +311,115 @@
};
};
+ energy-costs {
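+		/*
+		 * Each busy-cost-data entry below is a <capacity cost> pair,
+		 * one per OPP (the matching frequency is in the trailing
+		 * comment); idle-cost-data lists one cost per idle state.
+		 */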
+ CPU_COST_0: core-cost0 {
+ busy-cost-data = <
+ 92 34 /* 300000 */
+ 129 40 /* 422400 */
+ 153 43 /* 499200 */
+ 177 48 /* 576000 */
+ 200 52 /* 652800 */
+ 230 58 /* 748800 */
+ 253 64 /* 825600 */
+ 277 70 /* 902400 */
+ 301 76 /* 979200 */
+ 324 83 /* 1056000 */
+ 348 90 /* 1132800 */
+ 371 98 /* 1209600 */
+ 395 105 /* 1286400 */
+ 419 114 /* 1363200 */
+ 442 123 /* 1440000 */
+ 466 135 /* 1516800 */
+ 490 152 /* 1593600 */
+ >;
+ idle-cost-data = <
+ 22 18 14 12
+ >;
+ };
+ CPU_COST_1: core-cost1 {
+ busy-cost-data = <
+ 156 240 /* 300000 */
+ 220 247 /* 422400 */
+ 261 252 /* 499200 */
+ 301 257 /* 576000 */
+ 341 264 /* 652800 */
+ 381 272 /* 729600 */
+ 421 281 /* 806400 */
+ 461 292 /* 883200 */
+ 501 306 /* 960000 */
+ 542 324 /* 1036800 */
+ 582 346 /* 1113600 */
+ 622 373 /* 1190400 */
+ 662 407 /* 1267200 */
+ 702 450 /* 1344000 */
+ 742 504 /* 1420800 */
+ 783 570 /* 1497600 */
+ 823 649 /* 1574400 */
+ 863 743 /* 1651200 */
+ 903 849 /* 1728000 */
+ 943 960 /* 1804800 */
+ 983 1062 /* 1881600 */
+ 1024 1131 /* 1958400 */
+ >;
+ idle-cost-data = <
+ 520 500 480 460
+ >;
+ };
+ CLUSTER_COST_0: cluster-cost0 {
+ busy-cost-data = <
+ 92 3 /* 300000 */
+ 129 4 /* 422400 */
+ 153 4 /* 499200 */
+ 177 4 /* 576000 */
+ 200 5 /* 652800 */
+ 230 5 /* 748800 */
+ 253 6 /* 825600 */
+ 277 7 /* 902400 */
+ 301 7 /* 979200 */
+ 324 8 /* 1056000 */
+ 348 9 /* 1132800 */
+ 371 9 /* 1209600 */
+ 395 10 /* 1286400 */
+ 419 11 /* 1363200 */
+ 442 12 /* 1440000 */
+ 466 13 /* 1516800 */
+ 490 15 /* 1593600 */
+ >;
+ idle-cost-data = <
+ 4 3 2 1
+ >;
+ };
+ CLUSTER_COST_1: cluster-cost1 {
+ busy-cost-data = <
+ 156 24 /* 300000 */
+ 220 24 /* 422400 */
+ 261 25 /* 499200 */
+ 301 25 /* 576000 */
+ 341 26 /* 652800 */
+ 381 27 /* 729600 */
+ 421 28 /* 806400 */
+ 461 29 /* 883200 */
+ 501 30 /* 960000 */
+ 542 32 /* 1036800 */
+ 582 34 /* 1113600 */
+ 622 37 /* 1190400 */
+ 662 40 /* 1267200 */
+ 702 45 /* 1344000 */
+ 742 50 /* 1420800 */
+ 783 57 /* 1497600 */
+ 823 64 /* 1574400 */
+ 863 74 /* 1651200 */
+ 903 84 /* 1728000 */
+ 943 96 /* 1804800 */
+ 983 106 /* 1881600 */
+ 1024 113 /* 1958400 */
+ >;
+ idle-cost-data = <
+ 4 3 2 1
+ >;
+ };
+ }; /* energy-costs */
+
psci {
compatible = "arm,psci-1.0";
method = "smc";
@@ -759,6 +876,7 @@
clock-names = "devfreq_clk";
clocks = <&clock_cpucc L3_CLUSTER0_VOTE_CLK>;
governor = "performance";
+ qcom,prepare-clk;
freq-tbl-khz =
< 300000 >,
< 422400 >,
@@ -776,6 +894,7 @@
clock-names = "devfreq_clk";
clocks = <&clock_cpucc L3_CLUSTER1_VOTE_CLK>;
governor = "performance";
+ qcom,prepare-clk;
freq-tbl-khz =
< 300000 >,
< 422400 >,
@@ -1057,7 +1176,7 @@
"ref_aux_clk";
clocks = <&clock_rpmh RPMH_CXO_CLK>,
<&clock_gcc GCC_UFS_MEM_CLKREF_CLK>,
- <&clock_gcc GCC_UFS_PHY_PHY_AUX_CLK>;
+ <&clock_gcc GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK>;
status = "disabled";
};
@@ -1083,13 +1202,12 @@
"tx_lane0_sync_clk",
"rx_lane0_sync_clk",
"rx_lane1_sync_clk";
- /* TODO: add HW CTL clocks when available */
clocks =
- <&clock_gcc GCC_UFS_PHY_AXI_CLK>,
- <&clock_gcc GCC_AGGRE_UFS_PHY_AXI_CLK>,
+ <&clock_gcc GCC_UFS_PHY_AXI_HW_CTL_CLK>,
+ <&clock_gcc GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK>,
<&clock_gcc GCC_UFS_PHY_AHB_CLK>,
- <&clock_gcc GCC_UFS_PHY_UNIPRO_CORE_CLK>,
- <&clock_gcc GCC_UFS_PHY_ICE_CORE_CLK>,
+ <&clock_gcc GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK>,
+ <&clock_gcc GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK>,
<&clock_rpmh RPMH_CXO_CLK>,
<&clock_gcc GCC_UFS_PHY_TX_SYMBOL_0_CLK>,
<&clock_gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>,
@@ -1178,7 +1296,7 @@
"ref_aux_clk";
clocks = <&clock_rpmh RPMH_CXO_CLK>,
<&clock_gcc GCC_UFS_CARD_CLKREF_CLK>,
- <&clock_gcc GCC_UFS_CARD_PHY_AUX_CLK>;
+ <&clock_gcc GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK>;
status = "disabled";
};
@@ -1202,13 +1320,12 @@
"ref_clk",
"tx_lane0_sync_clk",
"rx_lane0_sync_clk";
- /* TODO: add HW CTL clocks when available */
clocks =
- <&clock_gcc GCC_UFS_CARD_AXI_CLK>,
- <&clock_gcc GCC_AGGRE_UFS_CARD_AXI_CLK>,
+ <&clock_gcc GCC_UFS_CARD_AXI_HW_CTL_CLK>,
+ <&clock_gcc GCC_AGGRE_UFS_CARD_AXI_HW_CTL_CLK>,
<&clock_gcc GCC_UFS_CARD_AHB_CLK>,
- <&clock_gcc GCC_UFS_CARD_UNIPRO_CORE_CLK>,
- <&clock_gcc GCC_UFS_CARD_ICE_CORE_CLK>,
+ <&clock_gcc GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK>,
+ <&clock_gcc GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK>,
<&clock_rpmh RPMH_CXO_CLK>,
<&clock_gcc GCC_UFS_CARD_TX_SYMBOL_0_CLK>,
<&clock_gcc GCC_UFS_CARD_RX_SYMBOL_0_CLK>;
@@ -1574,66 +1691,54 @@
qcom,msm_fastrpc_compute_cb1 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "cdsprpc-smd";
- iommus = <&apps_smmu 0x1401 0x0>,
- <&apps_smmu 0x1421 0x0>;
+ iommus = <&apps_smmu 0x1401 0x30>;
};
qcom,msm_fastrpc_compute_cb2 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "cdsprpc-smd";
- iommus = <&apps_smmu 0x1402 0x0>,
- <&apps_smmu 0x1422 0x0>;
+ iommus = <&apps_smmu 0x1402 0x30>;
};
qcom,msm_fastrpc_compute_cb3 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "cdsprpc-smd";
- iommus = <&apps_smmu 0x1403 0x0>,
- <&apps_smmu 0x1423 0x0>;
+ iommus = <&apps_smmu 0x1403 0x30>;
};
qcom,msm_fastrpc_compute_cb4 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "cdsprpc-smd";
- iommus = <&apps_smmu 0x1404 0x0>,
- <&apps_smmu 0x1424 0x0>;
+ iommus = <&apps_smmu 0x1404 0x30>;
};
qcom,msm_fastrpc_compute_cb5 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "cdsprpc-smd";
- iommus = <&apps_smmu 0x1405 0x0>,
- <&apps_smmu 0x1425 0x0>;
+ iommus = <&apps_smmu 0x1405 0x30>;
};
qcom,msm_fastrpc_compute_cb6 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "cdsprpc-smd";
- iommus = <&apps_smmu 0x1406 0x0>,
- <&apps_smmu 0x1426 0x0>;
+ iommus = <&apps_smmu 0x1406 0x30>;
};
qcom,msm_fastrpc_compute_cb7 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "cdsprpc-smd";
- iommus = <&apps_smmu 0x1407 0x0>,
- <&apps_smmu 0x1427 0x0>;
+ iommus = <&apps_smmu 0x1407 0x30>;
};
qcom,msm_fastrpc_compute_cb8 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "cdsprpc-smd";
- iommus = <&apps_smmu 0x1408 0x0>,
- <&apps_smmu 0x1428 0x0>;
+ iommus = <&apps_smmu 0x1408 0x30>;
};
qcom,msm_fastrpc_compute_cb9 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "cdsprpc-smd";
qcom,secure-context-bank;
- iommus = <&apps_smmu 0x1409 0x0>,
- <&apps_smmu 0x1419 0x0>,
- <&apps_smmu 0x1429 0x0>;
+ iommus = <&apps_smmu 0x1409 0x30>;
};
qcom,msm_fastrpc_compute_cb10 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "cdsprpc-smd";
qcom,secure-context-bank;
- iommus = <&apps_smmu 0x140A 0x0>,
- <&apps_smmu 0x141A 0x0>,
- <&apps_smmu 0x142A 0x0>;
+ iommus = <&apps_smmu 0x140A 0x30>;
};
qcom,msm_fastrpc_compute_cb11 {
compatible = "qcom,msm-fastrpc-compute-cb";
@@ -2480,8 +2585,7 @@
<0xa0000000 0x10000000>,
<0xb0000000 0x10000>;
reg-names = "membase", "smmu_iova_base", "smmu_iova_ipa";
- iommus = <&apps_smmu 0x0040 0x0>,
- <&apps_smmu 0x0041 0x0>;
+ iommus = <&apps_smmu 0x0040 0x1>;
interrupts = <0 414 0 /* CE0 */ >,
<0 415 0 /* CE1 */ >,
<0 416 0 /* CE2 */ >,
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index fe3eff2..9b5de00 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -6,6 +6,7 @@
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_SCHED_WALT=y
CONFIG_RCU_EXPERT=y
CONFIG_RCU_FAST_NO_HZ=y
CONFIG_IKCONFIG=y
@@ -16,14 +17,13 @@
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_SCHEDTUNE=y
CONFIG_RT_GROUP_SCHED=y
-CONFIG_SCHED_HMP=y
-CONFIG_SCHED_HMP_CSTATE_AWARE=y
CONFIG_SCHED_CORE_CTL=y
CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_SCHED_AUTOGROUP=y
CONFIG_SCHED_TUNE=y
+CONFIG_DEFAULT_USE_ENERGY_AWARE=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_XZ is not set
# CONFIG_RD_LZO is not set
@@ -81,6 +81,7 @@
CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
CONFIG_CPU_BOOST=y
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
CONFIG_CPU_FREQ_MSM=y
CONFIG_NET=y
CONFIG_PACKET=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index 7d7f6f6..615150a 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -5,6 +5,7 @@
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_SCHED_WALT=y
CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
@@ -20,14 +21,13 @@
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_SCHEDTUNE=y
CONFIG_RT_GROUP_SCHED=y
-CONFIG_SCHED_HMP=y
-CONFIG_SCHED_HMP_CSTATE_AWARE=y
CONFIG_SCHED_CORE_CTL=y
CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_SCHED_AUTOGROUP=y
CONFIG_SCHED_TUNE=y
+CONFIG_DEFAULT_USE_ENERGY_AWARE=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_XZ is not set
# CONFIG_RD_LZO is not set
@@ -87,6 +87,7 @@
CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
CONFIG_CPU_BOOST=y
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
CONFIG_CPU_FREQ_MSM=y
CONFIG_NET=y
CONFIG_PACKET=y
diff --git a/drivers/clk/qcom/clk-debug.c b/drivers/clk/qcom/clk-debug.c
index 53288f7..fcc2493 100644
--- a/drivers/clk/qcom/clk-debug.c
+++ b/drivers/clk/qcom/clk-debug.c
@@ -133,12 +133,16 @@
{
struct clk_debug_mux *meas = to_clk_measure(hw);
int i, num_parents = clk_hw_get_num_parents(hw);
+ struct clk_hw *hw_clk = clk_hw_get_parent(hw);
+
+ if (!hw_clk)
+ return 0;
for (i = 0; i < num_parents; i++) {
if (!strcmp(meas->parent[i].parents,
- hw->init->parent_names[i])) {
- pr_debug("%s: Clock name %s index %d\n", __func__,
- hw->init->name, i);
+ clk_hw_get_name(hw_clk))) {
+ pr_debug("%s: clock parent - %s, index %d\n", __func__,
+ meas->parent[i].parents, i);
return i;
}
}
@@ -158,8 +162,8 @@
/* Update the recursive debug mux */
regmap_read(meas->regmap[dbg_cc],
meas->parent[index].mux_offset, &regval);
- regval &= ~meas->parent[index].mux_sel_mask <<
- meas->parent[index].mux_sel_shift;
+ regval &= ~(meas->parent[index].mux_sel_mask <<
+ meas->parent[index].mux_sel_shift);
regval |= (meas->parent[index].dbg_cc_mux_sel &
meas->parent[index].mux_sel_mask) <<
meas->parent[index].mux_sel_shift;
@@ -168,31 +172,34 @@
regmap_read(meas->regmap[dbg_cc],
meas->parent[index].post_div_offset, &regval);
- regval &= ~meas->parent[index].post_div_mask <<
- meas->parent[index].post_div_shift;
+ regval &= ~(meas->parent[index].post_div_mask <<
+ meas->parent[index].post_div_shift);
regval |= ((meas->parent[index].post_div_val - 1) &
meas->parent[index].post_div_mask) <<
meas->parent[index].post_div_shift;
regmap_write(meas->regmap[dbg_cc],
meas->parent[index].post_div_offset, regval);
- regmap_read(meas->regmap[dbg_cc],
+ /* Not all recursive muxes have a DEBUG clock. */
+ if (meas->parent[index].cbcr_offset != U32_MAX) {
+ regmap_read(meas->regmap[dbg_cc],
meas->parent[index].cbcr_offset, &regval);
- regval |= BIT(0);
- regmap_write(meas->regmap[dbg_cc],
+ regval |= BIT(0);
+ regmap_write(meas->regmap[dbg_cc],
meas->parent[index].cbcr_offset, regval);
+ }
}
/* Update the debug sel for GCC */
regmap_read(meas->regmap[GCC], meas->debug_offset, &regval);
- regval &= ~meas->src_sel_mask << meas->src_sel_shift;
+ regval &= ~(meas->src_sel_mask << meas->src_sel_shift);
regval |= (meas->parent[index].prim_mux_sel & meas->src_sel_mask) <<
meas->src_sel_shift;
regmap_write(meas->regmap[GCC], meas->debug_offset, regval);
/* Set the GCC mux's post divider bits */
regmap_read(meas->regmap[GCC], meas->post_div_offset, &regval);
- regval &= ~meas->post_div_mask << meas->post_div_shift;
+ regval &= ~(meas->post_div_mask << meas->post_div_shift);
regval |= ((meas->parent[index].prim_mux_div_val - 1) &
meas->post_div_mask) << meas->post_div_shift;
regmap_write(meas->regmap[GCC], meas->post_div_offset, regval);
@@ -234,6 +241,10 @@
if (meas->parent[index].dbg_cc != GCC)
*val *= meas->parent[index].post_div_val;
*val *= meas->parent[index].prim_mux_div_val;
+
+ /* Accommodate for any pre-set dividers */
+ if (meas->parent[index].misc_div_val)
+ *val *= meas->parent[index].misc_div_val;
}
meas_rate = clk_get_rate(hw->clk);
@@ -244,7 +255,6 @@
sw_rate = clk_get_rate(par->clk);
if (sw_rate && meas_rate >= (sw_rate * 2))
*val *= DIV_ROUND_CLOSEST(meas_rate, sw_rate);
-
mutex_unlock(&clk_debug_lock);
return ret;
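
The regval updates above also pick up an operator-precedence fix: unary '~' binds tighter than '<<', so '~mask << shift' is really '(~mask) << shift' and clears every bit below the field as well, while '~(mask << shift)' clears only the intended field. A standalone C sketch of the difference:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t regval = 0xffffffff;
		uint32_t mask = 0x3, shift = 4;

		/* old form: (~mask) << shift also wipes the bits below the field */
		printf("0x%08x\n", regval & (~mask << shift));	/* 0xffffffc0 */

		/* fixed form: only the 2-bit field at 'shift' is cleared */
		printf("0x%08x\n", regval & ~(mask << shift));	/* 0xffffffcf */

		return 0;
	}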
diff --git a/drivers/clk/qcom/clk-debug.h b/drivers/clk/qcom/clk-debug.h
index 280704e..aa8d97b 100644
--- a/drivers/clk/qcom/clk-debug.h
+++ b/drivers/clk/qcom/clk-debug.h
@@ -66,6 +66,7 @@
* @mux_offset: the debug mux offset.
* @post_div_offset: register with post-divider settings for the debug mux.
* @cbcr_offset: branch register to turn on debug mux.
+ * @misc_div_val: includes any pre-set dividers in the measurement logic.
*/
struct clk_src {
const char *parents;
@@ -81,6 +82,7 @@
u32 mux_offset;
u32 post_div_offset;
u32 cbcr_offset;
+ u32 misc_div_val;
};
#define MUX_SRC_LIST(...) \
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
index 2cb9d05..93ad1b0 100644
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
@@ -603,6 +603,9 @@
pr_err("dsi pll resources not available\n");
return;
}
+ pll->cached_cfg0 = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
+ pll->cached_cfg1 = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG1);
+ pr_debug("cfg0=%d,cfg1=%d\n", pll->cached_cfg0, pll->cached_cfg1);
pll->vco_cached_rate = clk_hw_get_rate(hw);
dsi_pll_disable(vco);
@@ -637,6 +640,12 @@
mdss_pll_resource_enable(pll, false);
return rc;
}
+ pr_debug("cfg0=%d, cfg1=%d\n", pll->cached_cfg0,
+ pll->cached_cfg1);
+ MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG0,
+ pll->cached_cfg0);
+ MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG1,
+ pll->cached_cfg1);
}
rc = dsi_pll_enable(vco);
@@ -1111,7 +1120,7 @@
.max_rate = 3500000000UL,
.hw.init = &(struct clk_init_data){
.name = "dsi0pll_vco_clk",
- .parent_names = (const char *[]){"xo_board"},
+ .parent_names = (const char *[]){"bi_tcxo"},
.num_parents = 1,
.ops = &clk_ops_vco_10nm,
.flags = CLK_GET_RATE_NOCACHE,
@@ -1124,7 +1133,7 @@
.max_rate = 3500000000UL,
.hw.init = &(struct clk_init_data){
.name = "dsi1pll_vco_clk",
- .parent_names = (const char *[]){"xo_board"},
+ .parent_names = (const char *[]){"bi_tcxo"},
.num_parents = 1,
.ops = &clk_ops_vco_10nm,
.flags = CLK_GET_RATE_NOCACHE,
diff --git a/drivers/clk/qcom/mdss/mdss-pll.h b/drivers/clk/qcom/mdss/mdss-pll.h
index eccfcea..ee91e11 100644
--- a/drivers/clk/qcom/mdss/mdss-pll.h
+++ b/drivers/clk/qcom/mdss/mdss-pll.h
@@ -94,6 +94,8 @@
* suspend/resume scenario. Cached the vco rate for such plls.
*/
unsigned long vco_cached_rate;
+ u32 cached_cfg0;
+ u32 cached_cfg1;
/* dsi/edp/hmdi pll interface type */
u32 pll_interface_type;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
index 122a63d..48c2370 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
@@ -248,7 +248,7 @@
reg_ctrl = DSI_R32(ctrl, DSI_COMMAND_COMPRESSION_MODE_CTRL);
reg_ctrl2 = DSI_R32(ctrl, DSI_COMMAND_COMPRESSION_MODE_CTRL2);
width_final = mode->dsc->pclk_per_line;
- stride_final = width_final * (h_stride / mode->h_active);
+ stride_final = mode->dsc->bytes_per_pkt;
reg = 0x39 << 8;
/*
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index 3402d48..2c0d5de 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -2742,6 +2742,8 @@
break;
case DSI_OP_CMD_MODE:
info->capabilities |= MSM_DISPLAY_CAP_CMD_MODE;
+ info->is_te_using_watchdog_timer =
+ display->panel->te_using_watchdog_timer;
break;
default:
pr_err("unknwown dsi panel mode %d\n",
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
index cb4afe4..312f3e6 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
@@ -540,15 +540,42 @@
}
#endif
+static int dsi_panel_update_backlight(struct dsi_panel *panel,
+ u32 bl_lvl)
+{
+ int rc = 0;
+ struct mipi_dsi_device *dsi;
+
+ if (!panel || (bl_lvl > 0xffff)) {
+ pr_err("invalid params\n");
+ return -EINVAL;
+ }
+
+ dsi = &panel->mipi_device;
+
+ mutex_lock(&panel->panel_lock);
+
+ rc = mipi_dsi_dcs_set_display_brightness(dsi, bl_lvl);
+ if (rc < 0)
+ pr_err("failed to update dcs backlight:%d\n", bl_lvl);
+
+ mutex_unlock(&panel->panel_lock);
+ return rc;
+}
+
int dsi_panel_set_backlight(struct dsi_panel *panel, u32 bl_lvl)
{
int rc = 0;
struct dsi_backlight_config *bl = &panel->bl_config;
+ pr_debug("backlight type:%d lvl:%d\n", bl->type, bl_lvl);
switch (bl->type) {
case DSI_BACKLIGHT_WLED:
led_trigger_event(bl->wled, bl_lvl);
break;
+ case DSI_BACKLIGHT_DCS:
+ dsi_panel_update_backlight(panel, bl_lvl);
+ break;
default:
pr_err("Backlight type(%d) not supported\n", bl->type);
rc = -ENOTSUPP;
@@ -566,6 +593,8 @@
case DSI_BACKLIGHT_WLED:
rc = dsi_panel_led_bl_register(panel, bl);
break;
+ case DSI_BACKLIGHT_DCS:
+ break;
default:
pr_err("Backlight type(%d) not supported\n", bl->type);
rc = -ENOTSUPP;
@@ -585,6 +614,8 @@
case DSI_BACKLIGHT_WLED:
led_trigger_unregister_simple(bl->wled);
break;
+ case DSI_BACKLIGHT_DCS:
+ break;
default:
pr_err("Backlight type(%d) not supported\n", bl->type);
rc = -ENOTSUPP;
@@ -1525,14 +1556,17 @@
return rc;
}
-static int dsi_panel_parse_features(struct dsi_panel *panel,
+static int dsi_panel_parse_misc_features(struct dsi_panel *panel,
struct device_node *of_node)
{
panel->ulps_enabled =
of_property_read_bool(of_node, "qcom,ulps-enabled");
- pr_debug("ulps_enabled:%d\n", panel->ulps_enabled);
+ if (panel->ulps_enabled)
+ pr_debug("ulps_enabled:%d\n", panel->ulps_enabled);
+ panel->te_using_watchdog_timer = of_property_read_bool(of_node,
+ "qcom,mdss-dsi-te-using-wd");
return 0;
}
@@ -2249,7 +2283,7 @@
if (rc)
pr_err("failed to parse panel jitter config, rc=%d\n", rc);
- rc = dsi_panel_parse_features(panel, of_node);
+ rc = dsi_panel_parse_misc_features(panel, of_node);
if (rc)
pr_err("failed to parse panel features, rc=%d\n", rc);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
index 9f63089..de2b5b1 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
@@ -184,6 +184,7 @@
u32 panel_jitter;
u32 panel_prefill_lines;
bool panel_initialized;
+ bool te_using_watchdog_timer;
bool dsc_enabled;
char dsc_pps_cmd[DSI_CMD_PPS_SIZE];
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 322b7f2..d50a185 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -389,6 +389,8 @@
* @max_height: Max height of display. In case of hot pluggable display
* this is max height supported by controller
* @is_primary: Set to true if display is primary display
+ * @is_te_using_watchdog_timer: Boolean to indicate watchdog TE is
+ * used instead of panel TE in cmd mode panels
* @frame_rate: Display frame rate
* @prefill_lines: prefill lines based on porches.
* @vtotal: display vertical total
@@ -412,6 +414,7 @@
uint32_t max_height;
bool is_primary;
+ bool is_te_using_watchdog_timer;
uint32_t frame_rate;
uint32_t prefill_lines;
uint32_t vtotal;
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 1ae5ec1..39127e0 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -1199,7 +1199,10 @@
struct sde_encoder_virt *sde_enc = NULL;
struct msm_drm_private *priv;
struct sde_kms *sde_kms;
+ struct sde_hw_mdp *hw_mdptop;
+ int i = 0;
int ret = 0;
+ struct sde_watchdog_te_status te_cfg = { 0 };
if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
SDE_ERROR("invalid parameters\n");
@@ -1214,6 +1217,14 @@
}
sde_kms = to_sde_kms(priv->kms);
+ hw_mdptop = sde_kms->hw_mdp;
+
+ if (!hw_mdptop) {
+ SDE_ERROR("invalid mdptop\n");
+ return;
+ }
+
+ sde_kms = to_sde_kms(priv->kms);
if (!sde_kms) {
SDE_ERROR("invalid sde_kms\n");
return;
@@ -1230,6 +1241,16 @@
if (ret)
SDE_ERROR_ENC(sde_enc, "failed to setup DSC:%d\n", ret);
}
+
+ if (hw_mdptop->ops.setup_vsync_sel) {
+ for (i = 0; i < sde_enc->num_phys_encs; i++)
+ te_cfg.ppnumber[i] = sde_enc->hw_pp[i]->idx;
+
+ te_cfg.pp_count = sde_enc->num_phys_encs;
+ te_cfg.frame_rate = sde_enc->disp_info.frame_rate;
+ hw_mdptop->ops.setup_vsync_sel(hw_mdptop, &te_cfg,
+ sde_enc->disp_info.is_te_using_watchdog_timer);
+ }
}
void sde_encoder_virt_restore(struct drm_encoder *drm_enc)
diff --git a/drivers/gpu/drm/msm/sde/sde_fence.c b/drivers/gpu/drm/msm/sde/sde_fence.c
index 826fe14..c83472a 100644
--- a/drivers/gpu/drm/msm/sde/sde_fence.c
+++ b/drivers/gpu/drm/msm/sde/sde_fence.c
@@ -207,8 +207,9 @@
if (!sde_fence)
return -ENOMEM;
- snprintf(sde_fence->name, SDE_FENCE_NAME_SIZE, "fence%u", val);
-
+ sde_fence->ctx = fence_ctx;
+ snprintf(sde_fence->name, SDE_FENCE_NAME_SIZE, "sde_fence:%s:%u",
+ sde_fence->ctx->name, val);
fence_init(&sde_fence->base, &sde_fence_ops, &ctx->lock,
ctx->context, val);
@@ -231,13 +232,13 @@
}
fd_install(fd, sync_file->file);
+ sde_fence->fd = fd;
+ kref_get(&ctx->kref);
spin_lock(&ctx->list_lock);
- sde_fence->ctx = fence_ctx;
- sde_fence->fd = fd;
list_add_tail(&sde_fence->fence_list, &ctx->fence_list_head);
- kref_get(&ctx->kref);
spin_unlock(&ctx->list_lock);
+
exit:
return fd;
}
@@ -357,6 +358,8 @@
}
spin_unlock_irqrestore(&ctx->lock, flags);
+ SDE_EVT32(ctx->drm_id, ctx->done_count);
+
spin_lock(&ctx->list_lock);
if (list_empty(&ctx->fence_list_head)) {
SDE_DEBUG("nothing to trigger!-no get_prop call\n");
@@ -370,7 +373,7 @@
list_for_each_entry_safe(fc, next, &local_list_head, fence_list) {
spin_lock_irqsave(&ctx->lock, flags);
- is_signaled = fence_signal_locked(&fc->base);
+ is_signaled = fence_is_signaled_locked(&fc->base);
spin_unlock_irqrestore(&ctx->lock, flags);
if (is_signaled) {
@@ -381,6 +384,4 @@
spin_unlock(&ctx->list_lock);
}
}
-
- SDE_EVT32(ctx->drm_id, ctx->done_count);
}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.c b/drivers/gpu/drm/msm/sde/sde_hw_top.c
index cf54611..bd212e2 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_top.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_top.c
@@ -34,6 +34,17 @@
#define TRAFFIC_SHAPER_WR_CLIENT(num) (0x060 + (num * 4))
#define TRAFFIC_SHAPER_FIXPOINT_FACTOR 4
+#define MDP_WD_TIMER_0_CTL 0x380
+#define MDP_WD_TIMER_0_CTL2 0x384
+#define MDP_WD_TIMER_0_LOAD_VALUE 0x388
+
+#define MDP_TICK_COUNT 16
+#define XO_CLK_RATE 19200
+#define MS_TICKS_IN_SEC 1000
+
+#define CALCULATE_WD_LOAD_VALUE(fps) \
+ ((uint32_t)((MS_TICKS_IN_SEC * XO_CLK_RATE)/(MDP_TICK_COUNT * fps)))
+
#define DCE_SEL 0x450
static void sde_hw_setup_split_pipe(struct sde_hw_mdp *mdp,
@@ -192,6 +203,39 @@
status->wb[WB_3] = 0;
}
+static void sde_hw_setup_vsync_sel(struct sde_hw_mdp *mdp,
+ struct sde_watchdog_te_status *cfg, bool watchdog_te)
+{
+ struct sde_hw_blk_reg_map *c = &mdp->hw;
+ u32 reg = 0;
+ int i = 0;
+ u32 pp_offset[] = {0xC, 0x8, 0x4, 0x13};
+
+ if (!mdp)
+ return;
+
+ reg = SDE_REG_READ(c, MDP_VSYNC_SEL);
+ for (i = 0; i < cfg->pp_count; i++) {
+ if (watchdog_te)
+ reg |= 0xF << pp_offset[cfg->ppnumber[i] - 1];
+ else
+ reg &= ~(0xF << pp_offset[cfg->ppnumber[i] - 1]);
+ }
+
+ SDE_REG_WRITE(c, MDP_VSYNC_SEL, reg);
+
+ if (watchdog_te) {
+ SDE_REG_WRITE(c, MDP_WD_TIMER_0_LOAD_VALUE,
+ CALCULATE_WD_LOAD_VALUE(cfg->frame_rate));
+
+ SDE_REG_WRITE(c, MDP_WD_TIMER_0_CTL, BIT(0)); /* clear timer */
+ reg = SDE_REG_READ(c, MDP_WD_TIMER_0_CTL2);
+ reg |= BIT(8); /* enable heartbeat timer */
+ reg |= BIT(0); /* enable WD timer */
+ SDE_REG_WRITE(c, MDP_WD_TIMER_0_CTL2, reg);
+ }
+}
+
static void sde_hw_get_safe_status(struct sde_hw_mdp *mdp,
struct sde_danger_safe_status *status)
{
@@ -261,6 +305,7 @@
ops->setup_cdm_output = sde_hw_setup_cdm_output;
ops->setup_clk_force_ctrl = sde_hw_setup_clk_force_ctrl;
ops->get_danger_status = sde_hw_get_danger_status;
+ ops->setup_vsync_sel = sde_hw_setup_vsync_sel;
ops->get_safe_status = sde_hw_get_safe_status;
ops->setup_dce = sde_hw_setup_dce;
ops->reset_ubwc = sde_hw_reset_ubwc;
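
Working the new macro through for a 60 fps panel gives a load value of (1000 * 19200) / (16 * 60) = 20000. A small compile-and-run sketch of the same arithmetic, mirroring the macro added above:

	#include <stdio.h>

	#define MDP_TICK_COUNT	16
	#define XO_CLK_RATE	19200
	#define MS_TICKS_IN_SEC	1000

	/* same arithmetic as CALCULATE_WD_LOAD_VALUE in sde_hw_top.c */
	#define CALCULATE_WD_LOAD_VALUE(fps) \
		((unsigned int)((MS_TICKS_IN_SEC * XO_CLK_RATE) / (MDP_TICK_COUNT * (fps))))

	int main(void)
	{
		/* 60 fps -> 20000 ticks programmed into MDP_WD_TIMER_0_LOAD_VALUE */
		printf("%u\n", CALCULATE_WD_LOAD_VALUE(60));
		return 0;
	}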
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.h b/drivers/gpu/drm/msm/sde/sde_hw_top.h
index 7511358..9cb4494 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_top.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_top.h
@@ -77,6 +77,18 @@
};
/**
+ * struct sde_watchdog_te_status - configure watchdog timer to generate TE
+ * @pp_count: number of ping pongs active
+ * @frame_rate: Display frame rate
+ * @ppnumber: ping pong block indices driven by the display
+ */
+struct sde_watchdog_te_status {
+ u32 pp_count;
+ u32 frame_rate;
+ u32 ppnumber[];
+};
+
+/**
* struct sde_hw_mdp_ops - interface to the MDP TOP Hw driver functions
* Assumption is these functions will be called after clocks are enabled.
* @setup_split_pipe : Programs the pipe control registers
@@ -142,6 +154,15 @@
struct sde_danger_safe_status *status);
/**
+ * setup_vsync_sel - select the vsync source (panel TE or watchdog timer)
+ * @mdp: mdp top context driver
+ * @cfg: watchdog timer configuration
+ * @watchdog_te: watchdog timer enable
+ */
+ void (*setup_vsync_sel)(struct sde_hw_mdp *mdp,
+ struct sde_watchdog_te_status *cfg, bool watchdog_te);
+
+ /**
* get_safe_status - get safe status
* @mdp: mdp top context driver
* @status: Pointer to danger safe status
diff --git a/drivers/gpu/drm/msm/sde_rsc.c b/drivers/gpu/drm/msm/sde_rsc.c
index 1f770c3..3413ee7 100644
--- a/drivers/gpu/drm/msm/sde_rsc.c
+++ b/drivers/gpu/drm/msm/sde_rsc.c
@@ -780,14 +780,15 @@
const char __user *p, size_t count, loff_t *ppos)
{
struct sde_rsc_priv *rsc = file->private_data;
- char *input, *mode;
- u32 mode0_state = 0, mode1_state = 0, mode2_state = 0;
+ char *input;
+ u32 mode_state = 0;
int rc;
- if (!rsc || !rsc->hw_ops.mode_ctrl)
+ if (!rsc || !rsc->hw_ops.mode_ctrl || !count ||
+ count > MAX_COUNT_SIZE_SUPPORTED)
return 0;
- input = kmalloc(count, GFP_KERNEL);
+ input = kmalloc(count + 1, GFP_KERNEL);
if (!input)
return -ENOMEM;
@@ -795,43 +796,35 @@
kfree(input);
return -EFAULT;
}
- input[count - 1] = '\0';
+ input[count] = '\0';
+
+ rc = kstrtoint(input, 0, &mode_state);
+ if (rc) {
+ pr_err("mode_state: int conversion failed rc:%d\n", rc);
+ goto end;
+ }
+
+ pr_debug("mode_state: %d\n", mode_state);
+ mode_state &= 0x7;
+ if (mode_state != ALL_MODES_DISABLED &&
+ mode_state != ALL_MODES_ENABLED &&
+ mode_state != ONLY_MODE_0_ENABLED &&
+ mode_state != ONLY_MODE_0_1_ENABLED) {
+ pr_err("invalid mode:%d combination\n", mode_state);
+ goto end;
+ }
mutex_lock(&rsc->client_lock);
rc = sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
if (rc)
goto clk_enable_fail;
- mode = strnstr(input, "mode0=", strlen("mode0="));
- if (mode) {
- mode0_state = mode[0] - '0';
- mode0_state &= BIT(0);
- rsc->hw_ops.mode_ctrl(rsc, MODE0_UPDATE, NULL, 0, mode0_state);
- goto end;
- }
-
- mode = strnstr(input, "mode1=", strlen("mode1="));
- if (mode) {
- mode1_state = mode[0] - '0';
- mode1_state &= BIT(0);
- rsc->hw_ops.mode_ctrl(rsc, MODE1_UPDATE, NULL, 0, mode1_state);
- goto end;
- }
-
- mode = strnstr(input, "mode2=", strlen("mode2="));
- if (mode) {
- mode2_state = mode[0] - '0';
- mode2_state &= BIT(0);
- rsc->hw_ops.mode_ctrl(rsc, MODE2_UPDATE, NULL, 0, mode2_state);
- }
-
-end:
+ rsc->hw_ops.mode_ctrl(rsc, MODE_UPDATE, NULL, 0, mode_state);
sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
+
clk_enable_fail:
mutex_unlock(&rsc->client_lock);
-
- pr_info("req: mode0:%d mode1:%d mode2:%d\n", mode0_state, mode1_state,
- mode2_state);
+end:
kfree(input);
return count;
}
@@ -879,14 +872,15 @@
const char __user *p, size_t count, loff_t *ppos)
{
struct sde_rsc_priv *rsc = file->private_data;
- char *input, *vsync_mode;
+ char *input;
u32 vsync_state = 0;
int rc;
- if (!rsc || !rsc->hw_ops.hw_vsync)
+ if (!rsc || !rsc->hw_ops.hw_vsync || !count ||
+ count > MAX_COUNT_SIZE_SUPPORTED)
return 0;
- input = kmalloc(count, GFP_KERNEL);
+ input = kmalloc(count + 1, GFP_KERNEL);
if (!input)
return -ENOMEM;
@@ -894,18 +888,21 @@
kfree(input);
return -EFAULT;
}
- input[count - 1] = '\0';
+ input[count] = '\0';
- vsync_mode = strnstr(input, "vsync_mode=", strlen("vsync_mode="));
- if (vsync_mode) {
- vsync_state = vsync_mode[0] - '0';
- vsync_state &= 0x7;
+ rc = kstrtoint(input, 0, &vsync_state);
+ if (rc) {
+ pr_err("vsync_state: int conversion failed rc:%d\n", rc);
+ goto end;
}
+ pr_debug("vsync_state: %d\n", vsync_state);
+ vsync_state &= 0x7;
+
mutex_lock(&rsc->client_lock);
rc = sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
if (rc)
- goto end;
+ goto clk_en_fail;
if (vsync_state)
rsc->hw_ops.hw_vsync(rsc, VSYNC_ENABLE, NULL,
@@ -915,8 +912,9 @@
sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
-end:
+clk_en_fail:
mutex_unlock(&rsc->client_lock);
+end:
kfree(input);
return count;
}
diff --git a/drivers/gpu/drm/msm/sde_rsc_hw.c b/drivers/gpu/drm/msm/sde_rsc_hw.c
index c87dac4..3332a05 100644
--- a/drivers/gpu/drm/msm/sde_rsc_hw.c
+++ b/drivers/gpu/drm/msm/sde_rsc_hw.c
@@ -572,7 +572,7 @@
}
int rsc_hw_mode_ctrl(struct sde_rsc_priv *rsc, enum rsc_mode_req request,
- char *buffer, int buffer_size, bool mode)
+ char *buffer, int buffer_size, u32 mode)
{
u32 blen = 0;
u32 slot_time;
@@ -588,28 +588,19 @@
rsc->debug_mode));
break;
- case MODE0_UPDATE:
- slot_time = mode ? rsc->timer_config.rsc_time_slot_0_ns :
+ case MODE_UPDATE:
+ slot_time = mode & BIT(0) ? 0x0 :
+ rsc->timer_config.rsc_time_slot_2_ns;
+ dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_TIME_SLOT_TABLE_0_DRV0,
+ slot_time, rsc->debug_mode);
+
+ slot_time = mode & BIT(1) ?
+ rsc->timer_config.rsc_time_slot_0_ns :
rsc->timer_config.rsc_time_slot_2_ns;
dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_TIME_SLOT_TABLE_1_DRV0,
slot_time, rsc->debug_mode);
- slot_time = mode ? rsc->timer_config.rsc_time_slot_1_ns :
- rsc->timer_config.rsc_time_slot_2_ns;
- dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_TIME_SLOT_TABLE_2_DRV0,
- slot_time, rsc->debug_mode);
- rsc->power_collapse_block = mode;
- break;
- case MODE1_UPDATE:
- slot_time = mode ? rsc->timer_config.rsc_time_slot_1_ns :
- rsc->timer_config.rsc_time_slot_2_ns;
- dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_TIME_SLOT_TABLE_2_DRV0,
- slot_time, rsc->debug_mode);
- rsc->power_collapse_block = mode;
- break;
-
- case MODE2_UPDATE:
- rsc->power_collapse_block = mode;
+ rsc->power_collapse_block = !(mode & BIT(2));
break;
default:
@@ -673,7 +664,7 @@
return blen;
blen = snprintf(buffer, buffer_size - blen, "vsync0:0x%x\n",
- dss_reg_r(&rsc->drv_io,
+ dss_reg_r(&rsc->wrapper_io,
SDE_RSCC_WRAPPER_VSYNC_TIMESTAMP0,
rsc->debug_mode));
if (blen >= buffer_size)
@@ -681,15 +672,15 @@
blen += snprintf(buffer + blen, buffer_size - blen,
"vsync1:0x%x\n",
- dss_reg_r(&rsc->drv_io,
+ dss_reg_r(&rsc->wrapper_io,
SDE_RSCC_WRAPPER_VSYNC_TIMESTAMP1,
rsc->debug_mode));
break;
case VSYNC_ENABLE:
- reg = BIT(8) | BIT(9) | ((mode & 0x7) < 10);
+ reg = BIT(8) | ((mode & 0x7) << 10);
dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_DEBUG_BUS,
- mode, rsc->debug_mode);
+ reg, rsc->debug_mode);
break;
case VSYNC_DISABLE:
diff --git a/drivers/gpu/drm/msm/sde_rsc_priv.h b/drivers/gpu/drm/msm/sde_rsc_priv.h
index b83a866..b90b0ac 100644
--- a/drivers/gpu/drm/msm/sde_rsc_priv.h
+++ b/drivers/gpu/drm/msm/sde_rsc_priv.h
@@ -25,20 +25,27 @@
#define MAX_RSC_COUNT 5
+#define ALL_MODES_DISABLED 0x0
+#define ONLY_MODE_0_ENABLED 0x1
+#define ONLY_MODE_0_1_ENABLED 0x3
+#define ALL_MODES_ENABLED 0x7
+
+#define MAX_COUNT_SIZE_SUPPORTED 128
+
struct sde_rsc_priv;
/**
* rsc_mode_req: sde rsc mode request information
* MODE_READ: read vsync status
- * MODE0_UPDATE: mode0 status , this should be 0x0
- * MODE1_UPDATE: mode1 status , this should be 0x1
- * MODE2_UPDATE: mode2 status , this should be 0x2
+ * MODE_UPDATE: mode timeslot update
+ * 0x0: all modes are disabled.
+ * 0x1: Mode-0 is enabled and other two modes are disabled.
+ * 0x3: Mode-0 & Mode-1 are enabled and mode-2 is disabled.
+ * 0x7: all modes are enabled.
*/
enum rsc_mode_req {
MODE_READ,
- MODE0_UPDATE = 0x1,
- MODE1_UPDATE = 0x2,
- MODE2_UPDATE = 0x3,
+ MODE_UPDATE = 0x1,
};
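Read alongside the MODE_UPDATE handling in rsc_hw_mode_ctrl() above, the three bits decode roughly as follows (an illustrative sketch of that hunk, not additional driver code):

	/* mode is the 3-bit value written through the debugfs node */
	bool mode0_en = mode & BIT(0);	/* time-slot table 0 programmed to 0x0 when set */
	bool mode1_en = mode & BIT(1);	/* time-slot table 1 gets rsc_time_slot_0_ns when set */
	bool mode2_en = mode & BIT(2);	/* deepest mode: power collapse allowed when set */

	rsc->power_collapse_block = !mode2_en;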
/**
@@ -78,7 +85,7 @@
int (*state_update)(struct sde_rsc_priv *rsc, enum sde_rsc_state state);
int (*debug_show)(struct seq_file *s, struct sde_rsc_priv *rsc);
int (*mode_ctrl)(struct sde_rsc_priv *rsc, enum rsc_mode_req request,
- char *buffer, int buffer_size, bool mode);
+ char *buffer, int buffer_size, u32 mode);
};
/**
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 68d7653..ebaa1a9 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -1403,7 +1403,7 @@
}
/* GPU comes up in secured mode, make it unsecured by default */
- if (ADRENO_FEATURE(adreno_dev, ADRENO_CONTENT_PROTECTION))
+ if (adreno_dev->zap_loaded)
ret = adreno_switch_to_unsecure_mode(adreno_dev, rb);
else
adreno_writereg(adreno_dev,
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 530529f..7c76580 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -410,6 +410,7 @@
* @gpu_llc_slice_enable: To enable the GPU system cache slice or not
* @gpuhtw_llc_slice: GPU pagetables system cache slice descriptor
* @gpuhtw_llc_slice_enable: To enable the GPUHTW system cache slice or not
+ * @zap_loaded: Used to track if zap was successfully loaded or not
*/
struct adreno_device {
struct kgsl_device dev; /* Must be first field in this struct */
@@ -473,6 +474,7 @@
bool gpu_llc_slice_enable;
void *gpuhtw_llc_slice;
bool gpuhtw_llc_slice_enable;
+ unsigned int zap_loaded;
};
/**
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 1e95e38..6c8b677 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -30,7 +30,6 @@
#include "kgsl_trace.h"
#include "adreno_a5xx_packets.h"
-static int zap_ucode_loaded;
static int critical_packet_constructed;
static struct kgsl_memdesc crit_pkts;
@@ -2179,7 +2178,7 @@
* appropriate register,
* skip if retention is supported for the CPZ register
*/
- if (zap_ucode_loaded && !(ADRENO_FEATURE(adreno_dev,
+ if (adreno_dev->zap_loaded && !(ADRENO_FEATURE(adreno_dev,
ADRENO_CPZ_RETENTION))) {
int ret;
struct scm_desc desc = {0};
@@ -2197,14 +2196,13 @@
}
/* Load the zap shader firmware through PIL if it's available */
- if (adreno_dev->gpucore->zap_name && !zap_ucode_loaded) {
+ if (adreno_dev->gpucore->zap_name && !adreno_dev->zap_loaded) {
ptr = subsystem_get(adreno_dev->gpucore->zap_name);
/* Return error if the zap shader cannot be loaded */
if (IS_ERR_OR_NULL(ptr))
return (ptr == NULL) ? -ENODEV : PTR_ERR(ptr);
-
- zap_ucode_loaded = 1;
+ adreno_dev->zap_loaded = 1;
}
return 0;
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index 585beb9..77080df 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -490,7 +490,7 @@
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
uint64_t gpuaddr;
- static void *zap;
+ void *zap;
int ret = 0;
gpuaddr = fw->memdesc.gpuaddr;
@@ -500,14 +500,15 @@
upper_32_bits(gpuaddr));
/* Load the zap shader firmware through PIL if it's available */
- if (adreno_dev->gpucore->zap_name && !zap) {
+ if (adreno_dev->gpucore->zap_name && !adreno_dev->zap_loaded) {
zap = subsystem_get(adreno_dev->gpucore->zap_name);
/* Return error if the zap shader cannot be loaded */
if (IS_ERR_OR_NULL(zap)) {
ret = (zap == NULL) ? -ENODEV : PTR_ERR(zap);
zap = NULL;
- }
+ } else
+ adreno_dev->zap_loaded = 1;
}
return ret;
diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c
index 01ecb01..decbff3 100644
--- a/drivers/gpu/msm/adreno_a6xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c
@@ -180,6 +180,7 @@
unsigned int statetype;
const unsigned int *regs;
unsigned int num_sets;
+ unsigned int offset;
} a6xx_non_ctx_dbgahb[] = {
{ 0x0002F800, 0x40, a6xx_hlsq_non_ctx_registers,
ARRAY_SIZE(a6xx_hlsq_non_ctx_registers) / 2 },
@@ -735,10 +736,8 @@
return data_size + sizeof(*header);
}
-
-
-static size_t a6xx_snapshot_non_ctx_dbgahb(struct kgsl_device *device, u8 *buf,
- size_t remain, void *priv)
+static size_t a6xx_legacy_snapshot_non_ctx_dbgahb(struct kgsl_device *device,
+ u8 *buf, size_t remain, void *priv)
{
struct kgsl_snapshot_regs *header =
(struct kgsl_snapshot_regs *)buf;
@@ -783,6 +782,57 @@
return (count * 8) + sizeof(*header);
}
+static size_t a6xx_snapshot_non_ctx_dbgahb(struct kgsl_device *device, u8 *buf,
+ size_t remain, void *priv)
+{
+ struct kgsl_snapshot_regs *header =
+ (struct kgsl_snapshot_regs *)buf;
+ struct a6xx_non_ctx_dbgahb_registers *regs =
+ (struct a6xx_non_ctx_dbgahb_registers *)priv;
+ unsigned int count = 0;
+ unsigned int *data = (unsigned int *)(buf + sizeof(*header));
+ unsigned int i, k;
+ unsigned int *src;
+
+ if (crash_dump_valid == false)
+ return a6xx_legacy_snapshot_non_ctx_dbgahb(device, buf, remain,
+ regs);
+
+ if (remain < sizeof(*header)) {
+ SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
+ return 0;
+ }
+
+ remain -= sizeof(*header);
+
+ src = (unsigned int *)(a6xx_crashdump_registers.hostptr + regs->offset);
+
+ for (i = 0; i < regs->num_sets; i++) {
+ unsigned int start;
+ unsigned int end;
+
+ start = regs->regs[2 * i];
+ end = regs->regs[(2 * i) + 1];
+
+ if (remain < (end - start + 1) * 8) {
+ SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
+ goto out;
+ }
+
+ remain -= ((end - start) + 1) * 8;
+
+ for (k = start; k <= end; k++, count++) {
+ *data++ = k;
+ *data++ = *src++;
+ }
+ }
+out:
+ header->count = count;
+
+ /* Return the size of the section */
+ return (count * 8) + sizeof(*header);
+}
+
static void a6xx_snapshot_dbgahb_regs(struct kgsl_device *device,
struct kgsl_snapshot *snapshot)
{
@@ -1491,6 +1541,40 @@
return qwords;
}
+static int _a6xx_crashdump_init_non_ctx_dbgahb(uint64_t *ptr, uint64_t *offset)
+{
+ int qwords = 0;
+ unsigned int i, k;
+ unsigned int count;
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
+ struct a6xx_non_ctx_dbgahb_registers *regs =
+ &a6xx_non_ctx_dbgahb[i];
+
+ regs->offset = *offset;
+
+ /* Program the aperture */
+ ptr[qwords++] = (regs->statetype & 0xff) << 8;
+ ptr[qwords++] = (((uint64_t)A6XX_HLSQ_DBG_READ_SEL << 44)) |
+ (1 << 21) | 1;
+
+ for (k = 0; k < regs->num_sets; k++) {
+ unsigned int start = regs->regs[2 * k];
+
+ count = REG_PAIR_COUNT(regs->regs, k);
+ ptr[qwords++] =
+ a6xx_crashdump_registers.gpuaddr + *offset;
+ ptr[qwords++] =
+ (((uint64_t)(A6XX_HLSQ_DBG_AHB_READ_APERTURE +
+ start - regs->regbase / 4) << 44)) |
+ count;
+
+ *offset += count * sizeof(unsigned int);
+ }
+ }
+ return qwords;
+}
+
void a6xx_crashdump_init(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -1578,6 +1662,26 @@
}
}
+ /*
+ * Calculate the script and data size for non context debug
+ * AHB registers
+ */
+ for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
+ struct a6xx_non_ctx_dbgahb_registers *regs =
+ &a6xx_non_ctx_dbgahb[i];
+
+ /* 16 bytes for programming the aperture */
+ script_size += 16;
+
+ /* Reading each pair of registers takes 16 bytes */
+ script_size += 16 * regs->num_sets;
+
+ /* A dword per register read from the cluster list */
+ for (k = 0; k < regs->num_sets; k++)
+ data_size += REG_PAIR_COUNT(regs->regs, k) *
+ sizeof(unsigned int);
+ }
+
/* Now allocate the script and data buffers */
/* The script buffer needs 2 extra qwords on the end */
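For a single non-context block the accounting works out as: 16 bytes of script to program the aperture, plus 16 bytes (two qwords: destination address, then packed source/count) per {start, end} range, and N * 4 bytes of dump data for a range of N registers. The snapshot section built from that data in a6xx_snapshot_non_ctx_dbgahb() above then emits 8 bytes per register (index plus value), which is what its remain check guards against. For instance, a block with two ranges of 0x10 registers each needs 48 bytes of script and 128 bytes of data, and produces a 256-byte register section plus header.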
@@ -1619,6 +1723,8 @@
ptr += _a6xx_crashdump_init_ctx_dbgahb(ptr, &offset);
+ ptr += _a6xx_crashdump_init_non_ctx_dbgahb(ptr, &offset);
+
*ptr++ = 0;
*ptr++ = 0;
}
diff --git a/drivers/gpu/msm/kgsl_sync.c b/drivers/gpu/msm/kgsl_sync.c
index 973a2ff..96873c4 100644
--- a/drivers/gpu/msm/kgsl_sync.c
+++ b/drivers/gpu/msm/kgsl_sync.c
@@ -803,11 +803,23 @@
return "kgsl-syncsource-timeline";
}
+static void kgsl_syncsource_fence_value_str(struct fence *fence,
+ char *str, int size)
+{
+ /*
+ * Each fence is independent of the others on the same timeline.
+ * We use a different context for each of them.
+ */
+ snprintf(str, size, "%llu", fence->context);
+}
+
static const struct fence_ops kgsl_syncsource_fence_ops = {
.get_driver_name = kgsl_syncsource_driver_name,
.get_timeline_name = kgsl_syncsource_get_timeline_name,
.enable_signaling = kgsl_syncsource_enable_signaling,
.wait = fence_default_wait,
.release = kgsl_syncsource_fence_release,
+
+ .fence_value_str = kgsl_syncsource_fence_value_str,
};
diff --git a/drivers/mailbox/qti-tcs.c b/drivers/mailbox/qti-tcs.c
index 6d0e913..b328a2a 100644
--- a/drivers/mailbox/qti-tcs.c
+++ b/drivers/mailbox/qti-tcs.c
@@ -134,6 +134,9 @@
struct workqueue_struct *wq;
struct tcs_response_pool *resp_pool;
atomic_t tcs_in_use[MAX_POOL_SIZE];
+ /* Debug info */
+ u64 tcs_last_sent_ts[MAX_POOL_SIZE];
+ u64 tcs_last_recv_ts[MAX_POOL_SIZE];
atomic_t tcs_send_count[MAX_POOL_SIZE];
atomic_t tcs_irq_count[MAX_POOL_SIZE];
};
@@ -170,8 +173,9 @@
struct tcs_response_pool *pool = drv->resp_pool;
struct tcs_response *resp = ERR_PTR(-ENOMEM);
int pos;
+ unsigned long flags;
- spin_lock(&pool->lock);
+ spin_lock_irqsave(&pool->lock, flags);
pos = find_first_zero_bit(pool->avail, MAX_POOL_SIZE);
if (pos != MAX_POOL_SIZE) {
bitmap_set(pool->avail, pos, 1);
@@ -182,7 +186,7 @@
resp->err = err;
resp->in_use = false;
}
- spin_unlock(&pool->lock);
+ spin_unlock_irqrestore(&pool->lock, flags);
return resp;
}
@@ -190,34 +194,62 @@
static void free_response(struct tcs_response *resp)
{
struct tcs_response_pool *pool = resp->drv->resp_pool;
+ unsigned long flags;
- spin_lock(&pool->lock);
+ spin_lock_irqsave(&pool->lock, flags);
resp->err = -EINVAL;
bitmap_clear(pool->avail, resp->idx, 1);
- spin_unlock(&pool->lock);
+ spin_unlock_irqrestore(&pool->lock, flags);
}
-static inline struct tcs_response *get_response(struct tcs_drv *drv, u32 m)
+static inline struct tcs_response *get_response(struct tcs_drv *drv, u32 m,
+ bool for_use)
{
struct tcs_response_pool *pool = drv->resp_pool;
struct tcs_response *resp = NULL;
int pos = 0;
+ unsigned long flags;
+ spin_lock_irqsave(&pool->lock, flags);
do {
pos = find_next_bit(pool->avail, MAX_POOL_SIZE, pos);
if (pos == MAX_POOL_SIZE)
break;
+
resp = &pool->resp[pos];
if (resp->m == m && !resp->in_use) {
- resp->in_use = true;
+ resp->in_use = for_use;
break;
}
pos++;
+ udelay(1);
} while (1);
+ spin_unlock_irqrestore(&pool->lock, flags);
return resp;
}
+static void print_response(struct tcs_drv *drv, int m)
+{
+ struct tcs_response *resp;
+ struct tcs_mbox_msg *msg;
+ int i;
+
+ resp = get_response(drv, m, false);
+ if (!resp)
+ return;
+
+ msg = resp->msg;
+ pr_info("Response object idx=%d:\n\tfor-tcs=%d\tin-use=%d\n",
+ resp->idx, resp->m, resp->in_use);
+ pr_info("Msg: state=%d\n", msg->state);
+ for (i = 0; i < msg->num_payload; i++)
+ pr_info("addr=0x%x data=0x%x complete=0x%x\n",
+ msg->payload[i].addr,
+ msg->payload[i].data,
+ msg->payload[i].complete);
+}
+
static inline u32 read_drv_config(void __iomem *base)
{
return le32_to_cpu(readl_relaxed(base + DRV_PRNT_CHLD_CONFIG));
@@ -257,17 +289,19 @@
static inline struct tcs_mbox *get_tcs_from_index(struct tcs_drv *drv, int m)
{
- struct tcs_mbox *tcs;
+ struct tcs_mbox *tcs = NULL;
int i;
- for (i = 0; i < TCS_TYPE_NR; i++) {
+ for (i = 0; i < drv->num_tcs; i++) {
tcs = &drv->tcs[i];
- if (tcs->tcs_mask & BIT(m))
+ if (tcs->tcs_mask & (u32)BIT(m))
break;
}
- if (i == TCS_TYPE_NR)
+ if (i == drv->num_tcs) {
+ WARN(1, "Incorrect TCS index %d", m);
tcs = NULL;
+ }
return tcs;
}
@@ -369,16 +403,10 @@
continue;
atomic_inc(&drv->tcs_irq_count[m]);
- resp = get_response(drv, m);
+ resp = get_response(drv, m, true);
if (!resp) {
pr_err("No resp request for TCS-%d\n", m);
- continue;
- }
-
- tcs = get_tcs_from_index(drv, m);
- if (!tcs) {
- pr_err("TCS-%d doesn't exist in DRV\n", m);
- continue;
+ goto no_resp;
}
/* Check if all commands were completed */
@@ -404,7 +432,8 @@
resp->err);
/* Clear the AMC mode for non-ACTIVE TCSes */
- if (tcs->type != ACTIVE_TCS) {
+ tcs = get_tcs_from_index(drv, m);
+ if (tcs && tcs->type != ACTIVE_TCS) {
data = read_tcs_reg(base, TCS_DRV_CONTROL, m, 0);
data &= ~TCS_AMC_MODE_ENABLE;
write_tcs_reg(base, TCS_DRV_CONTROL, m, 0, data);
@@ -419,11 +448,16 @@
write_tcs_reg(base, TCS_DRV_CMD_ENABLE, m, 0, 0);
}
+no_resp:
+ /* Record the recvd time stamp */
+ drv->tcs_last_recv_ts[m] = arch_counter_get_cntvct();
+
/* Clear the TCS IRQ status */
write_tcs_reg(base, TCS_DRV_IRQ_CLEAR, 0, 0, BIT(m));
/* Clean up response object and notify mbox in tasklet */
- send_tcs_response(resp);
+ if (resp)
+ send_tcs_response(resp);
/* Notify the client that this request is completed. */
atomic_set(&drv->tcs_in_use[m], 0);
@@ -628,7 +662,6 @@
static int tcs_mbox_write(struct mbox_chan *chan, struct tcs_mbox_msg *msg,
bool trigger)
{
- const struct device *dev = chan->cl->dev;
struct tcs_drv *drv = container_of(chan->mbox, struct tcs_drv, mbox);
int d = drv->drv_id;
struct tcs_mbox *tcs;
@@ -647,7 +680,6 @@
spin_lock_irqsave(&tcs->tcs_lock, flags);
slot = find_slots(tcs, msg);
if (slot < 0) {
- dev_err(dev, "No TCS slot found.\n");
spin_unlock_irqrestore(&tcs->tcs_lock, flags);
if (resp)
free_response(resp);
@@ -683,6 +715,7 @@
/* Enable interrupt for active votes through wake TCS */
if (tcs->type != ACTIVE_TCS)
enable_tcs_irq(drv, m, true);
+ drv->tcs_last_sent_ts[m] = arch_counter_get_cntvct();
}
/* Write to the TCS or AMC */
@@ -725,6 +758,24 @@
return 0;
}
+static void dump_tcs_stats(struct tcs_drv *drv)
+{
+ int i;
+ unsigned long long curr = arch_counter_get_cntvct();
+
+ for (i = 0; i < drv->num_tcs; i++) {
+ if (!atomic_read(&drv->tcs_in_use[i]))
+ continue;
+ pr_info("Time: %llu: TCS-%d:\n\tReq Sent:%d Last Sent:%llu\n\tResp Recv:%d Last Recvd:%llu\n",
+ curr, i,
+ atomic_read(&drv->tcs_send_count[i]),
+ drv->tcs_last_sent_ts[i],
+ atomic_read(&drv->tcs_irq_count[i]),
+ drv->tcs_last_recv_ts[i]);
+ print_response(drv, i);
+ }
+}
+
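When a request keeps hitting -EBUSY, the dump path above logs each still-busy TCS along these lines (all values here are illustrative):

	Time: 123456789: TCS-2:
		Req Sent:10 Last Sent:123450000
		Resp Recv:9 Last Recvd:123440000
	Response object idx=4:
		for-tcs=2	in-use=1
	Msg: state=2
	addr=0x30000 data=0x1 complete=0x1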
/**
* chan_tcs_write: Validate the incoming message and write to the
* appropriate TCS block.
@@ -738,35 +789,41 @@
*/
static int chan_tcs_write(struct mbox_chan *chan, void *data)
{
+ struct tcs_drv *drv = container_of(chan->mbox, struct tcs_drv, mbox);
struct tcs_mbox_msg *msg = data;
const struct device *dev = chan->cl->dev;
- int ret = -EINVAL;
+ int ret = 0;
int count = 0;
if (!msg) {
- dev_err(dev, "Payload error.\n");
+ dev_err(dev, "Payload error\n");
+ ret = -EINVAL;
goto tx_fail;
}
if (!msg->payload || msg->num_payload > MAX_RPMH_PAYLOAD) {
- dev_err(dev, "Payload error.\n");
+ dev_err(dev, "Payload error\n");
+ ret = -EINVAL;
goto tx_fail;
}
if (msg->invalidate || msg->is_control) {
- dev_err(dev, "Incorrect API.\n");
+ dev_err(dev, "Incorrect API\n");
+ ret = -EINVAL;
goto tx_fail;
}
if (msg->state != RPMH_ACTIVE_ONLY_STATE &&
msg->state != RPMH_AWAKE_STATE) {
- dev_err(dev, "Incorrect API.\n");
+ dev_err(dev, "Incorrect API\n");
+ ret = -EINVAL;
goto tx_fail;
}
/* Read requests should always be single */
if (msg->is_read && msg->num_payload > 1) {
- dev_err(dev, "Incorrect read request.\n");
+ dev_err(dev, "Incorrect read request\n");
+ ret = -EINVAL;
goto tx_fail;
}
@@ -780,25 +837,29 @@
/* Post the message to the TCS and trigger */
do {
ret = tcs_mbox_write(chan, msg, true);
- if (ret == -EBUSY) {
- ret = -EIO;
- udelay(10);
- } else
+ if (ret != -EBUSY)
break;
+ udelay(100);
} while (++count < 10);
tx_fail:
- if (ret) {
- struct tcs_drv *drv = container_of(chan->mbox,
- struct tcs_drv, mbox);
+ /* If there was an error in the request, schedule a response */
+ if (ret < 0 && ret != -EBUSY) {
struct tcs_response *resp = setup_response(
drv, msg, chan, TCS_M_INIT, ret);
dev_err(dev, "Error sending RPMH message %d\n", ret);
send_tcs_response(resp);
+ ret = 0;
}
- return 0;
+ /* If we were just busy waiting for TCS, dump the state and return */
+ if (ret == -EBUSY) {
+ dev_err(dev, "TCS Busy, retrying RPMH message send\n");
+ dump_tcs_stats(drv);
+ }
+
+ return ret;
}
static void __tcs_write_hidden(struct tcs_drv *drv, int d,
@@ -827,7 +888,7 @@
return PTR_ERR(tcs);
if (msg->num_payload != tcs->ncpt) {
- dev_err(dev, "Request must fit the control TCS size.\n");
+ dev_err(dev, "Request must fit the control TCS size\n");
return -EINVAL;
}
@@ -851,12 +912,12 @@
int ret = -EINVAL;
if (!msg) {
- dev_err(dev, "Payload error.\n");
+ dev_err(dev, "Payload error\n");
goto tx_done;
}
if (msg->num_payload > MAX_RPMH_PAYLOAD) {
- dev_err(dev, "Payload error.\n");
+ dev_err(dev, "Payload error\n");
goto tx_done;
}
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index 6601c9a..9331c94 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -32,7 +32,6 @@
HFI_H264_PROFILE_CONSTRAINED_BASE,
[ilog2(HAL_H264_PROFILE_CONSTRAINED_HIGH)] =
HFI_H264_PROFILE_CONSTRAINED_HIGH,
- [ilog2(HAL_VPX_PROFILE_VERSION_1)] = HFI_VPX_PROFILE_VERSION_1,
};
static int entropy_mode[] = {
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index a86c677..c42d7aa 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -219,13 +219,9 @@
.name = "VP8 Profile Level",
.type = V4L2_CTRL_TYPE_MENU,
.minimum = V4L2_MPEG_VIDC_VIDEO_VP8_UNUSED,
- .maximum = V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_1,
+ .maximum = V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_3,
.default_value = V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_0,
- .menu_skip_mask = ~(
- (1 << V4L2_MPEG_VIDC_VIDEO_VP8_UNUSED) |
- (1 << V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_0) |
- (1 << V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_1)
- ),
+ .menu_skip_mask = 0,
.qmenu = vp8_profile_level,
.flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
},
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index 006cd49..e3d52bf 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -432,13 +432,9 @@
.name = "VP8 Profile Level",
.type = V4L2_CTRL_TYPE_MENU,
.minimum = V4L2_MPEG_VIDC_VIDEO_VP8_UNUSED,
- .maximum = V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_1,
+ .maximum = V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_3,
.default_value = V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_0,
- .menu_skip_mask = ~(
- (1 << V4L2_MPEG_VIDC_VIDEO_VP8_UNUSED) |
- (1 << V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_0) |
- (1 << V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_1)
- ),
+ .menu_skip_mask = 0,
.qmenu = vp8_profile_level,
},
{
@@ -1308,10 +1304,10 @@
break;
case V4L2_CID_MPEG_VIDC_VIDEO_VP8_PROFILE_LEVEL:
property_id = HAL_PARAM_PROFILE_LEVEL_CURRENT;
- profile_level.profile = msm_comm_v4l2_to_hal(
+ profile_level.profile = HAL_VPX_PROFILE_MAIN;
+ profile_level.level = msm_comm_v4l2_to_hal(
V4L2_CID_MPEG_VIDC_VIDEO_VP8_PROFILE_LEVEL,
ctrl->val);
- profile_level.level = HAL_VPX_PROFILE_UNUSED;
pdata = &profile_level;
break;
case V4L2_CID_MPEG_VIDC_VIDEO_HEVC_PROFILE:
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 576809b..1cab039 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -1888,13 +1888,13 @@
switch (ctrl->id) {
case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
- case V4L2_CID_MPEG_VIDC_VIDEO_VP8_PROFILE_LEVEL:
case V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_PROFILE:
case V4L2_CID_MPEG_VIDC_VIDEO_HEVC_PROFILE:
ctrl->val = inst->profile;
break;
case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ case V4L2_CID_MPEG_VIDC_VIDEO_VP8_PROFILE_LEVEL:
case V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_LEVEL:
case V4L2_CID_MPEG_VIDC_VIDEO_HEVC_TIER_LEVEL:
ctrl->val = inst->level;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 0efe93b..4f53850 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -321,15 +321,15 @@
case V4L2_CID_MPEG_VIDC_VIDEO_VP8_PROFILE_LEVEL:
switch (value) {
case V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_0:
- return HAL_VPX_PROFILE_VERSION_0;
+ return HAL_VPX_LEVEL_VERSION_0;
case V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_1:
- return HAL_VPX_PROFILE_VERSION_1;
+ return HAL_VPX_LEVEL_VERSION_1;
case V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_2:
- return HAL_VPX_PROFILE_VERSION_2;
+ return HAL_VPX_LEVEL_VERSION_2;
case V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_3:
- return HAL_VPX_PROFILE_VERSION_3;
+ return HAL_VPX_LEVEL_VERSION_3;
case V4L2_MPEG_VIDC_VIDEO_VP8_UNUSED:
- return HAL_VPX_PROFILE_UNUSED;
+ return HAL_VPX_LEVEL_UNUSED;
default:
goto unknown_value;
}
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index 474c2fb6..537a1c6 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -371,15 +371,18 @@
};
enum hal_vpx_profile {
- HAL_VPX_PROFILE_SIMPLE = 0x00000001,
- HAL_VPX_PROFILE_ADVANCED = 0x00000002,
- HAL_VPX_PROFILE_VERSION_0 = 0x00000004,
- HAL_VPX_PROFILE_VERSION_1 = 0x00000008,
- HAL_VPX_PROFILE_VERSION_2 = 0x00000010,
- HAL_VPX_PROFILE_VERSION_3 = 0x00000020,
+ HAL_VPX_PROFILE_MAIN = 0x00000001,
HAL_VPX_PROFILE_UNUSED = 0x10000000,
};
+enum hal_vpx_level {
+ HAL_VPX_LEVEL_VERSION_0 = 0x00000001,
+ HAL_VPX_LEVEL_VERSION_1 = 0x00000002,
+ HAL_VPX_LEVEL_VERSION_2 = 0x00000004,
+ HAL_VPX_LEVEL_VERSION_3 = 0x00000008,
+ HAL_VPX_LEVEL_UNUSED = 0x10000000,
+};
+
struct hal_frame_rate {
enum hal_buffer buffer_type;
u32 frame_rate;
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index 81b4d91..2dd25f3 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -122,12 +122,12 @@
#define HFI_MPEG2_LEVEL_H14 0x00000004
#define HFI_MPEG2_LEVEL_HL 0x00000008
-#define HFI_VPX_PROFILE_SIMPLE 0x00000001
-#define HFI_VPX_PROFILE_ADVANCED 0x00000002
-#define HFI_VPX_PROFILE_VERSION_0 0x00000004
-#define HFI_VPX_PROFILE_VERSION_1 0x00000008
-#define HFI_VPX_PROFILE_VERSION_2 0x00000010
-#define HFI_VPX_PROFILE_VERSION_3 0x00000020
+#define HFI_VPX_PROFILE_MAIN 0x00000001
+
+#define HFI_VPX_LEVEL_VERSION_0 0x00000001
+#define HFI_VPX_LEVEL_VERSION_1 0x00000002
+#define HFI_VPX_LEVEL_VERSION_2 0x00000004
+#define HFI_VPX_LEVEL_VERSION_3 0x00000008
#define HFI_HEVC_PROFILE_MAIN 0x00000001
#define HFI_HEVC_PROFILE_MAIN10 0x00000002
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 862b147..e7b16b3 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -1884,7 +1884,7 @@
if (ep_idx == -1)
continue;
- gsi_ep_cfg = ipa3_get_gsi_ep_info(ep_idx);
+ gsi_ep_cfg = ipa3_get_gsi_ep_info(client_idx);
if (!gsi_ep_cfg) {
IPAERR("failed to get GSI config\n");
ipa_assert();
@@ -2362,6 +2362,7 @@
void ipa3_q6_post_shutdown_cleanup(void)
{
int client_idx;
+ int ep_idx;
IPADBG_LOW("ENTER\n");
@@ -2378,6 +2379,10 @@
for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++)
if (IPA_CLIENT_IS_Q6_PROD(client_idx)) {
+ ep_idx = ipa3_get_ep_mapping(client_idx);
+ if (ep_idx == -1)
+ continue;
+
if (ipa3_uc_is_gsi_channel_empty(client_idx)) {
IPAERR("fail to validate Q6 ch emptiness %d\n",
client_idx);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index f066d94..6321ca9 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -1703,10 +1703,11 @@
const struct ipa_gsi_ep_config *ipa3_get_gsi_ep_info
(enum ipa_client_type client)
{
- if (client >= IPA_CLIENT_MAX || client < 0) {
- IPAERR("Bad client number! client =%d\n", client);
+ int ep_idx;
+
+ ep_idx = ipa3_get_ep_mapping(client);
+ if (ep_idx == IPA_EP_NOT_ALLOCATED)
return NULL;
- }
return &(ipa3_ep_mapping[ipa3_get_hw_type_index()]
[client].ipa_gsi_ep_info);
diff --git a/drivers/regulator/cpr3-regulator.c b/drivers/regulator/cpr3-regulator.c
index c45fb0d..b1e6a3b 100644
--- a/drivers/regulator/cpr3-regulator.c
+++ b/drivers/regulator/cpr3-regulator.c
@@ -290,6 +290,10 @@
#define CPRH_MISC_REG2_ACD_ADJ_STEP_SIZE_UP_SHIFT 22
#define CPRH_MISC_REG2_ACD_ADJ_STEP_SIZE_DOWN_MASK GENMASK(21, 20)
#define CPRH_MISC_REG2_ACD_ADJ_STEP_SIZE_DOWN_SHIFT 20
+#define CPRH_MISC_REG2_ACD_NOTWAIT_4_CL_SETTLE_MASK BIT(16)
+#define CPRH_MISC_REG2_ACD_NOTWAIT_4_CL_SETTLE_EN BIT(16)
+#define CPRH_MISC_REG2_ACD_AVG_FAST_UPDATE_EN_MASK BIT(13)
+#define CPRH_MISC_REG2_ACD_AVG_FAST_UPDATE_EN BIT(13)
#define CPRH_MISC_REG2_ACD_AVG_EN_MASK BIT(12)
#define CPRH_MISC_REG2_ACD_AVG_ENABLE BIT(12)
@@ -1449,6 +1453,16 @@
ctrl->acd_adj_down_step_size <<
CPRH_MISC_REG2_ACD_ADJ_STEP_SIZE_DOWN_SHIFT);
cpr3_masked_write(ctrl, CPRH_REG_MISC_REG2,
+ CPRH_MISC_REG2_ACD_NOTWAIT_4_CL_SETTLE_MASK,
+ (ctrl->acd_notwait_for_cl_settled
+ ? CPRH_MISC_REG2_ACD_NOTWAIT_4_CL_SETTLE_EN
+ : 0));
+ cpr3_masked_write(ctrl, CPRH_REG_MISC_REG2,
+ CPRH_MISC_REG2_ACD_AVG_FAST_UPDATE_EN_MASK,
+ (ctrl->acd_adj_avg_fast_update
+ ? CPRH_MISC_REG2_ACD_AVG_FAST_UPDATE_EN
+ : 0));
+ cpr3_masked_write(ctrl, CPRH_REG_MISC_REG2,
CPRH_MISC_REG2_ACD_AVG_EN_MASK,
CPRH_MISC_REG2_ACD_AVG_ENABLE);
}
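Both new controls reuse the masked read-modify-write pattern already applied to CPRH_REG_MISC_REG2; cpr3_masked_write() is expected to boil down to something like the sketch below (illustrative only; the helper name and the iomem pointer passed in are assumptions, not the driver's exact code):

	#include <linux/io.h>
	#include <linux/types.h>

	/* Update only the bits covered by mask, leaving the rest untouched. */
	static void cprh_masked_write(void __iomem *reg, u32 mask, u32 value)
	{
		u32 v = readl_relaxed(reg);

		v = (v & ~mask) | (value & mask);
		writel_relaxed(v, reg);
	}

so that, for example, the fast-update bit is set or cleared with

	cprh_masked_write(misc_reg2, CPRH_MISC_REG2_ACD_AVG_FAST_UPDATE_EN_MASK,
			  ctrl->acd_adj_avg_fast_update ?
			  CPRH_MISC_REG2_ACD_AVG_FAST_UPDATE_EN : 0);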
diff --git a/drivers/regulator/cpr3-regulator.h b/drivers/regulator/cpr3-regulator.h
index 8535020..a315e46 100644
--- a/drivers/regulator/cpr3-regulator.h
+++ b/drivers/regulator/cpr3-regulator.h
@@ -661,6 +661,10 @@
* @acd_adj_up_step_size: ACD step size in units of PMIC steps used for
* target quotient adjustment due to an ACD up
* recommendation.
+ * @acd_notwait_for_cl_settled: Boolean which indicates ACD down recommendations
+ * do not need to wait for CPR closed-loop to settle.
+ * @acd_adj_avg_fast_update: Boolean which indicates if CPR should issue
+ * immediate voltage updates on ACD requests.
* @acd_avg_enabled: Boolean defining the enable state of the ACD AVG
* feature.
* @count_mode: CPR controller count mode
@@ -828,6 +832,8 @@
u32 acd_adj_up_step_limit;
u32 acd_adj_down_step_size;
u32 acd_adj_up_step_size;
+ bool acd_notwait_for_cl_settled;
+ bool acd_adj_avg_fast_update;
bool acd_avg_enabled;
enum cpr3_count_mode count_mode;
u32 count_repeat;
diff --git a/drivers/regulator/cprh-kbss-regulator.c b/drivers/regulator/cprh-kbss-regulator.c
index b84d9f0..cf7c35d 100644
--- a/drivers/regulator/cprh-kbss-regulator.c
+++ b/drivers/regulator/cprh-kbss-regulator.c
@@ -34,9 +34,16 @@
#include "cpr3-regulator.h"
-#define MSM8998_KBSS_FUSE_CORNERS 4
-#define SDM660_KBSS_FUSE_CORNERS 5
-#define SDM845_KBSS_FUSE_CORNERS 3
+#define MSM8998_KBSS_FUSE_CORNERS 4
+#define SDM660_KBSS_FUSE_CORNERS 5
+
+#define SDM845_KBSS_POWER_CLUSTER_FUSE_CORNERS 4
+#define SDM845_KBSS_PERFORMANCE_CLUSTER_FUSE_CORNERS 3
+/*
+ * This must be set to the larger of SDM845_KBSS_POWER_CLUSTER_FUSE_CORNERS and
+ * SDM845_KBSS_PERFORMANCE_CLUSTER_FUSE_CORNERS values.
+ */
+#define SDM845_KBSS_MAX_FUSE_CORNERS 4
/**
* struct cprh_kbss_fuses - KBSS specific fuse data
@@ -79,7 +86,7 @@
*/
#define CPRH_MSM8998_KBSS_FUSE_COMBO_COUNT 32
#define CPRH_SDM660_KBSS_FUSE_COMBO_COUNT 16
-#define CPRH_SDM845_KBSS_FUSE_COMBO_COUNT 8
+#define CPRH_SDM845_KBSS_FUSE_COMBO_COUNT 16
/*
* Constants which define the name of each fuse corner.
@@ -146,16 +153,18 @@
#define CPRH_KBSS_PERFORMANCE_CLUSTER_THREAD_ID 0
static const char * const
-cprh_sdm845_kbss_fuse_corner_name[2][SDM845_KBSS_FUSE_CORNERS] = {
+cprh_sdm845_kbss_fuse_corner_name[2][SDM845_KBSS_MAX_FUSE_CORNERS] = {
[CPRH_KBSS_POWER_CLUSTER_ID] = {
"LowSVS",
"SVS_L1",
"NOM_L1",
+ "TURBO",
},
[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
"SVS",
"NOM",
"TURBO_L2",
+ "",
},
};
@@ -325,17 +334,19 @@
* different fuse rows.
*/
static const struct cpr3_fuse_param
-sdm845_kbss_ro_sel_param[2][2][SDM845_KBSS_FUSE_CORNERS][3] = {
+sdm845_kbss_ro_sel_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][3] = {
[CPRH_KBSS_POWER_CLUSTER_ID] = {
[CPRH_KBSS_POWER_CLUSTER_THREAD_ID] = {
{{66, 52, 55}, {} },
{{66, 48, 51}, {} },
{{66, 44, 47}, {} },
+ {{66, 40, 43}, {} },
},
[CPRH_KBSS_L3_THREAD_ID] = {
{{66, 52, 55}, {} },
{{66, 48, 51}, {} },
{{66, 44, 47}, {} },
+ {{66, 40, 43}, {} },
},
},
[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
@@ -348,17 +359,19 @@
};
static const struct cpr3_fuse_param
-sdm845_kbss_init_voltage_param[2][2][SDM845_KBSS_FUSE_CORNERS][3] = {
+sdm845_kbss_init_voltage_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][3] = {
[CPRH_KBSS_POWER_CLUSTER_ID] = {
[CPRH_KBSS_POWER_CLUSTER_THREAD_ID] = {
{{67, 10, 15}, {} },
{{67, 4, 9}, {} },
{{66, 62, 63}, {67, 0, 3}, {} },
+ {{66, 56, 61}, {} },
},
[CPRH_KBSS_L3_THREAD_ID] = {
{{68, 47, 52}, {} },
{{68, 41, 46}, {} },
{{68, 35, 40}, {} },
+ {{68, 29, 34}, {} },
},
},
[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
@@ -371,17 +384,19 @@
};
static const struct cpr3_fuse_param
-sdm845_kbss_target_quot_param[2][2][SDM845_KBSS_FUSE_CORNERS][3] = {
+sdm845_kbss_target_quot_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][3] = {
[CPRH_KBSS_POWER_CLUSTER_ID] = {
[CPRH_KBSS_POWER_CLUSTER_THREAD_ID] = {
{{67, 52, 63}, {} },
{{67, 40, 51}, {} },
{{67, 28, 39}, {} },
+ {{67, 16, 27}, {} },
},
[CPRH_KBSS_L3_THREAD_ID] = {
{{69, 25, 36}, {} },
{{69, 13, 24}, {} },
{{69, 1, 12}, {} },
+ {{68, 53, 63}, {69, 0, 0}, {} },
},
},
[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
@@ -394,17 +409,19 @@
};
static const struct cpr3_fuse_param
-sdm845_kbss_quot_offset_param[2][2][SDM845_KBSS_FUSE_CORNERS][2] = {
+sdm845_kbss_quot_offset_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][2] = {
[CPRH_KBSS_POWER_CLUSTER_ID] = {
[CPRH_KBSS_POWER_CLUSTER_THREAD_ID] = {
{{} },
{{68, 14, 20}, {} },
{{68, 7, 13}, {} },
+ {{68, 0, 6}, {} },
},
[CPRH_KBSS_L3_THREAD_ID] = {
{{} },
{{69, 51, 57}, {} },
{{69, 44, 50}, {} },
+ {{69, 37, 43}, {} },
},
},
[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
@@ -539,17 +556,19 @@
* Open loop voltage fuse reference voltages in microvolts for SDM845
*/
static const int
-sdm845_kbss_fuse_ref_volt[2][2][SDM845_KBSS_FUSE_CORNERS] = {
+sdm845_kbss_fuse_ref_volt[2][2][SDM845_KBSS_MAX_FUSE_CORNERS] = {
[CPRH_KBSS_POWER_CLUSTER_ID] = {
[CPRH_KBSS_POWER_CLUSTER_THREAD_ID] = {
688000,
812000,
896000,
+ 900000,
},
[CPRH_KBSS_L3_THREAD_ID] = {
688000,
812000,
896000,
+ 900000,
},
},
[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
@@ -976,7 +995,10 @@
break;
case SDM845_V1_SOC_ID:
case SDM845_V2_SOC_ID:
- fuse_corners = SDM845_KBSS_FUSE_CORNERS;
+ fuse_corners = vreg->thread->ctrl->ctrl_id
+ == CPRH_KBSS_POWER_CLUSTER_ID
+ ? SDM845_KBSS_POWER_CLUSTER_FUSE_CORNERS
+ : SDM845_KBSS_PERFORMANCE_CLUSTER_FUSE_CORNERS;
break;
default:
cpr3_err(vreg, "unsupported soc id = %d\n", soc_revision);
@@ -2260,6 +2282,13 @@
rc);
return rc;
}
+
+ ctrl->acd_notwait_for_cl_settled =
+ of_property_read_bool(ctrl->dev->of_node,
+ "qcom,cpr-acd-notwait-for-cl-settled");
+ ctrl->acd_adj_avg_fast_update =
+ of_property_read_bool(ctrl->dev->of_node,
+ "qcom,cpr-acd-avg-fast-update");
}
rc = of_property_read_u32(ctrl->dev->of_node,
diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c
index 5cc04c0..252bd21 100644
--- a/drivers/soc/qcom/cmd-db.c
+++ b/drivers/soc/qcom/cmd-db.c
@@ -19,6 +19,9 @@
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/seq_file.h>
#include <soc/qcom/cmd-db.h>
#define RESOURCE_ID_LEN 8
@@ -239,6 +242,108 @@
return ret < 0 ? 0 : (ent.addr >> SLAVE_ID_SHIFT) & SLAVE_ID_MASK;
}
+static void *cmd_db_start(struct seq_file *m, loff_t *pos)
+{
+ struct cmd_db_header *hdr = m->private;
+ int slv_idx, ent_idx;
+ struct entry_header *ent;
+ int total = 0;
+
+ for (slv_idx = 0; slv_idx < MAX_SLV_ID; slv_idx++) {
+
+ if (!hdr->header[slv_idx].cnt)
+ continue;
+ ent_idx = *pos - total;
+ if (ent_idx < hdr->header[slv_idx].cnt)
+ break;
+
+ total += hdr->header[slv_idx].cnt;
+ }
+
+ if (slv_idx == MAX_SLV_ID)
+ return NULL;
+
+ ent = start_addr + hdr->header[slv_idx].header_offset + sizeof(*hdr);
+ return &ent[ent_idx];
+
+}
+
+static void cmd_db_stop(struct seq_file *m, void *v)
+{
+}
+
+static void *cmd_db_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ (*pos)++;
+ return cmd_db_start(m, pos);
+}
+
+static int cmd_db_seq_show(struct seq_file *m, void *v)
+{
+ struct entry_header *eh = v;
+ struct cmd_db_header *hdr = m->private;
+ char buf[9] = {0};
+
+ if (!eh)
+ return 0;
+
+ memcpy(buf, &eh->res_id, min(sizeof(eh->res_id), sizeof(buf)));
+
+ seq_printf(m, "Address: 0x%05x, id: %s", eh->addr, buf);
+
+ if (eh->len) {
+ int slv_id = (eh->addr >> SLAVE_ID_SHIFT) & SLAVE_ID_MASK;
+ u8 aux[32] = {0};
+ int len;
+ int k;
+
+ len = min_t(u32, eh->len, sizeof(aux));
+
+ for (k = 0; k < MAX_SLV_ID; k++) {
+ if (hdr->header[k].slv_id == slv_id)
+ break;
+ }
+
+ if (k == MAX_SLV_ID)
+ return -EINVAL;
+
+ memcpy_fromio(aux, start_addr + hdr->header[k].data_offset
+ + eh->offset + sizeof(*cmd_db_header), len);
+
+ seq_puts(m, ", aux data: ");
+
+ for (k = 0; k < len; k++)
+ seq_printf(m, "%02x ", aux[k]);
+
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static const struct seq_operations cmd_db_seq_ops = {
+ .start = cmd_db_start,
+ .stop = cmd_db_stop,
+ .next = cmd_db_next,
+ .show = cmd_db_seq_show,
+};
+
+static int cmd_db_file_open(struct inode *inode, struct file *file)
+{
+ int ret = seq_open(file, &cmd_db_seq_ops);
+ struct seq_file *s = (struct seq_file *)(file->private_data);
+
+ s->private = inode->i_private;
+ return ret;
+}
+
+static const struct file_operations cmd_db_fops = {
+ .owner = THIS_MODULE,
+ .open = cmd_db_file_open,
+ .read = seq_read,
+ .release = seq_release,
+ .llseek = no_llseek,
+};
+
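The node is registered at the debugfs root as "cmd_db" (see the probe change below), so with debugfs mounted in the usual place it can typically be read at /sys/kernel/debug/cmd_db. Each entry prints as, for example (resource ids and aux bytes below are made up for illustration):

	Address: 0x30000, id: smpa1
	Address: 0x30010, id: ldoa2, aux data: 02 00 00 00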
static int cmd_db_dev_probe(struct platform_device *pdev)
{
struct resource res;
@@ -279,6 +384,10 @@
cmd_db_status = 0;
of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+ if (!debugfs_create_file("cmd_db", 0444, NULL,
+ cmd_db_header, &cmd_db_fops))
+ pr_err("Couldn't create debugfs\n");
+
if (cmd_db_is_standalone() == 1)
pr_info("Command DB is initialized in standalone mode.\n");
diff --git a/drivers/thermal/qcom/msm_lmh_dcvs.c b/drivers/thermal/qcom/msm_lmh_dcvs.c
index bfaf7c7..f8a7945 100644
--- a/drivers/thermal/qcom/msm_lmh_dcvs.c
+++ b/drivers/thermal/qcom/msm_lmh_dcvs.c
@@ -34,6 +34,9 @@
#include "../thermal_core.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/lmh.h>
+
#define LIMITS_DCVSH 0x10
#define LIMITS_PROFILE_CHANGE 0x01
#define LIMITS_NODE_DCVS 0x44435653
@@ -60,10 +63,10 @@
#define LIMITS_TEMP_HIGH_THRESH_MAX 120000
#define LIMITS_LOW_THRESHOLD_OFFSET 500
#define LIMITS_POLLING_DELAY_MS 10
-#define LIMITS_CLUSTER_0_REQ 0x179C1B04
-#define LIMITS_CLUSTER_1_REQ 0x179C3B04
-#define LIMITS_CLUSTER_0_INT_CLR 0x179CE808
-#define LIMITS_CLUSTER_1_INT_CLR 0x179CC808
+#define LIMITS_CLUSTER_0_REQ 0x17D43704
+#define LIMITS_CLUSTER_1_REQ 0x17D45F04
+#define LIMITS_CLUSTER_0_INT_CLR 0x17D78808
+#define LIMITS_CLUSTER_1_INT_CLR 0x17D70808
#define LIMITS_CLUSTER_0_MIN_FREQ 0x17D78BC0
#define LIMITS_CLUSTER_1_MIN_FREQ 0x17D70BC0
#define dcvsh_get_frequency(_val, _max) do { \
@@ -146,6 +149,9 @@
goto notify_exit;
}
+ pr_debug("CPU:%d max value read:%lu\n",
+ cpumask_first(&hw->core_map),
+ max_limit);
freq_val = FREQ_KHZ_TO_HZ(max_limit);
rcu_read_lock();
opp_entry = dev_pm_opp_find_freq_floor(cpu_dev, &freq_val);
@@ -165,6 +171,9 @@
max_limit = FREQ_HZ_TO_KHZ(freq_val);
sched_update_cpu_freq_min_max(&hw->core_map, 0, max_limit);
+ pr_debug("CPU:%d max limit:%lu\n", cpumask_first(&hw->core_map),
+ max_limit);
+ trace_lmh_dcvs_freq(cpumask_first(&hw->core_map), max_limit);
notify_exit:
hw->hw_freq_limit = max_limit;
@@ -344,6 +353,7 @@
ret = limits_dcvs_write(hw->affinity, LIMITS_SUB_FN_GENERAL,
LIMITS_DOMAIN_MAX, max_freq);
mutex_unlock(&hw->access_lock);
+ lmh_dcvs_notify(hw);
return ret;
}
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 5cc0a36..c573113 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1506,7 +1506,7 @@
#endif /* CONFIG_SMP */
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
static int sched_init_task_load_show(struct seq_file *m, void *v)
{
@@ -3062,7 +3062,7 @@
#ifdef CONFIG_SMP
REG("sched_wake_up_idle", S_IRUGO|S_IWUSR, proc_pid_sched_wake_up_idle_operations),
#endif
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
REG("sched_init_task_load", S_IRUGO|S_IWUSR, proc_pid_sched_init_task_load_operations),
REG("sched_group_id", S_IRUGO|S_IWUGO, proc_pid_sched_group_id_operations),
#endif
diff --git a/include/dt-bindings/clock/qcom,rpmh.h b/include/dt-bindings/clock/qcom,rpmh.h
index f3c3d1d..b2907e0 100644
--- a/include/dt-bindings/clock/qcom,rpmh.h
+++ b/include/dt-bindings/clock/qcom,rpmh.h
@@ -27,5 +27,7 @@
#define RPMH_RF_CLK2_A 9
#define RPMH_RF_CLK3 10
#define RPMH_RF_CLK3_A 11
+#define RPMH_RF_CLK4 12
+#define RPMH_RF_CLK4_A 13
#endif
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 52524a8..decb943 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -179,6 +179,7 @@
extern void sched_update_nr_prod(int cpu, long delta, bool inc);
extern void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg);
+extern unsigned int sched_get_cpu_util(int cpu);
extern void calc_global_load(unsigned long ticks);
@@ -1483,7 +1484,9 @@
u32 sum_history[RAVG_HIST_SIZE_MAX];
u32 *curr_window_cpu, *prev_window_cpu;
u32 curr_window, prev_window;
+#ifdef CONFIG_SCHED_HMP
u64 curr_burst, avg_burst, avg_sleep_time;
+#endif
u16 active_windows;
u32 pred_demand;
u8 busy_buckets[NUM_BUSY_BUCKETS];
@@ -1659,7 +1662,8 @@
const struct sched_class *sched_class;
struct sched_entity se;
struct sched_rt_entity rt;
-#ifdef CONFIG_SCHED_HMP
+ u64 last_sleep_ts;
+#ifdef CONFIG_SCHED_WALT
struct ravg ravg;
/*
* 'init_load_pct' represents the initial task load assigned to children
@@ -1672,6 +1676,7 @@
struct related_thread_group *grp;
struct list_head grp_list;
u64 cpu_cycles;
+ bool misfit;
#endif
#ifdef CONFIG_CGROUP_SCHED
@@ -2635,8 +2640,6 @@
extern unsigned long sched_get_busy(int cpu);
extern void sched_get_cpus_busy(struct sched_load *busy,
const struct cpumask *query_cpus);
-extern void sched_set_io_is_busy(int val);
-extern int sched_set_boost(int enable);
extern int sched_set_init_task_load(struct task_struct *p, int init_load_pct);
extern u32 sched_get_init_task_load(struct task_struct *p);
extern int sched_set_static_cpu_pwr_cost(int cpu, unsigned int cost);
@@ -2652,25 +2655,12 @@
int wakeup_energy, int wakeup_latency);
extern void sched_set_cluster_dstate(const cpumask_t *cluster_cpus, int dstate,
int wakeup_energy, int wakeup_latency);
-extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
-extern u64 sched_ktime_clock(void);
extern int sched_set_group_id(struct task_struct *p, unsigned int group_id);
extern unsigned int sched_get_group_id(struct task_struct *p);
#else /* CONFIG_SCHED_HMP */
static inline void free_task_load_ptrs(struct task_struct *p) { }
-static inline u64 sched_ktime_clock(void)
-{
- return 0;
-}
-
-static inline int
-register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb)
-{
- return 0;
-}
-
static inline int sched_set_window(u64 window_start, unsigned int window_size)
{
return -EINVAL;
@@ -2682,21 +2672,11 @@
static inline void sched_get_cpus_busy(struct sched_load *busy,
const struct cpumask *query_cpus) {};
-static inline void sched_set_io_is_busy(int val) {};
-
-static inline int sched_set_boost(int enable)
-{
- return -EINVAL;
-}
-
static inline int sched_update_freq_max_load(const cpumask_t *cpumask)
{
return 0;
}
-static inline void sched_update_cpu_freq_min_max(const cpumask_t *cpus,
- u32 fmin, u32 fmax) { }
-
static inline void
sched_set_cpu_cstate(int cpu, int cstate, int wakeup_energy, int wakeup_latency)
{
@@ -2708,6 +2688,37 @@
}
#endif /* CONFIG_SCHED_HMP */
+#ifdef CONFIG_SCHED_WALT
+extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
+extern void sched_set_io_is_busy(int val);
+extern int sched_set_group_id(struct task_struct *p, unsigned int group_id);
+extern unsigned int sched_get_group_id(struct task_struct *p);
+extern int sched_set_init_task_load(struct task_struct *p, int init_load_pct);
+extern u32 sched_get_init_task_load(struct task_struct *p);
+extern void sched_update_cpu_freq_min_max(const cpumask_t *cpus, u32 fmin,
+ u32 fmax);
+extern int sched_set_boost(int enable);
+#else
+static inline int
+register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb)
+{
+ return 0;
+}
+static inline void sched_set_io_is_busy(int val) {};
+
+static inline int sched_set_boost(int enable)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_SCHED_WALT */
+
+#ifndef CONFIG_SCHED_WALT
+#ifndef CONFIG_SCHED_HMP
+static inline void sched_update_cpu_freq_min_max(const cpumask_t *cpus,
+ u32 fmin, u32 fmax) { }
+#endif /* CONFIG_SCHED_HMP */
+#endif /* CONFIG_SCHED_WALT */
+
#ifdef CONFIG_NO_HZ_COMMON
void calc_load_enter_idle(void);
void calc_load_exit_idle(void);
@@ -2962,7 +2973,7 @@
#endif
extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
extern void sched_dead(struct task_struct *p);
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
extern void sched_exit(struct task_struct *p);
#else
static inline void sched_exit(struct task_struct *p) { }
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index ae9032a..190bf3b 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -25,8 +25,14 @@
#ifdef CONFIG_SCHED_WALT
extern unsigned int sysctl_sched_use_walt_cpu_util;
extern unsigned int sysctl_sched_use_walt_task_util;
-extern unsigned int sysctl_sched_walt_init_task_load_pct;
-extern unsigned int sysctl_sched_walt_cpu_high_irqload;
+extern unsigned int sysctl_sched_init_task_load_pct;
+#endif
+
+#ifdef CONFIG_SCHED_WALT
+extern unsigned int sysctl_sched_cpu_high_irqload;
+extern unsigned int sysctl_sched_use_walt_cpu_util;
+extern unsigned int sysctl_sched_use_walt_task_util;
+extern unsigned int sysctl_sched_boost;
#endif
#ifdef CONFIG_SCHED_HMP
@@ -43,8 +49,6 @@
extern unsigned int sysctl_sched_freq_reporting_policy;
extern unsigned int sysctl_sched_window_stats_policy;
extern unsigned int sysctl_sched_ravg_hist_size;
-extern unsigned int sysctl_sched_cpu_high_irqload;
-extern unsigned int sysctl_sched_init_task_load_pct;
extern unsigned int sysctl_sched_spill_nr_run;
extern unsigned int sysctl_sched_spill_load_pct;
extern unsigned int sysctl_sched_upmigrate_pct;
@@ -52,12 +56,10 @@
extern unsigned int sysctl_sched_group_upmigrate_pct;
extern unsigned int sysctl_sched_group_downmigrate_pct;
extern unsigned int sysctl_early_detection_duration;
-extern unsigned int sysctl_sched_boost;
extern unsigned int sysctl_sched_small_wakee_task_load_pct;
extern unsigned int sysctl_sched_big_waker_task_load_pct;
extern unsigned int sysctl_sched_select_prev_cpu_us;
extern unsigned int sysctl_sched_restrict_cluster_spill;
-extern unsigned int sysctl_sched_new_task_windows;
extern unsigned int sysctl_sched_pred_alert_freq;
extern unsigned int sysctl_sched_freq_aggregate;
extern unsigned int sysctl_sched_enable_thread_grouping;
diff --git a/include/trace/events/lmh.h b/include/trace/events/lmh.h
new file mode 100644
index 0000000..f6edacf
--- /dev/null
+++ b/include/trace/events/lmh.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM lmh
+
+#if !defined(_TRACE_LMH_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_LMH_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(lmh_dcvs_freq,
+ TP_PROTO(unsigned long cpu, unsigned long freq),
+
+ TP_ARGS(cpu, freq),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, cpu)
+ __field(unsigned long, freq)
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->freq = freq;
+ ),
+
+ TP_printk("cpu:%lu max frequency:%lu", __entry->cpu, __entry->freq)
+);
+#endif /* _TRACE_LMH_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index e792405..d55175e 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -767,6 +767,33 @@
__entry->vote)
);
+TRACE_EVENT(sugov_util_update,
+ TP_PROTO(int cpu,
+ unsigned long util, unsigned long max_cap,
+ unsigned long nl, unsigned long pl,
+ unsigned int flags),
+ TP_ARGS(cpu, util, max_cap, nl, pl, flags),
+ TP_STRUCT__entry(
+ __field( int, cpu)
+ __field( unsigned long, util)
+ __field( unsigned long, max_cap)
+ __field( unsigned long, nl)
+ __field( unsigned long, pl)
+ __field( unsigned int, flags)
+ ),
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->util = util;
+ __entry->max_cap = max_cap;
+ __entry->nl = nl;
+ __entry->pl = pl;
+ __entry->flags = flags;
+ ),
+ TP_printk("cpu=%d util=%lu max_cap=%lu nl=%lu pl=%lu flags=%x",
+ __entry->cpu, __entry->util, __entry->max_cap,
+ __entry->nl, __entry->pl, __entry->flags)
+);
+
#endif /* _TRACE_POWER_H */
/* This part must be outside protection */
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 0427805..4a9c625 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -108,169 +108,11 @@
)
);
-#ifdef CONFIG_SCHED_HMP
-
+#ifdef CONFIG_SCHED_WALT
struct group_cpu_time;
-struct migration_sum_data;
extern const char *task_event_names[];
-extern const char *migrate_type_names[];
-TRACE_EVENT(sched_task_load,
-
- TP_PROTO(struct task_struct *p, bool boost, int reason,
- bool sync, bool need_idle, u32 flags, int best_cpu),
-
- TP_ARGS(p, boost, reason, sync, need_idle, flags, best_cpu),
-
- TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
- __field( pid_t, pid )
- __field(unsigned int, demand )
- __field( bool, boost )
- __field( int, reason )
- __field( bool, sync )
- __field( bool, need_idle )
- __field( u32, flags )
- __field( int, best_cpu )
- __field( u64, latency )
- __field( int, grp_id )
- __field( u64, avg_burst )
- __field( u64, avg_sleep )
- ),
-
- TP_fast_assign(
- memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
- __entry->pid = p->pid;
- __entry->demand = p->ravg.demand;
- __entry->boost = boost;
- __entry->reason = reason;
- __entry->sync = sync;
- __entry->need_idle = need_idle;
- __entry->flags = flags;
- __entry->best_cpu = best_cpu;
- __entry->latency = p->state == TASK_WAKING ?
- sched_ktime_clock() -
- p->ravg.mark_start : 0;
- __entry->grp_id = p->grp ? p->grp->id : 0;
- __entry->avg_burst = p->ravg.avg_burst;
- __entry->avg_sleep = p->ravg.avg_sleep_time;
- ),
-
- TP_printk("%d (%s): demand=%u boost=%d reason=%d sync=%d need_idle=%d flags=%x grp=%d best_cpu=%d latency=%llu avg_burst=%llu avg_sleep=%llu",
- __entry->pid, __entry->comm, __entry->demand,
- __entry->boost, __entry->reason, __entry->sync,
- __entry->need_idle, __entry->flags, __entry->grp_id,
- __entry->best_cpu, __entry->latency, __entry->avg_burst,
- __entry->avg_sleep)
-);
-
-TRACE_EVENT(sched_set_preferred_cluster,
-
- TP_PROTO(struct related_thread_group *grp, u64 total_demand),
-
- TP_ARGS(grp, total_demand),
-
- TP_STRUCT__entry(
- __field( int, id )
- __field( u64, demand )
- __field( int, cluster_first_cpu )
- __array( char, comm, TASK_COMM_LEN )
- __field( pid_t, pid )
- __field(unsigned int, task_demand )
- ),
-
- TP_fast_assign(
- __entry->id = grp->id;
- __entry->demand = total_demand;
- __entry->cluster_first_cpu = grp->preferred_cluster ?
- cluster_first_cpu(grp->preferred_cluster)
- : -1;
- ),
-
- TP_printk("group_id %d total_demand %llu preferred_cluster_first_cpu %d",
- __entry->id, __entry->demand,
- __entry->cluster_first_cpu)
-);
-
-DECLARE_EVENT_CLASS(sched_cpu_load,
-
- TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
-
- TP_ARGS(rq, idle, irqload, power_cost, temp),
-
- TP_STRUCT__entry(
- __field(unsigned int, cpu )
- __field(unsigned int, idle )
- __field(unsigned int, nr_running )
- __field(unsigned int, nr_big_tasks )
- __field(unsigned int, load_scale_factor )
- __field(unsigned int, capacity )
- __field( u64, cumulative_runnable_avg )
- __field( u64, irqload )
- __field(unsigned int, max_freq )
- __field(unsigned int, power_cost )
- __field( int, cstate )
- __field( int, dstate )
- __field( int, temp )
- ),
-
- TP_fast_assign(
- __entry->cpu = rq->cpu;
- __entry->idle = idle;
- __entry->nr_running = rq->nr_running;
- __entry->nr_big_tasks = rq->hmp_stats.nr_big_tasks;
- __entry->load_scale_factor = cpu_load_scale_factor(rq->cpu);
- __entry->capacity = cpu_capacity(rq->cpu);
- __entry->cumulative_runnable_avg = rq->hmp_stats.cumulative_runnable_avg;
- __entry->irqload = irqload;
- __entry->max_freq = cpu_max_freq(rq->cpu);
- __entry->power_cost = power_cost;
- __entry->cstate = rq->cstate;
- __entry->dstate = rq->cluster->dstate;
- __entry->temp = temp;
- ),
-
- TP_printk("cpu %u idle %d nr_run %u nr_big %u lsf %u capacity %u cr_avg %llu irqload %llu fmax %u power_cost %u cstate %d dstate %d temp %d",
- __entry->cpu, __entry->idle, __entry->nr_running, __entry->nr_big_tasks,
- __entry->load_scale_factor, __entry->capacity,
- __entry->cumulative_runnable_avg, __entry->irqload,
- __entry->max_freq, __entry->power_cost, __entry->cstate,
- __entry->dstate, __entry->temp)
-);
-
-DEFINE_EVENT(sched_cpu_load, sched_cpu_load_wakeup,
- TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
- TP_ARGS(rq, idle, irqload, power_cost, temp)
-);
-
-DEFINE_EVENT(sched_cpu_load, sched_cpu_load_lb,
- TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
- TP_ARGS(rq, idle, irqload, power_cost, temp)
-);
-
-DEFINE_EVENT(sched_cpu_load, sched_cpu_load_cgroup,
- TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
- TP_ARGS(rq, idle, irqload, power_cost, temp)
-);
-
-TRACE_EVENT(sched_set_boost,
-
- TP_PROTO(int type),
-
- TP_ARGS(type),
-
- TP_STRUCT__entry(
- __field(int, type )
- ),
-
- TP_fast_assign(
- __entry->type = type;
- ),
-
- TP_printk("type %d", __entry->type)
-);
-
-#if defined(CREATE_TRACE_POINTS) && defined(CONFIG_SCHED_HMP)
+#if defined(CREATE_TRACE_POINTS) && defined(CONFIG_SCHED_WALT)
static inline void __window_data(u32 *dst, u32 *src)
{
if (src)
@@ -343,6 +185,117 @@
}
#endif
+TRACE_EVENT(sched_update_pred_demand,
+
+ TP_PROTO(struct rq *rq, struct task_struct *p, u32 runtime, int pct,
+ unsigned int pred_demand),
+
+ TP_ARGS(rq, p, runtime, pct, pred_demand),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field(unsigned int, runtime )
+ __field( int, pct )
+ __field(unsigned int, pred_demand )
+ __array( u8, bucket, NUM_BUSY_BUCKETS)
+ __field( int, cpu )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+ __entry->runtime = runtime;
+ __entry->pct = pct;
+ __entry->pred_demand = pred_demand;
+ memcpy(__entry->bucket, p->ravg.busy_buckets,
+ NUM_BUSY_BUCKETS * sizeof(u8));
+ __entry->cpu = rq->cpu;
+ ),
+
+ TP_printk("%d (%s): runtime %u pct %d cpu %d pred_demand %u (buckets: %u %u %u %u %u %u %u %u %u %u)",
+ __entry->pid, __entry->comm,
+ __entry->runtime, __entry->pct, __entry->cpu,
+ __entry->pred_demand, __entry->bucket[0], __entry->bucket[1],
+ __entry->bucket[2], __entry->bucket[3], __entry->bucket[4],
+ __entry->bucket[5], __entry->bucket[6], __entry->bucket[7],
+ __entry->bucket[8], __entry->bucket[9])
+);
+
+TRACE_EVENT(sched_update_history,
+
+ TP_PROTO(struct rq *rq, struct task_struct *p, u32 runtime, int samples,
+ enum task_event evt),
+
+ TP_ARGS(rq, p, runtime, samples, evt),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field(unsigned int, runtime )
+ __field( int, samples )
+ __field(enum task_event, evt )
+ __field(unsigned int, demand )
+ __field(unsigned int, pred_demand )
+ __array( u32, hist, RAVG_HIST_SIZE_MAX)
+ __field(unsigned int, nr_big_tasks )
+ __field( int, cpu )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+ __entry->runtime = runtime;
+ __entry->samples = samples;
+ __entry->evt = evt;
+ __entry->demand = p->ravg.demand;
+ __entry->pred_demand = p->ravg.pred_demand;
+ memcpy(__entry->hist, p->ravg.sum_history,
+ RAVG_HIST_SIZE_MAX * sizeof(u32));
+ __entry->nr_big_tasks = rq->hmp_stats.nr_big_tasks;
+ __entry->cpu = rq->cpu;
+ ),
+
+ TP_printk("%d (%s): runtime %u samples %d event %s demand %u pred_demand %u"
+ " (hist: %u %u %u %u %u) cpu %d nr_big %u",
+ __entry->pid, __entry->comm,
+ __entry->runtime, __entry->samples,
+ task_event_names[__entry->evt],
+ __entry->demand, __entry->pred_demand,
+ __entry->hist[0], __entry->hist[1],
+ __entry->hist[2], __entry->hist[3],
+ __entry->hist[4], __entry->cpu, __entry->nr_big_tasks)
+);
+
+TRACE_EVENT(sched_get_task_cpu_cycles,
+
+ TP_PROTO(int cpu, int event, u64 cycles, u64 exec_time),
+
+ TP_ARGS(cpu, event, cycles, exec_time),
+
+ TP_STRUCT__entry(
+ __field(int, cpu )
+ __field(int, event )
+ __field(u64, cycles )
+ __field(u64, exec_time )
+ __field(u32, freq )
+ __field(u32, legacy_freq )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->event = event;
+ __entry->cycles = cycles;
+ __entry->exec_time = exec_time;
+ __entry->freq = cpu_cycles_to_freq(cycles, exec_time);
+ __entry->legacy_freq = cpu_cur_freq(cpu);
+ ),
+
+ TP_printk("cpu=%d event=%d cycles=%llu exec_time=%llu freq=%u legacy_freq=%u",
+ __entry->cpu, __entry->event, __entry->cycles,
+ __entry->exec_time, __entry->freq, __entry->legacy_freq)
+);
+
TRACE_EVENT(sched_update_task_ravg,
TP_PROTO(struct task_struct *p, struct rq *rq, enum task_event evt,
@@ -434,148 +387,92 @@
__entry->curr_top, __entry->prev_top)
);
-TRACE_EVENT(sched_get_task_cpu_cycles,
+TRACE_EVENT(sched_update_task_ravg_mini,
- TP_PROTO(int cpu, int event, u64 cycles, u64 exec_time),
+ TP_PROTO(struct task_struct *p, struct rq *rq, enum task_event evt,
+ u64 wallclock, u64 irqtime, u64 cycles, u64 exec_time,
+ struct group_cpu_time *cpu_time),
- TP_ARGS(cpu, event, cycles, exec_time),
-
- TP_STRUCT__entry(
- __field(int, cpu )
- __field(int, event )
- __field(u64, cycles )
- __field(u64, exec_time )
- __field(u32, freq )
- __field(u32, legacy_freq )
- ),
-
- TP_fast_assign(
- __entry->cpu = cpu;
- __entry->event = event;
- __entry->cycles = cycles;
- __entry->exec_time = exec_time;
- __entry->freq = cpu_cycles_to_freq(cycles, exec_time);
- __entry->legacy_freq = cpu_cur_freq(cpu);
- ),
-
- TP_printk("cpu=%d event=%d cycles=%llu exec_time=%llu freq=%u legacy_freq=%u",
- __entry->cpu, __entry->event, __entry->cycles,
- __entry->exec_time, __entry->freq, __entry->legacy_freq)
-);
-
-TRACE_EVENT(sched_update_history,
-
- TP_PROTO(struct rq *rq, struct task_struct *p, u32 runtime, int samples,
- enum task_event evt),
-
- TP_ARGS(rq, p, runtime, samples, evt),
+ TP_ARGS(p, rq, evt, wallclock, irqtime, cycles, exec_time, cpu_time),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
- __field(unsigned int, runtime )
- __field( int, samples )
+ __field( u64, wallclock )
+ __field( u64, mark_start )
+ __field( u64, delta_m )
+ __field( u64, win_start )
+ __field( u64, delta )
__field(enum task_event, evt )
__field(unsigned int, demand )
- __field(unsigned int, pred_demand )
- __array( u32, hist, RAVG_HIST_SIZE_MAX)
- __field(unsigned int, nr_big_tasks )
__field( int, cpu )
+ __field( u64, rq_cs )
+ __field( u64, rq_ps )
+ __field( u64, grp_cs )
+ __field( u64, grp_ps )
+ __field( u32, curr_window )
+ __field( u32, prev_window )
),
TP_fast_assign(
- memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
- __entry->pid = p->pid;
- __entry->runtime = runtime;
- __entry->samples = samples;
+ __entry->wallclock = wallclock;
+ __entry->win_start = rq->window_start;
+ __entry->delta = (wallclock - rq->window_start);
__entry->evt = evt;
- __entry->demand = p->ravg.demand;
- __entry->pred_demand = p->ravg.pred_demand;
- memcpy(__entry->hist, p->ravg.sum_history,
- RAVG_HIST_SIZE_MAX * sizeof(u32));
- __entry->nr_big_tasks = rq->hmp_stats.nr_big_tasks;
__entry->cpu = rq->cpu;
- ),
-
- TP_printk("%d (%s): runtime %u samples %d event %s demand %u pred_demand %u"
- " (hist: %u %u %u %u %u) cpu %d nr_big %u",
- __entry->pid, __entry->comm,
- __entry->runtime, __entry->samples,
- task_event_names[__entry->evt],
- __entry->demand, __entry->pred_demand,
- __entry->hist[0], __entry->hist[1],
- __entry->hist[2], __entry->hist[3],
- __entry->hist[4], __entry->cpu, __entry->nr_big_tasks)
-);
-
-TRACE_EVENT(sched_reset_all_window_stats,
-
- TP_PROTO(u64 window_start, u64 window_size, u64 time_taken,
- int reason, unsigned int old_val, unsigned int new_val),
-
- TP_ARGS(window_start, window_size, time_taken,
- reason, old_val, new_val),
-
- TP_STRUCT__entry(
- __field( u64, window_start )
- __field( u64, window_size )
- __field( u64, time_taken )
- __field( int, reason )
- __field(unsigned int, old_val )
- __field(unsigned int, new_val )
- ),
-
- TP_fast_assign(
- __entry->window_start = window_start;
- __entry->window_size = window_size;
- __entry->time_taken = time_taken;
- __entry->reason = reason;
- __entry->old_val = old_val;
- __entry->new_val = new_val;
- ),
-
- TP_printk("time_taken %llu window_start %llu window_size %llu reason %s old_val %u new_val %u",
- __entry->time_taken, __entry->window_start,
- __entry->window_size,
- sched_window_reset_reasons[__entry->reason],
- __entry->old_val, __entry->new_val)
-);
-
-TRACE_EVENT(sched_update_pred_demand,
-
- TP_PROTO(struct rq *rq, struct task_struct *p, u32 runtime, int pct,
- unsigned int pred_demand),
-
- TP_ARGS(rq, p, runtime, pct, pred_demand),
-
- TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
- __field( pid_t, pid )
- __field(unsigned int, runtime )
- __field( int, pct )
- __field(unsigned int, pred_demand )
- __array( u8, bucket, NUM_BUSY_BUCKETS)
- __field( int, cpu )
- ),
-
- TP_fast_assign(
memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
__entry->pid = p->pid;
- __entry->runtime = runtime;
- __entry->pct = pct;
- __entry->pred_demand = pred_demand;
- memcpy(__entry->bucket, p->ravg.busy_buckets,
- NUM_BUSY_BUCKETS * sizeof(u8));
- __entry->cpu = rq->cpu;
+ __entry->mark_start = p->ravg.mark_start;
+ __entry->delta_m = (wallclock - p->ravg.mark_start);
+ __entry->demand = p->ravg.demand;
+ __entry->rq_cs = rq->curr_runnable_sum;
+ __entry->rq_ps = rq->prev_runnable_sum;
+ __entry->grp_cs = cpu_time ? cpu_time->curr_runnable_sum : 0;
+ __entry->grp_ps = cpu_time ? cpu_time->prev_runnable_sum : 0;
+ __entry->curr_window = p->ravg.curr_window;
+ __entry->prev_window = p->ravg.prev_window;
),
- TP_printk("%d (%s): runtime %u pct %d cpu %d pred_demand %u (buckets: %u %u %u %u %u %u %u %u %u %u)",
- __entry->pid, __entry->comm,
- __entry->runtime, __entry->pct, __entry->cpu,
- __entry->pred_demand, __entry->bucket[0], __entry->bucket[1],
- __entry->bucket[2], __entry->bucket[3] ,__entry->bucket[4],
- __entry->bucket[5], __entry->bucket[6], __entry->bucket[7],
- __entry->bucket[8], __entry->bucket[9])
+ TP_printk("wc %llu ws %llu delta %llu event %s cpu %d task %d (%s) ms %llu delta %llu demand %u rq_cs %llu rq_ps %llu cur_window %u prev_window %u grp_cs %lld grp_ps %lld",
+ __entry->wallclock, __entry->win_start, __entry->delta,
+ task_event_names[__entry->evt], __entry->cpu,
+ __entry->pid, __entry->comm, __entry->mark_start,
+ __entry->delta_m, __entry->demand,
+ __entry->rq_cs, __entry->rq_ps, __entry->curr_window,
+ __entry->prev_window,
+ __entry->grp_cs,
+ __entry->grp_ps)
+);
+
+struct migration_sum_data;
+extern const char *migrate_type_names[];
+
+TRACE_EVENT(sched_set_preferred_cluster,
+
+ TP_PROTO(struct related_thread_group *grp, u64 total_demand),
+
+ TP_ARGS(grp, total_demand),
+
+ TP_STRUCT__entry(
+ __field( int, id )
+ __field( u64, demand )
+ __field( int, cluster_first_cpu )
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field(unsigned int, task_demand )
+ ),
+
+ TP_fast_assign(
+ __entry->id = grp->id;
+ __entry->demand = total_demand;
+ __entry->cluster_first_cpu = grp->preferred_cluster ?
+ cluster_first_cpu(grp->preferred_cluster)
+ : -1;
+ ),
+
+ TP_printk("group_id %d total_demand %llu preferred_cluster_first_cpu %d",
+ __entry->id, __entry->demand,
+ __entry->cluster_first_cpu)
);
TRACE_EVENT(sched_migration_update_sum,
@@ -626,6 +523,172 @@
__entry->src_nt_cs, __entry->src_nt_ps, __entry->dst_nt_cs, __entry->dst_nt_ps)
);
+TRACE_EVENT(sched_set_boost,
+
+ TP_PROTO(int type),
+
+ TP_ARGS(type),
+
+ TP_STRUCT__entry(
+ __field(int, type )
+ ),
+
+ TP_fast_assign(
+ __entry->type = type;
+ ),
+
+ TP_printk("type %d", __entry->type)
+);
+
+#endif
+
+#ifdef CONFIG_SCHED_WALT
+DECLARE_EVENT_CLASS(sched_cpu_load,
+
+ TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
+
+ TP_ARGS(rq, idle, irqload, power_cost, temp),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, cpu )
+ __field(unsigned int, idle )
+ __field(unsigned int, nr_running )
+ __field(unsigned int, nr_big_tasks )
+ __field(unsigned int, load_scale_factor )
+ __field(unsigned int, capacity )
+ __field( u64, cumulative_runnable_avg )
+ __field( u64, irqload )
+ __field(unsigned int, max_freq )
+ __field(unsigned int, power_cost )
+ __field( int, cstate )
+ __field( int, dstate )
+ __field( int, temp )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = rq->cpu;
+ __entry->idle = idle;
+ __entry->nr_running = rq->nr_running;
+ __entry->nr_big_tasks = rq->hmp_stats.nr_big_tasks;
+ __entry->load_scale_factor = cpu_load_scale_factor(rq->cpu);
+ __entry->capacity = cpu_capacity(rq->cpu);
+ __entry->cumulative_runnable_avg = rq->hmp_stats.cumulative_runnable_avg;
+ __entry->irqload = irqload;
+ __entry->max_freq = cpu_max_freq(rq->cpu);
+ __entry->power_cost = power_cost;
+ __entry->cstate = rq->cstate;
+ __entry->dstate = rq->cluster->dstate;
+ __entry->temp = temp;
+ ),
+
+ TP_printk("cpu %u idle %d nr_run %u nr_big %u lsf %u capacity %u cr_avg %llu irqload %llu fmax %u power_cost %u cstate %d dstate %d temp %d",
+ __entry->cpu, __entry->idle, __entry->nr_running, __entry->nr_big_tasks,
+ __entry->load_scale_factor, __entry->capacity,
+ __entry->cumulative_runnable_avg, __entry->irqload,
+ __entry->max_freq, __entry->power_cost, __entry->cstate,
+ __entry->dstate, __entry->temp)
+);
+
+DEFINE_EVENT(sched_cpu_load, sched_cpu_load_lb,
+ TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
+ TP_ARGS(rq, idle, irqload, power_cost, temp)
+);
+#endif
+
+#ifdef CONFIG_SCHED_HMP
+
+TRACE_EVENT(sched_task_load,
+
+ TP_PROTO(struct task_struct *p, bool boost, int reason,
+ bool sync, bool need_idle, u32 flags, int best_cpu),
+
+ TP_ARGS(p, boost, reason, sync, need_idle, flags, best_cpu),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field(unsigned int, demand )
+ __field( bool, boost )
+ __field( int, reason )
+ __field( bool, sync )
+ __field( bool, need_idle )
+ __field( u32, flags )
+ __field( int, best_cpu )
+ __field( u64, latency )
+ __field( int, grp_id )
+ __field( u64, avg_burst )
+ __field( u64, avg_sleep )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+ __entry->demand = p->ravg.demand;
+ __entry->boost = boost;
+ __entry->reason = reason;
+ __entry->sync = sync;
+ __entry->need_idle = need_idle;
+ __entry->flags = flags;
+ __entry->best_cpu = best_cpu;
+ __entry->latency = p->state == TASK_WAKING ?
+ sched_ktime_clock() -
+ p->ravg.mark_start : 0;
+ __entry->grp_id = p->grp ? p->grp->id : 0;
+ __entry->avg_burst = p->ravg.avg_burst;
+ __entry->avg_sleep = p->ravg.avg_sleep_time;
+ ),
+
+ TP_printk("%d (%s): demand=%u boost=%d reason=%d sync=%d need_idle=%d flags=%x grp=%d best_cpu=%d latency=%llu avg_burst=%llu avg_sleep=%llu",
+ __entry->pid, __entry->comm, __entry->demand,
+ __entry->boost, __entry->reason, __entry->sync,
+ __entry->need_idle, __entry->flags, __entry->grp_id,
+ __entry->best_cpu, __entry->latency, __entry->avg_burst,
+ __entry->avg_sleep)
+);
+
+DEFINE_EVENT(sched_cpu_load, sched_cpu_load_wakeup,
+ TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
+ TP_ARGS(rq, idle, irqload, power_cost, temp)
+);
+
+DEFINE_EVENT(sched_cpu_load, sched_cpu_load_cgroup,
+ TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
+ TP_ARGS(rq, idle, irqload, power_cost, temp)
+);
+
+TRACE_EVENT(sched_reset_all_window_stats,
+
+ TP_PROTO(u64 window_start, u64 window_size, u64 time_taken,
+ int reason, unsigned int old_val, unsigned int new_val),
+
+ TP_ARGS(window_start, window_size, time_taken,
+ reason, old_val, new_val),
+
+ TP_STRUCT__entry(
+ __field( u64, window_start )
+ __field( u64, window_size )
+ __field( u64, time_taken )
+ __field( int, reason )
+ __field(unsigned int, old_val )
+ __field(unsigned int, new_val )
+ ),
+
+ TP_fast_assign(
+ __entry->window_start = window_start;
+ __entry->window_size = window_size;
+ __entry->time_taken = time_taken;
+ __entry->reason = reason;
+ __entry->old_val = old_val;
+ __entry->new_val = new_val;
+ ),
+
+ TP_printk("time_taken %llu window_start %llu window_size %llu reason %s old_val %u new_val %u",
+ __entry->time_taken, __entry->window_start,
+ __entry->window_size,
+ sched_window_reset_reasons[__entry->reason],
+ __entry->old_val, __entry->new_val)
+);
+
TRACE_EVENT(sched_get_busy,
TP_PROTO(int cpu, u64 load, u64 nload, u64 pload, int early),
@@ -690,6 +753,153 @@
#endif /* CONFIG_SCHED_HMP */
+#ifdef CONFIG_SMP
+TRACE_EVENT(sched_cpu_util,
+
+ TP_PROTO(struct task_struct *p, int cpu, int task_util, unsigned long curr_util, unsigned long new_cum_util, int sync),
+
+ TP_ARGS(p, cpu, task_util, curr_util, new_cum_util, sync),
+
+ TP_STRUCT__entry(
+ __array(char, comm, TASK_COMM_LEN )
+ __field(int, pid )
+ __field(unsigned int, cpu )
+ __field(int, task_util )
+ __field(unsigned int, nr_running )
+ __field(long, cpu_util )
+ __field(long, cpu_util_cum )
+ __field(long, new_cum_util )
+ __field(unsigned int, capacity_curr )
+ __field(unsigned int, capacity )
+ __field(unsigned long, curr_util )
+ __field(int, sync )
+ __field(int, idle_state )
+ __field(unsigned int, irqload )
+ __field(int, high_irqload )
+ __field(int, task_in_cum_demand )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+ __entry->cpu = cpu;
+ __entry->task_util = task_util;
+ __entry->nr_running = cpu_rq(cpu)->nr_running;
+ __entry->cpu_util = cpu_util(cpu);
+ __entry->cpu_util_cum = cpu_util_cum(cpu, 0);
+ __entry->new_cum_util = new_cum_util;
+ __entry->task_in_cum_demand = task_in_cum_window_demand(cpu_rq(cpu), p);
+ __entry->capacity_curr = capacity_curr_of(cpu);
+ __entry->capacity = capacity_of(cpu);
+ __entry->curr_util = curr_util;
+ __entry->sync = sync;
+ __entry->idle_state = idle_get_state_idx(cpu_rq(cpu));
+ __entry->irqload = sched_irqload(cpu);
+ __entry->high_irqload = sched_cpu_high_irqload(cpu);
+ ),
+
+ TP_printk("comm=%s pid=%d cpu=%d task_util=%d nr_running=%d cpu_util=%ld cpu_util_cum=%ld new_cum_util=%ld task_in_cum=%d capacity_curr=%u capacity=%u curr_util=%ld sync=%d idle_state=%d irqload=%u high_irqload=%u",
+ __entry->comm, __entry->pid, __entry->cpu, __entry->task_util, __entry->nr_running, __entry->cpu_util, __entry->cpu_util_cum, __entry->new_cum_util, __entry->task_in_cum_demand, __entry->capacity_curr, __entry->capacity, __entry->curr_util, __entry->sync, __entry->idle_state, __entry->irqload, __entry->high_irqload)
+);
+
+TRACE_EVENT(sched_energy_diff_packing,
+
+ TP_PROTO(struct task_struct *p, unsigned long task_util,
+ int targeted_cpus, int nrg_pack, int nrg_spread),
+
+ TP_ARGS(p, task_util, targeted_cpus, nrg_pack, nrg_spread),
+
+ TP_STRUCT__entry(
+ __array(char, comm, TASK_COMM_LEN )
+ __field(int, pid )
+ __field(unsigned long, task_util )
+ __field(int, targeted_cpus )
+ __field(int, nrg_pack )
+ __field(int, nrg_spread )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+ __entry->task_util = task_util;
+ __entry->targeted_cpus = targeted_cpus;
+ __entry->nrg_pack = nrg_pack;
+ __entry->nrg_spread = nrg_spread;
+ ),
+
+ TP_printk("comm=%s pid=%d task_util=%lu targeted_cpus=%d nrg_pack=%d nrg_spread=%d nrg_diff=%d",
+ __entry->comm, __entry->pid, __entry->task_util,
+ __entry->targeted_cpus, __entry->nrg_pack,
+ __entry->nrg_spread, __entry->nrg_pack - __entry->nrg_spread)
+);
+
+DECLARE_EVENT_CLASS(sched_task_util,
+
+ TP_PROTO(struct task_struct *p, int task_cpu, unsigned long task_util, int nominated_cpu, int target_cpu, int ediff, bool need_idle),
+
+ TP_ARGS(p, task_cpu, task_util, nominated_cpu, target_cpu, ediff, need_idle),
+
+ TP_STRUCT__entry(
+ __array(char, comm, TASK_COMM_LEN )
+ __field(int, pid )
+ __field(int, task_cpu )
+ __field(unsigned long, task_util )
+ __field(unsigned long, cpu_util_freq )
+ __field(int, nominated_cpu )
+ __field(int, target_cpu )
+ __field(int, ediff )
+ __field(bool, need_idle )
+ __field(u64, latency )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+ __entry->task_cpu = task_cpu;
+ __entry->task_util = task_util;
+ __entry->cpu_util_freq = cpu_util_freq(target_cpu, NULL);
+ __entry->nominated_cpu = nominated_cpu;
+ __entry->target_cpu = target_cpu;
+ __entry->ediff = ediff;
+ __entry->need_idle = need_idle;
+ __entry->latency = sched_ktime_clock() - p->ravg.mark_start;
+ ),
+
+ TP_printk("comm=%s pid=%d task_cpu=%d task_util=%lu nominated_cpu=%d target_cpu=%d energy_diff=%d need_idle=%d latency=%llu",
+ __entry->comm, __entry->pid, __entry->task_cpu, __entry->task_util, __entry->nominated_cpu, __entry->target_cpu, __entry->ediff, __entry->need_idle, __entry->latency)
+);
+
+DEFINE_EVENT(sched_task_util, sched_task_util_bias_to_waker,
+ TP_PROTO(struct task_struct *p, int task_cpu, unsigned long task_util, int nominated_cpu, int target_cpu, int ediff, bool need_idle),
+ TP_ARGS(p, task_cpu, task_util, nominated_cpu, target_cpu, ediff, need_idle)
+);
+
+DEFINE_EVENT(sched_task_util, sched_task_util_colocated,
+ TP_PROTO(struct task_struct *p, int task_cpu, unsigned long task_util, int nominated_cpu, int target_cpu, int ediff, bool need_idle),
+ TP_ARGS(p, task_cpu, task_util, nominated_cpu, target_cpu, ediff, need_idle)
+);
+
+DEFINE_EVENT(sched_task_util, sched_task_util_overutilzed,
+ TP_PROTO(struct task_struct *p, int task_cpu, unsigned long task_util, int nominated_cpu, int target_cpu, int ediff, bool need_idle),
+ TP_ARGS(p, task_cpu, task_util, nominated_cpu, target_cpu, ediff, need_idle)
+);
+
+DEFINE_EVENT(sched_task_util, sched_task_util_energy_diff,
+ TP_PROTO(struct task_struct *p, int task_cpu, unsigned long task_util, int nominated_cpu, int target_cpu, int ediff, bool need_idle),
+ TP_ARGS(p, task_cpu, task_util, nominated_cpu, target_cpu, ediff, need_idle)
+);
+
+DEFINE_EVENT(sched_task_util, sched_task_util_energy_aware,
+ TP_PROTO(struct task_struct *p, int task_cpu, unsigned long task_util, int nominated_cpu, int target_cpu, int ediff, bool need_idle),
+ TP_ARGS(p, task_cpu, task_util, nominated_cpu, target_cpu, ediff, need_idle)
+);
+
+DEFINE_EVENT(sched_task_util, sched_task_util_imbalance,
+ TP_PROTO(struct task_struct *p, int task_cpu, unsigned long task_util, int nominated_cpu, int target_cpu, int ediff, bool need_idle),
+ TP_ARGS(p, task_cpu, task_util, nominated_cpu, target_cpu, ediff, need_idle)
+);
+#endif
+
/*
* Tracepoint for waking up a task:
*/
@@ -1539,6 +1749,43 @@
__entry->capb, __entry->capa, __entry->capd,
__entry->nrgn, __entry->nrgp)
);
+
+TRACE_EVENT(sched_group_energy,
+
+ TP_PROTO(int cpu, long group_util, u64 total_nrg,
+ int busy_nrg, int idle_nrg, int grp_idle_idx,
+ int new_capacity),
+
+ TP_ARGS(cpu, group_util, total_nrg,
+ busy_nrg, idle_nrg, grp_idle_idx,
+ new_capacity),
+
+ TP_STRUCT__entry(
+ __field(int, cpu)
+ __field(long, group_util)
+ __field(u64, total_nrg)
+ __field(int, busy_nrg)
+ __field(int, idle_nrg)
+ __field(int, grp_idle_idx)
+ __field(int, new_capacity)
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->group_util = group_util;
+ __entry->total_nrg = total_nrg;
+ __entry->busy_nrg = busy_nrg;
+ __entry->idle_nrg = idle_nrg;
+ __entry->grp_idle_idx = grp_idle_idx;
+ __entry->new_capacity = new_capacity;
+ ),
+
+ TP_printk("cpu=%d group_util=%ld total_nrg=%llu busy_nrg=%d idle_nrg=%d grp_idle_idx=%d new_capacity=%d",
+ __entry->cpu, __entry->group_util,
+ __entry->total_nrg, __entry->busy_nrg, __entry->idle_nrg,
+ __entry->grp_idle_idx, __entry->new_capacity)
+);
+
/*
* Tracepoint for schedtune_tasks_update
*/
diff --git a/init/Kconfig b/init/Kconfig
index 2c382dc1..bdfcc0f 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1261,6 +1261,7 @@
config SCHED_HMP
bool "Scheduler support for heterogenous multi-processor systems"
+ select SCHED_WALT
depends on SMP && FAIR_GROUP_SCHED
help
This feature will let the scheduler optimize task placement on
@@ -1268,6 +1269,13 @@
in their instructions per-cycle capability or the maximum
frequency they can attain.
+config SCHED_WALT
+ bool "WALT"
+ depends on SMP && FAIR_GROUP_SCHED
+ help
+ Use Window-Assisted Load Tracking (WALT) as a load tracking
+ scheme, either in place of or alongside PELT.
+
config SCHED_HMP_CSTATE_AWARE
bool "CPU C-state aware scheduler"
depends on SCHED_HMP
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index 90d10e8..27a7574 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -20,6 +20,7 @@
obj-y += wait.o swait.o completion.o idle.o sched_avg.o
obj-$(CONFIG_SCHED_HMP) += hmp.o boost.o
obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o energy.o
+obj-$(CONFIG_SCHED_WALT) += walt.o boost.o
obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
obj-$(CONFIG_SCHEDSTATS) += stats.o
obj-$(CONFIG_SCHED_DEBUG) += debug.o
diff --git a/kernel/sched/boost.c b/kernel/sched/boost.c
index 5bdd51b..f5e87791 100644
--- a/kernel/sched/boost.c
+++ b/kernel/sched/boost.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,6 +10,7 @@
* GNU General Public License for more details.
*/
+#include <linux/jiffies.h>
#include "sched.h"
#include <linux/of.h>
#include <linux/sched/core_ctl.h>
@@ -139,6 +140,7 @@
case RESTRAINED_BOOST:
freq_aggr_threshold_backup =
update_freq_aggregate_threshold(1);
+ mod_timer(&sched_grp_timer, jiffies + 1);
break;
default:
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f7f5256..ccb2321 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -76,7 +76,6 @@
#include <linux/frame.h>
#include <linux/prefetch.h>
#include <linux/irq.h>
-#include <linux/sched/core_ctl.h>
#include <asm/switch_to.h>
#include <asm/tlb.h>
@@ -87,6 +86,7 @@
#endif
#include "sched.h"
+#include "walt.h"
#include "../workqueue_internal.h"
#include "../smpboot.h"
#include "../time/tick-internal.h"
@@ -801,6 +801,9 @@
if (task_contributes_to_load(p))
rq->nr_uninterruptible++;
+ if (flags & DEQUEUE_SLEEP)
+ clear_ed_task(p, rq);
+
dequeue_task(rq, p, flags);
}
@@ -2193,6 +2196,9 @@
notif_required = true;
}
+ if (!__task_in_cum_window_demand(cpu_rq(cpu), p))
+ inc_cum_window_demand(cpu_rq(cpu), p, task_load(p));
+
note_task_waking(p, wallclock);
#endif /* CONFIG_SMP */
@@ -2265,6 +2271,8 @@
update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
+ if (!__task_in_cum_window_demand(rq, p))
+ inc_cum_window_demand(rq, p, task_load(p));
cpufreq_update_util(rq, 0);
ttwu_activate(rq, p, ENQUEUE_WAKEUP);
note_task_waking(p, wallclock);
@@ -2352,8 +2360,9 @@
p->se.prev_sum_exec_runtime = 0;
p->se.nr_migrations = 0;
p->se.vruntime = 0;
+ p->last_sleep_ts = 0;
+
INIT_LIST_HEAD(&p->se.group_node);
- walt_init_new_task_load(p);
#ifdef CONFIG_FAIR_GROUP_SCHED
p->se.cfs_rq = NULL;
@@ -2718,8 +2727,6 @@
add_new_task_to_grp(p);
raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
- walt_init_new_task_load(p);
-
p->state = TASK_RUNNING;
#ifdef CONFIG_SMP
/*
@@ -3255,15 +3262,9 @@
return ns;
}
-#ifdef CONFIG_CPU_FREQ_GOV_SCHED
+unsigned int capacity_margin_freq = 1280; /* ~20% margin */
-static inline
-unsigned long add_capacity_margin(unsigned long cpu_capacity)
-{
- cpu_capacity = cpu_capacity * capacity_margin;
- cpu_capacity /= SCHED_CAPACITY_SCALE;
- return cpu_capacity;
-}
+#ifdef CONFIG_CPU_FREQ_GOV_SCHED
static inline
unsigned long sum_capacity_reqs(unsigned long cfs_cap,
@@ -3287,7 +3288,7 @@
* To make free room for a task that is building up its "real"
* utilization and to harm its performance the least, request
* a jump to a higher OPP as soon as the margin of free capacity
- * is impacted (specified by capacity_margin).
+ * is impacted (specified by capacity_margin_freq).
*/
set_cfs_cpu_capacity(cpu, true, cpu_utilization);
}
@@ -3295,22 +3296,13 @@
#ifdef CONFIG_SCHED_WALT
static void sched_freq_tick_walt(int cpu)
{
- unsigned long cpu_utilization = cpu_util(cpu);
- unsigned long capacity_curr = capacity_curr_of(cpu);
+ unsigned long cpu_utilization = cpu_util_freq(cpu, NULL);
if (walt_disabled || !sysctl_sched_use_walt_cpu_util)
return sched_freq_tick_pelt(cpu);
- /*
- * Add a margin to the WALT utilization.
- * NOTE: WALT tracks a single CPU signal for all the scheduling
- * classes, thus this margin is going to be added to the DL class as
- * well, which is something we do not do in sched_freq_tick_pelt case.
- */
- cpu_utilization = add_capacity_margin(cpu_utilization);
- if (cpu_utilization <= capacity_curr)
- return;
-
+ cpu_utilization = cpu_utilization * SCHED_CAPACITY_SCALE /
+ capacity_orig_of(cpu);
/*
* It is likely that the load is growing so we
* keep the added margin in our request as an
@@ -3326,22 +3318,42 @@
static void sched_freq_tick(int cpu)
{
- unsigned long capacity_orig, capacity_curr;
-
if (!sched_freq())
return;
- capacity_orig = capacity_orig_of(cpu);
- capacity_curr = capacity_curr_of(cpu);
- if (capacity_curr == capacity_orig)
- return;
-
_sched_freq_tick(cpu);
}
#else
static inline void sched_freq_tick(int cpu) { }
#endif /* CONFIG_CPU_FREQ_GOV_SCHED */
+#ifdef CONFIG_SCHED_WALT
+static atomic64_t walt_irq_work_lastq_ws;
+
+static inline u64 walt_window_start_of(struct rq *rq)
+{
+ return rq->window_start;
+}
+
+static inline void run_walt_irq_work(u64 window_start, struct rq *rq)
+{
+ /* No HMP since that uses sched_get_cpus_busy */
+ if (rq->window_start != window_start &&
+ atomic_cmpxchg(&walt_irq_work_lastq_ws, window_start,
+ rq->window_start) == window_start)
+ irq_work_queue(&rq->irq_work);
+}
+#else
+static inline u64 walt_window_start_of(struct rq *rq)
+{
+ return 0;
+}
+
+static inline void run_walt_irq_work(u64 window_start, struct rq *rq)
+{
+}
+#endif
+
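For reference, a minimal userspace sketch of the idea behind run_walt_irq_work() above: a compare-and-swap on the last-reported window start lets exactly one CPU queue the load-reporting work per window rollover. The kernel's atomic64_t, struct rq and irq_work machinery are replaced with plain C11 equivalents here, so this only illustrates the race-free hand-off, not the actual implementation.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* stands in for walt_irq_work_lastq_ws */
static _Atomic uint64_t lastq_ws;

/* returns 1 when this caller should queue the load-reporting work */
static int report_if_rolled_over(uint64_t old_ws, uint64_t new_ws)
{
	uint64_t expected = old_ws;

	if (new_ws == old_ws)
		return 0;	/* window did not roll over on this CPU */

	/* only the first CPU to observe the rollover wins the exchange */
	return atomic_compare_exchange_strong(&lastq_ws, &expected, new_ws);
}

int main(void)
{
	atomic_store(&lastq_ws, 100);

	/* two CPUs observe the same rollover from window 100 to 120 */
	printf("cpu0 queues work: %d\n", report_if_rolled_over(100, 120)); /* 1 */
	printf("cpu1 queues work: %d\n", report_if_rolled_over(100, 120)); /* 0 */
	return 0;
}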
/*
* This function gets called by the timer code, with HZ frequency.
* We call it with interrupts disabled.
@@ -3355,25 +3367,33 @@
bool early_notif;
u32 old_load;
struct related_thread_group *grp;
+ u64 window_start;
sched_clock_tick();
raw_spin_lock(&rq->lock);
+ /*
+ * Record the current window_start. If the window has rolled over
+ * after update_task_ravg() below, schedule the load-reporting irq-work.
+ */
+ window_start = walt_window_start_of(rq);
+
old_load = task_load(curr);
set_window_start(rq);
- update_rq_clock(rq);
- curr->sched_class->task_tick(rq, curr, 0);
- cpu_load_update_active(rq);
- walt_update_task_ravg(rq->curr, rq, TASK_UPDATE,
- walt_ktime_clock(), 0);
- calc_global_load_tick(rq);
wallclock = sched_ktime_clock();
update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
+ update_rq_clock(rq);
+ curr->sched_class->task_tick(rq, curr, 0);
+ cpu_load_update_active(rq);
+ calc_global_load_tick(rq);
cpufreq_update_util(rq, 0);
+
+ run_walt_irq_work(window_start, rq);
+
early_notif = early_detection_notify(rq, wallclock);
raw_spin_unlock(&rq->lock);
@@ -3398,8 +3418,6 @@
if (curr->sched_class == &fair_sched_class)
check_for_migration(rq, curr);
- if (cpu == tick_do_timer_cpu)
- core_ctl_check(wallclock);
sched_freq_tick(cpu);
}
@@ -3706,6 +3724,9 @@
wallclock = sched_ktime_clock();
if (likely(prev != next)) {
+ if (!prev->on_rq)
+ prev->last_sleep_ts = wallclock;
+
update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, 0);
update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, 0);
cpufreq_update_util(rq, 0);
@@ -8163,7 +8184,6 @@
{
cpumask_var_t non_isolated_cpus;
- walt_init_cpu_efficiency();
alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
@@ -8377,10 +8397,12 @@
rq->avg_idle = 2*sysctl_sched_migration_cost;
rq->max_idle_balance_cost = sysctl_sched_migration_cost;
rq->push_task = NULL;
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
cpumask_set_cpu(i, &rq->freq_domain_cpumask);
+ init_irq_work(&rq->irq_work, walt_irq_work);
rq->hmp_stats.cumulative_runnable_avg = 0;
rq->window_start = 0;
+ rq->cum_window_start = 0;
rq->hmp_stats.nr_big_tasks = 0;
rq->hmp_flags = 0;
rq->cur_irqload = 0;
@@ -8406,6 +8428,7 @@
rq->old_estimated_time = 0;
rq->old_busy_time_group = 0;
rq->hmp_stats.pred_demands_sum = 0;
+ rq->ed_task = NULL;
rq->curr_table = 0;
rq->prev_top = 0;
rq->curr_top = 0;
@@ -8422,6 +8445,7 @@
clear_top_tasks_bitmap(rq->top_tasks_bitmap[j]);
}
+ rq->cum_window_demand = 0;
#endif
INIT_LIST_HEAD(&rq->cfs_tasks);
@@ -9646,7 +9670,7 @@
/* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
};
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
/*
* sched_exit() - Set EXITING_TASK_MARKER in task's ravg.demand field
*
@@ -9674,6 +9698,7 @@
update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
dequeue_task(rq, p, 0);
reset_task_stats(p);
+ dec_cum_window_demand(rq, p);
p->ravg.mark_start = wallclock;
p->ravg.sum_history[0] = EXITING_TASK_MARKER;
free_task_load_ptrs(p);
@@ -9682,4 +9707,4 @@
clear_ed_task(p, rq);
task_rq_unlock(rq, p, &rf);
}
-#endif /* CONFIG_SCHED_HMP */
+#endif /* CONFIG_SCHED_WALT */
diff --git a/kernel/sched/core_ctl.c b/kernel/sched/core_ctl.c
index 1040a43..005d15e 100644
--- a/kernel/sched/core_ctl.c
+++ b/kernel/sched/core_ctl.c
@@ -45,7 +45,6 @@
spinlock_t pending_lock;
bool is_big_cluster;
int nrrun;
- bool nrrun_changed;
struct task_struct *core_ctl_thread;
unsigned int first_cpu;
unsigned int boost;
@@ -433,33 +432,16 @@
/* ==================== runqueue based core count =================== */
-#define RQ_AVG_TOLERANCE 2
-#define RQ_AVG_DEFAULT_MS 20
#define NR_RUNNING_TOLERANCE 5
-static unsigned int rq_avg_period_ms = RQ_AVG_DEFAULT_MS;
-static s64 rq_avg_timestamp_ms;
-
-static void update_running_avg(bool trigger_update)
+static void update_running_avg(void)
{
- int avg, iowait_avg, big_avg, old_nrrun;
- s64 now;
- unsigned long flags;
+ int avg, iowait_avg, big_avg;
struct cluster_data *cluster;
unsigned int index = 0;
- spin_lock_irqsave(&state_lock, flags);
-
- now = ktime_to_ms(ktime_get());
- if (now - rq_avg_timestamp_ms < rq_avg_period_ms - RQ_AVG_TOLERANCE) {
- spin_unlock_irqrestore(&state_lock, flags);
- return;
- }
- rq_avg_timestamp_ms = now;
sched_get_nr_running_avg(&avg, &iowait_avg, &big_avg);
- spin_unlock_irqrestore(&state_lock, flags);
-
/*
* Round up to the next integer if the average nr running tasks
* is within NR_RUNNING_TOLERANCE/100 of the next integer.
@@ -478,7 +460,6 @@
for_each_cluster(cluster, index) {
if (!cluster->inited)
continue;
- old_nrrun = cluster->nrrun;
/*
* Big cluster only need to take care of big tasks, but if
* there are not enough big cores, big tasks need to be run
@@ -489,14 +470,7 @@
* than scheduler, and can't predict scheduler's behavior.
*/
cluster->nrrun = cluster->is_big_cluster ? big_avg : avg;
- if (cluster->nrrun != old_nrrun) {
- if (trigger_update)
- apply_need(cluster);
- else
- cluster->nrrun_changed = true;
- }
}
- return;
}
/* adjust needed CPUs based on current runqueue information */
@@ -605,24 +579,15 @@
wake_up_core_ctl_thread(cluster);
}
-static int core_ctl_set_busy(unsigned int cpu, unsigned int busy)
+static void core_ctl_set_busy(struct cpu_data *c, unsigned int busy)
{
- struct cpu_data *c = &per_cpu(cpu_state, cpu);
- struct cluster_data *cluster = c->cluster;
unsigned int old_is_busy = c->is_busy;
- if (!cluster || !cluster->inited)
- return 0;
+ if (c->busy == busy)
+ return;
- update_running_avg(false);
- if (c->busy == busy && !cluster->nrrun_changed)
- return 0;
c->busy = busy;
- cluster->nrrun_changed = false;
-
- apply_need(cluster);
- trace_core_ctl_set_busy(cpu, busy, old_is_busy, c->is_busy);
- return 0;
+ trace_core_ctl_set_busy(c->cpu, busy, old_is_busy, c->is_busy);
}
/* ========================= core count enforcement ==================== */
@@ -639,21 +604,6 @@
}
static u64 core_ctl_check_timestamp;
-static u64 core_ctl_check_interval;
-
-static bool do_check(u64 wallclock)
-{
- bool do_check = false;
- unsigned long flags;
-
- spin_lock_irqsave(&state_lock, flags);
- if ((wallclock - core_ctl_check_timestamp) >= core_ctl_check_interval) {
- core_ctl_check_timestamp = wallclock;
- do_check = true;
- }
- spin_unlock_irqrestore(&state_lock, flags);
- return do_check;
-}
int core_ctl_set_boost(bool boost)
{
@@ -695,21 +645,39 @@
}
EXPORT_SYMBOL(core_ctl_set_boost);
-void core_ctl_check(u64 wallclock)
+void core_ctl_check(u64 window_start)
{
+ int cpu;
+ unsigned int busy;
+ struct cpu_data *c;
+ struct cluster_data *cluster;
+ unsigned int index = 0;
+
if (unlikely(!initialized))
return;
- if (do_check(wallclock)) {
- unsigned int index = 0;
- struct cluster_data *cluster;
+ if (window_start == core_ctl_check_timestamp)
+ return;
- update_running_avg(true);
+ core_ctl_check_timestamp = window_start;
- for_each_cluster(cluster, index) {
- if (eval_need(cluster))
- wake_up_core_ctl_thread(cluster);
- }
+ for_each_possible_cpu(cpu) {
+
+ c = &per_cpu(cpu_state, cpu);
+ cluster = c->cluster;
+
+ if (!cluster || !cluster->inited)
+ continue;
+
+ busy = sched_get_cpu_util(cpu);
+ core_ctl_set_busy(c, busy);
+ }
+
+ update_running_avg();
+
+ for_each_cluster(cluster, index) {
+ if (eval_need(cluster))
+ wake_up_core_ctl_thread(cluster);
}
}
@@ -1079,74 +1047,25 @@
return kobject_add(&cluster->kobj, &dev->kobj, "core_ctl");
}
-static int cpufreq_policy_cb(struct notifier_block *nb, unsigned long val,
- void *data)
-{
- struct cpufreq_policy *policy = data;
- int ret;
-
- switch (val) {
- case CPUFREQ_CREATE_POLICY:
- ret = cluster_init(policy->related_cpus);
- if (ret)
- pr_warn("unable to create core ctl group: %d\n", ret);
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block cpufreq_pol_nb = {
- .notifier_call = cpufreq_policy_cb,
-};
-
-static int cpufreq_gov_cb(struct notifier_block *nb, unsigned long val,
- void *data)
-{
- struct cpufreq_govinfo *info = data;
-
- switch (val) {
- case CPUFREQ_LOAD_CHANGE:
- core_ctl_set_busy(info->cpu, info->load);
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block cpufreq_gov_nb = {
- .notifier_call = cpufreq_gov_cb,
-};
-
static int __init core_ctl_init(void)
{
unsigned int cpu;
+ struct cpumask cpus = *cpu_possible_mask;
if (should_skip(cpu_possible_mask))
return 0;
- core_ctl_check_interval = (rq_avg_period_ms - RQ_AVG_TOLERANCE)
- * NSEC_PER_MSEC;
-
register_cpu_notifier(&cpu_notifier);
- cpufreq_register_notifier(&cpufreq_pol_nb, CPUFREQ_POLICY_NOTIFIER);
- cpufreq_register_notifier(&cpufreq_gov_nb, CPUFREQ_GOVINFO_NOTIFIER);
- cpu_maps_update_begin();
- for_each_online_cpu(cpu) {
- struct cpufreq_policy *policy;
+ for_each_cpu(cpu, &cpus) {
int ret;
+ const struct cpumask *cluster_cpus = cpu_coregroup_mask(cpu);
- policy = cpufreq_cpu_get(cpu);
- if (policy) {
- ret = cluster_init(policy->related_cpus);
- if (ret)
- pr_warn("unable to create core ctl group: %d\n"
- , ret);
- cpufreq_cpu_put(policy);
- }
+ ret = cluster_init(cluster_cpus);
+ if (ret)
+ pr_warn("unable to create core ctl group: %d\n", ret);
+ cpumask_andnot(&cpus, &cpus, cluster_cpus);
}
- cpu_maps_update_done();
initialized = true;
return 0;
}
diff --git a/kernel/sched/cpufreq_sched.c b/kernel/sched/cpufreq_sched.c
index 1d471d5..11b75e3 100644
--- a/kernel/sched/cpufreq_sched.c
+++ b/kernel/sched/cpufreq_sched.c
@@ -234,8 +234,12 @@
scr = &per_cpu(cpu_sched_capacity_reqs, cpu);
- new_capacity = scr->cfs + scr->rt;
- new_capacity = new_capacity * capacity_margin
+#ifdef CONFIG_SCHED_WALT
+ if (walt_disabled || !sysctl_sched_use_walt_cpu_util)
+ new_capacity = scr->cfs + scr->rt;
+#endif
+ new_capacity = scr->cfs;
+ new_capacity = new_capacity * capacity_margin_freq
/ SCHED_CAPACITY_SCALE;
new_capacity += scr->dl;
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 27d96e2..42630ec 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -23,6 +23,7 @@
struct sugov_tunables {
struct gov_attr_set attr_set;
unsigned int rate_limit_us;
+ unsigned int hispeed_freq;
};
struct sugov_policy {
@@ -34,8 +35,11 @@
raw_spinlock_t update_lock; /* For shared policies */
u64 last_freq_update_time;
s64 freq_update_delay_ns;
+ u64 hispeed_jmp_ts;
unsigned int next_freq;
unsigned int cached_raw_freq;
+ unsigned long hispeed_util;
+ unsigned long max;
/* The next fields are only needed if fast switch cannot be used. */
struct irq_work irq_work;
@@ -56,10 +60,13 @@
unsigned long iowait_boost_max;
u64 last_update;
+ struct sched_walt_cpu_load walt_load;
+
/* The fields below are only needed when sharing a policy. */
unsigned long util;
unsigned long max;
unsigned int flags;
+ unsigned int cpu;
};
static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);
@@ -70,9 +77,6 @@
{
s64 delta_ns;
- if (sg_policy->work_in_progress)
- return false;
-
if (unlikely(sg_policy->need_freq_update)) {
sg_policy->need_freq_update = false;
/*
@@ -113,6 +117,7 @@
}
}
+#define TARGET_LOAD 80
/**
* get_next_freq - Compute a new frequency for a given cpufreq policy.
* @sg_policy: schedutil policy object to compute the new frequency for.
@@ -150,15 +155,18 @@
return cpufreq_driver_resolve_freq(policy, freq);
}
-static void sugov_get_util(unsigned long *util, unsigned long *max)
+static void sugov_get_util(unsigned long *util, unsigned long *max, int cpu)
{
- struct rq *rq = this_rq();
+ struct rq *rq = cpu_rq(cpu);
unsigned long cfs_max;
+ struct sugov_cpu *loadcpu = &per_cpu(sugov_cpu, cpu);
- cfs_max = arch_scale_cpu_capacity(NULL, smp_processor_id());
+ cfs_max = arch_scale_cpu_capacity(NULL, cpu);
*util = min(rq->cfs.avg.util_avg, cfs_max);
*max = cfs_max;
+
+ *util = cpu_util_freq(cpu, &loadcpu->walt_load);
}
static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
@@ -191,6 +199,34 @@
sg_cpu->iowait_boost >>= 1;
}
+#define NL_RATIO 75
+#define HISPEED_LOAD 90
+static void sugov_walt_adjust(struct sugov_cpu *sg_cpu, unsigned long *util,
+ unsigned long *max)
+{
+ struct sugov_policy *sg_policy = sg_cpu->sg_policy;
+ unsigned long cap_cur = capacity_curr_of(sg_cpu->cpu);
+ bool is_migration = sg_cpu->flags & SCHED_CPUFREQ_INTERCLUSTER_MIG;
+ unsigned long nl = sg_cpu->walt_load.nl;
+ unsigned long cpu_util = sg_cpu->util;
+ bool is_hiload;
+
+ is_hiload = (cpu_util >= mult_frac(cap_cur,
+ HISPEED_LOAD,
+ 100));
+
+ if (is_hiload && !is_migration &&
+ sg_policy->next_freq < sg_policy->tunables->hispeed_freq) {
+ *util = max(*util, sg_policy->hispeed_util);
+ sg_policy->hispeed_jmp_ts = sg_cpu->last_update;
+ }
+
+ if (is_hiload && nl >= mult_frac(cpu_util, NL_RATIO, 100))
+ *util = *max;
+
+ *util = max(*util, sg_cpu->walt_load.pl);
+}
+
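As a rough illustration of the policy sugov_walt_adjust() implements above, here is a standalone sketch with assumed numbers. It omits the inter-cluster migration flag and the next_freq < hispeed_freq check, and folds mult_frac() into plain integer arithmetic, so it only shows how HISPEED_LOAD, NL_RATIO and the predicted-load floor interact.

#include <stdio.h>

#define NL_RATIO	75
#define HISPEED_LOAD	90

static unsigned long walt_adjust(unsigned long util, unsigned long max,
				 unsigned long cap_cur, unsigned long nl,
				 unsigned long pl, unsigned long hispeed_util)
{
	unsigned long cpu_util = util;
	int is_hiload = cpu_util >= cap_cur * HISPEED_LOAD / 100;

	if (is_hiload && util < hispeed_util)
		util = hispeed_util;		/* bump to the hispeed floor */

	if (is_hiload && nl >= cpu_util * NL_RATIO / 100)
		util = max;			/* new-task heavy: request fmax */

	return util > pl ? util : pl;		/* never drop below predicted load */
}

int main(void)
{
	/* CPU at 95% of a 600-unit current capacity, nl below 75% of util */
	printf("%lu\n", walt_adjust(570, 1024, 600, 300, 0, 700)); /* 700 */
	/* same load, but new-task load dominates: jump straight to max */
	printf("%lu\n", walt_adjust(570, 1024, 600, 500, 0, 700)); /* 1024 */
	return 0;
}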
static void sugov_update_single(struct update_util_data *hook, u64 time,
unsigned int flags)
{
@@ -206,11 +242,14 @@
if (!sugov_should_update_freq(sg_policy, time))
return;
+ flags &= ~SCHED_CPUFREQ_RT_DL;
+
if (flags & SCHED_CPUFREQ_RT_DL) {
next_f = policy->cpuinfo.max_freq;
} else {
- sugov_get_util(&util, &max);
+ sugov_get_util(&util, &max, sg_cpu->cpu);
sugov_iowait_boost(sg_cpu, &util, &max);
+ sugov_walt_adjust(sg_cpu, &util, &max);
next_f = get_next_freq(sg_policy, util, max);
}
sugov_update_commit(sg_policy, time, next_f);
@@ -230,13 +269,14 @@
return max_f;
sugov_iowait_boost(sg_cpu, &util, &max);
+ sugov_walt_adjust(sg_cpu, &util, &max);
for_each_cpu(j, policy->cpus) {
struct sugov_cpu *j_sg_cpu;
unsigned long j_util, j_max;
s64 delta_ns;
- if (j == smp_processor_id())
+ if (j == sg_cpu->cpu)
continue;
j_sg_cpu = &per_cpu(sugov_cpu, j);
@@ -248,7 +288,7 @@
* idle now (and clear iowait_boost for it).
*/
delta_ns = last_freq_update_time - j_sg_cpu->last_update;
- if (delta_ns > TICK_NSEC) {
+ if (delta_ns > sched_ravg_window) {
j_sg_cpu->iowait_boost = 0;
continue;
}
@@ -263,6 +303,7 @@
}
sugov_iowait_boost(j_sg_cpu, &util, &max);
+ sugov_walt_adjust(j_sg_cpu, &util, &max);
}
return get_next_freq(sg_policy, util, max);
@@ -273,13 +314,24 @@
{
struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
struct sugov_policy *sg_policy = sg_cpu->sg_policy;
- unsigned long util, max;
+ unsigned long util, max, hs_util;
unsigned int next_f;
- sugov_get_util(&util, &max);
+ sugov_get_util(&util, &max, sg_cpu->cpu);
+
+ flags &= ~SCHED_CPUFREQ_RT_DL;
raw_spin_lock(&sg_policy->update_lock);
+ if (sg_policy->max != max) {
+ hs_util = mult_frac(max,
+ sg_policy->tunables->hispeed_freq,
+ sg_policy->policy->cpuinfo.max_freq);
+ hs_util = mult_frac(hs_util, TARGET_LOAD, 100);
+ sg_policy->hispeed_util = hs_util;
+ sg_policy->max = max;
+ }
+
sg_cpu->util = util;
sg_cpu->max = max;
sg_cpu->flags = flags;
@@ -287,6 +339,10 @@
sugov_set_iowait_boost(sg_cpu, time, flags);
sg_cpu->last_update = time;
+ trace_sugov_util_update(sg_cpu->cpu, sg_cpu->util, max,
+ sg_cpu->walt_load.nl,
+ sg_cpu->walt_load.pl, flags);
+
if (sugov_should_update_freq(sg_policy, time)) {
next_f = sugov_next_freq_shared(sg_cpu, util, max, flags);
sugov_update_commit(sg_policy, time, next_f);
@@ -364,10 +420,42 @@
return count;
}
+static ssize_t hispeed_freq_show(struct gov_attr_set *attr_set, char *buf)
+{
+ struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
+
+ return sprintf(buf, "%u\n", tunables->hispeed_freq);
+}
+
+static ssize_t hispeed_freq_store(struct gov_attr_set *attr_set,
+ const char *buf, size_t count)
+{
+ struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
+ unsigned int val;
+ struct sugov_policy *sg_policy;
+ unsigned long hs_util;
+
+ if (kstrtouint(buf, 10, &val))
+ return -EINVAL;
+
+ tunables->hispeed_freq = val;
+ list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) {
+ hs_util = mult_frac(sg_policy->max,
+ sg_policy->tunables->hispeed_freq,
+ sg_policy->policy->cpuinfo.max_freq);
+ hs_util = mult_frac(hs_util, TARGET_LOAD, 100);
+ sg_policy->hispeed_util = hs_util;
+ }
+
+ return count;
+}
+
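A worked example of the hispeed_util derivation used in the store hook above (and in sugov_update_shared()): the frequency is first scaled into capacity units against cpuinfo.max_freq, then multiplied by TARGET_LOAD so the governor jumps to hispeed_freq once that much of the corresponding capacity is in use. The frequency and capacity figures below are assumptions, not real platform values, and mult_frac() is reimplemented as a plain function with the same arithmetic as the kernel macro.

#include <stdio.h>

#define TARGET_LOAD 80

/* same arithmetic as the kernel's mult_frac(x, numer, denom) */
static unsigned long mult_frac(unsigned long x, unsigned long numer,
			       unsigned long denom)
{
	unsigned long quot = x / denom;
	unsigned long rem = x % denom;

	return quot * numer + rem * numer / denom;
}

int main(void)
{
	unsigned long max = 1024;			/* CPU capacity at fmax */
	unsigned long cpuinfo_max_freq = 2400000;	/* kHz, assumed */
	unsigned long hispeed_freq = 1200000;		/* kHz, assumed */
	unsigned long hs_util;

	hs_util = mult_frac(max, hispeed_freq, cpuinfo_max_freq);	/* 512 */
	hs_util = mult_frac(hs_util, TARGET_LOAD, 100);			/* 409 */
	printf("hispeed_util = %lu\n", hs_util);
	return 0;
}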
static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);
+static struct governor_attr hispeed_freq = __ATTR_RW(hispeed_freq);
static struct attribute *sugov_attributes[] = {
&rate_limit_us.attr,
+ &hispeed_freq.attr,
NULL
};
@@ -512,6 +600,7 @@
}
tunables->rate_limit_us = LATENCY_MULTIPLIER;
+ tunables->hispeed_freq = 0;
lat = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
if (lat)
tunables->rate_limit_us *= lat;
@@ -585,6 +674,7 @@
memset(sg_cpu, 0, sizeof(*sg_cpu));
sg_cpu->sg_policy = sg_policy;
+ sg_cpu->cpu = cpu;
sg_cpu->flags = SCHED_CPUFREQ_RT;
sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 0085f66..10a807c 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -15,10 +15,11 @@
* Fabio Checconi <fchecconi@gmail.com>
*/
#include "sched.h"
+#include "walt.h"
#include <linux/slab.h>
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
static void
inc_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p)
@@ -43,7 +44,7 @@
pred_demand_delta);
}
-#else /* CONFIG_SCHED_HMP */
+#else /* CONFIG_SCHED_WALT */
static inline void
inc_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p) { }
@@ -51,7 +52,7 @@
static inline void
dec_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p) { }
-#endif /* CONFIG_SCHED_HMP */
+#endif /* CONFIG_SCHED_WALT */
struct dl_bandwidth def_dl_bandwidth;
@@ -1843,7 +1844,7 @@
.switched_to = switched_to_dl,
.update_curr = update_curr_dl,
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
.fixup_hmp_sched_stats = fixup_hmp_sched_stats_dl,
#endif
};
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index ae8bd29..ed9f6db 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -19,6 +19,7 @@
#include <linux/debugfs.h>
#include "sched.h"
+#include "walt.h"
static DEFINE_SPINLOCK(sched_debug_lock);
@@ -599,7 +600,7 @@
cfs_rq->throttle_count);
SEQ_printf(m, " .%-30s: %d\n", "runtime_enabled",
cfs_rq->runtime_enabled);
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
SEQ_printf(m, " .%-30s: %d\n", "nr_big_tasks",
cfs_rq->hmp_stats.nr_big_tasks);
SEQ_printf(m, " .%-30s: %llu\n", "cumulative_runnable_avg",
@@ -696,9 +697,11 @@
#ifdef CONFIG_SMP
P(cpu_capacity);
#endif
+#ifdef CONFIG_SCHED_WALT
#ifdef CONFIG_SCHED_HMP
P(static_cpu_pwr_cost);
P(cluster->static_cluster_pwr_cost);
+#endif
P(cluster->load_scale_factor);
P(cluster->capacity);
P(cluster->max_possible_capacity);
@@ -706,7 +709,9 @@
P(cluster->cur_freq);
P(cluster->max_freq);
P(cluster->exec_scale_factor);
+#ifdef CONFIG_SCHED_WALT
P(hmp_stats.nr_big_tasks);
+#endif
SEQ_printf(m, " .%-30s: %llu\n", "hmp_stats.cumulative_runnable_avg",
rq->hmp_stats.cumulative_runnable_avg);
#endif
@@ -788,9 +793,11 @@
PN(sysctl_sched_wakeup_granularity);
P(sysctl_sched_child_runs_first);
P(sysctl_sched_features);
+#ifdef CONFIG_SCHED_WALT
#ifdef CONFIG_SCHED_HMP
P(sched_upmigrate);
P(sched_downmigrate);
+#endif
P(sched_init_task_load_windows);
P(min_capacity);
P(max_capacity);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
old mode 100644
new mode 100755
index 6fb615e..82e6490
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -34,6 +34,7 @@
#include "sched.h"
#include "tune.h"
+#include "walt.h"
#include <trace/events/sched.h>
/* QHMP/Zone forward declarations */
@@ -42,8 +43,12 @@
struct sd_lb_stats;
struct sg_lb_stats;
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
+static void fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
+ u32 new_task_load, u32 new_pred_demand);
+#endif
+#ifdef CONFIG_SCHED_HMP
#ifdef CONFIG_CFS_BANDWIDTH
static void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
struct task_struct *p, int change_cra);
@@ -67,8 +72,6 @@
struct task_struct *p, int change_cra) { }
#endif /* CONFIG_CFS_BANDWIDTH */
-static void fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
- u32 new_task_load, u32 new_pred_demand);
#ifdef CONFIG_SMP
static struct rq *find_busiest_queue_hmp(struct lb_env *env,
@@ -122,6 +125,10 @@
#endif /* CONFIG_SCHED_HMP */
+#ifdef CONFIG_SCHED_WALT
+static inline bool task_fits_max(struct task_struct *p, int cpu);
+#endif
+
/*
* Targeted preemption latency for CPU-bound tasks:
* (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
@@ -137,7 +144,7 @@
unsigned int sysctl_sched_latency = 6000000ULL;
unsigned int normalized_sysctl_sched_latency = 6000000ULL;
-unsigned int sysctl_sched_is_big_little = 0;
+unsigned int sysctl_sched_is_big_little = 1;
unsigned int sysctl_sched_sync_hint_enable = 1;
unsigned int sysctl_sched_initial_task_util = 0;
unsigned int sysctl_sched_cstate_aware = 1;
@@ -145,8 +152,6 @@
#ifdef CONFIG_SCHED_WALT
unsigned int sysctl_sched_use_walt_cpu_util = 1;
unsigned int sysctl_sched_use_walt_task_util = 1;
-__read_mostly unsigned int sysctl_sched_walt_cpu_high_irqload =
- (10 * NSEC_PER_MSEC);
#endif
/*
* The initial- and re-scaling of tunables is configurable
@@ -216,7 +221,8 @@
* The margin used when comparing utilization with CPU capacity:
* util * 1024 < capacity * margin
*/
-unsigned int capacity_margin = 1280; /* ~20% */
+unsigned int capacity_margin = 1078; /* ~5% margin */
+unsigned int capacity_margin_down = 1205; /* ~15% margin */
static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
@@ -4677,7 +4683,6 @@
#endif
#ifdef CONFIG_SMP
-static bool cpu_overutilized(int cpu);
static unsigned long capacity_orig_of(int cpu);
static unsigned long cpu_util(int cpu);
static inline unsigned long boosted_cpu_util(int cpu);
@@ -4757,6 +4762,9 @@
if (!se) {
add_nr_running(rq, 1);
+#ifdef CONFIG_SCHED_WALT
+ p->misfit = !task_fits_max(p, rq->cpu);
+#endif
inc_rq_hmp_stats(rq, p, 1);
}
@@ -4850,20 +4858,10 @@
#ifdef CONFIG_SMP
if (!se) {
- /*
- * We want to potentially trigger a freq switch
- * request only for tasks that are going to sleep;
- * this is because we get here also during load
- * balancing, but in these cases it seems wise to
- * trigger as single request after load balancing is
- * done.
- */
- if (task_sleep) {
- if (rq->cfs.nr_running)
- update_capacity_of(cpu_of(rq));
- else if (sched_freq())
- set_cfs_cpu_capacity(cpu_of(rq), false, 0);
- }
+ if (rq->cfs.nr_running)
+ update_capacity_of(cpu_of(rq));
+ else if (sched_freq())
+ set_cfs_cpu_capacity(cpu_of(rq), false, 0);
}
/* Update SchedTune accouting */
@@ -5338,11 +5336,6 @@
>> SCHED_CAPACITY_SHIFT;
}
-static inline bool energy_aware(void)
-{
- return sched_feat(ENERGY_AWARE);
-}
-
/*
* Externally visible function. Let's keep the one above
* so that the check is inlined/optimized in the sched paths.
@@ -5361,6 +5354,8 @@
int dst_cpu;
int energy;
int payoff;
+ int sync_cpu;
+ unsigned long curr_util;
struct task_struct *task;
struct {
int before;
@@ -5391,18 +5386,89 @@
*/
static unsigned long __cpu_norm_util(int cpu, unsigned long capacity, int delta)
{
- int util = __cpu_util(cpu, delta);
+ int util = cpu_util_cum(cpu, delta);
if (util >= capacity)
return SCHED_CAPACITY_SCALE;
- return (util << SCHED_CAPACITY_SHIFT)/capacity;
+ return DIV_ROUND_UP(util << SCHED_CAPACITY_SHIFT, capacity);
+}
+
+static inline int task_util(struct task_struct *p)
+{
+#ifdef CONFIG_SCHED_WALT
+ if (!walt_disabled && sysctl_sched_use_walt_task_util) {
+ u64 demand = p->ravg.demand;
+
+ return (demand << 10) / sched_ravg_window;
+ }
+#endif
+ return p->se.avg.util_avg;
+}
+
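For example, with the 20 ms window assumed below, a task whose windowed demand is 5 ms maps to a utilization of 256 on the 0..1024 capacity scale used by task_util() above:

#include <stdio.h>

int main(void)
{
	unsigned long long sched_ravg_window = 20000000ULL;	/* 20 ms in ns, assumed */
	unsigned long long demand = 5000000ULL;			/* task ran 5 ms per window */

	/* same scaling as the WALT branch of task_util() above */
	printf("task_util = %llu\n", (demand << 10) / sched_ravg_window); /* 256 */
	return 0;
}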
+#define SCHED_ENABLE_WAKER_WAKEE 0
+
+static unsigned int sched_small_wakee_task_util = 102; /* ~10% of max cap */
+static unsigned int sched_big_waker_task_util = 256; /* 25% of max cap */
+
+static inline bool
+wake_on_waker_sibling(struct task_struct *p)
+{
+ return SCHED_ENABLE_WAKER_WAKEE &&
+ task_util(current) > sched_big_waker_task_util &&
+ task_util(p) < sched_small_wakee_task_util;
+}
+
+#define sysctl_sched_prefer_sync_wakee_to_waker 0
+
+static inline bool
+bias_to_waker_cpu(struct task_struct *p, int cpu)
+{
+ return sysctl_sched_prefer_sync_wakee_to_waker &&
+ cpu_rq(cpu)->nr_running == 1 &&
+ cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) &&
+ cpu_active(cpu) && !cpu_isolated(cpu);
}
static int calc_util_delta(struct energy_env *eenv, int cpu)
{
+#ifdef CONFIG_SCHED_WALT
+ if (cpu == eenv->src_cpu) {
+ if (!walt_disabled && sysctl_sched_use_walt_task_util &&
+ !task_in_cum_window_demand(cpu_rq(cpu), eenv->task)) {
+ if (eenv->util_delta == 0)
+ /*
+ * energy before - calculate the energy cost
+ * with the new task placed onto src_cpu. The
+ * task is not on a runqueue, so its util is not
+ * in WALT's cr_avg (it was discounted when it
+ * last slept). Hence return the task's util as
+ * the delta so src_cpu's energy is computed as
+ * if the new task were running on it.
+ */
+ return task_util(eenv->task);
+ /*
+ * energy after - WALT's cr_avg already excludes the
+ * new task's util. Thus return a 0 delta so src_cpu's
+ * energy is computed without the task's util.
+ */
+ return 0;
+ }
+ /*
+ * The task is already on a runqueue, for example during load
+ * balancing, so WALT's cpu util already accounts for its util.
+ * Return a 0 delta for "energy before" so that calculation keeps
+ * the task's util accounted, and return -task_util for "energy
+ * after" so that calculation is done with the task's util
+ * discounted.
+ */
+ return -eenv->util_delta;
+ }
+#else
if (cpu == eenv->src_cpu)
return -eenv->util_delta;
+#endif
if (cpu == eenv->dst_cpu)
return eenv->util_delta;
return 0;
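The WALT-specific branch above boils down to one convention: the pair of deltas applied to src_cpu for the "energy before" and "energy after" evaluations depends on whether the waking task's demand is already part of the CPU's cumulative window demand. A small standalone sketch of that convention, illustrative only, with an assumed task_util of 150:

#include <stdio.h>

struct delta_pair { int before; int after; };

static struct delta_pair src_cpu_delta(int task_util, int task_in_cum_demand)
{
	struct delta_pair d;

	if (!task_in_cum_demand) {
		/* sleeping task: util not in cr_avg, add it for "before" */
		d.before = task_util;
		d.after = 0;
	} else {
		/* task already accounted (e.g. load balance): remove it for "after" */
		d.before = 0;
		d.after = -task_util;
	}
	return d;
}

int main(void)
{
	struct delta_pair woken = src_cpu_delta(150, 0);
	struct delta_pair balanced = src_cpu_delta(150, 1);

	printf("woken:    before=%d after=%d\n", woken.before, woken.after);
	printf("balanced: before=%d after=%d\n", balanced.before, balanced.after);
	return 0;
}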
@@ -5416,7 +5482,10 @@
for_each_cpu(i, sched_group_cpus(eenv->sg_cap)) {
delta = calc_util_delta(eenv, i);
- max_util = max(max_util, __cpu_util(i, delta));
+ /* subtract sync_cpu's rq->curr util to discount its cost */
+ if (eenv->sync_cpu == i)
+ delta -= eenv->curr_util;
+ max_util = max(max_util, cpu_util_cum(i, delta));
}
return max_util;
@@ -5440,6 +5509,9 @@
for_each_cpu(i, sched_group_cpus(sg)) {
delta = calc_util_delta(eenv, i);
+ /* subtract sync_cpu's rq->curr util to discount its cost */
+ if (eenv->sync_cpu == i)
+ delta -= eenv->curr_util;
util_sum += __cpu_norm_util(i, capacity, delta);
}
@@ -5448,17 +5520,26 @@
return util_sum;
}
-static int find_new_capacity(struct energy_env *eenv,
- const struct sched_group_energy * const sge)
+static int __find_new_capacity(unsigned long util,
+ const struct sched_group_energy const *sge)
{
int idx;
- unsigned long util = group_max_util(eenv);
for (idx = 0; idx < sge->nr_cap_states; idx++) {
if (sge->cap_states[idx].cap >= util)
break;
}
+ return idx;
+}
+
+static int find_new_capacity(struct energy_env *eenv,
+ const struct sched_group_energy const *sge)
+{
+ int idx;
+ unsigned long util = group_max_util(eenv);
+
+ idx = __find_new_capacity(util, sge);
eenv->cap_idx = idx;
return idx;
@@ -5491,7 +5572,8 @@
static int sched_group_energy(struct energy_env *eenv)
{
struct sched_domain *sd;
- int cpu, total_energy = 0;
+ int cpu;
+ u64 total_energy = 0;
struct cpumask visit_cpus;
struct sched_group *sg;
@@ -5557,14 +5639,23 @@
idle_idx = group_idle_state(sg);
group_util = group_norm_util(eenv, sg);
- sg_busy_energy = (group_util * sg->sge->cap_states[cap_idx].power)
- >> SCHED_CAPACITY_SHIFT;
- sg_idle_energy = ((SCHED_CAPACITY_SCALE-group_util)
- * sg->sge->idle_states[idle_idx].power)
- >> SCHED_CAPACITY_SHIFT;
+ sg_busy_energy = (group_util * sg->sge->cap_states[cap_idx].power);
+
+ if (idle_idx == 0)
+ sg_idle_energy = ((SCHED_CAPACITY_SCALE - group_util)
+ * sg->sge->cap_states[cap_idx].power);
+ else
+ sg_idle_energy = ((SCHED_CAPACITY_SCALE - group_util)
+ * sg->sge->idle_states[idle_idx].power);
total_energy += sg_busy_energy + sg_idle_energy;
+ trace_sched_group_energy(group_first_cpu(sg),
+ group_util, total_energy,
+ sg_busy_energy, sg_idle_energy,
+ idle_idx,
+ sg->sge->cap_states[eenv->cap_idx].cap);
+
if (!sd->child)
cpumask_xor(&visit_cpus, &visit_cpus, sched_group_cpus(sg));
@@ -5578,7 +5669,7 @@
continue;
}
- eenv->energy = total_energy;
+ eenv->energy = total_energy >> SCHED_CAPACITY_SHIFT;
return 0;
}
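A small numeric sketch of the precision change in sched_group_energy() above: busy and idle contributions are now accumulated un-shifted in a u64 and divided by SCHED_CAPACITY_SCALE once at the end, instead of right-shifting every term. The power numbers below are made up; the point is only that per-term shifting loses the fractional parts.

#include <stdio.h>
#include <stdint.h>

#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1UL << SCHED_CAPACITY_SHIFT)

int main(void)
{
	unsigned long group_util = 300;	/* normalized 0..1024, assumed */
	unsigned long busy_power = 130;	/* cap_states[cap_idx].power, assumed */
	unsigned long idle_power = 7;	/* idle_states[idle_idx].power, assumed */
	uint64_t total = 0;

	total += (uint64_t)group_util * busy_power;
	total += (uint64_t)(SCHED_CAPACITY_SCALE - group_util) * idle_power;

	/* old style: shift every term; new style: shift the accumulated sum once */
	printf("per-term shift: %lu\n",
	       (group_util * busy_power >> SCHED_CAPACITY_SHIFT) +
	       ((SCHED_CAPACITY_SCALE - group_util) * idle_power >> SCHED_CAPACITY_SHIFT));
	printf("single shift:   %llu\n",
	       (unsigned long long)(total >> SCHED_CAPACITY_SHIFT));
	return 0;
}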
@@ -5606,6 +5697,8 @@
.dst_cpu = eenv->dst_cpu,
.nrg = { 0, 0, 0, 0},
.cap = { 0, 0, 0 },
+ .task = eenv->task,
+ .sync_cpu = eenv->sync_cpu,
};
if (eenv->src_cpu == eenv->dst_cpu)
@@ -5823,38 +5916,33 @@
return 1;
}
-static inline int task_util(struct task_struct *p)
-{
-#ifdef CONFIG_SCHED_WALT
- if (!walt_disabled && sysctl_sched_use_walt_task_util) {
- unsigned long demand = p->ravg.demand;
- return (demand << 10) / walt_ravg_window;
- }
-#endif
- return p->se.avg.util_avg;
-}
-
static inline unsigned long boosted_task_util(struct task_struct *task);
static inline bool __task_fits(struct task_struct *p, int cpu, int util)
{
- unsigned long capacity = capacity_of(cpu);
+ unsigned int margin;
util += boosted_task_util(p);
- return (capacity * 1024) > (util * capacity_margin);
+ if (capacity_orig_of(task_cpu(p)) > capacity_orig_of(cpu))
+ margin = capacity_margin_down;
+ else
+ margin = capacity_margin;
+
+ return (capacity_orig_of(cpu) * 1024) > (util * margin);
}
static inline bool task_fits_max(struct task_struct *p, int cpu)
{
- unsigned long capacity = capacity_of(cpu);
+ unsigned long capacity = capacity_orig_of(cpu);
unsigned long max_capacity = cpu_rq(cpu)->rd->max_cpu_capacity.val;
if (capacity == max_capacity)
return true;
- if (capacity * capacity_margin > max_capacity * 1024)
- return true;
+ if (sched_boost_policy() == SCHED_BOOST_ON_BIG &&
+ task_sched_boost(p))
+ return false;
return __task_fits(p, cpu, 0);
}
@@ -5864,9 +5952,15 @@
return __task_fits(p, cpu, cpu_util(cpu));
}
-static bool cpu_overutilized(int cpu)
+static bool __cpu_overutilized(int cpu, int delta)
{
- return (capacity_of(cpu) * 1024) < (cpu_util(cpu) * capacity_margin);
+ return (capacity_orig_of(cpu) * 1024) <
+ ((cpu_util(cpu) + delta) * capacity_margin);
+}
+
+bool cpu_overutilized(int cpu)
+{
+ return __cpu_overutilized(cpu, 0);
}
#ifdef CONFIG_SCHED_TUNE
@@ -5967,7 +6061,7 @@
static inline unsigned long
boosted_cpu_util(int cpu)
{
- unsigned long util = cpu_util(cpu);
+ unsigned long util = cpu_util_freq(cpu, NULL);
long margin = schedtune_cpu_margin(util, cpu);
trace_sched_boost_cpu(cpu, util, margin);
@@ -6248,7 +6342,7 @@
idle = false;
}
- if (idle)
+ if (!cpu_isolated(cpu) && idle)
return core;
}
@@ -6273,6 +6367,8 @@
for_each_cpu(cpu, cpu_smt_mask(target)) {
if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
continue;
+ if (cpu_isolated(cpu))
+ continue;
if (idle_cpu(cpu))
return cpu;
}
@@ -6325,6 +6421,8 @@
for_each_cpu_wrap(cpu, sched_domain_span(sd), target, wrap) {
if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
continue;
+ if (cpu_isolated(cpu))
+ continue;
if (idle_cpu(cpu))
break;
}
@@ -6350,13 +6448,14 @@
int best_idle_capacity = INT_MAX;
if (!sysctl_sched_cstate_aware) {
- if (idle_cpu(target))
+ if (idle_cpu(target) && !cpu_isolated(target))
return target;
/*
* If the prevous cpu is cache affine and idle, don't be stupid.
*/
- if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
+ if (i != target && cpus_share_cache(i, target) &&
+ idle_cpu(i) && !cpu_isolated(i))
return i;
sd = rcu_dereference(per_cpu(sd_llc, target));
@@ -6394,6 +6493,10 @@
int idle_idx = idle_get_state_idx(rq);
unsigned long new_usage = boosted_task_util(p);
unsigned long capacity_orig = capacity_orig_of(i);
+
+ if (cpu_isolated(i))
+ continue;
+
if (new_usage > capacity_orig || !idle_cpu(i))
goto next;
@@ -6408,6 +6511,9 @@
}
} else {
for_each_cpu(i, sched_group_cpus(sg)) {
+ if (cpu_isolated(i))
+ continue;
+
if (i == target || !idle_cpu(i))
goto next;
}
@@ -6468,7 +6574,7 @@
continue;
#ifdef CONFIG_SCHED_WALT
- if (walt_cpu_high_irqload(i))
+ if (sched_cpu_high_irqload(i))
continue;
#endif
/*
@@ -6528,22 +6634,107 @@
return target_cpu;
}
+/*
+ * Should task be woken to any available idle cpu?
+ *
+ * Waking tasks to an idle cpu has mixed implications for both performance and
+ * power. In many cases the scheduler cannot correctly estimate the impact of
+ * using idle cpus on either. PF_WAKE_UP_IDLE allows an external kernel module
+ * to pass a strong hint to the scheduler that the task in question should be
+ * woken to an idle cpu, generally to improve performance.
+ */
+static inline int wake_to_idle(struct task_struct *p)
+{
+ return (current->flags & PF_WAKE_UP_IDLE) ||
+ (p->flags & PF_WAKE_UP_IDLE);
+}
+
+static bool
+is_packing_eligible(struct task_struct *p, unsigned long task_util,
+ struct sched_group *sg_target,
+ unsigned long target_cpu_new_util_cum,
+ int targeted_cpus)
+{
+ int cpu_cap_idx_pack, cpu_cap_idx_spread, cap_idx0, cap_idx1;
+
+ if (targeted_cpus > 1)
+ /*
+ * More than one CPU was evaluated and target_cpu is the
+ * least loaded among them, so packing onto target_cpu
+ * won't raise its OPP.
+ */
+ return true;
+
+ /*
+ * There is only one CPU out of C-state.
+ *
+ * cpu_cap_idx_pack contains the estimated OPP index of target_cpu when
+ * the new task is packed onto it.
+ * cap_idx0 and cap_idx1 contain the OPP indices of two CPUs: one for
+ * target_cpu without the new task's load, the other for a newly woken
+ * idle CPU carrying the task's load.
+ *
+ * Pack : Spread :
+ * cap_idx_pack is new OPP. max(cap_idx0, cap_idx1) is new OPP.
+ * ________________ ________________
+ * | | | | ______________
+ * | cap_idx_pack | | cap_idx0 | | cap_idx1 |
+ * | (target_cpu) | | (target_cpu) | | (idle cpu) |
+ * ---------------- ---------------- --------------
+ *
+ * The target_cpu's current capacity can be much higher than its current
+ * utilization, for example due to hysteresis during task migration. In
+ * that case, packing onto the target_cpu based on current capacity would
+ * forgo the chance to lower the OPP and end up keeping target_cpu at the
+ * higher OPP for longer than spreading.
+ *
+ * Try task packing only when doing so will not hold the current OPP
+ * any longer than spreading the load would.
+ */
+
+ cpu_cap_idx_pack = __find_new_capacity(target_cpu_new_util_cum,
+ sg_target->sge);
+
+ cap_idx0 = __find_new_capacity(target_cpu_new_util_cum - task_util,
+ sg_target->sge);
+ cap_idx1 = __find_new_capacity(task_util, sg_target->sge);
+
+ cpu_cap_idx_spread = max(cap_idx0, cap_idx1);
+
+ trace_sched_energy_diff_packing(p, task_util, targeted_cpus,
+ cpu_cap_idx_pack, cpu_cap_idx_spread);
+
+ return cpu_cap_idx_pack == cpu_cap_idx_spread;
+}
+
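
A standalone sketch of the pack-vs-spread OPP comparison performed by is_packing_eligible(); the capacity table and the find_new_capacity() helper below are simplified stand-ins for the energy model behind __find_new_capacity() and sg->sge, which are not shown in this hunk.

#include <stdbool.h>
#include <stdio.h>

/* Assumed capacity states of the target cluster, lowest OPP first. */
static const unsigned long cap_states[] = { 128, 256, 384, 512, 768, 1024 };
#define NR_CAP_STATES ((int)(sizeof(cap_states) / sizeof(cap_states[0])))

/* Lowest OPP index whose capacity covers the requested utilization. */
static int find_new_capacity(unsigned long util)
{
	int idx;

	for (idx = 0; idx < NR_CAP_STATES - 1; idx++)
		if (cap_states[idx] >= util)
			break;
	return idx;
}

static bool packing_eligible(unsigned long task_util,
			     unsigned long target_new_util_cum)
{
	int idx_pack = find_new_capacity(target_new_util_cum);
	int idx_target = find_new_capacity(target_new_util_cum - task_util);
	int idx_idle = find_new_capacity(task_util);
	int idx_spread = idx_target > idx_idle ? idx_target : idx_idle;

	/* Pack only if it does not force a higher OPP than spreading would. */
	return idx_pack == idx_spread;
}

int main(void)
{
	/* Packing a 400-util task onto a CPU already at 100: same OPP, eligible. */
	printf("%d\n", packing_eligible(400, 500));
	/* Packing a 100-util task onto a CPU already at 300: raises OPP, not eligible. */
	printf("%d\n", packing_eligible(100, 400));
	return 0;
}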
static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
{
struct sched_domain *sd;
struct sched_group *sg, *sg_target;
int target_max_cap = INT_MAX;
- int target_cpu = task_cpu(p);
- unsigned long task_util_boosted, new_util;
+ int target_cpu, targeted_cpus = 0;
+ unsigned long task_util_boosted = 0, curr_util = 0;
+ long new_util, new_util_cum;
int i;
-
- if (sysctl_sched_sync_hint_enable && sync) {
- int cpu = smp_processor_id();
- cpumask_t search_cpus;
- cpumask_and(&search_cpus, tsk_cpus_allowed(p), cpu_online_mask);
- if (cpumask_test_cpu(cpu, &search_cpus))
- return cpu;
- }
+ int ediff = -1;
+ int cpu = smp_processor_id();
+ int min_util_cpu = -1;
+ int min_util_cpu_idle_idx = INT_MAX;
+ long min_util_cpu_util_cum = LONG_MAX;
+ unsigned int min_util = UINT_MAX;
+ int cpu_idle_idx;
+ int min_idle_idx_cpu;
+ int min_idle_idx = INT_MAX;
+ bool safe_to_pack = false;
+ unsigned int target_cpu_util = UINT_MAX;
+ long target_cpu_new_util_cum = LONG_MAX;
+ struct cpumask *rtg_target = NULL;
+ bool wake_on_sibling = false;
+ int isolated_candidate = -1;
+ bool need_idle;
+ bool skip_ediff = false;
+ enum sched_boost_policy placement_boost = task_sched_boost(p) ?
+ sched_boost_policy() : SCHED_BOOST_NONE;
sd = rcu_dereference(per_cpu(sd_ea, task_cpu(p)));
@@ -6553,7 +6744,31 @@
sg = sd->groups;
sg_target = sg;
+ sync = sync && sysctl_sched_sync_hint_enable;
+ curr_util = boosted_task_util(cpu_rq(cpu)->curr);
+
+ need_idle = wake_to_idle(p);
+
if (sysctl_sched_is_big_little) {
+ struct related_thread_group *grp;
+
+ rcu_read_lock();
+ grp = task_related_thread_group(p);
+ rcu_read_unlock();
+
+ if (grp && grp->preferred_cluster) {
+ rtg_target = &grp->preferred_cluster->cpus;
+ } else if (sync && wake_on_waker_sibling(p)) {
+ if (bias_to_waker_cpu(p, cpu)) {
+ trace_sched_task_util_bias_to_waker(p,
+ task_cpu(p), task_util(p), cpu,
+ cpu, 0, need_idle);
+ return cpu;
+ }
+ wake_on_sibling = true;
+ }
+
+ task_util_boosted = boosted_task_util(p);
/*
* Find group with sufficient capacity. We only get here if no cpu is
@@ -6563,24 +6778,96 @@
* point.
*/
do {
+ int max_cap_cpu;
+ cpumask_t avail_cpus;
+
+ /* Are all CPUs isolated in this group? */
+ if (unlikely(!sg->group_weight))
+ continue;
+
+ /* Can this task run on any CPUs of this group? */
+ cpumask_and(&avail_cpus, sched_group_cpus(sg),
+ tsk_cpus_allowed(p));
+ cpumask_andnot(&avail_cpus, &avail_cpus,
+ cpu_isolated_mask);
+ if (cpumask_empty(&avail_cpus))
+ continue;
+
/* Assuming all cpus are the same in group */
- int max_cap_cpu = group_first_cpu(sg);
+ max_cap_cpu = group_first_cpu(sg);
/*
* Assume smaller max capacity means more energy-efficient.
* Ideally we should query the energy model for the right
* answer but it easily ends up in an exhaustive search.
*/
- if (capacity_of(max_cap_cpu) < target_max_cap &&
+ if (capacity_orig_of(max_cap_cpu) < target_max_cap &&
task_fits_max(p, max_cap_cpu)) {
sg_target = sg;
+
+ if (rtg_target) {
+ /*
+ * For tasks that belong to a related
+ * thread group, select the preferred
+ * cluster if the task can fit there,
+ * otherwise select the cluster which
+ * can fit the task.
+ */
+ if (cpumask_test_cpu(max_cap_cpu,
+ rtg_target))
+ break;
+ continue;
+ } else if (wake_on_sibling) {
+ /* Skip non-sibling CPUs */
+ if (!cpumask_test_cpu(cpu,
+ sched_group_cpus(sg)))
+ continue;
+ } else if (sync && curr_util >=
+ task_util_boosted) {
+ if (cpumask_test_cpu(cpu,
+ sched_group_cpus(sg))) {
+ if (!cpumask_test_cpu(task_cpu(p),
+ sched_group_cpus(sg)))
+ skip_ediff = true;
+ break;
+ }
+ continue;
+ }
+
target_max_cap = capacity_of(max_cap_cpu);
}
} while (sg = sg->next, sg != sd->groups);
- task_util_boosted = boosted_task_util(p);
+ target_cpu = -1;
+
/* Find cpu with sufficient capacity */
for_each_cpu_and(i, tsk_cpus_allowed(p), sched_group_cpus(sg_target)) {
+ if (cpu_isolated(i))
+ continue;
+
+ if (isolated_candidate == -1)
+ isolated_candidate = i;
+
+ if (is_reserved(i))
+ continue;
+
+ if (sched_cpu_high_irqload(i))
+ continue;
+
+ /*
+ * Since this code is inside sched_is_big_little,
+ * we are going to assume that boost policy is
+ * SCHED_BOOST_ON_BIG.
+ */
+ if (placement_boost != SCHED_BOOST_NONE) {
+ new_util = cpu_util(i);
+ if (new_util < min_util) {
+ min_util_cpu = i;
+ min_util = new_util;
+ }
+ continue;
+ }
+
/*
* p's blocked utilization is still accounted for on prev_cpu
* so prev_cpu will receive a negative bias due to the double
@@ -6588,6 +6875,19 @@
*/
new_util = cpu_util(i) + task_util_boosted;
+ if (task_in_cum_window_demand(cpu_rq(i), p))
+ new_util_cum = cpu_util_cum(i, 0) +
+ task_util_boosted - task_util(p);
+ else
+ new_util_cum = cpu_util_cum(i, 0) +
+ task_util_boosted;
+
+ if (sync && i == cpu)
+ new_util -= curr_util;
+
+ trace_sched_cpu_util(p, i, task_util_boosted, curr_util,
+ new_util_cum, sync);
+
/*
* Ensure minimum capacity to grant the required boost.
* The target CPU can be already at a capacity level higher
@@ -6596,15 +6896,96 @@
if (new_util > capacity_orig_of(i))
continue;
- if (new_util < capacity_curr_of(i)) {
- target_cpu = i;
- if (cpu_rq(i)->nr_running)
+ cpu_idle_idx = cpu_rq(i)->nr_running ? -1 :
+ idle_get_state_idx(cpu_rq(i));
+
+ if (!need_idle &&
+ (!wake_on_sibling ||
+ (wake_on_sibling && i != cpu)) &&
+ add_capacity_margin(new_util_cum) <
+ capacity_curr_of(i)) {
+ if (sysctl_sched_cstate_aware) {
+ if (cpu_idle_idx < min_idle_idx) {
+ min_idle_idx = cpu_idle_idx;
+ min_idle_idx_cpu = i;
+ target_cpu = i;
+ target_cpu_util = new_util;
+ target_cpu_new_util_cum =
+ new_util_cum;
+ targeted_cpus = 1;
+ } else if (cpu_idle_idx ==
+ min_idle_idx &&
+ (target_cpu_util >
+ new_util ||
+ (target_cpu_util ==
+ new_util &&
+ (i == task_cpu(p) ||
+ (target_cpu !=
+ task_cpu(p) &&
+ target_cpu_new_util_cum >
+ new_util_cum))))) {
+ min_idle_idx_cpu = i;
+ target_cpu = i;
+ target_cpu_util = new_util;
+ target_cpu_new_util_cum =
+ new_util_cum;
+ targeted_cpus++;
+ }
+ } else if (cpu_rq(i)->nr_running) {
+ target_cpu = i;
break;
+ }
+ } else if (!need_idle &&
+ (!wake_on_sibling ||
+ (wake_on_sibling && i != cpu))) {
+ /*
+ * At least one CPU other than target_cpu is
+ * going to have its OPP raised regardless,
+ * since its utilization already exceeds its
+ * current capacity plus margin. Task packing
+ * is therefore safe: packing by itself is not
+ * what raises the OPP.
+ */
+ safe_to_pack = true;
}
- /* cpu has capacity at higher OPP, keep it as fallback */
- if (target_cpu == task_cpu(p))
- target_cpu = i;
+ /*
+ * cpu has capacity at higher OPP, keep it as
+ * fallback.
+ */
+ if (new_util < min_util) {
+ min_util_cpu = i;
+ min_util = new_util;
+ min_util_cpu_idle_idx = cpu_idle_idx;
+ min_util_cpu_util_cum = new_util_cum;
+ } else if (sysctl_sched_cstate_aware &&
+ min_util == new_util) {
+ if (min_util_cpu == task_cpu(p))
+ continue;
+
+ if (i == task_cpu(p) ||
+ (cpu_idle_idx < min_util_cpu_idle_idx ||
+ (cpu_idle_idx == min_util_cpu_idle_idx &&
+ min_util_cpu_util_cum > new_util_cum))) {
+ min_util_cpu = i;
+ min_util_cpu_idle_idx = cpu_idle_idx;
+ min_util_cpu_util_cum = new_util_cum;
+ }
+ }
+ }
+
+ if (target_cpu == -1 ||
+ (target_cpu != min_util_cpu && !safe_to_pack &&
+ !is_packing_eligible(p, task_util_boosted, sg_target,
+ target_cpu_new_util_cum,
+ targeted_cpus))) {
+ if (likely(min_util_cpu != -1))
+ target_cpu = min_util_cpu;
+ else if (cpu_isolated(task_cpu(p)) &&
+ isolated_candidate != -1)
+ target_cpu = isolated_candidate;
+ else
+ target_cpu = task_cpu(p);
}
} else {
/*
@@ -6618,6 +6999,8 @@
bool prefer_idle = 0;
#endif
int tmp_target = find_best_target(p, boosted, prefer_idle);
+
+ target_cpu = task_cpu(p);
if (tmp_target >= 0) {
target_cpu = tmp_target;
if ((boosted || prefer_idle) && idle_cpu(target_cpu))
@@ -6625,22 +7008,73 @@
}
}
- if (target_cpu != task_cpu(p)) {
+ if (wake_on_sibling && target_cpu != -1) {
+ trace_sched_task_util_bias_to_waker(p, task_cpu(p),
+ task_util(p), target_cpu,
+ target_cpu, 0, need_idle);
+ return target_cpu;
+ }
+
+ if (target_cpu != task_cpu(p) && !cpu_isolated(task_cpu(p))) {
struct energy_env eenv = {
.util_delta = task_util(p),
.src_cpu = task_cpu(p),
.dst_cpu = target_cpu,
.task = p,
+ .sync_cpu = sync ? smp_processor_id() : -1,
+ .curr_util = curr_util,
};
- /* Not enough spare capacity on previous cpu */
- if (cpu_overutilized(task_cpu(p)))
+ /*
+ * We always want to migrate the task to the preferred cluster.
+ */
+ if (rtg_target) {
+ trace_sched_task_util_colocated(p, task_cpu(p),
+ task_util(p),
+ cpumask_first(rtg_target),
+ target_cpu, 0, need_idle);
return target_cpu;
+ }
- if (energy_diff(&eenv) >= 0)
- return task_cpu(p);
+#ifdef CONFIG_SCHED_WALT
+ if (walt_disabled || !sysctl_sched_use_walt_cpu_util)
+ task_util_boosted = 0;
+#else
+ task_util_boosted = 0;
+#endif
+ /* Not enough spare capacity on previous cpu */
+ if (__cpu_overutilized(task_cpu(p), task_util_boosted)) {
+ trace_sched_task_util_overutilzed(p, task_cpu(p),
+ task_util(p), target_cpu,
+ target_cpu, 0, need_idle);
+ return target_cpu;
+ }
+
+ if (!skip_ediff)
+ ediff = energy_diff(&eenv);
+
+ if (!sysctl_sched_cstate_aware) {
+ if (ediff >= 0) {
+ trace_sched_task_util_energy_diff(p,
+ task_cpu(p), task_util(p),
+ target_cpu, task_cpu(p), ediff,
+ need_idle);
+ return task_cpu(p);
+ }
+ } else {
+ if (ediff > 0) {
+ trace_sched_task_util_energy_diff(p,
+ task_cpu(p), task_util(p),
+ target_cpu, task_cpu(p), ediff,
+ need_idle);
+ return task_cpu(p);
+ }
+ }
}
+ trace_sched_task_util_energy_aware(p, task_cpu(p), task_util(p),
+ target_cpu, target_cpu, ediff,
+ need_idle);
return target_cpu;
}
@@ -6669,6 +7103,9 @@
return select_best_cpu(p, prev_cpu, 0, sync);
#endif
+ if (energy_aware())
+ return energy_aware_wake_cpu(p, prev_cpu, sync);
+
if (sd_flag & SD_BALANCE_WAKE) {
record_wakee(p);
want_affine = (!wake_wide(p) && task_fits_max(p, cpu) &&
@@ -7332,8 +7769,11 @@
unsigned int loop_max;
enum fbq_type fbq_type;
+ enum group_type busiest_group_type;
struct list_head tasks;
+#ifdef CONFIG_SCHED_HMP
enum sched_boost_policy boost_policy;
+#endif
};
/*
@@ -7431,7 +7871,9 @@
int can_migrate_task(struct task_struct *p, struct lb_env *env)
{
int tsk_cache_hot;
+#ifdef CONFIG_SCHED_HMP
int twf, group_cpus;
+#endif
lockdep_assert_held(&env->src_rq->lock);
@@ -7475,9 +7917,34 @@
return 0;
}
+ if (energy_aware() && !env->dst_rq->rd->overutilized &&
+ env->idle == CPU_NEWLY_IDLE) {
+ long util_cum_dst, util_cum_src;
+ unsigned long demand;
+
+ demand = task_util(p);
+ util_cum_dst = cpu_util_cum(env->dst_cpu, 0) + demand;
+ util_cum_src = cpu_util_cum(env->src_cpu, 0) - demand;
+
+ if (util_cum_dst > util_cum_src)
+ return 0;
+ }
+
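
A standalone sketch of the newly-idle pull filter added above: with energy-aware scheduling and no overutilization, the destination declines the task if pulling it would leave the destination more loaded than the source. Utilizations are passed in as plain numbers for illustration.

#include <stdbool.h>
#include <stdio.h>

static bool newly_idle_should_pull(long dst_util_cum, long src_util_cum,
				   long task_demand)
{
	long dst_after = dst_util_cum + task_demand;
	long src_after = src_util_cum - task_demand;

	/* Pull only if the destination does not end up busier than the source. */
	return dst_after <= src_after;
}

int main(void)
{
	/* dst at 100, src at 500, task of 150: pull (250 <= 350) */
	printf("%d\n", newly_idle_should_pull(100, 500, 150));
	/* dst at 100, src at 300, task of 150: skip (250 > 150) */
	printf("%d\n", newly_idle_should_pull(100, 300, 150));
	return 0;
}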
/* Record that we found atleast one task that could run on dst_cpu */
env->flags &= ~LBF_ALL_PINNED;
+#ifdef CONFIG_SCHED_WALT
+ if (env->flags & LBF_IGNORE_PREFERRED_CLUSTER_TASKS &&
+ !preferred_cluster(cpu_rq(env->dst_cpu)->cluster, p))
+ return 0;
+
+ /* Don't detach task if it doesn't fit on the destination */
+ if (env->flags & LBF_IGNORE_BIG_TASKS &&
+ !task_fits_max(p, env->dst_cpu))
+ return 0;
+#endif
+
+#ifdef CONFIG_SCHED_HMP
if (cpu_capacity(env->dst_cpu) > cpu_capacity(env->src_cpu)) {
if (nr_big_tasks(env->src_rq) && !is_big_task(p))
return 0;
@@ -7496,10 +7963,6 @@
if (env->flags & LBF_IGNORE_BIG_TASKS && !twf)
return 0;
- if (env->flags & LBF_IGNORE_PREFERRED_CLUSTER_TASKS &&
- !preferred_cluster(rq_cluster(cpu_rq(env->dst_cpu)), p))
- return 0;
-
/*
* Group imbalance can sometimes cause work to be pulled across groups
* even though the group could have managed the imbalance on its own.
@@ -7510,6 +7973,7 @@
SCHED_CAPACITY_SCALE);
if (!twf && env->busiest_nr_running <= group_cpus)
return 0;
+#endif
if (task_running(env->src_rq, p)) {
schedstat_inc(p->se.statistics.nr_failed_migrations_running);
@@ -7977,6 +8441,8 @@
int max_cap_cpu;
unsigned long flags;
+ capacity = min(capacity, thermal_cap(cpu));
+
cpu_rq(cpu)->cpu_capacity_orig = capacity;
mcc = &cpu_rq(cpu)->rd->max_cpu_capacity;
@@ -7998,6 +8464,8 @@
raw_spin_unlock_irqrestore(&mcc->lock, flags);
skip_unlock: __attribute__ ((unused));
+ sdg->sgc->max_capacity = capacity;
+
capacity *= scale_rt_capacity(cpu);
capacity >>= SCHED_CAPACITY_SHIFT;
@@ -8006,7 +8474,6 @@
cpu_rq(cpu)->cpu_capacity = capacity;
sdg->sgc->capacity = capacity;
- sdg->sgc->max_capacity = capacity;
}
void update_group_capacity(struct sched_domain *sd, int cpu)
@@ -8176,6 +8643,17 @@
return false;
}
+
+/*
+ * group_smaller_cpu_capacity: Returns true if sched_group sg has smaller
+ * per-cpu capacity than sched_group ref.
+ */
+static inline bool
+group_smaller_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
+{
+ return sg->sgc->max_capacity < ref->sgc->max_capacity;
+}
+
static inline enum
group_type group_classify(struct sched_group *group,
struct sg_lb_stats *sgs)
@@ -8253,11 +8731,11 @@
if (!nr_running && idle_cpu(i))
sgs->idle_cpus++;
- if (cpu_overutilized(i)) {
+ if (cpu_overutilized(i))
*overutilized = true;
- if (!sgs->group_misfit_task && rq->misfit_task)
- sgs->group_misfit_task = capacity_of(i);
- }
+
+ if (!sgs->group_misfit_task && rq->misfit_task)
+ sgs->group_misfit_task = capacity_of(i);
}
/* Isolated CPU has no weight */
@@ -8312,9 +8790,25 @@
if (sgs->group_type < busiest->group_type)
return false;
+ /*
+ * Candidate sg doesn't face any serious load-balance problems,
+ * so don't pick it if the local sg is already filled up.
+ */
+ if (sgs->group_type == group_other &&
+ !group_has_capacity(env, &sds->local_stat))
+ return false;
+
if (sgs->avg_load <= busiest->avg_load)
return false;
+ /*
+ * Candidate sg has no more than one task per cpu and has higher
+ * per-cpu capacity. No reason to pull tasks to less capable cpus.
+ */
+ if (sgs->sum_nr_running <= sgs->group_weight &&
+ group_smaller_cpu_capacity(sds->local, sg))
+ return false;
+
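
A standalone sketch combining the filters added to update_sd_pick_busiest() above, with illustrative values; the real code reads these quantities from sg_lb_stats and sched_group_capacity.

#include <stdbool.h>
#include <stdio.h>

enum group_type { group_other, group_misfit_task, group_overloaded };

static bool pick_as_busiest(enum group_type type, bool local_has_capacity,
			    unsigned long avg_load, unsigned long busiest_avg_load,
			    unsigned int sum_nr_running, unsigned int group_weight,
			    unsigned long group_max_cap, unsigned long local_max_cap)
{
	/* A trouble-free candidate is skipped when the local group is full. */
	if (type == group_other && !local_has_capacity)
		return false;

	if (avg_load <= busiest_avg_load)
		return false;

	/* At most one task per cpu on higher-capacity cpus: leave it alone. */
	if (sum_nr_running <= group_weight && local_max_cap < group_max_cap)
		return false;

	return true;
}

int main(void)
{
	/* A 4-CPU big group with 3 tasks, viewed from a little group: not busiest. */
	printf("%d\n", pick_as_busiest(group_other, true, 900, 500, 3, 4, 1024, 512));
	return 0;
}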
/* This is the busiest node in its class. */
if (!(env->sd->flags & SD_ASYM_PACKING))
return true;
@@ -8424,6 +8918,15 @@
sgs->group_type = group_classify(sg, sgs);
}
+ /*
+ * Ignore task groups with misfit tasks if local group has no
+ * capacity or if per-cpu capacity isn't higher.
+ */
+ if (sgs->group_type == group_misfit_task && sds->local &&
+ (!group_has_capacity(env, &sds->local_stat) ||
+ !group_smaller_cpu_capacity(sg, sds->local)))
+ sgs->group_type = group_other;
+
if (update_sd_pick_busiest(env, sds, sg, sgs)) {
sds->busiest = sg;
sds->busiest_stat = *sgs;
@@ -8611,6 +9114,22 @@
*/
if (busiest->avg_load <= sds->avg_load ||
local->avg_load >= sds->avg_load) {
+ /* Misfitting tasks should be migrated in any case */
+ if (busiest->group_type == group_misfit_task) {
+ env->imbalance = busiest->group_misfit_task;
+ return;
+ }
+
+ /*
+ * Busiest group is overloaded, local is not, use the spare
+ * cycles to maximize throughput
+ */
+ if (busiest->group_type == group_overloaded &&
+ local->group_type <= group_misfit_task) {
+ env->imbalance = busiest->load_per_task;
+ return;
+ }
+
env->imbalance = 0;
return fix_small_imbalance(env, sds);
}
@@ -8644,6 +9163,11 @@
(sds->avg_load - local->avg_load) * local->group_capacity
) / SCHED_CAPACITY_SCALE;
+ /* Boost imbalance to allow misfit task to be balanced. */
+ if (busiest->group_type == group_misfit_task)
+ env->imbalance = max_t(long, env->imbalance,
+ busiest->group_misfit_task);
+
/*
* if *imbalance is less than the average load per runnable task
* there is no guarantee that any tasks will be moved so we'll have
@@ -8680,8 +9204,34 @@
*/
update_sd_lb_stats(env, &sds);
- if (energy_aware() && !env->dst_rq->rd->overutilized)
- goto out_balanced;
+ if (energy_aware() && !env->dst_rq->rd->overutilized) {
+ int cpu_local, cpu_busiest;
+ long util_cum;
+ unsigned long capacity_local, capacity_busiest;
+
+ if (env->idle != CPU_NEWLY_IDLE)
+ goto out_balanced;
+
+ if (!sds.local || !sds.busiest)
+ goto out_balanced;
+
+ cpu_local = group_first_cpu(sds.local);
+ cpu_busiest = group_first_cpu(sds.busiest);
+
+ /* TODO: don't assume same cap cpus are in same domain */
+ capacity_local = capacity_orig_of(cpu_local);
+ capacity_busiest = capacity_orig_of(cpu_busiest);
+ if (capacity_local > capacity_busiest) {
+ goto out_balanced;
+ } else if (capacity_local == capacity_busiest) {
+ if (cpu_rq(cpu_busiest)->nr_running < 2)
+ goto out_balanced;
+
+ util_cum = cpu_util_cum(cpu_busiest, 0);
+ if (util_cum < cpu_util_cum(cpu_local, 0))
+ goto out_balanced;
+ }
+ }
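
A standalone sketch of the energy-aware gating added above to find_busiest_group(); capacities and cumulative utilizations are plain numbers here, standing in for capacity_orig_of() and cpu_util_cum().

#include <stdbool.h>
#include <stdio.h>

static bool continue_balance(unsigned long cap_local, unsigned long cap_busiest,
			     unsigned int busiest_nr_running,
			     long util_cum_local, long util_cum_busiest,
			     bool newly_idle)
{
	if (!newly_idle)
		return false;

	/* Never pull toward a higher-capacity group while not overutilized. */
	if (cap_local > cap_busiest)
		return false;

	if (cap_local == cap_busiest) {
		if (busiest_nr_running < 2)
			return false;
		if (util_cum_busiest < util_cum_local)
			return false;
	}

	return true;
}

int main(void)
{
	/* Same-capacity groups, busiest has 3 runnable tasks and more load: balance. */
	printf("%d\n", continue_balance(1024, 1024, 3, 200, 600, true));
	/* Big local group, little busiest group: stay balanced. */
	printf("%d\n", continue_balance(1024, 512, 3, 200, 600, true));
	return 0;
}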
local = &sds.local_stat;
busiest = &sds.busiest_stat;
@@ -8716,6 +9266,11 @@
busiest->group_no_capacity)
goto force_balance;
+ /* Misfitting tasks should be dealt with regardless of the avg load */
+ if (busiest->group_type == group_misfit_task) {
+ goto force_balance;
+ }
+
/*
* If the local group is busier than the selected busiest group
* don't try and pull any tasks.
@@ -8739,7 +9294,8 @@
* might end up to just move the imbalance on another group
*/
if ((busiest->group_type != group_overloaded) &&
- (local->idle_cpus <= (busiest->idle_cpus + 1)))
+ (local->idle_cpus <= (busiest->idle_cpus + 1)) &&
+ !group_smaller_cpu_capacity(sds.busiest, sds.local))
goto out_balanced;
} else {
/*
@@ -8752,6 +9308,7 @@
}
force_balance:
+ env->busiest_group_type = busiest->group_type;
/* Looks like there is an imbalance. Compute it */
calculate_imbalance(env, &sds);
return sds.busiest;
@@ -8814,10 +9371,29 @@
*/
if (rq->nr_running == 1 && wl > env->imbalance &&
- !check_cpu_capacity(rq, env->sd))
+ !check_cpu_capacity(rq, env->sd) &&
+ env->busiest_group_type != group_misfit_task)
continue;
/*
+ * With energy-aware scheduling enabled, migrating a misfit
+ * task takes priority over pulling from the most loaded
+ * CPU. E.g. with one CPU running a single misfit task and
+ * other CPUs running several lower-load tasks, first make
+ * sure the misfit task can be migrated onto a higher
+ * capacity CPU.
+ */
+ if (energy_aware() &&
+ capacity_orig_of(i) < capacity_orig_of(env->dst_cpu) &&
+ rq->misfit_task &&
+ env->busiest_group_type == group_misfit_task) {
+ busiest_load = wl;
+ busiest_capacity = capacity;
+ busiest = rq;
+ break;
+ }
+
+ /*
* For the load comparisons with the other cpu's, consider
* the weighted_cpuload() scaled with the cpu capacity, so
* that the load can be moved away from the cpu that is
@@ -8876,12 +9452,10 @@
return 1;
}
- if (energy_aware() && (capacity_of(env->src_cpu) < capacity_of(env->dst_cpu)) &&
- env->src_rq->cfs.h_nr_running == 1 &&
- cpu_overutilized(env->src_cpu) &&
- !cpu_overutilized(env->dst_cpu)) {
- return 1;
- }
+ if ((env->idle != CPU_NOT_IDLE) &&
+ (capacity_orig_of(env->src_cpu) < capacity_orig_of(env->dst_cpu)) &&
+ env->src_rq->misfit_task)
+ return 1;
return unlikely(sd->nr_balance_failed >
sd->cache_nice_tries + NEED_ACTIVE_BALANCE_THRESHOLD);
@@ -8963,7 +9537,9 @@
.loop = 0,
.busiest_nr_running = 0,
.busiest_grp_capacity = 0,
+#ifdef CONFIG_SCHED_HMP
.boost_policy = sched_boost_policy(),
+#endif
};
/*
@@ -9419,7 +9995,9 @@
.busiest_grp_capacity = 0,
.flags = 0,
.loop = 0,
+#ifdef CONFIG_SCHED_HMP
.boost_policy = sched_boost_policy(),
+#endif
};
bool moved = false;
@@ -9536,12 +10114,33 @@
static inline int find_new_ilb(int type)
{
- int ilb = cpumask_first(nohz.idle_cpus_mask);
+ int ilb = nr_cpu_ids;
+ struct sched_domain *sd;
+ int cpu = raw_smp_processor_id();
+ struct rq *rq = cpu_rq(cpu);
+ cpumask_t cpumask;
#ifdef CONFIG_SCHED_HMP
return find_new_hmp_ilb(type);
#endif
+ rcu_read_lock();
+ sd = rcu_dereference_check_sched_domain(rq->sd);
+ if (sd) {
+ cpumask_and(&cpumask, nohz.idle_cpus_mask,
+ sched_domain_span(sd));
+ ilb = cpumask_first(&cpumask);
+ }
+ rcu_read_unlock();
+
+ if (sd && (ilb >= nr_cpu_ids || !idle_cpu(ilb))) {
+ if (!energy_aware() ||
+ (capacity_orig_of(cpu) ==
+ cpu_rq(cpu)->rd->max_cpu_capacity.val ||
+ cpu_overutilized(cpu)))
+ ilb = cpumask_first(nohz.idle_cpus_mask);
+ }
+
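
A standalone sketch of the revised find_new_ilb() preference order; cpumasks are modeled as plain arrays and the "max-capacity or overutilized" condition is collapsed into a single flag for illustration.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

static int pick_ilb(const bool nohz_idle[NR_CPUS],
		    const bool in_local_domain[NR_CPUS],
		    bool energy_aware, bool caller_is_big_or_overutilized)
{
	int cpu;

	/* First choice: an idle CPU that shares the caller's sched domain. */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (nohz_idle[cpu] && in_local_domain[cpu])
			return cpu;

	/* Fallback to any idle CPU only when kicking a remote CPU is justified. */
	if (!energy_aware || caller_is_big_or_overutilized)
		for (cpu = 0; cpu < NR_CPUS; cpu++)
			if (nohz_idle[cpu])
				return cpu;

	return -1;
}

int main(void)
{
	bool idle[NR_CPUS]  = { false, false, false, false, true, false, false, false };
	bool local[NR_CPUS] = { true, true, true, true, false, false, false, false };

	/* Only a remote CPU is idle and EAS is on: no ILB kick from a little CPU. */
	printf("%d\n", pick_ilb(idle, local, true, false));
	return 0;
}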
if (ilb < nr_cpu_ids && idle_cpu(ilb))
return ilb;
@@ -9892,9 +10491,8 @@
if (time_before(now, nohz.next_balance))
return false;
- if (rq->nr_running >= 2 &&
- (!energy_aware() || cpu_overutilized(cpu)))
- return true;
+ if (energy_aware())
+ return rq->nr_running >= 2 && cpu_overutilized(cpu);
#ifndef CONFIG_SCHED_HMP
rcu_read_lock();
@@ -10004,6 +10602,10 @@
{
struct cfs_rq *cfs_rq;
struct sched_entity *se = &curr->se;
+#ifdef CONFIG_SMP
+ bool old_misfit = curr->misfit;
+ bool misfit;
+#endif
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
@@ -10019,7 +10621,13 @@
trace_sched_overutilized(true);
}
- rq->misfit_task = !task_fits_max(curr, rq->cpu);
+ misfit = !task_fits_max(curr, rq->cpu);
+ rq->misfit_task = misfit;
+
+ if (old_misfit != misfit) {
+ adjust_nr_big_tasks(&rq->hmp_stats, 1, misfit);
+ curr->misfit = misfit;
+ }
#endif
}
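
A standalone sketch of the transition-only misfit accounting added to the tick path above; the rq and hmp_stats structures are reduced to a single counter for illustration.

#include <stdbool.h>
#include <stdio.h>

struct rq_stats { int nr_big_tasks; };

static void tick_update_misfit(struct rq_stats *stats, bool *task_misfit,
			       bool now_misfit)
{
	if (*task_misfit == now_misfit)
		return;		/* no transition, nothing to account */

	stats->nr_big_tasks += now_misfit ? 1 : -1;
	*task_misfit = now_misfit;
}

int main(void)
{
	struct rq_stats stats = { 0 };
	bool misfit = false;

	tick_update_misfit(&stats, &misfit, true);	/* becomes misfit: +1 */
	tick_update_misfit(&stats, &misfit, true);	/* still misfit: no-op */
	tick_update_misfit(&stats, &misfit, false);	/* fits again: -1 */
	printf("nr_big_tasks = %d\n", stats.nr_big_tasks);
	return 0;
}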
@@ -10481,7 +11089,7 @@
#ifdef CONFIG_FAIR_GROUP_SCHED
.task_change_group = task_change_group_fair,
#endif
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
.fixup_hmp_sched_stats = fixup_hmp_sched_stats_fair,
#endif
};
@@ -10531,6 +11139,154 @@
}
+/* WALT sched implementation begins here */
+
+#if defined(CONFIG_SCHED_WALT) && defined(CONFIG_CFS_BANDWIDTH)
+static inline struct task_group *next_task_group(struct task_group *tg)
+{
+ tg = list_entry_rcu(tg->list.next, typeof(struct task_group), list);
+
+ return (&tg->list == &task_groups) ? NULL : tg;
+}
+
+/* Iterate over all cfs_rq in a cpu */
+#define for_each_cfs_rq(cfs_rq, tg, cpu) \
+ for (tg = container_of(&task_groups, struct task_group, list); \
+ ((tg = next_task_group(tg)) && (cfs_rq = tg->cfs_rq[cpu]));)
+
+void reset_cfs_rq_hmp_stats(int cpu, int reset_cra)
+{
+ struct task_group *tg;
+ struct cfs_rq *cfs_rq;
+
+ rcu_read_lock();
+
+ for_each_cfs_rq(cfs_rq, tg, cpu)
+ reset_hmp_stats(&cfs_rq->hmp_stats, reset_cra);
+
+ rcu_read_unlock();
+}
+
+static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq);
+
+static void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+ struct task_struct *p, int change_cra);
+static void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+ struct task_struct *p, int change_cra);
+
+/* Add task's contribution to a cpu's HMP statistics */
+void inc_hmp_sched_stats_fair(struct rq *rq,
+ struct task_struct *p, int change_cra)
+{
+ struct cfs_rq *cfs_rq;
+ struct sched_entity *se = &p->se;
+
+ /*
+ * Although the check below is not strictly required (as
+ * inc/dec_nr_big_task and inc/dec_cumulative_runnable_avg, called
+ * from inc_cfs_rq_hmp_stats(), have similar checks), we gain a bit of
+ * efficiency by short-circuiting the for_each_sched_entity() loop when
+ * sched_disable_window_stats is set.
+ */
+ if (sched_disable_window_stats)
+ return;
+
+ for_each_sched_entity(se) {
+ cfs_rq = cfs_rq_of(se);
+ inc_cfs_rq_hmp_stats(cfs_rq, p, change_cra);
+ if (cfs_rq_throttled(cfs_rq))
+ break;
+ }
+
+ /* Update rq->hmp_stats only if we didn't find any throttled cfs_rq */
+ if (!se)
+ inc_rq_hmp_stats(rq, p, change_cra);
+}
+
+static void fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
+ u32 new_task_load, u32 new_pred_demand)
+{
+ struct cfs_rq *cfs_rq;
+ struct sched_entity *se = &p->se;
+ s64 task_load_delta = (s64)new_task_load - task_load(p);
+ s64 pred_demand_delta = PRED_DEMAND_DELTA;
+
+ for_each_sched_entity(se) {
+ cfs_rq = cfs_rq_of(se);
+
+ fixup_cumulative_runnable_avg(&cfs_rq->hmp_stats, p,
+ task_load_delta,
+ pred_demand_delta);
+ fixup_nr_big_tasks(&cfs_rq->hmp_stats, p, task_load_delta);
+ if (cfs_rq_throttled(cfs_rq))
+ break;
+ }
+
+ /* Fix up rq->hmp_stats only if we didn't find any throttled cfs_rq */
+ if (!se) {
+ fixup_cumulative_runnable_avg(&rq->hmp_stats, p,
+ task_load_delta,
+ pred_demand_delta);
+ fixup_nr_big_tasks(&rq->hmp_stats, p, task_load_delta);
+ }
+}
+
+#elif defined(CONFIG_SCHED_WALT)
+
+inline void reset_cfs_rq_hmp_stats(int cpu, int reset_cra) { }
+
+static void
+fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
+ u32 new_task_load, u32 new_pred_demand)
+{
+ s64 task_load_delta = (s64)new_task_load - task_load(p);
+ s64 pred_demand_delta = PRED_DEMAND_DELTA;
+
+ fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta,
+ pred_demand_delta);
+ fixup_nr_big_tasks(&rq->hmp_stats, p, task_load_delta);
+}
+
+static inline int task_will_be_throttled(struct task_struct *p)
+{
+ return 0;
+}
+
+void inc_hmp_sched_stats_fair(struct rq *rq,
+ struct task_struct *p, int change_cra)
+{
+ inc_nr_big_task(&rq->hmp_stats, p);
+}
+
+static inline int
+kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu)
+{
+ unsigned long flags;
+ int rc = 0;
+
+ /* Invoke active balance to force migrate currently running task */
+ raw_spin_lock_irqsave(&rq->lock, flags);
+ if (!rq->active_balance) {
+ rq->active_balance = 1;
+ rq->push_cpu = new_cpu;
+ get_task_struct(p);
+ rq->push_task = p;
+ rc = 1;
+ }
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+
+ return rc;
+}
+
+#else
+
+static inline int task_will_be_throttled(struct task_struct *p)
+{
+ return 0;
+}
+
+#endif
+
/* QHMP/Zone sched implementation begins here */
#ifdef CONFIG_SCHED_HMP
@@ -10584,21 +11340,6 @@
s64 highest_spare_capacity;
};
-/*
- * Should task be woken to any available idle cpu?
- *
- * Waking tasks to idle cpu has mixed implications on both performance and
- * power. In many cases, scheduler can't estimate correctly impact of using idle
- * cpus on either performance or power. PF_WAKE_UP_IDLE allows external kernel
- * module to pass a strong hint to scheduler that the task in question should be
- * woken to idle cpu, generally to improve performance.
- */
-static inline int wake_to_idle(struct task_struct *p)
-{
- return (current->flags & PF_WAKE_UP_IDLE) ||
- (p->flags & PF_WAKE_UP_IDLE);
-}
-
static int spill_threshold_crossed(struct cpu_select_env *env, struct rq *rq)
{
u64 total_load;
@@ -11222,128 +11963,6 @@
return target;
}
-#ifdef CONFIG_CFS_BANDWIDTH
-
-static inline struct task_group *next_task_group(struct task_group *tg)
-{
- tg = list_entry_rcu(tg->list.next, typeof(struct task_group), list);
-
- return (&tg->list == &task_groups) ? NULL : tg;
-}
-
-/* Iterate over all cfs_rq in a cpu */
-#define for_each_cfs_rq(cfs_rq, tg, cpu) \
- for (tg = container_of(&task_groups, struct task_group, list); \
- ((tg = next_task_group(tg)) && (cfs_rq = tg->cfs_rq[cpu]));)
-
-void reset_cfs_rq_hmp_stats(int cpu, int reset_cra)
-{
- struct task_group *tg;
- struct cfs_rq *cfs_rq;
-
- rcu_read_lock();
-
- for_each_cfs_rq(cfs_rq, tg, cpu)
- reset_hmp_stats(&cfs_rq->hmp_stats, reset_cra);
-
- rcu_read_unlock();
-}
-
-static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq);
-
-static void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
- struct task_struct *p, int change_cra);
-static void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
- struct task_struct *p, int change_cra);
-
-/* Add task's contribution to a cpu' HMP statistics */
-void inc_hmp_sched_stats_fair(struct rq *rq,
- struct task_struct *p, int change_cra)
-{
- struct cfs_rq *cfs_rq;
- struct sched_entity *se = &p->se;
-
- /*
- * Although below check is not strictly required (as
- * inc/dec_nr_big_task and inc/dec_cumulative_runnable_avg called
- * from inc_cfs_rq_hmp_stats() have similar checks), we gain a bit on
- * efficiency by short-circuiting for_each_sched_entity() loop when
- * sched_disable_window_stats
- */
- if (sched_disable_window_stats)
- return;
-
- for_each_sched_entity(se) {
- cfs_rq = cfs_rq_of(se);
- inc_cfs_rq_hmp_stats(cfs_rq, p, change_cra);
- if (cfs_rq_throttled(cfs_rq))
- break;
- }
-
- /* Update rq->hmp_stats only if we didn't find any throttled cfs_rq */
- if (!se)
- inc_rq_hmp_stats(rq, p, change_cra);
-}
-
-static void fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
- u32 new_task_load, u32 new_pred_demand)
-{
- struct cfs_rq *cfs_rq;
- struct sched_entity *se = &p->se;
- s64 task_load_delta = (s64)new_task_load - task_load(p);
- s64 pred_demand_delta = PRED_DEMAND_DELTA;
-
- for_each_sched_entity(se) {
- cfs_rq = cfs_rq_of(se);
-
- fixup_cumulative_runnable_avg(&cfs_rq->hmp_stats, p,
- task_load_delta,
- pred_demand_delta);
- fixup_nr_big_tasks(&cfs_rq->hmp_stats, p, task_load_delta);
- if (cfs_rq_throttled(cfs_rq))
- break;
- }
-
- /* Fix up rq->hmp_stats only if we didn't find any throttled cfs_rq */
- if (!se) {
- fixup_cumulative_runnable_avg(&rq->hmp_stats, p,
- task_load_delta,
- pred_demand_delta);
- fixup_nr_big_tasks(&rq->hmp_stats, p, task_load_delta);
- }
-}
-
-static int task_will_be_throttled(struct task_struct *p);
-
-#else /* CONFIG_CFS_BANDWIDTH */
-
-inline void reset_cfs_rq_hmp_stats(int cpu, int reset_cra) { }
-
-static void
-fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
- u32 new_task_load, u32 new_pred_demand)
-{
- s64 task_load_delta = (s64)new_task_load - task_load(p);
- s64 pred_demand_delta = PRED_DEMAND_DELTA;
-
- fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta,
- pred_demand_delta);
- fixup_nr_big_tasks(&rq->hmp_stats, p, task_load_delta);
-}
-
-static inline int task_will_be_throttled(struct task_struct *p)
-{
- return 0;
-}
-
-void inc_hmp_sched_stats_fair(struct rq *rq,
- struct task_struct *p, int change_cra)
-{
- inc_nr_big_task(&rq->hmp_stats, p);
-}
-
-#endif /* CONFIG_CFS_BANDWIDTH */
-
/*
* Reset balance_interval at all sched_domain levels of given cpu, so that it
* honors kick.
@@ -11411,26 +12030,6 @@
return 0;
}
-static inline int
-kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu)
-{
- unsigned long flags;
- int rc = 0;
-
- /* Invoke active balance to force migrate currently running task */
- raw_spin_lock_irqsave(&rq->lock, flags);
- if (!rq->active_balance) {
- rq->active_balance = 1;
- rq->push_cpu = new_cpu;
- get_task_struct(p);
- rq->push_task = p;
- rc = 1;
- }
- raw_spin_unlock_irqrestore(&rq->lock, flags);
-
- return rc;
-}
-
static DEFINE_RAW_SPINLOCK(migration_lock);
/*
@@ -11706,4 +12305,30 @@
}
#endif /* CONFIG_CFS_BANDWIDTH */
+#elif defined(CONFIG_SCHED_WALT)
+
+void check_for_migration(struct rq *rq, struct task_struct *p)
+{
+ int new_cpu;
+ int active_balance;
+ int cpu = task_cpu(p);
+
+ if (rq->misfit_task) {
+ if (rq->curr->state != TASK_RUNNING ||
+ rq->curr->nr_cpus_allowed == 1)
+ return;
+
+ new_cpu = energy_aware_wake_cpu(p, cpu, 0);
+ if (new_cpu != cpu) {
+ active_balance = kick_active_balance(rq, p, new_cpu);
+ if (active_balance) {
+ mark_reserved(new_cpu);
+ stop_one_cpu_nowait(cpu,
+ active_load_balance_cpu_stop, rq,
+ &rq->active_balance_work);
+ }
+ }
+ }
+}
+
#endif /* CONFIG_SCHED_HMP */
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index 4de373f..6c28298 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -19,53 +19,12 @@
#include <linux/syscore_ops.h>
#include "sched.h"
+#include "walt.h"
#include <trace/events/sched.h>
#define CSTATE_LATENCY_GRANULARITY_SHIFT (6)
-const char *task_event_names[] = {"PUT_PREV_TASK", "PICK_NEXT_TASK",
- "TASK_WAKE", "TASK_MIGRATE", "TASK_UPDATE", "IRQ_UPDATE"};
-
-const char *migrate_type_names[] = {"GROUP_TO_RQ", "RQ_TO_GROUP"};
-
-static ktime_t ktime_last;
-static bool sched_ktime_suspended;
-
-static bool use_cycle_counter;
-static struct cpu_cycle_counter_cb cpu_cycle_counter_cb;
-
-u64 sched_ktime_clock(void)
-{
- if (unlikely(sched_ktime_suspended))
- return ktime_to_ns(ktime_last);
- return ktime_get_ns();
-}
-
-static void sched_resume(void)
-{
- sched_ktime_suspended = false;
-}
-
-static int sched_suspend(void)
-{
- ktime_last = ktime_get();
- sched_ktime_suspended = true;
- return 0;
-}
-
-static struct syscore_ops sched_syscore_ops = {
- .resume = sched_resume,
- .suspend = sched_suspend
-};
-
-static int __init sched_init_ops(void)
-{
- register_syscore_ops(&sched_syscore_ops);
- return 0;
-}
-late_initcall(sched_init_ops);
-
inline void clear_ed_task(struct task_struct *p, struct rq *rq)
{
if (p == rq->ed_task)
@@ -222,428 +181,11 @@
return ret;
}
-unsigned int max_possible_efficiency = 1;
-unsigned int min_possible_efficiency = UINT_MAX;
-
unsigned long __weak arch_get_cpu_efficiency(int cpu)
{
return SCHED_CAPACITY_SCALE;
}
-/* Keep track of max/min capacity possible across CPUs "currently" */
-static void __update_min_max_capacity(void)
-{
- int i;
- int max_cap = 0, min_cap = INT_MAX;
-
- for_each_online_cpu(i) {
- max_cap = max(max_cap, cpu_capacity(i));
- min_cap = min(min_cap, cpu_capacity(i));
- }
-
- max_capacity = max_cap;
- min_capacity = min_cap;
-}
-
-static void update_min_max_capacity(void)
-{
- unsigned long flags;
- int i;
-
- local_irq_save(flags);
- for_each_possible_cpu(i)
- raw_spin_lock(&cpu_rq(i)->lock);
-
- __update_min_max_capacity();
-
- for_each_possible_cpu(i)
- raw_spin_unlock(&cpu_rq(i)->lock);
- local_irq_restore(flags);
-}
-
-/*
- * Return 'capacity' of a cpu in reference to "least" efficient cpu, such that
- * least efficient cpu gets capacity of 1024
- */
-static unsigned long
-capacity_scale_cpu_efficiency(struct sched_cluster *cluster)
-{
- return (1024 * cluster->efficiency) / min_possible_efficiency;
-}
-
-/*
- * Return 'capacity' of a cpu in reference to cpu with lowest max_freq
- * (min_max_freq), such that one with lowest max_freq gets capacity of 1024.
- */
-static unsigned long capacity_scale_cpu_freq(struct sched_cluster *cluster)
-{
- return (1024 * cluster_max_freq(cluster)) / min_max_freq;
-}
-
-/*
- * Return load_scale_factor of a cpu in reference to "most" efficient cpu, so
- * that "most" efficient cpu gets a load_scale_factor of 1
- */
-static inline unsigned long
-load_scale_cpu_efficiency(struct sched_cluster *cluster)
-{
- return DIV_ROUND_UP(1024 * max_possible_efficiency,
- cluster->efficiency);
-}
-
-/*
- * Return load_scale_factor of a cpu in reference to cpu with best max_freq
- * (max_possible_freq), so that one with best max_freq gets a load_scale_factor
- * of 1.
- */
-static inline unsigned long load_scale_cpu_freq(struct sched_cluster *cluster)
-{
- return DIV_ROUND_UP(1024 * max_possible_freq,
- cluster_max_freq(cluster));
-}
-
-static int compute_capacity(struct sched_cluster *cluster)
-{
- int capacity = 1024;
-
- capacity *= capacity_scale_cpu_efficiency(cluster);
- capacity >>= 10;
-
- capacity *= capacity_scale_cpu_freq(cluster);
- capacity >>= 10;
-
- return capacity;
-}
-
-static int compute_max_possible_capacity(struct sched_cluster *cluster)
-{
- int capacity = 1024;
-
- capacity *= capacity_scale_cpu_efficiency(cluster);
- capacity >>= 10;
-
- capacity *= (1024 * cluster->max_possible_freq) / min_max_freq;
- capacity >>= 10;
-
- return capacity;
-}
-
-static int compute_load_scale_factor(struct sched_cluster *cluster)
-{
- int load_scale = 1024;
-
- /*
- * load_scale_factor accounts for the fact that task load
- * is in reference to "best" performing cpu. Task's load will need to be
- * scaled (up) by a factor to determine suitability to be placed on a
- * (little) cpu.
- */
- load_scale *= load_scale_cpu_efficiency(cluster);
- load_scale >>= 10;
-
- load_scale *= load_scale_cpu_freq(cluster);
- load_scale >>= 10;
-
- return load_scale;
-}
-
-struct list_head cluster_head;
-static DEFINE_MUTEX(cluster_lock);
-static cpumask_t all_cluster_cpus = CPU_MASK_NONE;
-DECLARE_BITMAP(all_cluster_ids, NR_CPUS);
-struct sched_cluster *sched_cluster[NR_CPUS];
-int num_clusters;
-
-unsigned int max_power_cost = 1;
-
-struct sched_cluster init_cluster = {
- .list = LIST_HEAD_INIT(init_cluster.list),
- .id = 0,
- .max_power_cost = 1,
- .min_power_cost = 1,
- .capacity = 1024,
- .max_possible_capacity = 1024,
- .efficiency = 1,
- .load_scale_factor = 1024,
- .cur_freq = 1,
- .max_freq = 1,
- .max_mitigated_freq = UINT_MAX,
- .min_freq = 1,
- .max_possible_freq = 1,
- .dstate = 0,
- .dstate_wakeup_energy = 0,
- .dstate_wakeup_latency = 0,
- .exec_scale_factor = 1024,
- .notifier_sent = 0,
- .wake_up_idle = 0,
-};
-
-static void update_all_clusters_stats(void)
-{
- struct sched_cluster *cluster;
- u64 highest_mpc = 0, lowest_mpc = U64_MAX;
-
- pre_big_task_count_change(cpu_possible_mask);
-
- for_each_sched_cluster(cluster) {
- u64 mpc;
-
- cluster->capacity = compute_capacity(cluster);
- mpc = cluster->max_possible_capacity =
- compute_max_possible_capacity(cluster);
- cluster->load_scale_factor = compute_load_scale_factor(cluster);
-
- cluster->exec_scale_factor =
- DIV_ROUND_UP(cluster->efficiency * 1024,
- max_possible_efficiency);
-
- if (mpc > highest_mpc)
- highest_mpc = mpc;
-
- if (mpc < lowest_mpc)
- lowest_mpc = mpc;
- }
-
- max_possible_capacity = highest_mpc;
- min_max_possible_capacity = lowest_mpc;
-
- __update_min_max_capacity();
- sched_update_freq_max_load(cpu_possible_mask);
- post_big_task_count_change(cpu_possible_mask);
-}
-
-static void assign_cluster_ids(struct list_head *head)
-{
- struct sched_cluster *cluster;
- int pos = 0;
-
- list_for_each_entry(cluster, head, list) {
- cluster->id = pos;
- sched_cluster[pos++] = cluster;
- }
-}
-
-static void
-move_list(struct list_head *dst, struct list_head *src, bool sync_rcu)
-{
- struct list_head *first, *last;
-
- first = src->next;
- last = src->prev;
-
- if (sync_rcu) {
- INIT_LIST_HEAD_RCU(src);
- synchronize_rcu();
- }
-
- first->prev = dst;
- dst->prev = last;
- last->next = dst;
-
- /* Ensure list sanity before making the head visible to all CPUs. */
- smp_mb();
- dst->next = first;
-}
-
-static int
-compare_clusters(void *priv, struct list_head *a, struct list_head *b)
-{
- struct sched_cluster *cluster1, *cluster2;
- int ret;
-
- cluster1 = container_of(a, struct sched_cluster, list);
- cluster2 = container_of(b, struct sched_cluster, list);
-
- /*
- * Don't assume higher capacity means higher power. If the
- * power cost is same, sort the higher capacity cluster before
- * the lower capacity cluster to start placing the tasks
- * on the higher capacity cluster.
- */
- ret = cluster1->max_power_cost > cluster2->max_power_cost ||
- (cluster1->max_power_cost == cluster2->max_power_cost &&
- cluster1->max_possible_capacity <
- cluster2->max_possible_capacity);
-
- return ret;
-}
-
-static void sort_clusters(void)
-{
- struct sched_cluster *cluster;
- struct list_head new_head;
- unsigned int tmp_max = 1;
-
- INIT_LIST_HEAD(&new_head);
-
- for_each_sched_cluster(cluster) {
- cluster->max_power_cost = power_cost(cluster_first_cpu(cluster),
- max_task_load());
- cluster->min_power_cost = power_cost(cluster_first_cpu(cluster),
- 0);
-
- if (cluster->max_power_cost > tmp_max)
- tmp_max = cluster->max_power_cost;
- }
- max_power_cost = tmp_max;
-
- move_list(&new_head, &cluster_head, true);
-
- list_sort(NULL, &new_head, compare_clusters);
- assign_cluster_ids(&new_head);
-
- /*
- * Ensure cluster ids are visible to all CPUs before making
- * cluster_head visible.
- */
- move_list(&cluster_head, &new_head, false);
-}
-
-static void
-insert_cluster(struct sched_cluster *cluster, struct list_head *head)
-{
- struct sched_cluster *tmp;
- struct list_head *iter = head;
-
- list_for_each_entry(tmp, head, list) {
- if (cluster->max_power_cost < tmp->max_power_cost)
- break;
- iter = &tmp->list;
- }
-
- list_add(&cluster->list, iter);
-}
-
-static struct sched_cluster *alloc_new_cluster(const struct cpumask *cpus)
-{
- struct sched_cluster *cluster = NULL;
-
- cluster = kzalloc(sizeof(struct sched_cluster), GFP_ATOMIC);
- if (!cluster) {
- __WARN_printf("Cluster allocation failed. \
- Possible bad scheduling\n");
- return NULL;
- }
-
- INIT_LIST_HEAD(&cluster->list);
- cluster->max_power_cost = 1;
- cluster->min_power_cost = 1;
- cluster->capacity = 1024;
- cluster->max_possible_capacity = 1024;
- cluster->efficiency = 1;
- cluster->load_scale_factor = 1024;
- cluster->cur_freq = 1;
- cluster->max_freq = 1;
- cluster->max_mitigated_freq = UINT_MAX;
- cluster->min_freq = 1;
- cluster->max_possible_freq = 1;
- cluster->dstate = 0;
- cluster->dstate_wakeup_energy = 0;
- cluster->dstate_wakeup_latency = 0;
- cluster->freq_init_done = false;
-
- raw_spin_lock_init(&cluster->load_lock);
- cluster->cpus = *cpus;
- cluster->efficiency = arch_get_cpu_efficiency(cpumask_first(cpus));
-
- if (cluster->efficiency > max_possible_efficiency)
- max_possible_efficiency = cluster->efficiency;
- if (cluster->efficiency < min_possible_efficiency)
- min_possible_efficiency = cluster->efficiency;
-
- cluster->notifier_sent = 0;
- return cluster;
-}
-
-static void add_cluster(const struct cpumask *cpus, struct list_head *head)
-{
- struct sched_cluster *cluster = alloc_new_cluster(cpus);
- int i;
-
- if (!cluster)
- return;
-
- for_each_cpu(i, cpus)
- cpu_rq(i)->cluster = cluster;
-
- insert_cluster(cluster, head);
- set_bit(num_clusters, all_cluster_ids);
- num_clusters++;
-}
-
-void update_cluster_topology(void)
-{
- struct cpumask cpus = *cpu_possible_mask;
- const struct cpumask *cluster_cpus;
- struct list_head new_head;
- int i;
-
- INIT_LIST_HEAD(&new_head);
-
- for_each_cpu(i, &cpus) {
- cluster_cpus = cpu_coregroup_mask(i);
- cpumask_or(&all_cluster_cpus, &all_cluster_cpus, cluster_cpus);
- cpumask_andnot(&cpus, &cpus, cluster_cpus);
- add_cluster(cluster_cpus, &new_head);
- }
-
- assign_cluster_ids(&new_head);
-
- /*
- * Ensure cluster ids are visible to all CPUs before making
- * cluster_head visible.
- */
- move_list(&cluster_head, &new_head, false);
- update_all_clusters_stats();
-}
-
-void init_clusters(void)
-{
- bitmap_clear(all_cluster_ids, 0, NR_CPUS);
- init_cluster.cpus = *cpu_possible_mask;
- raw_spin_lock_init(&init_cluster.load_lock);
- INIT_LIST_HEAD(&cluster_head);
-}
-
-int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb)
-{
- mutex_lock(&cluster_lock);
- if (!cb->get_cpu_cycle_counter) {
- mutex_unlock(&cluster_lock);
- return -EINVAL;
- }
-
- cpu_cycle_counter_cb = *cb;
- use_cycle_counter = true;
- mutex_unlock(&cluster_lock);
-
- return 0;
-}
-
-/* Clear any HMP scheduler related requests pending from or on cpu */
-void clear_hmp_request(int cpu)
-{
- struct rq *rq = cpu_rq(cpu);
- unsigned long flags;
-
- clear_boost_kick(cpu);
- clear_reserved(cpu);
- if (rq->push_task) {
- struct task_struct *push_task = NULL;
-
- raw_spin_lock_irqsave(&rq->lock, flags);
- if (rq->push_task) {
- clear_reserved(rq->push_cpu);
- push_task = rq->push_task;
- rq->push_task = NULL;
- }
- rq->active_balance = 0;
- raw_spin_unlock_irqrestore(&rq->lock, flags);
- if (push_task)
- put_task_struct(push_task);
- }
-}
-
int sched_set_static_cpu_pwr_cost(int cpu, unsigned int cost)
{
struct rq *rq = cpu_rq(cpu);
@@ -684,49 +226,12 @@
}
/*
- * sched_window_stats_policy and sched_ravg_hist_size have a 'sysctl' copy
- * associated with them. This is required for atomic update of those variables
- * when being modifed via sysctl interface.
- *
- * IMPORTANT: Initialize both copies to same value!!
- */
-
-/*
* Tasks that are runnable continuously for a period greather than
* EARLY_DETECTION_DURATION can be flagged early as potential
* high load tasks.
*/
#define EARLY_DETECTION_DURATION 9500000
-static __read_mostly unsigned int sched_ravg_hist_size = 5;
-__read_mostly unsigned int sysctl_sched_ravg_hist_size = 5;
-
-static __read_mostly unsigned int sched_window_stats_policy =
- WINDOW_STATS_MAX_RECENT_AVG;
-__read_mostly unsigned int sysctl_sched_window_stats_policy =
- WINDOW_STATS_MAX_RECENT_AVG;
-
-#define SCHED_ACCOUNT_WAIT_TIME 1
-
-__read_mostly unsigned int sysctl_sched_cpu_high_irqload = (10 * NSEC_PER_MSEC);
-
-/*
- * Enable colocation and frequency aggregation for all threads in a process.
- * The children inherits the group id from the parent.
- */
-unsigned int __read_mostly sysctl_sched_enable_thread_grouping;
-
-
-#define SCHED_NEW_TASK_WINDOWS 5
-
-#define SCHED_FREQ_ACCOUNT_WAIT_TIME 0
-
-/*
- * This governs what load needs to be used when reporting CPU busy time
- * to the cpufreq governor.
- */
-__read_mostly unsigned int sysctl_sched_freq_reporting_policy;
-
/*
* For increase, send notification if
* freq_required - cur_freq > sysctl_sched_freq_inc_notify
@@ -738,129 +243,20 @@
* cur_freq - freq_required > sysctl_sched_freq_dec_notify
*/
__read_mostly int sysctl_sched_freq_dec_notify = 10 * 1024 * 1024; /* - 10GHz */
-
-static __read_mostly unsigned int sched_io_is_busy;
-
__read_mostly unsigned int sysctl_sched_pred_alert_freq = 10 * 1024 * 1024;
-/*
- * Maximum possible frequency across all cpus. Task demand and cpu
- * capacity (cpu_power) metrics are scaled in reference to it.
- */
-unsigned int max_possible_freq = 1;
-
-/*
- * Minimum possible max_freq across all cpus. This will be same as
- * max_possible_freq on homogeneous systems and could be different from
- * max_possible_freq on heterogenous systems. min_max_freq is used to derive
- * capacity (cpu_power) of cpus.
- */
-unsigned int min_max_freq = 1;
-
-unsigned int max_capacity = 1024; /* max(rq->capacity) */
-unsigned int min_capacity = 1024; /* min(rq->capacity) */
-unsigned int max_possible_capacity = 1024; /* max(rq->max_possible_capacity) */
-unsigned int
-min_max_possible_capacity = 1024; /* min(rq->max_possible_capacity) */
-
-/* Min window size (in ns) = 10ms */
-#define MIN_SCHED_RAVG_WINDOW 10000000
-
-/* Max window size (in ns) = 1s */
-#define MAX_SCHED_RAVG_WINDOW 1000000000
-
-/* Window size (in ns) */
-__read_mostly unsigned int sched_ravg_window = MIN_SCHED_RAVG_WINDOW;
-
/* Maximum allowed threshold before freq aggregation must be enabled */
#define MAX_FREQ_AGGR_THRESH 1000
-/* Temporarily disable window-stats activity on all cpus */
-unsigned int __read_mostly sched_disable_window_stats;
-
-struct related_thread_group *related_thread_groups[MAX_NUM_CGROUP_COLOC_ID];
-static LIST_HEAD(active_related_thread_groups);
-static DEFINE_RWLOCK(related_thread_group_lock);
-
#define for_each_related_thread_group(grp) \
list_for_each_entry(grp, &active_related_thread_groups, list)
-/*
- * Task load is categorized into buckets for the purpose of top task tracking.
- * The entire range of load from 0 to sched_ravg_window needs to be covered
- * in NUM_LOAD_INDICES number of buckets. Therefore the size of each bucket
- * is given by sched_ravg_window / NUM_LOAD_INDICES. Since the default value
- * of sched_ravg_window is MIN_SCHED_RAVG_WINDOW, use that to compute
- * sched_load_granule.
- */
-__read_mostly unsigned int sched_load_granule =
- MIN_SCHED_RAVG_WINDOW / NUM_LOAD_INDICES;
-
/* Size of bitmaps maintained to track top tasks */
static const unsigned int top_tasks_bitmap_size =
BITS_TO_LONGS(NUM_LOAD_INDICES + 1) * sizeof(unsigned long);
-/*
- * Demand aggregation for frequency purpose:
- *
- * 'sched_freq_aggregate' controls aggregation of cpu demand of related threads
- * for frequency determination purpose. This aggregation is done per-cluster.
- *
- * CPU demand of tasks from various related groups is aggregated per-cluster and
- * added to the "max_busy_cpu" in that cluster, where max_busy_cpu is determined
- * by just rq->prev_runnable_sum.
- *
- * Some examples follow, which assume:
- * Cluster0 = CPU0-3, Cluster1 = CPU4-7
- * One related thread group A that has tasks A0, A1, A2
- *
- * A->cpu_time[X].curr/prev_sum = counters in which cpu execution stats of
- * tasks belonging to group A are accumulated when they run on cpu X.
- *
- * CX->curr/prev_sum = counters in which cpu execution stats of all tasks
- * not belonging to group A are accumulated when they run on cpu X
- *
- * Lets say the stats for window M was as below:
- *
- * C0->prev_sum = 1ms, A->cpu_time[0].prev_sum = 5ms
- * Task A0 ran 5ms on CPU0
- * Task B0 ran 1ms on CPU0
- *
- * C1->prev_sum = 5ms, A->cpu_time[1].prev_sum = 6ms
- * Task A1 ran 4ms on CPU1
- * Task A2 ran 2ms on CPU1
- * Task B1 ran 5ms on CPU1
- *
- * C2->prev_sum = 0ms, A->cpu_time[2].prev_sum = 0
- * CPU2 idle
- *
- * C3->prev_sum = 0ms, A->cpu_time[3].prev_sum = 0
- * CPU3 idle
- *
- * In this case, CPU1 was most busy going by just its prev_sum counter. Demand
- * from all group A tasks are added to CPU1. IOW, at end of window M, cpu busy
- * time reported to governor will be:
- *
- *
- * C0 busy time = 1ms
- * C1 busy time = 5 + 5 + 6 = 16ms
- *
- */
-static __read_mostly unsigned int sched_freq_aggregate = 1;
__read_mostly unsigned int sysctl_sched_freq_aggregate = 1;
-unsigned int __read_mostly sysctl_sched_freq_aggregate_threshold_pct;
-static unsigned int __read_mostly sched_freq_aggregate_threshold;
-
-/* Initial task load. Newly created tasks are assigned this load. */
-unsigned int __read_mostly sched_init_task_load_windows;
-unsigned int __read_mostly sysctl_sched_init_task_load_pct = 15;
-
-unsigned int max_task_load(void)
-{
- return sched_ravg_window;
-}
-
/* A cpu can no longer accommodate more tasks if:
*
* rq->nr_running > sysctl_sched_spill_nr_run ||
@@ -912,21 +308,6 @@
unsigned int __read_mostly sysctl_sched_downmigrate_pct = 60;
/*
- * Task groups whose aggregate demand on a cpu is more than
- * sched_group_upmigrate need to be up-migrated if possible.
- */
-unsigned int __read_mostly sched_group_upmigrate;
-unsigned int __read_mostly sysctl_sched_group_upmigrate_pct = 100;
-
-/*
- * Task groups, once up-migrated, will need to drop their aggregate
- * demand to less than sched_group_downmigrate before they are "down"
- * migrated.
- */
-unsigned int __read_mostly sched_group_downmigrate;
-unsigned int __read_mostly sysctl_sched_group_downmigrate_pct = 95;
-
-/*
* The load scale factor of a CPU gets boosted when its max frequency
* is restricted due to which the tasks are migrating to higher capacity
* CPUs early. The sched_upmigrate threshold is auto-upgraded by
@@ -1027,21 +408,6 @@
pct_to_real(sysctl_sched_freq_aggregate_threshold_pct);
}
-u32 sched_get_init_task_load(struct task_struct *p)
-{
- return p->init_load_pct;
-}
-
-int sched_set_init_task_load(struct task_struct *p, int init_load_pct)
-{
- if (init_load_pct < 0 || init_load_pct > 100)
- return -EINVAL;
-
- p->init_load_pct = init_load_pct;
-
- return 0;
-}
-
#ifdef CONFIG_CGROUP_SCHED
int upmigrate_discouraged(struct task_struct *p)
@@ -1129,37 +495,6 @@
return task_load_will_fit(p, tload, cpu, sched_boost_policy());
}
-static int
-group_will_fit(struct sched_cluster *cluster, struct related_thread_group *grp,
- u64 demand, bool group_boost)
-{
- int cpu = cluster_first_cpu(cluster);
- int prev_capacity = 0;
- unsigned int threshold = sched_group_upmigrate;
- u64 load;
-
- if (cluster->capacity == max_capacity)
- return 1;
-
- if (group_boost)
- return 0;
-
- if (!demand)
- return 1;
-
- if (grp->preferred_cluster)
- prev_capacity = grp->preferred_cluster->capacity;
-
- if (cluster->capacity < prev_capacity)
- threshold = sched_group_downmigrate;
-
- load = scale_load_to_cpu(demand, cpu);
- if (load < threshold)
- return 1;
-
- return 0;
-}
-
/*
* Return the cost of running task p on CPU cpu. This function
* currently assumes that task p is the only task which will run on
@@ -1232,64 +567,6 @@
}
-void inc_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p)
-{
- if (sched_disable_window_stats)
- return;
-
- if (is_big_task(p))
- stats->nr_big_tasks++;
-}
-
-void dec_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p)
-{
- if (sched_disable_window_stats)
- return;
-
- if (is_big_task(p))
- stats->nr_big_tasks--;
-
- BUG_ON(stats->nr_big_tasks < 0);
-}
-
-void inc_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra)
-{
- inc_nr_big_task(&rq->hmp_stats, p);
- if (change_cra)
- inc_cumulative_runnable_avg(&rq->hmp_stats, p);
-}
-
-void dec_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra)
-{
- dec_nr_big_task(&rq->hmp_stats, p);
- if (change_cra)
- dec_cumulative_runnable_avg(&rq->hmp_stats, p);
-}
-
-void reset_hmp_stats(struct hmp_sched_stats *stats, int reset_cra)
-{
- stats->nr_big_tasks = 0;
- if (reset_cra) {
- stats->cumulative_runnable_avg = 0;
- stats->pred_demands_sum = 0;
- }
-}
-
-int preferred_cluster(struct sched_cluster *cluster, struct task_struct *p)
-{
- struct related_thread_group *grp;
- int rc = 1;
-
- rcu_read_lock();
-
- grp = task_related_thread_group(p);
- if (grp)
- rc = (grp->preferred_cluster == cluster);
-
- rcu_read_unlock();
- return rc;
-}
-
struct sched_cluster *rq_cluster(struct rq *rq)
{
return rq->cluster;
@@ -1370,25 +647,6 @@
local_irq_enable();
}
-DEFINE_MUTEX(policy_mutex);
-
-unsigned int update_freq_aggregate_threshold(unsigned int threshold)
-{
- unsigned int old_threshold;
-
- mutex_lock(&policy_mutex);
-
- old_threshold = sysctl_sched_freq_aggregate_threshold_pct;
-
- sysctl_sched_freq_aggregate_threshold_pct = threshold;
- sched_freq_aggregate_threshold =
- pct_to_real(sysctl_sched_freq_aggregate_threshold_pct);
-
- mutex_unlock(&policy_mutex);
-
- return old_threshold;
-}
-
static inline int invalid_value_freq_input(unsigned int *data)
{
if (data == &sysctl_sched_freq_aggregate)
@@ -1539,46 +797,6 @@
p->ravg.prev_window_cpu = NULL;
}
-void init_new_task_load(struct task_struct *p, bool idle_task)
-{
- int i;
- u32 init_load_windows = sched_init_task_load_windows;
- u32 init_load_pct = current->init_load_pct;
-
- p->init_load_pct = 0;
- rcu_assign_pointer(p->grp, NULL);
- INIT_LIST_HEAD(&p->grp_list);
- memset(&p->ravg, 0, sizeof(struct ravg));
- p->cpu_cycles = 0;
- p->ravg.curr_burst = 0;
- /*
- * Initialize the avg_burst to twice the threshold, so that
- * a task would not be classified as short burst right away
- * after fork. It takes at least 6 sleep-wakeup cycles for
- * the avg_burst to go below the threshold.
- */
- p->ravg.avg_burst = 2 * (u64)sysctl_sched_short_burst;
- p->ravg.avg_sleep_time = 0;
-
- p->ravg.curr_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32), GFP_KERNEL);
- p->ravg.prev_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32), GFP_KERNEL);
-
- /* Don't have much choice. CPU frequency would be bogus */
- BUG_ON(!p->ravg.curr_window_cpu || !p->ravg.prev_window_cpu);
-
- if (idle_task)
- return;
-
- if (init_load_pct)
- init_load_windows = div64_u64((u64)init_load_pct *
- (u64)sched_ravg_window, 100);
-
- p->ravg.demand = init_load_windows;
- p->ravg.pred_demand = 0;
- for (i = 0; i < RAVG_HIST_SIZE_MAX; ++i)
- p->ravg.sum_history[i] = init_load_windows;
-}
-
/* Return task demand in percentage scale */
unsigned int pct_task_load(struct task_struct *p)
{
@@ -1589,29 +807,6 @@
return load;
}
-/*
- * Return total number of tasks "eligible" to run on highest capacity cpu
- *
- * This is simply nr_big_tasks for cpus which are not of max_capacity and
- * nr_running for cpus of max_capacity
- */
-unsigned int nr_eligible_big_tasks(int cpu)
-{
- struct rq *rq = cpu_rq(cpu);
- int nr_big = rq->hmp_stats.nr_big_tasks;
- int nr = rq->nr_running;
-
- if (cpu_max_possible_capacity(cpu) != max_possible_capacity)
- return nr_big;
-
- return nr;
-}
-
-static inline int exiting_task(struct task_struct *p)
-{
- return (p->ravg.sum_history[0] == EXITING_TASK_MARKER);
-}
-
static int __init set_sched_ravg_window(char *str)
{
unsigned int window_size;
@@ -1630,21 +825,6 @@
early_param("sched_ravg_window", set_sched_ravg_window);
-static inline void
-update_window_start(struct rq *rq, u64 wallclock)
-{
- s64 delta;
- int nr_windows;
-
- delta = wallclock - rq->window_start;
- BUG_ON(delta < 0);
- if (delta < sched_ravg_window)
- return;
-
- nr_windows = div64_u64(delta, sched_ravg_window);
- rq->window_start += (u64)nr_windows * (u64)sched_ravg_window;
-}
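A minimal userspace sketch of the whole-window advance done by the removed update_window_start(); the 20 ms window length and the helper name are illustrative assumptions, not values taken from this patch:

/* Standalone sketch: window_start only ever moves forward by whole windows. */
#include <stdint.h>
#include <stdio.h>

#define WINDOW_NS 20000000ULL                   /* assumed 20 ms window */

static uint64_t advance_window_start(uint64_t window_start, uint64_t wallclock)
{
        uint64_t delta = wallclock - window_start;

        if (delta < WINDOW_NS)                  /* still inside this window */
                return window_start;

        return window_start + (delta / WINDOW_NS) * WINDOW_NS;
}

int main(void)
{
        uint64_t ws = 0, now = 3 * WINDOW_NS + 12345;

        printf("new window_start = %llu\n",
               (unsigned long long)advance_window_start(ws, now));
        return 0;
}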
-
#define DIV64_U64_ROUNDUP(X, Y) div64_u64((X) + (Y - 1), Y)
static inline u64 scale_exec_time(u64 delta, struct rq *rq)
@@ -1659,14 +839,6 @@
return delta;
}
-static inline int cpu_is_waiting_on_io(struct rq *rq)
-{
- if (!sched_io_is_busy)
- return 0;
-
- return atomic_read(&rq->nr_iowait);
-}
-
/* Does freq_required sufficiently exceed or fall behind cur_freq? */
static inline int
nearly_same_freq(unsigned int cur_freq, unsigned int freq_required)
@@ -1712,7 +884,6 @@
}
}
-static inline u64 freq_policy_load(struct rq *rq, u64 load);
/*
* Should scheduler alert governor for changing frequency?
*
@@ -1814,44 +985,6 @@
}
}
-static int account_busy_for_cpu_time(struct rq *rq, struct task_struct *p,
- u64 irqtime, int event)
-{
- if (is_idle_task(p)) {
- /* TASK_WAKE && TASK_MIGRATE is not possible on idle task! */
- if (event == PICK_NEXT_TASK)
- return 0;
-
- /* PUT_PREV_TASK, TASK_UPDATE && IRQ_UPDATE are left */
- return irqtime || cpu_is_waiting_on_io(rq);
- }
-
- if (event == TASK_WAKE)
- return 0;
-
- if (event == PUT_PREV_TASK || event == IRQ_UPDATE)
- return 1;
-
- /*
- * TASK_UPDATE can be called on a sleeping task, when it's moved between
- * related groups
- */
- if (event == TASK_UPDATE) {
- if (rq->curr == p)
- return 1;
-
- return p->on_rq ? SCHED_FREQ_ACCOUNT_WAIT_TIME : 0;
- }
-
- /* TASK_MIGRATE, PICK_NEXT_TASK left */
- return SCHED_FREQ_ACCOUNT_WAIT_TIME;
-}
-
-static inline bool is_new_task(struct task_struct *p)
-{
- return p->ravg.active_windows < SCHED_NEW_TASK_WINDOWS;
-}
-
#define INC_STEP 8
#define DEC_STEP 2
#define CONSISTENT_THRES 16
@@ -1906,12 +1039,6 @@
return bidx;
}
-static inline u64
-scale_load_to_freq(u64 load, unsigned int src_freq, unsigned int dst_freq)
-{
- return div64_u64(load * (u64)src_freq, (u64)dst_freq);
-}
-
/*
* get_pred_busy - calculate predicted demand for a task on runqueue
*
@@ -2004,975 +1131,6 @@
p->ravg.curr_window);
}
-/*
- * predictive demand of a task is calculated at the window roll-over.
- * if the task current window busy time exceeds the predicted
- * demand, update it here to reflect the task needs.
- */
-void update_task_pred_demand(struct rq *rq, struct task_struct *p, int event)
-{
- u32 new, old;
-
- if (is_idle_task(p) || exiting_task(p))
- return;
-
- if (event != PUT_PREV_TASK && event != TASK_UPDATE &&
- (!SCHED_FREQ_ACCOUNT_WAIT_TIME ||
- (event != TASK_MIGRATE &&
- event != PICK_NEXT_TASK)))
- return;
-
- /*
- * TASK_UPDATE can be called on a sleeping task, when it's moved between
- * related groups
- */
- if (event == TASK_UPDATE) {
- if (!p->on_rq && !SCHED_FREQ_ACCOUNT_WAIT_TIME)
- return;
- }
-
- new = calc_pred_demand(rq, p);
- old = p->ravg.pred_demand;
-
- if (old >= new)
- return;
-
- if (task_on_rq_queued(p) && (!task_has_dl_policy(p) ||
- !p->dl.dl_throttled))
- p->sched_class->fixup_hmp_sched_stats(rq, p,
- p->ravg.demand,
- new);
-
- p->ravg.pred_demand = new;
-}
-
-void clear_top_tasks_bitmap(unsigned long *bitmap)
-{
- memset(bitmap, 0, top_tasks_bitmap_size);
- __set_bit(NUM_LOAD_INDICES, bitmap);
-}
-
-/*
- * Special case the last index and provide a fast path for index = 0.
- * Note that sched_load_granule can change underneath us if we are not
- * holding any runqueue locks while calling the two functions below.
- */
-static u32 top_task_load(struct rq *rq)
-{
- int index = rq->prev_top;
- u8 prev = 1 - rq->curr_table;
-
- if (!index) {
- int msb = NUM_LOAD_INDICES - 1;
-
- if (!test_bit(msb, rq->top_tasks_bitmap[prev]))
- return 0;
- else
- return sched_load_granule;
- } else if (index == NUM_LOAD_INDICES - 1) {
- return sched_ravg_window;
- } else {
- return (index + 1) * sched_load_granule;
- }
-}
-
-static int load_to_index(u32 load)
-{
- if (load < sched_load_granule)
- return 0;
- else if (load >= sched_ravg_window)
- return NUM_LOAD_INDICES - 1;
- else
- return load / sched_load_granule;
-}
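A standalone sketch of this load-to-index bucketing together with the inverse mapping used by top_task_load(); NUM_LOAD_INDICES matches the value kept in sched.h by this patch, while the 20 ms window and the granule formula (window / NUM_LOAD_INDICES) are assumptions for illustration:

#include <stdint.h>
#include <stdio.h>

#define NUM_LOAD_INDICES 1000U
#define RAVG_WINDOW      20000000U              /* assumed 20 ms window */
#define LOAD_GRANULE     (RAVG_WINDOW / NUM_LOAD_INDICES)

static unsigned int load_to_index(uint32_t load)
{
        if (load < LOAD_GRANULE)
                return 0;
        if (load >= RAVG_WINDOW)
                return NUM_LOAD_INDICES - 1;
        return load / LOAD_GRANULE;
}

/* Representative load reported back for a given top index. */
static uint32_t index_to_load(unsigned int index)
{
        if (index == 0)
                return LOAD_GRANULE;
        if (index == NUM_LOAD_INDICES - 1)
                return RAVG_WINDOW;
        return (index + 1) * LOAD_GRANULE;
}

int main(void)
{
        uint32_t load = 5300000;                /* 5.3 ms of busy time */
        unsigned int idx = load_to_index(load);

        printf("index %u reports %u ns\n", idx, index_to_load(idx));
        return 0;
}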
-
-static void update_top_tasks(struct task_struct *p, struct rq *rq,
- u32 old_curr_window, int new_window, bool full_window)
-{
- u8 curr = rq->curr_table;
- u8 prev = 1 - curr;
- u8 *curr_table = rq->top_tasks[curr];
- u8 *prev_table = rq->top_tasks[prev];
- int old_index, new_index, update_index;
- u32 curr_window = p->ravg.curr_window;
- u32 prev_window = p->ravg.prev_window;
- bool zero_index_update;
-
- if (old_curr_window == curr_window && !new_window)
- return;
-
- old_index = load_to_index(old_curr_window);
- new_index = load_to_index(curr_window);
-
- if (!new_window) {
- zero_index_update = !old_curr_window && curr_window;
- if (old_index != new_index || zero_index_update) {
- if (old_curr_window)
- curr_table[old_index] -= 1;
- if (curr_window)
- curr_table[new_index] += 1;
- if (new_index > rq->curr_top)
- rq->curr_top = new_index;
- }
-
- if (!curr_table[old_index])
- __clear_bit(NUM_LOAD_INDICES - old_index - 1,
- rq->top_tasks_bitmap[curr]);
-
- if (curr_table[new_index] == 1)
- __set_bit(NUM_LOAD_INDICES - new_index - 1,
- rq->top_tasks_bitmap[curr]);
-
- return;
- }
-
- /*
- * The window has rolled over for this task. By the time we get
- * here, curr/prev swaps would have already occurred. So we need
- * to use prev_window for the new index.
- */
- update_index = load_to_index(prev_window);
-
- if (full_window) {
- /*
- * Two cases here. Either 'p' ran for the entire window or
- * it didn't run at all. In either case there is no entry
- * in the prev table. If 'p' ran the entire window, we just
- * need to create a new entry in the prev table. In this case
- * update_index will correspond to sched_ravg_window
- * so we can unconditionally update the top index.
- */
- if (prev_window) {
- prev_table[update_index] += 1;
- rq->prev_top = update_index;
- }
-
- if (prev_table[update_index] == 1)
- __set_bit(NUM_LOAD_INDICES - update_index - 1,
- rq->top_tasks_bitmap[prev]);
- } else {
- zero_index_update = !old_curr_window && prev_window;
- if (old_index != update_index || zero_index_update) {
- if (old_curr_window)
- prev_table[old_index] -= 1;
-
- prev_table[update_index] += 1;
-
- if (update_index > rq->prev_top)
- rq->prev_top = update_index;
-
- if (!prev_table[old_index])
- __clear_bit(NUM_LOAD_INDICES - old_index - 1,
- rq->top_tasks_bitmap[prev]);
-
- if (prev_table[update_index] == 1)
- __set_bit(NUM_LOAD_INDICES - update_index - 1,
- rq->top_tasks_bitmap[prev]);
- }
- }
-
- if (curr_window) {
- curr_table[new_index] += 1;
-
- if (new_index > rq->curr_top)
- rq->curr_top = new_index;
-
- if (curr_table[new_index] == 1)
- __set_bit(NUM_LOAD_INDICES - new_index - 1,
- rq->top_tasks_bitmap[curr]);
- }
-}
-
-static inline void clear_top_tasks_table(u8 *table)
-{
- memset(table, 0, NUM_LOAD_INDICES * sizeof(u8));
-}
-
-static void rollover_top_tasks(struct rq *rq, bool full_window)
-{
- u8 curr_table = rq->curr_table;
- u8 prev_table = 1 - curr_table;
- int curr_top = rq->curr_top;
-
- clear_top_tasks_table(rq->top_tasks[prev_table]);
- clear_top_tasks_bitmap(rq->top_tasks_bitmap[prev_table]);
-
- if (full_window) {
- curr_top = 0;
- clear_top_tasks_table(rq->top_tasks[curr_table]);
- clear_top_tasks_bitmap(
- rq->top_tasks_bitmap[curr_table]);
- }
-
- rq->curr_table = prev_table;
- rq->prev_top = curr_top;
- rq->curr_top = 0;
-}
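The top-task tables are double-buffered per CPU: at rollover the old prev slot is wiped and recycled as the new curr slot. A toy sketch of that swap, with an invented struct and a tiny table size purely for illustration:

#include <stdio.h>
#include <string.h>

#define SLOTS   2
#define INDICES 8                               /* tiny table for illustration */

struct top_tasks {
        unsigned char table[SLOTS][INDICES];
        unsigned char curr;                     /* which slot is "current" */
        int curr_top, prev_top;
};

static void rollover(struct top_tasks *t, int full_window)
{
        unsigned char prev = 1 - t->curr;

        /* the old prev slot becomes the new (empty) curr slot */
        memset(t->table[prev], 0, sizeof(t->table[prev]));

        if (full_window) {
                /* nothing ran in the last window: wipe curr as well */
                memset(t->table[t->curr], 0, sizeof(t->table[t->curr]));
                t->curr_top = 0;
        }

        t->prev_top = t->curr_top;
        t->curr_top = 0;
        t->curr = prev;
}

int main(void)
{
        struct top_tasks t = { .curr = 0, .curr_top = 3 };

        rollover(&t, 0);
        printf("curr slot=%u prev_top=%d\n", (unsigned)t.curr, t.prev_top);
        return 0;
}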
-
-static u32 empty_windows[NR_CPUS];
-
-static void rollover_task_window(struct task_struct *p, bool full_window)
-{
- u32 *curr_cpu_windows = empty_windows;
- u32 curr_window;
- int i;
-
- /* Rollover the sum */
- curr_window = 0;
-
- if (!full_window) {
- curr_window = p->ravg.curr_window;
- curr_cpu_windows = p->ravg.curr_window_cpu;
- }
-
- p->ravg.prev_window = curr_window;
- p->ravg.curr_window = 0;
-
- /* Roll over individual CPU contributions */
- for (i = 0; i < nr_cpu_ids; i++) {
- p->ravg.prev_window_cpu[i] = curr_cpu_windows[i];
- p->ravg.curr_window_cpu[i] = 0;
- }
-}
-
-static void rollover_cpu_window(struct rq *rq, bool full_window)
-{
- u64 curr_sum = rq->curr_runnable_sum;
- u64 nt_curr_sum = rq->nt_curr_runnable_sum;
- u64 grp_curr_sum = rq->grp_time.curr_runnable_sum;
- u64 grp_nt_curr_sum = rq->grp_time.nt_curr_runnable_sum;
-
- if (unlikely(full_window)) {
- curr_sum = 0;
- nt_curr_sum = 0;
- grp_curr_sum = 0;
- grp_nt_curr_sum = 0;
- }
-
- rq->prev_runnable_sum = curr_sum;
- rq->nt_prev_runnable_sum = nt_curr_sum;
- rq->grp_time.prev_runnable_sum = grp_curr_sum;
- rq->grp_time.nt_prev_runnable_sum = grp_nt_curr_sum;
-
- rq->curr_runnable_sum = 0;
- rq->nt_curr_runnable_sum = 0;
- rq->grp_time.curr_runnable_sum = 0;
- rq->grp_time.nt_curr_runnable_sum = 0;
-}
-
-/*
- * Account cpu activity in its busy time counters (rq->curr/prev_runnable_sum)
- */
-static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
- int event, u64 wallclock, u64 irqtime)
-{
- int new_window, full_window = 0;
- int p_is_curr_task = (p == rq->curr);
- u64 mark_start = p->ravg.mark_start;
- u64 window_start = rq->window_start;
- u32 window_size = sched_ravg_window;
- u64 delta;
- u64 *curr_runnable_sum = &rq->curr_runnable_sum;
- u64 *prev_runnable_sum = &rq->prev_runnable_sum;
- u64 *nt_curr_runnable_sum = &rq->nt_curr_runnable_sum;
- u64 *nt_prev_runnable_sum = &rq->nt_prev_runnable_sum;
- bool new_task;
- struct related_thread_group *grp;
- int cpu = rq->cpu;
- u32 old_curr_window = p->ravg.curr_window;
-
- new_window = mark_start < window_start;
- if (new_window) {
- full_window = (window_start - mark_start) >= window_size;
- if (p->ravg.active_windows < USHRT_MAX)
- p->ravg.active_windows++;
- }
-
- new_task = is_new_task(p);
-
- /*
- * Handle per-task window rollover. We don't care about the idle
- * task or exiting tasks.
- */
- if (!is_idle_task(p) && !exiting_task(p)) {
- if (new_window)
- rollover_task_window(p, full_window);
- }
-
- if (p_is_curr_task && new_window) {
- rollover_cpu_window(rq, full_window);
- rollover_top_tasks(rq, full_window);
- }
-
- if (!account_busy_for_cpu_time(rq, p, irqtime, event))
- goto done;
-
- grp = p->grp;
- if (grp && sched_freq_aggregate) {
- struct group_cpu_time *cpu_time = &rq->grp_time;
-
- curr_runnable_sum = &cpu_time->curr_runnable_sum;
- prev_runnable_sum = &cpu_time->prev_runnable_sum;
-
- nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
- nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
- }
-
- if (!new_window) {
- /*
- * account_busy_for_cpu_time() = 1 so busy time needs
- * to be accounted to the current window. No rollover
- * since we didn't start a new window. An example of this is
- * when a task starts execution and then sleeps within the
- * same window.
- */
-
- if (!irqtime || !is_idle_task(p) || cpu_is_waiting_on_io(rq))
- delta = wallclock - mark_start;
- else
- delta = irqtime;
- delta = scale_exec_time(delta, rq);
- *curr_runnable_sum += delta;
- if (new_task)
- *nt_curr_runnable_sum += delta;
-
- if (!is_idle_task(p) && !exiting_task(p)) {
- p->ravg.curr_window += delta;
- p->ravg.curr_window_cpu[cpu] += delta;
- }
-
- goto done;
- }
-
- if (!p_is_curr_task) {
- /*
- * account_busy_for_cpu_time() = 1 so busy time needs
- * to be accounted to the current window. A new window
- * has also started, but p is not the current task, so the
- * window is not rolled over - just split up and account
- * as necessary into curr and prev. The window is only
- * rolled over when a new window is processed for the current
- * task.
- *
- * Irqtime can't be accounted by a task that isn't the
- * currently running task.
- */
-
- if (!full_window) {
- /*
- * A full window hasn't elapsed, account partial
- * contribution to previous completed window.
- */
- delta = scale_exec_time(window_start - mark_start, rq);
- if (!exiting_task(p)) {
- p->ravg.prev_window += delta;
- p->ravg.prev_window_cpu[cpu] += delta;
- }
- } else {
- /*
- * Since at least one full window has elapsed,
- * the contribution to the previous window is the
- * full window (window_size).
- */
- delta = scale_exec_time(window_size, rq);
- if (!exiting_task(p)) {
- p->ravg.prev_window = delta;
- p->ravg.prev_window_cpu[cpu] = delta;
- }
- }
-
- *prev_runnable_sum += delta;
- if (new_task)
- *nt_prev_runnable_sum += delta;
-
- /* Account piece of busy time in the current window. */
- delta = scale_exec_time(wallclock - window_start, rq);
- *curr_runnable_sum += delta;
- if (new_task)
- *nt_curr_runnable_sum += delta;
-
- if (!exiting_task(p)) {
- p->ravg.curr_window = delta;
- p->ravg.curr_window_cpu[cpu] = delta;
- }
-
- goto done;
- }
-
- if (!irqtime || !is_idle_task(p) || cpu_is_waiting_on_io(rq)) {
- /*
- * account_busy_for_cpu_time() = 1 so busy time needs
- * to be accounted to the current window. A new window
- * has started and p is the current task so rollover is
- * needed. If any of these three above conditions are true
- * then this busy time can't be accounted as irqtime.
- *
- * Busy time for the idle task or exiting tasks need not
- * be accounted.
- *
- * An example of this would be a task that starts execution
- * and then sleeps once a new window has begun.
- */
-
- if (!full_window) {
- /*
- * A full window hasn't elapsed, account partial
- * contribution to previous completed window.
- */
- delta = scale_exec_time(window_start - mark_start, rq);
- if (!is_idle_task(p) && !exiting_task(p)) {
- p->ravg.prev_window += delta;
- p->ravg.prev_window_cpu[cpu] += delta;
- }
- } else {
- /*
- * Since at least one full window has elapsed,
- * the contribution to the previous window is the
- * full window (window_size).
- */
- delta = scale_exec_time(window_size, rq);
- if (!is_idle_task(p) && !exiting_task(p)) {
- p->ravg.prev_window = delta;
- p->ravg.prev_window_cpu[cpu] = delta;
- }
- }
-
- /*
- * Rollover is done here by overwriting the values in
- * prev_runnable_sum and curr_runnable_sum.
- */
- *prev_runnable_sum += delta;
- if (new_task)
- *nt_prev_runnable_sum += delta;
-
- /* Account piece of busy time in the current window. */
- delta = scale_exec_time(wallclock - window_start, rq);
- *curr_runnable_sum += delta;
- if (new_task)
- *nt_curr_runnable_sum += delta;
-
- if (!is_idle_task(p) && !exiting_task(p)) {
- p->ravg.curr_window = delta;
- p->ravg.curr_window_cpu[cpu] = delta;
- }
-
- goto done;
- }
-
- if (irqtime) {
- /*
- * account_busy_for_cpu_time() = 1 so busy time needs
- * to be accounted to the current window. A new window
- * has started and p is the current task so rollover is
- * needed. The current task must be the idle task because
- * irqtime is not accounted for any other task.
- *
- * Irqtime will be accounted each time we process IRQ activity
- * after a period of idleness, so we know the IRQ busy time
- * started at wallclock - irqtime.
- */
-
- BUG_ON(!is_idle_task(p));
- mark_start = wallclock - irqtime;
-
- /*
- * Roll window over. If IRQ busy time was just in the current
- * window then that is all that need be accounted.
- */
- if (mark_start > window_start) {
- *curr_runnable_sum = scale_exec_time(irqtime, rq);
- return;
- }
-
- /*
- * The IRQ busy time spanned multiple windows. Process the
- * busy time preceding the current window start first.
- */
- delta = window_start - mark_start;
- if (delta > window_size)
- delta = window_size;
- delta = scale_exec_time(delta, rq);
- *prev_runnable_sum += delta;
-
- /* Process the remaining IRQ busy time in the current window. */
- delta = wallclock - window_start;
- rq->curr_runnable_sum = scale_exec_time(delta, rq);
-
- return;
- }
-
-done:
- if (!is_idle_task(p) && !exiting_task(p))
- update_top_tasks(p, rq, old_curr_window,
- new_window, full_window);
-}
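The heart of the accounting above is splitting one busy span at the window boundary: at most one full window is credited to the previous window, the remainder to the current one. A standalone sketch of just that arithmetic (20 ms window assumed; scale_exec_time and the group/new-task bookkeeping are left out):

#include <stdint.h>
#include <stdio.h>

#define WINDOW_NS 20000000ULL                   /* assumed 20 ms window */

struct split {
        uint64_t prev_contrib;
        uint64_t curr_contrib;
};

static struct split split_busy_time(uint64_t mark_start, uint64_t window_start,
                                    uint64_t wallclock)
{
        struct split s = { 0, 0 };

        if (mark_start >= window_start) {
                /* span fits entirely inside the current window */
                s.curr_contrib = wallclock - mark_start;
                return s;
        }

        /* at most one full window goes to the previous window */
        s.prev_contrib = window_start - mark_start;
        if (s.prev_contrib > WINDOW_NS)
                s.prev_contrib = WINDOW_NS;

        s.curr_contrib = wallclock - window_start;
        return s;
}

int main(void)
{
        struct split s = split_busy_time(15000000ULL, 20000000ULL, 27000000ULL);

        printf("prev=%llu curr=%llu\n",
               (unsigned long long)s.prev_contrib,
               (unsigned long long)s.curr_contrib);
        return 0;
}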
-
-static inline u32 predict_and_update_buckets(struct rq *rq,
- struct task_struct *p, u32 runtime) {
-
- int bidx;
- u32 pred_demand;
-
- bidx = busy_to_bucket(runtime);
- pred_demand = get_pred_busy(rq, p, bidx, runtime);
- bucket_increase(p->ravg.busy_buckets, bidx);
-
- return pred_demand;
-}
-
-static void update_task_cpu_cycles(struct task_struct *p, int cpu)
-{
- if (use_cycle_counter)
- p->cpu_cycles = cpu_cycle_counter_cb.get_cpu_cycle_counter(cpu);
-}
-
-static void
-update_task_rq_cpu_cycles(struct task_struct *p, struct rq *rq, int event,
- u64 wallclock, u64 irqtime)
-{
- u64 cur_cycles;
- int cpu = cpu_of(rq);
-
- lockdep_assert_held(&rq->lock);
-
- if (!use_cycle_counter) {
- rq->cc.cycles = cpu_cur_freq(cpu);
- rq->cc.time = 1;
- return;
- }
-
- cur_cycles = cpu_cycle_counter_cb.get_cpu_cycle_counter(cpu);
-
- /*
- * If the current task is the idle task and irqtime == 0, the CPU was
- * indeed idle and probably its cycle counter was not
- * increasing. We still need an estimated CPU frequency
- * for IO wait time accounting. Use the previously
- * calculated frequency in such a case.
- */
- if (!is_idle_task(rq->curr) || irqtime) {
- if (unlikely(cur_cycles < p->cpu_cycles))
- rq->cc.cycles = cur_cycles + (U64_MAX - p->cpu_cycles);
- else
- rq->cc.cycles = cur_cycles - p->cpu_cycles;
- rq->cc.cycles = rq->cc.cycles * NSEC_PER_MSEC;
-
- if (event == IRQ_UPDATE && is_idle_task(p))
- /*
- * Time between mark_start of idle task and IRQ handler
- * entry time is CPU cycle counter stall period.
- * Upon IRQ handler entry sched_account_irqstart()
- * replenishes idle task's cpu cycle counter so
- * rq->cc.cycles now represents increased cycles during
- * IRQ handler rather than time between idle entry and
- * IRQ exit. Thus use irqtime as time delta.
- */
- rq->cc.time = irqtime;
- else
- rq->cc.time = wallclock - p->ravg.mark_start;
- BUG_ON((s64)rq->cc.time < 0);
- }
-
- p->cpu_cycles = cur_cycles;
-
- trace_sched_get_task_cpu_cycles(cpu, event, rq->cc.cycles, rq->cc.time);
-}
-
-static int
-account_busy_for_task_demand(struct rq *rq, struct task_struct *p, int event)
-{
- /*
- * No need to bother updating task demand for exiting tasks
- * or the idle task.
- */
- if (exiting_task(p) || is_idle_task(p))
- return 0;
-
- /*
- * When a task is waking up it is completing a segment of non-busy
- * time. Likewise, if wait time is not treated as busy time, then
- * when a task begins to run or is migrated, it is not running and
- * is completing a segment of non-busy time.
- */
- if (event == TASK_WAKE || (!SCHED_ACCOUNT_WAIT_TIME &&
- (event == PICK_NEXT_TASK || event == TASK_MIGRATE)))
- return 0;
-
- /*
- * TASK_UPDATE can be called on a sleeping task, when it's moved between
- * related groups
- */
- if (event == TASK_UPDATE) {
- if (rq->curr == p)
- return 1;
-
- return p->on_rq ? SCHED_ACCOUNT_WAIT_TIME : 0;
- }
-
- return 1;
-}
-
-/*
- * Called when a new window is starting for a task, to record cpu usage over
- * recently concluded window(s). Normally 'samples' should be 1. It can be > 1
- * when, say, a real-time task runs without preemption for several windows at a
- * stretch.
- */
-static void update_history(struct rq *rq, struct task_struct *p,
- u32 runtime, int samples, int event)
-{
- u32 *hist = &p->ravg.sum_history[0];
- int ridx, widx;
- u32 max = 0, avg, demand, pred_demand;
- u64 sum = 0;
-
- /* Ignore windows where task had no activity */
- if (!runtime || is_idle_task(p) || exiting_task(p) || !samples)
- goto done;
-
- /* Push new 'runtime' value onto stack */
- widx = sched_ravg_hist_size - 1;
- ridx = widx - samples;
- for (; ridx >= 0; --widx, --ridx) {
- hist[widx] = hist[ridx];
- sum += hist[widx];
- if (hist[widx] > max)
- max = hist[widx];
- }
-
- for (widx = 0; widx < samples && widx < sched_ravg_hist_size; widx++) {
- hist[widx] = runtime;
- sum += hist[widx];
- if (hist[widx] > max)
- max = hist[widx];
- }
-
- p->ravg.sum = 0;
-
- if (sched_window_stats_policy == WINDOW_STATS_RECENT) {
- demand = runtime;
- } else if (sched_window_stats_policy == WINDOW_STATS_MAX) {
- demand = max;
- } else {
- avg = div64_u64(sum, sched_ravg_hist_size);
- if (sched_window_stats_policy == WINDOW_STATS_AVG)
- demand = avg;
- else
- demand = max(avg, runtime);
- }
- pred_demand = predict_and_update_buckets(rq, p, runtime);
-
- /*
- * A throttled deadline sched class task gets dequeued without
- * changing p->on_rq. Since the dequeue decrements hmp stats
- * avoid decrementing it here again.
- */
- if (task_on_rq_queued(p) && (!task_has_dl_policy(p) ||
- !p->dl.dl_throttled))
- p->sched_class->fixup_hmp_sched_stats(rq, p, demand,
- pred_demand);
-
- p->ravg.demand = demand;
- p->ravg.pred_demand = pred_demand;
-
-done:
- trace_sched_update_history(rq, p, runtime, samples, event);
-}
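A sketch of how the demand figure is chosen from the history under the window-stats policies above. The history already holds the newly pushed sample; the name of the fourth policy constant (the max-of-average-and-recent case handled by the else branch) is assumed here:

#include <stdint.h>
#include <stdio.h>

enum window_stats_policy {
        WINDOW_STATS_RECENT,
        WINDOW_STATS_MAX,
        WINDOW_STATS_AVG,
        WINDOW_STATS_MAX_RECENT_AVG,            /* assumed name for the else case */
};

static uint32_t pick_demand(const uint32_t *hist, int n, uint32_t runtime,
                            enum window_stats_policy policy)
{
        uint64_t sum = 0;
        uint32_t max = 0, avg;
        int i;

        for (i = 0; i < n; i++) {
                sum += hist[i];
                if (hist[i] > max)
                        max = hist[i];
        }
        avg = (uint32_t)(sum / n);

        switch (policy) {
        case WINDOW_STATS_RECENT:
                return runtime;
        case WINDOW_STATS_MAX:
                return max;
        case WINDOW_STATS_AVG:
                return avg;
        default:
                return avg > runtime ? avg : runtime;
        }
}

int main(void)
{
        uint32_t hist[5] = { 4000, 9000, 2000, 7000, 3000 };

        printf("demand = %u\n",
               pick_demand(hist, 5, hist[0], WINDOW_STATS_MAX_RECENT_AVG));
        return 0;
}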
-
-static u64 add_to_task_demand(struct rq *rq, struct task_struct *p, u64 delta)
-{
- delta = scale_exec_time(delta, rq);
- p->ravg.sum += delta;
- if (unlikely(p->ravg.sum > sched_ravg_window))
- p->ravg.sum = sched_ravg_window;
-
- return delta;
-}
-
-/*
- * Account cpu demand of task and/or update task's cpu demand history
- *
- * ms = p->ravg.mark_start;
- * wc = wallclock
- * ws = rq->window_start
- *
- * Three possibilities:
- *
- * a) Task event is contained within one window.
- * window_start < mark_start < wallclock
- *
- * ws ms wc
- * | | |
- * V V V
- * |---------------|
- *
- * In this case, p->ravg.sum is updated *iff* event is appropriate
- * (ex: event == PUT_PREV_TASK)
- *
- * b) Task event spans two windows.
- * mark_start < window_start < wallclock
- *
- * ms ws wc
- * | | |
- * V V V
- * -----|-------------------
- *
- * In this case, p->ravg.sum is updated with (ws - ms) *iff* event
- * is appropriate, then a new window sample is recorded followed
- * by p->ravg.sum being set to (wc - ws) *iff* event is appropriate.
- *
- * c) Task event spans more than two windows.
- *
- * ms ws_tmp ws wc
- * | | | |
- * V V V V
- * ---|-------|-------|-------|-------|------
- * | |
- * |<------ nr_full_windows ------>|
- *
- * In this case, p->ravg.sum is updated with (ws_tmp - ms) first *iff*
- * event is appropriate, window sample of p->ravg.sum is recorded,
- * 'nr_full_windows' samples of window_size are also recorded *iff*
- * event is appropriate and finally p->ravg.sum is set to (wc - ws)
- * *iff* event is appropriate.
- *
- * IMPORTANT : Leave p->ravg.mark_start unchanged, as update_cpu_busy_time()
- * depends on it!
- */
-static u64 update_task_demand(struct task_struct *p, struct rq *rq,
- int event, u64 wallclock)
-{
- u64 mark_start = p->ravg.mark_start;
- u64 delta, window_start = rq->window_start;
- int new_window, nr_full_windows;
- u32 window_size = sched_ravg_window;
- u64 runtime;
-
- new_window = mark_start < window_start;
- if (!account_busy_for_task_demand(rq, p, event)) {
- if (new_window)
- /*
- * If the time accounted isn't being accounted as
- * busy time, and a new window started, only the
- * previous window need be closed out with the
- * pre-existing demand. Multiple windows may have
- * elapsed, but since empty windows are dropped,
- * it is not necessary to account those.
- */
- update_history(rq, p, p->ravg.sum, 1, event);
- return 0;
- }
-
- if (!new_window) {
- /*
- * The simple case - busy time contained within the existing
- * window.
- */
- return add_to_task_demand(rq, p, wallclock - mark_start);
- }
-
- /*
- * Busy time spans at least two windows. Temporarily rewind
- * window_start to first window boundary after mark_start.
- */
- delta = window_start - mark_start;
- nr_full_windows = div64_u64(delta, window_size);
- window_start -= (u64)nr_full_windows * (u64)window_size;
-
- /* Process (window_start - mark_start) first */
- runtime = add_to_task_demand(rq, p, window_start - mark_start);
-
- /* Push new sample(s) into task's demand history */
- update_history(rq, p, p->ravg.sum, 1, event);
- if (nr_full_windows) {
- u64 scaled_window = scale_exec_time(window_size, rq);
-
- update_history(rq, p, scaled_window, nr_full_windows, event);
- runtime += nr_full_windows * scaled_window;
- }
-
- /*
- * Roll window_start back to current to process any remainder
- * in current window.
- */
- window_start += (u64)nr_full_windows * (u64)window_size;
-
- /* Process (wallclock - window_start) next */
- mark_start = window_start;
- runtime += add_to_task_demand(rq, p, wallclock - mark_start);
-
- return runtime;
-}
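A worked instance of case (c) from the comment above, showing that the head segment, the full windows and the tail add back up to wallclock - mark_start (the 20 ms window and the timestamps are made up):

#include <stdint.h>
#include <stdio.h>

#define WINDOW_NS 20000000ULL                   /* assumed 20 ms window */

int main(void)
{
        uint64_t mark_start = 5000000ULL, wallclock = 67000000ULL;
        uint64_t window_start = 60000000ULL;
        uint64_t delta = window_start - mark_start;
        uint64_t nr_full = delta / WINDOW_NS;
        uint64_t ws_tmp = window_start - nr_full * WINDOW_NS;
        uint64_t runtime;

        runtime  = ws_tmp - mark_start;          /* head of the first window   */
        runtime += nr_full * WINDOW_NS;          /* the fully elapsed windows   */
        runtime += wallclock - window_start;     /* tail in the current window  */

        printf("full windows=%llu runtime=%llu (wc - ms = %llu)\n",
               (unsigned long long)nr_full,
               (unsigned long long)runtime,
               (unsigned long long)(wallclock - mark_start));
        return 0;
}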
-
-static inline void
-update_task_burst(struct task_struct *p, struct rq *rq, int event, u64 runtime)
-{
- /*
- * update_task_demand() has checks for idle task and
- * exit task. The runtime may include the wait time,
- * so update the burst only for the cases where the
- * task is running.
- */
- if (event == PUT_PREV_TASK || (event == TASK_UPDATE &&
- rq->curr == p))
- p->ravg.curr_burst += runtime;
-}
-
-/* Reflect task activity on its demand and cpu's busy time statistics */
-void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
- u64 wallclock, u64 irqtime)
-{
- u64 runtime;
-
- if (!rq->window_start || sched_disable_window_stats ||
- p->ravg.mark_start == wallclock)
- return;
-
- lockdep_assert_held(&rq->lock);
-
- update_window_start(rq, wallclock);
-
- if (!p->ravg.mark_start) {
- update_task_cpu_cycles(p, cpu_of(rq));
- goto done;
- }
-
- update_task_rq_cpu_cycles(p, rq, event, wallclock, irqtime);
- runtime = update_task_demand(p, rq, event, wallclock);
- if (runtime)
- update_task_burst(p, rq, event, runtime);
- update_cpu_busy_time(p, rq, event, wallclock, irqtime);
- update_task_pred_demand(rq, p, event);
-done:
- trace_sched_update_task_ravg(p, rq, event, wallclock, irqtime,
- rq->cc.cycles, rq->cc.time,
- p->grp ? &rq->grp_time : NULL);
-
- p->ravg.mark_start = wallclock;
-}
-
-void sched_account_irqtime(int cpu, struct task_struct *curr,
- u64 delta, u64 wallclock)
-{
- struct rq *rq = cpu_rq(cpu);
- unsigned long flags, nr_windows;
- u64 cur_jiffies_ts;
-
- raw_spin_lock_irqsave(&rq->lock, flags);
-
- /*
- * cputime (wallclock) uses sched_clock so use the same here for
- * consistency.
- */
- delta += sched_clock() - wallclock;
- cur_jiffies_ts = get_jiffies_64();
-
- if (is_idle_task(curr))
- update_task_ravg(curr, rq, IRQ_UPDATE, sched_ktime_clock(),
- delta);
-
- nr_windows = cur_jiffies_ts - rq->irqload_ts;
-
- if (nr_windows) {
- if (nr_windows < 10) {
- /* Decay CPU's irqload by 3/4 for each window. */
- rq->avg_irqload *= (3 * nr_windows);
- rq->avg_irqload = div64_u64(rq->avg_irqload,
- 4 * nr_windows);
- } else {
- rq->avg_irqload = 0;
- }
- rq->avg_irqload += rq->cur_irqload;
- rq->cur_irqload = 0;
- }
-
- rq->cur_irqload += delta;
- rq->irqload_ts = cur_jiffies_ts;
- raw_spin_unlock_irqrestore(&rq->lock, flags);
-}
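A sketch of the irqload update above. Note the arithmetic applies a single 3/4 factor whenever fewer than 10 windows have elapsed (the nr_windows terms cancel) and drops the history entirely after longer idle stretches; the helper and sample values below are only illustrative:

#include <stdint.h>
#include <stdio.h>

static uint64_t decay_irqload(uint64_t avg, uint64_t cur, uint64_t nr_windows)
{
        if (!nr_windows)
                return avg;                     /* same jiffy: nothing to fold in */

        if (nr_windows < 10)
                avg = (avg * 3 * nr_windows) / (4 * nr_windows);
        else
                avg = 0;                        /* idle long enough: forget history */

        return avg + cur;
}

int main(void)
{
        printf("decayed avg = %llu\n",
               (unsigned long long)decay_irqload(8000, 1000, 3));
        return 0;
}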
-
-void sched_account_irqstart(int cpu, struct task_struct *curr, u64 wallclock)
-{
- struct rq *rq = cpu_rq(cpu);
-
- if (!rq->window_start || sched_disable_window_stats)
- return;
-
- if (is_idle_task(curr)) {
- /* We're here without rq->lock held, IRQ disabled */
- raw_spin_lock(&rq->lock);
- update_task_cpu_cycles(curr, cpu);
- raw_spin_unlock(&rq->lock);
- }
-}
-
-void reset_task_stats(struct task_struct *p)
-{
- u32 sum = 0;
- u32 *curr_window_ptr = NULL;
- u32 *prev_window_ptr = NULL;
-
- if (exiting_task(p)) {
- sum = EXITING_TASK_MARKER;
- } else {
- curr_window_ptr = p->ravg.curr_window_cpu;
- prev_window_ptr = p->ravg.prev_window_cpu;
- memset(curr_window_ptr, 0, sizeof(u32) * nr_cpu_ids);
- memset(prev_window_ptr, 0, sizeof(u32) * nr_cpu_ids);
- }
-
- memset(&p->ravg, 0, sizeof(struct ravg));
-
- p->ravg.curr_window_cpu = curr_window_ptr;
- p->ravg.prev_window_cpu = prev_window_ptr;
-
- p->ravg.avg_burst = 2 * (u64)sysctl_sched_short_burst;
-
- /* Retain EXITING_TASK marker */
- p->ravg.sum_history[0] = sum;
-}
-
-void mark_task_starting(struct task_struct *p)
-{
- u64 wallclock;
- struct rq *rq = task_rq(p);
-
- if (!rq->window_start || sched_disable_window_stats) {
- reset_task_stats(p);
- return;
- }
-
- wallclock = sched_ktime_clock();
- p->ravg.mark_start = p->last_wake_ts = wallclock;
- p->last_cpu_selected_ts = wallclock;
- p->last_switch_out_ts = 0;
- update_task_cpu_cycles(p, cpu_of(rq));
-}
-
-void set_window_start(struct rq *rq)
-{
- static int sync_cpu_available;
-
- if (rq->window_start)
- return;
-
- if (!sync_cpu_available) {
- rq->window_start = sched_ktime_clock();
- sync_cpu_available = 1;
- } else {
- struct rq *sync_rq = cpu_rq(cpumask_any(cpu_online_mask));
-
- raw_spin_unlock(&rq->lock);
- double_rq_lock(rq, sync_rq);
- rq->window_start = sync_rq->window_start;
- rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
- rq->nt_curr_runnable_sum = rq->nt_prev_runnable_sum = 0;
- raw_spin_unlock(&sync_rq->lock);
- }
-
- rq->curr->ravg.mark_start = rq->window_start;
-}
-
static void reset_all_task_stats(void)
{
struct task_struct *g, *p;
@@ -3078,59 +1236,6 @@
sched_ktime_clock() - start_ts, reason, old, new);
}
-/*
- * In this function we match the accumulated subtractions with the current
- * and previous windows we are operating with. Ignore any entries where
- * the window start in the load_subtraction struct does not match either
- * the current or the previous window. This could happen whenever CPUs
- * become idle or busy with interrupts disabled for an extended period.
- */
-static inline void account_load_subtractions(struct rq *rq)
-{
- u64 ws = rq->window_start;
- u64 prev_ws = ws - sched_ravg_window;
- struct load_subtractions *ls = rq->load_subs;
- int i;
-
- for (i = 0; i < NUM_TRACKED_WINDOWS; i++) {
- if (ls[i].window_start == ws) {
- rq->curr_runnable_sum -= ls[i].subs;
- rq->nt_curr_runnable_sum -= ls[i].new_subs;
- } else if (ls[i].window_start == prev_ws) {
- rq->prev_runnable_sum -= ls[i].subs;
- rq->nt_prev_runnable_sum -= ls[i].new_subs;
- }
-
- ls[i].subs = 0;
- ls[i].new_subs = 0;
- }
-
- BUG_ON((s64)rq->prev_runnable_sum < 0);
- BUG_ON((s64)rq->curr_runnable_sum < 0);
- BUG_ON((s64)rq->nt_prev_runnable_sum < 0);
- BUG_ON((s64)rq->nt_curr_runnable_sum < 0);
-}
-
-static inline u64 freq_policy_load(struct rq *rq, u64 load)
-{
- unsigned int reporting_policy = sysctl_sched_freq_reporting_policy;
-
- switch (reporting_policy) {
- case FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK:
- load = max_t(u64, load, top_task_load(rq));
- break;
- case FREQ_REPORT_TOP_TASK:
- load = top_task_load(rq);
- break;
- case FREQ_REPORT_CPU_LOAD:
- break;
- default:
- break;
- }
-
- return load;
-}
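A standalone sketch of the reporting-policy selection above; the enum mirrors the cases in the removed switch and the sample loads are arbitrary:

#include <stdint.h>
#include <stdio.h>

enum freq_reporting_policy {
        FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK,
        FREQ_REPORT_TOP_TASK,
        FREQ_REPORT_CPU_LOAD,
};

static uint64_t pick_reported_load(uint64_t cpu_load, uint64_t top_task_load,
                                   enum freq_reporting_policy policy)
{
        switch (policy) {
        case FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK:
                return cpu_load > top_task_load ? cpu_load : top_task_load;
        case FREQ_REPORT_TOP_TASK:
                return top_task_load;
        case FREQ_REPORT_CPU_LOAD:
        default:
                return cpu_load;
        }
}

int main(void)
{
        printf("reported load = %llu\n",
               (unsigned long long)pick_reported_load(12000000, 15000000,
                        FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK));
        return 0;
}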
-
void sched_get_cpus_busy(struct sched_load *busy,
const struct cpumask *query_cpus)
{
@@ -3296,11 +1401,6 @@
}
}
-void sched_set_io_is_busy(int val)
-{
- sched_io_is_busy = val;
-}
-
int sched_set_window(u64 window_start, unsigned int window_size)
{
u64 now, cur_jiffies, jiffy_ktime_ns;
@@ -3350,289 +1450,6 @@
rq->load_subs[index].new_subs = 0;
}
-static bool get_subtraction_index(struct rq *rq, u64 ws)
-{
- int i;
- u64 oldest = ULLONG_MAX;
- int oldest_index = 0;
-
- for (i = 0; i < NUM_TRACKED_WINDOWS; i++) {
- u64 entry_ws = rq->load_subs[i].window_start;
-
- if (ws == entry_ws)
- return i;
-
- if (entry_ws < oldest) {
- oldest = entry_ws;
- oldest_index = i;
- }
- }
-
- create_subtraction_entry(rq, ws, oldest_index);
- return oldest_index;
-}
-
-static void update_rq_load_subtractions(int index, struct rq *rq,
- u32 sub_load, bool new_task)
-{
- rq->load_subs[index].subs += sub_load;
- if (new_task)
- rq->load_subs[index].new_subs += sub_load;
-}
-
-static void update_cluster_load_subtractions(struct task_struct *p,
- int cpu, u64 ws, bool new_task)
-{
- struct sched_cluster *cluster = cpu_cluster(cpu);
- struct cpumask cluster_cpus = cluster->cpus;
- u64 prev_ws = ws - sched_ravg_window;
- int i;
-
- cpumask_clear_cpu(cpu, &cluster_cpus);
- raw_spin_lock(&cluster->load_lock);
-
- for_each_cpu(i, &cluster_cpus) {
- struct rq *rq = cpu_rq(i);
- int index;
-
- if (p->ravg.curr_window_cpu[i]) {
- index = get_subtraction_index(rq, ws);
- update_rq_load_subtractions(index, rq,
- p->ravg.curr_window_cpu[i], new_task);
- p->ravg.curr_window_cpu[i] = 0;
- }
-
- if (p->ravg.prev_window_cpu[i]) {
- index = get_subtraction_index(rq, prev_ws);
- update_rq_load_subtractions(index, rq,
- p->ravg.prev_window_cpu[i], new_task);
- p->ravg.prev_window_cpu[i] = 0;
- }
- }
-
- raw_spin_unlock(&cluster->load_lock);
-}
-
-static inline void inter_cluster_migration_fixup
- (struct task_struct *p, int new_cpu, int task_cpu, bool new_task)
-{
- struct rq *dest_rq = cpu_rq(new_cpu);
- struct rq *src_rq = cpu_rq(task_cpu);
-
- if (same_freq_domain(new_cpu, task_cpu))
- return;
-
- p->ravg.curr_window_cpu[new_cpu] = p->ravg.curr_window;
- p->ravg.prev_window_cpu[new_cpu] = p->ravg.prev_window;
-
- dest_rq->curr_runnable_sum += p->ravg.curr_window;
- dest_rq->prev_runnable_sum += p->ravg.prev_window;
-
- src_rq->curr_runnable_sum -= p->ravg.curr_window_cpu[task_cpu];
- src_rq->prev_runnable_sum -= p->ravg.prev_window_cpu[task_cpu];
-
- if (new_task) {
- dest_rq->nt_curr_runnable_sum += p->ravg.curr_window;
- dest_rq->nt_prev_runnable_sum += p->ravg.prev_window;
-
- src_rq->nt_curr_runnable_sum -=
- p->ravg.curr_window_cpu[task_cpu];
- src_rq->nt_prev_runnable_sum -=
- p->ravg.prev_window_cpu[task_cpu];
- }
-
- p->ravg.curr_window_cpu[task_cpu] = 0;
- p->ravg.prev_window_cpu[task_cpu] = 0;
-
- update_cluster_load_subtractions(p, task_cpu,
- src_rq->window_start, new_task);
-
- BUG_ON((s64)src_rq->prev_runnable_sum < 0);
- BUG_ON((s64)src_rq->curr_runnable_sum < 0);
- BUG_ON((s64)src_rq->nt_prev_runnable_sum < 0);
- BUG_ON((s64)src_rq->nt_curr_runnable_sum < 0);
-}
-
-static int get_top_index(unsigned long *bitmap, unsigned long old_top)
-{
- int index = find_next_bit(bitmap, NUM_LOAD_INDICES, old_top);
-
- if (index == NUM_LOAD_INDICES)
- return 0;
-
- return NUM_LOAD_INDICES - 1 - index;
-}
-
-static void
-migrate_top_tasks(struct task_struct *p, struct rq *src_rq, struct rq *dst_rq)
-{
- int index;
- int top_index;
- u32 curr_window = p->ravg.curr_window;
- u32 prev_window = p->ravg.prev_window;
- u8 src = src_rq->curr_table;
- u8 dst = dst_rq->curr_table;
- u8 *src_table;
- u8 *dst_table;
-
- if (curr_window) {
- src_table = src_rq->top_tasks[src];
- dst_table = dst_rq->top_tasks[dst];
- index = load_to_index(curr_window);
- src_table[index] -= 1;
- dst_table[index] += 1;
-
- if (!src_table[index])
- __clear_bit(NUM_LOAD_INDICES - index - 1,
- src_rq->top_tasks_bitmap[src]);
-
- if (dst_table[index] == 1)
- __set_bit(NUM_LOAD_INDICES - index - 1,
- dst_rq->top_tasks_bitmap[dst]);
-
- if (index > dst_rq->curr_top)
- dst_rq->curr_top = index;
-
- top_index = src_rq->curr_top;
- if (index == top_index && !src_table[index])
- src_rq->curr_top = get_top_index(
- src_rq->top_tasks_bitmap[src], top_index);
- }
-
- if (prev_window) {
- src = 1 - src;
- dst = 1 - dst;
- src_table = src_rq->top_tasks[src];
- dst_table = dst_rq->top_tasks[dst];
- index = load_to_index(prev_window);
- src_table[index] -= 1;
- dst_table[index] += 1;
-
- if (!src_table[index])
- __clear_bit(NUM_LOAD_INDICES - index - 1,
- src_rq->top_tasks_bitmap[src]);
-
- if (dst_table[index] == 1)
- __set_bit(NUM_LOAD_INDICES - index - 1,
- dst_rq->top_tasks_bitmap[dst]);
-
- if (index > dst_rq->prev_top)
- dst_rq->prev_top = index;
-
- top_index = src_rq->prev_top;
- if (index == top_index && !src_table[index])
- src_rq->prev_top = get_top_index(
- src_rq->top_tasks_bitmap[src], top_index);
- }
-}
-
-void fixup_busy_time(struct task_struct *p, int new_cpu)
-{
- struct rq *src_rq = task_rq(p);
- struct rq *dest_rq = cpu_rq(new_cpu);
- u64 wallclock;
- u64 *src_curr_runnable_sum, *dst_curr_runnable_sum;
- u64 *src_prev_runnable_sum, *dst_prev_runnable_sum;
- u64 *src_nt_curr_runnable_sum, *dst_nt_curr_runnable_sum;
- u64 *src_nt_prev_runnable_sum, *dst_nt_prev_runnable_sum;
- bool new_task;
- struct related_thread_group *grp;
-
- if (!p->on_rq && p->state != TASK_WAKING)
- return;
-
- if (exiting_task(p)) {
- clear_ed_task(p, src_rq);
- return;
- }
-
- if (p->state == TASK_WAKING)
- double_rq_lock(src_rq, dest_rq);
-
- if (sched_disable_window_stats)
- goto done;
-
- wallclock = sched_ktime_clock();
-
- update_task_ravg(task_rq(p)->curr, task_rq(p),
- TASK_UPDATE,
- wallclock, 0);
- update_task_ravg(dest_rq->curr, dest_rq,
- TASK_UPDATE, wallclock, 0);
-
- update_task_ravg(p, task_rq(p), TASK_MIGRATE,
- wallclock, 0);
-
- update_task_cpu_cycles(p, new_cpu);
-
- new_task = is_new_task(p);
- /* Protected by rq_lock */
- grp = p->grp;
-
- /*
- * For frequency aggregation, we continue to do migration fixups
- * even for intra cluster migrations. This is because the aggregated
- * load has to be reported on a single CPU regardless.
- */
- if (grp && sched_freq_aggregate) {
- struct group_cpu_time *cpu_time;
-
- cpu_time = &src_rq->grp_time;
- src_curr_runnable_sum = &cpu_time->curr_runnable_sum;
- src_prev_runnable_sum = &cpu_time->prev_runnable_sum;
- src_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
- src_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
-
- cpu_time = &dest_rq->grp_time;
- dst_curr_runnable_sum = &cpu_time->curr_runnable_sum;
- dst_prev_runnable_sum = &cpu_time->prev_runnable_sum;
- dst_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
- dst_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
-
- if (p->ravg.curr_window) {
- *src_curr_runnable_sum -= p->ravg.curr_window;
- *dst_curr_runnable_sum += p->ravg.curr_window;
- if (new_task) {
- *src_nt_curr_runnable_sum -=
- p->ravg.curr_window;
- *dst_nt_curr_runnable_sum +=
- p->ravg.curr_window;
- }
- }
-
- if (p->ravg.prev_window) {
- *src_prev_runnable_sum -= p->ravg.prev_window;
- *dst_prev_runnable_sum += p->ravg.prev_window;
- if (new_task) {
- *src_nt_prev_runnable_sum -=
- p->ravg.prev_window;
- *dst_nt_prev_runnable_sum +=
- p->ravg.prev_window;
- }
- }
- } else {
- inter_cluster_migration_fixup(p, new_cpu,
- task_cpu(p), new_task);
- }
-
- migrate_top_tasks(p, src_rq, dest_rq);
-
- if (!same_freq_domain(new_cpu, task_cpu(p))) {
- cpufreq_update_util(dest_rq, SCHED_CPUFREQ_INTERCLUSTER_MIG);
- cpufreq_update_util(src_rq, SCHED_CPUFREQ_INTERCLUSTER_MIG);
- }
-
- if (p == src_rq->ed_task) {
- src_rq->ed_task = NULL;
- if (!dest_rq->ed_task)
- dest_rq->ed_task = p;
- }
-
-done:
- if (p->state == TASK_WAKING)
- double_rq_unlock(src_rq, dest_rq);
-}
-
#define sched_up_down_migrate_auto_update 1
static void check_for_up_down_migrate_update(const struct cpumask *cpus)
{
@@ -3653,426 +1470,7 @@
update_up_down_migrate();
}
-/* Return cluster which can offer required capacity for group */
-static struct sched_cluster *best_cluster(struct related_thread_group *grp,
- u64 total_demand, bool group_boost)
-{
- struct sched_cluster *cluster = NULL;
-
- for_each_sched_cluster(cluster) {
- if (group_will_fit(cluster, grp, total_demand, group_boost))
- return cluster;
- }
-
- return sched_cluster[0];
-}
-
-static void _set_preferred_cluster(struct related_thread_group *grp)
-{
- struct task_struct *p;
- u64 combined_demand = 0;
- bool boost_on_big = sched_boost_policy() == SCHED_BOOST_ON_BIG;
- bool group_boost = false;
- u64 wallclock;
-
- if (list_empty(&grp->tasks))
- return;
-
- wallclock = sched_ktime_clock();
-
- /*
- * Wakeups of two or more related tasks can race with each other and
- * result in multiple calls to _set_preferred_cluster being issued at
- * the same time. Avoid the overhead of rechecking the preferred
- * cluster in such cases.
- */
- if (wallclock - grp->last_update < sched_ravg_window / 10)
- return;
-
- list_for_each_entry(p, &grp->tasks, grp_list) {
- if (boost_on_big && task_sched_boost(p)) {
- group_boost = true;
- break;
- }
-
- if (p->ravg.mark_start < wallclock -
- (sched_ravg_window * sched_ravg_hist_size))
- continue;
-
- combined_demand += p->ravg.demand;
-
- }
-
- grp->preferred_cluster = best_cluster(grp,
- combined_demand, group_boost);
- grp->last_update = sched_ktime_clock();
- trace_sched_set_preferred_cluster(grp, combined_demand);
-}
-
-void set_preferred_cluster(struct related_thread_group *grp)
-{
- raw_spin_lock(&grp->lock);
- _set_preferred_cluster(grp);
- raw_spin_unlock(&grp->lock);
-}
-
-#define ADD_TASK 0
-#define REM_TASK 1
-
-#define DEFAULT_CGROUP_COLOC_ID 1
-
-/*
- * Task's cpu usage is accounted in:
- * rq->curr/prev_runnable_sum, when its ->grp is NULL
- * grp->cpu_time[cpu]->curr/prev_runnable_sum, when its ->grp is !NULL
- *
- * Transfer task's cpu usage between those counters when transitioning between
- * groups
- */
-static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
- struct task_struct *p, int event)
-{
- u64 wallclock;
- struct group_cpu_time *cpu_time;
- u64 *src_curr_runnable_sum, *dst_curr_runnable_sum;
- u64 *src_prev_runnable_sum, *dst_prev_runnable_sum;
- u64 *src_nt_curr_runnable_sum, *dst_nt_curr_runnable_sum;
- u64 *src_nt_prev_runnable_sum, *dst_nt_prev_runnable_sum;
- int migrate_type;
- int cpu = cpu_of(rq);
- bool new_task;
- int i;
-
- if (!sched_freq_aggregate)
- return;
-
- wallclock = sched_ktime_clock();
-
- update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
- update_task_ravg(p, rq, TASK_UPDATE, wallclock, 0);
- new_task = is_new_task(p);
-
- cpu_time = &rq->grp_time;
- if (event == ADD_TASK) {
- migrate_type = RQ_TO_GROUP;
-
- src_curr_runnable_sum = &rq->curr_runnable_sum;
- dst_curr_runnable_sum = &cpu_time->curr_runnable_sum;
- src_prev_runnable_sum = &rq->prev_runnable_sum;
- dst_prev_runnable_sum = &cpu_time->prev_runnable_sum;
-
- src_nt_curr_runnable_sum = &rq->nt_curr_runnable_sum;
- dst_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
- src_nt_prev_runnable_sum = &rq->nt_prev_runnable_sum;
- dst_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
-
- *src_curr_runnable_sum -= p->ravg.curr_window_cpu[cpu];
- *src_prev_runnable_sum -= p->ravg.prev_window_cpu[cpu];
- if (new_task) {
- *src_nt_curr_runnable_sum -=
- p->ravg.curr_window_cpu[cpu];
- *src_nt_prev_runnable_sum -=
- p->ravg.prev_window_cpu[cpu];
- }
-
- update_cluster_load_subtractions(p, cpu,
- rq->window_start, new_task);
-
- } else {
- migrate_type = GROUP_TO_RQ;
-
- src_curr_runnable_sum = &cpu_time->curr_runnable_sum;
- dst_curr_runnable_sum = &rq->curr_runnable_sum;
- src_prev_runnable_sum = &cpu_time->prev_runnable_sum;
- dst_prev_runnable_sum = &rq->prev_runnable_sum;
-
- src_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
- dst_nt_curr_runnable_sum = &rq->nt_curr_runnable_sum;
- src_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
- dst_nt_prev_runnable_sum = &rq->nt_prev_runnable_sum;
-
- *src_curr_runnable_sum -= p->ravg.curr_window;
- *src_prev_runnable_sum -= p->ravg.prev_window;
- if (new_task) {
- *src_nt_curr_runnable_sum -= p->ravg.curr_window;
- *src_nt_prev_runnable_sum -= p->ravg.prev_window;
- }
-
- /*
- * Need to reset curr/prev windows for all CPUs, not just the
- * ones in the same cluster. Since inter cluster migrations
- * did not result in the appropriate book keeping, the values
- * per CPU would be inaccurate.
- */
- for_each_possible_cpu(i) {
- p->ravg.curr_window_cpu[i] = 0;
- p->ravg.prev_window_cpu[i] = 0;
- }
- }
-
- *dst_curr_runnable_sum += p->ravg.curr_window;
- *dst_prev_runnable_sum += p->ravg.prev_window;
- if (new_task) {
- *dst_nt_curr_runnable_sum += p->ravg.curr_window;
- *dst_nt_prev_runnable_sum += p->ravg.prev_window;
- }
-
- /*
- * When a task enters or exits a group, its curr and prev windows are
- * moved to a single CPU. This behavior might be sub-optimal in the
- * exit case, however, it saves us the overhead of handling inter
- * cluster migration fixups while the task is part of a related group.
- */
- p->ravg.curr_window_cpu[cpu] = p->ravg.curr_window;
- p->ravg.prev_window_cpu[cpu] = p->ravg.prev_window;
-
- trace_sched_migration_update_sum(p, migrate_type, rq);
-
- BUG_ON((s64)*src_curr_runnable_sum < 0);
- BUG_ON((s64)*src_prev_runnable_sum < 0);
- BUG_ON((s64)*src_nt_curr_runnable_sum < 0);
- BUG_ON((s64)*src_nt_prev_runnable_sum < 0);
-}
-
-static inline struct related_thread_group*
-lookup_related_thread_group(unsigned int group_id)
-{
- return related_thread_groups[group_id];
-}
-
-int alloc_related_thread_groups(void)
-{
- int i, ret;
- struct related_thread_group *grp;
-
- /* group_id = 0 is invalid as it's the special id to remove a task from its group. */
- for (i = 1; i < MAX_NUM_CGROUP_COLOC_ID; i++) {
- grp = kzalloc(sizeof(*grp), GFP_NOWAIT);
- if (!grp) {
- ret = -ENOMEM;
- goto err;
- }
-
- grp->id = i;
- INIT_LIST_HEAD(&grp->tasks);
- INIT_LIST_HEAD(&grp->list);
- raw_spin_lock_init(&grp->lock);
-
- related_thread_groups[i] = grp;
- }
-
- return 0;
-
-err:
- for (i = 1; i < MAX_NUM_CGROUP_COLOC_ID; i++) {
- grp = lookup_related_thread_group(i);
- if (grp) {
- kfree(grp);
- related_thread_groups[i] = NULL;
- } else {
- break;
- }
- }
-
- return ret;
-}
-
-static void remove_task_from_group(struct task_struct *p)
-{
- struct related_thread_group *grp = p->grp;
- struct rq *rq;
- int empty_group = 1;
- struct rq_flags rf;
-
- raw_spin_lock(&grp->lock);
-
- rq = __task_rq_lock(p, &rf);
- transfer_busy_time(rq, p->grp, p, REM_TASK);
- list_del_init(&p->grp_list);
- rcu_assign_pointer(p->grp, NULL);
- __task_rq_unlock(rq, &rf);
-
-
- if (!list_empty(&grp->tasks)) {
- empty_group = 0;
- _set_preferred_cluster(grp);
- }
-
- raw_spin_unlock(&grp->lock);
-
- /* Reserved groups cannot be destroyed */
- if (empty_group && grp->id != DEFAULT_CGROUP_COLOC_ID)
- /*
- * We test whether grp->list is attached with list_empty()
- * hence re-init the list after deletion.
- */
- list_del_init(&grp->list);
-}
-
-static int
-add_task_to_group(struct task_struct *p, struct related_thread_group *grp)
-{
- struct rq *rq;
- struct rq_flags rf;
-
- raw_spin_lock(&grp->lock);
-
- /*
- * Change p->grp under rq->lock. Will prevent races with read-side
- * reference of p->grp in various hot-paths
- */
- rq = __task_rq_lock(p, &rf);
- transfer_busy_time(rq, grp, p, ADD_TASK);
- list_add(&p->grp_list, &grp->tasks);
- rcu_assign_pointer(p->grp, grp);
- __task_rq_unlock(rq, &rf);
-
- _set_preferred_cluster(grp);
-
- raw_spin_unlock(&grp->lock);
-
- return 0;
-}
-
-void add_new_task_to_grp(struct task_struct *new)
-{
- unsigned long flags;
- struct related_thread_group *grp;
- struct task_struct *leader = new->group_leader;
- unsigned int leader_grp_id = sched_get_group_id(leader);
-
- if (!sysctl_sched_enable_thread_grouping &&
- leader_grp_id != DEFAULT_CGROUP_COLOC_ID)
- return;
-
- if (thread_group_leader(new))
- return;
-
- if (leader_grp_id == DEFAULT_CGROUP_COLOC_ID) {
- if (!same_schedtune(new, leader))
- return;
- }
-
- write_lock_irqsave(&related_thread_group_lock, flags);
-
- rcu_read_lock();
- grp = task_related_thread_group(leader);
- rcu_read_unlock();
-
- /*
- * It's possible that someone already added the new task to the
- * group. A leader's thread group is updated prior to calling
- * this function. It's also possible that the leader has exited
- * the group. In either case, there is nothing else to do.
- */
- if (!grp || new->grp) {
- write_unlock_irqrestore(&related_thread_group_lock, flags);
- return;
- }
-
- raw_spin_lock(&grp->lock);
-
- rcu_assign_pointer(new->grp, grp);
- list_add(&new->grp_list, &grp->tasks);
-
- raw_spin_unlock(&grp->lock);
- write_unlock_irqrestore(&related_thread_group_lock, flags);
-}
-
-static int __sched_set_group_id(struct task_struct *p, unsigned int group_id)
-{
- int rc = 0;
- unsigned long flags;
- struct related_thread_group *grp = NULL;
-
- if (group_id >= MAX_NUM_CGROUP_COLOC_ID)
- return -EINVAL;
-
- raw_spin_lock_irqsave(&p->pi_lock, flags);
- write_lock(&related_thread_group_lock);
-
- /* Switching from one group to another directly is not permitted */
- if ((current != p && p->flags & PF_EXITING) ||
- (!p->grp && !group_id) ||
- (p->grp && group_id))
- goto done;
-
- if (!group_id) {
- remove_task_from_group(p);
- goto done;
- }
-
- grp = lookup_related_thread_group(group_id);
- if (list_empty(&grp->list))
- list_add(&grp->list, &active_related_thread_groups);
-
- rc = add_task_to_group(p, grp);
-done:
- write_unlock(&related_thread_group_lock);
- raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-
- return rc;
-}
-
-int sched_set_group_id(struct task_struct *p, unsigned int group_id)
-{
- /* DEFAULT_CGROUP_COLOC_ID is a reserved id */
- if (group_id == DEFAULT_CGROUP_COLOC_ID)
- return -EINVAL;
-
- return __sched_set_group_id(p, group_id);
-}
-
-unsigned int sched_get_group_id(struct task_struct *p)
-{
- unsigned int group_id;
- struct related_thread_group *grp;
-
- rcu_read_lock();
- grp = task_related_thread_group(p);
- group_id = grp ? grp->id : 0;
- rcu_read_unlock();
-
- return group_id;
-}
-
-#if defined(CONFIG_SCHED_TUNE) && defined(CONFIG_CGROUP_SCHEDTUNE)
-/*
- * We create a default colocation group at boot. There is no need to
- * synchronize tasks between cgroups at creation time because the
- * correct cgroup hierarchy is not available at boot. Therefore cgroup
- * colocation is turned off by default even though the colocation group
- * itself has been allocated. Furthermore this colocation group cannot
- * be destroyed once it has been created. All of this has been done as
- * part of runtime optimizations.
- *
- * The job of synchronizing tasks to the colocation group is done when
- * the colocation flag in the cgroup is turned on.
- */
-static int __init create_default_coloc_group(void)
-{
- struct related_thread_group *grp = NULL;
- unsigned long flags;
-
- grp = lookup_related_thread_group(DEFAULT_CGROUP_COLOC_ID);
- write_lock_irqsave(&related_thread_group_lock, flags);
- list_add(&grp->list, &active_related_thread_groups);
- write_unlock_irqrestore(&related_thread_group_lock, flags);
-
- update_freq_aggregate_threshold(MAX_FREQ_AGGR_THRESH);
- return 0;
-}
-late_initcall(create_default_coloc_group);
-
-int sync_cgroup_colocation(struct task_struct *p, bool insert)
-{
- unsigned int grp_id = insert ? DEFAULT_CGROUP_COLOC_ID : 0;
-
- return __sched_set_group_id(p, grp_id);
-}
-#endif
-
-static void update_cpu_cluster_capacity(const cpumask_t *cpus)
+void update_cpu_cluster_capacity(const cpumask_t *cpus)
{
int i;
struct sched_cluster *cluster;
@@ -4120,66 +1518,6 @@
update_cpu_cluster_capacity(cpus);
}
-static int cpufreq_notifier_policy(struct notifier_block *nb,
- unsigned long val, void *data)
-{
- struct cpufreq_policy *policy = (struct cpufreq_policy *)data;
- struct sched_cluster *cluster = NULL;
- struct cpumask policy_cluster = *policy->related_cpus;
- unsigned int orig_max_freq = 0;
- int i, j, update_capacity = 0;
-
- if (val != CPUFREQ_NOTIFY && val != CPUFREQ_REMOVE_POLICY &&
- val != CPUFREQ_CREATE_POLICY)
- return 0;
-
- if (val == CPUFREQ_REMOVE_POLICY || val == CPUFREQ_CREATE_POLICY) {
- update_min_max_capacity();
- return 0;
- }
-
- max_possible_freq = max(max_possible_freq, policy->cpuinfo.max_freq);
- if (min_max_freq == 1)
- min_max_freq = UINT_MAX;
- min_max_freq = min(min_max_freq, policy->cpuinfo.max_freq);
- BUG_ON(!min_max_freq);
- BUG_ON(!policy->max);
-
- for_each_cpu(i, &policy_cluster) {
- cluster = cpu_rq(i)->cluster;
- cpumask_andnot(&policy_cluster, &policy_cluster,
- &cluster->cpus);
-
- orig_max_freq = cluster->max_freq;
- cluster->min_freq = policy->min;
- cluster->max_freq = policy->max;
- cluster->cur_freq = policy->cur;
-
- if (!cluster->freq_init_done) {
- mutex_lock(&cluster_lock);
- for_each_cpu(j, &cluster->cpus)
- cpumask_copy(&cpu_rq(j)->freq_domain_cpumask,
- policy->related_cpus);
- cluster->max_possible_freq = policy->cpuinfo.max_freq;
- cluster->max_possible_capacity =
- compute_max_possible_capacity(cluster);
- cluster->freq_init_done = true;
-
- sort_clusters();
- update_all_clusters_stats();
- mutex_unlock(&cluster_lock);
- continue;
- }
-
- update_capacity += (orig_max_freq != cluster->max_freq);
- }
-
- if (update_capacity)
- update_cpu_cluster_capacity(policy->related_cpus);
-
- return 0;
-}
-
static int cpufreq_notifier_trans(struct notifier_block *nb,
unsigned long val, void *data)
{
@@ -4232,10 +1570,6 @@
return 0;
}
-static struct notifier_block notifier_policy_block = {
- .notifier_call = cpufreq_notifier_policy
-};
-
static struct notifier_block notifier_trans_block = {
.notifier_call = cpufreq_notifier_trans
};
@@ -4251,14 +1585,8 @@
static int register_sched_callback(void)
{
- int ret;
-
- ret = cpufreq_register_notifier(¬ifier_policy_block,
- CPUFREQ_POLICY_NOTIFIER);
-
- if (!ret)
- ret = cpufreq_register_notifier(¬ifier_trans_block,
- CPUFREQ_TRANSITION_NOTIFIER);
+ cpufreq_register_notifier(¬ifier_trans_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
register_cpu_pwr_stats_ready_notifier(¬ifier_pwr_stats_ready);
@@ -4273,49 +1601,6 @@
*/
core_initcall(register_sched_callback);
-int update_preferred_cluster(struct related_thread_group *grp,
- struct task_struct *p, u32 old_load)
-{
- u32 new_load = task_load(p);
-
- if (!grp)
- return 0;
-
- /*
- * Update if task's load has changed significantly or a complete window
- * has passed since we last updated preference
- */
- if (abs(new_load - old_load) > sched_ravg_window / 4 ||
- sched_ktime_clock() - grp->last_update > sched_ravg_window)
- return 1;
-
- return 0;
-}
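A sketch of the two re-evaluation triggers above: a load swing larger than a quarter of a window, or more than one full window since the last update (20 ms window assumed):

#include <stdint.h>
#include <stdio.h>

#define WINDOW_NS 20000000ULL                   /* assumed 20 ms window */

static int need_cluster_update(uint32_t new_load, uint32_t old_load,
                               uint64_t now, uint64_t last_update)
{
        uint32_t diff = new_load > old_load ? new_load - old_load
                                            : old_load - new_load;

        return diff > WINDOW_NS / 4 || now - last_update > WINDOW_NS;
}

int main(void)
{
        /* 7 ms load swing against a 5 ms threshold -> update */
        printf("%d\n", need_cluster_update(9000000, 2000000,
                                           40000000ULL, 30000000ULL));
        return 0;
}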
-
-bool early_detection_notify(struct rq *rq, u64 wallclock)
-{
- struct task_struct *p;
- int loop_max = 10;
-
- if (sched_boost_policy() == SCHED_BOOST_NONE || !rq->cfs.h_nr_running)
- return 0;
-
- rq->ed_task = NULL;
- list_for_each_entry(p, &rq->cfs_tasks, se.group_node) {
- if (!loop_max)
- break;
-
- if (wallclock - p->last_wake_ts >= EARLY_DETECTION_DURATION) {
- rq->ed_task = p;
- return 1;
- }
-
- loop_max--;
- }
-
- return 0;
-}
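A sketch of the early-detection scan above: look at no more than the first 10 runnable CFS tasks and flag the first one that has waited at least EARLY_DETECTION_DURATION (the threshold value used below is an assumption):

#include <stdint.h>
#include <stdio.h>

#define EARLY_DETECTION_DURATION 9500000ULL     /* assumed threshold */
#define LOOP_MAX 10

struct fake_task { uint64_t last_wake_ts; };

static int find_ed_task(const struct fake_task *tasks, int nr, uint64_t now)
{
        int i, scan = nr < LOOP_MAX ? nr : LOOP_MAX;

        for (i = 0; i < scan; i++)
                if (now - tasks[i].last_wake_ts >= EARLY_DETECTION_DURATION)
                        return i;               /* first task waiting too long */

        return -1;
}

int main(void)
{
        struct fake_task t[3] = { { 19000000 }, { 5000000 }, { 18000000 } };

        printf("ed task index = %d\n", find_ed_task(t, 3, 20000000ULL));
        return 0;
}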
-
void update_avg_burst(struct task_struct *p)
{
update_avg(&p->ravg.avg_burst, p->ravg.curr_burst);
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
index 13c8818..b852cbe 100644
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -78,7 +78,7 @@
{
}
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
static void
fixup_hmp_sched_stats_idle(struct rq *rq, struct task_struct *p,
u32 new_task_load, u32 new_pred_demand)
@@ -114,7 +114,7 @@
.prio_changed = prio_changed_idle,
.switched_to = switched_to_idle,
.update_curr = update_curr_idle,
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
.fixup_hmp_sched_stats = fixup_hmp_sched_stats_idle,
#endif
};
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 709f719..2703e0d 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -4,12 +4,13 @@
*/
#include "sched.h"
+#include "walt.h"
#include <linux/slab.h>
#include <linux/irq_work.h>
#include <trace/events/sched.h>
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
static void
inc_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p)
@@ -37,6 +38,7 @@
#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);
+#ifdef CONFIG_SCHED_HMP
static int
select_task_rq_rt_hmp(struct task_struct *p, int cpu, int sd_flag, int flags)
{
@@ -50,8 +52,9 @@
return cpu;
}
+#endif /* CONFIG_SCHED_HMP */
#endif /* CONFIG_SMP */
-#else /* CONFIG_SCHED_HMP */
+#else /* CONFIG_SCHED_WALT */
static inline void
inc_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p) { }
@@ -1527,9 +1530,10 @@
* This test is optimistic, if we get it wrong the load-balancer
* will have to sort it out.
*/
- if (curr && unlikely(rt_task(curr)) &&
- (tsk_nr_cpus_allowed(curr) < 2 ||
- curr->prio <= p->prio)) {
+ if (energy_aware() ||
+ (curr && unlikely(rt_task(curr)) &&
+ (tsk_nr_cpus_allowed(curr) < 2 ||
+ curr->prio <= p->prio))) {
int target = find_lowest_rq(p);
/*
@@ -1820,12 +1824,35 @@
}
#endif /* CONFIG_SCHED_HMP */
+static inline unsigned long task_util(struct task_struct *p)
+{
+#ifdef CONFIG_SCHED_WALT
+ if (!walt_disabled && sysctl_sched_use_walt_task_util) {
+ u64 demand = p->ravg.demand;
+
+ return (demand << 10) / sched_ravg_window;
+ }
+#endif
+ return p->se.avg.util_avg;
+}
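The added task_util() maps a WALT demand onto the usual 0..1024 capacity scale; a standalone sketch of that scaling, assuming a 20 ms ravg window:

#include <stdint.h>
#include <stdio.h>

#define RAVG_WINDOW 20000000ULL                 /* assumed 20 ms window */

static unsigned long walt_task_util(uint64_t demand_ns)
{
        return (unsigned long)((demand_ns << 10) / RAVG_WINDOW);
}

int main(void)
{
        /* 5 ms of demand in a 20 ms window is about a quarter of capacity */
        printf("util = %lu\n", walt_task_util(5000000ULL));
        return 0;
}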
+
static int find_lowest_rq(struct task_struct *task)
{
struct sched_domain *sd;
+ struct sched_group *sg, *sg_target;
struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
int this_cpu = smp_processor_id();
- int cpu = task_cpu(task);
+ int cpu, best_cpu;
+ struct cpumask search_cpu, backup_search_cpu;
+ unsigned long cpu_capacity;
+ unsigned long best_capacity;
+ unsigned long util, best_cpu_util = ULONG_MAX;
+ int best_cpu_idle_idx = INT_MAX;
+ int cpu_idle_idx = -1;
+ long new_util_cum;
+ int max_spare_cap_cpu = -1;
+ long max_spare_cap = -LONG_MAX;
+ bool placement_boost;
#ifdef CONFIG_SCHED_HMP
return find_lowest_rq_hmp(task);
@@ -1841,6 +1868,117 @@
if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
return -1; /* No targets found */
+ if (energy_aware() && sysctl_sched_is_big_little) {
+ sg_target = NULL;
+ best_cpu = -1;
+
+ /*
+ * Since this code is inside sched_is_big_little, we are going
+ * to assume that boost policy is SCHED_BOOST_ON_BIG
+ */
+ placement_boost = sched_boost() == FULL_THROTTLE_BOOST;
+ best_capacity = placement_boost ? 0 : ULONG_MAX;
+
+ rcu_read_lock();
+ sd = rcu_dereference(per_cpu(sd_ea, task_cpu(task)));
+ if (!sd) {
+ rcu_read_unlock();
+ goto noea;
+ }
+
+ sg = sd->groups;
+ do {
+ cpu = group_first_cpu(sg);
+ cpu_capacity = capacity_orig_of(cpu);
+
+ if (unlikely(placement_boost)) {
+ if (cpu_capacity > best_capacity) {
+ best_capacity = cpu_capacity;
+ sg_target = sg;
+ }
+ } else {
+ if (cpu_capacity < best_capacity) {
+ best_capacity = cpu_capacity;
+ sg_target = sg;
+ }
+ }
+ } while (sg = sg->next, sg != sd->groups);
+ rcu_read_unlock();
+
+ cpumask_and(&search_cpu, lowest_mask,
+ sched_group_cpus(sg_target));
+ cpumask_copy(&backup_search_cpu, lowest_mask);
+ cpumask_andnot(&backup_search_cpu, &backup_search_cpu,
+ &search_cpu);
+
+retry:
+ for_each_cpu(cpu, &search_cpu) {
+ /*
+ * Don't use capacity_curr_of() since it will
+ * double count rt task load.
+ */
+ util = cpu_util(cpu);
+ if (!cpu_overutilized(cpu)) {
+ if (cpu_isolated(cpu))
+ continue;
+
+ if (sched_cpu_high_irqload(cpu))
+ continue;
+
+ new_util_cum = cpu_util_cum(cpu, 0);
+
+ if (!task_in_cum_window_demand(cpu_rq(cpu),
+ task))
+ new_util_cum += task_util(task);
+
+ trace_sched_cpu_util(task, cpu, task_util(task),
+ 0, new_util_cum, 0);
+
+ if (sysctl_sched_cstate_aware)
+ cpu_idle_idx =
+ (cpu == smp_processor_id() ||
+ cpu_rq(cpu)->nr_running) ?
+ -1 :
+ idle_get_state_idx(cpu_rq(cpu));
+
+ if (add_capacity_margin(new_util_cum) <
+ capacity_curr_of(cpu)) {
+ if (cpu_idle_idx < best_cpu_idle_idx ||
+ (best_cpu != task_cpu(task) &&
+ (best_cpu_idle_idx ==
+ cpu_idle_idx &&
+ best_cpu_util > util))) {
+ best_cpu_util = util;
+ best_cpu = cpu;
+ best_cpu_idle_idx =
+ cpu_idle_idx;
+ }
+ } else {
+ long spare_cap = capacity_of(cpu) -
+ util;
+
+ if (spare_cap > 0 &&
+ max_spare_cap < spare_cap) {
+ max_spare_cap_cpu = cpu;
+ max_spare_cap = spare_cap;
+ }
+ }
+ }
+ }
+
+ if (best_cpu != -1) {
+ return best_cpu;
+ } else if (max_spare_cap_cpu != -1) {
+ return max_spare_cap_cpu;
+ } else if (!cpumask_empty(&backup_search_cpu)) {
+ cpumask_copy(&search_cpu, &backup_search_cpu);
+ cpumask_clear(&backup_search_cpu);
+ goto retry;
+ }
+ }
+
+noea:
+ cpu = task_cpu(task);
/*
* At this point we have built a mask of cpus representing the
* lowest priority tasks in the system. Now we want to elect
@@ -2563,7 +2701,7 @@
.switched_to = switched_to_rt,
.update_curr = update_curr_rt,
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
.fixup_hmp_sched_stats = fixup_hmp_sched_stats_rt,
#endif
};
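The EAS branch added to find_lowest_rq() above selects an RT target in three tiers within the chosen cluster: a CPU whose current capacity still covers the margin-adjusted cumulative demand (preferring a lower idle-state index, then lower utilization), otherwise the CPU with the most spare capacity, otherwise a retry over the backup (other-cluster) mask. The sketch below is illustrative only; struct rt_cand and better_first_tier() are hypothetical stand-ins for the per-CPU values the loop reads, not kernel structures.

#include <stdbool.h>

/* Hypothetical per-candidate snapshot of the values used in the loop above. */
struct rt_cand {
	int idle_idx;            /* idle_get_state_idx(); -1 if local or busy */
	unsigned long util;      /* cpu_util(cpu) */
};

/* First-tier tie break: lower idle index wins, then lower utilization. */
static bool better_first_tier(const struct rt_cand *a, const struct rt_cand *b)
{
	if (a->idle_idx != b->idle_idx)
		return a->idle_idx < b->idle_idx;
	return a->util < b->util;
}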
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 5e25011..29b6e3d 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -25,9 +25,8 @@
struct rq;
struct cpuidle_state;
-#ifdef CONFIG_SCHED_HMP
-#define NUM_TRACKED_WINDOWS 2
-#define NUM_LOAD_INDICES 1000
+#ifdef CONFIG_SCHED_WALT
+extern unsigned int sched_ravg_window;
struct hmp_sched_stats {
int nr_big_tasks;
@@ -35,10 +34,9 @@
u64 pred_demands_sum;
};
-struct load_subtractions {
- u64 window_start;
- u64 subs;
- u64 new_subs;
+struct cpu_cycle {
+ u64 cycles;
+ u64 time;
};
struct group_cpu_time {
@@ -48,6 +46,15 @@
u64 nt_prev_runnable_sum;
};
+struct load_subtractions {
+ u64 window_start;
+ u64 subs;
+ u64 new_subs;
+};
+
+#define NUM_TRACKED_WINDOWS 2
+#define NUM_LOAD_INDICES 1000
+
struct sched_cluster {
raw_spinlock_t load_lock;
struct list_head list;
@@ -72,15 +79,13 @@
unsigned int static_cluster_pwr_cost;
int notifier_sent;
bool wake_up_idle;
-};
-
-struct cpu_cycle {
- u64 cycles;
- u64 time;
+ u64 aggr_grp_load;
};
extern unsigned int sched_disable_window_stats;
-#endif /* CONFIG_SCHED_HMP */
+
+extern struct timer_list sched_grp_timer;
+#endif /* CONFIG_SCHED_WALT */
/* task_struct::on_rq states: */
@@ -507,7 +512,7 @@
#endif
#ifdef CONFIG_CFS_BANDWIDTH
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
struct hmp_sched_stats hmp_stats;
#endif
@@ -764,13 +769,14 @@
u64 max_idle_balance_cost;
#endif
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
struct sched_cluster *cluster;
struct cpumask freq_domain_cpumask;
struct hmp_sched_stats hmp_stats;
int cstate, wakeup_latency, wakeup_energy;
u64 window_start;
+ s64 cum_window_start;
u64 load_reported_window;
unsigned long hmp_flags;
@@ -786,6 +792,7 @@
u64 prev_runnable_sum;
u64 nt_curr_runnable_sum;
u64 nt_prev_runnable_sum;
+ u64 cum_window_demand;
struct group_cpu_time grp_time;
struct load_subtractions load_subs[NUM_TRACKED_WINDOWS];
DECLARE_BITMAP_ARRAY(top_tasks_bitmap,
@@ -794,6 +801,7 @@
u8 curr_table;
int prev_top;
int curr_top;
+ struct irq_work irq_work;
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
@@ -1421,7 +1429,7 @@
#ifdef CONFIG_FAIR_GROUP_SCHED
void (*task_change_group) (struct task_struct *p, int type);
#endif
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
void (*fixup_hmp_sched_stats)(struct rq *rq, struct task_struct *p,
u32 new_task_load, u32 new_pred_demand);
#endif
@@ -1458,6 +1466,8 @@
extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
+bool cpu_overutilized(int cpu);
+
#endif
#ifdef CONFIG_CPU_IDLE
@@ -1702,7 +1712,6 @@
}
extern unsigned int sysctl_sched_use_walt_cpu_util;
-extern unsigned int walt_ravg_window;
extern unsigned int walt_disabled;
/*
@@ -1733,13 +1742,14 @@
*/
static inline unsigned long __cpu_util(int cpu, int delta)
{
- unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
+ u64 util = cpu_rq(cpu)->cfs.avg.util_avg;
unsigned long capacity = capacity_orig_of(cpu);
#ifdef CONFIG_SCHED_WALT
if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
- util = cpu_rq(cpu)->prev_runnable_sum << SCHED_CAPACITY_SHIFT;
- do_div(util, walt_ravg_window);
+ util = cpu_rq(cpu)->hmp_stats.cumulative_runnable_avg;
+ util = div64_u64(util,
+ sched_ravg_window >> SCHED_CAPACITY_SHIFT);
}
#endif
delta += util;
@@ -1754,11 +1764,76 @@
return __cpu_util(cpu, 0);
}
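For reference, the WALT branch above turns window busy time into the 0..1024 capacity scale; dividing by (sched_ravg_window >> SCHED_CAPACITY_SHIFT) is equivalent to (busy << SCHED_CAPACITY_SHIFT) / sched_ravg_window. A standalone sketch of the arithmetic, with an assumed 20 ms window and 5 ms of busy time:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t window = 20000000ULL;   /* assumed sched_ravg_window: 20 ms */
	uint64_t busy   =  5000000ULL;   /* 5 ms of accumulated busy time    */

	/* same arithmetic as the WALT branch of __cpu_util() above */
	uint64_t util = busy / (window >> 10);   /* SCHED_CAPACITY_SHIFT = 10 */

	printf("util = %llu / 1024\n", (unsigned long long)util);   /* 256 */
	return 0;
}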
+struct sched_walt_cpu_load {
+ unsigned long prev_window_util;
+ unsigned long nl;
+ unsigned long pl;
+};
+
+static inline unsigned long cpu_util_cum(int cpu, int delta)
+{
+ u64 util = cpu_rq(cpu)->cfs.avg.util_avg;
+ unsigned long capacity = capacity_orig_of(cpu);
+
+#ifdef CONFIG_SCHED_WALT
+ if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
+ util = cpu_rq(cpu)->cum_window_demand;
+ util = div64_u64(util,
+ sched_ravg_window >> SCHED_CAPACITY_SHIFT);
+ }
#endif
+ delta += util;
+ if (delta < 0)
+ return 0;
+
+ return (delta >= capacity) ? capacity : delta;
+}
+
+#ifdef CONFIG_SCHED_WALT
+u64 freq_policy_load(struct rq *rq);
+#endif
+
+static inline unsigned long
+cpu_util_freq(int cpu, struct sched_walt_cpu_load *walt_load)
+{
+ struct rq *rq = cpu_rq(cpu);
+ u64 util = rq->cfs.avg.util_avg;
+ unsigned long capacity = capacity_orig_of(cpu);
+
+#ifdef CONFIG_SCHED_WALT
+ if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
+
+ util = freq_policy_load(rq);
+ util = div64_u64(util,
+ sched_ravg_window >> SCHED_CAPACITY_SHIFT);
+
+ if (walt_load) {
+ u64 nl = cpu_rq(cpu)->nt_prev_runnable_sum +
+ rq->grp_time.nt_prev_runnable_sum;
+
+ nl = div64_u64(nl, sched_ravg_window >>
+ SCHED_CAPACITY_SHIFT);
+
+ walt_load->prev_window_util = util;
+ walt_load->nl = nl;
+ }
+ }
+#endif
+ return (util >= capacity) ? capacity : util;
+}
+#endif
+
+extern unsigned int capacity_margin_freq;
+
+static inline unsigned long add_capacity_margin(unsigned long cpu_capacity)
+{
+ cpu_capacity = cpu_capacity * capacity_margin_freq;
+ cpu_capacity /= SCHED_CAPACITY_SCALE;
+ return cpu_capacity;
+}
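add_capacity_margin() scales a cumulative utilization by capacity_margin_freq / SCHED_CAPACITY_SCALE before it is compared against capacity_curr_of() in the RT placement loop above. The value of capacity_margin_freq is not set in this hunk; the sketch assumes 1280 (about 25% headroom) purely for illustration.

/* Illustrative only: assumed capacity_margin_freq = 1280 (~25% headroom). */
static unsigned long margined_util(unsigned long cum_util)
{
	return cum_util * 1280 / 1024;   /* e.g. margined_util(400) == 500 */
}
/* A CPU is considered to fit only if margined_util(...) < capacity_curr_of(cpu). */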
#ifdef CONFIG_CPU_FREQ_GOV_SCHED
#define capacity_max SCHED_CAPACITY_SCALE
-extern unsigned int capacity_margin;
extern struct static_key __sched_freq;
static inline bool sched_freq(void)
@@ -2144,7 +2219,7 @@
{
struct update_util_data *data;
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
/*
* Skip if we've already reported, but not if this is an inter-cluster
* migration
@@ -2156,9 +2231,10 @@
rq->load_reported_window = rq->window_start;
#endif
- data = rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data));
+ data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
+ cpu_of(rq)));
if (data)
- data->func(data, rq_clock(rq), flags);
+ data->func(data, sched_clock(), flags);
}
static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags)
@@ -2179,7 +2255,7 @@
#define arch_scale_freq_invariant() (false)
#endif
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
static inline int cluster_first_cpu(struct sched_cluster *cluster)
{
@@ -2217,11 +2293,9 @@
#define IRQLOAD_MIGRATION 3
extern struct mutex policy_mutex;
-extern unsigned int sched_ravg_window;
extern unsigned int sched_disable_window_stats;
extern unsigned int max_possible_freq;
extern unsigned int min_max_freq;
-extern unsigned int pct_task_load(struct task_struct *p);
extern unsigned int max_possible_efficiency;
extern unsigned int min_possible_efficiency;
extern unsigned int max_capacity;
@@ -2245,37 +2319,14 @@
extern unsigned int __read_mostly sysctl_sched_spill_nr_run;
extern unsigned int __read_mostly sched_load_granule;
-extern void init_new_task_load(struct task_struct *p, bool idle_task);
extern u64 sched_ktime_clock(void);
-extern int got_boost_kick(void);
extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
-extern void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
- u64 wallclock, u64 irqtime);
-extern bool early_detection_notify(struct rq *rq, u64 wallclock);
-extern void clear_ed_task(struct task_struct *p, struct rq *rq);
-extern void fixup_busy_time(struct task_struct *p, int new_cpu);
-extern void clear_boost_kick(int cpu);
-extern void clear_hmp_request(int cpu);
-extern void mark_task_starting(struct task_struct *p);
-extern void set_window_start(struct rq *rq);
-extern void update_cluster_topology(void);
-extern void note_task_waking(struct task_struct *p, u64 wallclock);
-extern void set_task_last_switch_out(struct task_struct *p, u64 wallclock);
-extern void init_clusters(void);
extern void reset_cpu_hmp_stats(int cpu, int reset_cra);
-extern unsigned int max_task_load(void);
-extern void sched_account_irqtime(int cpu, struct task_struct *curr,
- u64 delta, u64 wallclock);
-extern void sched_account_irqstart(int cpu, struct task_struct *curr,
- u64 wallclock);
-extern unsigned int cpu_temp(int cpu);
-extern unsigned int nr_eligible_big_tasks(int cpu);
extern int update_preferred_cluster(struct related_thread_group *grp,
struct task_struct *p, u32 old_load);
extern void set_preferred_cluster(struct related_thread_group *grp);
extern void add_new_task_to_grp(struct task_struct *new);
extern unsigned int update_freq_aggregate_threshold(unsigned int threshold);
-extern void update_avg_burst(struct task_struct *p);
extern void update_avg(u64 *avg, u64 sample);
#define NO_BOOST 0
@@ -2283,11 +2334,6 @@
#define CONSERVATIVE_BOOST 2
#define RESTRAINED_BOOST 3
-static inline struct sched_cluster *cpu_cluster(int cpu)
-{
- return cpu_rq(cpu)->cluster;
-}
-
static inline int cpu_capacity(int cpu)
{
return cpu_rq(cpu)->cluster->capacity;
@@ -2308,11 +2354,6 @@
return cpu_rq(cpu)->cluster->efficiency;
}
-static inline unsigned int cpu_cur_freq(int cpu)
-{
- return cpu_rq(cpu)->cluster->cur_freq;
-}
-
static inline unsigned int cpu_min_freq(int cpu)
{
return cpu_rq(cpu)->cluster->min_freq;
@@ -2338,9 +2379,60 @@
return cpu_rq(cpu)->cluster->max_possible_freq;
}
-static inline int same_cluster(int src_cpu, int dst_cpu)
+/* Keep track of max/min capacity possible across CPUs "currently" */
+static inline void __update_min_max_capacity(void)
{
- return cpu_rq(src_cpu)->cluster == cpu_rq(dst_cpu)->cluster;
+ int i;
+ int max_cap = 0, min_cap = INT_MAX;
+
+ for_each_online_cpu(i) {
+ max_cap = max(max_cap, cpu_capacity(i));
+ min_cap = min(min_cap, cpu_capacity(i));
+ }
+
+ max_capacity = max_cap;
+ min_capacity = min_cap;
+}
+
+/*
+ * Return load_scale_factor of a cpu in reference to "most" efficient cpu, so
+ * that "most" efficient cpu gets a load_scale_factor of 1
+ */
+static inline unsigned long
+load_scale_cpu_efficiency(struct sched_cluster *cluster)
+{
+ return DIV_ROUND_UP(1024 * max_possible_efficiency,
+ cluster->efficiency);
+}
+
+/*
+ * Return load_scale_factor of a cpu in reference to cpu with best max_freq
+ * (max_possible_freq), so that one with best max_freq gets a load_scale_factor
+ * of 1.
+ */
+static inline unsigned long load_scale_cpu_freq(struct sched_cluster *cluster)
+{
+ return DIV_ROUND_UP(1024 * max_possible_freq,
+ cluster_max_freq(cluster));
+}
+
+static inline int compute_load_scale_factor(struct sched_cluster *cluster)
+{
+ int load_scale = 1024;
+
+ /*
+ * load_scale_factor accounts for the fact that task load is measured
+ * in reference to the "best" performing cpu. A task's load must be
+ * scaled up by this factor to judge whether it fits on a less capable
+ * (little) cpu.
+ */
+ load_scale *= load_scale_cpu_efficiency(cluster);
+ load_scale >>= 10;
+
+ load_scale *= load_scale_cpu_freq(cluster);
+ load_scale >>= 10;
+
+ return load_scale;
}
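A worked example of the arithmetic above, using assumed efficiency and frequency values:

/*
 * Worked example (assumed values):
 *   efficiency = 1024, max_possible_efficiency = 2048
 *   cluster_max_freq = 1800, max_possible_freq = 2800 (same unit)
 *
 *   load_scale_cpu_efficiency() = DIV_ROUND_UP(1024 * 2048, 1024) = 2048
 *   load_scale_cpu_freq()       = DIV_ROUND_UP(1024 * 2800, 1800) = 1593
 *
 *   load_scale = ((1024 * 2048) >> 10) = 2048
 *   load_scale = ((2048 * 1593) >> 10) = 3186
 *
 * i.e. demand measured on this slower cluster is scaled up ~3.1x before
 * being compared against thresholds referenced to the best-performing cpu.
 */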
static inline int cpu_max_power_cost(int cpu)
@@ -2353,11 +2445,6 @@
return cpu_rq(cpu)->cluster->min_power_cost;
}
-static inline u32 cpu_cycles_to_freq(u64 cycles, u32 period)
-{
- return div64_u64(cycles, period);
-}
-
static inline bool hmp_capable(void)
{
return max_possible_capacity != min_max_possible_capacity;
@@ -2380,91 +2467,49 @@
return task_load;
}
+/*
+ * Return 'capacity' of a cpu in reference to "least" efficient cpu, such that
+ * least efficient cpu gets capacity of 1024
+ */
+static unsigned long
+capacity_scale_cpu_efficiency(struct sched_cluster *cluster)
+{
+ return (1024 * cluster->efficiency) / min_possible_efficiency;
+}
+
+/*
+ * Return 'capacity' of a cpu in reference to cpu with lowest max_freq
+ * (min_max_freq), such that one with lowest max_freq gets capacity of 1024.
+ */
+static unsigned long capacity_scale_cpu_freq(struct sched_cluster *cluster)
+{
+ return (1024 * cluster_max_freq(cluster)) / min_max_freq;
+}
+
+static inline int compute_capacity(struct sched_cluster *cluster)
+{
+ int capacity = 1024;
+
+ capacity *= capacity_scale_cpu_efficiency(cluster);
+ capacity >>= 10;
+
+ capacity *= capacity_scale_cpu_freq(cluster);
+ capacity >>= 10;
+
+ return capacity;
+}
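The same style of worked example for compute_capacity(), again with assumed values (efficiency 2048 against min_possible_efficiency 1024, cluster max frequency 2800 against min_max_freq 1800):

/*
 *   capacity_scale_cpu_efficiency() = (1024 * 2048) / 1024 = 2048
 *   capacity_scale_cpu_freq()       = (1024 * 2800) / 1800 = 1592
 *
 *   capacity = ((1024 * 2048) >> 10) = 2048
 *   capacity = ((2048 * 1592) >> 10) = 3184
 *
 * i.e. the least capable cluster stays at 1024 while this faster, more
 * efficient cluster is rated ~3.1x its capacity.
 */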
+
static inline unsigned int task_load(struct task_struct *p)
{
return p->ravg.demand;
}
-static inline void
-inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
- struct task_struct *p)
-{
- u32 task_load;
-
- if (sched_disable_window_stats)
- return;
-
- task_load = sched_disable_window_stats ? 0 : p->ravg.demand;
-
- stats->cumulative_runnable_avg += task_load;
- stats->pred_demands_sum += p->ravg.pred_demand;
-}
-
-static inline void
-dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,
- struct task_struct *p)
-{
- u32 task_load;
-
- if (sched_disable_window_stats)
- return;
-
- task_load = sched_disable_window_stats ? 0 : p->ravg.demand;
-
- stats->cumulative_runnable_avg -= task_load;
-
- BUG_ON((s64)stats->cumulative_runnable_avg < 0);
-
- stats->pred_demands_sum -= p->ravg.pred_demand;
- BUG_ON((s64)stats->pred_demands_sum < 0);
-}
-
-static inline void
-fixup_cumulative_runnable_avg(struct hmp_sched_stats *stats,
- struct task_struct *p, s64 task_load_delta,
- s64 pred_demand_delta)
-{
- if (sched_disable_window_stats)
- return;
-
- stats->cumulative_runnable_avg += task_load_delta;
- BUG_ON((s64)stats->cumulative_runnable_avg < 0);
-
- stats->pred_demands_sum += pred_demand_delta;
- BUG_ON((s64)stats->pred_demands_sum < 0);
-}
-
#define pct_to_real(tunable) \
(div64_u64((u64)tunable * (u64)max_task_load(), 100))
#define real_to_pct(tunable) \
(div64_u64((u64)tunable * (u64)100, (u64)max_task_load()))
-#define SCHED_HIGH_IRQ_TIMEOUT 3
-static inline u64 sched_irqload(int cpu)
-{
- struct rq *rq = cpu_rq(cpu);
- s64 delta;
-
- delta = get_jiffies_64() - rq->irqload_ts;
- /*
- * Current context can be preempted by irq and rq->irqload_ts can be
- * updated by irq context so that delta can be negative.
- * But this is okay and we can safely return as this means there
- * was recent irq occurrence.
- */
-
- if (delta < SCHED_HIGH_IRQ_TIMEOUT)
- return rq->avg_irqload;
- else
- return 0;
-}
-
-static inline int sched_cpu_high_irqload(int cpu)
-{
- return sched_irqload(cpu) >= sysctl_sched_cpu_high_irqload;
-}
-
static inline bool task_in_related_thread_group(struct task_struct *p)
{
return !!(rcu_access_pointer(p->grp) != NULL);
@@ -2478,12 +2523,6 @@
#define PRED_DEMAND_DELTA ((s64)new_pred_demand - p->ravg.pred_demand)
-extern void
-check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups);
-
-extern void notify_migration(int src_cpu, int dest_cpu,
- bool src_cpu_dead, struct task_struct *p);
-
/* Is frequency of two cpus synchronized with each other? */
static inline int same_freq_domain(int src_cpu, int dst_cpu)
{
@@ -2498,28 +2537,6 @@
#define BOOST_KICK 0
#define CPU_RESERVED 1
-static inline int is_reserved(int cpu)
-{
- struct rq *rq = cpu_rq(cpu);
-
- return test_bit(CPU_RESERVED, &rq->hmp_flags);
-}
-
-static inline int mark_reserved(int cpu)
-{
- struct rq *rq = cpu_rq(cpu);
-
- /* Name boost_flags as hmp_flags? */
- return test_and_set_bit(CPU_RESERVED, &rq->hmp_flags);
-}
-
-static inline void clear_reserved(int cpu)
-{
- struct rq *rq = cpu_rq(cpu);
-
- clear_bit(CPU_RESERVED, &rq->hmp_flags);
-}
-
static inline u64 cpu_cravg_sync(int cpu, int sync)
{
struct rq *rq = cpu_rq(cpu);
@@ -2542,43 +2559,24 @@
return load;
}
-static inline bool is_short_burst_task(struct task_struct *p)
-{
- return p->ravg.avg_burst < sysctl_sched_short_burst &&
- p->ravg.avg_sleep_time > sysctl_sched_short_sleep;
-}
-
-extern void check_for_migration(struct rq *rq, struct task_struct *p);
-extern void pre_big_task_count_change(const struct cpumask *cpus);
-extern void post_big_task_count_change(const struct cpumask *cpus);
-extern void set_hmp_defaults(void);
extern int power_delta_exceeded(unsigned int cpu_cost, unsigned int base_cost);
-extern unsigned int power_cost(int cpu, u64 demand);
extern void reset_all_window_stats(u64 window_start, unsigned int window_size);
extern int sched_boost(void);
extern int task_load_will_fit(struct task_struct *p, u64 task_load, int cpu,
enum sched_boost_policy boost_policy);
-extern enum sched_boost_policy sched_boost_policy(void);
extern int task_will_fit(struct task_struct *p, int cpu);
extern u64 cpu_load(int cpu);
extern u64 cpu_load_sync(int cpu, int sync);
extern int preferred_cluster(struct sched_cluster *cluster,
struct task_struct *p);
-extern void inc_nr_big_task(struct hmp_sched_stats *stats,
- struct task_struct *p);
-extern void dec_nr_big_task(struct hmp_sched_stats *stats,
- struct task_struct *p);
extern void inc_rq_hmp_stats(struct rq *rq,
struct task_struct *p, int change_cra);
extern void dec_rq_hmp_stats(struct rq *rq,
struct task_struct *p, int change_cra);
extern void reset_hmp_stats(struct hmp_sched_stats *stats, int reset_cra);
-extern int is_big_task(struct task_struct *p);
extern int upmigrate_discouraged(struct task_struct *p);
extern struct sched_cluster *rq_cluster(struct rq *rq);
extern int nr_big_tasks(struct rq *rq);
-extern void fixup_nr_big_tasks(struct hmp_sched_stats *stats,
- struct task_struct *p, s64 delta);
extern void reset_task_stats(struct task_struct *p);
extern void reset_cfs_rq_hmp_stats(int cpu, int reset_cra);
extern void inc_hmp_sched_stats_fair(struct rq *rq,
@@ -2587,7 +2585,6 @@
struct cftype *cft);
extern int cpu_upmigrate_discourage_write_u64(struct cgroup_subsys_state *css,
struct cftype *cft, u64 upmigrate_discourage);
-extern void sched_boost_parse_dt(void);
extern void clear_top_tasks_bitmap(unsigned long *bitmap);
#if defined(CONFIG_SCHED_TUNE) && defined(CONFIG_CGROUP_SCHEDTUNE)
@@ -2617,57 +2614,85 @@
extern unsigned long all_cluster_ids[];
-#else /* CONFIG_SCHED_HMP */
+extern void check_for_migration(struct rq *rq, struct task_struct *p);
+
+static inline int is_reserved(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ return test_bit(CPU_RESERVED, &rq->hmp_flags);
+}
+
+static inline int mark_reserved(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ return test_and_set_bit(CPU_RESERVED, &rq->hmp_flags);
+}
+
+static inline void clear_reserved(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ clear_bit(CPU_RESERVED, &rq->hmp_flags);
+}
+
+static inline bool
+__task_in_cum_window_demand(struct rq *rq, struct task_struct *p)
+{
+ return (p->on_rq || p->last_sleep_ts >= rq->window_start);
+}
+
+static inline bool
+task_in_cum_window_demand(struct rq *rq, struct task_struct *p)
+{
+ return cpu_of(rq) == task_cpu(p) && __task_in_cum_window_demand(rq, p);
+}
+
+static inline void
+dec_cum_window_demand(struct rq *rq, struct task_struct *p)
+{
+ rq->cum_window_demand -= p->ravg.demand;
+ WARN_ON_ONCE((s64)rq->cum_window_demand < 0);
+}
+
+static inline void
+inc_cum_window_demand(struct rq *rq, struct task_struct *p, s64 delta)
+{
+ rq->cum_window_demand += delta;
+}
+
+extern void update_cpu_cluster_capacity(const cpumask_t *cpus);
+
+extern unsigned long thermal_cap(int cpu);
+
+extern void clear_hmp_request(int cpu);
+
+extern int got_boost_kick(void);
+extern void clear_boost_kick(int cpu);
+extern enum sched_boost_policy sched_boost_policy(void);
+extern void sched_boost_parse_dt(void);
+extern void clear_ed_task(struct task_struct *p, struct rq *rq);
+extern bool early_detection_notify(struct rq *rq, u64 wallclock);
+
+#else /* CONFIG_SCHED_WALT */
struct hmp_sched_stats;
struct related_thread_group;
struct sched_cluster;
-static inline enum sched_boost_policy sched_boost_policy(void)
-{
- return SCHED_BOOST_NONE;
-}
-
static inline bool task_sched_boost(struct task_struct *p)
{
return true;
}
-static inline int got_boost_kick(void)
-{
- return 0;
-}
-
-static inline void update_task_ravg(struct task_struct *p, struct rq *rq,
- int event, u64 wallclock, u64 irqtime) { }
-
-static inline bool early_detection_notify(struct rq *rq, u64 wallclock)
-{
- return 0;
-}
-
-static inline void clear_ed_task(struct task_struct *p, struct rq *rq) { }
-static inline void fixup_busy_time(struct task_struct *p, int new_cpu) { }
-static inline void clear_boost_kick(int cpu) { }
-static inline void clear_hmp_request(int cpu) { }
-static inline void mark_task_starting(struct task_struct *p) { }
-static inline void set_window_start(struct rq *rq) { }
-static inline void init_clusters(void) {}
-static inline void update_cluster_topology(void) { }
-static inline void note_task_waking(struct task_struct *p, u64 wallclock) { }
-static inline void set_task_last_switch_out(struct task_struct *p,
- u64 wallclock) { }
+static inline void check_for_migration(struct rq *rq, struct task_struct *p) { }
static inline int task_will_fit(struct task_struct *p, int cpu)
{
return 1;
}
-static inline unsigned int power_cost(int cpu, u64 demand)
-{
- return SCHED_CAPACITY_SCALE;
-}
-
static inline int sched_boost(void)
{
return 0;
@@ -2693,11 +2718,6 @@
return 0;
}
-static inline unsigned int cpu_temp(int cpu)
-{
- return 0;
-}
-
static inline void
inc_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra) { }
@@ -2715,51 +2735,16 @@
return NULL;
}
-static inline void init_new_task_load(struct task_struct *p, bool idle_task)
-{
-}
-
static inline u64 scale_load_to_cpu(u64 load, int cpu)
{
return load;
}
-static inline unsigned int nr_eligible_big_tasks(int cpu)
-{
- return 0;
-}
-
-static inline int pct_task_load(struct task_struct *p) { return 0; }
-
static inline int cpu_capacity(int cpu)
{
return SCHED_CAPACITY_SCALE;
}
-static inline int same_cluster(int src_cpu, int dst_cpu) { return 1; }
-
-static inline void inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
- struct task_struct *p)
-{
-}
-
-static inline void dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,
- struct task_struct *p)
-{
-}
-
-static inline void sched_account_irqtime(int cpu, struct task_struct *curr,
- u64 delta, u64 wallclock)
-{
-}
-
-static inline void sched_account_irqstart(int cpu, struct task_struct *curr,
- u64 wallclock)
-{
-}
-
-static inline int sched_cpu_high_irqload(int cpu) { return 0; }
-
static inline void set_preferred_cluster(struct related_thread_group *grp) { }
static inline bool task_in_related_thread_group(struct task_struct *p)
@@ -2785,24 +2770,12 @@
#define PRED_DEMAND_DELTA (0)
-static inline void
-check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups) { }
-
-static inline void notify_migration(int src_cpu, int dest_cpu,
- bool src_cpu_dead, struct task_struct *p) { }
-
static inline int same_freq_domain(int src_cpu, int dst_cpu)
{
return 1;
}
-static inline void check_for_migration(struct rq *rq, struct task_struct *p) { }
-static inline void pre_big_task_count_change(void) { }
-static inline void post_big_task_count_change(void) { }
-static inline void set_hmp_defaults(void) { }
-
static inline void clear_reserved(int cpu) { }
-static inline void sched_boost_parse_dt(void) {}
static inline int alloc_related_thread_groups(void) { return 0; }
#define trace_sched_cpu_load(...)
@@ -2810,6 +2783,140 @@
#define trace_sched_cpu_load_cgroup(...)
#define trace_sched_cpu_load_wakeup(...)
-static inline void update_avg_burst(struct task_struct *p) {}
+static inline bool
+__task_in_cum_window_demand(struct rq *rq, struct task_struct *p)
+{
+ return 0;
+}
-#endif /* CONFIG_SCHED_HMP */
+static inline void
+dec_cum_window_demand(struct rq *rq, struct task_struct *p) { }
+
+static inline void
+inc_cum_window_demand(struct rq *rq, struct task_struct *p, s64 delta) { }
+
+static inline void update_cpu_cluster_capacity(const cpumask_t *cpus) { }
+
+#ifdef CONFIG_SMP
+static inline unsigned long thermal_cap(int cpu)
+{
+ return cpu_rq(cpu)->cpu_capacity_orig;
+}
+#endif
+
+static inline void clear_hmp_request(int cpu) { }
+
+static inline int got_boost_kick(void)
+{
+ return 0;
+}
+
+static inline void clear_boost_kick(int cpu) { }
+
+static inline enum sched_boost_policy sched_boost_policy(void)
+{
+ return SCHED_BOOST_NONE;
+}
+
+static inline void sched_boost_parse_dt(void) { }
+
+static inline void clear_ed_task(struct task_struct *p, struct rq *rq) { }
+
+static inline bool early_detection_notify(struct rq *rq, u64 wallclock)
+{
+ return 0;
+}
+
+#endif /* CONFIG_SCHED_WALT */
+
+#ifdef CONFIG_SCHED_HMP
+#define energy_aware() false
+
+extern int is_big_task(struct task_struct *p);
+extern unsigned int pct_task_load(struct task_struct *p);
+extern void notify_migration(int src_cpu, int dest_cpu,
+ bool src_cpu_dead, struct task_struct *p);
+extern void note_task_waking(struct task_struct *p, u64 wallclock);
+extern void
+check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups);
+extern void fixup_nr_big_tasks(struct hmp_sched_stats *stats,
+ struct task_struct *p, s64 delta);
+extern unsigned int power_cost(int cpu, u64 demand);
+extern unsigned int cpu_temp(int cpu);
+extern void pre_big_task_count_change(const struct cpumask *cpus);
+extern void post_big_task_count_change(const struct cpumask *cpus);
+extern void set_hmp_defaults(void);
+extern void update_avg_burst(struct task_struct *p);
+extern void set_task_last_switch_out(struct task_struct *p, u64 wallclock);
+
+extern unsigned int nr_eligible_big_tasks(int cpu);
+
+static inline void
+inc_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p)
+{
+ if (sched_disable_window_stats)
+ return;
+
+ if (is_big_task(p))
+ stats->nr_big_tasks++;
+}
+
+static inline void
+dec_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p)
+{
+ if (sched_disable_window_stats)
+ return;
+
+ if (is_big_task(p))
+ stats->nr_big_tasks--;
+
+ BUG_ON(stats->nr_big_tasks < 0);
+}
+
+static inline bool is_short_burst_task(struct task_struct *p)
+{
+ return p->ravg.avg_burst < sysctl_sched_short_burst &&
+ p->ravg.avg_sleep_time > sysctl_sched_short_sleep;
+}
+
+#else
+static inline bool energy_aware(void)
+{
+ return sched_feat(ENERGY_AWARE);
+}
+
+static inline int pct_task_load(struct task_struct *p) { return 0; }
+
+static inline void notify_migration(int src_cpu, int dest_cpu,
+ bool src_cpu_dead, struct task_struct *p) { }
+
+static inline void note_task_waking(struct task_struct *p, u64 wallclock) { }
+
+static inline void
+check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups) { }
+
+static inline void fixup_nr_big_tasks(struct hmp_sched_stats *stats,
+ struct task_struct *p, s64 delta) { }
+
+static inline unsigned int power_cost(int cpu, u64 demand)
+{
+ return SCHED_CAPACITY_SCALE;
+}
+
+static inline unsigned int cpu_temp(int cpu)
+{
+ return 0;
+}
+
+static inline void pre_big_task_count_change(const struct cpumask *cpus) { }
+
+static inline void post_big_task_count_change(const struct cpumask *cpus) { }
+
+static inline void set_hmp_defaults(void) { }
+
+static inline void update_avg_burst(struct task_struct *p) { }
+
+static inline void set_task_last_switch_out(struct task_struct *p,
+ u64 wallclock) { }
+
+#endif /* CONFIG_SCHED_HMP */
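The CPU_RESERVED helpers moved under CONFIG_SCHED_WALT above are meant to let a migration path claim a target CPU and have clear_hmp_request() release it; the actual callers are outside this hunk. A hedged sketch of the intended pattern (try_reserve_for_push() is hypothetical):

/* Illustrative only; not part of this patch. */
static int try_reserve_for_push(int target_cpu)
{
	if (mark_reserved(target_cpu))   /* bit already set: someone else won */
		return -EBUSY;

	/* ...queue the push/active-balance work towards target_cpu... */

	return 0;                        /* released later via clear_reserved() */
}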
diff --git a/kernel/sched/sched_avg.c b/kernel/sched/sched_avg.c
index 29d8a26..f820094 100644
--- a/kernel/sched/sched_avg.c
+++ b/kernel/sched/sched_avg.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -20,6 +20,7 @@
#include <linux/math64.h>
#include "sched.h"
+#include "walt.h"
#include <trace/events/sched.h>
static DEFINE_PER_CPU(u64, nr_prod_sum);
@@ -127,3 +128,35 @@
spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
}
EXPORT_SYMBOL(sched_update_nr_prod);
+
+/*
+ * Return the CPU's busy time in the last window as a percentage (0..100)
+ * of its original capacity.
+ */
+unsigned int sched_get_cpu_util(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+ u64 util = 0;
+ unsigned long capacity = SCHED_CAPACITY_SCALE, flags;
+ unsigned int busy;
+
+ raw_spin_lock_irqsave(&rq->lock, flags);
+
+#ifdef CONFIG_SMP
+ util = rq->cfs.avg.util_avg;
+ capacity = capacity_orig_of(cpu);
+#endif
+
+#ifdef CONFIG_SCHED_WALT
+ if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
+ util = rq->prev_runnable_sum + rq->grp_time.prev_runnable_sum;
+ util = div64_u64(util,
+ sched_ravg_window >> SCHED_CAPACITY_SHIFT);
+ }
+#endif
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+
+ util = (util >= capacity) ? capacity : util;
+ busy = (util * 100) / capacity;
+ return busy;
+}
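sched_get_cpu_util() reports the last window's busy time as a 0..100 percentage of the CPU's original capacity. A hedged usage sketch (busiest_cpu_pct() is hypothetical; real consumers are outside this patch):

/* Illustrative only: poll every online CPU and return the highest busy %. */
static unsigned int busiest_cpu_pct(void)
{
	unsigned int busy, max_busy = 0;
	int cpu;

	for_each_online_cpu(cpu) {
		busy = sched_get_cpu_util(cpu);
		if (busy > max_busy)
			max_busy = busy;
	}

	return max_busy;
}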
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index a440769..dcc4a36 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -18,7 +18,7 @@
}
#endif /* CONFIG_SMP */
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
static void
inc_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p)
@@ -43,7 +43,7 @@
pred_demand_delta);
}
-#else /* CONFIG_SCHED_HMP */
+#else /* CONFIG_SCHED_WALT */
static inline void
inc_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p) { }
@@ -51,7 +51,7 @@
static inline void
dec_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p) { }
-#endif /* CONFIG_SCHED_HMP */
+#endif /* CONFIG_SCHED_WALT */
static void
check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
@@ -172,7 +172,7 @@
.prio_changed = prio_changed_stop,
.switched_to = switched_to_stop,
.update_curr = update_curr_stop,
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
.fixup_hmp_sched_stats = fixup_hmp_sched_stats_stop,
#endif
};
diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c
index 5e5811c..bae3b2b 100644
--- a/kernel/sched/tune.c
+++ b/kernel/sched/tune.c
@@ -122,7 +122,7 @@
/* Boost value for tasks on that SchedTune CGroup */
int boost;
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
/* Toggle ability to override sched boost enabled */
bool sched_boost_no_override;
@@ -147,7 +147,7 @@
/* Controls whether further updates are allowed to the colocate flag */
bool colocate_update_disabled;
-#endif /* CONFIG_SCHED_HMP */
+#endif /* CONFIG_SCHED_WALT */
/* Performance Boost (B) region threshold params */
int perf_boost_idx;
@@ -187,7 +187,7 @@
static struct schedtune
root_schedtune = {
.boost = 0,
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
.sched_boost_no_override = false,
.sched_boost_enabled = true,
.sched_boost_enabled_backup = true,
@@ -274,7 +274,7 @@
/* Boost groups affecting each CPU in the system */
DEFINE_PER_CPU(struct boost_groups, cpu_boost_groups);
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
static inline void init_sched_boost(struct schedtune *st)
{
st->sched_boost_no_override = false;
@@ -343,7 +343,7 @@
return 0;
}
-#endif /* CONFIG_SCHED_HMP */
+#endif /* CONFIG_SCHED_WALT */
static void
schedtune_cpu_update(int cpu)
@@ -548,7 +548,7 @@
return 0;
}
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
static u64 sched_boost_enabled_read(struct cgroup_subsys_state *css,
struct cftype *cft)
{
@@ -589,11 +589,11 @@
return 0;
}
-#else /* CONFIG_SCHED_HMP */
+#else /* CONFIG_SCHED_WALT */
static inline void init_sched_boost(struct schedtune *st) { }
-#endif /* CONFIG_SCHED_HMP */
+#endif /* CONFIG_SCHED_WALT */
void schedtune_cancel_attach(struct cgroup_taskset *tset)
{
@@ -729,7 +729,7 @@
return st->boost;
}
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
static void schedtune_attach(struct cgroup_taskset *tset)
{
struct task_struct *task;
@@ -786,7 +786,7 @@
}
static struct cftype files[] = {
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
{
.name = "sched_boost_no_override",
.read_u64 = sched_boost_override_read,
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index 1b4bb23..58854b0 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -21,38 +21,158 @@
#include <linux/syscore_ops.h>
#include <linux/cpufreq.h>
+#include <linux/list_sort.h>
+#include <linux/jiffies.h>
+#include <linux/sched/core_ctl.h>
#include <trace/events/sched.h>
#include "sched.h"
#include "walt.h"
-#define WINDOW_STATS_RECENT 0
-#define WINDOW_STATS_MAX 1
-#define WINDOW_STATS_MAX_RECENT_AVG 2
-#define WINDOW_STATS_AVG 3
-#define WINDOW_STATS_INVALID_POLICY 4
-#define EXITING_TASK_MARKER 0xdeaddead
+const char *task_event_names[] = {"PUT_PREV_TASK", "PICK_NEXT_TASK",
+ "TASK_WAKE", "TASK_MIGRATE", "TASK_UPDATE",
+ "IRQ_UPDATE"};
-static __read_mostly unsigned int walt_ravg_hist_size = 5;
-static __read_mostly unsigned int walt_window_stats_policy =
- WINDOW_STATS_MAX_RECENT_AVG;
-static __read_mostly unsigned int walt_account_wait_time = 1;
-static __read_mostly unsigned int walt_freq_account_wait_time = 0;
-static __read_mostly unsigned int walt_io_is_busy = 0;
+const char *migrate_type_names[] = {"GROUP_TO_RQ", "RQ_TO_GROUP",
+ "RQ_TO_RQ", "GROUP_TO_GROUP"};
-unsigned int sysctl_sched_walt_init_task_load_pct = 15;
+#define SCHED_FREQ_ACCOUNT_WAIT_TIME 0
+#define SCHED_ACCOUNT_WAIT_TIME 1
+
+#define EARLY_DETECTION_DURATION 9500000
+
+static ktime_t ktime_last;
+static bool sched_ktime_suspended;
+static struct cpu_cycle_counter_cb cpu_cycle_counter_cb;
+static bool use_cycle_counter;
+DEFINE_MUTEX(cluster_lock);
+
+u64 sched_ktime_clock(void)
+{
+ if (unlikely(sched_ktime_suspended))
+ return ktime_to_ns(ktime_last);
+ return ktime_get_ns();
+}
+
+static void sched_resume(void)
+{
+ sched_ktime_suspended = false;
+}
+
+static int sched_suspend(void)
+{
+ ktime_last = ktime_get();
+ sched_ktime_suspended = true;
+ return 0;
+}
+
+static struct syscore_ops sched_syscore_ops = {
+ .resume = sched_resume,
+ .suspend = sched_suspend
+};
+
+static int __init sched_init_ops(void)
+{
+ register_syscore_ops(&sched_syscore_ops);
+ return 0;
+}
+late_initcall(sched_init_ops);
+
+static void acquire_rq_locks_irqsave(const cpumask_t *cpus,
+ unsigned long *flags)
+{
+ int cpu;
+
+ local_irq_save(*flags);
+ for_each_cpu(cpu, cpus)
+ raw_spin_lock(&cpu_rq(cpu)->lock);
+}
+
+static void release_rq_locks_irqrestore(const cpumask_t *cpus,
+ unsigned long *flags)
+{
+ int cpu;
+
+ for_each_cpu(cpu, cpus)
+ raw_spin_unlock(&cpu_rq(cpu)->lock);
+ local_irq_restore(*flags);
+}
+
+struct timer_list sched_grp_timer;
+static void sched_agg_grp_load(unsigned long data)
+{
+ struct sched_cluster *cluster;
+ unsigned long flags;
+ int cpu;
+
+ acquire_rq_locks_irqsave(cpu_possible_mask, &flags);
+
+ for_each_sched_cluster(cluster) {
+ u64 aggr_grp_load = 0;
+
+ for_each_cpu(cpu, &cluster->cpus) {
+ struct rq *rq = cpu_rq(cpu);
+
+ if (rq->curr)
+ update_task_ravg(rq->curr, rq, TASK_UPDATE,
+ sched_ktime_clock(), 0);
+ aggr_grp_load +=
+ rq->grp_time.prev_runnable_sum;
+ }
+
+ cluster->aggr_grp_load = aggr_grp_load;
+ }
+
+ release_rq_locks_irqrestore(cpu_possible_mask, &flags);
+
+ if (sched_boost() == RESTRAINED_BOOST)
+ mod_timer(&sched_grp_timer, jiffies + 1);
+}
+
+static int __init setup_sched_grp_timer(void)
+{
+ init_timer_deferrable(&sched_grp_timer);
+ sched_grp_timer.function = sched_agg_grp_load;
+ return 0;
+}
+late_initcall(setup_sched_grp_timer);
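sched_agg_grp_load() only re-arms sched_grp_timer while sched_boost() == RESTRAINED_BOOST, so the timer has to be kicked once when that boost mode is entered; that arming site is not part of this hunk. A hedged sketch of what it would look like:

/* Illustrative only: how the restrained-boost enable path might arm the timer. */
static void restrained_boost_enter(void)
{
	if (!timer_pending(&sched_grp_timer))
		mod_timer(&sched_grp_timer, jiffies + 1);
}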
/* 1 -> use PELT based load stats, 0 -> use window-based load stats */
unsigned int __read_mostly walt_disabled = 0;
-static unsigned int max_possible_efficiency = 1024;
-static unsigned int min_possible_efficiency = 1024;
+__read_mostly unsigned int sysctl_sched_cpu_high_irqload = (10 * NSEC_PER_MSEC);
+
+/*
+ * sched_window_stats_policy and sched_ravg_hist_size have a 'sysctl' copy
+ * associated with them. This is required for atomic updates of those
+ * variables when they are modified via the sysctl interface.
+ *
+ * IMPORTANT: Initialize both copies to the same value!!
+ */
+
+__read_mostly unsigned int sched_ravg_hist_size = 5;
+__read_mostly unsigned int sysctl_sched_ravg_hist_size = 5;
+
+static __read_mostly unsigned int sched_io_is_busy = 1;
+
+__read_mostly unsigned int sched_window_stats_policy =
+ WINDOW_STATS_MAX_RECENT_AVG;
+__read_mostly unsigned int sysctl_sched_window_stats_policy =
+ WINDOW_STATS_MAX_RECENT_AVG;
+
+/* Window size (in ns) */
+__read_mostly unsigned int sched_ravg_window = MIN_SCHED_RAVG_WINDOW;
+
+/* Initial task load. Newly created tasks are assigned this load. */
+unsigned int __read_mostly sched_init_task_load_windows;
+unsigned int __read_mostly sysctl_sched_init_task_load_pct = 15;
/*
* Maximum possible frequency across all cpus. Task demand and cpu
* capacity (cpu_power) metrics are scaled in reference to it.
*/
-static unsigned int max_possible_freq = 1;
+unsigned int max_possible_freq = 1;
/*
* Minimum possible max_freq across all cpus. This will be same as
@@ -60,126 +180,126 @@
* max_possible_freq on heterogenous systems. min_max_freq is used to derive
* capacity (cpu_power) of cpus.
*/
-static unsigned int min_max_freq = 1;
+unsigned int min_max_freq = 1;
-static unsigned int max_capacity = 1024;
-static unsigned int min_capacity = 1024;
-static unsigned int max_load_scale_factor = 1024;
-static unsigned int max_possible_capacity = 1024;
+unsigned int max_capacity = 1024; /* max(rq->capacity) */
+unsigned int min_capacity = 1024; /* min(rq->capacity) */
+unsigned int max_possible_capacity = 1024; /* max(rq->max_possible_capacity) */
+unsigned int
+min_max_possible_capacity = 1024; /* min(rq->max_possible_capacity) */
-/* Mask of all CPUs that have max_possible_capacity */
-static cpumask_t mpc_mask = CPU_MASK_ALL;
+/* Temporarily disable window-stats activity on all cpus */
+unsigned int __read_mostly sched_disable_window_stats;
-/* Window size (in ns) */
-__read_mostly unsigned int walt_ravg_window = 20000000;
+/*
+ * Task load is categorized into buckets for the purpose of top task tracking.
+ * The entire range of load from 0 to sched_ravg_window needs to be covered
+ * in NUM_LOAD_INDICES number of buckets. Therefore the size of each bucket
+ * is given by sched_ravg_window / NUM_LOAD_INDICES. Since the default value
+ * of sched_ravg_window is MIN_SCHED_RAVG_WINDOW, use that to compute
+ * sched_load_granule.
+ */
+__read_mostly unsigned int sched_load_granule =
+ MIN_SCHED_RAVG_WINDOW / NUM_LOAD_INDICES;
-/* Min window size (in ns) = 10ms */
-#define MIN_SCHED_RAVG_WINDOW 10000000
+/* Size of bitmaps maintained to track top tasks */
+static const unsigned int top_tasks_bitmap_size =
+ BITS_TO_LONGS(NUM_LOAD_INDICES + 1) * sizeof(unsigned long);
-/* Max window size (in ns) = 1s */
-#define MAX_SCHED_RAVG_WINDOW 1000000000
+/*
+ * This governs what load needs to be used when reporting CPU busy time
+ * to the cpufreq governor.
+ */
+__read_mostly unsigned int sysctl_sched_freq_reporting_policy;
-static unsigned int sync_cpu;
-static ktime_t ktime_last;
-static __read_mostly bool walt_ktime_suspended;
-
-static unsigned int task_load(struct task_struct *p)
+static int __init set_sched_ravg_window(char *str)
{
- return p->ravg.demand;
-}
+ unsigned int window_size;
-void
-walt_inc_cumulative_runnable_avg(struct rq *rq,
- struct task_struct *p)
-{
- rq->cumulative_runnable_avg += p->ravg.demand;
-}
+ get_option(&str, &window_size);
-void
-walt_dec_cumulative_runnable_avg(struct rq *rq,
- struct task_struct *p)
-{
- rq->cumulative_runnable_avg -= p->ravg.demand;
- BUG_ON((s64)rq->cumulative_runnable_avg < 0);
-}
-
-static void
-fixup_cumulative_runnable_avg(struct rq *rq,
- struct task_struct *p, s64 task_load_delta)
-{
- rq->cumulative_runnable_avg += task_load_delta;
- if ((s64)rq->cumulative_runnable_avg < 0)
- panic("cra less than zero: tld: %lld, task_load(p) = %u\n",
- task_load_delta, task_load(p));
-}
-
-u64 walt_ktime_clock(void)
-{
- if (unlikely(walt_ktime_suspended))
- return ktime_to_ns(ktime_last);
- return ktime_get_ns();
-}
-
-static void walt_resume(void)
-{
- walt_ktime_suspended = false;
-}
-
-static int walt_suspend(void)
-{
- ktime_last = ktime_get();
- walt_ktime_suspended = true;
- return 0;
-}
-
-static struct syscore_ops walt_syscore_ops = {
- .resume = walt_resume,
- .suspend = walt_suspend
-};
-
-static int __init walt_init_ops(void)
-{
- register_syscore_ops(&walt_syscore_ops);
- return 0;
-}
-late_initcall(walt_init_ops);
-
-void walt_inc_cfs_cumulative_runnable_avg(struct cfs_rq *cfs_rq,
- struct task_struct *p)
-{
- cfs_rq->cumulative_runnable_avg += p->ravg.demand;
-}
-
-void walt_dec_cfs_cumulative_runnable_avg(struct cfs_rq *cfs_rq,
- struct task_struct *p)
-{
- cfs_rq->cumulative_runnable_avg -= p->ravg.demand;
-}
-
-static int exiting_task(struct task_struct *p)
-{
- if (p->flags & PF_EXITING) {
- if (p->ravg.sum_history[0] != EXITING_TASK_MARKER) {
- p->ravg.sum_history[0] = EXITING_TASK_MARKER;
- }
- return 1;
+ if (window_size < MIN_SCHED_RAVG_WINDOW ||
+ window_size > MAX_SCHED_RAVG_WINDOW) {
+ WARN_ON(1);
+ return -EINVAL;
}
+
+ sched_ravg_window = window_size;
return 0;
}
-static int __init set_walt_ravg_window(char *str)
+early_param("sched_ravg_window", set_sched_ravg_window);
+
+void inc_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra)
{
- get_option(&str, &walt_ravg_window);
-
- walt_disabled = (walt_ravg_window < MIN_SCHED_RAVG_WINDOW ||
- walt_ravg_window > MAX_SCHED_RAVG_WINDOW);
- return 0;
+ inc_nr_big_task(&rq->hmp_stats, p);
+ if (change_cra)
+ inc_cumulative_runnable_avg(&rq->hmp_stats, p);
}
-early_param("walt_ravg_window", set_walt_ravg_window);
+void dec_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra)
+{
+ dec_nr_big_task(&rq->hmp_stats, p);
+ if (change_cra)
+ dec_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+void reset_hmp_stats(struct hmp_sched_stats *stats, int reset_cra)
+{
+ stats->nr_big_tasks = 0; /* never happens on EAS */
+ if (reset_cra) {
+ stats->cumulative_runnable_avg = 0;
+ stats->pred_demands_sum = 0;
+ }
+}
+
+/*
+ * Demand aggregation for frequency purpose:
+ *
+ * CPU demand of tasks from various related groups is aggregated per-cluster and
+ * added to the "max_busy_cpu" in that cluster, where max_busy_cpu is determined
+ * by just rq->prev_runnable_sum.
+ *
+ * Some examples follow, which assume:
+ * Cluster0 = CPU0-3, Cluster1 = CPU4-7
+ * One related thread group A that has tasks A0, A1, A2
+ *
+ * A->cpu_time[X].curr/prev_sum = counters in which cpu execution stats of
+ * tasks belonging to group A are accumulated when they run on cpu X.
+ *
+ * CX->curr/prev_sum = counters in which cpu execution stats of all tasks
+ * not belonging to group A are accumulated when they run on cpu X
+ *
+ * Lets say the stats for window M was as below:
+ *
+ * C0->prev_sum = 1ms, A->cpu_time[0].prev_sum = 5ms
+ * Task A0 ran 5ms on CPU0
+ * Task B0 ran 1ms on CPU0
+ *
+ * C1->prev_sum = 5ms, A->cpu_time[1].prev_sum = 6ms
+ * Task A1 ran 4ms on CPU1
+ * Task A2 ran 2ms on CPU1
+ * Task B1 ran 5ms on CPU1
+ *
+ * C2->prev_sum = 0ms, A->cpu_time[2].prev_sum = 0
+ * CPU2 idle
+ *
+ * C3->prev_sum = 0ms, A->cpu_time[3].prev_sum = 0
+ * CPU3 idle
+ *
+ * In this case, CPU1 was most busy going by just its prev_sum counter. Demand
+ * from all group A tasks is added to CPU1. IOW, at end of window M, cpu busy
+ * time reported to governor will be:
+ *
+ * C0 busy time = 1ms
+ * C1 busy time = 5 + 5 + 6 = 16ms
+ *
+ */
+__read_mostly int sched_freq_aggregate_threshold;
static void
-update_window_start(struct rq *rq, u64 wallclock)
+update_window_start(struct rq *rq, u64 wallclock, int event)
{
s64 delta;
int nr_windows;
@@ -191,42 +311,123 @@
WARN_ONCE(1, "WALT wallclock appears to have gone backwards or reset\n");
}
- if (delta < walt_ravg_window)
+ if (delta < sched_ravg_window)
return;
- nr_windows = div64_u64(delta, walt_ravg_window);
- rq->window_start += (u64)nr_windows * (u64)walt_ravg_window;
+ nr_windows = div64_u64(delta, sched_ravg_window);
+ rq->window_start += (u64)nr_windows * (u64)sched_ravg_window;
+
+ rq->cum_window_demand = rq->hmp_stats.cumulative_runnable_avg;
+ if (event == PUT_PREV_TASK)
+ rq->cum_window_demand += rq->curr->ravg.demand;
}
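A worked example of the rollover above, assuming the default 10 ms window:

/*
 * If wallclock is 27 ms past rq->window_start, nr_windows = 2 and
 * window_start advances by 20 ms; the remaining 7 ms stays in the new
 * current window. cum_window_demand then restarts from the demand of the
 * tasks still runnable on the rq (plus the departing task's demand when
 * the event is PUT_PREV_TASK).
 */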
-static u64 scale_exec_time(u64 delta, struct rq *rq)
+int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb)
{
- unsigned int cur_freq = rq->cur_freq;
- int sf;
+ mutex_lock(&cluster_lock);
+ if (!cb->get_cpu_cycle_counter) {
+ mutex_unlock(&cluster_lock);
+ return -EINVAL;
+ }
- if (unlikely(cur_freq > max_possible_freq))
- cur_freq = rq->max_possible_freq;
+ cpu_cycle_counter_cb = *cb;
+ use_cycle_counter = true;
+ mutex_unlock(&cluster_lock);
- /* round up div64 */
- delta = div64_u64(delta * cur_freq + max_possible_freq - 1,
- max_possible_freq);
-
- sf = DIV_ROUND_UP(rq->efficiency * 1024, max_possible_efficiency);
-
- delta *= sf;
- delta >>= 10;
-
- return delta;
+ return 0;
}
-static int cpu_is_waiting_on_io(struct rq *rq)
+static void update_task_cpu_cycles(struct task_struct *p, int cpu)
{
- if (!walt_io_is_busy)
+ if (use_cycle_counter)
+ p->cpu_cycles = cpu_cycle_counter_cb.get_cpu_cycle_counter(cpu);
+}
+
+void clear_ed_task(struct task_struct *p, struct rq *rq)
+{
+ if (p == rq->ed_task)
+ rq->ed_task = NULL;
+}
+
+bool early_detection_notify(struct rq *rq, u64 wallclock)
+{
+ struct task_struct *p;
+ int loop_max = 10;
+
+ if (sched_boost_policy() == SCHED_BOOST_NONE || !rq->cfs.h_nr_running)
return 0;
- return atomic_read(&rq->nr_iowait);
+ rq->ed_task = NULL;
+ list_for_each_entry(p, &rq->cfs_tasks, se.group_node) {
+ if (!loop_max)
+ break;
+
+ if (wallclock - p->last_wake_ts >= EARLY_DETECTION_DURATION) {
+ rq->ed_task = p;
+ return 1;
+ }
+
+ loop_max--;
+ }
+
+ return 0;
}
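A short worked illustration of the early-detection check above (numbers assumed):

/*
 * With EARLY_DETECTION_DURATION = 9.5 ms, a task that last woke at t = 100 ms
 * and is still waiting on the cfs list at t = 110 ms trips the check
 * (110 ms - 100 ms >= 9.5 ms) while a boost is active; rq->ed_task is set and
 * freq_policy_load() below then reports a full sched_ravg_window for this CPU.
 */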
-void walt_account_irqtime(int cpu, struct task_struct *curr,
+void sched_account_irqstart(int cpu, struct task_struct *curr, u64 wallclock)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ if (!rq->window_start || sched_disable_window_stats)
+ return;
+
+ if (is_idle_task(curr)) {
+ /* We're here without rq->lock held, IRQ disabled */
+ raw_spin_lock(&rq->lock);
+ update_task_cpu_cycles(curr, cpu);
+ raw_spin_unlock(&rq->lock);
+ }
+}
+
+/*
+ * Return total number of tasks "eligible" to run on highest capacity cpu
+ *
+ * This is simply nr_big_tasks for cpus that are not at max possible
+ * capacity, and nr_running for cpus at max possible capacity.
+ */
+unsigned int nr_eligible_big_tasks(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ if (cpu_max_possible_capacity(cpu) != max_possible_capacity)
+ return rq->hmp_stats.nr_big_tasks;
+
+ return rq->nr_running;
+}
+
+void clear_hmp_request(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long flags;
+
+ clear_boost_kick(cpu);
+ clear_reserved(cpu);
+ if (rq->push_task) {
+ struct task_struct *push_task = NULL;
+
+ raw_spin_lock_irqsave(&rq->lock, flags);
+ if (rq->push_task) {
+ clear_reserved(rq->push_cpu);
+ push_task = rq->push_task;
+ rq->push_task = NULL;
+ }
+ rq->active_balance = 0;
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+ if (push_task)
+ put_task_struct(push_task);
+ }
+}
+
+void sched_account_irqtime(int cpu, struct task_struct *curr,
u64 delta, u64 wallclock)
{
struct rq *rq = cpu_rq(cpu);
@@ -243,7 +444,7 @@
cur_jiffies_ts = get_jiffies_64();
if (is_idle_task(curr))
- walt_update_task_ravg(curr, rq, IRQ_UPDATE, walt_ktime_clock(),
+ update_task_ravg(curr, rq, IRQ_UPDATE, sched_ktime_clock(),
delta);
nr_windows = cur_jiffies_ts - rq->irqload_ts;
@@ -266,29 +467,824 @@
raw_spin_unlock_irqrestore(&rq->lock, flags);
}
+/*
+ * Special case the last index and provide a fast path for index = 0.
+ * Note that sched_load_granule can change underneath us if we are not
+ * holding any runqueue locks while calling the two functions below.
+ */
+static u32 top_task_load(struct rq *rq)
+{
+ int index = rq->prev_top;
+ u8 prev = 1 - rq->curr_table;
-#define WALT_HIGH_IRQ_TIMEOUT 3
+ if (!index) {
+ int msb = NUM_LOAD_INDICES - 1;
-u64 walt_irqload(int cpu) {
- struct rq *rq = cpu_rq(cpu);
- s64 delta;
- delta = get_jiffies_64() - rq->irqload_ts;
-
- /*
- * Current context can be preempted by irq and rq->irqload_ts can be
- * updated by irq context so that delta can be negative.
- * But this is okay and we can safely return as this means there
- * was recent irq occurrence.
- */
-
- if (delta < WALT_HIGH_IRQ_TIMEOUT)
- return rq->avg_irqload;
- else
- return 0;
+ if (!test_bit(msb, rq->top_tasks_bitmap[prev]))
+ return 0;
+ else
+ return sched_load_granule;
+ } else if (index == NUM_LOAD_INDICES - 1) {
+ return sched_ravg_window;
+ } else {
+ return (index + 1) * sched_load_granule;
+ }
}
-int walt_cpu_high_irqload(int cpu) {
- return walt_irqload(cpu) >= sysctl_sched_walt_cpu_high_irqload;
+u64 freq_policy_load(struct rq *rq)
+{
+ unsigned int reporting_policy = sysctl_sched_freq_reporting_policy;
+ struct sched_cluster *cluster = rq->cluster;
+ u64 aggr_grp_load = cluster->aggr_grp_load;
+ u64 load;
+
+ if (rq->ed_task != NULL)
+ return sched_ravg_window;
+
+ if (aggr_grp_load > sched_freq_aggregate_threshold)
+ load = rq->prev_runnable_sum + aggr_grp_load;
+ else
+ load = rq->prev_runnable_sum + rq->grp_time.prev_runnable_sum;
+
+ switch (reporting_policy) {
+ case FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK:
+ load = max_t(u64, load, top_task_load(rq));
+ break;
+ case FREQ_REPORT_TOP_TASK:
+ load = top_task_load(rq);
+ break;
+ case FREQ_REPORT_CPU_LOAD:
+ break;
+ default:
+ break;
+ }
+
+ return load;
+}
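A worked example of the reporting decision above, with assumed numbers and a 20 ms window:

/*
 *   rq->prev_runnable_sum           =  8 ms
 *   rq->grp_time.prev_runnable_sum  =  4 ms
 *   cluster->aggr_grp_load          = 15 ms
 *   sched_freq_aggregate_threshold  = 10 ms
 *
 * aggr_grp_load exceeds the threshold, so load = 8 + 15 = 23 ms.
 *   FREQ_REPORT_CPU_LOAD              -> 23 ms
 *   FREQ_REPORT_TOP_TASK              -> top_task_load(rq)
 *   FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK -> max(23 ms, top_task_load(rq))
 *
 * cpu_util_freq() then divides the result by (sched_ravg_window >> 10) and
 * caps it at the CPU's capacity; an early-detected task short-circuits all
 * of this to a full window.
 */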
+
+/*
+ * In this function we match the accumulated subtractions with the current
+ * and previous windows we are operating with. Ignore any entries where
+ * the window start in the load_subtraction struct does not match either
+ * the current or the previous window. This could happen whenever CPUs
+ * become idle or busy with interrupts disabled for an extended period.
+ */
+static inline void account_load_subtractions(struct rq *rq)
+{
+ u64 ws = rq->window_start;
+ u64 prev_ws = ws - sched_ravg_window;
+ struct load_subtractions *ls = rq->load_subs;
+ int i;
+
+ for (i = 0; i < NUM_TRACKED_WINDOWS; i++) {
+ if (ls[i].window_start == ws) {
+ rq->curr_runnable_sum -= ls[i].subs;
+ rq->nt_curr_runnable_sum -= ls[i].new_subs;
+ } else if (ls[i].window_start == prev_ws) {
+ rq->prev_runnable_sum -= ls[i].subs;
+ rq->nt_prev_runnable_sum -= ls[i].new_subs;
+ }
+
+ ls[i].subs = 0;
+ ls[i].new_subs = 0;
+ }
+
+ BUG_ON((s64)rq->prev_runnable_sum < 0);
+ BUG_ON((s64)rq->curr_runnable_sum < 0);
+ BUG_ON((s64)rq->nt_prev_runnable_sum < 0);
+ BUG_ON((s64)rq->nt_curr_runnable_sum < 0);
+}
+
+static inline void create_subtraction_entry(struct rq *rq, u64 ws, int index)
+{
+ rq->load_subs[index].window_start = ws;
+ rq->load_subs[index].subs = 0;
+ rq->load_subs[index].new_subs = 0;
+}
+
+static int get_top_index(unsigned long *bitmap, unsigned long old_top)
+{
+ int index = find_next_bit(bitmap, NUM_LOAD_INDICES, old_top);
+
+ if (index == NUM_LOAD_INDICES)
+ return 0;
+
+ return NUM_LOAD_INDICES - 1 - index;
+}
+
+static bool get_subtraction_index(struct rq *rq, u64 ws)
+{
+ int i;
+ u64 oldest = ULLONG_MAX;
+ int oldest_index = 0;
+
+ for (i = 0; i < NUM_TRACKED_WINDOWS; i++) {
+ u64 entry_ws = rq->load_subs[i].window_start;
+
+ if (ws == entry_ws)
+ return i;
+
+ if (entry_ws < oldest) {
+ oldest = entry_ws;
+ oldest_index = i;
+ }
+ }
+
+ create_subtraction_entry(rq, ws, oldest_index);
+ return oldest_index;
+}
+
+static void update_rq_load_subtractions(int index, struct rq *rq,
+ u32 sub_load, bool new_task)
+{
+ rq->load_subs[index].subs += sub_load;
+ if (new_task)
+ rq->load_subs[index].new_subs += sub_load;
+}
+
+void update_cluster_load_subtractions(struct task_struct *p,
+ int cpu, u64 ws, bool new_task)
+{
+ struct sched_cluster *cluster = cpu_cluster(cpu);
+ struct cpumask cluster_cpus = cluster->cpus;
+ u64 prev_ws = ws - sched_ravg_window;
+ int i;
+
+ cpumask_clear_cpu(cpu, &cluster_cpus);
+ raw_spin_lock(&cluster->load_lock);
+
+ for_each_cpu(i, &cluster_cpus) {
+ struct rq *rq = cpu_rq(i);
+ int index;
+
+ if (p->ravg.curr_window_cpu[i]) {
+ index = get_subtraction_index(rq, ws);
+ update_rq_load_subtractions(index, rq,
+ p->ravg.curr_window_cpu[i], new_task);
+ p->ravg.curr_window_cpu[i] = 0;
+ }
+
+ if (p->ravg.prev_window_cpu[i]) {
+ index = get_subtraction_index(rq, prev_ws);
+ update_rq_load_subtractions(index, rq,
+ p->ravg.prev_window_cpu[i], new_task);
+ p->ravg.prev_window_cpu[i] = 0;
+ }
+ }
+
+ raw_spin_unlock(&cluster->load_lock);
+}
+
+#ifdef CONFIG_SCHED_HMP
+static inline void
+init_new_task_load_hmp(struct task_struct *p, bool idle_task)
+{
+ p->ravg.curr_burst = 0;
+ /*
+ * Initialize the avg_burst to twice the threshold, so that
+ * a task would not be classified as short burst right away
+ * after fork. It takes at least 6 sleep-wakeup cycles for
+ * the avg_burst to go below the threshold.
+ */
+ p->ravg.avg_burst = 2 * (u64)sysctl_sched_short_burst;
+ p->ravg.avg_sleep_time = 0;
+}
+
+static inline void
+update_task_burst(struct task_struct *p, struct rq *rq, int event, u64 runtime)
+{
+ /*
+ * update_task_demand() has checks for idle task and
+ * exit task. The runtime may include the wait time,
+ * so update the burst only for the cases where the
+ * task is running.
+ */
+ if (event == PUT_PREV_TASK || (event == TASK_UPDATE &&
+ rq->curr == p))
+ p->ravg.curr_burst += runtime;
+}
+
+static void reset_task_stats_hmp(struct task_struct *p)
+{
+ p->ravg.avg_burst = 2 * (u64)sysctl_sched_short_burst;
+}
+#else
+static inline void
+init_new_task_load_hmp(struct task_struct *p, bool idle_task)
+{
+}
+
+static inline void
+update_task_burst(struct task_struct *p, struct rq *rq, int event, u64 runtime)
+{
+}
+
+static void reset_task_stats_hmp(struct task_struct *p)
+{
+}
+#endif
+
+static inline void inter_cluster_migration_fixup
+ (struct task_struct *p, int new_cpu, int task_cpu, bool new_task)
+{
+ struct rq *dest_rq = cpu_rq(new_cpu);
+ struct rq *src_rq = cpu_rq(task_cpu);
+
+ if (same_freq_domain(new_cpu, task_cpu))
+ return;
+
+ p->ravg.curr_window_cpu[new_cpu] = p->ravg.curr_window;
+ p->ravg.prev_window_cpu[new_cpu] = p->ravg.prev_window;
+
+ dest_rq->curr_runnable_sum += p->ravg.curr_window;
+ dest_rq->prev_runnable_sum += p->ravg.prev_window;
+
+ src_rq->curr_runnable_sum -= p->ravg.curr_window_cpu[task_cpu];
+ src_rq->prev_runnable_sum -= p->ravg.prev_window_cpu[task_cpu];
+
+ if (new_task) {
+ dest_rq->nt_curr_runnable_sum += p->ravg.curr_window;
+ dest_rq->nt_prev_runnable_sum += p->ravg.prev_window;
+
+ src_rq->nt_curr_runnable_sum -=
+ p->ravg.curr_window_cpu[task_cpu];
+ src_rq->nt_prev_runnable_sum -=
+ p->ravg.prev_window_cpu[task_cpu];
+ }
+
+ p->ravg.curr_window_cpu[task_cpu] = 0;
+ p->ravg.prev_window_cpu[task_cpu] = 0;
+
+ update_cluster_load_subtractions(p, task_cpu,
+ src_rq->window_start, new_task);
+
+ BUG_ON((s64)src_rq->prev_runnable_sum < 0);
+ BUG_ON((s64)src_rq->curr_runnable_sum < 0);
+ BUG_ON((s64)src_rq->nt_prev_runnable_sum < 0);
+ BUG_ON((s64)src_rq->nt_curr_runnable_sum < 0);
+}
+
+static int load_to_index(u32 load)
+{
+ if (load < sched_load_granule)
+ return 0;
+ else if (load >= sched_ravg_window)
+ return NUM_LOAD_INDICES - 1;
+ else
+ return load / sched_load_granule;
+}
+
+static void
+migrate_top_tasks(struct task_struct *p, struct rq *src_rq, struct rq *dst_rq)
+{
+ int index;
+ int top_index;
+ u32 curr_window = p->ravg.curr_window;
+ u32 prev_window = p->ravg.prev_window;
+ u8 src = src_rq->curr_table;
+ u8 dst = dst_rq->curr_table;
+ u8 *src_table;
+ u8 *dst_table;
+
+ if (curr_window) {
+ src_table = src_rq->top_tasks[src];
+ dst_table = dst_rq->top_tasks[dst];
+ index = load_to_index(curr_window);
+ src_table[index] -= 1;
+ dst_table[index] += 1;
+
+ if (!src_table[index])
+ __clear_bit(NUM_LOAD_INDICES - index - 1,
+ src_rq->top_tasks_bitmap[src]);
+
+ if (dst_table[index] == 1)
+ __set_bit(NUM_LOAD_INDICES - index - 1,
+ dst_rq->top_tasks_bitmap[dst]);
+
+ if (index > dst_rq->curr_top)
+ dst_rq->curr_top = index;
+
+ top_index = src_rq->curr_top;
+ if (index == top_index && !src_table[index])
+ src_rq->curr_top = get_top_index(
+ src_rq->top_tasks_bitmap[src], top_index);
+ }
+
+ if (prev_window) {
+ src = 1 - src;
+ dst = 1 - dst;
+ src_table = src_rq->top_tasks[src];
+ dst_table = dst_rq->top_tasks[dst];
+ index = load_to_index(prev_window);
+ src_table[index] -= 1;
+ dst_table[index] += 1;
+
+ if (!src_table[index])
+ __clear_bit(NUM_LOAD_INDICES - index - 1,
+ src_rq->top_tasks_bitmap[src]);
+
+ if (dst_table[index] == 1)
+ __set_bit(NUM_LOAD_INDICES - index - 1,
+ dst_rq->top_tasks_bitmap[dst]);
+
+ if (index > dst_rq->prev_top)
+ dst_rq->prev_top = index;
+
+ top_index = src_rq->prev_top;
+ if (index == top_index && !src_table[index])
+ src_rq->prev_top = get_top_index(
+ src_rq->top_tasks_bitmap[src], top_index);
+ }
+}
+
+void fixup_busy_time(struct task_struct *p, int new_cpu)
+{
+ struct rq *src_rq = task_rq(p);
+ struct rq *dest_rq = cpu_rq(new_cpu);
+ u64 wallclock;
+ u64 *src_curr_runnable_sum, *dst_curr_runnable_sum;
+ u64 *src_prev_runnable_sum, *dst_prev_runnable_sum;
+ u64 *src_nt_curr_runnable_sum, *dst_nt_curr_runnable_sum;
+ u64 *src_nt_prev_runnable_sum, *dst_nt_prev_runnable_sum;
+ bool new_task;
+ struct related_thread_group *grp;
+
+ if (!p->on_rq && p->state != TASK_WAKING)
+ return;
+
+ if (exiting_task(p)) {
+ clear_ed_task(p, src_rq);
+ return;
+ }
+
+ if (p->state == TASK_WAKING)
+ double_rq_lock(src_rq, dest_rq);
+
+ if (sched_disable_window_stats)
+ goto done;
+
+ wallclock = sched_ktime_clock();
+
+ update_task_ravg(task_rq(p)->curr, task_rq(p),
+ TASK_UPDATE,
+ wallclock, 0);
+ update_task_ravg(dest_rq->curr, dest_rq,
+ TASK_UPDATE, wallclock, 0);
+
+ update_task_ravg(p, task_rq(p), TASK_MIGRATE,
+ wallclock, 0);
+
+ update_task_cpu_cycles(p, new_cpu);
+
+ if (__task_in_cum_window_demand(src_rq, p)) {
+ dec_cum_window_demand(src_rq, p);
+ inc_cum_window_demand(dest_rq, p, p->ravg.demand);
+ }
+
+ new_task = is_new_task(p);
+ /* Protected by rq_lock */
+ grp = p->grp;
+
+ /*
+ * For frequency aggregation, we continue to do migration fixups
+	 * even for intra-cluster migrations. This is because the aggregated
+	 * load has to be reported on a single CPU regardless.
+ */
+ if (grp) {
+ struct group_cpu_time *cpu_time;
+
+ cpu_time = &src_rq->grp_time;
+ src_curr_runnable_sum = &cpu_time->curr_runnable_sum;
+ src_prev_runnable_sum = &cpu_time->prev_runnable_sum;
+ src_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
+ src_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
+
+ cpu_time = &dest_rq->grp_time;
+ dst_curr_runnable_sum = &cpu_time->curr_runnable_sum;
+ dst_prev_runnable_sum = &cpu_time->prev_runnable_sum;
+ dst_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
+ dst_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
+
+ if (p->ravg.curr_window) {
+ *src_curr_runnable_sum -= p->ravg.curr_window;
+ *dst_curr_runnable_sum += p->ravg.curr_window;
+ if (new_task) {
+ *src_nt_curr_runnable_sum -=
+ p->ravg.curr_window;
+ *dst_nt_curr_runnable_sum +=
+ p->ravg.curr_window;
+ }
+ }
+
+ if (p->ravg.prev_window) {
+ *src_prev_runnable_sum -= p->ravg.prev_window;
+ *dst_prev_runnable_sum += p->ravg.prev_window;
+ if (new_task) {
+ *src_nt_prev_runnable_sum -=
+ p->ravg.prev_window;
+ *dst_nt_prev_runnable_sum +=
+ p->ravg.prev_window;
+ }
+ }
+ } else {
+ inter_cluster_migration_fixup(p, new_cpu,
+ task_cpu(p), new_task);
+ }
+
+ migrate_top_tasks(p, src_rq, dest_rq);
+
+ if (!same_freq_domain(new_cpu, task_cpu(p))) {
+ cpufreq_update_util(dest_rq, SCHED_CPUFREQ_INTERCLUSTER_MIG);
+ cpufreq_update_util(src_rq, SCHED_CPUFREQ_INTERCLUSTER_MIG);
+ }
+
+ if (p == src_rq->ed_task) {
+ src_rq->ed_task = NULL;
+ if (!dest_rq->ed_task)
+ dest_rq->ed_task = p;
+ }
+
+done:
+ if (p->state == TASK_WAKING)
+ double_rq_unlock(src_rq, dest_rq);
+}
+
+void set_window_start(struct rq *rq)
+{
+ static int sync_cpu_available;
+
+ if (rq->window_start)
+ return;
+
+ if (!sync_cpu_available) {
+ rq->window_start = sched_ktime_clock();
+ sync_cpu_available = 1;
+ } else {
+ struct rq *sync_rq = cpu_rq(cpumask_any(cpu_online_mask));
+
+ raw_spin_unlock(&rq->lock);
+ double_rq_lock(rq, sync_rq);
+ rq->window_start = sync_rq->window_start;
+ rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
+ rq->nt_curr_runnable_sum = rq->nt_prev_runnable_sum = 0;
+ raw_spin_unlock(&sync_rq->lock);
+ }
+
+ rq->curr->ravg.mark_start = rq->window_start;
+}
+
+unsigned int max_possible_efficiency = 1;
+unsigned int min_possible_efficiency = UINT_MAX;
+
+#define INC_STEP 8
+#define DEC_STEP 2
+#define CONSISTENT_THRES 16
+#define INC_STEP_BIG 16
+/*
+ * bucket_increase - update the count of all buckets
+ *
+ * @buckets: array of buckets tracking busy time of a task
+ * @idx: the index of bucket to be incremented
+ *
+ * Each time a complete window finishes, the count of the bucket that the
+ * runtime falls into (@idx) is incremented and the counts of all other
+ * buckets are decayed. The rate of increase and decay can differ based
+ * on the current count in the bucket.
+ */
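+/*
+ * For illustration (hypothetical steady state): with INC_STEP = 8 and
+ * DEC_STEP = 2, a bucket hit in one out of every four windows gains 8 and
+ * loses 3 * DEC_STEP = 6 in between, so it slowly grows; a bucket that is
+ * never hit again drains from U8_MAX to zero in roughly
+ * U8_MAX / DEC_STEP windows.
+ */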
+static inline void bucket_increase(u8 *buckets, int idx)
+{
+ int i, step;
+
+ for (i = 0; i < NUM_BUSY_BUCKETS; i++) {
+ if (idx != i) {
+ if (buckets[i] > DEC_STEP)
+ buckets[i] -= DEC_STEP;
+ else
+ buckets[i] = 0;
+ } else {
+ step = buckets[i] >= CONSISTENT_THRES ?
+ INC_STEP_BIG : INC_STEP;
+ if (buckets[i] > U8_MAX - step)
+ buckets[i] = U8_MAX;
+ else
+ buckets[i] += step;
+ }
+ }
+}
+
+static inline int busy_to_bucket(u32 normalized_rt)
+{
+ int bidx;
+
+ bidx = mult_frac(normalized_rt, NUM_BUSY_BUCKETS, max_task_load());
+ bidx = min(bidx, NUM_BUSY_BUCKETS - 1);
+
+ /*
+	 * Combine the lowest two buckets. The lowest frequency falls into
+	 * the 2nd bucket, so continuing to predict the lowest bucket is
+	 * not useful.
+ */
+ if (!bidx)
+ bidx++;
+
+ return bidx;
+}
+
+/*
+ * get_pred_busy - calculate predicted demand for a task on runqueue
+ *
+ * @rq: runqueue of task p
+ * @p: task whose prediction is being updated
+ * @start: starting bucket. returned prediction should not be lower than
+ * this bucket.
+ * @runtime: runtime of the task. returned prediction should not be lower
+ * than this runtime.
+ * Note: @start can be derived from @runtime. It's passed in only to
+ * avoid duplicated calculation in some cases.
+ *
+ * A new predicted busy time is returned for task @p based on @runtime
+ * passed in. The function searches through buckets that represent busy
+ * time equal to or bigger than @runtime and attempts to find the bucket
+ * to use for prediction. Once found, it searches through historical busy
+ * time and returns the latest that falls into the bucket. If no such busy
+ * time exists, it returns the mid-point of that bucket.
+ */
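+/*
+ * Example with hypothetical numbers: if max_task_load() is 20ms and
+ * NUM_BUSY_BUCKETS is 10, predicted bucket 3 covers demand in [6ms, 8ms);
+ * the newest history sample in that range is returned, or 7ms (the
+ * mid-point) if none exists.
+ */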
+static u32 get_pred_busy(struct rq *rq, struct task_struct *p,
+ int start, u32 runtime)
+{
+ int i;
+ u8 *buckets = p->ravg.busy_buckets;
+ u32 *hist = p->ravg.sum_history;
+ u32 dmin, dmax;
+ u64 cur_freq_runtime = 0;
+ int first = NUM_BUSY_BUCKETS, final;
+ u32 ret = runtime;
+
+ /* skip prediction for new tasks due to lack of history */
+ if (unlikely(is_new_task(p)))
+ goto out;
+
+ /* find minimal bucket index to pick */
+ for (i = start; i < NUM_BUSY_BUCKETS; i++) {
+ if (buckets[i]) {
+ first = i;
+ break;
+ }
+ }
+ /* if no higher buckets are filled, predict runtime */
+ if (first >= NUM_BUSY_BUCKETS)
+ goto out;
+
+ /* compute the bucket for prediction */
+ final = first;
+
+ /* determine demand range for the predicted bucket */
+ if (final < 2) {
+ /* lowest two buckets are combined */
+ dmin = 0;
+ final = 1;
+ } else {
+ dmin = mult_frac(final, max_task_load(), NUM_BUSY_BUCKETS);
+ }
+ dmax = mult_frac(final + 1, max_task_load(), NUM_BUSY_BUCKETS);
+
+ /*
+ * search through runtime history and return first runtime that falls
+ * into the range of predicted bucket.
+ */
+ for (i = 0; i < sched_ravg_hist_size; i++) {
+ if (hist[i] >= dmin && hist[i] < dmax) {
+ ret = hist[i];
+ break;
+ }
+ }
+	/* no historical runtime within the bucket; use the bucket's mid-point */
+ if (ret < dmin)
+ ret = (dmin + dmax) / 2;
+ /*
+	 * When updating in the middle of a window, runtime can be higher
+	 * than anything in the recorded history. Always predict at least runtime.
+ */
+ ret = max(runtime, ret);
+out:
+ trace_sched_update_pred_demand(rq, p, runtime,
+ mult_frac((unsigned int)cur_freq_runtime, 100,
+ sched_ravg_window), ret);
+ return ret;
+}
+
+static inline u32 calc_pred_demand(struct rq *rq, struct task_struct *p)
+{
+ if (p->ravg.pred_demand >= p->ravg.curr_window)
+ return p->ravg.pred_demand;
+
+ return get_pred_busy(rq, p, busy_to_bucket(p->ravg.curr_window),
+ p->ravg.curr_window);
+}
+
+/*
+ * The predictive demand of a task is calculated at window roll-over.
+ * If the task's current window busy time exceeds the predicted
+ * demand, update it here to reflect the task's needs.
+ */
+void update_task_pred_demand(struct rq *rq, struct task_struct *p, int event)
+{
+ u32 new, old;
+
+ if (is_idle_task(p) || exiting_task(p))
+ return;
+
+ if (event != PUT_PREV_TASK && event != TASK_UPDATE &&
+ (!SCHED_FREQ_ACCOUNT_WAIT_TIME ||
+ (event != TASK_MIGRATE &&
+ event != PICK_NEXT_TASK)))
+ return;
+
+ /*
+	 * TASK_UPDATE can be called on a sleeping task when it is moved
+	 * between related thread groups.
+ */
+ if (event == TASK_UPDATE) {
+ if (!p->on_rq && !SCHED_FREQ_ACCOUNT_WAIT_TIME)
+ return;
+ }
+
+ new = calc_pred_demand(rq, p);
+ old = p->ravg.pred_demand;
+
+ if (old >= new)
+ return;
+
+ if (task_on_rq_queued(p) && (!task_has_dl_policy(p) ||
+ !p->dl.dl_throttled))
+ p->sched_class->fixup_hmp_sched_stats(rq, p,
+ p->ravg.demand,
+ new);
+
+ p->ravg.pred_demand = new;
+}
+
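+/*
+ * The extra bit at position NUM_LOAD_INDICES stays set as a sentinel so
+ * that searches over the top-tasks bitmap always find a set bit and
+ * terminate at a well-defined index.
+ */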
+void clear_top_tasks_bitmap(unsigned long *bitmap)
+{
+ memset(bitmap, 0, top_tasks_bitmap_size);
+ __set_bit(NUM_LOAD_INDICES, bitmap);
+}
+
+static void update_top_tasks(struct task_struct *p, struct rq *rq,
+ u32 old_curr_window, int new_window, bool full_window)
+{
+ u8 curr = rq->curr_table;
+ u8 prev = 1 - curr;
+ u8 *curr_table = rq->top_tasks[curr];
+ u8 *prev_table = rq->top_tasks[prev];
+ int old_index, new_index, update_index;
+ u32 curr_window = p->ravg.curr_window;
+ u32 prev_window = p->ravg.prev_window;
+ bool zero_index_update;
+
+ if (old_curr_window == curr_window && !new_window)
+ return;
+
+ old_index = load_to_index(old_curr_window);
+ new_index = load_to_index(curr_window);
+
+ if (!new_window) {
+ zero_index_update = !old_curr_window && curr_window;
+ if (old_index != new_index || zero_index_update) {
+ if (old_curr_window)
+ curr_table[old_index] -= 1;
+ if (curr_window)
+ curr_table[new_index] += 1;
+ if (new_index > rq->curr_top)
+ rq->curr_top = new_index;
+ }
+
+ if (!curr_table[old_index])
+ __clear_bit(NUM_LOAD_INDICES - old_index - 1,
+ rq->top_tasks_bitmap[curr]);
+
+ if (curr_table[new_index] == 1)
+ __set_bit(NUM_LOAD_INDICES - new_index - 1,
+ rq->top_tasks_bitmap[curr]);
+
+ return;
+ }
+
+ /*
+	 * The window has rolled over for this task. By the time we get
+	 * here, the curr/prev swap has already occurred, so we need
+	 * to use prev_window for the new index.
+ */
+ update_index = load_to_index(prev_window);
+
+ if (full_window) {
+ /*
+ * Two cases here. Either 'p' ran for the entire window or
+ * it didn't run at all. In either case there is no entry
+ * in the prev table. If 'p' ran the entire window, we just
+ * need to create a new entry in the prev table. In this case
+		 * update_index corresponds to sched_ravg_window
+ * so we can unconditionally update the top index.
+ */
+ if (prev_window) {
+ prev_table[update_index] += 1;
+ rq->prev_top = update_index;
+ }
+
+ if (prev_table[update_index] == 1)
+ __set_bit(NUM_LOAD_INDICES - update_index - 1,
+ rq->top_tasks_bitmap[prev]);
+ } else {
+ zero_index_update = !old_curr_window && prev_window;
+ if (old_index != update_index || zero_index_update) {
+ if (old_curr_window)
+ prev_table[old_index] -= 1;
+
+ prev_table[update_index] += 1;
+
+ if (update_index > rq->prev_top)
+ rq->prev_top = update_index;
+
+ if (!prev_table[old_index])
+ __clear_bit(NUM_LOAD_INDICES - old_index - 1,
+ rq->top_tasks_bitmap[prev]);
+
+ if (prev_table[update_index] == 1)
+ __set_bit(NUM_LOAD_INDICES - update_index - 1,
+ rq->top_tasks_bitmap[prev]);
+ }
+ }
+
+ if (curr_window) {
+ curr_table[new_index] += 1;
+
+ if (new_index > rq->curr_top)
+ rq->curr_top = new_index;
+
+ if (curr_table[new_index] == 1)
+ __set_bit(NUM_LOAD_INDICES - new_index - 1,
+ rq->top_tasks_bitmap[curr]);
+ }
+}
+
+static void rollover_top_tasks(struct rq *rq, bool full_window)
+{
+ u8 curr_table = rq->curr_table;
+ u8 prev_table = 1 - curr_table;
+ int curr_top = rq->curr_top;
+
+ clear_top_tasks_table(rq->top_tasks[prev_table]);
+ clear_top_tasks_bitmap(rq->top_tasks_bitmap[prev_table]);
+
+ if (full_window) {
+ curr_top = 0;
+ clear_top_tasks_table(rq->top_tasks[curr_table]);
+ clear_top_tasks_bitmap(
+ rq->top_tasks_bitmap[curr_table]);
+ }
+
+ rq->curr_table = prev_table;
+ rq->prev_top = curr_top;
+ rq->curr_top = 0;
+}
+
+static u32 empty_windows[NR_CPUS];
+
+static void rollover_task_window(struct task_struct *p, bool full_window)
+{
+ u32 *curr_cpu_windows = empty_windows;
+ u32 curr_window;
+ int i;
+
+ /* Rollover the sum */
+ curr_window = 0;
+
+ if (!full_window) {
+ curr_window = p->ravg.curr_window;
+ curr_cpu_windows = p->ravg.curr_window_cpu;
+ }
+
+ p->ravg.prev_window = curr_window;
+ p->ravg.curr_window = 0;
+
+ /* Roll over individual CPU contributions */
+ for (i = 0; i < nr_cpu_ids; i++) {
+ p->ravg.prev_window_cpu[i] = curr_cpu_windows[i];
+ p->ravg.curr_window_cpu[i] = 0;
+ }
+}
+
+void sched_set_io_is_busy(int val)
+{
+ sched_io_is_busy = val;
+}
+
+static inline int cpu_is_waiting_on_io(struct rq *rq)
+{
+ if (!sched_io_is_busy)
+ return 0;
+
+ return atomic_read(&rq->nr_iowait);
}
static int account_busy_for_cpu_time(struct rq *rq, struct task_struct *p,
@@ -306,99 +1302,150 @@
if (event == TASK_WAKE)
return 0;
- if (event == PUT_PREV_TASK || event == IRQ_UPDATE ||
- event == TASK_UPDATE)
+ if (event == PUT_PREV_TASK || event == IRQ_UPDATE)
return 1;
- /* Only TASK_MIGRATE && PICK_NEXT_TASK left */
- return walt_freq_account_wait_time;
+ /*
+	 * TASK_UPDATE can be called on a sleeping task when it is moved
+	 * between related thread groups.
+ */
+ if (event == TASK_UPDATE) {
+ if (rq->curr == p)
+ return 1;
+
+ return p->on_rq ? SCHED_FREQ_ACCOUNT_WAIT_TIME : 0;
+ }
+
+ /* TASK_MIGRATE, PICK_NEXT_TASK left */
+ return SCHED_FREQ_ACCOUNT_WAIT_TIME;
+}
+
+#define DIV64_U64_ROUNDUP(X, Y) div64_u64((X) + (Y - 1), Y)
+
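+/*
+ * Normalize an execution time delta against the current (estimated)
+ * frequency relative to max_possible_freq, then apply the cluster's
+ * exec_scale_factor, roughly:
+ *
+ *	delta' = ceil(delta * cur_freq / max_possible_freq)
+ *			* exec_scale_factor / 1024
+ *
+ * E.g. (hypothetically) 10ms of runtime at half the maximum possible
+ * frequency on a cluster with exec_scale_factor = 1024 scales to 5ms.
+ */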
+static inline u64 scale_exec_time(u64 delta, struct rq *rq)
+{
+ u32 freq;
+
+ freq = cpu_cycles_to_freq(rq->cc.cycles, rq->cc.time);
+ delta = DIV64_U64_ROUNDUP(delta * freq, max_possible_freq);
+ delta *= rq->cluster->exec_scale_factor;
+ delta >>= 10;
+
+ return delta;
+}
+
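+/*
+ * At a window boundary the per-rq "curr" sums become the "prev" sums and
+ * the "curr" sums restart from zero; if a full window (or more) elapsed
+ * with no activity, the previous window is simply left empty.
+ */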
+static void rollover_cpu_window(struct rq *rq, bool full_window)
+{
+ u64 curr_sum = rq->curr_runnable_sum;
+ u64 nt_curr_sum = rq->nt_curr_runnable_sum;
+ u64 grp_curr_sum = rq->grp_time.curr_runnable_sum;
+ u64 grp_nt_curr_sum = rq->grp_time.nt_curr_runnable_sum;
+
+ if (unlikely(full_window)) {
+ curr_sum = 0;
+ nt_curr_sum = 0;
+ grp_curr_sum = 0;
+ grp_nt_curr_sum = 0;
+ }
+
+ rq->prev_runnable_sum = curr_sum;
+ rq->nt_prev_runnable_sum = nt_curr_sum;
+ rq->grp_time.prev_runnable_sum = grp_curr_sum;
+ rq->grp_time.nt_prev_runnable_sum = grp_nt_curr_sum;
+
+ rq->curr_runnable_sum = 0;
+ rq->nt_curr_runnable_sum = 0;
+ rq->grp_time.curr_runnable_sum = 0;
+ rq->grp_time.nt_curr_runnable_sum = 0;
}
/*
* Account cpu activity in its busy time counters (rq->curr/prev_runnable_sum)
*/
static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
- int event, u64 wallclock, u64 irqtime)
+ int event, u64 wallclock, u64 irqtime)
{
- int new_window, nr_full_windows = 0;
+ int new_window, full_window = 0;
int p_is_curr_task = (p == rq->curr);
u64 mark_start = p->ravg.mark_start;
u64 window_start = rq->window_start;
- u32 window_size = walt_ravg_window;
+ u32 window_size = sched_ravg_window;
u64 delta;
+ u64 *curr_runnable_sum = &rq->curr_runnable_sum;
+ u64 *prev_runnable_sum = &rq->prev_runnable_sum;
+ u64 *nt_curr_runnable_sum = &rq->nt_curr_runnable_sum;
+ u64 *nt_prev_runnable_sum = &rq->nt_prev_runnable_sum;
+ bool new_task;
+ struct related_thread_group *grp;
+ int cpu = rq->cpu;
+ u32 old_curr_window = p->ravg.curr_window;
new_window = mark_start < window_start;
if (new_window) {
- nr_full_windows = div64_u64((window_start - mark_start),
- window_size);
+ full_window = (window_start - mark_start) >= window_size;
if (p->ravg.active_windows < USHRT_MAX)
p->ravg.active_windows++;
}
- /* Handle per-task window rollover. We don't care about the idle
- * task or exiting tasks. */
- if (new_window && !is_idle_task(p) && !exiting_task(p)) {
- u32 curr_window = 0;
+ new_task = is_new_task(p);
- if (!nr_full_windows)
- curr_window = p->ravg.curr_window;
-
- p->ravg.prev_window = curr_window;
- p->ravg.curr_window = 0;
+ /*
+ * Handle per-task window rollover. We don't care about the idle
+ * task or exiting tasks.
+ */
+ if (!is_idle_task(p) && !exiting_task(p)) {
+ if (new_window)
+ rollover_task_window(p, full_window);
}
- if (!account_busy_for_cpu_time(rq, p, irqtime, event)) {
- /* account_busy_for_cpu_time() = 0, so no update to the
- * task's current window needs to be made. This could be
- * for example
- *
- * - a wakeup event on a task within the current
- * window (!new_window below, no action required),
- * - switching to a new task from idle (PICK_NEXT_TASK)
- * in a new window where irqtime is 0 and we aren't
- * waiting on IO */
+ if (p_is_curr_task && new_window) {
+ rollover_cpu_window(rq, full_window);
+ rollover_top_tasks(rq, full_window);
+ }
- if (!new_window)
- return;
+ if (!account_busy_for_cpu_time(rq, p, irqtime, event))
+ goto done;
- /* A new window has started. The RQ demand must be rolled
- * over if p is the current task. */
- if (p_is_curr_task) {
- u64 prev_sum = 0;
+ grp = p->grp;
+ if (grp) {
+ struct group_cpu_time *cpu_time = &rq->grp_time;
- /* p is either idle task or an exiting task */
- if (!nr_full_windows) {
- prev_sum = rq->curr_runnable_sum;
- }
+ curr_runnable_sum = &cpu_time->curr_runnable_sum;
+ prev_runnable_sum = &cpu_time->prev_runnable_sum;
- rq->prev_runnable_sum = prev_sum;
- rq->curr_runnable_sum = 0;
- }
-
- return;
+ nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
+ nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
}
if (!new_window) {
- /* account_busy_for_cpu_time() = 1 so busy time needs
+ /*
+ * account_busy_for_cpu_time() = 1 so busy time needs
* to be accounted to the current window. No rollover
* since we didn't start a new window. An example of this is
* when a task starts execution and then sleeps within the
- * same window. */
+ * same window.
+ */
if (!irqtime || !is_idle_task(p) || cpu_is_waiting_on_io(rq))
delta = wallclock - mark_start;
else
delta = irqtime;
delta = scale_exec_time(delta, rq);
- rq->curr_runnable_sum += delta;
- if (!is_idle_task(p) && !exiting_task(p))
- p->ravg.curr_window += delta;
+ *curr_runnable_sum += delta;
+ if (new_task)
+ *nt_curr_runnable_sum += delta;
- return;
+ if (!is_idle_task(p) && !exiting_task(p)) {
+ p->ravg.curr_window += delta;
+ p->ravg.curr_window_cpu[cpu] += delta;
+ }
+
+ goto done;
}
if (!p_is_curr_task) {
- /* account_busy_for_cpu_time() = 1 so busy time needs
+ /*
+ * account_busy_for_cpu_time() = 1 so busy time needs
* to be accounted to the current window. A new window
* has also started, but p is not the current task, so the
* window is not rolled over - just split up and account
@@ -407,35 +1454,53 @@
* task.
*
* Irqtime can't be accounted by a task that isn't the
- * currently running task. */
+ * currently running task.
+ */
- if (!nr_full_windows) {
- /* A full window hasn't elapsed, account partial
- * contribution to previous completed window. */
+ if (!full_window) {
+ /*
+ * A full window hasn't elapsed, account partial
+ * contribution to previous completed window.
+ */
delta = scale_exec_time(window_start - mark_start, rq);
- if (!exiting_task(p))
+ if (!exiting_task(p)) {
p->ravg.prev_window += delta;
+ p->ravg.prev_window_cpu[cpu] += delta;
+ }
} else {
- /* Since at least one full window has elapsed,
+ /*
+ * Since at least one full window has elapsed,
* the contribution to the previous window is the
- * full window (window_size). */
+ * full window (window_size).
+ */
delta = scale_exec_time(window_size, rq);
- if (!exiting_task(p))
+ if (!exiting_task(p)) {
p->ravg.prev_window = delta;
+ p->ravg.prev_window_cpu[cpu] = delta;
+ }
}
- rq->prev_runnable_sum += delta;
+
+ *prev_runnable_sum += delta;
+ if (new_task)
+ *nt_prev_runnable_sum += delta;
/* Account piece of busy time in the current window. */
delta = scale_exec_time(wallclock - window_start, rq);
- rq->curr_runnable_sum += delta;
- if (!exiting_task(p))
- p->ravg.curr_window = delta;
+ *curr_runnable_sum += delta;
+ if (new_task)
+ *nt_curr_runnable_sum += delta;
- return;
+ if (!exiting_task(p)) {
+ p->ravg.curr_window = delta;
+ p->ravg.curr_window_cpu[cpu] = delta;
+ }
+
+ goto done;
}
if (!irqtime || !is_idle_task(p) || cpu_is_waiting_on_io(rq)) {
- /* account_busy_for_cpu_time() = 1 so busy time needs
+ /*
+ * account_busy_for_cpu_time() = 1 so busy time needs
* to be accounted to the current window. A new window
* has started and p is the current task so rollover is
* needed. If any of these three above conditions are true
@@ -445,44 +1510,57 @@
* be accounted.
*
* An example of this would be a task that starts execution
- * and then sleeps once a new window has begun. */
-
- if (!nr_full_windows) {
- /* A full window hasn't elapsed, account partial
- * contribution to previous completed window. */
- delta = scale_exec_time(window_start - mark_start, rq);
- if (!is_idle_task(p) && !exiting_task(p))
- p->ravg.prev_window += delta;
-
- delta += rq->curr_runnable_sum;
- } else {
- /* Since at least one full window has elapsed,
- * the contribution to the previous window is the
- * full window (window_size). */
- delta = scale_exec_time(window_size, rq);
- if (!is_idle_task(p) && !exiting_task(p))
- p->ravg.prev_window = delta;
-
- }
- /*
- * Rollover for normal runnable sum is done here by overwriting
- * the values in prev_runnable_sum and curr_runnable_sum.
- * Rollover for new task runnable sum has completed by previous
- * if-else statement.
+ * and then sleeps once a new window has begun.
*/
- rq->prev_runnable_sum = delta;
+
+ if (!full_window) {
+ /*
+ * A full window hasn't elapsed, account partial
+ * contribution to previous completed window.
+ */
+ delta = scale_exec_time(window_start - mark_start, rq);
+ if (!is_idle_task(p) && !exiting_task(p)) {
+ p->ravg.prev_window += delta;
+ p->ravg.prev_window_cpu[cpu] += delta;
+ }
+ } else {
+ /*
+ * Since at least one full window has elapsed,
+ * the contribution to the previous window is the
+ * full window (window_size).
+ */
+ delta = scale_exec_time(window_size, rq);
+ if (!is_idle_task(p) && !exiting_task(p)) {
+ p->ravg.prev_window = delta;
+ p->ravg.prev_window_cpu[cpu] = delta;
+ }
+ }
+
+ /*
+		 * The curr/prev rollover has already been done in
+		 * rollover_cpu_window(); here we only add the busy time
+		 * that belongs to the previous window.
+ */
+ *prev_runnable_sum += delta;
+ if (new_task)
+ *nt_prev_runnable_sum += delta;
/* Account piece of busy time in the current window. */
delta = scale_exec_time(wallclock - window_start, rq);
- rq->curr_runnable_sum = delta;
- if (!is_idle_task(p) && !exiting_task(p))
- p->ravg.curr_window = delta;
+ *curr_runnable_sum += delta;
+ if (new_task)
+ *nt_curr_runnable_sum += delta;
- return;
+ if (!is_idle_task(p) && !exiting_task(p)) {
+ p->ravg.curr_window = delta;
+ p->ravg.curr_window_cpu[cpu] = delta;
+ }
+
+ goto done;
}
if (irqtime) {
- /* account_busy_for_cpu_time() = 1 so busy time needs
+ /*
+ * account_busy_for_cpu_time() = 1 so busy time needs
* to be accounted to the current window. A new window
* has started and p is the current task so rollover is
* needed. The current task must be the idle task because
@@ -490,26 +1568,30 @@
*
* Irqtime will be accounted each time we process IRQ activity
* after a period of idleness, so we know the IRQ busy time
- * started at wallclock - irqtime. */
+ * started at wallclock - irqtime.
+ */
BUG_ON(!is_idle_task(p));
mark_start = wallclock - irqtime;
- /* Roll window over. If IRQ busy time was just in the current
- * window then that is all that need be accounted. */
- rq->prev_runnable_sum = rq->curr_runnable_sum;
+ /*
+ * Roll window over. If IRQ busy time was just in the current
+ * window then that is all that need be accounted.
+ */
if (mark_start > window_start) {
- rq->curr_runnable_sum = scale_exec_time(irqtime, rq);
+ *curr_runnable_sum = scale_exec_time(irqtime, rq);
return;
}
- /* The IRQ busy time spanned multiple windows. Process the
- * busy time preceding the current window start first. */
+ /*
+ * The IRQ busy time spanned multiple windows. Process the
+ * busy time preceding the current window start first.
+ */
delta = window_start - mark_start;
if (delta > window_size)
delta = window_size;
delta = scale_exec_time(delta, rq);
- rq->prev_runnable_sum += delta;
+ *prev_runnable_sum += delta;
/* Process the remaining IRQ busy time in the current window. */
delta = wallclock - window_start;
@@ -518,24 +1600,57 @@
return;
}
- BUG();
+done:
+ if (!is_idle_task(p) && !exiting_task(p))
+ update_top_tasks(p, rq, old_curr_window,
+ new_window, full_window);
}
-static int account_busy_for_task_demand(struct task_struct *p, int event)
+
+static inline u32 predict_and_update_buckets(struct rq *rq,
+ struct task_struct *p, u32 runtime) {
+
+ int bidx;
+ u32 pred_demand;
+
+ bidx = busy_to_bucket(runtime);
+ pred_demand = get_pred_busy(rq, p, bidx, runtime);
+ bucket_increase(p->ravg.busy_buckets, bidx);
+
+ return pred_demand;
+}
+
+static int
+account_busy_for_task_demand(struct rq *rq, struct task_struct *p, int event)
{
- /* No need to bother updating task demand for exiting tasks
- * or the idle task. */
+ /*
+ * No need to bother updating task demand for exiting tasks
+ * or the idle task.
+ */
if (exiting_task(p) || is_idle_task(p))
return 0;
- /* When a task is waking up it is completing a segment of non-busy
+ /*
+ * When a task is waking up it is completing a segment of non-busy
* time. Likewise, if wait time is not treated as busy time, then
* when a task begins to run or is migrated, it is not running and
- * is completing a segment of non-busy time. */
- if (event == TASK_WAKE || (!walt_account_wait_time &&
+ * is completing a segment of non-busy time.
+ */
+ if (event == TASK_WAKE || (!SCHED_ACCOUNT_WAIT_TIME &&
(event == PICK_NEXT_TASK || event == TASK_MIGRATE)))
return 0;
+ /*
+	 * TASK_UPDATE can be called on a sleeping task when it is moved
+	 * between related thread groups.
+ */
+ if (event == TASK_UPDATE) {
+ if (rq->curr == p)
+ return 1;
+
+ return p->on_rq ? SCHED_ACCOUNT_WAIT_TIME : 0;
+ }
+
return 1;
}
@@ -550,15 +1665,18 @@
{
u32 *hist = &p->ravg.sum_history[0];
int ridx, widx;
- u32 max = 0, avg, demand;
+ u32 max = 0, avg, demand, pred_demand;
u64 sum = 0;
+ u64 prev_demand;
/* Ignore windows where task had no activity */
if (!runtime || is_idle_task(p) || exiting_task(p) || !samples)
- goto done;
+ goto done;
+
+ prev_demand = p->ravg.demand;
/* Push new 'runtime' value onto stack */
- widx = walt_ravg_hist_size - 1;
+ widx = sched_ravg_hist_size - 1;
ridx = widx - samples;
for (; ridx >= 0; --widx, --ridx) {
hist[widx] = hist[ridx];
@@ -567,7 +1685,7 @@
max = hist[widx];
}
- for (widx = 0; widx < samples && widx < walt_ravg_hist_size; widx++) {
+ for (widx = 0; widx < samples && widx < sched_ravg_hist_size; widx++) {
hist[widx] = runtime;
sum += hist[widx];
if (hist[widx] > max)
@@ -576,17 +1694,18 @@
p->ravg.sum = 0;
- if (walt_window_stats_policy == WINDOW_STATS_RECENT) {
+ if (sched_window_stats_policy == WINDOW_STATS_RECENT) {
demand = runtime;
- } else if (walt_window_stats_policy == WINDOW_STATS_MAX) {
+ } else if (sched_window_stats_policy == WINDOW_STATS_MAX) {
demand = max;
} else {
- avg = div64_u64(sum, walt_ravg_hist_size);
- if (walt_window_stats_policy == WINDOW_STATS_AVG)
+ avg = div64_u64(sum, sched_ravg_hist_size);
+ if (sched_window_stats_policy == WINDOW_STATS_AVG)
demand = avg;
else
demand = max(avg, runtime);
}
+ pred_demand = predict_and_update_buckets(rq, p, runtime);
/*
* A throttled deadline sched class task gets dequeued without
@@ -595,22 +1714,27 @@
*/
if (task_on_rq_queued(p) && (!task_has_dl_policy(p) ||
!p->dl.dl_throttled))
- fixup_cumulative_runnable_avg(rq, p, demand);
+ p->sched_class->fixup_hmp_sched_stats(rq, p, demand,
+ pred_demand);
p->ravg.demand = demand;
+ p->ravg.pred_demand = pred_demand;
+
+ if (__task_in_cum_window_demand(rq, p))
+ inc_cum_window_demand(rq, p, p->ravg.demand - prev_demand);
done:
- trace_walt_update_history(rq, p, runtime, samples, event);
- return;
+ trace_sched_update_history(rq, p, runtime, samples, event);
}
-static void add_to_task_demand(struct rq *rq, struct task_struct *p,
- u64 delta)
+static u64 add_to_task_demand(struct rq *rq, struct task_struct *p, u64 delta)
{
delta = scale_exec_time(delta, rq);
p->ravg.sum += delta;
- if (unlikely(p->ravg.sum > walt_ravg_window))
- p->ravg.sum = walt_ravg_window;
+ if (unlikely(p->ravg.sum > sched_ravg_window))
+ p->ravg.sum = sched_ravg_window;
+
+ return delta;
}
/*
@@ -663,326 +1787,507 @@
* IMPORTANT : Leave p->ravg.mark_start unchanged, as update_cpu_busy_time()
* depends on it!
*/
-static void update_task_demand(struct task_struct *p, struct rq *rq,
- int event, u64 wallclock)
+static u64 update_task_demand(struct task_struct *p, struct rq *rq,
+ int event, u64 wallclock)
{
u64 mark_start = p->ravg.mark_start;
u64 delta, window_start = rq->window_start;
int new_window, nr_full_windows;
- u32 window_size = walt_ravg_window;
+ u32 window_size = sched_ravg_window;
+ u64 runtime;
new_window = mark_start < window_start;
- if (!account_busy_for_task_demand(p, event)) {
+ if (!account_busy_for_task_demand(rq, p, event)) {
if (new_window)
- /* If the time accounted isn't being accounted as
+ /*
+			 * If the time is not being accounted as
* busy time, and a new window started, only the
* previous window need be closed out with the
* pre-existing demand. Multiple windows may have
* elapsed, but since empty windows are dropped,
- * it is not necessary to account those. */
+ * it is not necessary to account those.
+ */
update_history(rq, p, p->ravg.sum, 1, event);
- return;
+ return 0;
}
if (!new_window) {
- /* The simple case - busy time contained within the existing
- * window. */
- add_to_task_demand(rq, p, wallclock - mark_start);
- return;
+ /*
+ * The simple case - busy time contained within the existing
+ * window.
+ */
+ return add_to_task_demand(rq, p, wallclock - mark_start);
}
- /* Busy time spans at least two windows. Temporarily rewind
- * window_start to first window boundary after mark_start. */
+ /*
+ * Busy time spans at least two windows. Temporarily rewind
+ * window_start to first window boundary after mark_start.
+ */
delta = window_start - mark_start;
nr_full_windows = div64_u64(delta, window_size);
window_start -= (u64)nr_full_windows * (u64)window_size;
/* Process (window_start - mark_start) first */
- add_to_task_demand(rq, p, window_start - mark_start);
+ runtime = add_to_task_demand(rq, p, window_start - mark_start);
/* Push new sample(s) into task's demand history */
update_history(rq, p, p->ravg.sum, 1, event);
- if (nr_full_windows)
- update_history(rq, p, scale_exec_time(window_size, rq),
- nr_full_windows, event);
+ if (nr_full_windows) {
+ u64 scaled_window = scale_exec_time(window_size, rq);
- /* Roll window_start back to current to process any remainder
- * in current window. */
+ update_history(rq, p, scaled_window, nr_full_windows, event);
+ runtime += nr_full_windows * scaled_window;
+ }
+
+ /*
+ * Roll window_start back to current to process any remainder
+ * in current window.
+ */
window_start += (u64)nr_full_windows * (u64)window_size;
/* Process (wallclock - window_start) next */
mark_start = window_start;
- add_to_task_demand(rq, p, wallclock - mark_start);
+ runtime += add_to_task_demand(rq, p, wallclock - mark_start);
+
+ return runtime;
+}
+
+static void
+update_task_rq_cpu_cycles(struct task_struct *p, struct rq *rq, int event,
+ u64 wallclock, u64 irqtime)
+{
+ u64 cur_cycles;
+ int cpu = cpu_of(rq);
+
+ lockdep_assert_held(&rq->lock);
+
+ if (!use_cycle_counter) {
+ rq->cc.cycles = cpu_cur_freq(cpu);
+ rq->cc.time = 1;
+ return;
+ }
+
+ cur_cycles = cpu_cycle_counter_cb.get_cpu_cycle_counter(cpu);
+
+ /*
+	 * If the current task is the idle task and irqtime == 0, the CPU
+	 * was indeed idle and its cycle counter was probably not
+	 * increasing. We still need an estimated CPU frequency
+	 * for IO wait time accounting, so use the previously
+	 * calculated frequency in such a case.
+ */
+ if (!is_idle_task(rq->curr) || irqtime) {
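+		/*
+		 * The cycle counter is assumed to be free-running; if the new
+		 * reading is smaller than the last one, treat it as a single
+		 * 64-bit wraparound.
+		 */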
+ if (unlikely(cur_cycles < p->cpu_cycles))
+ rq->cc.cycles = cur_cycles + (U64_MAX - p->cpu_cycles);
+ else
+ rq->cc.cycles = cur_cycles - p->cpu_cycles;
+ rq->cc.cycles = rq->cc.cycles * NSEC_PER_MSEC;
+
+ if (event == IRQ_UPDATE && is_idle_task(p))
+ /*
+			 * The time between the idle task's mark_start and the IRQ
+			 * handler entry is a stall period for the CPU cycle counter.
+			 * Upon IRQ handler entry, sched_account_irqstart()
+			 * replenishes the idle task's cpu cycle counter, so
+			 * rq->cc.cycles now represents the cycles accumulated during
+			 * the IRQ handler rather than the time between idle entry and
+			 * IRQ exit. Thus use irqtime as the time delta.
+ */
+ rq->cc.time = irqtime;
+ else
+ rq->cc.time = wallclock - p->ravg.mark_start;
+ BUG_ON((s64)rq->cc.time < 0);
+ }
+
+ p->cpu_cycles = cur_cycles;
+
+ trace_sched_get_task_cpu_cycles(cpu, event, rq->cc.cycles, rq->cc.time);
}
/* Reflect task activity on its demand and cpu's busy time statistics */
-void walt_update_task_ravg(struct task_struct *p, struct rq *rq,
- int event, u64 wallclock, u64 irqtime)
+void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
+ u64 wallclock, u64 irqtime)
{
- if (walt_disabled || !rq->window_start)
+ u64 runtime;
+
+ if (!rq->window_start || sched_disable_window_stats ||
+ p->ravg.mark_start == wallclock)
return;
lockdep_assert_held(&rq->lock);
- update_window_start(rq, wallclock);
+ update_window_start(rq, wallclock, event);
- if (!p->ravg.mark_start)
+ if (!p->ravg.mark_start) {
+ update_task_cpu_cycles(p, cpu_of(rq));
goto done;
+ }
- update_task_demand(p, rq, event, wallclock);
+ update_task_rq_cpu_cycles(p, rq, event, wallclock, irqtime);
+ runtime = update_task_demand(p, rq, event, wallclock);
+ if (runtime)
+ update_task_burst(p, rq, event, runtime);
update_cpu_busy_time(p, rq, event, wallclock, irqtime);
-
+ update_task_pred_demand(rq, p, event);
done:
- trace_walt_update_task_ravg(p, rq, event, wallclock, irqtime);
+ trace_sched_update_task_ravg(p, rq, event, wallclock, irqtime,
+ rq->cc.cycles, rq->cc.time, &rq->grp_time);
+ trace_sched_update_task_ravg_mini(p, rq, event, wallclock, irqtime,
+ rq->cc.cycles, rq->cc.time, &rq->grp_time);
p->ravg.mark_start = wallclock;
}
-unsigned long __weak arch_get_cpu_efficiency(int cpu)
+u32 sched_get_init_task_load(struct task_struct *p)
{
- return SCHED_CAPACITY_SCALE;
+ return p->init_load_pct;
}
-void walt_init_cpu_efficiency(void)
+int sched_set_init_task_load(struct task_struct *p, int init_load_pct)
{
- int i, efficiency;
- unsigned int max = 0, min = UINT_MAX;
+ if (init_load_pct < 0 || init_load_pct > 100)
+ return -EINVAL;
- for_each_possible_cpu(i) {
- efficiency = arch_get_cpu_efficiency(i);
- cpu_rq(i)->efficiency = efficiency;
+ p->init_load_pct = init_load_pct;
- if (efficiency > max)
- max = efficiency;
- if (efficiency < min)
- min = efficiency;
- }
-
- if (max)
- max_possible_efficiency = max;
-
- if (min)
- min_possible_efficiency = min;
+ return 0;
}
-static void reset_task_stats(struct task_struct *p)
+void init_new_task_load(struct task_struct *p, bool idle_task)
+{
+ int i;
+ u32 init_load_windows = sched_init_task_load_windows;
+ u32 init_load_pct = current->init_load_pct;
+
+ p->init_load_pct = 0;
+ rcu_assign_pointer(p->grp, NULL);
+ INIT_LIST_HEAD(&p->grp_list);
+ memset(&p->ravg, 0, sizeof(struct ravg));
+ p->cpu_cycles = 0;
+
+ init_new_task_load_hmp(p, idle_task);
+
+ p->ravg.curr_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32), GFP_KERNEL);
+ p->ravg.prev_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32), GFP_KERNEL);
+
+ /* Don't have much choice. CPU frequency would be bogus */
+ BUG_ON(!p->ravg.curr_window_cpu || !p->ravg.prev_window_cpu);
+
+ if (idle_task)
+ return;
+
+ if (init_load_pct)
+ init_load_windows = div64_u64((u64)init_load_pct *
+ (u64)sched_ravg_window, 100);
+
+ p->ravg.demand = init_load_windows;
+ p->ravg.pred_demand = 0;
+ for (i = 0; i < RAVG_HIST_SIZE_MAX; ++i)
+ p->ravg.sum_history[i] = init_load_windows;
+ p->misfit = false;
+}
+
+void reset_task_stats(struct task_struct *p)
{
u32 sum = 0;
+ u32 *curr_window_ptr = NULL;
+ u32 *prev_window_ptr = NULL;
- if (exiting_task(p))
+ if (exiting_task(p)) {
sum = EXITING_TASK_MARKER;
+ } else {
+ curr_window_ptr = p->ravg.curr_window_cpu;
+ prev_window_ptr = p->ravg.prev_window_cpu;
+ memset(curr_window_ptr, 0, sizeof(u32) * nr_cpu_ids);
+ memset(prev_window_ptr, 0, sizeof(u32) * nr_cpu_ids);
+ }
memset(&p->ravg, 0, sizeof(struct ravg));
+
+ p->ravg.curr_window_cpu = curr_window_ptr;
+ p->ravg.prev_window_cpu = prev_window_ptr;
+
+ reset_task_stats_hmp(p);
+
/* Retain EXITING_TASK marker */
p->ravg.sum_history[0] = sum;
}
-void walt_mark_task_starting(struct task_struct *p)
+void mark_task_starting(struct task_struct *p)
{
u64 wallclock;
struct rq *rq = task_rq(p);
- if (!rq->window_start) {
+ if (!rq->window_start || sched_disable_window_stats) {
reset_task_stats(p);
return;
}
- wallclock = walt_ktime_clock();
- p->ravg.mark_start = wallclock;
+ wallclock = sched_ktime_clock();
+ p->ravg.mark_start = p->last_wake_ts = wallclock;
+ p->last_cpu_selected_ts = wallclock;
+ p->last_switch_out_ts = 0;
+ update_task_cpu_cycles(p, cpu_of(rq));
}
-void walt_set_window_start(struct rq *rq)
+static cpumask_t all_cluster_cpus = CPU_MASK_NONE;
+DECLARE_BITMAP(all_cluster_ids, NR_CPUS);
+struct sched_cluster *sched_cluster[NR_CPUS];
+int num_clusters;
+
+struct list_head cluster_head;
+
+static void
+insert_cluster(struct sched_cluster *cluster, struct list_head *head)
{
- int cpu = cpu_of(rq);
- struct rq *sync_rq = cpu_rq(sync_cpu);
+ struct sched_cluster *tmp;
+ struct list_head *iter = head;
- if (rq->window_start)
- return;
-
- if (cpu == sync_cpu) {
- rq->window_start = walt_ktime_clock();
- } else {
- raw_spin_unlock(&rq->lock);
- double_rq_lock(rq, sync_rq);
- rq->window_start = cpu_rq(sync_cpu)->window_start;
- rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
- raw_spin_unlock(&sync_rq->lock);
+ list_for_each_entry(tmp, head, list) {
+ if (cluster->max_power_cost < tmp->max_power_cost)
+ break;
+ iter = &tmp->list;
}
- rq->curr->ravg.mark_start = rq->window_start;
+ list_add(&cluster->list, iter);
}
-void walt_migrate_sync_cpu(int cpu)
+static struct sched_cluster *alloc_new_cluster(const struct cpumask *cpus)
{
- if (cpu == sync_cpu)
- sync_cpu = smp_processor_id();
+ struct sched_cluster *cluster = NULL;
+
+ cluster = kzalloc(sizeof(struct sched_cluster), GFP_ATOMIC);
+ if (!cluster) {
+ __WARN_printf("Cluster allocation failed. Possible bad scheduling\n");
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&cluster->list);
+ cluster->max_power_cost = 1;
+ cluster->min_power_cost = 1;
+ cluster->capacity = 1024;
+ cluster->max_possible_capacity = 1024;
+ cluster->efficiency = 1;
+ cluster->load_scale_factor = 1024;
+ cluster->cur_freq = 1;
+ cluster->max_freq = 1;
+ cluster->max_mitigated_freq = UINT_MAX;
+ cluster->min_freq = 1;
+ cluster->max_possible_freq = 1;
+ cluster->dstate = 0;
+ cluster->dstate_wakeup_energy = 0;
+ cluster->dstate_wakeup_latency = 0;
+ cluster->freq_init_done = false;
+
+ raw_spin_lock_init(&cluster->load_lock);
+ cluster->cpus = *cpus;
+ cluster->efficiency = arch_get_cpu_efficiency(cpumask_first(cpus));
+
+ if (cluster->efficiency > max_possible_efficiency)
+ max_possible_efficiency = cluster->efficiency;
+ if (cluster->efficiency < min_possible_efficiency)
+ min_possible_efficiency = cluster->efficiency;
+
+ cluster->notifier_sent = 0;
+ return cluster;
}
-void walt_fixup_busy_time(struct task_struct *p, int new_cpu)
+static void add_cluster(const struct cpumask *cpus, struct list_head *head)
{
- struct rq *src_rq = task_rq(p);
- struct rq *dest_rq = cpu_rq(new_cpu);
- u64 wallclock;
-
- if (!p->on_rq && p->state != TASK_WAKING)
- return;
-
- if (exiting_task(p)) {
- return;
- }
-
- if (p->state == TASK_WAKING)
- double_rq_lock(src_rq, dest_rq);
-
- wallclock = walt_ktime_clock();
-
- walt_update_task_ravg(task_rq(p)->curr, task_rq(p),
- TASK_UPDATE, wallclock, 0);
- walt_update_task_ravg(dest_rq->curr, dest_rq,
- TASK_UPDATE, wallclock, 0);
-
- walt_update_task_ravg(p, task_rq(p), TASK_MIGRATE, wallclock, 0);
-
- if (p->ravg.curr_window) {
- src_rq->curr_runnable_sum -= p->ravg.curr_window;
- dest_rq->curr_runnable_sum += p->ravg.curr_window;
- }
-
- if (p->ravg.prev_window) {
- src_rq->prev_runnable_sum -= p->ravg.prev_window;
- dest_rq->prev_runnable_sum += p->ravg.prev_window;
- }
-
- if ((s64)src_rq->prev_runnable_sum < 0) {
- src_rq->prev_runnable_sum = 0;
- WARN_ON(1);
- }
- if ((s64)src_rq->curr_runnable_sum < 0) {
- src_rq->curr_runnable_sum = 0;
- WARN_ON(1);
- }
-
- trace_walt_migration_update_sum(src_rq, p);
- trace_walt_migration_update_sum(dest_rq, p);
-
- if (p->state == TASK_WAKING)
- double_rq_unlock(src_rq, dest_rq);
-}
-
-/* Keep track of max/min capacity possible across CPUs "currently" */
-static void __update_min_max_capacity(void)
-{
- int i;
- int max = 0, min = INT_MAX;
-
- for_each_online_cpu(i) {
- if (cpu_rq(i)->capacity > max)
- max = cpu_rq(i)->capacity;
- if (cpu_rq(i)->capacity < min)
- min = cpu_rq(i)->capacity;
- }
-
- max_capacity = max;
- min_capacity = min;
-}
-
-static void update_min_max_capacity(void)
-{
- unsigned long flags;
+ struct sched_cluster *cluster = alloc_new_cluster(cpus);
int i;
- local_irq_save(flags);
- for_each_possible_cpu(i)
- raw_spin_lock(&cpu_rq(i)->lock);
+ if (!cluster)
+ return;
- __update_min_max_capacity();
+ for_each_cpu(i, cpus)
+ cpu_rq(i)->cluster = cluster;
- for_each_possible_cpu(i)
- raw_spin_unlock(&cpu_rq(i)->lock);
- local_irq_restore(flags);
+ insert_cluster(cluster, head);
+ set_bit(num_clusters, all_cluster_ids);
+ num_clusters++;
}
-/*
- * Return 'capacity' of a cpu in reference to "least" efficient cpu, such that
- * least efficient cpu gets capacity of 1024
- */
-static unsigned long capacity_scale_cpu_efficiency(int cpu)
-{
- return (1024 * cpu_rq(cpu)->efficiency) / min_possible_efficiency;
-}
-
-/*
- * Return 'capacity' of a cpu in reference to cpu with lowest max_freq
- * (min_max_freq), such that one with lowest max_freq gets capacity of 1024.
- */
-static unsigned long capacity_scale_cpu_freq(int cpu)
-{
- return (1024 * cpu_rq(cpu)->max_freq) / min_max_freq;
-}
-
-/*
- * Return load_scale_factor of a cpu in reference to "most" efficient cpu, so
- * that "most" efficient cpu gets a load_scale_factor of 1
- */
-static unsigned long load_scale_cpu_efficiency(int cpu)
-{
- return DIV_ROUND_UP(1024 * max_possible_efficiency,
- cpu_rq(cpu)->efficiency);
-}
-
-/*
- * Return load_scale_factor of a cpu in reference to cpu with best max_freq
- * (max_possible_freq), so that one with best max_freq gets a load_scale_factor
- * of 1.
- */
-static unsigned long load_scale_cpu_freq(int cpu)
-{
- return DIV_ROUND_UP(1024 * max_possible_freq, cpu_rq(cpu)->max_freq);
-}
-
-static int compute_capacity(int cpu)
+static int compute_max_possible_capacity(struct sched_cluster *cluster)
{
int capacity = 1024;
- capacity *= capacity_scale_cpu_efficiency(cpu);
+ capacity *= capacity_scale_cpu_efficiency(cluster);
capacity >>= 10;
- capacity *= capacity_scale_cpu_freq(cpu);
+ capacity *= (1024 * cluster->max_possible_freq) / min_max_freq;
capacity >>= 10;
return capacity;
}
-static int compute_load_scale_factor(int cpu)
+static void update_min_max_capacity(void)
{
- int load_scale = 1024;
+ unsigned long flags;
+
+ acquire_rq_locks_irqsave(cpu_possible_mask, &flags);
+ __update_min_max_capacity();
+ release_rq_locks_irqrestore(cpu_possible_mask, &flags);
+}
+
+unsigned int max_power_cost = 1;
+
+static int
+compare_clusters(void *priv, struct list_head *a, struct list_head *b)
+{
+ struct sched_cluster *cluster1, *cluster2;
+ int ret;
+
+ cluster1 = container_of(a, struct sched_cluster, list);
+ cluster2 = container_of(b, struct sched_cluster, list);
/*
- * load_scale_factor accounts for the fact that task load
- * is in reference to "best" performing cpu. Task's load will need to be
- * scaled (up) by a factor to determine suitability to be placed on a
- * (little) cpu.
+ * Don't assume higher capacity means higher power. If the
+	 * power cost is the same, sort the higher capacity cluster before
+ * the lower capacity cluster to start placing the tasks
+ * on the higher capacity cluster.
*/
- load_scale *= load_scale_cpu_efficiency(cpu);
- load_scale >>= 10;
+ ret = cluster1->max_power_cost > cluster2->max_power_cost ||
+ (cluster1->max_power_cost == cluster2->max_power_cost &&
+ cluster1->max_possible_capacity <
+ cluster2->max_possible_capacity);
- load_scale *= load_scale_cpu_freq(cpu);
- load_scale >>= 10;
-
- return load_scale;
+ return ret;
}
+void sort_clusters(void)
+{
+ struct sched_cluster *cluster;
+ struct list_head new_head;
+ unsigned int tmp_max = 1;
+
+ INIT_LIST_HEAD(&new_head);
+
+ for_each_sched_cluster(cluster) {
+ cluster->max_power_cost = power_cost(cluster_first_cpu(cluster),
+ max_task_load());
+ cluster->min_power_cost = power_cost(cluster_first_cpu(cluster),
+ 0);
+
+ if (cluster->max_power_cost > tmp_max)
+ tmp_max = cluster->max_power_cost;
+ }
+ max_power_cost = tmp_max;
+
+ move_list(&new_head, &cluster_head, true);
+
+ list_sort(NULL, &new_head, compare_clusters);
+ assign_cluster_ids(&new_head);
+
+ /*
+ * Ensure cluster ids are visible to all CPUs before making
+ * cluster_head visible.
+ */
+ move_list(&cluster_head, &new_head, false);
+}
+
+static void update_all_clusters_stats(void)
+{
+ struct sched_cluster *cluster;
+ u64 highest_mpc = 0, lowest_mpc = U64_MAX;
+ unsigned long flags;
+
+ acquire_rq_locks_irqsave(cpu_possible_mask, &flags);
+
+ for_each_sched_cluster(cluster) {
+ u64 mpc;
+
+ cluster->capacity = compute_capacity(cluster);
+ mpc = cluster->max_possible_capacity =
+ compute_max_possible_capacity(cluster);
+ cluster->load_scale_factor = compute_load_scale_factor(cluster);
+
+ cluster->exec_scale_factor =
+ DIV_ROUND_UP(cluster->efficiency * 1024,
+ max_possible_efficiency);
+
+ if (mpc > highest_mpc)
+ highest_mpc = mpc;
+
+ if (mpc < lowest_mpc)
+ lowest_mpc = mpc;
+ }
+
+ max_possible_capacity = highest_mpc;
+ min_max_possible_capacity = lowest_mpc;
+
+ __update_min_max_capacity();
+ sched_update_freq_max_load(cpu_possible_mask);
+ release_rq_locks_irqrestore(cpu_possible_mask, &flags);
+}
+
+void update_cluster_topology(void)
+{
+ struct cpumask cpus = *cpu_possible_mask;
+ const struct cpumask *cluster_cpus;
+ struct list_head new_head;
+ int i;
+
+ INIT_LIST_HEAD(&new_head);
+
+ for_each_cpu(i, &cpus) {
+ cluster_cpus = cpu_coregroup_mask(i);
+ cpumask_or(&all_cluster_cpus, &all_cluster_cpus, cluster_cpus);
+ cpumask_andnot(&cpus, &cpus, cluster_cpus);
+ add_cluster(cluster_cpus, &new_head);
+ }
+
+ assign_cluster_ids(&new_head);
+
+ /*
+ * Ensure cluster ids are visible to all CPUs before making
+ * cluster_head visible.
+ */
+ move_list(&cluster_head, &new_head, false);
+ update_all_clusters_stats();
+}
+
+struct sched_cluster init_cluster = {
+ .list = LIST_HEAD_INIT(init_cluster.list),
+ .id = 0,
+ .max_power_cost = 1,
+ .min_power_cost = 1,
+ .capacity = 1024,
+ .max_possible_capacity = 1024,
+ .efficiency = 1,
+ .load_scale_factor = 1024,
+ .cur_freq = 1,
+ .max_freq = 1,
+ .max_mitigated_freq = UINT_MAX,
+ .min_freq = 1,
+ .max_possible_freq = 1,
+ .dstate = 0,
+ .dstate_wakeup_energy = 0,
+ .dstate_wakeup_latency = 0,
+ .exec_scale_factor = 1024,
+ .notifier_sent = 0,
+ .wake_up_idle = 0,
+ .aggr_grp_load = 0,
+};
+
+void init_clusters(void)
+{
+ bitmap_clear(all_cluster_ids, 0, NR_CPUS);
+ init_cluster.cpus = *cpu_possible_mask;
+ raw_spin_lock_init(&init_cluster.load_lock);
+ INIT_LIST_HEAD(&cluster_head);
+}
+
+static unsigned long cpu_max_table_freq[NR_CPUS];
+
static int cpufreq_notifier_policy(struct notifier_block *nb,
unsigned long val, void *data)
{
struct cpufreq_policy *policy = (struct cpufreq_policy *)data;
- int i, update_max = 0;
- u64 highest_mpc = 0, highest_mplsf = 0;
- const struct cpumask *cpus = policy->related_cpus;
- unsigned int orig_min_max_freq = min_max_freq;
- unsigned int orig_max_possible_freq = max_possible_freq;
- /* Initialized to policy->max in case policy->related_cpus is empty! */
- unsigned int orig_max_freq = policy->max;
+ struct sched_cluster *cluster = NULL;
+ struct cpumask policy_cluster = *policy->related_cpus;
+ unsigned int orig_max_freq = 0;
+ int i, j, update_capacity = 0;
if (val != CPUFREQ_NOTIFY && val != CPUFREQ_REMOVE_POLICY &&
val != CPUFREQ_CREATE_POLICY)
@@ -993,16 +2298,6 @@
return 0;
}
- for_each_cpu(i, policy->related_cpus) {
- cpumask_copy(&cpu_rq(i)->freq_domain_cpumask,
- policy->related_cpus);
- orig_max_freq = cpu_rq(i)->max_freq;
- cpu_rq(i)->min_freq = policy->min;
- cpu_rq(i)->max_freq = policy->max;
- cpu_rq(i)->cur_freq = policy->cur;
- cpu_rq(i)->max_possible_freq = policy->cpuinfo.max_freq;
- }
-
max_possible_freq = max(max_possible_freq, policy->cpuinfo.max_freq);
if (min_max_freq == 1)
min_max_freq = UINT_MAX;
@@ -1010,108 +2305,40 @@
BUG_ON(!min_max_freq);
BUG_ON(!policy->max);
- /* Changes to policy other than max_freq don't require any updates */
- if (orig_max_freq == policy->max)
- return 0;
+ for_each_cpu(i, &policy_cluster)
+ cpu_max_table_freq[i] = policy->cpuinfo.max_freq;
- /*
- * A changed min_max_freq or max_possible_freq (possible during bootup)
- * needs to trigger re-computation of load_scale_factor and capacity for
- * all possible cpus (even those offline). It also needs to trigger
- * re-computation of nr_big_task count on all online cpus.
- *
- * A changed rq->max_freq otoh needs to trigger re-computation of
- * load_scale_factor and capacity for just the cluster of cpus involved.
- * Since small task definition depends on max_load_scale_factor, a
- * changed load_scale_factor of one cluster could influence
- * classification of tasks in another cluster. Hence a changed
- * rq->max_freq will need to trigger re-computation of nr_big_task
- * count on all online cpus.
- *
- * While it should be sufficient for nr_big_tasks to be
- * re-computed for only online cpus, we have inadequate context
- * information here (in policy notifier) with regard to hotplug-safety
- * context in which notification is issued. As a result, we can't use
- * get_online_cpus() here, as it can lead to deadlock. Until cpufreq is
- * fixed up to issue notification always in hotplug-safe context,
- * re-compute nr_big_task for all possible cpus.
- */
+ for_each_cpu(i, &policy_cluster) {
+ cluster = cpu_rq(i)->cluster;
+ cpumask_andnot(&policy_cluster, &policy_cluster,
+ &cluster->cpus);
- if (orig_min_max_freq != min_max_freq ||
- orig_max_possible_freq != max_possible_freq) {
- cpus = cpu_possible_mask;
- update_max = 1;
- }
+ orig_max_freq = cluster->max_freq;
+ cluster->min_freq = policy->min;
+ cluster->max_freq = policy->max;
+ cluster->cur_freq = policy->cur;
- /*
- * Changed load_scale_factor can trigger reclassification of tasks as
- * big or small. Make this change "atomic" so that tasks are accounted
- * properly due to changed load_scale_factor
- */
- for_each_cpu(i, cpus) {
- struct rq *rq = cpu_rq(i);
+ if (!cluster->freq_init_done) {
+ mutex_lock(&cluster_lock);
+ for_each_cpu(j, &cluster->cpus)
+ cpumask_copy(&cpu_rq(j)->freq_domain_cpumask,
+ policy->related_cpus);
+ cluster->max_possible_freq = policy->cpuinfo.max_freq;
+ cluster->max_possible_capacity =
+ compute_max_possible_capacity(cluster);
+ cluster->freq_init_done = true;
- rq->capacity = compute_capacity(i);
- rq->load_scale_factor = compute_load_scale_factor(i);
-
- if (update_max) {
- u64 mpc, mplsf;
-
- mpc = div_u64(((u64) rq->capacity) *
- rq->max_possible_freq, rq->max_freq);
- rq->max_possible_capacity = (int) mpc;
-
- mplsf = div_u64(((u64) rq->load_scale_factor) *
- rq->max_possible_freq, rq->max_freq);
-
- if (mpc > highest_mpc) {
- highest_mpc = mpc;
- cpumask_clear(&mpc_mask);
- cpumask_set_cpu(i, &mpc_mask);
- } else if (mpc == highest_mpc) {
- cpumask_set_cpu(i, &mpc_mask);
- }
-
- if (mplsf > highest_mplsf)
- highest_mplsf = mplsf;
+ sort_clusters();
+ update_all_clusters_stats();
+ mutex_unlock(&cluster_lock);
+ continue;
}
+
+ update_capacity += (orig_max_freq != cluster->max_freq);
}
- if (update_max) {
- max_possible_capacity = highest_mpc;
- max_load_scale_factor = highest_mplsf;
- }
-
- __update_min_max_capacity();
-
- return 0;
-}
-
-static int cpufreq_notifier_trans(struct notifier_block *nb,
- unsigned long val, void *data)
-{
- struct cpufreq_freqs *freq = (struct cpufreq_freqs *)data;
- unsigned int cpu = freq->cpu, new_freq = freq->new;
- unsigned long flags;
- int i;
-
- if (val != CPUFREQ_POSTCHANGE)
- return 0;
-
- BUG_ON(!new_freq);
-
- if (cpu_rq(cpu)->cur_freq == new_freq)
- return 0;
-
- for_each_cpu(i, &cpu_rq(cpu)->freq_domain_cpumask) {
- struct rq *rq = cpu_rq(i);
-
- raw_spin_lock_irqsave(&rq->lock, flags);
- walt_update_task_ravg(rq->curr, rq, TASK_UPDATE,
- walt_ktime_clock(), 0);
- rq->cur_freq = new_freq;
- raw_spin_unlock_irqrestore(&rq->lock, flags);
- }
+ if (update_capacity)
+ update_cpu_cluster_capacity(policy->related_cpus);
return 0;
}
@@ -1120,49 +2347,683 @@
.notifier_call = cpufreq_notifier_policy
};
-static struct notifier_block notifier_trans_block = {
- .notifier_call = cpufreq_notifier_trans
-};
-
-static int register_sched_callback(void)
+static int register_walt_callback(void)
{
- int ret;
-
-	ret = cpufreq_register_notifier(&notifier_policy_block,
- CPUFREQ_POLICY_NOTIFIER);
-
- if (!ret)
-		ret = cpufreq_register_notifier(&notifier_trans_block,
- CPUFREQ_TRANSITION_NOTIFIER);
-
- return 0;
+	return cpufreq_register_notifier(&notifier_policy_block,
+ CPUFREQ_POLICY_NOTIFIER);
}
-
/*
* cpufreq callbacks can be registered at core_initcall or later time.
* Any registration done prior to that is "forgotten" by cpufreq. See
* initialization of variable init_cpufreq_transition_notifier_list_called
* for further information.
*/
-core_initcall(register_sched_callback);
+core_initcall(register_walt_callback);
-void walt_init_new_task_load(struct task_struct *p)
+static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
+ struct task_struct *p, int event);
+
+/*
+ * Enable colocation and frequency aggregation for all threads in a process.
+ * Children inherit the group id from the parent.
+ */
+unsigned int __read_mostly sysctl_sched_enable_thread_grouping;
+
+/* Maximum allowed threshold before freq aggregation must be enabled */
+#define MAX_FREQ_AGGR_THRESH 1000
+
+struct related_thread_group *related_thread_groups[MAX_NUM_CGROUP_COLOC_ID];
+static LIST_HEAD(active_related_thread_groups);
+DEFINE_RWLOCK(related_thread_group_lock);
+
+unsigned int __read_mostly sysctl_sched_freq_aggregate_threshold_pct;
+
+/*
+ * Task groups whose aggregate demand on a cpu is more than
+ * sched_group_upmigrate need to be up-migrated if possible.
+ */
+unsigned int __read_mostly sched_group_upmigrate = 20000000;
+unsigned int __read_mostly sysctl_sched_group_upmigrate_pct = 100;
+
+/*
+ * Task groups, once up-migrated, will need to drop their aggregate
+ * demand to less than sched_group_downmigrate before they are "down"
+ * migrated.
+ */
+unsigned int __read_mostly sched_group_downmigrate = 19000000;
+unsigned int __read_mostly sysctl_sched_group_downmigrate_pct = 95;
+
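+/*
+ * Example with the defaults above: a related thread group must build up
+ * more than sched_group_upmigrate (20000000, roughly a full 20ms window)
+ * of scaled aggregate demand before it prefers a higher-capacity cluster,
+ * and it keeps that preference until the demand drops below
+ * sched_group_downmigrate (19000000), which provides hysteresis against
+ * bouncing between clusters.
+ */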
+static int
+group_will_fit(struct sched_cluster *cluster, struct related_thread_group *grp,
+ u64 demand, bool group_boost)
{
- int i;
- u32 init_load_windows =
- div64_u64((u64)sysctl_sched_walt_init_task_load_pct *
- (u64)walt_ravg_window, 100);
- u32 init_load_pct = current->init_load_pct;
+ int cpu = cluster_first_cpu(cluster);
+ int prev_capacity = 0;
+ unsigned int threshold = sched_group_upmigrate;
+ u64 load;
- p->init_load_pct = 0;
- memset(&p->ravg, 0, sizeof(struct ravg));
+ if (cluster->capacity == max_capacity)
+ return 1;
- if (init_load_pct) {
- init_load_windows = div64_u64((u64)init_load_pct *
- (u64)walt_ravg_window, 100);
+ if (group_boost)
+ return 0;
+
+ if (!demand)
+ return 1;
+
+ if (grp->preferred_cluster)
+ prev_capacity = grp->preferred_cluster->capacity;
+
+ if (cluster->capacity < prev_capacity)
+ threshold = sched_group_downmigrate;
+
+ load = scale_load_to_cpu(demand, cpu);
+ if (load < threshold)
+ return 1;
+
+ return 0;
+}
+
+unsigned long __weak arch_get_cpu_efficiency(int cpu)
+{
+ return SCHED_CAPACITY_SCALE;
+}
+
+/* Return cluster which can offer required capacity for group */
+static struct sched_cluster *best_cluster(struct related_thread_group *grp,
+ u64 total_demand, bool group_boost)
+{
+ struct sched_cluster *cluster = NULL;
+
+ for_each_sched_cluster(cluster) {
+ if (group_will_fit(cluster, grp, total_demand, group_boost))
+ return cluster;
}
- p->ravg.demand = init_load_windows;
- for (i = 0; i < RAVG_HIST_SIZE_MAX; ++i)
- p->ravg.sum_history[i] = init_load_windows;
+ return sched_cluster[0];
+}
+
+int preferred_cluster(struct sched_cluster *cluster, struct task_struct *p)
+{
+ struct related_thread_group *grp;
+ int rc = 1;
+
+ rcu_read_lock();
+
+ grp = task_related_thread_group(p);
+ if (grp)
+ rc = (grp->preferred_cluster == cluster);
+
+ rcu_read_unlock();
+ return rc;
+}
+
+static void _set_preferred_cluster(struct related_thread_group *grp)
+{
+ struct task_struct *p;
+ u64 combined_demand = 0;
+ bool boost_on_big = sched_boost_policy() == SCHED_BOOST_ON_BIG;
+ bool group_boost = false;
+ u64 wallclock;
+
+ if (list_empty(&grp->tasks))
+ return;
+
+ wallclock = sched_ktime_clock();
+
+ /*
+	 * The wakeup of two or more related tasks can race and result in
+	 * multiple calls to _set_preferred_cluster() being issued at the
+	 * same time. Avoid the overhead of rechecking the preferred cluster
+	 * in such cases.
+ */
+ if (wallclock - grp->last_update < sched_ravg_window / 10)
+ return;
+
+ list_for_each_entry(p, &grp->tasks, grp_list) {
+ if (boost_on_big && task_sched_boost(p)) {
+ group_boost = true;
+ break;
+ }
+
+ if (p->ravg.mark_start < wallclock -
+ (sched_ravg_window * sched_ravg_hist_size))
+ continue;
+
+ combined_demand += p->ravg.demand;
+ }
+
+ grp->preferred_cluster = best_cluster(grp,
+ combined_demand, group_boost);
+ grp->last_update = sched_ktime_clock();
+ trace_sched_set_preferred_cluster(grp, combined_demand);
+}
+
+void set_preferred_cluster(struct related_thread_group *grp)
+{
+ raw_spin_lock(&grp->lock);
+ _set_preferred_cluster(grp);
+ raw_spin_unlock(&grp->lock);
+}
+
+int update_preferred_cluster(struct related_thread_group *grp,
+ struct task_struct *p, u32 old_load)
+{
+ u32 new_load = task_load(p);
+
+ if (!grp)
+ return 0;
+
+ /*
+	 * Update if the task's load has changed significantly or a complete
+	 * window has passed since we last updated the preference.
+ */
+ if (abs(new_load - old_load) > sched_ravg_window / 4 ||
+ sched_ktime_clock() - grp->last_update > sched_ravg_window)
+ return 1;
+
+ return 0;
+}
+
+DEFINE_MUTEX(policy_mutex);
+
+#define pct_to_real(tunable) \
+ (div64_u64((u64)tunable * (u64)max_task_load(), 100))
+
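+/*
+ * pct_to_real() converts a percentage tunable into an absolute load value
+ * relative to max_task_load() (one full ravg window). For example, with a
+ * 20 ms window, pct_to_real(100) is 20,000,000 ns and
+ * pct_to_real(MAX_FREQ_AGGR_THRESH) (i.e. 1000%) is ten windows' worth.
+ */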
+unsigned int update_freq_aggregate_threshold(unsigned int threshold)
+{
+ unsigned int old_threshold;
+
+ mutex_lock(&policy_mutex);
+
+ old_threshold = sysctl_sched_freq_aggregate_threshold_pct;
+
+ sysctl_sched_freq_aggregate_threshold_pct = threshold;
+ sched_freq_aggregate_threshold =
+ pct_to_real(sysctl_sched_freq_aggregate_threshold_pct);
+
+ mutex_unlock(&policy_mutex);
+
+ return old_threshold;
+}
+
+#define ADD_TASK 0
+#define REM_TASK 1
+
+#define DEFAULT_CGROUP_COLOC_ID 1
+
+static inline struct related_thread_group*
+lookup_related_thread_group(unsigned int group_id)
+{
+ return related_thread_groups[group_id];
+}
+
+int alloc_related_thread_groups(void)
+{
+ int i, ret;
+ struct related_thread_group *grp;
+
+	/*
+	 * group_id 0 is not a valid group: it is the special id used to
+	 * remove a task from its group.
+	 */
+ for (i = 1; i < MAX_NUM_CGROUP_COLOC_ID; i++) {
+ grp = kzalloc(sizeof(*grp), GFP_NOWAIT);
+ if (!grp) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ grp->id = i;
+ INIT_LIST_HEAD(&grp->tasks);
+ INIT_LIST_HEAD(&grp->list);
+ raw_spin_lock_init(&grp->lock);
+
+ related_thread_groups[i] = grp;
+ }
+
+ return 0;
+
+err:
+ for (i = 1; i < MAX_NUM_CGROUP_COLOC_ID; i++) {
+ grp = lookup_related_thread_group(i);
+ if (grp) {
+ kfree(grp);
+ related_thread_groups[i] = NULL;
+ } else {
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void remove_task_from_group(struct task_struct *p)
+{
+ struct related_thread_group *grp = p->grp;
+ struct rq *rq;
+ int empty_group = 1;
+ struct rq_flags rf;
+
+ raw_spin_lock(&grp->lock);
+
+ rq = __task_rq_lock(p, &rf);
+ transfer_busy_time(rq, p->grp, p, REM_TASK);
+ list_del_init(&p->grp_list);
+ rcu_assign_pointer(p->grp, NULL);
+ __task_rq_unlock(rq, &rf);
+
+ if (!list_empty(&grp->tasks)) {
+ empty_group = 0;
+ _set_preferred_cluster(grp);
+ }
+
+ raw_spin_unlock(&grp->lock);
+
+ /* Reserved groups cannot be destroyed */
+ if (empty_group && grp->id != DEFAULT_CGROUP_COLOC_ID)
+ /*
+		 * Whether grp->list is attached is tested with list_empty()
+		 * elsewhere, hence re-initialize the list after deletion.
+ */
+ list_del_init(&grp->list);
+}
+
+static int
+add_task_to_group(struct task_struct *p, struct related_thread_group *grp)
+{
+ struct rq *rq;
+ struct rq_flags rf;
+
+ raw_spin_lock(&grp->lock);
+
+ /*
+	 * Change p->grp under rq->lock. This prevents races with read-side
+	 * references to p->grp in various hot paths.
+ */
+ rq = __task_rq_lock(p, &rf);
+ transfer_busy_time(rq, grp, p, ADD_TASK);
+ list_add(&p->grp_list, &grp->tasks);
+ rcu_assign_pointer(p->grp, grp);
+ __task_rq_unlock(rq, &rf);
+
+ _set_preferred_cluster(grp);
+
+ raw_spin_unlock(&grp->lock);
+
+ return 0;
+}
+
+void add_new_task_to_grp(struct task_struct *new)
+{
+ unsigned long flags;
+ struct related_thread_group *grp;
+ struct task_struct *leader = new->group_leader;
+ unsigned int leader_grp_id = sched_get_group_id(leader);
+
+ if (!sysctl_sched_enable_thread_grouping &&
+ leader_grp_id != DEFAULT_CGROUP_COLOC_ID)
+ return;
+
+ if (thread_group_leader(new))
+ return;
+
+ if (leader_grp_id == DEFAULT_CGROUP_COLOC_ID) {
+ if (!same_schedtune(new, leader))
+ return;
+ }
+
+ write_lock_irqsave(&related_thread_group_lock, flags);
+
+ rcu_read_lock();
+ grp = task_related_thread_group(leader);
+ rcu_read_unlock();
+
+ /*
+ * It's possible that someone already added the new task to the
+ * group. A leader's thread group is updated prior to calling
+ * this function. It's also possible that the leader has exited
+ * the group. In either case, there is nothing else to do.
+ */
+ if (!grp || new->grp) {
+ write_unlock_irqrestore(&related_thread_group_lock, flags);
+ return;
+ }
+
+ raw_spin_lock(&grp->lock);
+
+ rcu_assign_pointer(new->grp, grp);
+ list_add(&new->grp_list, &grp->tasks);
+
+ raw_spin_unlock(&grp->lock);
+ write_unlock_irqrestore(&related_thread_group_lock, flags);
+}
+
+static int __sched_set_group_id(struct task_struct *p, unsigned int group_id)
+{
+ int rc = 0;
+ unsigned long flags;
+ struct related_thread_group *grp = NULL;
+
+ if (group_id >= MAX_NUM_CGROUP_COLOC_ID)
+ return -EINVAL;
+
+ raw_spin_lock_irqsave(&p->pi_lock, flags);
+ write_lock(&related_thread_group_lock);
+
+ /* Switching from one group to another directly is not permitted */
+ if ((current != p && p->flags & PF_EXITING) ||
+ (!p->grp && !group_id) ||
+ (p->grp && group_id))
+ goto done;
+
+ if (!group_id) {
+ remove_task_from_group(p);
+ goto done;
+ }
+
+ grp = lookup_related_thread_group(group_id);
+ if (list_empty(&grp->list))
+ list_add(&grp->list, &active_related_thread_groups);
+
+ rc = add_task_to_group(p, grp);
+done:
+ write_unlock(&related_thread_group_lock);
+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+
+ return rc;
+}
+
+int sched_set_group_id(struct task_struct *p, unsigned int group_id)
+{
+ /* DEFAULT_CGROUP_COLOC_ID is a reserved id */
+ if (group_id == DEFAULT_CGROUP_COLOC_ID)
+ return -EINVAL;
+
+ return __sched_set_group_id(p, group_id);
+}
+
+unsigned int sched_get_group_id(struct task_struct *p)
+{
+ unsigned int group_id;
+ struct related_thread_group *grp;
+
+ rcu_read_lock();
+ grp = task_related_thread_group(p);
+ group_id = grp ? grp->id : 0;
+ rcu_read_unlock();
+
+ return group_id;
+}
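+
+/*
+ * Illustrative usage: __sched_set_group_id(p, 5) places @p in colocation
+ * group 5 (activating the group on first use), __sched_set_group_id(p, 0)
+ * detaches it again, and sched_get_group_id(p) returns the current id
+ * (0 when the task is not in any group). Moving a task directly from one
+ * group to another is not permitted; it must be detached first.
+ */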
+
+#if defined(CONFIG_SCHED_TUNE) && defined(CONFIG_CGROUP_SCHEDTUNE)
+/*
+ * We create a default colocation group at boot. There is no need to
+ * synchronize tasks between cgroups at creation time because the
+ * correct cgroup hierarchy is not available at boot. Therefore cgroup
+ * colocation is turned off by default even though the colocation group
+ * itself has been allocated. Furthermore, this colocation group cannot
+ * be destroyed once it has been created. All of this is done as a
+ * runtime optimization.
+ *
+ * The job of synchronizing tasks to the colocation group is done when
+ * the colocation flag in the cgroup is turned on.
+ */
+static int __init create_default_coloc_group(void)
+{
+ struct related_thread_group *grp = NULL;
+ unsigned long flags;
+
+ grp = lookup_related_thread_group(DEFAULT_CGROUP_COLOC_ID);
+ write_lock_irqsave(&related_thread_group_lock, flags);
+ list_add(&grp->list, &active_related_thread_groups);
+ write_unlock_irqrestore(&related_thread_group_lock, flags);
+
+ update_freq_aggregate_threshold(MAX_FREQ_AGGR_THRESH);
+ return 0;
+}
+late_initcall(create_default_coloc_group);
+
+int sync_cgroup_colocation(struct task_struct *p, bool insert)
+{
+ unsigned int grp_id = insert ? DEFAULT_CGROUP_COLOC_ID : 0;
+
+ return __sched_set_group_id(p, grp_id);
+}
+#endif
+
+void update_cpu_cluster_capacity(const cpumask_t *cpus)
+{
+ int i;
+ struct sched_cluster *cluster;
+ struct cpumask cpumask;
+ unsigned long flags;
+
+ cpumask_copy(&cpumask, cpus);
+ acquire_rq_locks_irqsave(cpu_possible_mask, &flags);
+
+ for_each_cpu(i, &cpumask) {
+ cluster = cpu_rq(i)->cluster;
+ cpumask_andnot(&cpumask, &cpumask, &cluster->cpus);
+
+ cluster->capacity = compute_capacity(cluster);
+ cluster->load_scale_factor = compute_load_scale_factor(cluster);
+ }
+
+ __update_min_max_capacity();
+
+ release_rq_locks_irqrestore(cpu_possible_mask, &flags);
+}
+
+static unsigned long max_cap[NR_CPUS];
+static unsigned long thermal_cap_cpu[NR_CPUS];
+
+unsigned long thermal_cap(int cpu)
+{
+ return thermal_cap_cpu[cpu] ?: cpu_rq(cpu)->cpu_capacity_orig;
+}
+
+unsigned long do_thermal_cap(int cpu, unsigned long thermal_max_freq)
+{
+ struct sched_domain *sd;
+ struct sched_group *sg;
+ struct rq *rq = cpu_rq(cpu);
+ int nr_cap_states;
+
+ if (!max_cap[cpu]) {
+ rcu_read_lock();
+ sd = rcu_dereference(per_cpu(sd_ea, cpu));
+ if (!sd || !sd->groups || !sd->groups->sge ||
+ !sd->groups->sge->cap_states) {
+ rcu_read_unlock();
+ return rq->cpu_capacity_orig;
+ }
+ sg = sd->groups;
+ nr_cap_states = sg->sge->nr_cap_states;
+ max_cap[cpu] = sg->sge->cap_states[nr_cap_states - 1].cap;
+ rcu_read_unlock();
+ }
+
+ if (cpu_max_table_freq[cpu] &&
+ unlikely(thermal_max_freq && thermal_max_freq
+ != cpu_max_table_freq[cpu])) {
+ return div64_ul(thermal_max_freq * max_cap[cpu],
+ cpu_max_table_freq[cpu]);
+ } else {
+ return rq->cpu_capacity_orig;
+ }
+}
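+
+/*
+ * Example (illustrative): if a CPU's fmax in the cpufreq table is 2 GHz and
+ * thermal mitigation caps it at 1 GHz, do_thermal_cap() returns roughly half
+ * of the CPU's maximum energy-model capacity; with no mitigation in effect
+ * (or the cap equal to fmax) the original cpu_capacity_orig is returned.
+ */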
+
+static DEFINE_SPINLOCK(cpu_freq_min_max_lock);
+void sched_update_cpu_freq_min_max(const cpumask_t *cpus, u32 fmin, u32 fmax)
+{
+ struct cpumask cpumask;
+ struct sched_cluster *cluster;
+ int i, update_capacity = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cpu_freq_min_max_lock, flags);
+ cpumask_copy(&cpumask, cpus);
+
+ for_each_cpu(i, &cpumask)
+ thermal_cap_cpu[i] = do_thermal_cap(i, fmax);
+
+ for_each_cpu(i, &cpumask) {
+ cluster = cpu_rq(i)->cluster;
+ cpumask_andnot(&cpumask, &cpumask, &cluster->cpus);
+ update_capacity += (cluster->max_mitigated_freq != fmax);
+ cluster->max_mitigated_freq = fmax;
+ }
+ spin_unlock_irqrestore(&cpu_freq_min_max_lock, flags);
+
+ if (update_capacity)
+ update_cpu_cluster_capacity(cpus);
+}
+
+/*
+ * Task's cpu usage is accounted in:
+ * rq->curr/prev_runnable_sum, when its ->grp is NULL
+ * grp->cpu_time[cpu]->curr/prev_runnable_sum, when its ->grp is !NULL
+ *
+ * Transfer the task's cpu usage between these counters when it transitions
+ * between groups.
+ */
+static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
+ struct task_struct *p, int event)
+{
+ u64 wallclock;
+ struct group_cpu_time *cpu_time;
+ u64 *src_curr_runnable_sum, *dst_curr_runnable_sum;
+ u64 *src_prev_runnable_sum, *dst_prev_runnable_sum;
+ u64 *src_nt_curr_runnable_sum, *dst_nt_curr_runnable_sum;
+ u64 *src_nt_prev_runnable_sum, *dst_nt_prev_runnable_sum;
+ int migrate_type;
+ int cpu = cpu_of(rq);
+ bool new_task;
+ int i;
+
+ wallclock = sched_ktime_clock();
+
+ update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
+ update_task_ravg(p, rq, TASK_UPDATE, wallclock, 0);
+ new_task = is_new_task(p);
+
+ cpu_time = &rq->grp_time;
+ if (event == ADD_TASK) {
+ migrate_type = RQ_TO_GROUP;
+
+ src_curr_runnable_sum = &rq->curr_runnable_sum;
+ dst_curr_runnable_sum = &cpu_time->curr_runnable_sum;
+ src_prev_runnable_sum = &rq->prev_runnable_sum;
+ dst_prev_runnable_sum = &cpu_time->prev_runnable_sum;
+
+ src_nt_curr_runnable_sum = &rq->nt_curr_runnable_sum;
+ dst_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
+ src_nt_prev_runnable_sum = &rq->nt_prev_runnable_sum;
+ dst_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
+
+ *src_curr_runnable_sum -= p->ravg.curr_window_cpu[cpu];
+ *src_prev_runnable_sum -= p->ravg.prev_window_cpu[cpu];
+ if (new_task) {
+ *src_nt_curr_runnable_sum -=
+ p->ravg.curr_window_cpu[cpu];
+ *src_nt_prev_runnable_sum -=
+ p->ravg.prev_window_cpu[cpu];
+ }
+
+ update_cluster_load_subtractions(p, cpu,
+ rq->window_start, new_task);
+
+ } else {
+ migrate_type = GROUP_TO_RQ;
+
+ src_curr_runnable_sum = &cpu_time->curr_runnable_sum;
+ dst_curr_runnable_sum = &rq->curr_runnable_sum;
+ src_prev_runnable_sum = &cpu_time->prev_runnable_sum;
+ dst_prev_runnable_sum = &rq->prev_runnable_sum;
+
+ src_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
+ dst_nt_curr_runnable_sum = &rq->nt_curr_runnable_sum;
+ src_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
+ dst_nt_prev_runnable_sum = &rq->nt_prev_runnable_sum;
+
+ *src_curr_runnable_sum -= p->ravg.curr_window;
+ *src_prev_runnable_sum -= p->ravg.prev_window;
+ if (new_task) {
+ *src_nt_curr_runnable_sum -= p->ravg.curr_window;
+ *src_nt_prev_runnable_sum -= p->ravg.prev_window;
+ }
+
+ /*
+ * Need to reset curr/prev windows for all CPUs, not just the
+		 * ones in the same cluster. Since inter-cluster migrations
+		 * did not result in the appropriate bookkeeping, the values
+		 * per CPU would be inaccurate.
+ */
+ for_each_possible_cpu(i) {
+ p->ravg.curr_window_cpu[i] = 0;
+ p->ravg.prev_window_cpu[i] = 0;
+ }
+ }
+
+ *dst_curr_runnable_sum += p->ravg.curr_window;
+ *dst_prev_runnable_sum += p->ravg.prev_window;
+ if (new_task) {
+ *dst_nt_curr_runnable_sum += p->ravg.curr_window;
+ *dst_nt_prev_runnable_sum += p->ravg.prev_window;
+ }
+
+ /*
+	 * When a task enters or exits a group, its curr and prev windows are
+	 * moved to a single CPU. This behavior might be sub-optimal in the
+	 * exit case; however, it saves us the overhead of handling
+	 * inter-cluster migration fixups while the task is part of a related
+	 * group.
+ */
+ p->ravg.curr_window_cpu[cpu] = p->ravg.curr_window;
+ p->ravg.prev_window_cpu[cpu] = p->ravg.prev_window;
+
+ trace_sched_migration_update_sum(p, migrate_type, rq);
+
+ BUG_ON((s64)*src_curr_runnable_sum < 0);
+ BUG_ON((s64)*src_prev_runnable_sum < 0);
+ BUG_ON((s64)*src_nt_curr_runnable_sum < 0);
+ BUG_ON((s64)*src_nt_prev_runnable_sum < 0);
+}
+
+/*
+ * Runs in hard-irq context. This should ideally run just after the latest
+ * window roll-over.
+ */
+void walt_irq_work(struct irq_work *irq_work)
+{
+ struct sched_cluster *cluster;
+ struct rq *rq;
+ int cpu;
+ u64 wc;
+
+ for_each_cpu(cpu, cpu_possible_mask)
+ raw_spin_lock(&cpu_rq(cpu)->lock);
+
+ wc = sched_ktime_clock();
+
+ for_each_sched_cluster(cluster) {
+ raw_spin_lock(&cluster->load_lock);
+
+ for_each_cpu(cpu, &cluster->cpus) {
+ rq = cpu_rq(cpu);
+ if (rq->curr) {
+ update_task_ravg(rq->curr, rq,
+ TASK_UPDATE, wc, 0);
+ account_load_subtractions(rq);
+ }
+
+ cpufreq_update_util(rq, 0);
+ }
+
+ raw_spin_unlock(&cluster->load_lock);
+ }
+
+ for_each_cpu(cpu, cpu_possible_mask)
+ raw_spin_unlock(&cpu_rq(cpu)->lock);
+
+ core_ctl_check(this_rq()->window_start);
}
diff --git a/kernel/sched/walt.h b/kernel/sched/walt.h
index e181c87..f153332 100644
--- a/kernel/sched/walt.h
+++ b/kernel/sched/walt.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -16,47 +16,344 @@
#ifdef CONFIG_SCHED_WALT
-void walt_update_task_ravg(struct task_struct *p, struct rq *rq, int event,
- u64 wallclock, u64 irqtime);
-void walt_inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p);
-void walt_dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p);
-void walt_inc_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
- struct task_struct *p);
-void walt_dec_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
- struct task_struct *p);
-void walt_fixup_busy_time(struct task_struct *p, int new_cpu);
-void walt_init_new_task_load(struct task_struct *p);
-void walt_mark_task_starting(struct task_struct *p);
-void walt_set_window_start(struct rq *rq);
-void walt_migrate_sync_cpu(int cpu);
-void walt_init_cpu_efficiency(void);
-u64 walt_ktime_clock(void);
-void walt_account_irqtime(int cpu, struct task_struct *curr, u64 delta,
+#include <linux/sched/sysctl.h>
+
+#define WINDOW_STATS_RECENT 0
+#define WINDOW_STATS_MAX 1
+#define WINDOW_STATS_MAX_RECENT_AVG 2
+#define WINDOW_STATS_AVG 3
+#define WINDOW_STATS_INVALID_POLICY 4
+
+/* Min window size (in ns) = 20ms */
+#define MIN_SCHED_RAVG_WINDOW 20000000
+
+/* Max window size (in ns) = 1s */
+#define MAX_SCHED_RAVG_WINDOW 1000000000
+
+#define EXITING_TASK_MARKER 0xdeaddead
+
+#define FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK 0
+#define FREQ_REPORT_CPU_LOAD 1
+#define FREQ_REPORT_TOP_TASK 2
+
+#define for_each_related_thread_group(grp) \
+ list_for_each_entry(grp, &active_related_thread_groups, list)
+
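+/*
+ * A task is considered "new" until it has been active for this many full
+ * ravg windows (see is_new_task()); new-task load is accounted separately
+ * in the nt_*_runnable_sum counters.
+ */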
+#define SCHED_NEW_TASK_WINDOWS 5
+
+extern unsigned int sched_ravg_window;
+extern unsigned int max_possible_efficiency;
+extern unsigned int min_possible_efficiency;
+extern unsigned int max_possible_freq;
+extern unsigned int sched_major_task_runtime;
+extern unsigned int __read_mostly sched_init_task_load_windows;
+extern unsigned int __read_mostly sched_load_granule;
+
+extern struct mutex cluster_lock;
+extern rwlock_t related_thread_group_lock;
+extern __read_mostly unsigned int sched_ravg_hist_size;
+extern __read_mostly unsigned int sched_freq_aggregate;
+extern __read_mostly int sched_freq_aggregate_threshold;
+extern __read_mostly unsigned int sched_window_stats_policy;
+extern __read_mostly unsigned int sched_group_upmigrate;
+extern __read_mostly unsigned int sched_group_downmigrate;
+
+extern struct sched_cluster init_cluster;
+
+extern void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
+ u64 wallclock, u64 irqtime);
+
+extern unsigned int nr_eligible_big_tasks(int cpu);
+
+#ifndef CONFIG_SCHED_HMP
+static inline void
+inc_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p)
+{
+ if (sched_disable_window_stats)
+ return;
+
+ if (p->misfit)
+ stats->nr_big_tasks++;
+}
+
+static inline void
+dec_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p)
+{
+ if (sched_disable_window_stats)
+ return;
+
+ if (p->misfit)
+ stats->nr_big_tasks--;
+
+ BUG_ON(stats->nr_big_tasks < 0);
+}
+#endif
+
+static inline void
+adjust_nr_big_tasks(struct hmp_sched_stats *stats, int delta, bool inc)
+{
+ struct rq *rq = container_of(stats, struct rq, hmp_stats);
+
+ if (sched_disable_window_stats)
+ return;
+
+ sched_update_nr_prod(cpu_of(rq), 0, true);
+ stats->nr_big_tasks += inc ? delta : -delta;
+
+ BUG_ON(stats->nr_big_tasks < 0);
+}
+
+static inline void
+inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
+ struct task_struct *p)
+{
+ u32 task_load;
+
+ if (sched_disable_window_stats)
+ return;
+
+ task_load = sched_disable_window_stats ? 0 : p->ravg.demand;
+
+ stats->cumulative_runnable_avg += task_load;
+ stats->pred_demands_sum += p->ravg.pred_demand;
+}
+
+static inline void
+dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,
+ struct task_struct *p)
+{
+ u32 task_load;
+
+ if (sched_disable_window_stats)
+ return;
+
+ task_load = sched_disable_window_stats ? 0 : p->ravg.demand;
+
+ stats->cumulative_runnable_avg -= task_load;
+
+ BUG_ON((s64)stats->cumulative_runnable_avg < 0);
+
+ stats->pred_demands_sum -= p->ravg.pred_demand;
+ BUG_ON((s64)stats->pred_demands_sum < 0);
+}
+
+static inline void
+fixup_cumulative_runnable_avg(struct hmp_sched_stats *stats,
+ struct task_struct *p, s64 task_load_delta,
+ s64 pred_demand_delta)
+{
+ if (sched_disable_window_stats)
+ return;
+
+ stats->cumulative_runnable_avg += task_load_delta;
+ BUG_ON((s64)stats->cumulative_runnable_avg < 0);
+
+ stats->pred_demands_sum += pred_demand_delta;
+ BUG_ON((s64)stats->pred_demands_sum < 0);
+}
+
+extern void inc_rq_hmp_stats(struct rq *rq,
+ struct task_struct *p, int change_cra);
+extern void dec_rq_hmp_stats(struct rq *rq,
+ struct task_struct *p, int change_cra);
+extern void reset_hmp_stats(struct hmp_sched_stats *stats, int reset_cra);
+extern void fixup_busy_time(struct task_struct *p, int new_cpu);
+extern void init_new_task_load(struct task_struct *p, bool idle_task);
+extern void mark_task_starting(struct task_struct *p);
+extern void set_window_start(struct rq *rq);
+void account_irqtime(int cpu, struct task_struct *curr, u64 delta,
u64 wallclock);
-u64 walt_irqload(int cpu);
-int walt_cpu_high_irqload(int cpu);
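+/*
+ * An rq's accumulated irq load is only considered recent if it was updated
+ * within the last SCHED_HIGH_IRQ_TIMEOUT jiffies; older samples are reported
+ * as zero by sched_irqload() below.
+ */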
+#define SCHED_HIGH_IRQ_TIMEOUT 3
+static inline u64 sched_irqload(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+ s64 delta;
+
+ delta = get_jiffies_64() - rq->irqload_ts;
+ /*
+	 * The current context can be preempted by an irq, and rq->irqload_ts
+	 * can be updated from irq context, so delta can be negative. This is
+	 * okay and we can safely return, as it simply means there was a
+	 * recent irq occurrence.
+ */
+
+ if (delta < SCHED_HIGH_IRQ_TIMEOUT)
+ return rq->avg_irqload;
+ else
+ return 0;
+}
+
+static inline int sched_cpu_high_irqload(int cpu)
+{
+ return sched_irqload(cpu) >= sysctl_sched_cpu_high_irqload;
+}
+
+static inline int exiting_task(struct task_struct *p)
+{
+ return (p->ravg.sum_history[0] == EXITING_TASK_MARKER);
+}
+
+extern u64 sched_ktime_clock(void);
+
+static inline struct sched_cluster *cpu_cluster(int cpu)
+{
+ return cpu_rq(cpu)->cluster;
+}
+
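+/*
+ * Scale busy-time based load between frequencies: e.g. 8 ms of busy time
+ * observed at 1 GHz corresponds to roughly 4 ms at 2 GHz, i.e. the load is
+ * scaled by src_freq / dst_freq.
+ */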
+static inline u64
+scale_load_to_freq(u64 load, unsigned int src_freq, unsigned int dst_freq)
+{
+ return div64_u64(load * (u64)src_freq, (u64)dst_freq);
+}
+
+static inline bool is_new_task(struct task_struct *p)
+{
+ return p->ravg.active_windows < SCHED_NEW_TASK_WINDOWS;
+}
+
+static inline void clear_top_tasks_table(u8 *table)
+{
+ memset(table, 0, NUM_LOAD_INDICES * sizeof(u8));
+}
+
+extern void update_cluster_load_subtractions(struct task_struct *p,
+ int cpu, u64 ws, bool new_task);
+extern void sched_account_irqstart(int cpu, struct task_struct *curr,
+ u64 wallclock);
+
+static inline unsigned int max_task_load(void)
+{
+ return sched_ravg_window;
+}
+
+static inline u32 cpu_cycles_to_freq(u64 cycles, u32 period)
+{
+ return div64_u64(cycles, period);
+}
+
+static inline unsigned int cpu_cur_freq(int cpu)
+{
+ return cpu_rq(cpu)->cluster->cur_freq;
+}
+
+static inline void
+move_list(struct list_head *dst, struct list_head *src, bool sync_rcu)
+{
+ struct list_head *first, *last;
+
+ first = src->next;
+ last = src->prev;
+
+ if (sync_rcu) {
+ INIT_LIST_HEAD_RCU(src);
+ synchronize_rcu();
+ }
+
+ first->prev = dst;
+ dst->prev = last;
+ last->next = dst;
+
+ /* Ensure list sanity before making the head visible to all CPUs. */
+ smp_mb();
+ dst->next = first;
+}
+
+extern void reset_task_stats(struct task_struct *p);
+extern void update_cluster_topology(void);
+
+extern struct list_head cluster_head;
+#define for_each_sched_cluster(cluster) \
+ list_for_each_entry_rcu(cluster, &cluster_head, list)
+
+extern void init_clusters(void);
+
+extern void clear_top_tasks_bitmap(unsigned long *bitmap);
+
+extern void sched_account_irqtime(int cpu, struct task_struct *curr,
+ u64 delta, u64 wallclock);
+
+static inline void assign_cluster_ids(struct list_head *head)
+{
+ struct sched_cluster *cluster;
+ int pos = 0;
+
+ list_for_each_entry(cluster, head, list) {
+ cluster->id = pos;
+ sched_cluster[pos++] = cluster;
+ }
+}
+
+static inline int same_cluster(int src_cpu, int dst_cpu)
+{
+ return cpu_rq(src_cpu)->cluster == cpu_rq(dst_cpu)->cluster;
+}
+
+void sort_clusters(void);
+
+void walt_irq_work(struct irq_work *irq_work);
#else /* CONFIG_SCHED_WALT */
-static inline void walt_update_task_ravg(struct task_struct *p, struct rq *rq,
- int event, u64 wallclock, u64 irqtime) { }
-static inline void walt_inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p) { }
-static inline void walt_dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p) { }
-static inline void walt_inc_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
- struct task_struct *p) { }
-static inline void walt_dec_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
- struct task_struct *p) { }
-static inline void walt_fixup_busy_time(struct task_struct *p, int new_cpu) { }
-static inline void walt_init_new_task_load(struct task_struct *p) { }
-static inline void walt_mark_task_starting(struct task_struct *p) { }
-static inline void walt_set_window_start(struct rq *rq) { }
-static inline void walt_migrate_sync_cpu(int cpu) { }
-static inline void walt_init_cpu_efficiency(void) { }
-static inline u64 walt_ktime_clock(void) { return 0; }
+static inline void update_task_ravg(struct task_struct *p, struct rq *rq,
+ int event, u64 wallclock, u64 irqtime) { }
+static inline void inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
+ struct task_struct *p)
+{
+}
+
+static inline unsigned int nr_eligible_big_tasks(int cpu)
+{
+ return 0;
+}
+
+static inline void adjust_nr_big_tasks(struct hmp_sched_stats *stats,
+ int delta, bool inc)
+{
+}
+
+static inline void inc_nr_big_task(struct hmp_sched_stats *stats,
+ struct task_struct *p)
+{
+}
+
+static inline void dec_nr_big_task(struct hmp_sched_stats *stats,
+ struct task_struct *p)
+{
+}
+static inline void dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,
+ struct task_struct *p)
+{
+}
+
+static inline void fixup_busy_time(struct task_struct *p, int new_cpu) { }
+static inline void init_new_task_load(struct task_struct *p, bool idle_task)
+{
+}
+
+static inline void mark_task_starting(struct task_struct *p) { }
+static inline void set_window_start(struct rq *rq) { }
+static inline int sched_cpu_high_irqload(int cpu) { return 0; }
+
+static inline u64 sched_ktime_clock(void)
+{
+ return 0;
+}
+
+static inline void sched_account_irqstart(int cpu, struct task_struct *curr,
+ u64 wallclock)
+{
+}
+
+static inline void update_cluster_topology(void) { }
+static inline void init_clusters(void) {}
+static inline void sched_account_irqtime(int cpu, struct task_struct *curr,
+ u64 delta, u64 wallclock)
+{
+}
+
+static inline int same_cluster(int src_cpu, int dst_cpu) { return 1; }
#endif /* CONFIG_SCHED_WALT */
-extern unsigned int walt_disabled;
-
#endif
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index f55a02b..534431a 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -292,6 +292,15 @@
.mode = 0644,
.proc_handler = proc_dointvec,
},
+#ifdef CONFIG_SCHED_WALT
+ {
+ .procname = "sched_cpu_high_irqload",
+ .data = &sysctl_sched_cpu_high_irqload,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+#endif
#ifdef CONFIG_SCHED_HMP
{
.procname = "sched_freq_reporting_policy",
@@ -319,13 +328,6 @@
.extra1 = &zero,
},
{
- .procname = "sched_cpu_high_irqload",
- .data = &sysctl_sched_cpu_high_irqload,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {
.procname = "sched_ravg_hist_size",
.data = &sysctl_sched_ravg_hist_size,
.maxlen = sizeof(unsigned int),
@@ -480,15 +482,6 @@
.extra2 = &one_thousand,
},
{
- .procname = "sched_boost",
- .data = &sysctl_sched_boost,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = sched_boost_handler,
- .extra1 = &zero,
- .extra2 = &three,
- },
- {
.procname = "sched_short_burst_ns",
.data = &sysctl_sched_short_burst,
.maxlen = sizeof(unsigned int),
@@ -545,18 +538,13 @@
.proc_handler = proc_dointvec,
},
{
- .procname = "sched_walt_init_task_load_pct",
- .data = &sysctl_sched_walt_init_task_load_pct,
+ .procname = "sched_boost",
+ .data = &sysctl_sched_boost,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {
- .procname = "sched_walt_cpu_high_irqload",
- .data = &sysctl_sched_walt_cpu_high_irqload,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
+ .proc_handler = sched_boost_handler,
+ .extra1 = &zero,
+ .extra2 = &three,
},
#endif
{