Merge "usb: usb_bam: Don't wait for consumer request on disconnect pipes"
diff --git a/Documentation/devicetree/bindings/media/video/msm-cci.txt b/Documentation/devicetree/bindings/media/video/msm-cci.txt
index c01193c..583fbfa 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cci.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cci.txt
@@ -41,6 +41,7 @@
- "qcom,s5k3l1yx"
- "shinetech,gc0339"
- "shinetech,hi256"
+ - "shinetech,s5k4e1"
- reg : should contain i2c slave address of the device
- qcom,slave-id : should contain i2c slave address, device id address
and expected id read value
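For readers unfamiliar with the binding, a minimal sensor node using the new compatible might look like the sketch below (the slave address and id values are illustrative only, not taken from a shipping board file):

	qcom,camera@20 {
		compatible = "shinetech,s5k4e1";
		reg = <0x20>;
		/* i2c slave address, id register address, expected id value */
		qcom,slave-id = <0x20 0x0000 0x4e10>;
	};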
diff --git a/arch/arm/boot/dts/apq8084-coresight.dtsi b/arch/arm/boot/dts/apq8084-coresight.dtsi
new file mode 100644
index 0000000..610d80b
--- /dev/null
+++ b/arch/arm/boot/dts/apq8084-coresight.dtsi
@@ -0,0 +1,145 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ tmc_etr: tmc@fc326000 {
+ compatible = "arm,coresight-tmc";
+ reg = <0xfc326000 0x1000>,
+ <0xfc37c000 0x3000>;
+ reg-names = "tmc-base", "bam-base";
+
+ qcom,memory-reservation-type = "EBI1";
+ qcom,memory-reservation-size = <0x100000>; /* 1M EBI1 buffer */
+
+ coresight-id = <0>;
+ coresight-name = "coresight-tmc-etr";
+ coresight-nr-inports = <1>;
+ };
+
+ replicator: replicator@fc324000 {
+ compatible = "qcom,coresight-replicator";
+ reg = <0xfc324000 0x1000>;
+ reg-names = "replicator-base";
+
+ coresight-id = <2>;
+ coresight-name = "coresight-replicator";
+ coresight-nr-inports = <1>;
+ coresight-outports = <0>;
+ coresight-child-list = <&tmc_etr>;
+ coresight-child-ports = <0>;
+ };
+
+ tmc_etf: tmc@fc325000 {
+ compatible = "arm,coresight-tmc";
+ reg = <0xfc325000 0x1000>;
+ reg-names = "tmc-base";
+
+ coresight-id = <3>;
+ coresight-name = "coresight-tmc-etf";
+ coresight-nr-inports = <1>;
+ coresight-outports = <0>;
+ coresight-child-list = <&replicator>;
+ coresight-child-ports = <0>;
+ coresight-default-sink;
+ };
+
+ funnel_merg: funnel@fc323000 {
+ compatible = "arm,coresight-funnel";
+ reg = <0xfc323000 0x1000>;
+ reg-names = "funnel-base";
+
+ coresight-id = <4>;
+ coresight-name = "coresight-funnel-merg";
+ coresight-nr-inports = <2>;
+ coresight-outports = <0>;
+ coresight-child-list = <&tmc_etf>;
+ coresight-child-ports = <0>;
+ };
+
+ funnel_in0: funnel@fc321000 {
+ compatible = "arm,coresight-funnel";
+ reg = <0xfc321000 0x1000>;
+ reg-names = "funnel-base";
+
+ coresight-id = <5>;
+ coresight-name = "coresight-funnel-in0";
+ coresight-nr-inports = <8>;
+ coresight-outports = <0>;
+ coresight-child-list = <&funnel_merg>;
+ coresight-child-ports = <0>;
+ };
+
+ funnel_in1: funnel@fc322000 {
+ compatible = "arm,coresight-funnel";
+ reg = <0xfc322000 0x1000>;
+ reg-names = "funnel-base";
+
+ coresight-id = <6>;
+ coresight-name = "coresight-funnel-in1";
+ coresight-nr-inports = <8>;
+ coresight-outports = <0>;
+ coresight-child-list = <&funnel_merg>;
+ coresight-child-ports = <1>;
+ };
+
+ funnel_kpss: funnel@fc355000 {
+ compatible = "arm,coresight-funnel";
+ reg = <0xfc355000 0x1000>;
+ reg-names = "funnel-base";
+
+ coresight-id = <7>;
+ coresight-name = "coresight-funnel-kpss";
+ coresight-nr-inports = <4>;
+ coresight-outports = <0>;
+ coresight-child-list = <&funnel_in1>;
+ coresight-child-ports = <5>;
+ };
+
+ funnel_mmss: funnel@fc36c000 {
+ compatible = "arm,coresight-funnel";
+ reg = <0xfc36c000 0x1000>;
+ reg-names = "funnel-base";
+
+ coresight-id = <8>;
+ coresight-name = "coresight-funnel-mmss";
+ coresight-nr-inports = <8>;
+ coresight-outports = <0>;
+ coresight-child-list = <&funnel_in1>;
+ coresight-child-ports = <1>;
+ };
+
+ stm: stm@fc302000 {
+ compatible = "arm,coresight-stm";
+ reg = <0xfc302000 0x1000>,
+ <0xfa280000 0x180000>;
+ reg-names = "stm-base", "stm-data-base";
+
+ coresight-id = <9>;
+ coresight-name = "coresight-stm";
+ coresight-nr-inports = <0>;
+ coresight-outports = <0>;
+ coresight-child-list = <&funnel_in1>;
+ coresight-child-ports = <7>;
+ };
+
+ csr: csr@fc301000 {
+ compatible = "qcom,coresight-csr";
+ reg = <0xfc301000 0x1000>;
+ reg-names = "csr-base";
+
+ coresight-id = <14>;
+ coresight-name = "coresight-csr";
+ coresight-nr-inports = <0>;
+
+ qcom,blk-size = <3>;
+ };
+};
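Reviewer note: following the coresight-child-list / coresight-child-ports links declared in the nodes above, the trace topology this file wires up is:

	stm ---------(port 7)--> funnel_in1
	funnel_kpss -(port 5)--> funnel_in1
	funnel_mmss -(port 1)--> funnel_in1
	funnel_in1 --(port 1)--> funnel_merg
	funnel_in0 --(port 0)--> funnel_merg
	funnel_merg --> tmc_etf --> replicator --> tmc_etr

tmc_etf carries coresight-default-sink, so trace lands in the on-chip ETF by default; the replicator path to the ETR provides the DDR-backed sink using the 1M EBI1 buffer reserved in the tmc_etr node.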
diff --git a/arch/arm/boot/dts/apq8084.dtsi b/arch/arm/boot/dts/apq8084.dtsi
index e5f083a..80b6ffa 100644
--- a/arch/arm/boot/dts/apq8084.dtsi
+++ b/arch/arm/boot/dts/apq8084.dtsi
@@ -21,6 +21,7 @@
/include/ "apq8084-ion.dtsi"
/include/ "apq8084-smp2p.dtsi"
+/include/ "apq8084-coresight.dtsi"
&soc {
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/mpq8092.dtsi b/arch/arm/boot/dts/mpq8092.dtsi
index e8674a0..2eaa32f 100644
--- a/arch/arm/boot/dts/mpq8092.dtsi
+++ b/arch/arm/boot/dts/mpq8092.dtsi
@@ -210,6 +210,41 @@
qcom,pet-time = <10000>;
qcom,ipi-ping;
};
+
+ qcom,ocmem@fdd00000 {
+ compatible = "qcom,msm-ocmem";
+ reg = <0xfdd00000 0x2000>,
+ <0xfdd02000 0x2000>,
+ <0xfe070000 0x400>,
+ <0xfec00000 0x180000>;
+		reg-names = "ocmem_ctrl_physical", "dm_ctrl_physical",
+			"br_ctrl_physical", "ocmem_physical";
+ interrupts = <0 76 0>, <0 77 0>;
+ interrupt-names = "ocmem_irq", "dm_irq";
+ qcom,ocmem-num-regions = <0x3>;
+ qcom,ocmem-num-macros = <0x18>;
+ qcom,resource-type = <0x706d636f>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0xfec00000 0x180000>;
+
+ partition@0 {
+ reg = <0x0 0x100000>;
+ qcom,ocmem-part-name = "graphics";
+ qcom,ocmem-part-min = <0x80000>;
+ };
+
+ partition@80000 {
+ reg = <0x100000 0x80000>;
+ qcom,ocmem-part-name = "lp_audio";
+ qcom,ocmem-part-min = <0x80000>;
+ };
+
+ partition@100000 {
+ reg = <0x100000 0x80000>;
+ qcom,ocmem-part-name = "video";
+ qcom,ocmem-part-min = <0x55000>;
+ };
+ };
};
&gdsc_venus {
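Reviewer note on the new ocmem node: the ranges property maps child partition offsets 1:1 onto the 0x180000-byte OCMEM aperture at physical 0xfec00000, so a partition's physical span is its reg offset plus the aperture base. Worked example for the graphics partition:

	/* partition@0: reg = <0x0 0x100000>, with
	 * ranges = <0x0 0xfec00000 0x180000>:
	 * physical start = 0xfec00000 + 0x0      = 0xfec00000
	 * physical end   = 0xfec00000 + 0x100000 = 0xfed00000 (exclusive)
	 */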
diff --git a/arch/arm/boot/dts/msm8226-ion.dtsi b/arch/arm/boot/dts/msm8226-ion.dtsi
index dee64e5..9574b7d 100644
--- a/arch/arm/boot/dts/msm8226-ion.dtsi
+++ b/arch/arm/boot/dts/msm8226-ion.dtsi
@@ -35,6 +35,13 @@
reg = <25>;
};
+ qcom,ion-heap@22 { /* adsp heap */
+ compatible = "qcom,msm-ion-reserve";
+ reg = <22>;
+ qcom,heap-align = <0x1000>;
+ linux,contiguous-region = <&adsp_mem>;
+ };
+
qcom,ion-heap@27 { /* QSECOM HEAP */
compatible = "qcom,msm-ion-reserve";
reg = <27>;
diff --git a/arch/arm/boot/dts/msm8226-mtp.dtsi b/arch/arm/boot/dts/msm8226-mtp.dtsi
index acab5e4..bd32138 100644
--- a/arch/arm/boot/dts/msm8226-mtp.dtsi
+++ b/arch/arm/boot/dts/msm8226-mtp.dtsi
@@ -37,6 +37,17 @@
};
};
+ i2c@f9925000 { /* BLSP1 QUP3 */
+ nfc-nci@0e {
+ compatible = "qcom,nfc-nci";
+ reg = <0x0e>;
+ qcom,irq-gpio = <&msmgpio 21 0x00>;
+ qcom,dis-gpio = <&msmgpio 20 0x00>;
+ interrupt-parent = <&msmgpio>;
+ interrupts = <21 0>;
+ qcom,clk-gpio = <&pm8226_gpios 3 0>;
+ };
+ };
gpio_keys {
compatible = "gpio-keys";
input-name = "gpio-keys";
@@ -342,6 +353,11 @@
};
gpio@c200 { /* GPIO 3 */
+ qcom,mode = <0>; /* QPNP_PIN_MODE_DIG_IN */
+ qcom,pull = <5>; /* QPNP_PIN_PULL_NO */
+ qcom,vin-sel = <2>; /* QPNP_PIN_VIN2 */
+ qcom,src-sel = <2>; /* QPNP_PIN_SEL_FUNC_1 */
+ qcom,master-en = <1>;
};
gpio@c300 { /* GPIO 4 */
diff --git a/arch/arm/boot/dts/msm8226-v2.dtsi b/arch/arm/boot/dts/msm8226-v2.dtsi
index db2f4e6..2148e1d 100644
--- a/arch/arm/boot/dts/msm8226-v2.dtsi
+++ b/arch/arm/boot/dts/msm8226-v2.dtsi
@@ -51,6 +51,11 @@
qcom,cpr-apc-volt-step = <10000>;
};
+&msm_gpu {
+ /* Updated chip ID */
+ qcom,chipid = <0x03000512>;
+};
+
&soc {
qcom,acpuclk@f9011050 {
reg = <0xf9011050 0x8>,
diff --git a/arch/arm/boot/dts/msm8226.dtsi b/arch/arm/boot/dts/msm8226.dtsi
index b14a406..c151948 100644
--- a/arch/arm/boot/dts/msm8226.dtsi
+++ b/arch/arm/boot/dts/msm8226.dtsi
@@ -30,6 +30,12 @@
label = "secure_mem";
};
+ adsp_mem: adsp_region {
+ linux,contiguous-region;
+ reg = <0 0x2000000>;
+ label = "adsp_mem";
+ };
+
qsecom_mem: qsecom_region {
linux,contiguous-region;
reg = <0 0x780000>;
@@ -714,6 +720,18 @@
qcom,pmic-arb-channel = <0>;
};
+ i2c@f9925000 { /* BLSP-1 QUP-3 */
+ cell-index = <2>;
+ compatible = "qcom,i2c-qup";
+ reg = <0xf9925000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg-names = "qup_phys_addr";
+ interrupts = <0 97 0>;
+ interrupt-names = "qup_err_intr";
+ qcom,i2c-bus-freq = <400000>;
+ qcom,i2c-src-freq = <19200000>;
+ };
i2c@f9926000 { /* BLSP-1 QUP-4 */
cell-index = <0>;
compatible = "qcom,i2c-qup";
diff --git a/arch/arm/boot/dts/msm8610-qrd-camera-sensor.dtsi b/arch/arm/boot/dts/msm8610-qrd-camera-sensor.dtsi
index 2e18ed7..e73573a 100644
--- a/arch/arm/boot/dts/msm8610-qrd-camera-sensor.dtsi
+++ b/arch/arm/boot/dts/msm8610-qrd-camera-sensor.dtsi
@@ -64,7 +64,7 @@
qcom,gpio-no-mux = <0>;
gpios = <&msmgpio 14 0>,
<&msmgpio 15 0>,
- <&msmgpio 8 0>;
+ <&msmgpio 88 0>;
qcom,gpio-reset = <1>;
qcom,gpio-standby = <2>;
qcom,gpio-req-tbl-num = <0 1 2>;
diff --git a/arch/arm/boot/dts/msm8610.dtsi b/arch/arm/boot/dts/msm8610.dtsi
index d6aef92..ad0980c 100644
--- a/arch/arm/boot/dts/msm8610.dtsi
+++ b/arch/arm/boot/dts/msm8610.dtsi
@@ -822,12 +822,6 @@
qcom,memory-reservation-size = <0x100000>; /* 1M EBI1 buffer */
};
- qcom,msm-contig-mem {
- compatible = "qcom,msm-contig-mem";
- qcom,memory-reservation-type = "EBI1";
- qcom,memory-reservation-size = <0x100000>; /* 1M EBI1 buffer */
- };
-
jtag_mm0: jtagmm@fc34c000 {
compatible = "qcom,jtag-mm";
reg = <0xfc34c000 0x1000>,
diff --git a/arch/arm/boot/dts/msm8974-camera-sensor-dragonboard.dtsi b/arch/arm/boot/dts/msm8974-camera-sensor-dragonboard.dtsi
index 31f3a90..b9f2125 100644
--- a/arch/arm/boot/dts/msm8974-camera-sensor-dragonboard.dtsi
+++ b/arch/arm/boot/dts/msm8974-camera-sensor-dragonboard.dtsi
@@ -36,6 +36,8 @@
qcom,mount-angle = <0>;
qcom,actuator-src = <&actuator0>;
qcom,sensor-name = "s5k3l1yx";
+ qcom,vdd-cx-supply = <&pm8841_s2>;
+ qcom,vdd-cx-name = "qcom,vdd-cx";
cam_vdig-supply = <&pm8941_l3>;
cam_vana-supply = <&pm8941_l17>;
cam_vio-supply = <&pm8941_lvs3>;
@@ -73,6 +75,8 @@
qcom,csid-sd-index = <0>;
qcom,mount-angle = <0>;
qcom,sensor-name = "imx135";
+ qcom,vdd-cx-supply = <&pm8841_s2>;
+ qcom,vdd-cx-name = "qcom,vdd-cx";
qcom,actuator-src = <&actuator1>;
cam_vdig-supply = <&pm8941_l3>;
cam_vana-supply = <&pm8941_l17>;
@@ -111,6 +115,8 @@
qcom,csid-sd-index = <0>;
qcom,mount-angle = <180>;
qcom,sensor-name = "ov2720";
+ qcom,vdd-cx-supply = <&pm8841_s2>;
+ qcom,vdd-cx-name = "qcom,vdd-cx";
cam_vdig-supply = <&pm8941_l3>;
cam_vana-supply = <&pm8941_l17>;
cam_vio-supply = <&pm8941_lvs3>;
@@ -146,6 +152,8 @@
qcom,csid-sd-index = <0>;
qcom,mount-angle = <0>;
qcom,sensor-name = "mt9m114";
+ qcom,vdd-cx-supply = <&pm8841_s2>;
+ qcom,vdd-cx-name = "qcom,vdd-cx";
cam_vdig-supply = <&pm8941_l3>;
cam_vana-supply = <&pm8941_l17>;
cam_vio-supply = <&pm8941_lvs3>;
diff --git a/arch/arm/boot/dts/msm8974pro-ab-cdp.dts b/arch/arm/boot/dts/msm8974pro-ab-cdp.dts
new file mode 100644
index 0000000..74bd23d
--- /dev/null
+++ b/arch/arm/boot/dts/msm8974pro-ab-cdp.dts
@@ -0,0 +1,28 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+/include/ "msm8974pro-ab.dtsi"
+/include/ "msm8974-cdp.dtsi"
+
+/ {
+ model = "Qualcomm MSM 8974Pro CDP";
+ compatible = "qcom,msm8974-cdp", "qcom,msm8974", "qcom,cdp";
+ qcom,msm-id = <209 1 0x10000>,
+ <211 1 0x10000>,
+ <212 1 0x10000>,
+ <214 1 0x10000>,
+ <215 1 0x10000>,
+ <217 1 0x10000>,
+ <218 1 0x10000>;
+};
diff --git a/arch/arm/boot/dts/msm8974pro-ab-fluid.dts b/arch/arm/boot/dts/msm8974pro-ab-fluid.dts
new file mode 100644
index 0000000..9a31834
--- /dev/null
+++ b/arch/arm/boot/dts/msm8974pro-ab-fluid.dts
@@ -0,0 +1,28 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+/include/ "msm8974pro-ab.dtsi"
+/include/ "msm8974-fluid.dtsi"
+
+/ {
+ model = "Qualcomm MSM 8974Pro FLUID";
+ compatible = "qcom,msm8974-fluid", "qcom,msm8974", "qcom,fluid";
+ qcom,msm-id = <209 3 0x10000>,
+ <211 3 0x10000>,
+ <212 3 0x10000>,
+ <214 3 0x10000>,
+ <215 3 0x10000>,
+ <217 3 0x10000>,
+ <218 3 0x10000>;
+};
diff --git a/arch/arm/boot/dts/msm8974pro-ab-liquid.dts b/arch/arm/boot/dts/msm8974pro-ab-liquid.dts
new file mode 100644
index 0000000..0ec9d8a
--- /dev/null
+++ b/arch/arm/boot/dts/msm8974pro-ab-liquid.dts
@@ -0,0 +1,28 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+/include/ "msm8974pro-ab.dtsi"
+/include/ "msm8974-liquid.dtsi"
+
+/ {
+ model = "Qualcomm MSM 8974Pro LIQUID";
+ compatible = "qcom,msm8974-liquid", "qcom,msm8974", "qcom,liquid";
+ qcom,msm-id = <209 9 0x10000>,
+ <211 9 0x10000>,
+ <212 9 0x10000>,
+ <214 9 0x10000>,
+ <215 9 0x10000>,
+ <217 9 0x10000>,
+ <218 9 0x10000>;
+};
diff --git a/arch/arm/boot/dts/msm8974pro-ab-mtp.dts b/arch/arm/boot/dts/msm8974pro-ab-mtp.dts
new file mode 100644
index 0000000..002baf7
--- /dev/null
+++ b/arch/arm/boot/dts/msm8974pro-ab-mtp.dts
@@ -0,0 +1,28 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+/include/ "msm8974pro-ab.dtsi"
+/include/ "msm8974-mtp.dtsi"
+
+/ {
+ model = "Qualcomm MSM 8974Pro MTP";
+ compatible = "qcom,msm8974-mtp", "qcom,msm8974", "qcom,mtp";
+ qcom,msm-id = <209 8 0x10000>,
+ <211 8 0x10000>,
+ <212 8 0x10000>,
+ <214 8 0x10000>,
+ <215 8 0x10000>,
+ <217 8 0x10000>,
+ <218 8 0x10000>;
+};
diff --git a/arch/arm/boot/dts/msm8974pro-ab.dtsi b/arch/arm/boot/dts/msm8974pro-ab.dtsi
new file mode 100644
index 0000000..9240514
--- /dev/null
+++ b/arch/arm/boot/dts/msm8974pro-ab.dtsi
@@ -0,0 +1,19 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * As a general rule, only chipset-specific property overrides should be placed
+ * inside this file. However, device definitions should be placed inside the
+ * msm8974.dtsi / msm8974pro.dtsi file(s).
+ */
+
+/include/ "msm8974pro.dtsi"
diff --git a/arch/arm/boot/dts/msm8974pro-ac-mtp.dts b/arch/arm/boot/dts/msm8974pro-ac-mtp.dts
new file mode 100644
index 0000000..c5042b7
--- /dev/null
+++ b/arch/arm/boot/dts/msm8974pro-ac-mtp.dts
@@ -0,0 +1,25 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+/include/ "msm8974pro-ac.dtsi"
+/include/ "msm8974-mtp.dtsi"
+
+/ {
+ model = "Qualcomm MSM 8974Pro-AC MTP";
+ compatible = "qcom,msm8974-mtp", "qcom,msm8974", "qcom,mtp";
+ qcom,msm-id = <194 8 0x10000>,
+ <210 8 0x10000>,
+ <213 8 0x10000>,
+ <216 8 0x10000>;
+};
diff --git a/arch/arm/boot/dts/msm8974pro-ac.dtsi b/arch/arm/boot/dts/msm8974pro-ac.dtsi
new file mode 100644
index 0000000..9240514
--- /dev/null
+++ b/arch/arm/boot/dts/msm8974pro-ac.dtsi
@@ -0,0 +1,19 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * As a general rule, only chipset-specific property overrides should be placed
+ * inside this file. However, device definitions should be placed inside the
+ * msm8974.dtsi / msm8974pro.dtsi file(s).
+ */
+
+/include/ "msm8974pro.dtsi"
diff --git a/arch/arm/boot/dts/msm8974pro.dtsi b/arch/arm/boot/dts/msm8974pro.dtsi
new file mode 100644
index 0000000..96e78ac
--- /dev/null
+++ b/arch/arm/boot/dts/msm8974pro.dtsi
@@ -0,0 +1,138 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * As a general rule, only version-specific property overrides should be placed
+ * inside this file. However, device definitions should be placed inside the
+ * msm8974.dtsi file.
+ */
+
+/include/ "msm8974.dtsi"
+/include/ "msm8974-v2-iommu.dtsi"
+/include/ "msm8974-v2-iommu-domains.dtsi"
+/include/ "msm8974-v2-pm.dtsi"
+
+&soc {
+ android_usb@fe8050c8 {
+ compatible = "qcom,android-usb";
+ reg = <0xfe8050c8 0xc8>;
+ qcom,android-usb-swfi-latency = <1>;
+ };
+
+ qcom,msm-imem@fe805000 {
+ compatible = "qcom,msm-imem";
+ reg = <0xfe805000 0x1000>; /* Address and size of IMEM */
+ };
+};
+
+/* GPU overrides */
+&msm_gpu {
+ /* Updated chip ID */
+ qcom,chipid = <0x03030001>;
+
+ /* Updated bus bandwidth requirements */
+ qcom,msm-bus,vectors-KBps =
+ /* Off */
+ <26 512 0 0>, <89 604 0 0>,
+ /* SVS */
+ <26 512 0 2400000>, <89 604 0 3000000>,
+ /* Nominal / SVS */
+ <26 512 0 4656000>, <89 604 0 3000000>,
+ /* Nominal */
+ <26 512 0 4656000>, <89 604 0 5120000>,
+ /* Turbo / Nominal */
+ <26 512 0 7464000>, <89 604 0 5120000>,
+ /* Turbo */
+ <26 512 0 7464000>, <89 604 0 6400000>;
+};
+
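Reviewer note: assuming the usual qcom,msm-bus,vectors-KBps encoding of <master-id slave-id ab ib>, each power level above votes average (ab) and instantaneous (ib) bandwidth, in KBps, for two GPU master/slave routes. The Turbo row, for example, decodes as:

	/* <26 512 0 7464000>: master 26 -> slave 512,
	 * ab = 0 KBps (no average-bandwidth vote),
	 * ib = 7464000 KBps, i.e. ~7.46 GB/s instantaneous
	 */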
+&mdss_mdp {
+ qcom,vbif-settings = <0x0004 0x00000001>;
+
+ qcom,mdss-wb-off = <0x00011100 0x00011500
+ 0x00011900 0x00011D00 0x00012100>;
+ qcom,mdss-intf-off = <0x00012500 0x00012700
+ 0x00012900 0x00012b00>;
+ qcom,mdss-pingpong-off = <0x00012D00 0x00012E00 0x00012F00>;
+ qcom,mdss-has-bwc;
+ qcom,mdss-has-decimation;
+	qcom,mdss-ad-off = <0x00013100 0x00013300>;
+};
+
+&mdss_hdmi_tx {
+ reg = <0xfd922100 0x370>,
+ <0xfd922500 0x7C>,
+ <0xfc4b8000 0x60F0>;
+ reg-names = "core_physical", "phy_physical", "qfprom_physical";
+};
+
+&msm_vidc {
+ qcom,vidc-ns-map = <0x40000000 0x40000000>;
+ qcom,load-freq-tbl = <979200 465000000>,
+ <783360 465000000>,
+ <489600 266670000>,
+ <244800 133330000>;
+ qcom,reg-presets = <0x80004 0x1>,
+ <0x80070 0x11FFF>,
+ <0x80074 0xA4>,
+ <0x800A8 0x1FFF>,
+ <0x80124 0x3>,
+ <0xE0020 0x5555556>,
+ <0xE0024 0x0>;
+ qcom,bus-ports = <1>;
+ qcom,enc-ocmem-ab-ib = <0 0>,
+ <138000 1034000>,
+ <414000 1034000>,
+ <940000 1034000>,
+ <1880000 2068000>,
+ <3008000 3309000>,
+ <3760000 4136000>,
+ <4468000 2457000>;
+ qcom,dec-ocmem-ab-ib = <0 0>,
+ <176000 519000>,
+ <456000 519000>,
+ <864000 519000>,
+ <1728000 1038000>,
+ <2766000 1661000>,
+ <3456000 2076000>,
+ <3662000 2198000>;
+ qcom,enc-ddr-ab-ib = <0 0>,
+ <120000 302000>,
+ <364000 302000>,
+ <804000 302000>,
+ <1608000 604000>,
+ <2576000 967000>,
+ <4680000 1404000>,
+				<4988000 1496000>;
+ qcom,dec-ddr-ab-ib = <0 0>,
+ <208000 303000>,
+ <536000 303000>,
+ <1012000 303000>,
+ <2024000 606000>,
+ <3240000 970000>,
+ <4048000 1212000>,
+ <4264000 1279000>;
+ qcom,iommu-groups = <&venus_domain_ns &venus_domain_sec_bitstream
+ &venus_domain_sec_pixel &venus_domain_sec_non_pixel>;
+ qcom,iommu-group-buffer-types = <0xfff 0x91 0x42 0x120>;
+ qcom,buffer-type-tz-usage-table = <0x91 0x1>,
+ <0x42 0x2>,
+ <0x120 0x3>;
+};
+
+&krait_pdn {
+ qcom,use-phase-switching;
+};
+
+&tspp {
+ vdd_cx-supply = <&pm8841_s2_corner>;
+};
diff --git a/arch/arm/configs/apq8084_defconfig b/arch/arm/configs/apq8084_defconfig
index 9cd37d1..2965607 100644
--- a/arch/arm/configs/apq8084_defconfig
+++ b/arch/arm/configs/apq8084_defconfig
@@ -399,3 +399,8 @@
CONFIG_CRYPTO_DEV_QCE=m
CONFIG_CRYPTO_DEV_QCEDEV=m
CONFIG_CRC_CCITT=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_TMC=y
+CONFIG_CORESIGHT_FUNNEL=y
+CONFIG_CORESIGHT_REPLICATOR=y
+CONFIG_CORESIGHT_STM=y
diff --git a/arch/arm/configs/msm8226-perf_defconfig b/arch/arm/configs/msm8226-perf_defconfig
index a9d93b0..4a5c9a7 100644
--- a/arch/arm/configs/msm8226-perf_defconfig
+++ b/arch/arm/configs/msm8226-perf_defconfig
@@ -415,6 +415,7 @@
CONFIG_ENABLE_DEFAULT_TRACERS=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_USER=y
+CONFIG_DEBUG_SET_MODULE_RONX=y
CONFIG_KEYS=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_ARC4=y
diff --git a/arch/arm/configs/msm8226_defconfig b/arch/arm/configs/msm8226_defconfig
index 506de21..02d4873 100644
--- a/arch/arm/configs/msm8226_defconfig
+++ b/arch/arm/configs/msm8226_defconfig
@@ -451,6 +451,7 @@
CONFIG_DEBUG_USER=y
CONFIG_DEBUG_LL=y
CONFIG_EARLY_PRINTK=y
+CONFIG_DEBUG_SET_MODULE_RONX=y
CONFIG_KEYS=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_ARC4=y
diff --git a/arch/arm/configs/msm8610-perf_defconfig b/arch/arm/configs/msm8610-perf_defconfig
index 3a08849..07f7ed9 100644
--- a/arch/arm/configs/msm8610-perf_defconfig
+++ b/arch/arm/configs/msm8610-perf_defconfig
@@ -375,6 +375,7 @@
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_ENABLE_DEFAULT_TRACERS=y
CONFIG_DEBUG_USER=y
+CONFIG_DEBUG_SET_MODULE_RONX=y
CONFIG_KEYS=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_ARC4=y
diff --git a/arch/arm/configs/msm8610_defconfig b/arch/arm/configs/msm8610_defconfig
index 1998293..34c0905 100644
--- a/arch/arm/configs/msm8610_defconfig
+++ b/arch/arm/configs/msm8610_defconfig
@@ -416,6 +416,7 @@
CONFIG_ENABLE_DEFAULT_TRACERS=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_USER=y
+CONFIG_DEBUG_SET_MODULE_RONX=y
CONFIG_KEYS=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_ARC4=y
diff --git a/arch/arm/configs/msm8974-perf_defconfig b/arch/arm/configs/msm8974-perf_defconfig
index f193216..cdf4263 100644
--- a/arch/arm/configs/msm8974-perf_defconfig
+++ b/arch/arm/configs/msm8974-perf_defconfig
@@ -472,6 +472,7 @@
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_USER=y
CONFIG_PID_IN_CONTEXTIDR=y
+CONFIG_DEBUG_SET_MODULE_RONX=y
CONFIG_KEYS=y
CONFIG_CRYPTO_NULL=y
CONFIG_CRYPTO_XCBC=y
@@ -481,3 +482,4 @@
CONFIG_CRYPTO_DEV_QCRYPTO=m
CONFIG_CRYPTO_DEV_QCE=y
CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_SND_SOC_MSM_HDMI_CODEC_RX=y
diff --git a/arch/arm/configs/msm8974_defconfig b/arch/arm/configs/msm8974_defconfig
index 9a52be8..19d428b 100644
--- a/arch/arm/configs/msm8974_defconfig
+++ b/arch/arm/configs/msm8974_defconfig
@@ -504,6 +504,7 @@
CONFIG_DEBUG_LL=y
CONFIG_EARLY_PRINTK=y
CONFIG_PID_IN_CONTEXTIDR=y
+CONFIG_DEBUG_SET_MODULE_RONX=y
CONFIG_KEYS=y
CONFIG_CRYPTO_NULL=y
CONFIG_CRYPTO_XCBC=y
@@ -513,3 +514,4 @@
CONFIG_CRYPTO_DEV_QCRYPTO=m
CONFIG_CRYPTO_DEV_QCE=y
CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_SND_SOC_MSM_HDMI_CODEC_RX=y
diff --git a/arch/arm/mach-msm/board-8084.c b/arch/arm/mach-msm/board-8084.c
index 7dc9a90..2a6bbb7 100644
--- a/arch/arm/mach-msm/board-8084.c
+++ b/arch/arm/mach-msm/board-8084.c
@@ -28,6 +28,7 @@
#include <mach/restart.h>
#include <mach/socinfo.h>
#include <mach/clk-provider.h>
+#include <mach/msm_smem.h>
#include "board-dt.h"
#include "clock.h"
#include "devices.h"
@@ -84,6 +85,7 @@
*/
void __init apq8084_add_drivers(void)
{
+ msm_smem_init();
msm_init_modem_notifier_list();
msm_smd_init();
msm_clock_init(&msm8084_clock_init_data);
diff --git a/arch/arm/mach-msm/board-8226.c b/arch/arm/mach-msm/board-8226.c
index 39efcd8..5ad6175 100644
--- a/arch/arm/mach-msm/board-8226.c
+++ b/arch/arm/mach-msm/board-8226.c
@@ -44,6 +44,7 @@
#include <mach/msm_smd.h>
#include <mach/rpm-smd.h>
#include <mach/rpm-regulator-smd.h>
+#include <mach/msm_smem.h>
#include <linux/msm_thermal.h>
#include "board-dt.h"
#include "clock.h"
@@ -107,6 +108,7 @@
*/
void __init msm8226_add_drivers(void)
{
+ msm_smem_init();
msm_init_modem_notifier_list();
msm_smd_init();
msm_rpm_driver_init();
diff --git a/arch/arm/mach-msm/board-8610.c b/arch/arm/mach-msm/board-8610.c
index b4d77f1..c6c9d14 100644
--- a/arch/arm/mach-msm/board-8610.c
+++ b/arch/arm/mach-msm/board-8610.c
@@ -45,6 +45,7 @@
#include <mach/msm_smd.h>
#include <mach/rpm-smd.h>
#include <mach/rpm-regulator-smd.h>
+#include <mach/msm_smem.h>
#include <linux/msm_thermal.h>
#include "board-dt.h"
#include "clock.h"
@@ -102,6 +103,7 @@
void __init msm8610_add_drivers(void)
{
+ msm_smem_init();
msm_init_modem_notifier_list();
msm_smd_init();
msm_rpm_driver_init();
diff --git a/arch/arm/mach-msm/board-8974.c b/arch/arm/mach-msm/board-8974.c
index 771359c..68af757 100644
--- a/arch/arm/mach-msm/board-8974.c
+++ b/arch/arm/mach-msm/board-8974.c
@@ -40,6 +40,7 @@
#include <mach/rpm-smd.h>
#include <mach/rpm-regulator-smd.h>
#include <mach/socinfo.h>
+#include <mach/msm_smem.h>
#include "board-dt.h"
#include "clock.h"
#include "devices.h"
@@ -91,6 +92,7 @@
*/
void __init msm8974_add_drivers(void)
{
+ msm_smem_init();
msm_init_modem_notifier_list();
msm_smd_init();
msm_rpm_driver_init();
diff --git a/arch/arm/mach-msm/board-9625.c b/arch/arm/mach-msm/board-9625.c
index 56246dd..fad2efc 100644
--- a/arch/arm/mach-msm/board-9625.c
+++ b/arch/arm/mach-msm/board-9625.c
@@ -39,6 +39,7 @@
#include <mach/rpm-regulator-smd.h>
#include "board-dt.h"
#include <mach/msm_bus_board.h>
+#include <mach/msm_smem.h>
#include "clock.h"
#include "modem_notifier.h"
#include "lpm_resources.h"
@@ -231,6 +232,7 @@
*/
void __init msm9625_add_drivers(void)
{
+ msm_smem_init();
msm_init_modem_notifier_list();
msm_smd_init();
msm_rpm_driver_init();
diff --git a/arch/arm/mach-msm/board-samarium.c b/arch/arm/mach-msm/board-samarium.c
index 574c979..cfdc074 100644
--- a/arch/arm/mach-msm/board-samarium.c
+++ b/arch/arm/mach-msm/board-samarium.c
@@ -26,6 +26,7 @@
#include <mach/restart.h>
#include <mach/socinfo.h>
#include <mach/clk-provider.h>
+#include <mach/msm_smem.h>
#include "board-dt.h"
#include "clock.h"
#include "devices.h"
@@ -95,6 +96,7 @@
*/
void __init msmsamarium_add_drivers(void)
{
+ msm_smem_init();
msm_clock_init(&msm_dummy_clock_init_data);
}
diff --git a/arch/arm/mach-msm/clock-8084.c b/arch/arm/mach-msm/clock-8084.c
index bec9f1b4..940e24b 100644
--- a/arch/arm/mach-msm/clock-8084.c
+++ b/arch/arm/mach-msm/clock-8084.c
@@ -369,6 +369,27 @@
CLK_DUMMY("core_clk", NULL, "fe054000.qcom,iommu", OFF),
CLK_DUMMY("iface_clk", NULL, "fe064000.qcom,iommu", OFF),
CLK_DUMMY("core_clk", NULL, "fe064000.qcom,iommu", OFF),
+
+ /* CoreSight clocks */
+ CLK_DUMMY("core_clk", qdss_clk.c, "fc326000.tmc", OFF),
+ CLK_DUMMY("core_clk", qdss_clk.c, "fc324000.replicator", OFF),
+ CLK_DUMMY("core_clk", qdss_clk.c, "fc325000.tmc", OFF),
+ CLK_DUMMY("core_clk", qdss_clk.c, "fc323000.funnel", OFF),
+ CLK_DUMMY("core_clk", qdss_clk.c, "fc321000.funnel", OFF),
+ CLK_DUMMY("core_clk", qdss_clk.c, "fc322000.funnel", OFF),
+	CLK_DUMMY("core_clk", qdss_clk.c, "fc355000.funnel", OFF),
+ CLK_DUMMY("core_clk", qdss_clk.c, "fc36c000.funnel", OFF),
+ CLK_DUMMY("core_clk", qdss_clk.c, "fc302000.stm", OFF),
+
+ CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc326000.tmc", OFF),
+ CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc324000.replicator", OFF),
+ CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc325000.tmc", OFF),
+ CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc323000.funnel", OFF),
+ CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc321000.funnel", OFF),
+ CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc322000.funnel", OFF),
+	CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc355000.funnel", OFF),
+ CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc36c000.funnel", OFF),
+ CLK_DUMMY("core_a_clk", qdss_a_clk.c, "fc302000.stm", OFF),
};
struct clock_init_data msm8084_clock_init_data __initdata = {
diff --git a/arch/arm/mach-msm/clock-8610.c b/arch/arm/mach-msm/clock-8610.c
index eb9a627..b1b9397 100644
--- a/arch/arm/mach-msm/clock-8610.c
+++ b/arch/arm/mach-msm/clock-8610.c
@@ -3013,10 +3013,12 @@
CLK_LOOKUP("cam_src_clk", mclk0_clk_src.c, "6-007d"),
CLK_LOOKUP("cam_src_clk", mclk0_clk_src.c, "6-006d"),
CLK_LOOKUP("cam_src_clk", mclk1_clk_src.c, "6-0078"),
+ CLK_LOOKUP("cam_src_clk", mclk0_clk_src.c, "6-0020"),
CLK_LOOKUP("cam_clk", mclk0_clk.c, "6-006f"),
CLK_LOOKUP("cam_clk", mclk0_clk.c, "6-007d"),
CLK_LOOKUP("cam_clk", mclk0_clk.c, "6-006d"),
CLK_LOOKUP("cam_clk", mclk1_clk.c, "6-0078"),
+ CLK_LOOKUP("cam_clk", mclk0_clk.c, "6-0020"),
/* CSIPHY clocks */
diff --git a/arch/arm/mach-msm/clock-generic.c b/arch/arm/mach-msm/clock-generic.c
index 4d74533..e66764f 100644
--- a/arch/arm/mach-msm/clock-generic.c
+++ b/arch/arm/mach-msm/clock-generic.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
+#include <linux/err.h>
#include <linux/clk.h>
#include <mach/clk-provider.h>
@@ -67,10 +68,12 @@
{
struct mux_clk *mux = to_mux_clk(c);
int i;
- long prate, max_prate = 0, rrate = LONG_MAX;
+ unsigned long prate, max_prate = 0, rrate = ULONG_MAX;
for (i = 0; i < mux->num_parents; i++) {
prate = clk_round_rate(mux->parents[i].src, rate);
+ if (IS_ERR_VALUE(prate))
+ continue;
if (prate < rate) {
max_prate = max(prate, max_prate);
continue;
@@ -78,7 +81,7 @@
rrate = min(rrate, prate);
}
- if (rrate == LONG_MAX)
+ if (rrate == ULONG_MAX)
rrate = max_prate;
return rrate ? rrate : -EINVAL;
@@ -200,7 +203,7 @@
{
struct div_clk *d = to_div_clk(c);
unsigned int div, min_div, max_div;
- long p_rrate, rrate = LONG_MAX;
+ unsigned long p_rrate, rrate = ULONG_MAX;
rate = max(rate, 1UL);
@@ -208,12 +211,12 @@
min_div = max_div = d->div;
else {
min_div = max(d->min_div, 1U);
- max_div = min(d->max_div, (unsigned int) (LONG_MAX / rate));
+ max_div = min(d->max_div, (unsigned int) (ULONG_MAX / rate));
}
for (div = min_div; div <= max_div; div++) {
p_rrate = clk_round_rate(c->parent, rate * div);
- if (p_rrate < 0)
+ if (IS_ERR_VALUE(p_rrate))
break;
p_rrate /= div;
@@ -225,7 +228,7 @@
* for a higher divider. So, stop trying higher dividers.
*/
if (p_rrate < rate) {
- if (rrate == LONG_MAX) {
+ if (rrate == ULONG_MAX) {
rrate = p_rrate;
if (best_div)
*best_div = div;
@@ -242,7 +245,7 @@
break;
}
- if (rrate == LONG_MAX)
+ if (rrate == ULONG_MAX)
return -EINVAL;
return rrate;
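Reviewer note: once these rates are unsigned long, a plain "p_rrate < 0" test can never fire, which is exactly why the hunks above switch to IS_ERR_VALUE(). A minimal sketch of the idiom (IS_ERR_VALUE() comes from the newly included <linux/err.h>):

	unsigned long prate;

	prate = clk_round_rate(parent, rate);
	/* a negative errno returned as a long lands in the top MAX_ERRNO
	 * values of unsigned long; IS_ERR_VALUE() detects that window */
	if (IS_ERR_VALUE(prate))
		return -EINVAL;	/* or skip this parent/divider */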
diff --git a/arch/arm/mach-msm/include/mach/ipa.h b/arch/arm/mach-msm/include/mach/ipa.h
index 697de5e..b6acef2 100644
--- a/arch/arm/mach-msm/include/mach/ipa.h
+++ b/arch/arm/mach-msm/include/mach/ipa.h
@@ -648,6 +648,8 @@
int a2_mux_write(enum a2_mux_logical_channel_id lcid, struct sk_buff *skb);
+int a2_mux_is_ch_empty(enum a2_mux_logical_channel_id lcid);
+
int a2_mux_is_ch_low(enum a2_mux_logical_channel_id lcid);
int a2_mux_is_ch_full(enum a2_mux_logical_channel_id lcid);
@@ -667,6 +669,8 @@
int teth_bridge_set_aggr_params(struct teth_aggr_params *aggr_params);
+void ipa_bam_reg_dump(void);
+
#else /* CONFIG_IPA */
static inline int a2_mux_open_channel(enum a2_mux_logical_channel_id lcid,
@@ -686,6 +690,11 @@
return -EPERM;
}
+static inline int a2_mux_is_ch_empty(enum a2_mux_logical_channel_id lcid)
+{
+ return -EPERM;
+}
+
static inline int a2_mux_is_ch_low(enum a2_mux_logical_channel_id lcid)
{
return -EPERM;
@@ -1099,6 +1108,11 @@
return -EPERM;
}
+static inline void ipa_bam_reg_dump(void)
+{
+ return;
+}
+
#endif /* CONFIG_IPA*/
#endif /* _IPA_H_ */
diff --git a/arch/arm/mach-msm/include/mach/msm_smd.h b/arch/arm/mach-msm/include/mach/msm_smd.h
index 2cc7b10..62fada1 100644
--- a/arch/arm/mach-msm/include/mach/msm_smd.h
+++ b/arch/arm/mach-msm/include/mach/msm_smd.h
@@ -134,25 +134,10 @@
int disable_smsm_reset_handshake;
};
-/*
- * Shared Memory Regions
- *
- * the array of these regions is expected to be in ascending order by phys_addr
- *
- * @phys_addr: physical base address of the region
- * @size: size of the region in bytes
- */
-struct smd_smem_regions {
- phys_addr_t phys_addr;
- resource_size_t size;
-};
-
struct smd_platform {
uint32_t num_ss_configs;
struct smd_subsystem_config *smd_ss_configs;
struct smd_subsystem_restart_config *smd_ssr_config;
- uint32_t num_smem_areas;
- struct smd_smem_regions *smd_smem_areas;
};
#ifdef CONFIG_MSM_SMD
@@ -322,24 +307,6 @@
*/
int smd_is_pkt_avail(smd_channel_t *ch);
-/**
- * smd_module_init_notifier_register() - Register a smd module
- * init notifier block
- * @nb: Notifier block to be registered
- *
- * In order to mark the dependency on SMD Driver module initialization
- * register a notifier using this API. Once the smd module_init is
- * done, notification will be passed to the registered module.
- */
-int smd_module_init_notifier_register(struct notifier_block *nb);
-
-/**
- * smd_module_init_notifier_register() - Unregister a smd module
- * init notifier block
- * @nb: Notifier block to be registered
- */
-int smd_module_init_notifier_unregister(struct notifier_block *nb);
-
/*
* SMD initialization function that registers for a SMD platform driver.
*
@@ -474,16 +441,6 @@
return -ENODEV;
}
-static inline int smd_module_init_notifier_register(struct notifier_block *nb)
-{
- return -ENODEV;
-}
-
-static inline int smd_module_init_notifier_unregister(struct notifier_block *nb)
-{
- return -ENODEV;
-}
-
static inline int __init msm_smd_init(void)
{
return 0;
diff --git a/arch/arm/mach-msm/include/mach/msm_smem.h b/arch/arm/mach-msm/include/mach/msm_smem.h
index 57f22cc..64ab6bf 100644
--- a/arch/arm/mach-msm/include/mach/msm_smem.h
+++ b/arch/arm/mach-msm/include/mach/msm_smem.h
@@ -144,6 +144,20 @@
void *smem_alloc2(unsigned id, unsigned size_in);
void *smem_get_entry(unsigned id, unsigned *size);
void *smem_find(unsigned id, unsigned size);
+
+/**
+ * smem_get_entry_no_rlock() - Get existing item without using remote spinlock
+ *
+ * @id: ID of SMEM item
+ * @size_out: Pointer to size variable for storing the result
+ * @returns: Pointer to SMEM item or NULL if it doesn't exist
+ *
+ * This function does not lock the remote spinlock and should only be used in
+ * failure-recovery cases such as retrieving the subsystem failure reason during
+ * subsystem restart.
+ */
+void *smem_get_entry_no_rlock(unsigned id, unsigned *size_out);
+
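Reviewer note: a minimal sketch of the failure-path usage this kernel-doc describes, modelled on a subsystem-restart reason lookup (SMEM_SSR_REASON_MSS0 is assumed here purely for illustration; use whichever crash-reason item the subsystem exports):

	unsigned size;
	char *reason;

	/* the crashed remote may still hold the remote spinlock,
	 * so read the entry without trying to take it */
	reason = smem_get_entry_no_rlock(SMEM_SSR_REASON_MSS0, &size);
	if (reason && size)
		pr_err("SSR reason: %.*s\n", (int)size, reason);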
/**
* smem_virt_to_phys() - Convert SMEM address to physical address.
*
@@ -155,6 +169,13 @@
*/
phys_addr_t smem_virt_to_phys(void *smem_address);
+/**
+ * SMEM initialization function that registers for a SMEM platform driver.
+ *
+ * @returns: success on successful driver registration.
+ */
+int __init msm_smem_init(void);
+
#else
static inline void *smem_alloc(unsigned id, unsigned size)
{
@@ -172,9 +193,17 @@
{
return NULL;
}
+static inline void *smem_get_entry_no_rlock(unsigned id, unsigned *size_out)
+{
+ return NULL;
+}
static inline phys_addr_t smem_virt_to_phys(void *smem_address)
{
return (phys_addr_t) NULL;
}
+static inline int msm_smem_init(void)
+{
+ return 0;
+}
#endif /* CONFIG_MSM_SMD */
#endif /* _ARCH_ARM_MACH_MSM_SMEM_H_ */
diff --git a/arch/arm/mach-msm/include/mach/socinfo.h b/arch/arm/mach-msm/include/mach/socinfo.h
index d52686c..52102e3 100644
--- a/arch/arm/mach-msm/include/mach/socinfo.h
+++ b/arch/arm/mach-msm/include/mach/socinfo.h
@@ -115,6 +115,9 @@
MSM_CPU_7X27AA,
MSM_CPU_9615,
MSM_CPU_8974,
+ MSM_CPU_8974PRO_AA,
+ MSM_CPU_8974PRO_AB,
+ MSM_CPU_8974PRO_AC,
MSM_CPU_8627,
MSM_CPU_8625,
MSM_CPU_9625,
@@ -434,6 +437,42 @@
#endif
}
+static inline int cpu_is_msm8974pro_aa(void)
+{
+#ifdef CONFIG_ARCH_MSM8974
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8974PRO_AA;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8974pro_ab(void)
+{
+#ifdef CONFIG_ARCH_MSM8974
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8974PRO_AB;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8974pro_ac(void)
+{
+#ifdef CONFIG_ARCH_MSM8974
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8974PRO_AC;
+#else
+ return 0;
+#endif
+}
+
static inline int cpu_is_mpq8092(void)
{
#ifdef CONFIG_ARCH_MPQ8092
@@ -499,4 +538,10 @@
cpu_is_msm8627();
}
+static inline int soc_class_is_msm8974(void)
+{
+ return cpu_is_msm8974() || cpu_is_msm8974pro_aa() ||
+ cpu_is_msm8974pro_ab() || cpu_is_msm8974pro_ac();
+}
+
#endif
diff --git a/arch/arm/mach-msm/scm-pas.c b/arch/arm/mach-msm/scm-pas.c
index 4cb43b1..b244c6f 100644
--- a/arch/arm/mach-msm/scm-pas.c
+++ b/arch/arm/mach-msm/scm-pas.c
@@ -268,7 +268,7 @@
rate = clk_round_rate(scm_clocks[CORE_CLK_SRC], 1);
clk_set_rate(scm_clocks[CORE_CLK_SRC], rate);
- if (cpu_is_msm8974() || cpu_is_msm8226() || cpu_is_msm8610()) {
+ if (soc_class_is_msm8974() || cpu_is_msm8226() || cpu_is_msm8610()) {
scm_pas_bw_tbl[0].vectors[0].src = MSM_BUS_MASTER_CRYPTO_CORE0;
scm_pas_bw_tbl[1].vectors[0].src = MSM_BUS_MASTER_CRYPTO_CORE0;
} else {
diff --git a/arch/arm/mach-msm/smd.c b/arch/arm/mach-msm/smd.c
index 4649390..c97ba68 100644
--- a/arch/arm/mach-msm/smd.c
+++ b/arch/arm/mach-msm/smd.c
@@ -176,8 +176,6 @@
},
};
-static void *smd_dev;
-
struct interrupt_stat interrupt_stats[NUM_SMD_SUBSYSTEMS];
#define SMSM_STATE_ADDR(entry) (smsm_info.state + entry)
@@ -381,14 +379,6 @@
static DEFINE_MUTEX(smsm_lock);
static struct smsm_state_info *smsm_states;
-/**
- * Variables to indicate smd module initialization.
- * Dependents to register for smd module init notifier.
- */
-static int smd_module_inited;
-static RAW_NOTIFIER_HEAD(smd_module_init_notifier_list);
-static DEFINE_MUTEX(smd_module_init_notifier_lock);
-static void smd_module_init_notify(uint32_t state, void *data);
static int smd_stream_write_avail(struct smd_channel *ch);
static int smd_stream_read_avail(struct smd_channel *ch);
@@ -708,6 +698,7 @@
uint32_t local_pid;
uint32_t remote_pid;
char subsys_name[SMD_MAX_CH_NAME_LEN];
+ bool initialized;
};
/**
@@ -762,6 +753,11 @@
static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm);
+static bool smd_edge_inited(int edge)
+{
+ return edge_to_pids[edge].initialized;
+}
+
/* on smp systems, the probe might get called from multiple cores,
hence use a lock */
static DEFINE_MUTEX(smd_probe_lock);
@@ -795,6 +791,11 @@
if (!shared[n].name[0])
continue;
+ if (!smd_initialized && !smd_edge_inited(type)) {
+ SMD_INFO("Probe skipping ch %d, edge not inited\n", n);
+ continue;
+ }
+
if (!smd_alloc_channel(&shared[n]))
smd_ch_allocated[n] = 1;
else
@@ -1913,7 +1914,7 @@
struct smd_channel *ch;
unsigned long flags;
- if (smd_initialized == 0) {
+ if (smd_initialized == 0 && !smd_edge_inited(edge)) {
SMD_INFO("smd_open() before smd_init()\n");
return -ENODEV;
}
@@ -2436,16 +2437,23 @@
struct smsm_size_info_type *smsm_size_info;
unsigned long flags;
unsigned long j_start;
+ static int first = 1;
+ remote_spinlock_t *remote_spinlock;
+
+ if (!first)
+ return 0;
+ first = 0;
/* Verify that remote spinlock is not deadlocked */
+ remote_spinlock = smem_get_remote_spinlock();
j_start = jiffies;
- while (!remote_spin_trylock_irqsave(&remote_spinlock, flags)) {
+ while (!remote_spin_trylock_irqsave(remote_spinlock, flags)) {
if (jiffies_to_msecs(jiffies - j_start) > RSPIN_INIT_WAIT_MS) {
panic("%s: Remote processor %d will not release spinlock\n",
- __func__, remote_spin_owner(&remote_spinlock));
+ __func__, remote_spin_owner(remote_spinlock));
}
}
- remote_spin_unlock_irqrestore(&remote_spinlock, flags);
+ remote_spin_unlock_irqrestore(remote_spinlock, flags);
smsm_size_info = smem_alloc(SMEM_SMSM_SIZE_INFO,
sizeof(struct smsm_size_info_type));
@@ -2860,9 +2868,6 @@
int ret;
unsigned long flags;
- if (!smd_initialized)
- return;
-
while (kfifo_len(&smsm_snapshot_fifo) >= SMSM_SNAPSHOT_SIZE) {
mutex_lock(&smsm_lock);
for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
@@ -3084,42 +3089,6 @@
}
EXPORT_SYMBOL(smsm_state_cb_deregister);
-int smd_module_init_notifier_register(struct notifier_block *nb)
-{
- int ret;
- if (!nb)
- return -EINVAL;
- mutex_lock(&smd_module_init_notifier_lock);
- ret = raw_notifier_chain_register(&smd_module_init_notifier_list, nb);
- if (smd_module_inited)
- nb->notifier_call(nb, 0, NULL);
- mutex_unlock(&smd_module_init_notifier_lock);
- return ret;
-}
-EXPORT_SYMBOL(smd_module_init_notifier_register);
-
-int smd_module_init_notifier_unregister(struct notifier_block *nb)
-{
- int ret;
- if (!nb)
- return -EINVAL;
- mutex_lock(&smd_module_init_notifier_lock);
- ret = raw_notifier_chain_unregister(&smd_module_init_notifier_list,
- nb);
- mutex_unlock(&smd_module_init_notifier_lock);
- return ret;
-}
-EXPORT_SYMBOL(smd_module_init_notifier_unregister);
-
-static void smd_module_init_notify(uint32_t state, void *data)
-{
- mutex_lock(&smd_module_init_notifier_lock);
- smd_module_inited = 1;
- raw_notifier_call_chain(&smd_module_init_notifier_list,
- state, data);
- mutex_unlock(&smd_module_init_notifier_lock);
-}
-
int smd_core_init(void)
{
int r;
@@ -3315,9 +3284,6 @@
struct smd_subsystem_config *smd_ss_config_list;
struct smd_subsystem_config *cfg;
int err_ret = 0;
- struct smd_smem_regions *smd_smem_areas;
- struct smem_area *smem_areas_tmp = NULL;
- int smem_idx;
smd_platform_data = pdev->dev.platform_data;
num_ss = smd_platform_data->num_ss_configs;
@@ -3327,57 +3293,6 @@
disable_smsm_reset_handshake = smd_platform_data->
smd_ssr_config->disable_smsm_reset_handshake;
- smd_smem_areas = smd_platform_data->smd_smem_areas;
- num_smem_areas = smd_platform_data->num_smem_areas + 1;
-
- /* Initialize main SMEM region */
- smem_areas_tmp = kmalloc_array(num_smem_areas, sizeof(struct smem_area),
- GFP_KERNEL);
- if (!smem_areas_tmp) {
- pr_err("%s: smem_areas kmalloc failed\n", __func__);
- err_ret = -ENOMEM;
- goto smem_areas_alloc_fail;
- }
-
- smem_areas_tmp[0].phys_addr = msm_shared_ram_phys;
- smem_areas_tmp[0].size = MSM_SHARED_RAM_SIZE;
- smem_areas_tmp[0].virt_addr = MSM_SHARED_RAM_BASE;
-
- /* Configure auxiliary SMEM regions */
- for (smem_idx = 1; smem_idx < num_smem_areas; ++smem_idx) {
- smem_areas_tmp[smem_idx].phys_addr =
- smd_smem_areas[smem_idx].phys_addr;
- smem_areas_tmp[smem_idx].size =
- smd_smem_areas[smem_idx].size;
- smem_areas_tmp[smem_idx].virt_addr = ioremap_nocache(
- (unsigned long)(smem_areas_tmp[smem_idx].phys_addr),
- smem_areas_tmp[smem_idx].size);
- if (!smem_areas_tmp[smem_idx].virt_addr) {
- pr_err("%s: ioremap_nocache() of addr: %pa size: %pa\n",
- __func__,
- &smem_areas_tmp[smem_idx].phys_addr,
- &smem_areas_tmp[smem_idx].size);
- err_ret = -ENOMEM;
- goto smem_failed;
- }
-
- if (OVERFLOW_ADD_UNSIGNED(uintptr_t,
- (uintptr_t)smem_areas_tmp[smem_idx].virt_addr,
- smem_areas_tmp[smem_idx].size)) {
- pr_err("%s: invalid virtual address block %i: %p:%pa\n",
- __func__, smem_idx,
- smem_areas_tmp[smem_idx].virt_addr,
- &smem_areas_tmp[smem_idx].size);
- ++smem_idx;
- err_ret = -EINVAL;
- goto smem_failed;
- }
-
- SMD_DBG("%s: %d = %pa %pa", __func__, smem_idx,
- &smd_smem_areas[smem_idx].phys_addr,
- &smd_smem_areas[smem_idx].size);
- }
-
for (i = 0; i < num_ss; i++) {
cfg = &smd_ss_config_list[i];
@@ -3421,7 +3336,6 @@
SMD_INFO("smd_core_platform_init() done\n");
- smem_areas = smem_areas_tmp;
return 0;
intr_failed:
@@ -3438,19 +3352,105 @@
(void *)cfg->smsm_int.dev_id
);
}
-smem_failed:
- for (smem_idx = smem_idx - 1; smem_idx >= 1; --smem_idx)
- iounmap(smem_areas_tmp[smem_idx].virt_addr);
-
- num_smem_areas = 0;
- kfree(smem_areas_tmp);
-
-smem_areas_alloc_fail:
return err_ret;
}
-static int __devinit parse_smd_devicetree(struct device_node *node,
- void *irq_out_base)
+static int msm_smsm_probe(struct platform_device *pdev)
+{
+ uint32_t edge;
+ char *key;
+ int ret;
+ uint32_t irq_offset;
+ uint32_t irq_bitmask;
+ uint32_t irq_line;
+ struct interrupt_config_item *private_irq;
+ struct device_node *node;
+ void *irq_out_base;
+ resource_size_t irq_out_size;
+ struct platform_device *parent_pdev;
+ struct resource *r;
+
+ disable_smsm_reset_handshake = 1;
+
+ node = pdev->dev.of_node;
+
+ if (!pdev->dev.parent) {
+ pr_err("%s: missing link to parent device\n", __func__);
+ return -ENODEV;
+ }
+
+ parent_pdev = to_platform_device(pdev->dev.parent);
+
+ key = "irq-reg-base";
+	/* existence check verified in smem driver */
+ r = platform_get_resource_byname(parent_pdev, IORESOURCE_MEM, key);
+ irq_out_size = resource_size(r);
+ irq_out_base = ioremap_nocache(r->start, irq_out_size);
+ if (!irq_out_base) {
+		pr_err("%s: ioremap_nocache() of irq_out_base addr:%pa size:%pa\n",
+ __func__, &r->start, &irq_out_size);
+ return -ENOMEM;
+ }
+ SMSM_DBG("%s: %s = %p", __func__, key, irq_out_base);
+
+ key = "qcom,smsm-edge";
+ ret = of_property_read_u32(node, key, &edge);
+ if (ret)
+ goto missing_key;
+ SMSM_DBG("%s: %s = %d", __func__, key, edge);
+
+ key = "qcom,smsm-irq-offset";
+ ret = of_property_read_u32(node, key, &irq_offset);
+ if (ret)
+ goto missing_key;
+ SMSM_DBG("%s: %s = %x", __func__, key, irq_offset);
+
+ key = "qcom,smsm-irq-bitmask";
+ ret = of_property_read_u32(node, key, &irq_bitmask);
+ if (ret)
+ goto missing_key;
+ SMSM_DBG("%s: %s = %x", __func__, key, irq_bitmask);
+
+ key = "interrupts";
+ irq_line = irq_of_parse_and_map(node, 0);
+ if (!irq_line)
+ goto missing_key;
+ SMSM_DBG("%s: %s = %d", __func__, key, irq_line);
+
+ private_irq = &private_intr_config[edge_to_pids[edge].remote_pid].smsm;
+ private_irq->out_bit_pos = irq_bitmask;
+ private_irq->out_offset = irq_offset;
+ private_irq->out_base = irq_out_base;
+ private_irq->irq_id = irq_line;
+
+ ret = request_irq(irq_line,
+ private_irq->irq_handler,
+ IRQF_TRIGGER_RISING,
+ "smsm_dev",
+ NULL);
+ if (ret < 0) {
+ pr_err("%s: request_irq() failed on %d\n", __func__, irq_line);
+ return ret;
+ } else {
+ ret = enable_irq_wake(irq_line);
+ if (ret < 0)
+ pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
+ irq_line);
+ }
+
+ if (smsm_init())
+ pr_err("smsm_init() failed\n");
+
+ smsm_irq_handler(0, 0);
+
+ return 0;
+
+missing_key:
+ pr_err("%s: missing key: %s", __func__, key);
+ return -ENODEV;
+}
+
+static int msm_smd_probe(struct platform_device *pdev)
{
uint32_t edge;
char *key;
@@ -3461,6 +3461,32 @@
unsigned long irq_flags = IRQF_TRIGGER_RISING;
const char *pilstr;
struct interrupt_config_item *private_irq;
+ struct device_node *node;
+ void *irq_out_base;
+ resource_size_t irq_out_size;
+ struct platform_device *parent_pdev;
+ struct resource *r;
+
+ node = pdev->dev.of_node;
+
+ if (!pdev->dev.parent) {
+ pr_err("%s: missing link to parent device\n", __func__);
+ return -ENODEV;
+ }
+
+ parent_pdev = to_platform_device(pdev->dev.parent);
+
+ key = "irq-reg-base";
+	/* existence check verified in smem driver */
+ r = platform_get_resource_byname(parent_pdev, IORESOURCE_MEM, key);
+ irq_out_size = resource_size(r);
+ irq_out_base = ioremap_nocache(r->start, irq_out_size);
+ if (!irq_out_base) {
+		pr_err("%s: ioremap_nocache() of irq_out_base addr:%pa size:%pa\n",
+ __func__, &r->start, &irq_out_size);
+ return -ENOMEM;
+ }
+ SMD_DBG("%s: %s = %p", __func__, key, irq_out_base);
key = "qcom,smd-edge";
ret = of_property_read_u32(node, key, &edge);
@@ -3521,68 +3547,9 @@
strlcpy(edge_to_pids[edge].subsys_name, pilstr,
SMD_MAX_CH_NAME_LEN);
- return 0;
+ edge_to_pids[edge].initialized = true;
-missing_key:
- pr_err("%s: missing key: %s", __func__, key);
- return -ENODEV;
-}
-
-static int __devinit parse_smsm_devicetree(struct device_node *node,
- void *irq_out_base)
-{
- uint32_t edge;
- char *key;
- int ret;
- uint32_t irq_offset;
- uint32_t irq_bitmask;
- uint32_t irq_line;
- struct interrupt_config_item *private_irq;
-
- key = "qcom,smsm-edge";
- ret = of_property_read_u32(node, key, &edge);
- if (ret)
- goto missing_key;
- SMD_DBG("%s: %s = %d", __func__, key, edge);
-
- key = "qcom,smsm-irq-offset";
- ret = of_property_read_u32(node, key, &irq_offset);
- if (ret)
- goto missing_key;
- SMD_DBG("%s: %s = %x", __func__, key, irq_offset);
-
- key = "qcom,smsm-irq-bitmask";
- ret = of_property_read_u32(node, key, &irq_bitmask);
- if (ret)
- goto missing_key;
- SMD_DBG("%s: %s = %x", __func__, key, irq_bitmask);
-
- key = "interrupts";
- irq_line = irq_of_parse_and_map(node, 0);
- if (!irq_line)
- goto missing_key;
- SMD_DBG("%s: %s = %d", __func__, key, irq_line);
-
- private_irq = &private_intr_config[edge_to_pids[edge].remote_pid].smsm;
- private_irq->out_bit_pos = irq_bitmask;
- private_irq->out_offset = irq_offset;
- private_irq->out_base = irq_out_base;
- private_irq->irq_id = irq_line;
-
- ret = request_irq(irq_line,
- private_irq->irq_handler,
- IRQF_TRIGGER_RISING,
- "smsm_dev",
- NULL);
- if (ret < 0) {
- pr_err("%s: request_irq() failed on %d\n", __func__, irq_line);
- return ret;
- } else {
- ret = enable_irq_wake(irq_line);
- if (ret < 0)
- pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
- irq_line);
- }
+ schedule_work(&probe_work);
return 0;
@@ -3591,213 +3558,7 @@
return -ENODEV;
}
-static void __devinit unparse_smd_devicetree(struct device_node *node)
-{
- uint32_t irq_line;
-
- irq_line = irq_of_parse_and_map(node, 0);
-
- free_irq(irq_line, NULL);
-}
-
-static void __devinit unparse_smsm_devicetree(struct device_node *node)
-{
- uint32_t irq_line;
-
- irq_line = irq_of_parse_and_map(node, 0);
-
- free_irq(irq_line, NULL);
-}
-
-static int __devinit smd_core_devicetree_init(struct platform_device *pdev)
-{
- char *key;
- struct resource *r;
- void *irq_out_base;
- phys_addr_t aux_mem_base;
- resource_size_t aux_mem_size;
- int temp_string_size = 11; /* max 3 digit count */
- char temp_string[temp_string_size];
- struct device_node *node;
- int ret;
- const char *compatible;
- struct ramdump_segment *ramdump_segments_tmp = NULL;
- struct smem_area *smem_areas_tmp = NULL;
- int smem_idx = 0;
- int subnode_num = 0;
- int i;
- resource_size_t irq_out_size;
-
- disable_smsm_reset_handshake = 1;
-
- key = "irq-reg-base";
- r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
- if (!r) {
- pr_err("%s: missing '%s'\n", __func__, key);
- return -ENODEV;
- }
- irq_out_size = resource_size(r);
- irq_out_base = ioremap_nocache(r->start, irq_out_size);
- if (!irq_out_base) {
- pr_err("%s: ioremap_nocache() of irq_out_base addr:%pr size:%pr\n",
- __func__, &r->start, &irq_out_size);
- return -ENOMEM;
- }
- SMD_DBG("%s: %s = %p", __func__, key, irq_out_base);
-
- num_smem_areas = 1;
- while (1) {
- scnprintf(temp_string, temp_string_size, "aux-mem%d",
- num_smem_areas);
- r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- temp_string);
- if (!r)
- break;
-
- ++num_smem_areas;
- if (num_smem_areas > 999) {
- pr_err("%s: max num aux mem regions reached\n",
- __func__);
- break;
- }
- }
-
- /* Initialize main SMEM region and SSR ramdump region */
- key = "smem";
- r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
- if (!r) {
- pr_err("%s: missing '%s'\n", __func__, key);
- return -ENODEV;
- }
-
- smem_areas_tmp = kmalloc_array(num_smem_areas, sizeof(struct smem_area),
- GFP_KERNEL);
- if (!smem_areas_tmp) {
- pr_err("%s: smem areas kmalloc failed\n", __func__);
- ret = -ENOMEM;
- goto free_smem_areas;
- }
-
- ramdump_segments_tmp = kmalloc_array(num_smem_areas,
- sizeof(struct ramdump_segment), GFP_KERNEL);
- if (!ramdump_segments_tmp) {
- pr_err("%s: ramdump segment kmalloc failed\n", __func__);
- ret = -ENOMEM;
- goto free_smem_areas;
- }
-
- smem_areas_tmp[smem_idx].phys_addr = r->start;
- smem_areas_tmp[smem_idx].size = resource_size(r);
- smem_areas_tmp[smem_idx].virt_addr = MSM_SHARED_RAM_BASE;
-
- ramdump_segments_tmp[smem_idx].address = r->start;
- ramdump_segments_tmp[smem_idx].size = resource_size(r);
- ++smem_idx;
-
- /* Configure auxiliary SMEM regions */
- while (1) {
- scnprintf(temp_string, temp_string_size, "aux-mem%d",
- smem_idx);
- r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- temp_string);
- if (!r)
- break;
- aux_mem_base = r->start;
- aux_mem_size = resource_size(r);
-
- ramdump_segments_tmp[smem_idx].address = aux_mem_base;
- ramdump_segments_tmp[smem_idx].size = aux_mem_size;
-
- smem_areas_tmp[smem_idx].phys_addr = aux_mem_base;
- smem_areas_tmp[smem_idx].size = aux_mem_size;
- smem_areas_tmp[smem_idx].virt_addr = ioremap_nocache(
- (unsigned long)(smem_areas_tmp[smem_idx].phys_addr),
- smem_areas_tmp[smem_idx].size);
- SMD_DBG("%s: %s = %pa %pa -> %p", __func__, temp_string,
- &aux_mem_base, &aux_mem_size,
- smem_areas_tmp[smem_idx].virt_addr);
-
- if (!smem_areas_tmp[smem_idx].virt_addr) {
- pr_err("%s: ioremap_nocache() of addr:%pa size: %pa\n",
- __func__,
- &smem_areas_tmp[smem_idx].phys_addr,
- &smem_areas_tmp[smem_idx].size);
- ret = -ENOMEM;
- goto free_smem_areas;
- }
-
- if (OVERFLOW_ADD_UNSIGNED(uintptr_t,
- (uintptr_t)smem_areas_tmp[smem_idx].virt_addr,
- smem_areas_tmp[smem_idx].size)) {
- pr_err("%s: invalid virtual address block %i: %p:%pa\n",
- __func__, smem_idx,
- smem_areas_tmp[smem_idx].virt_addr,
- &smem_areas_tmp[smem_idx].size);
- ++smem_idx;
- ret = -EINVAL;
- goto free_smem_areas;
- }
-
- ++smem_idx;
- if (smem_idx > 999) {
- pr_err("%s: max num aux mem regions reached\n",
- __func__);
- break;
- }
- }
-
- for_each_child_of_node(pdev->dev.of_node, node) {
- compatible = of_get_property(node, "compatible", NULL);
- if (!compatible) {
- pr_err("%s: invalid child node: compatible null\n",
- __func__);
- ret = -ENODEV;
- goto rollback_subnodes;
- }
- if (!strcmp(compatible, "qcom,smd")) {
- ret = parse_smd_devicetree(node, irq_out_base);
- if (ret)
- goto rollback_subnodes;
- } else if (!strcmp(compatible, "qcom,smsm")) {
- ret = parse_smsm_devicetree(node, irq_out_base);
- if (ret)
- goto rollback_subnodes;
- } else {
- pr_err("%s: invalid child node named: %s\n", __func__,
- compatible);
- ret = -ENODEV;
- goto rollback_subnodes;
- }
- ++subnode_num;
- }
-
- smem_areas = smem_areas_tmp;
- smem_ramdump_segments = ramdump_segments_tmp;
- return 0;
-
-rollback_subnodes:
- i = 0;
- for_each_child_of_node(pdev->dev.of_node, node) {
- if (i >= subnode_num)
- break;
- ++i;
- compatible = of_get_property(node, "compatible", NULL);
- if (!strcmp(compatible, "qcom,smd"))
- unparse_smd_devicetree(node);
- else
- unparse_smsm_devicetree(node);
- }
-free_smem_areas:
- for (smem_idx = smem_idx - 1; smem_idx >= 1; --smem_idx)
- iounmap(smem_areas_tmp[smem_idx].virt_addr);
-
- num_smem_areas = 0;
- kfree(ramdump_segments_tmp);
- kfree(smem_areas_tmp);
- return ret;
-}
-
-static int __devinit msm_smd_probe(struct platform_device *pdev)
+static int msm_smd_probe_legacy(struct platform_device *pdev)
{
int ret;
@@ -3805,13 +3566,6 @@
return -ENODEV;
SMD_INFO("smd probe\n");
- INIT_WORK(&probe_work, smd_channel_probe_worker);
-
- channel_close_wq = create_singlethread_workqueue("smd_channel_close");
- if (IS_ERR(channel_close_wq)) {
- pr_err("%s: create_singlethread_workqueue ENOMEM\n", __func__);
- return -ENOMEM;
- }
if (smsm_init()) {
pr_err("smsm_init() failed\n");
@@ -3820,13 +3574,8 @@
if (pdev) {
if (pdev->dev.of_node) {
- ret = smd_core_devicetree_init(pdev);
- if (ret) {
- pr_err("%s: device tree init failed\n",
- __func__);
- return ret;
- }
- smd_dev = &pdev->dev;
+ pr_err("%s: invalid device tree init\n", __func__);
+ return -ENODEV;
} else if (pdev->dev.platform_data) {
ret = smd_core_platform_init(pdev);
if (ret) {
@@ -3872,6 +3621,8 @@
unsigned long code,
void *data)
{
+ remote_spinlock_t *remote_spinlock;
+
/*
* Some SMD or SMSM clients assume SMD/SMSM SSR handling will be
* done in the AFTER_SHUTDOWN level. If this ever changes, extra
@@ -3886,7 +3637,8 @@
__func__, notifier->processor,
notifier->name);
- remote_spin_release(&remote_spinlock, notifier->processor);
+ remote_spinlock = smem_get_remote_spinlock();
+ remote_spin_release(remote_spinlock, notifier->processor);
remote_spin_release_all(notifier->processor);
smd_channel_reset(notifier->processor);
@@ -3912,17 +3664,39 @@
}
late_initcall(modem_restart_late_init);
-static struct of_device_id msm_smem_match_table[] = {
- { .compatible = "qcom,smem" },
+static struct of_device_id msm_smd_match_table[] = {
+ { .compatible = "qcom,smd" },
{},
};
static struct platform_driver msm_smd_driver = {
.probe = msm_smd_probe,
.driver = {
+ .name = "msm_smd_dt",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_smd_match_table,
+ },
+};
+
+static struct of_device_id msm_smsm_match_table[] = {
+ { .compatible = "qcom,smsm" },
+ {},
+};
+
+static struct platform_driver msm_smsm_driver = {
+ .probe = msm_smsm_probe,
+ .driver = {
+ .name = "msm_smsm",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_smsm_match_table,
+ },
+};
+
+static struct platform_driver msm_smd_driver_legacy = {
+ .probe = msm_smd_probe_legacy,
+ .driver = {
.name = MODULE_NAME,
.owner = THIS_MODULE,
- .of_match_table = msm_smem_match_table,
},
};
@@ -3941,9 +3715,19 @@
}
registered = true;
- rc = init_smem_remote_spinlock();
+
+ INIT_WORK(&probe_work, smd_channel_probe_worker);
+
+ channel_close_wq = create_singlethread_workqueue("smd_channel_close");
+ if (!channel_close_wq) {
+ pr_err("%s: create_singlethread_workqueue ENOMEM\n", __func__);
+ return -ENOMEM;
+ }
+
+ rc = platform_driver_register(&msm_smd_driver_legacy);
if (rc) {
- pr_err("%s: remote spinlock init failed %d\n", __func__, rc);
+ pr_err("%s: msm_smd_driver_legacy register failed %d\n",
+ __func__, rc);
return rc;
}
@@ -3954,7 +3738,12 @@
return rc;
}
- smd_module_init_notify(0, NULL);
+ rc = platform_driver_register(&msm_smsm_driver);
+ if (rc) {
+ pr_err("%s: msm_smsm_driver register failed %d\n",
+ __func__, rc);
+ return rc;
+ }
return 0;
}
diff --git a/arch/arm/mach-msm/smem.c b/arch/arm/mach-msm/smem.c
index bbb6ce0..3ad2e4d 100644
--- a/arch/arm/mach-msm/smem.c
+++ b/arch/arm/mach-msm/smem.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/printk.h>
+#include <linux/notifier.h>
#include <mach/board.h>
#include <mach/msm_iomap.h>
@@ -53,15 +54,20 @@
pr_debug(x); \
} while (0)
-remote_spinlock_t remote_spinlock;
-int spinlocks_initialized;
-uint32_t num_smem_areas;
-struct smem_area *smem_areas;
-struct ramdump_segment *smem_ramdump_segments;
+#define SMEM_SPINLOCK_SMEM_ALLOC "S:3"
+static remote_spinlock_t remote_spinlock;
+static uint32_t num_smem_areas;
+static struct smem_area *smem_areas;
+static struct ramdump_segment *smem_ramdump_segments;
+static int spinlocks_initialized;
static void *smem_ramdump_dev;
static DEFINE_MUTEX(spinlock_init_lock);
static DEFINE_SPINLOCK(smem_init_check_lock);
+static int smem_module_inited;
+static RAW_NOTIFIER_HEAD(smem_module_init_notifier_list);
+static DEFINE_MUTEX(smem_module_init_notifier_lock);
+
struct restart_notifier_block {
unsigned processor;
@@ -82,6 +88,8 @@
{SMEM_Q6, "adsp", .nb.notifier_call = restart_notifier_cb},
};
+static int init_smem_remote_spinlock(void);
+
/**
* smem_phys_to_virt() - Convert a physical base and offset to virtual address
*
@@ -192,11 +200,21 @@
}
EXPORT_SYMBOL(smem_alloc);
-static void *__smem_get_entry(unsigned id, unsigned *size, bool skip_init_check)
+/**
+ * __smem_get_entry - Get pointer and size of existing SMEM item
+ *
+ * @id: ID of SMEM item
+ * @size: Pointer to size variable for storing the result
+ * @skip_init_check: True means do not verify that SMEM has been initialized
+ * @use_rspinlock: True to use the remote spinlock
+ * @returns: Pointer to SMEM item or NULL if it doesn't exist
+ */
+static void *__smem_get_entry(unsigned id, unsigned *size,
+ bool skip_init_check, bool use_rspinlock)
{
struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
struct smem_heap_entry *toc = shared->heap_toc;
- int use_spinlocks = spinlocks_initialized;
+ int use_spinlocks = spinlocks_initialized && use_rspinlock;
void *ret = 0;
unsigned long flags = 0;
@@ -233,7 +251,7 @@
unsigned size;
void *ptr;
- ptr = __smem_get_entry(id, &size, skip_init_check);
+ ptr = __smem_get_entry(id, &size, skip_init_check, true);
if (!ptr)
return 0;
@@ -312,10 +330,26 @@
void *smem_get_entry(unsigned id, unsigned *size)
{
- return __smem_get_entry(id, size, false);
+ return __smem_get_entry(id, size, false, true);
}
EXPORT_SYMBOL(smem_get_entry);
+/**
+ * smem_get_entry_no_rlock - Get existing item without using remote spinlock
+ *
+ * @id: ID of SMEM item
+ * @size_out: Pointer to size variable for storing the result
+ * @returns: Pointer to SMEM item or NULL if it doesn't exist
+ *
+ * This function does not lock the remote spinlock and should only be used in
+ * failure-recover cases such as retrieving the subsystem failure reason during
+ * subsystem restart.
+ */
+void *smem_get_entry_no_rlock(unsigned id, unsigned *size_out)
+{
+ return __smem_get_entry(id, size_out, false, false);
+}
+EXPORT_SYMBOL(smem_get_entry_no_rlock);
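A minimal caller sketch for the lock-free accessor (illustrative only; the item
ID SMEM_SSR_REASON_MSS0 and its use here are assumptions, not part of this
patch):

	static void example_read_ssr_reason(void)
	{
		unsigned size;
		char *reason;

		/* safe during SSR: does not take the remote spinlock */
		reason = smem_get_entry_no_rlock(SMEM_SSR_REASON_MSS0, &size);
		if (!reason || !size)
			return;

		/* SMEM strings are not guaranteed to be NUL-terminated */
		pr_err("subsystem failure reason: %.*s\n", (int)size, reason);
	}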
/**
* smem_get_remote_spinlock - Remote spinlock pointer for unit testing.
@@ -333,7 +367,7 @@
*
* @returns: success or error code for failure
*/
-int init_smem_remote_spinlock(void)
+static int init_smem_remote_spinlock(void)
{
int rc = 0;
@@ -473,3 +507,223 @@
return 0;
}
late_initcall(modem_restart_late_init);
+
+int smem_module_init_notifier_register(struct notifier_block *nb)
+{
+ int ret;
+ if (!nb)
+ return -EINVAL;
+ mutex_lock(&smem_module_init_notifier_lock);
+ ret = raw_notifier_chain_register(&smem_module_init_notifier_list, nb);
+ if (smem_module_inited)
+ nb->notifier_call(nb, 0, NULL);
+ mutex_unlock(&smem_module_init_notifier_lock);
+ return ret;
+}
+EXPORT_SYMBOL(smem_module_init_notifier_register);
+
+int smem_module_init_notifier_unregister(struct notifier_block *nb)
+{
+ int ret;
+ if (!nb)
+ return -EINVAL;
+ mutex_lock(&smem_module_init_notifier_lock);
+ ret = raw_notifier_chain_unregister(&smem_module_init_notifier_list,
+ nb);
+ mutex_unlock(&smem_module_init_notifier_lock);
+ return ret;
+}
+EXPORT_SYMBOL(smem_module_init_notifier_unregister);
+
+static void smem_module_init_notify(uint32_t state, void *data)
+{
+ mutex_lock(&smem_module_init_notifier_lock);
+ smem_module_inited = 1;
+ raw_notifier_call_chain(&smem_module_init_notifier_list,
+ state, data);
+ mutex_unlock(&smem_module_init_notifier_lock);
+}
+
+static int msm_smem_probe(struct platform_device *pdev)
+{
+ char *key;
+ struct resource *r;
+ phys_addr_t aux_mem_base;
+ resource_size_t aux_mem_size;
+ int temp_string_size = 11; /* "aux-mem" + up to 3 digits + NUL */
+ char temp_string[temp_string_size];
+ int ret;
+ struct ramdump_segment *ramdump_segments_tmp = NULL;
+ struct smem_area *smem_areas_tmp = NULL;
+ int smem_idx = 0;
+
+ if (!smem_initialized_check())
+ return -ENODEV;
+
+ key = "irq-reg-base";
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
+ if (!r) {
+ pr_err("%s: missing '%s'\n", __func__, key);
+ return -ENODEV;
+ }
+
+ num_smem_areas = 1;
+ while (1) {
+ scnprintf(temp_string, temp_string_size, "aux-mem%d",
+ num_smem_areas);
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ temp_string);
+ if (!r)
+ break;
+
+ ++num_smem_areas;
+ if (num_smem_areas > 999) {
+ pr_err("%s: max num aux mem regions reached\n",
+ __func__);
+ break;
+ }
+ }
+ /* Initialize main SMEM region and SSR ramdump region */
+ key = "smem";
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
+ if (!r) {
+ pr_err("%s: missing '%s'\n", __func__, key);
+ return -ENODEV;
+ }
+
+ smem_areas_tmp = kmalloc_array(num_smem_areas, sizeof(struct smem_area),
+ GFP_KERNEL);
+ if (!smem_areas_tmp) {
+ pr_err("%s: smem areas kmalloc failed\n", __func__);
+ ret = -ENOMEM;
+ goto free_smem_areas;
+ }
+
+ ramdump_segments_tmp = kmalloc_array(num_smem_areas,
+ sizeof(struct ramdump_segment), GFP_KERNEL);
+ if (!ramdump_segments_tmp) {
+ pr_err("%s: ramdump segment kmalloc failed\n", __func__);
+ ret = -ENOMEM;
+ goto free_smem_areas;
+ }
+ smem_areas_tmp[smem_idx].phys_addr = r->start;
+ smem_areas_tmp[smem_idx].size = resource_size(r);
+ smem_areas_tmp[smem_idx].virt_addr = MSM_SHARED_RAM_BASE;
+
+ ramdump_segments_tmp[smem_idx].address = r->start;
+ ramdump_segments_tmp[smem_idx].size = resource_size(r);
+ ++smem_idx;
+
+ /* Configure auxiliary SMEM regions */
+ while (1) {
+ scnprintf(temp_string, temp_string_size, "aux-mem%d",
+ smem_idx);
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ temp_string);
+ if (!r)
+ break;
+ aux_mem_base = r->start;
+ aux_mem_size = resource_size(r);
+
+ ramdump_segments_tmp[smem_idx].address = aux_mem_base;
+ ramdump_segments_tmp[smem_idx].size = aux_mem_size;
+
+ smem_areas_tmp[smem_idx].phys_addr = aux_mem_base;
+ smem_areas_tmp[smem_idx].size = aux_mem_size;
+ smem_areas_tmp[smem_idx].virt_addr = ioremap_nocache(
+ (unsigned long)(smem_areas_tmp[smem_idx].phys_addr),
+ smem_areas_tmp[smem_idx].size);
+ SMEM_DBG("%s: %s = %pa %pa -> %p", __func__, temp_string,
+ &aux_mem_base, &aux_mem_size,
+ smem_areas_tmp[smem_idx].virt_addr);
+
+ if (!smem_areas_tmp[smem_idx].virt_addr) {
+ pr_err("%s: ioremap_nocache() of addr:%pa size: %pa\n",
+ __func__,
+ &smem_areas_tmp[smem_idx].phys_addr,
+ &smem_areas_tmp[smem_idx].size);
+ ret = -ENOMEM;
+ goto free_smem_areas;
+ }
+
+ if (OVERFLOW_ADD_UNSIGNED(uintptr_t,
+ (uintptr_t)smem_areas_tmp[smem_idx].virt_addr,
+ smem_areas_tmp[smem_idx].size)) {
+ pr_err("%s: invalid virtual address block %i: %p:%pa\n",
+ __func__, smem_idx,
+ smem_areas_tmp[smem_idx].virt_addr,
+ &smem_areas_tmp[smem_idx].size);
+ ++smem_idx;
+ ret = -EINVAL;
+ goto free_smem_areas;
+ }
+
+ ++smem_idx;
+ if (smem_idx > 999) {
+ pr_err("%s: max num aux mem regions reached\n",
+ __func__);
+ break;
+ }
+ }
+
+ ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+ if (ret)
+ pr_err("%s: of_platform_populate failed %d\n", __func__, ret);
+
+ smem_areas = smem_areas_tmp;
+ smem_ramdump_segments = ramdump_segments_tmp;
+ return 0;
+
+free_smem_areas:
+ for (smem_idx = smem_idx - 1; smem_idx >= 1; --smem_idx)
+ iounmap(smem_areas_tmp[smem_idx].virt_addr);
+
+ num_smem_areas = 0;
+ kfree(ramdump_segments_tmp);
+ kfree(smem_areas_tmp);
+ return ret;
+}
+
+static struct of_device_id msm_smem_match_table[] = {
+ { .compatible = "qcom,smem" },
+ {},
+};
+
+static struct platform_driver msm_smem_driver = {
+ .probe = msm_smem_probe,
+ .driver = {
+ .name = "msm_smem",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_smem_match_table,
+ },
+};
+
+int __init msm_smem_init(void)
+{
+ static bool registered;
+ int rc;
+
+ if (registered)
+ return 0;
+
+ registered = true;
+
+ rc = init_smem_remote_spinlock();
+ if (rc) {
+ pr_err("%s: remote spinlock init failed %d\n", __func__, rc);
+ return rc;
+ }
+
+ rc = platform_driver_register(&msm_smem_driver);
+ if (rc) {
+ pr_err("%s: msm_smem_driver register failed %d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ smem_module_init_notify(0, NULL);
+
+ return 0;
+}
+
+module_init(msm_smem_init);
diff --git a/arch/arm/mach-msm/smem_log.c b/arch/arm/mach-msm/smem_log.c
index 87f141d2..6f20e24 100644
--- a/arch/arm/mach-msm/smem_log.c
+++ b/arch/arm/mach-msm/smem_log.c
@@ -39,6 +39,7 @@
#include "smd_private.h"
#include "smd_rpc_sym.h"
#include "modem_notifier.h"
+#include "smem_private.h"
#define DEBUG
#undef DEBUG
@@ -2005,7 +2006,7 @@
return ret;
}
-static int smd_module_init_notifier(struct notifier_block *this,
+static int smem_module_init_notifier(struct notifier_block *this,
unsigned long code,
void *_cmd)
{
@@ -2016,12 +2017,12 @@
}
static struct notifier_block nb = {
- .notifier_call = smd_module_init_notifier,
+ .notifier_call = smem_module_init_notifier,
};
static int __init smem_log_init(void)
{
- return smd_module_init_notifier_register(&nb);
+ return smem_module_init_notifier_register(&nb);
}
diff --git a/arch/arm/mach-msm/smem_private.h b/arch/arm/mach-msm/smem_private.h
index c4f9a77..ceb8028 100644
--- a/arch/arm/mach-msm/smem_private.h
+++ b/arch/arm/mach-msm/smem_private.h
@@ -17,10 +17,6 @@
#include <mach/ramdump.h>
-#define SMEM_SPINLOCK_SMEM_ALLOC "S:3"
-extern remote_spinlock_t remote_spinlock;
-extern int spinlocks_initialized; /* only modify in init_smem_remote_spinlock */
-
#define SMD_HEAP_SIZE 512
struct smem_heap_info {
@@ -58,19 +54,26 @@
void __iomem *virt_addr;
};
-extern uint32_t num_smem_areas;
-extern struct smem_area *smem_areas;
-
-extern struct ramdump_segment *smem_ramdump_segments;
-
/* used for unit testing spinlocks */
remote_spinlock_t *smem_get_remote_spinlock(void);
-/*
- * used to ensure the remote spinlock is only inited once since local
- * spinlock init code appears non-reentrant
- */
-int init_smem_remote_spinlock(void);
-
bool smem_initialized_check(void);
+
+/**
+ * smem_module_init_notifier_register() - Register a smem module
+ * init notifier block
+ * @nb: Notifier block to be registered
+ *
+ * To mark a dependency on SMEM driver module initialization, register a
+ * notifier using this API. Once the SMEM module_init completes, a
+ * notification is passed to the registered module.
+ */
+int smem_module_init_notifier_register(struct notifier_block *nb);
+
+/**
+ * smem_module_init_notifier_unregister() - Unregister a smem module
+ * init notifier block
+ * @nb: Notifier block to be unregistered
+ */
+int smem_module_init_notifier_unregister(struct notifier_block *nb);
#endif /* _ARCH_ARM_MACH_MSM_SMEM_PRIVATE_H_ */
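A hedged sketch of a client of the notifier API declared above; the callback
and init function names are illustrative, and linux/notifier.h is assumed to
be included:

	static int example_smem_ready_cb(struct notifier_block *this,
					 unsigned long code, void *data)
	{
		/* SMEM has probed; smem_alloc()/smem_get_entry() are usable */
		return NOTIFY_DONE;
	}

	static struct notifier_block example_nb = {
		.notifier_call = example_smem_ready_cb,
	};

	static int __init example_client_init(void)
	{
		return smem_module_init_notifier_register(&example_nb);
	}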
diff --git a/arch/arm/mach-msm/socinfo.c b/arch/arm/mach-msm/socinfo.c
index 575cb49..41509f7 100644
--- a/arch/arm/mach-msm/socinfo.c
+++ b/arch/arm/mach-msm/socinfo.c
@@ -282,6 +282,24 @@
[185] = MSM_CPU_8974,
[186] = MSM_CPU_8974,
+ /* 8974AA IDs */
+ [208] = MSM_CPU_8974PRO_AA,
+ [211] = MSM_CPU_8974PRO_AA,
+ [214] = MSM_CPU_8974PRO_AA,
+ [217] = MSM_CPU_8974PRO_AA,
+
+ /* 8974AB IDs */
+ [209] = MSM_CPU_8974PRO_AB,
+ [212] = MSM_CPU_8974PRO_AB,
+ [215] = MSM_CPU_8974PRO_AB,
+ [218] = MSM_CPU_8974PRO_AB,
+
+ /* 8974AC IDs */
+ [194] = MSM_CPU_8974PRO_AC,
+ [210] = MSM_CPU_8974PRO_AC,
+ [213] = MSM_CPU_8974PRO_AC,
+ [216] = MSM_CPU_8974PRO_AC,
+
/* 8625 IDs */
[127] = MSM_CPU_8625,
[128] = MSM_CPU_8625,
diff --git a/block/row-iosched.c b/block/row-iosched.c
index e71f6af..3fa3b1a 100644
--- a/block/row-iosched.c
+++ b/block/row-iosched.c
@@ -97,7 +97,7 @@
/* Default values for idling on read queues (in msec) */
#define ROW_IDLE_TIME_MSEC 5
-#define ROW_READ_FREQ_MSEC 20
+#define ROW_READ_FREQ_MSEC 5
/**
* struct rowq_idling_data - parameters for idling on the queue
@@ -331,6 +331,10 @@
struct row_queue *rqueue = RQ_ROWQ(rq);
s64 diff_ms;
bool queue_was_empty = list_empty(&rqueue->fifo);
+ unsigned long bv_page_flags = 0;
+
+ if (rq->bio && rq->bio->bi_io_vec && rq->bio->bi_io_vec->bv_page)
+ bv_page_flags = rq->bio->bi_io_vec->bv_page->flags;
list_add_tail(&rq->queuelist, &rqueue->fifo);
rd->nr_reqs[rq_data_dir(rq)]++;
@@ -360,7 +364,9 @@
rqueue->idle_data.begin_idling = false;
return;
}
- if (diff_ms < rd->rd_idle_data.freq_ms) {
+
+ if ((bv_page_flags & (1L << PG_readahead)) ||
+ (diff_ms < rd->rd_idle_data.freq_ms)) {
rqueue->idle_data.begin_idling = true;
row_log_rowq(rd, rqueue->prio, "Enable idling");
} else {
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index ecd3c0d..ed266dc 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -214,6 +214,11 @@
"a330_pm4.fw", "a330_pfp.fw", &adreno_a3xx_gpudev,
512, 0, 2, SZ_128K, NO_VER, NO_VER, 0x8AD, 0x2E4,
0x201, 0x200 },
+ /* 8226v2 */
+ { ADRENO_REV_A305B, 3, 0, 5, 0x12,
+ "a330_pm4.fw", "a330_pfp.fw", &adreno_a3xx_gpudev,
+ 512, 0, 2, SZ_128K, NO_VER, NO_VER, 0x8AD, 0x2E4,
+ 0x201, 0x200 },
{ ADRENO_REV_A305C, 3, 0, 5, 0x20,
"a300_pm4.fw", "a300_pfp.fw", &adreno_a3xx_gpudev,
512, 0, 2, SZ_128K, 0x3FF037, 0x3FF016 },
@@ -253,6 +258,10 @@
struct adreno_perfcount_group *group;
unsigned int i, j;
+ /* perfcounter start does nothing on a2xx */
+ if (adreno_is_a2xx(adreno_dev))
+ return;
+
/* group id iter */
for (i = 0; i < counters->group_count; i++) {
group = &(counters->groups[i]);
diff --git a/drivers/hwmon/qpnp-adc-voltage.c b/drivers/hwmon/qpnp-adc-voltage.c
index 4306b1d..309944e 100644
--- a/drivers/hwmon/qpnp-adc-voltage.c
+++ b/drivers/hwmon/qpnp-adc-voltage.c
@@ -1164,14 +1164,14 @@
rc = qpnp_vadc_read_reg(QPNP_INT_TEST_VAL, &fab_id);
if (rc < 0) {
pr_err("qpnp adc comp id failed with %d\n", rc);
- return rc;
+ goto fail;
}
vadc->id = fab_id;
rc = qpnp_vadc_warm_rst_configure();
if (rc < 0) {
pr_err("Setting perp reset on warm reset failed %d\n", rc);
- return rc;
+ goto fail;
}
vadc->vadc_initialized = true;
diff --git a/drivers/input/touchscreen/synaptics_fw_update.c b/drivers/input/touchscreen/synaptics_fw_update.c
index 349b020..c01ab0e 100644
--- a/drivers/input/touchscreen/synaptics_fw_update.c
+++ b/drivers/input/touchscreen/synaptics_fw_update.c
@@ -29,7 +29,6 @@
#include <linux/input/synaptics_dsx.h>
#include "synaptics_i2c_rmi4.h"
-#define DEBUG_FW_UPDATE
#define SHOW_PROGRESS
#define MAX_FIRMWARE_ID_LEN 10
#define FORCE_UPDATE false
@@ -53,7 +52,13 @@
#define BLOCK_NUMBER_OFFSET 0
#define BLOCK_DATA_OFFSET 2
-#define NAME_BUFFER_SIZE 128
+#define RMI4_INFO_MAX_LEN 200
+
+#define RMI4_STORE_TS_INFO(buf, id, rev, fw_ver) \
+ snprintf(buf, RMI4_INFO_MAX_LEN, \
+ "controller\t= synaptics\n" \
+ "model\t\t= %d rev %d\n" \
+ "fw_ver\t\t= %d\n", id, rev, fw_ver)
enum falsh_config_area {
UI_CONFIG_AREA = 0x00,
@@ -77,7 +82,8 @@
enum flash_area {
NONE,
UI_FIRMWARE,
- CONFIG_AREA
+ CONFIG_AREA,
+ MISMATCH
};
enum image_file_option {
@@ -99,53 +105,6 @@
#define SLEEP_TIME_US 50
-static ssize_t fwu_sysfs_show_image(struct file *data_file,
- struct kobject *kobj, struct bin_attribute *attributes,
- char *buf, loff_t pos, size_t count);
-
-static ssize_t fwu_sysfs_store_image(struct file *data_file,
- struct kobject *kobj, struct bin_attribute *attributes,
- char *buf, loff_t pos, size_t count);
-
-static ssize_t fwu_sysfs_force_reflash_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count);
-
-static ssize_t fwu_sysfs_do_reflash_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count);
-
-static ssize_t fwu_sysfs_write_config_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count);
-
-static ssize_t fwu_sysfs_read_config_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count);
-
-static ssize_t fwu_sysfs_config_area_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count);
-
-static ssize_t fwu_sysfs_image_size_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count);
-
-static ssize_t fwu_sysfs_block_size_show(struct device *dev,
- struct device_attribute *attr, char *buf);
-
-static ssize_t fwu_sysfs_firmware_block_count_show(struct device *dev,
- struct device_attribute *attr, char *buf);
-
-static ssize_t fwu_sysfs_configuration_block_count_show(struct device *dev,
- struct device_attribute *attr, char *buf);
-
-static ssize_t fwu_sysfs_perm_config_block_count_show(struct device *dev,
- struct device_attribute *attr, char *buf);
-
-static ssize_t fwu_sysfs_bl_config_block_count_show(struct device *dev,
- struct device_attribute *attr, char *buf);
-
-static ssize_t fwu_sysfs_disp_config_block_count_show(struct device *dev,
- struct device_attribute *attr, char *buf);
-
-static ssize_t fwu_sysfs_config_id_show(struct device *dev,
- struct device_attribute *attr, char *buf);
-
static int fwu_wait_for_idle(int timeout_ms);
struct image_header_data {
@@ -163,10 +122,10 @@
unsigned char config_size[4];
/* 0x10-0x1F */
unsigned char product_id[SYNAPTICS_RMI4_PRODUCT_ID_SIZE];
- unsigned char reserved_1a;
- unsigned char reserved_1b;
- unsigned char reserved_1c;
- unsigned char reserved_1d;
+ unsigned char pkg_id_lsb;
+ unsigned char pkg_id_msb;
+ unsigned char pkg_id_rev_lsb;
+ unsigned char pkg_id_rev_msb;
unsigned char product_info[SYNAPTICS_RMI4_PRODUCT_INFO_SIZE];
/* 0x20-0x2F */
unsigned char reserved_20_2f[0x10];
@@ -178,7 +137,7 @@
/* 0x50-0x53*/
unsigned char firmware_id[4];
} __packed;
- unsigned char data[54];
+ unsigned char data[0x54];
};
};
@@ -190,6 +149,8 @@
unsigned char bootloader_version;
unsigned char product_id[SYNAPTICS_RMI4_PRODUCT_ID_SIZE + 1];
unsigned char product_info[SYNAPTICS_RMI4_PRODUCT_INFO_SIZE];
+ u16 package_id;
+ u16 package_revision_id;
unsigned int firmware_id;
bool is_contain_build_info;
};
@@ -290,59 +251,8 @@
struct f34_flash_properties flash_properties;
struct workqueue_struct *fwu_workqueue;
struct delayed_work fwu_work;
- char *firmware_name;
-};
-
-static struct bin_attribute dev_attr_data = {
- .attr = {
- .name = "data",
- .mode = (S_IRUGO | S_IWUGO),
- },
- .size = 0,
- .read = fwu_sysfs_show_image,
- .write = fwu_sysfs_store_image,
-};
-
-static struct device_attribute attrs[] = {
- __ATTR(force_update_fw, S_IWUGO,
- synaptics_rmi4_show_error,
- fwu_sysfs_force_reflash_store),
- __ATTR(update_fw, S_IWUGO,
- synaptics_rmi4_show_error,
- fwu_sysfs_do_reflash_store),
- __ATTR(writeconfig, S_IWUGO,
- synaptics_rmi4_show_error,
- fwu_sysfs_write_config_store),
- __ATTR(readconfig, S_IWUGO,
- synaptics_rmi4_show_error,
- fwu_sysfs_read_config_store),
- __ATTR(configarea, S_IWUGO,
- synaptics_rmi4_show_error,
- fwu_sysfs_config_area_store),
- __ATTR(imagesize, S_IWUGO,
- synaptics_rmi4_show_error,
- fwu_sysfs_image_size_store),
- __ATTR(blocksize, S_IRUGO,
- fwu_sysfs_block_size_show,
- synaptics_rmi4_store_error),
- __ATTR(fwblockcount, S_IRUGO,
- fwu_sysfs_firmware_block_count_show,
- synaptics_rmi4_store_error),
- __ATTR(configblockcount, S_IRUGO,
- fwu_sysfs_configuration_block_count_show,
- synaptics_rmi4_store_error),
- __ATTR(permconfigblockcount, S_IRUGO,
- fwu_sysfs_perm_config_block_count_show,
- synaptics_rmi4_store_error),
- __ATTR(blconfigblockcount, S_IRUGO,
- fwu_sysfs_bl_config_block_count_show,
- synaptics_rmi4_store_error),
- __ATTR(dispconfigblockcount, S_IRUGO,
- fwu_sysfs_disp_config_block_count_show,
- synaptics_rmi4_store_error),
- __ATTR(config_id, S_IRUGO,
- fwu_sysfs_config_id_show,
- synaptics_rmi4_store_error),
+ char firmware_name[NAME_BUFFER_SIZE];
+ char *ts_info;
};
static struct synaptics_rmi4_fwu_handle *fwu;
@@ -365,6 +275,26 @@
(unsigned int)ptr[3] * 0x1000000;
}
+static void synaptics_rmi4_update_debug_info(void)
+{
+ unsigned char pkg_id[4];
+ unsigned int build_id;
+ struct synaptics_rmi4_device_info *rmi;
+ /* read device package id */
+ fwu->fn_ptr->read(fwu->rmi4_data,
+ fwu->f01_fd.query_base_addr + 17,
+ pkg_id,
+ sizeof(pkg_id));
+ rmi = &(fwu->rmi4_data->rmi4_mod_info);
+
+ build_id = (unsigned int)rmi->build_id[0] +
+ (unsigned int)rmi->build_id[1] * 0x100 +
+ (unsigned int)rmi->build_id[2] * 0x10000;
+
+ RMI4_STORE_TS_INFO(fwu->ts_info, pkg_id[1] << 8 | pkg_id[0],
+ pkg_id[3] << 8 | pkg_id[2], build_id);
+}
+
static void parse_header(struct image_header *header,
const unsigned char *fw_image)
{
@@ -375,25 +305,32 @@
header->config_size = extract_uint(data->config_size);
memcpy(header->product_id, data->product_id,
sizeof(data->product_id));
- header->product_id[sizeof(data->product_info)] = 0;
+ header->product_id[sizeof(data->product_id)] = 0;
+
memcpy(header->product_info, data->product_info,
sizeof(data->product_info));
header->is_contain_build_info =
(data->options_firmware_id == (1 << OPTION_BUILD_INFO));
if (header->is_contain_build_info) {
+ header->package_id = (data->pkg_id_msb << 8) |
+ data->pkg_id_lsb;
+ header->package_revision_id = (data->pkg_id_rev_msb << 8) |
+ data->pkg_id_rev_lsb;
+ dev_info(&fwu->rmi4_data->i2c_client->dev,
+ "%s Package ID %d Rev %d\n", __func__,
+ header->package_id, header->package_revision_id);
+
header->firmware_id = extract_uint(data->firmware_id);
dev_info(&fwu->rmi4_data->i2c_client->dev,
"%s Firwmare build id %d\n", __func__,
header->firmware_id);
}
-#ifdef DEBUG_FW_UPDATE
- dev_info(&fwu->rmi4_data->i2c_client->dev,
+ dev_dbg(&fwu->rmi4_data->i2c_client->dev,
"Firwmare size %d, config size %d\n",
header->image_size,
header->config_size);
-#endif
return;
}
@@ -544,11 +481,9 @@
{
int retval;
-#ifdef DEBUG_FW_UPDATE
- dev_info(&fwu->rmi4_data->i2c_client->dev,
+ dev_dbg(&fwu->rmi4_data->i2c_client->dev,
"%s: Reset device\n",
__func__);
-#endif
retval = fwu->rmi4_data->reset_device(fwu->rmi4_data);
if (retval < 0) {
@@ -613,6 +548,7 @@
unsigned long imageFirmwareID;
unsigned char firmware_id[4];
unsigned char config_id[4];
+ unsigned char pkg_id[4];
char *strptr;
char *imagePR = kzalloc(MAX_FIRMWARE_ID_LEN, GFP_KERNEL);
enum flash_area flash_area = NONE;
@@ -624,6 +560,24 @@
goto exit;
}
+ if (header->is_contain_build_info) {
+ /* if package id does not match, do not update firmware */
+ fwu->fn_ptr->read(fwu->rmi4_data,
+ fwu->f01_fd.query_base_addr + 17,
+ pkg_id,
+ sizeof(pkg_id));
+
+ if (header->package_id != ((pkg_id[1] << 8) | pkg_id[0])) {
+ flash_area = MISMATCH;
+ goto exit;
+ }
+ if (header->package_revision_id !=
+ ((pkg_id[3] << 8) | pkg_id[2])) {
+ flash_area = MISMATCH;
+ goto exit;
+ }
+ }
+
retval = fwu_read_f01_device_status(&f01_device_status);
if (retval < 0) {
flash_area = NONE;
@@ -734,10 +688,13 @@
flash_area = CONFIG_AREA;
goto exit;
}
-
exit:
kfree(imagePR);
- if (flash_area == NONE)
+ if (flash_area == MISMATCH)
+ dev_info(&i2c_client->dev,
+ "%s: Package ID indicates mismatch of firmware and" \
+ " controller compatibility\n", __func__);
+ else if (flash_area == NONE)
dev_info(&i2c_client->dev,
"%s: Nothing needs to be updated\n", __func__);
else
@@ -759,9 +716,7 @@
bool f34found = false;
struct synaptics_rmi4_fn_desc rmi_fd;
-#ifdef DEBUG_FW_UPDATE
- dev_info(&fwu->rmi4_data->i2c_client->dev, "Scan PDT\n");
-#endif
+ dev_dbg(&fwu->rmi4_data->i2c_client->dev, "Scan PDT\n");
for (addr = PDT_START; addr > PDT_END; addr -= PDT_ENTRY_SIZE) {
retval = fwu->fn_ptr->read(fwu->rmi4_data,
@@ -824,13 +779,11 @@
10 : 100;
#endif
-#ifdef DEBUG_FW_UPDATE
- dev_info(&i2c_client->dev,
+ dev_dbg(&i2c_client->dev,
"%s: Start to update %s blocks\n",
__func__,
command == CMD_WRITE_CONFIG_BLOCK ?
"config" : "firmware");
-#endif
retval = fwu->fn_ptr->write(fwu->rmi4_data,
fwu->f34_fd.data_base_addr + BLOCK_NUMBER_OFFSET,
block_offset,
@@ -915,12 +868,11 @@
{
int retval;
-#ifdef DEBUG_FW_UPDATE
- dev_info(&fwu->rmi4_data->i2c_client->dev,
+ dev_dbg(&fwu->rmi4_data->i2c_client->dev,
"Write bootloader ID 0x%02X 0x%02X\n",
fwu->bootloader_id[0],
fwu->bootloader_id[1]);
-#endif
+
retval = fwu->fn_ptr->write(fwu->rmi4_data,
fwu->f34_fd.data_base_addr + BLOCK_DATA_OFFSET,
fwu->bootloader_id,
@@ -941,9 +893,8 @@
struct f01_device_status f01_device_status;
struct f01_device_control f01_device_control;
-#ifdef DEBUG_FW_UPDATE
- dev_info(&fwu->rmi4_data->i2c_client->dev, "Enter bootloader mode\n");
-#endif
+ dev_dbg(&fwu->rmi4_data->i2c_client->dev, "Enter bootloader mode\n");
+
retval = fwu_read_f01_device_status(&f01_device_status);
if (retval < 0)
return retval;
@@ -1302,26 +1253,23 @@
pr_notice("%s: Start of reflash process\n", __func__);
- if (!fwu->rmi4_data->fw_image_name) {
- retval = 0;
+ if (strnlen(fwu->rmi4_data->fw_image_name, NAME_BUFFER_SIZE) == 0) {
dev_err(&fwu->rmi4_data->i2c_client->dev,
"Firmware image name not given, skipping update\n");
- goto exit;
+ return 0;
+ }
+
+ if (strnlen(fwu->rmi4_data->fw_image_name, NAME_BUFFER_SIZE) ==
+ NAME_BUFFER_SIZE) {
+ dev_err(&fwu->rmi4_data->i2c_client->dev,
+ "Firmware image name exceeds max length (%d), " \
+ "skipping update\n", NAME_BUFFER_SIZE);
+ return 0;
}
if (fwu->ext_data_source)
fw_image = fwu->ext_data_source;
else {
- fwu->firmware_name = kcalloc(NAME_BUFFER_SIZE,
- sizeof(char), GFP_KERNEL);
- if (!fwu->firmware_name) {
- dev_err(&fwu->rmi4_data->i2c_client->dev,
- "%s Failed to allocate firmware name (%d).\n",
- __func__, NAME_BUFFER_SIZE);
- retval = -ENOMEM;
- goto memory_exit;
- }
-
snprintf(fwu->firmware_name, NAME_BUFFER_SIZE, "%s",
fwu->rmi4_data->fw_image_name);
dev_info(&fwu->rmi4_data->i2c_client->dev,
@@ -1336,8 +1284,7 @@
"%s: Firmware image %s not available\n",
__func__,
fwu->firmware_name);
- retval = -EINVAL;
- goto exit;
+ return -EINVAL;
}
dev_dbg(&fwu->rmi4_data->i2c_client->dev,
@@ -1363,6 +1310,8 @@
switch (flash_area) {
case NONE:
+ case MISMATCH:
+ retval = 0;
dev_info(&fwu->rmi4_data->i2c_client->dev,
"%s: No need to do reflash.\n",
__func__);
@@ -1408,13 +1357,11 @@
goto exit;
}
+exit:
if (fw_entry)
release_firmware(fw_entry);
pr_notice("%s: End of reflash process\n", __func__);
-exit:
- kfree(fwu->firmware_name);
-memory_exit:
return retval;
}
@@ -1428,10 +1375,21 @@
if (!fwu->initialized)
return -ENODEV;
+ fwu->rmi4_data->fw_updating = true;
+ if (fwu->rmi4_data->suspended == true) {
+ fwu->rmi4_data->fw_updating = false;
+ dev_err(&fwu->rmi4_data->i2c_client->dev,
+ "Cannot start fw upgrade while device is in suspend\n");
+ return -EBUSY;
+ }
+
fwu->ext_data_source = fw_data;
fwu->config_area = UI_CONFIG_AREA;
retval = fwu_start_reflash();
+ fwu->rmi4_data->fw_updating = false;
+
+ synaptics_rmi4_update_debug_info();
return retval;
}
@@ -1468,6 +1426,40 @@
return count;
}
+static ssize_t fwu_sysfs_fw_name_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+ char *strptr;
+
+ if (count >= NAME_BUFFER_SIZE) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "Input over %d characters long\n", NAME_BUFFER_SIZE);
+ return -EINVAL;
+ }
+
+ strptr = strnstr(buf, ".img", count);
+ if (!strptr) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "Input is not valid .img file\n");
+ return -EINVAL;
+ }
+
+ strlcpy(rmi4_data->fw_image_name, buf, count);
+ return count;
+}
+
+static ssize_t fwu_sysfs_fw_name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ if (strnlen(fwu->rmi4_data->fw_image_name, NAME_BUFFER_SIZE) > 0)
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ fwu->rmi4_data->fw_image_name);
+ else
+ return snprintf(buf, PAGE_SIZE, "No firmware name given\n");
+}
+
static ssize_t fwu_sysfs_force_reflash_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
@@ -1683,6 +1675,41 @@
config_id[0], config_id[1], config_id[2], config_id[3]);
}
+static ssize_t fwu_sysfs_package_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned char pkg_id[4];
+ /* read device package id */
+ fwu->fn_ptr->read(fwu->rmi4_data,
+ fwu->f01_fd.query_base_addr + 17,
+ pkg_id,
+ sizeof(pkg_id));
+
+ return snprintf(buf, PAGE_SIZE, "%d rev %d\n",
+ (pkg_id[1] << 8) | pkg_id[0],
+ (pkg_id[3] << 8) | pkg_id[2]);
+}
+
+static int synaptics_rmi4_debug_dump_info(struct seq_file *m, void *v)
+{
+ seq_printf(m, "%s\n", fwu->ts_info);
+
+ return 0;
+}
+
+static int debugfs_dump_info_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, synaptics_rmi4_debug_dump_info,
+ inode->i_private);
+}
+
+static const struct file_operations debug_dump_info_fops = {
+ .owner = THIS_MODULE,
+ .open = debugfs_dump_info_open,
+ .read = seq_read,
+ .release = single_release,
+};
+
static void synaptics_rmi4_fwu_attn(struct synaptics_rmi4_data *rmi4_data,
unsigned char intr_mask)
{
@@ -1692,6 +1719,65 @@
return;
}
+static struct bin_attribute dev_attr_data = {
+ .attr = {
+ .name = "data",
+ .mode = (S_IRUGO | S_IWUGO),
+ },
+ .size = 0,
+ .read = fwu_sysfs_show_image,
+ .write = fwu_sysfs_store_image,
+};
+
+static struct device_attribute attrs[] = {
+ __ATTR(fw_name, S_IWUGO | S_IRUGO,
+ fwu_sysfs_fw_name_show,
+ fwu_sysfs_fw_name_store),
+ __ATTR(force_update_fw, S_IWUGO,
+ synaptics_rmi4_show_error,
+ fwu_sysfs_force_reflash_store),
+ __ATTR(update_fw, S_IWUGO,
+ synaptics_rmi4_show_error,
+ fwu_sysfs_do_reflash_store),
+ __ATTR(writeconfig, S_IWUGO,
+ synaptics_rmi4_show_error,
+ fwu_sysfs_write_config_store),
+ __ATTR(readconfig, S_IWUGO,
+ synaptics_rmi4_show_error,
+ fwu_sysfs_read_config_store),
+ __ATTR(configarea, S_IWUGO,
+ synaptics_rmi4_show_error,
+ fwu_sysfs_config_area_store),
+ __ATTR(imagesize, S_IWUGO,
+ synaptics_rmi4_show_error,
+ fwu_sysfs_image_size_store),
+ __ATTR(blocksize, S_IRUGO,
+ fwu_sysfs_block_size_show,
+ synaptics_rmi4_store_error),
+ __ATTR(fwblockcount, S_IRUGO,
+ fwu_sysfs_firmware_block_count_show,
+ synaptics_rmi4_store_error),
+ __ATTR(configblockcount, S_IRUGO,
+ fwu_sysfs_configuration_block_count_show,
+ synaptics_rmi4_store_error),
+ __ATTR(permconfigblockcount, S_IRUGO,
+ fwu_sysfs_perm_config_block_count_show,
+ synaptics_rmi4_store_error),
+ __ATTR(blconfigblockcount, S_IRUGO,
+ fwu_sysfs_bl_config_block_count_show,
+ synaptics_rmi4_store_error),
+ __ATTR(dispconfigblockcount, S_IRUGO,
+ fwu_sysfs_disp_config_block_count_show,
+ synaptics_rmi4_store_error),
+ __ATTR(config_id, S_IRUGO,
+ fwu_sysfs_config_id_show,
+ synaptics_rmi4_store_error),
+ __ATTR(package_id, S_IRUGO,
+ fwu_sysfs_package_id_show,
+ synaptics_rmi4_store_error),
+};
+
static void synaptics_rmi4_fwu_work(struct work_struct *work)
{
fwu_start_reflash();
@@ -1702,6 +1788,7 @@
int retval;
unsigned char attr_count;
struct pdt_properties pdt_props;
+ struct dentry *temp;
fwu = kzalloc(sizeof(*fwu), GFP_KERNEL);
if (!fwu) {
@@ -1765,7 +1852,7 @@
fwu->initialized = true;
fwu->force_update = FORCE_UPDATE;
- retval = sysfs_create_bin_file(&rmi4_data->input_dev->dev.kobj,
+ retval = sysfs_create_bin_file(&rmi4_data->i2c_client->dev.kobj,
&dev_attr_data);
if (retval < 0) {
dev_err(&rmi4_data->i2c_client->dev,
@@ -1775,7 +1862,7 @@
}
for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
- retval = sysfs_create_file(&rmi4_data->input_dev->dev.kobj,
+ retval = sysfs_create_file(&rmi4_data->i2c_client->dev.kobj,
&attrs[attr_count].attr);
if (retval < 0) {
dev_err(&rmi4_data->i2c_client->dev,
@@ -1786,6 +1873,25 @@
}
}
+ temp = debugfs_create_file("dump_info", S_IRUSR | S_IWUSR,
+ fwu->rmi4_data->dir, fwu->rmi4_data,
+ &debug_dump_info_fops);
+ if (temp == NULL || IS_ERR(temp)) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "%s: Failed to create debugfs dump info file\n",
+ __func__);
+ retval = temp ? PTR_ERR(temp) : -ENOMEM;
+ goto exit_remove_attrs;
+ }
+
+ fwu->ts_info = kzalloc(RMI4_INFO_MAX_LEN, GFP_KERNEL);
+ if (!fwu->ts_info) {
+ dev_err(&rmi4_data->i2c_client->dev, "Not enough memory\n");
+ goto exit_free_ts_info;
+ }
+
+ synaptics_rmi4_update_debug_info();
+
#ifdef INSIDE_FIRMWARE_UPDATE
fwu->fwu_workqueue = create_singlethread_workqueue("fwu_workqueue");
INIT_DELAYED_WORK(&fwu->fwu_work, synaptics_rmi4_fwu_work);
@@ -1797,7 +1903,8 @@
init_completion(&remove_complete);
return 0;
-
+exit_free_ts_info:
+ debugfs_remove(temp);
exit_remove_attrs:
for (attr_count--; attr_count >= 0; attr_count--) {
sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
diff --git a/drivers/input/touchscreen/synaptics_i2c_rmi4.c b/drivers/input/touchscreen/synaptics_i2c_rmi4.c
index b9dd4ae..9da095a 100644
--- a/drivers/input/touchscreen/synaptics_i2c_rmi4.c
+++ b/drivers/input/touchscreen/synaptics_i2c_rmi4.c
@@ -34,6 +34,7 @@
#define DRIVER_NAME "synaptics_rmi4_i2c"
#define INPUT_PHYS_NAME "synaptics_rmi4_i2c/input0"
+#define DEBUGFS_DIR_NAME "ts_debug"
#define RESET_DELAY 100
@@ -114,12 +115,6 @@
static ssize_t synaptics_rmi4_full_pm_cycle_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count);
-static ssize_t synaptics_rmi4_mode_suspend_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count);
-
-static ssize_t synaptics_rmi4_mode_resume_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count);
-
#if defined(CONFIG_FB)
static int fb_notifier_callback(struct notifier_block *self,
unsigned long event, void *data);
@@ -243,12 +238,6 @@
__ATTR(full_pm_cycle, (S_IRUGO | S_IWUGO),
synaptics_rmi4_full_pm_cycle_show,
synaptics_rmi4_full_pm_cycle_store),
- __ATTR(mode_suspend, S_IWUGO,
- synaptics_rmi4_show_error,
- synaptics_rmi4_mode_suspend_store),
- __ATTR(mode_resume, S_IWUGO,
- synaptics_rmi4_show_error,
- synaptics_rmi4_mode_resume_store),
#endif
__ATTR(reset, S_IWUGO,
synaptics_rmi4_show_error,
@@ -300,34 +289,30 @@
return count;
}
-static ssize_t synaptics_rmi4_mode_suspend_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static int synaptics_rmi4_debug_suspend_set(void *_data, u64 val)
{
- unsigned int input;
- struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+ struct synaptics_rmi4_data *rmi4_data = _data;
- if (sscanf(buf, "%u", &input) != 1)
- return -EINVAL;
+ if (val)
+ synaptics_rmi4_suspend(&rmi4_data->input_dev->dev);
+ else
+ synaptics_rmi4_resume(&rmi4_data->input_dev->dev);
- synaptics_rmi4_suspend(&(rmi4_data->input_dev->dev));
-
- return count;
+ return 0;
}
-static ssize_t synaptics_rmi4_mode_resume_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static int synaptics_rmi4_debug_suspend_get(void *_data, u64 *val)
{
- unsigned int input;
- struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+ struct synaptics_rmi4_data *rmi4_data = _data;
- if (sscanf(buf, "%u", &input) != 1)
- return -EINVAL;
+ *val = rmi4_data->suspended;
- synaptics_rmi4_resume(&(rmi4_data->input_dev->dev));
-
- return count;
+ return 0;
}
+DEFINE_SIMPLE_ATTRIBUTE(debug_suspend_fops, synaptics_rmi4_debug_suspend_get,
+ synaptics_rmi4_debug_suspend_set, "%lld\n");
+
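Illustrative usage of the resulting debugfs attribute (paths assume debugfs
mounted at /sys/kernel/debug; the ts_debug directory is created in probe
below):

	echo 1 > /sys/kernel/debug/ts_debug/suspend	(suspend the controller)
	echo 0 > /sys/kernel/debug/ts_debug/suspend	(resume it)
	cat /sys/kernel/debug/ts_debug/suspend		(1 if suspended, else 0)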
#ifdef CONFIG_FB
static void configure_sleep(struct synaptics_rmi4_data *rmi4_data)
{
@@ -2061,6 +2046,7 @@
struct synaptics_rmi4_device_info *rmi;
struct synaptics_rmi4_platform_data *platform_data =
client->dev.platform_data;
+ struct dentry *temp;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_BYTE_DATA)) {
@@ -2118,6 +2104,8 @@
rmi4_data->touch_stopped = false;
rmi4_data->sensor_sleep = false;
rmi4_data->irq_enabled = false;
+ rmi4_data->fw_updating = false;
+ rmi4_data->suspended = false;
rmi4_data->i2c_read = synaptics_rmi4_i2c_read;
rmi4_data->i2c_write = synaptics_rmi4_i2c_write;
@@ -2127,7 +2115,9 @@
rmi4_data->flip_x = rmi4_data->board->x_flip;
rmi4_data->flip_y = rmi4_data->board->y_flip;
- rmi4_data->fw_image_name = rmi4_data->board->fw_image_name;
+ if (rmi4_data->board->fw_image_name)
+ snprintf(rmi4_data->fw_image_name, NAME_BUFFER_SIZE, "%s",
+ rmi4_data->board->fw_image_name);
rmi4_data->input_dev->name = DRIVER_NAME;
rmi4_data->input_dev->phys = INPUT_PHYS_NAME;
@@ -2292,8 +2282,27 @@
goto err_enable_irq;
}
+ rmi4_data->dir = debugfs_create_dir(DEBUGFS_DIR_NAME, NULL);
+ if (rmi4_data->dir == NULL || IS_ERR(rmi4_data->dir)) {
+ dev_err(&client->dev,
+ "%s: Failed to create debugfs directory, rc = %ld\n",
+ __func__, PTR_ERR(rmi4_data->dir));
+ retval = rmi4_data->dir ? PTR_ERR(rmi4_data->dir) : -ENOMEM;
+ goto err_create_debugfs_dir;
+ }
+
+ temp = debugfs_create_file("suspend", S_IRUSR | S_IWUSR, rmi4_data->dir,
+ rmi4_data, &debug_suspend_fops);
+ if (temp == NULL || IS_ERR(temp)) {
+ dev_err(&client->dev,
+ "%s: Failed to create suspend debugfs file, rc = %ld\n",
+ __func__, PTR_ERR(temp));
+ retval = temp ? PTR_ERR(temp) : -ENOMEM;
+ goto err_create_debugfs_file;
+ }
+
for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
- retval = sysfs_create_file(&rmi4_data->input_dev->dev.kobj,
+ retval = sysfs_create_file(&client->dev.kobj,
&attrs[attr_count].attr);
if (retval < 0) {
dev_err(&client->dev,
@@ -2317,7 +2326,10 @@
sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
&attrs[attr_count].attr);
}
-
+err_create_debugfs_file:
+ debugfs_remove_recursive(rmi4_data->dir);
+err_create_debugfs_dir:
+ free_irq(rmi4_data->irq, rmi4_data);
err_enable_irq:
cancel_delayed_work_sync(&rmi4_data->det_work);
flush_workqueue(rmi4_data->det_workqueue);
@@ -2372,6 +2384,7 @@
rmi = &(rmi4_data->rmi4_mod_info);
+ debugfs_remove_recursive(rmi4_data->dir);
cancel_delayed_work_sync(&rmi4_data->det_work);
flush_workqueue(rmi4_data->det_workqueue);
destroy_workqueue(rmi4_data->det_workqueue);
@@ -2659,19 +2672,32 @@
struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
int retval;
- if (!rmi4_data->sensor_sleep) {
- rmi4_data->touch_stopped = true;
- wake_up(&rmi4_data->wait);
- synaptics_rmi4_irq_enable(rmi4_data, false);
- synaptics_rmi4_sensor_sleep(rmi4_data);
+ if (rmi4_data->suspended) {
+ dev_info(dev, "Already in suspend state\n");
+ return 0;
}
- retval = synaptics_rmi4_regulator_lpm(rmi4_data, true);
- if (retval < 0) {
- dev_err(dev, "failed to enter low power mode\n");
- return retval;
+ if (!rmi4_data->fw_updating) {
+ if (!rmi4_data->sensor_sleep) {
+ rmi4_data->touch_stopped = true;
+ wake_up(&rmi4_data->wait);
+ synaptics_rmi4_irq_enable(rmi4_data, false);
+ synaptics_rmi4_sensor_sleep(rmi4_data);
+ }
+
+ retval = synaptics_rmi4_regulator_lpm(rmi4_data, true);
+ if (retval < 0) {
+ dev_err(dev, "failed to enter low power mode\n");
+ return retval;
+ }
+ } else {
+ dev_err(dev,
+ "Firmware updating, cannot go into suspend mode\n");
+ return 0;
}
+ rmi4_data->suspended = true;
+
return 0;
}
@@ -2690,6 +2716,11 @@
struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
int retval;
+ if (!rmi4_data->suspended) {
+ dev_info(dev, "Already in awake state\n");
+ return 0;
+ }
+
retval = synaptics_rmi4_regulator_lpm(rmi4_data, false);
if (retval < 0) {
dev_err(dev, "failed to enter active power mode\n");
@@ -2700,6 +2731,8 @@
rmi4_data->touch_stopped = false;
synaptics_rmi4_irq_enable(rmi4_data, true);
+ rmi4_data->suspended = false;
+
return 0;
}
diff --git a/drivers/input/touchscreen/synaptics_i2c_rmi4.h b/drivers/input/touchscreen/synaptics_i2c_rmi4.h
index 681b95c..5f6d6ce 100644
--- a/drivers/input/touchscreen/synaptics_i2c_rmi4.h
+++ b/drivers/input/touchscreen/synaptics_i2c_rmi4.h
@@ -34,6 +34,7 @@
#elif defined CONFIG_HAS_EARLYSUSPEND
#include <linux/earlysuspend.h>
#endif
+#include <linux/debugfs.h>
#define PDT_PROPS (0x00EF)
#define PDT_START (0x00E9)
@@ -68,6 +69,8 @@
#define MASK_2BIT 0x03
#define MASK_1BIT 0x01
+#define NAME_BUFFER_SIZE 128
+
/*
* struct synaptics_rmi4_fn_desc - function descriptor fields in PDT
* @query_base_addr: base address for query registers
@@ -183,6 +186,7 @@
* @fingers_on_2d: flag to indicate presence of fingers in 2d area
* @flip_x: set to TRUE if desired to flip direction on x-axis
* @flip_y: set to TRUE if desired to flip direction on y-axis
+ * @fw_updating: firmware is updating flag
* @sensor_sleep: flag to indicate sleep state of sensor
* @wait: wait queue for touch data polling in interrupt thread
* @i2c_read: pointer to i2c read function
@@ -202,7 +206,8 @@
#ifdef CONFIG_HAS_EARLYSUSPEND
struct early_suspend early_suspend;
#endif
- const char *fw_image_name;
+ struct dentry *dir;
+ char fw_image_name[NAME_BUFFER_SIZE];
unsigned char current_page;
unsigned char button_0d_enabled;
unsigned char full_pm_cycle;
@@ -224,6 +229,8 @@
bool sensor_sleep;
bool flip_x;
bool flip_y;
+ bool fw_updating;
+ bool suspended;
wait_queue_head_t wait;
int (*i2c_read)(struct synaptics_rmi4_data *pdata, unsigned short addr,
unsigned char *data, unsigned short length);
diff --git a/drivers/media/platform/msm/camera_v2/Kconfig b/drivers/media/platform/msm/camera_v2/Kconfig
index ec2e381..74ec99a 100644
--- a/drivers/media/platform/msm/camera_v2/Kconfig
+++ b/drivers/media/platform/msm/camera_v2/Kconfig
@@ -182,6 +182,15 @@
snapshot config = 3264 * 2448 at 18 fps.
2 lanes max fps is 18, 4 lanes max fps is 24.
+config s5k4e1
+ bool "Sensor s5k4e1 (BAYER 5MP)"
+ depends on MSMB_CAMERA
+ ---help---
+ Samsung 5 MP Bayer sensor. It uses 2 MIPI lanes,
+ supports 720p preview at 30 fps
+ and QSXGA snapshot at 15 fps.
+ This sensor driver does not support auto focus.
+
config MSM_V4L2_VIDEO_OVERLAY_DEVICE
tristate "Qualcomm MSM V4l2 video overlay device"
---help---
diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
index f658f13..d238649 100644
--- a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
+++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
@@ -854,7 +854,6 @@
cpp_dev->cpp_open_cnt++;
if (cpp_dev->cpp_open_cnt == 1) {
cpp_init_hardware(cpp_dev);
- iommu_attach_device(cpp_dev->domain, cpp_dev->iommu_ctx);
cpp_init_mem(cpp_dev);
cpp_dev->state = CPP_STATE_IDLE;
}
@@ -921,7 +920,6 @@
msm_camera_io_r(cpp_dev->cpp_hw_base + 0x8C));
msm_camera_io_w(0x0, cpp_dev->base + MSM_CPP_MICRO_CLKEN_CTL);
cpp_deinit_mem(cpp_dev);
- iommu_detach_device(cpp_dev->domain, cpp_dev->iommu_ctx);
cpp_release_hardware(cpp_dev);
cpp_dev->state = CPP_STATE_OFF;
}
@@ -1653,7 +1651,6 @@
return msm_register_domain(&cpp_fw_layout);
}
-
static int __devinit cpp_probe(struct platform_device *pdev)
{
struct cpp_device *cpp_dev;
@@ -1768,6 +1765,7 @@
cpp_dev->msm_sd.sd.entity.revision = cpp_dev->msm_sd.sd.devnode->num;
cpp_dev->state = CPP_STATE_BOOT;
cpp_init_hardware(cpp_dev);
+ iommu_attach_device(cpp_dev->domain, cpp_dev->iommu_ctx);
msm_camera_io_w(0x0, cpp_dev->base +
MSM_CPP_MICRO_IRQGEN_MASK);
@@ -1795,6 +1793,7 @@
cpp_timers[1].used = 0;
cpp_dev->fw_name_bin = NULL;
return rc;
+
ERROR3:
release_mem_region(cpp_dev->mem->start, resource_size(cpp_dev->mem));
ERROR2:
@@ -1824,6 +1823,7 @@
return 0;
}
+ iommu_detach_device(cpp_dev->domain, cpp_dev->iommu_ctx);
msm_sd_unregister(&cpp_dev->msm_sd);
release_mem_region(cpp_dev->mem->start, resource_size(cpp_dev->mem));
release_mem_region(cpp_dev->vbif_mem->start,
diff --git a/drivers/media/platform/msm/camera_v2/sensor/Makefile b/drivers/media/platform/msm/camera_v2/sensor/Makefile
index 40931ef..18ac623 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/Makefile
+++ b/drivers/media/platform/msm/camera_v2/sensor/Makefile
@@ -8,6 +8,7 @@
obj-$(CONFIG_S5K3L1YX) += s5k3l1yx.o
obj-$(CONFIG_IMX135) += imx135.o
obj-$(CONFIG_OV8825) += ov8825.o
+obj-$(CONFIG_s5k4e1) += s5k4e1.o
obj-$(CONFIG_OV2720) += ov2720.o
obj-$(CONFIG_OV9724) += ov9724.o
obj-$(CONFIG_HI256) += hi256.o
diff --git a/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c
index a6c5639..a27ca99 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c
@@ -795,10 +795,7 @@
rc = msm_cci_config(sd, arg);
break;
case MSM_SD_SHUTDOWN: {
- struct msm_camera_cci_ctrl ctrl_cmd;
- ctrl_cmd.cmd = MSM_CCI_RELEASE;
- rc = msm_cci_config(sd, &ctrl_cmd);
- break;
+ return rc;
}
default:
rc = -ENOIOCTLCMD;
diff --git a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.c b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.c
index 34f4428..7f474bb 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.c
@@ -1175,9 +1175,10 @@
case VIDIOC_MSM_SENSOR_CFG:
return s_ctrl->func_tbl->sensor_config(s_ctrl, argp);
case VIDIOC_MSM_SENSOR_RELEASE:
- case MSM_SD_SHUTDOWN:
msm_sensor_stop_stream(s_ctrl);
return 0;
+ case MSM_SD_SHUTDOWN:
+ return 0;
default:
return -ENOIOCTLCMD;
}
diff --git a/drivers/media/platform/msm/camera_v2/sensor/s5k4e1.c b/drivers/media/platform/msm/camera_v2/sensor/s5k4e1.c
new file mode 100644
index 0000000..5c70df2
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/s5k4e1.c
@@ -0,0 +1,167 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include "msm_sensor.h"
+#define s5k4e1_SENSOR_NAME "s5k4e1"
+DEFINE_MSM_MUTEX(s5k4e1_mut);
+
+static struct msm_sensor_ctrl_t s5k4e1_s_ctrl;
+
+static struct msm_sensor_power_setting s5k4e1_power_setting[] = {
+ {
+ .seq_type = SENSOR_VREG,
+ .seq_val = CAM_VIO,
+ .config_val = 0,
+ .delay = 0,
+ },
+ {
+ .seq_type = SENSOR_VREG,
+ .seq_val = CAM_VANA,
+ .config_val = 0,
+ .delay = 0,
+ },
+ {
+ .seq_type = SENSOR_VREG,
+ .seq_val = CAM_VDIG,
+ .config_val = 0,
+ .delay = 0,
+ },
+ {
+ .seq_type = SENSOR_GPIO,
+ .seq_val = SENSOR_GPIO_RESET,
+ .config_val = GPIO_OUT_LOW,
+ .delay = 1,
+ },
+ {
+ .seq_type = SENSOR_GPIO,
+ .seq_val = SENSOR_GPIO_RESET,
+ .config_val = GPIO_OUT_HIGH,
+ .delay = 30,
+ },
+ {
+ .seq_type = SENSOR_GPIO,
+ .seq_val = SENSOR_GPIO_STANDBY,
+ .config_val = GPIO_OUT_LOW,
+ .delay = 1,
+ },
+ {
+ .seq_type = SENSOR_GPIO,
+ .seq_val = SENSOR_GPIO_STANDBY,
+ .config_val = GPIO_OUT_HIGH,
+ .delay = 30,
+ },
+ {
+ .seq_type = SENSOR_CLK,
+ .seq_val = SENSOR_CAM_MCLK,
+ .config_val = 24000000,
+ .delay = 1,
+ },
+ {
+ .seq_type = SENSOR_I2C_MUX,
+ .seq_val = 0,
+ .config_val = 0,
+ .delay = 1,
+ },
+};
+
+static struct v4l2_subdev_info s5k4e1_subdev_info[] = {
+ {
+ .code = V4L2_MBUS_FMT_SGRBG10_1X10,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .fmt = 1,
+ .order = 0,
+ },
+};
+
+static const struct i2c_device_id s5k4e1_i2c_id[] = {
+ {s5k4e1_SENSOR_NAME, (kernel_ulong_t)&s5k4e1_s_ctrl},
+ { }
+};
+
+static int32_t msm_s5k4e1_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ return msm_sensor_i2c_probe(client, id, &s5k4e1_s_ctrl);
+}
+
+static struct i2c_driver s5k4e1_i2c_driver = {
+ .id_table = s5k4e1_i2c_id,
+ .probe = msm_s5k4e1_i2c_probe,
+ .driver = {
+ .name = s5k4e1_SENSOR_NAME,
+ },
+};
+
+static struct msm_camera_i2c_client s5k4e1_sensor_i2c_client = {
+ .addr_type = MSM_CAMERA_I2C_WORD_ADDR,
+};
+
+static const struct of_device_id s5k4e1_dt_match[] = {
+ {.compatible = "shinetech,s5k4e1", .data = &s5k4e1_s_ctrl},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, s5k4e1_dt_match);
+
+static struct platform_driver s5k4e1_platform_driver = {
+ .driver = {
+ .name = "shinetech,s5k4e1",
+ .owner = THIS_MODULE,
+ .of_match_table = s5k4e1_dt_match,
+ },
+};
+
+static int32_t s5k4e1_platform_probe(struct platform_device *pdev)
+{
+ int32_t rc = 0;
+ const struct of_device_id *match;
+ match = of_match_device(s5k4e1_dt_match, &pdev->dev);
+ rc = msm_sensor_platform_probe(pdev, match->data);
+ return rc;
+}
+
+static int __init s5k4e1_init_module(void)
+{
+ int32_t rc = 0;
+ pr_info("%s:%d\n", __func__, __LINE__);
+ rc = platform_driver_probe(&s5k4e1_platform_driver,
+ s5k4e1_platform_probe);
+ if (!rc)
+ return rc;
+ pr_err("%s:%d rc %d\n", __func__, __LINE__, rc);
+ return i2c_add_driver(&s5k4e1_i2c_driver);
+}
+
+static void __exit s5k4e1_exit_module(void)
+{
+ pr_info("%s:%d\n", __func__, __LINE__);
+ if (s5k4e1_s_ctrl.pdev) {
+ msm_sensor_free_sensor_data(&s5k4e1_s_ctrl);
+ platform_driver_unregister(&s5k4e1_platform_driver);
+ } else
+ i2c_del_driver(&s5k4e1_i2c_driver);
+ return;
+}
+
+static struct msm_sensor_ctrl_t s5k4e1_s_ctrl = {
+ .sensor_i2c_client = &s5k4e1_sensor_i2c_client,
+ .power_setting_array.power_setting = s5k4e1_power_setting,
+ .power_setting_array.size = ARRAY_SIZE(s5k4e1_power_setting),
+ .msm_sensor_mutex = &s5k4e1_mut,
+ .sensor_v4l2_subdev_info = s5k4e1_subdev_info,
+ .sensor_v4l2_subdev_info_size = ARRAY_SIZE(s5k4e1_subdev_info),
+};
+
+module_init(s5k4e1_init_module);
+module_exit(s5k4e1_exit_module);
+MODULE_DESCRIPTION("s5k4e1");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
index b81af11..d8cff35 100644
--- a/drivers/mmc/core/sdio_cis.c
+++ b/drivers/mmc/core/sdio_cis.c
@@ -55,7 +55,7 @@
for (i = 0; i < nr_strings; i++) {
buffer[i] = string;
- strlcpy(string, buf, strlen(buf));
+ strlcpy(string, buf, strlen(buf) + 1);
string += strlen(string) + 1;
buf += strlen(buf) + 1;
}
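The change matters because strlcpy() copies at most size - 1 bytes and always
NUL-terminates, so passing strlen(buf) silently dropped the last character of
every string; a quick illustration (not part of the patch):

	char dst[8];

	strlcpy(dst, "CIS", strlen("CIS"));     /* size 3 -> dst = "CI" */
	strlcpy(dst, "CIS", strlen("CIS") + 1); /* size 4 -> dst = "CIS" */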
diff --git a/drivers/net/ethernet/msm/msm_rmnet_wwan.c b/drivers/net/ethernet/msm/msm_rmnet_wwan.c
index 98bdccc..3e4605f 100644
--- a/drivers/net/ethernet/msm/msm_rmnet_wwan.c
+++ b/drivers/net/ethernet/msm/msm_rmnet_wwan.c
@@ -188,6 +188,9 @@
__func__, skb);
netif_wake_queue(dev);
}
+ if (a2_mux_is_ch_empty(a2_mux_lcid_by_ch_id[wwan_ptr->ch_id]))
+ ipa_rm_inactivity_timer_release_resource(
+ ipa_rm_resource_by_ch_id[wwan_ptr->ch_id]);
spin_unlock_irqrestore(&wwan_ptr->lock, flags);
}
@@ -533,6 +536,7 @@
__func__, skb);
}
spin_unlock_irqrestore(&wwan_ptr->lock, flags);
+ return ret;
exit:
ipa_rm_inactivity_timer_release_resource(
ipa_rm_resource_by_ch_id[wwan_ptr->ch_id]);
@@ -548,6 +552,7 @@
static void wwan_tx_timeout(struct net_device *dev)
{
pr_warning("[%s] wwan_tx_timeout(), data stall in UL\n", dev->name);
+ ipa_bam_reg_dump();
}
/**
diff --git a/drivers/net/wireless/wcnss/wcnss_wlan.c b/drivers/net/wireless/wcnss/wcnss_wlan.c
index a3d7f12..5e22c35 100644
--- a/drivers/net/wireless/wcnss/wcnss_wlan.c
+++ b/drivers/net/wireless/wcnss/wcnss_wlan.c
@@ -112,6 +112,9 @@
#define CCU_PRONTO_LAST_ADDR1_OFFSET 0x10
#define CCU_PRONTO_LAST_ADDR2_OFFSET 0x14
+#define MSM_PRONTO_SAW2_BASE 0xfb219000
+#define PRONTO_SAW2_SPM_STS_OFFSET 0x0c
+
#define WCNSS_DEF_WLAN_RX_BUFF_COUNT 1024
#define WCNSS_CTRL_CHANNEL "WCNSS_CTRL"
@@ -288,6 +291,7 @@
void __iomem *riva_ccu_base;
void __iomem *pronto_a2xb_base;
void __iomem *pronto_ccpu_base;
+ void __iomem *pronto_saw2_base;
void __iomem *fiq_reg;
int ssr_boot;
int nv_downloaded;
@@ -479,6 +483,10 @@
reg = readl_relaxed(reg_addr);
pr_info_ratelimited("%s: CCU_CCPU_LAST_ADDR2 %08x\n", __func__, reg);
+ reg_addr = penv->pronto_saw2_base + PRONTO_SAW2_SPM_STS_OFFSET;
+ reg = readl_relaxed(reg_addr);
+ pr_info_ratelimited("%s: PRONTO_SAW2_SPM_STS %08x\n", __func__, reg);
+
tst_addr = penv->pronto_a2xb_base + A2XB_TSTBUS_OFFSET;
tst_ctrl_addr = penv->pronto_a2xb_base + A2XB_TSTBUS_CTRL_OFFSET;
@@ -1683,6 +1691,14 @@
ret = -ENOMEM;
goto fail_ioremap4;
}
+ penv->pronto_saw2_base = ioremap_nocache(MSM_PRONTO_SAW2_BASE,
+ SZ_32);
+ if (!penv->pronto_saw2_base) {
+ pr_err("%s: ioremap wcnss physical(saw2) failed\n",
+ __func__);
+ ret = -ENOMEM;
+ goto fail_ioremap5;
+ }
}
/* trigger initialization of the WCNSS */
@@ -1699,6 +1715,9 @@
fail_pil:
if (penv->riva_ccu_base)
iounmap(penv->riva_ccu_base);
+ if (penv->pronto_saw2_base)
+ iounmap(penv->pronto_saw2_base);
+fail_ioremap5:
if (penv->fiq_reg)
iounmap(penv->fiq_reg);
fail_ioremap4:
diff --git a/drivers/platform/msm/ipa/a2_service.c b/drivers/platform/msm/ipa/a2_service.c
index 4921ec2..ae9277e 100644
--- a/drivers/platform/msm/ipa/a2_service.c
+++ b/drivers/platform/msm/ipa/a2_service.c
@@ -1469,6 +1469,35 @@
return ret;
}
+/**
+ * a2_mux_is_ch_empty() - checks if channel is empty.
+ * @lcid: logical channel ID
+ *
+ * Returns: 1 if the channel is empty, 0 if it still holds queued packets,
+ * or a negative error code on failure
+ */
+int a2_mux_is_ch_empty(enum a2_mux_logical_channel_id lcid)
+{
+ unsigned long flags;
+ int ret;
+
+ if (lcid >= A2_MUX_NUM_CHANNELS ||
+ lcid < 0)
+ return -EINVAL;
+ if (!a2_mux_ctx->a2_mux_initialized)
+ return -ENODEV;
+ spin_lock_irqsave(&a2_mux_ctx->bam_ch[lcid].lock, flags);
+ a2_mux_ctx->bam_ch[lcid].use_wm = 1;
+ ret = a2_mux_ctx->bam_ch[lcid].num_tx_pkts == 0;
+ if (!bam_ch_is_local_open(lcid)) {
+ ret = -ENODEV;
+ IPAERR("%s: port not open: %d\n", __func__,
+ a2_mux_ctx->bam_ch[lcid].status);
+ }
+ spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags);
+ return ret;
+}
+
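Because the return value mixes a boolean with negative errnos, callers should
check for errors explicitly; a hedged sketch (lcid and resource are
placeholders):

	int rc = a2_mux_is_ch_empty(lcid);

	if (rc < 0)
		IPAERR("cannot query channel %d: %d\n", lcid, rc);
	else if (rc)
		ipa_rm_inactivity_timer_release_resource(resource);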
static int a2_mux_initialize_context(int handle)
{
int i;
diff --git a/drivers/platform/msm/ipa/ipa.c b/drivers/platform/msm/ipa/ipa.c
index 0724aa3..f5d7b6e 100644
--- a/drivers/platform/msm/ipa/ipa.c
+++ b/drivers/platform/msm/ipa/ipa.c
@@ -140,7 +140,7 @@
static bool hdr_tbl_lcl = 1;
module_param(hdr_tbl_lcl, bool, 0644);
MODULE_PARM_DESC(hdr_tbl_lcl, "where hdr tbl resides 1-local; 0-system");
-static bool ip4_rt_tbl_lcl = 1;
+static bool ip4_rt_tbl_lcl;
module_param(ip4_rt_tbl_lcl, bool, 0644);
MODULE_PARM_DESC(ip4_rt_tbl_lcl,
"where ip4 rt tables reside 1-local; 0-system");
diff --git a/drivers/platform/msm/ipa/ipa_client.c b/drivers/platform/msm/ipa/ipa_client.c
index e98c9b7..f20f37f 100644
--- a/drivers/platform/msm/ipa/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_client.c
@@ -203,6 +203,7 @@
}
memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
+ ipa_enable_data_path(ipa_ep_idx);
ep->valid = 1;
ep->client = in->client;
@@ -380,7 +381,6 @@
return -EPERM;
}
- ipa_enable_data_path(clnt_hdl);
memset(&ipa_ctx->ep[clnt_hdl], 0, sizeof(struct ipa_ep_context));
ipa_dec_client_disable_clks();
diff --git a/drivers/platform/msm/ipa/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_debugfs.c
index 87ddf59..c429414 100644
--- a/drivers/platform/msm/ipa/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_debugfs.c
@@ -113,6 +113,7 @@
{
int nbytes;
+ ipa_inc_client_enable_clks();
if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0)
nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
"IPA_VERSION=0x%x\n"
@@ -143,6 +144,7 @@
ipa_read_reg(ipa_ctx->mmio, IPA_FILTER_OFST_v2),
ipa_read_reg(ipa_ctx->mmio, IPA_SHARED_MEM_SIZE_OFST_v2)
);
+ ipa_dec_client_disable_clks();
return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
}
@@ -241,6 +243,7 @@
end_idx = start_idx + 1;
}
pos = *ppos;
+ ipa_inc_client_enable_clks();
for (i = start_idx; i < end_idx; i++) {
if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0) {
@@ -292,13 +295,16 @@
*ppos = pos;
ret = simple_read_from_buffer(ubuf, count, ppos, dbg_buff,
nbytes);
- if (ret < 0)
+ if (ret < 0) {
+ ipa_dec_client_disable_clks();
return ret;
+ }
size += ret;
ubuf += nbytes;
count -= nbytes;
}
+ ipa_dec_client_disable_clks();
*ppos = pos + size;
return size;
@@ -500,6 +506,32 @@
set = &ipa_ctx->rt_tbl_set[ip];
mutex_lock(&ipa_ctx->lock);
+ if (ip == IPA_IP_v6) {
+ if (ipa_ctx->ip6_rt_tbl_lcl) {
+ nbytes = scnprintf(dbg_buff + cnt,
+ IPA_MAX_MSG_LEN - cnt,
+ "Table Resides on local memory\n");
+ cnt += nbytes;
+ } else {
+ nbytes = scnprintf(dbg_buff + cnt,
+ IPA_MAX_MSG_LEN - cnt,
+ "Table Resides on system(ddr) memory\n");
+ cnt += nbytes;
+ }
+ } else if (ip == IPA_IP_v4) {
+ if (ipa_ctx->ip4_rt_tbl_lcl) {
+ nbytes = scnprintf(dbg_buff + cnt,
+ IPA_MAX_MSG_LEN - cnt,
+ "Table Resides on local memory\n");
+ cnt += nbytes;
+ } else {
+ nbytes = scnprintf(dbg_buff + cnt,
+ IPA_MAX_MSG_LEN - cnt,
+ "Table Resides on system(ddr) memory\n");
+ cnt += nbytes;
+ }
+ }
+
list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
i = 0;
list_for_each_entry(entry, &tbl->head_rt_rule_list, link) {
@@ -683,12 +715,14 @@
if (kstrtou32(dbg_buff, 0, &option))
return -EFAULT;
+ ipa_inc_client_enable_clks();
if (option == 1)
ipa_write_reg(ipa_ctx->mmio, IPA_DEBUG_CNT_CTRL_n_OFST(0),
IPA_DBG_CNTR_ON);
else
ipa_write_reg(ipa_ctx->mmio, IPA_DEBUG_CNT_CTRL_n_OFST(0),
IPA_DBG_CNTR_OFF);
+ ipa_dec_client_disable_clks();
return count;
}
@@ -698,10 +732,12 @@
{
int nbytes;
+ ipa_inc_client_enable_clks();
nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
"IPA_DEBUG_CNT_REG_0=0x%x\n",
ipa_read_reg(ipa_ctx->mmio,
IPA_DEBUG_CNT_REG_n_OFST(0)));
+ ipa_dec_client_disable_clks();
return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
}
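
Note on the debugfs hunks above: each handler now brackets its MMIO window with ipa_inc_client_enable_clks()/ipa_dec_client_disable_clks(), including the early-return path in the endpoint dump, so the IPA core clock is guaranteed on whenever ipa_read_reg() or ipa_write_reg() runs. A condensed sketch of the pattern; my_read_version() and IPA_VERSION_OFST are hypothetical stand-ins, only the clock and register accessors are taken from the code above:

static u32 my_read_version(void)
{
        u32 val;

        ipa_inc_client_enable_clks();   /* vote the IPA clocks on */
        val = ipa_read_reg(ipa_ctx->mmio, IPA_VERSION_OFST);   /* assumed offset name */
        ipa_dec_client_disable_clks();  /* drop the vote */

        return val;
}
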
diff --git a/drivers/platform/msm/ipa/ipa_flt.c b/drivers/platform/msm/ipa/ipa_flt.c
index edb9fb1..2d75141 100644
--- a/drivers/platform/msm/ipa/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_flt.c
@@ -634,12 +634,13 @@
return -EINVAL;
}
ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode, ep);
- if (ipa_ep_idx == IPA_FLT_TABLE_INDEX_NOT_FOUND ||
- ipa_ctx->ep[ipa_ep_idx].valid == 0) {
- IPAERR("ep not valid and/or connected ep_idx=%d\n", ipa_ep_idx);
-
+ if (ipa_ep_idx == IPA_FLT_TABLE_INDEX_NOT_FOUND) {
+ IPAERR("ep not valid ep=%d\n", ep);
return -EINVAL;
}
+ if (ipa_ctx->ep[ipa_ep_idx].valid == 0)
+ IPADBG("ep not connected ep_idx=%d\n", ipa_ep_idx);
+
tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][ip];
IPADBG("add ep flt rule ip=%d ep=%d\n", ip, ep);
diff --git a/drivers/platform/msm/ipa/ipa_ram_mmap.h b/drivers/platform/msm/ipa/ipa_ram_mmap.h
index 78093b8..20eb52a 100644
--- a/drivers/platform/msm/ipa/ipa_ram_mmap.h
+++ b/drivers/platform/msm/ipa/ipa_ram_mmap.h
@@ -21,16 +21,19 @@
#define IPA_RAM_NAT_OFST 0
#define IPA_RAM_NAT_SIZE 0
#define IPA_RAM_HDR_OFST (IPA_RAM_NAT_OFST + IPA_RAM_NAT_SIZE)
-#define IPA_RAM_HDR_SIZE 1280
+#define IPA_RAM_HDR_SIZE 1664
#define IPA_RAM_V4_FLT_OFST (IPA_RAM_HDR_OFST + IPA_RAM_HDR_SIZE)
-#define IPA_RAM_V4_FLT_SIZE 1408
+#define IPA_RAM_V4_FLT_SIZE 2176
#define IPA_RAM_V4_RT_OFST (IPA_RAM_V4_FLT_OFST + IPA_RAM_V4_FLT_SIZE)
-#define IPA_RAM_V4_RT_SIZE 2176
+#define IPA_RAM_V4_RT_SIZE 512
#define IPA_RAM_V6_FLT_OFST (IPA_RAM_V4_RT_OFST + IPA_RAM_V4_RT_SIZE)
-#define IPA_RAM_V6_FLT_SIZE 1280
+#define IPA_RAM_V6_FLT_SIZE 1792
#define IPA_RAM_V6_RT_OFST (IPA_RAM_V6_FLT_OFST + IPA_RAM_V6_FLT_SIZE)
#define IPA_RAM_V6_RT_SIZE 512
#define IPA_RAM_END_OFST (IPA_RAM_V6_RT_OFST + IPA_RAM_V6_RT_SIZE)
+
#define IPA_RAM_V6_RT_SIZE_DDR 16384
+#define IPA_RAM_V4_RT_SIZE_DDR 16384
#endif /* _IPA_RAM_MMAP_H_ */
+
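
Note on the resized map above: the derived offsets chain out as HDR at 0 (1664 bytes), V4 filter at 1664 (2176), V4 route at 3840 (512), V6 filter at 4352 (1792), and V6 route at 6144 (512), for an end offset of 6656. A hedged compile-time check of that arithmetic; the helper is illustrative and BUILD_BUG_ON comes from <linux/bug.h>:

static inline void ipa_ram_layout_check(void)
{
        /* values follow directly from the #defines above */
        BUILD_BUG_ON(IPA_RAM_V4_FLT_OFST != 1664);
        BUILD_BUG_ON(IPA_RAM_V4_RT_OFST  != 3840);
        BUILD_BUG_ON(IPA_RAM_V6_FLT_OFST != 4352);
        BUILD_BUG_ON(IPA_RAM_V6_RT_OFST  != 6144);
        BUILD_BUG_ON(IPA_RAM_END_OFST    != 6656);
}
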
diff --git a/drivers/platform/msm/ipa/ipa_rt.c b/drivers/platform/msm/ipa/ipa_rt.c
index 5c27c40..8c0adbd 100644
--- a/drivers/platform/msm/ipa/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_rt.c
@@ -377,7 +377,8 @@
}
if (ip == IPA_IP_v4) {
- avail = IPA_RAM_V4_RT_SIZE;
+ avail = ipa_ctx->ip4_rt_tbl_lcl ? IPA_RAM_V4_RT_SIZE :
+ IPA_RAM_V4_RT_SIZE_DDR;
size = sizeof(struct ipa_ip_v4_routing_init);
} else {
avail = ipa_ctx->ip6_rt_tbl_lcl ? IPA_RAM_V6_RT_SIZE :
diff --git a/drivers/platform/msm/ipa/ipa_utils.c b/drivers/platform/msm/ipa/ipa_utils.c
index 21a6dc4..e38b796 100644
--- a/drivers/platform/msm/ipa/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_utils.c
@@ -13,7 +13,9 @@
#include <net/ip.h>
#include <linux/genalloc.h> /* gen_pool_alloc() */
#include <linux/io.h>
+#include <linux/ratelimit.h>
#include "ipa_i.h"
+
static const int ipa_ofst_meq32[] = { IPA_OFFSET_MEQ32_0,
IPA_OFFSET_MEQ32_1, -1 };
static const int ipa_ofst_meq128[] = { IPA_OFFSET_MEQ128_0,
@@ -46,6 +48,7 @@
if (ipa_ctx->ipa_hw_type != IPA_HW_v1_0)
ipa_route_offset = IPA_ROUTE_OFST_v2;
+ ipa_inc_client_enable_clks();
ipa_write_reg(ipa_ctx->mmio, ipa_route_offset,
IPA_SETFIELD(route->route_dis,
IPA_ROUTE_ROUTE_DIS_SHFT,
@@ -59,6 +62,7 @@
IPA_SETFIELD(route->route_def_hdr_ofst,
IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT,
IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK));
+ ipa_dec_client_disable_clks();
return 0;
}
@@ -75,10 +79,12 @@
if (ipa_ctx->ipa_hw_type != IPA_HW_v1_0)
ipa_filter_ofst = IPA_FILTER_OFST_v2;
+ ipa_inc_client_enable_clks();
ipa_write_reg(ipa_ctx->mmio, ipa_filter_ofst,
IPA_SETFIELD(!disable,
IPA_FILTER_FILTER_EN_SHFT,
IPA_FILTER_FILTER_EN_BMSK));
+ ipa_dec_client_disable_clks();
return 0;
}
@@ -707,6 +713,7 @@
}
/* copy over EP cfg */
ipa_ctx->ep[clnt_hdl].cfg.nat = *ipa_ep_cfg;
+ ipa_inc_client_enable_clks();
/* clnt_hdl is used as pipe_index */
if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0)
ipa_write_reg(ipa_ctx->mmio,
@@ -720,6 +727,7 @@
IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].cfg.nat.nat_en,
IPA_ENDP_INIT_NAT_n_NAT_EN_SHFT,
IPA_ENDP_INIT_NAT_n_NAT_EN_BMSK));
+ ipa_dec_client_disable_clks();
return 0;
}
@@ -772,12 +780,14 @@
IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_SHFT,
IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_BMSK);
+ ipa_inc_client_enable_clks();
if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0)
ipa_write_reg(ipa_ctx->mmio,
IPA_ENDP_INIT_HDR_n_OFST_v1(clnt_hdl), val);
else
ipa_write_reg(ipa_ctx->mmio,
IPA_ENDP_INIT_HDR_n_OFST_v2(clnt_hdl), val);
+ ipa_dec_client_disable_clks();
return 0;
}
@@ -826,12 +836,14 @@
IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_SHFT,
IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_BMSK);
+ ipa_inc_client_enable_clks();
if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0)
ipa_write_reg(ipa_ctx->mmio,
IPA_ENDP_INIT_MODE_n_OFST_v1(clnt_hdl), val);
else
ipa_write_reg(ipa_ctx->mmio,
IPA_ENDP_INIT_MODE_n_OFST_v2(clnt_hdl), val);
+ ipa_dec_client_disable_clks();
return 0;
}
@@ -871,12 +883,14 @@
IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT,
IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK);
+ ipa_inc_client_enable_clks();
if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0)
ipa_write_reg(ipa_ctx->mmio,
IPA_ENDP_INIT_AGGR_n_OFST_v1(clnt_hdl), val);
else
ipa_write_reg(ipa_ctx->mmio,
IPA_ENDP_INIT_AGGR_n_OFST_v2(clnt_hdl), val);
+ ipa_dec_client_disable_clks();
return 0;
}
@@ -919,6 +933,7 @@
/* always use the "default" routing tables whose indices are 0 */
ipa_ctx->ep[clnt_hdl].rt_tbl_idx = 0;
+ ipa_inc_client_enable_clks();
if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0) {
ipa_write_reg(ipa_ctx->mmio,
IPA_ENDP_INIT_ROUTE_n_OFST_v1(clnt_hdl),
@@ -932,6 +947,7 @@
IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_SHFT,
IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_BMSK));
}
+ ipa_dec_client_disable_clks();
return 0;
}
@@ -970,12 +986,14 @@
return -EPERM;
} else {
ipa_ctx->ep[clnt_hdl].holb = *ipa_ep_cfg;
+ ipa_inc_client_enable_clks();
ipa_write_reg(ipa_ctx->mmio,
IPA_ENDP_INIT_HOL_BLOCK_EN_n_OFST(clnt_hdl),
ipa_ep_cfg->en);
ipa_write_reg(ipa_ctx->mmio,
IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_OFST(clnt_hdl),
ipa_ep_cfg->tmr_val);
+ ipa_dec_client_disable_clks();
IPAERR("cfg holb %u ep=%d tmr=%d\n", ipa_ep_cfg->en, clnt_hdl,
ipa_ep_cfg->tmr_val);
}
@@ -1229,6 +1247,7 @@
{
u32 reg_val;
+ ipa_inc_client_enable_clks();
if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0) {
reg_val = ipa_read_reg(ipa_ctx->mmio,
IPA_AGGREGATION_SPARE_REG_2_OFST);
@@ -1243,6 +1262,7 @@
(reg_val & 0xfffffffe));
}
+ ipa_dec_client_disable_clks();
return 0;
}
EXPORT_SYMBOL(ipa_set_aggr_mode);
@@ -1262,11 +1282,12 @@
{
u32 reg_val;
+ if (sig == NULL) {
+ IPAERR("bad argument for ipa_set_qcncm_ndp_sig/n");
+ return -EINVAL;
+ }
+ ipa_inc_client_enable_clks();
if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0) {
- if (sig == NULL) {
- IPAERR("bad argument for ipa_set_qcncm_ndp_sig/n");
- return -EINVAL;
- }
reg_val = ipa_read_reg(ipa_ctx->mmio,
IPA_AGGREGATION_SPARE_REG_2_OFST);
ipa_write_reg(ipa_ctx->mmio,
@@ -1281,6 +1302,7 @@
(sig[1] << 12) | (sig[2] << 4) |
(reg_val & 0xf000000f));
}
+ ipa_dec_client_disable_clks();
return 0;
}
EXPORT_SYMBOL(ipa_set_qcncm_ndp_sig);
@@ -1296,6 +1318,7 @@
{
u32 reg_val;
+ ipa_inc_client_enable_clks();
if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0) {
reg_val = ipa_read_reg(ipa_ctx->mmio,
IPA_AGGREGATION_SPARE_REG_1_OFST);
@@ -1308,6 +1331,7 @@
ipa_write_reg(ipa_ctx->mmio, IPA_SINGLE_NDP_MODE_OFST,
(enable & 0x1) | (reg_val & 0xfffffffe));
}
+ ipa_dec_client_disable_clks();
return 0;
}
EXPORT_SYMBOL(ipa_set_single_ndp_per_mbim);
@@ -1322,10 +1346,12 @@
int ipa_set_hw_timer_fix_for_mbim_aggr(bool enable)
{
u32 reg_val;
+ ipa_inc_client_enable_clks();
reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_1_OFST);
ipa_write_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_1_OFST,
(enable << IPA_AGGREGATION_HW_TIMER_FIX_MBIM_AGGR_SHFT) |
(reg_val & ~IPA_AGGREGATION_HW_TIMER_FIX_MBIM_AGGR_BMSK));
+ ipa_dec_client_disable_clks();
return 0;
}
EXPORT_SYMBOL(ipa_set_hw_timer_fix_for_mbim_aggr);
@@ -1358,3 +1384,26 @@
else
return 0;
}
+
+/**
+ * ipa_bam_reg_dump() - Dump selected BAM registers for IPA and DMA-BAM
+ *
+ * Function is rate limited to avoid flooding the kernel log buffer
+ */
+void ipa_bam_reg_dump(void)
+{
+ static DEFINE_RATELIMIT_STATE(_rs, 500*HZ, 1);
+ if (__ratelimit(&_rs)) {
+ ipa_inc_client_enable_clks();
+ pr_err("IPA BAM START\n");
+ sps_get_bam_debug_info(ipa_ctx->bam_handle, 5, 1048575, 0, 0);
+ sps_get_bam_debug_info(ipa_ctx->bam_handle, 93, 0, 0, 0);
+ pr_err("BAM-DMA BAM START\n");
+ sps_get_bam_debug_info(sps_dma_get_bam_handle(), 5, 1044480,
+ 0, 0);
+ sps_get_bam_debug_info(sps_dma_get_bam_handle(), 93, 0, 0, 0);
+ ipa_dec_client_disable_clks();
+ }
+}
+EXPORT_SYMBOL(ipa_bam_reg_dump);
+
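
Note on ipa_bam_reg_dump() above: it gates the expensive sps_get_bam_debug_info() calls behind DEFINE_RATELIMIT_STATE with an interval of 500*HZ jiffies and a burst of 1, i.e. at most one full dump every 500 seconds. The same idiom in isolation; my_dump_state() is a hypothetical example:

#include <linux/ratelimit.h>

static void my_dump_state(void)
{
        static DEFINE_RATELIMIT_STATE(rs, 500 * HZ, 1);

        /* __ratelimit() returns nonzero when the call is allowed through */
        if (__ratelimit(&rs))
                pr_err("dumping debug state\n");
}
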
diff --git a/drivers/video/msm/mdss/mdp3.c b/drivers/video/msm/mdss/mdp3.c
index aecb19d..91e70a7 100644
--- a/drivers/video/msm/mdss/mdp3.c
+++ b/drivers/video/msm/mdss/mdp3.c
@@ -539,13 +539,6 @@
return 0;
}
-static int mdp3_iommu_fault_handler(struct iommu_domain *domain,
- struct device *dev, unsigned long iova, int flags, void *token)
-{
- pr_err("MDP IOMMU page fault: iova 0x%lx\n", iova);
- return 0;
-}
-
int mdp3_iommu_attach(int context)
{
struct mdp3_iommu_ctx_map *context_map;
@@ -621,9 +614,6 @@
else
return PTR_ERR(mdp3_iommu_domains[i].domain);
}
- iommu_set_fault_handler(mdp3_iommu_domains[i].domain,
- mdp3_iommu_fault_handler,
- NULL);
}
mdp3_res->domains = mdp3_iommu_domains;
diff --git a/drivers/video/msm/mdss/mdss_dsi.c b/drivers/video/msm/mdss/mdss_dsi.c
index 366209b..d2f1461 100644
--- a/drivers/video/msm/mdss/mdss_dsi.c
+++ b/drivers/video/msm/mdss/mdss_dsi.c
@@ -416,6 +416,14 @@
mdss_dsi_clk_ctrl(ctrl_pdata, 0);
+ ret = mdss_dsi_enable_bus_clocks(ctrl_pdata);
+ if (ret) {
+ pr_err("%s: failed to enable bus clocks. rc=%d\n", __func__,
+ ret);
+ mdss_dsi_panel_power_on(pdata, 0);
+ return ret;
+ }
+
/* disable DSI phy */
mdss_dsi_phy_enable(ctrl_pdata, 0);
@@ -479,6 +487,7 @@
mdss_dsi_phy_sw_reset((ctrl_pdata->ctrl_base));
mdss_dsi_phy_init(pdata);
+ mdss_dsi_disable_bus_clocks(ctrl_pdata);
mdss_dsi_clk_ctrl(ctrl_pdata, 1);
@@ -1143,15 +1152,6 @@
return rc;
}
- rc = mdss_dsi_enable_bus_clocks(ctrl_pdata);
- if (rc) {
- pr_err("%s: failed to enable bus clocks. rc=%d\n",
- __func__, rc);
- rc = mdss_dsi_panel_power_on(
- &(ctrl_pdata->panel_data), 0);
- return rc;
- }
-
mdss_dsi_clk_ctrl(ctrl_pdata, 1);
ctrl_pdata->ctrl_state |=
(CTRL_STATE_PANEL_INIT | CTRL_STATE_MDP_ACTIVE);
diff --git a/drivers/video/msm/mdss/mdss_dsi_host.c b/drivers/video/msm/mdss/mdss_dsi_host.c
index 60b8b2e..3e70afd 100644
--- a/drivers/video/msm/mdss/mdss_dsi_host.c
+++ b/drivers/video/msm/mdss/mdss_dsi_host.c
@@ -80,6 +80,7 @@
mutex_lock(&ctrl->mutex);
if (enable) {
if (ctrl->clk_cnt == 0) {
+ mdss_dsi_enable_bus_clocks(ctrl);
mdss_dsi_prepare_clocks(ctrl);
mdss_dsi_clk_enable(ctrl);
}
@@ -90,6 +91,7 @@
if (ctrl->clk_cnt == 0) {
mdss_dsi_clk_disable(ctrl);
mdss_dsi_unprepare_clocks(ctrl);
+ mdss_dsi_disable_bus_clocks(ctrl);
}
}
}
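
Note on the two mdss_dsi hunks above: bus-clock handling moves out of the panel on/off paths and into the refcounted mdss_dsi_clk_ctrl() gate, so bus clocks now come up before the link clocks on the 0->1 transition and go down after them on 1->0. A hedged sketch of the resulting balance rule; the clk_cnt updates are inferred from the checks shown, and the struct name is assumed:

static void dsi_clk_get_put(struct mdss_dsi_ctrl_pdata *ctrl, int enable)
{
        mutex_lock(&ctrl->mutex);
        if (enable) {
                if (ctrl->clk_cnt == 0) {       /* first user: bus first */
                        mdss_dsi_enable_bus_clocks(ctrl);
                        mdss_dsi_prepare_clocks(ctrl);
                        mdss_dsi_clk_enable(ctrl);
                }
                ctrl->clk_cnt++;
        } else {
                ctrl->clk_cnt--;
                if (ctrl->clk_cnt == 0) {       /* last user: bus last */
                        mdss_dsi_clk_disable(ctrl);
                        mdss_dsi_unprepare_clocks(ctrl);
                        mdss_dsi_disable_bus_clocks(ctrl);
                }
        }
        mutex_unlock(&ctrl->mutex);
}
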
diff --git a/drivers/video/msm/mdss/mdss_io_util.c b/drivers/video/msm/mdss/mdss_io_util.c
index 809db43..31058f8 100644
--- a/drivers/video/msm/mdss/mdss_io_util.c
+++ b/drivers/video/msm/mdss/mdss_io_util.c
@@ -13,6 +13,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/delay.h>
#include "mdss_io_util.h"
#define MAX_I2C_CMDS 16
@@ -160,19 +161,6 @@
curr_vreg->vreg_name);
goto vreg_set_voltage_fail;
}
- if (curr_vreg->peak_current >= 0) {
- rc = regulator_set_optimum_mode(
- curr_vreg->vreg,
- curr_vreg->peak_current);
- if (rc < 0) {
- DEV_ERR(
- "%pS->%s: %s set opt m fail\n",
- __builtin_return_address(0),
- __func__,
- curr_vreg->vreg_name);
- goto vreg_set_opt_mode_fail;
- }
- }
}
}
} else {
@@ -183,10 +171,6 @@
curr_vreg->vreg) > 0)
? DSS_REG_LDO : DSS_REG_VS;
if (type == DSS_REG_LDO) {
- if (curr_vreg->peak_current >= 0) {
- regulator_set_optimum_mode(
- curr_vreg->vreg, 0);
- }
regulator_set_voltage(curr_vreg->vreg,
0, curr_vreg->max_voltage);
}
@@ -201,10 +185,6 @@
if (type == DSS_REG_LDO)
regulator_set_optimum_mode(curr_vreg->vreg, 0);
-vreg_set_opt_mode_fail:
-if (type == DSS_REG_LDO)
- regulator_set_voltage(curr_vreg->vreg, 0, curr_vreg->max_voltage);
-
vreg_set_voltage_fail:
regulator_put(curr_vreg->vreg);
curr_vreg->vreg = NULL;
@@ -229,9 +209,19 @@
DEV_ERR("%pS->%s: %s regulator error. rc=%d\n",
__builtin_return_address(0), __func__,
in_vreg[i].vreg_name, rc);
- goto disable_vreg;
+ goto vreg_set_opt_mode_fail;
+ }
+ msleep(in_vreg[i].pre_on_sleep);
+ rc = regulator_set_optimum_mode(in_vreg[i].vreg,
+ in_vreg[i].peak_current);
+ if (rc < 0) {
+ DEV_ERR("%pS->%s: %s set opt m fail\n",
+ __builtin_return_address(0), __func__,
+ in_vreg[i].vreg_name);
+ goto vreg_set_opt_mode_fail;
}
rc = regulator_enable(in_vreg[i].vreg);
+ msleep(in_vreg[i].post_on_sleep);
if (rc < 0) {
DEV_ERR("%pS->%s: %s enable failed\n",
__builtin_return_address(0), __func__,
@@ -241,14 +231,26 @@
}
} else {
for (i = num_vreg-1; i >= 0; i--)
- if (regulator_is_enabled(in_vreg[i].vreg))
+ if (regulator_is_enabled(in_vreg[i].vreg)) {
+ msleep(in_vreg[i].pre_off_sleep);
+ regulator_set_optimum_mode(in_vreg[i].vreg, 0);
regulator_disable(in_vreg[i].vreg);
+ msleep(in_vreg[i].post_off_sleep);
+ }
}
return rc;
disable_vreg:
- for (i--; i >= 0; i--)
+ regulator_set_optimum_mode(in_vreg[i].vreg, 0);
+
+vreg_set_opt_mode_fail:
+ for (i--; i >= 0; i--) {
+ msleep(in_vreg[i].pre_off_sleep);
+ regulator_set_optimum_mode(in_vreg[i].vreg, 0);
regulator_disable(in_vreg[i].vreg);
+ msleep(in_vreg[i].post_off_sleep);
+ }
+
return rc;
} /* msm_dss_enable_vreg */
diff --git a/drivers/video/msm/mdss/mdss_io_util.h b/drivers/video/msm/mdss/mdss_io_util.h
index 23341d6..cb0fb70 100644
--- a/drivers/video/msm/mdss/mdss_io_util.h
+++ b/drivers/video/msm/mdss/mdss_io_util.h
@@ -53,6 +53,10 @@
int min_voltage;
int max_voltage;
int peak_current;
+ int pre_on_sleep;
+ int post_on_sleep;
+ int pre_off_sleep;
+ int post_off_sleep;
};
struct dss_gpio {
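
Note on the four new fields above: they let board files sequence regulator ramps without driver changes — msm_dss_enable_vreg() now sleeps pre_on_sleep ms before and post_on_sleep ms after regulator_enable(), and likewise around regulator_disable(). An illustrative table entry; the supply name and delays are made up, and the enclosing struct is assumed to be dss_vreg per this header's naming:

static struct dss_vreg panel_vdd = {
        .vreg_name      = "vdd",        /* regulator supply name */
        .min_voltage    = 2850000,      /* uV */
        .max_voltage    = 2850000,      /* uV */
        .peak_current   = 100000,       /* uA, for set_optimum_mode */
        .pre_on_sleep   = 0,            /* ms before regulator_enable() */
        .post_on_sleep  = 20,           /* ms after regulator_enable() */
        .pre_off_sleep  = 0,            /* ms before regulator_disable() */
        .post_off_sleep = 20,           /* ms after regulator_disable() */
};
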
diff --git a/drivers/video/msm/mdss/mdss_mdp.c b/drivers/video/msm/mdss/mdss_mdp.c
index c4bf67e..fc8c6e3 100644
--- a/drivers/video/msm/mdss/mdss_mdp.c
+++ b/drivers/video/msm/mdss/mdss_mdp.c
@@ -703,13 +703,6 @@
return 0;
}
-static int mdss_iommu_fault_handler(struct iommu_domain *domain,
- struct device *dev, unsigned long iova, int flags, void *token)
-{
- pr_err("MDP IOMMU page fault: iova 0x%lx\n", iova);
- return 0;
-}
-
int mdss_iommu_attach(struct mdss_data_type *mdata)
{
struct iommu_domain *domain;
@@ -796,7 +789,6 @@
iomap->domain_idx);
return -EINVAL;
}
- iommu_set_fault_handler(domain, mdss_iommu_fault_handler, NULL);
iomap->ctx = msm_iommu_get_ctx(iomap->ctx_name);
if (!iomap->ctx) {
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c b/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
index bd4f3ea..89e21d2 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
@@ -26,20 +26,20 @@
/* wait for at most 2 vsync for lowest refresh rate (24hz) */
#define KOFF_TIMEOUT msecs_to_jiffies(84)
+#define STOP_TIMEOUT msecs_to_jiffies(16 * (VSYNC_EXPIRE_TICK + 2))
+
struct mdss_mdp_cmd_ctx {
struct mdss_mdp_ctl *ctl;
u32 pp_num;
u8 ref_cnt;
- struct completion vsync_comp;
struct completion pp_comp;
struct completion stop_comp;
mdp_vsync_handler_t send_vsync;
int panel_on;
int koff_cnt;
int clk_enabled;
- int clk_control;
int vsync_enabled;
- int expire;
+ int rdptr_enabled;
struct mutex clk_mtx;
spinlock_t clk_lock;
struct work_struct clk_work;
@@ -54,6 +54,50 @@
struct mdss_mdp_cmd_ctx mdss_mdp_cmd_ctx_list[MAX_SESSIONS];
+static inline u32 mdss_mdp_cmd_line_count(struct mdss_mdp_ctl *ctl)
+{
+ struct mdss_mdp_mixer *mixer;
+ u32 cnt = 0xffff; /* init it to an invalid value */
+ u32 init;
+ u32 height;
+
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+
+ mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_LEFT);
+ if (!mixer) {
+ mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_RIGHT);
+ if (!mixer) {
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+ goto exit;
+ }
+ }
+
+ init = mdss_mdp_pingpong_read
+ (mixer, MDSS_MDP_REG_PP_VSYNC_INIT_VAL) & 0xffff;
+
+ height = mdss_mdp_pingpong_read
+ (mixer, MDSS_MDP_REG_PP_SYNC_CONFIG_HEIGHT) & 0xffff;
+
+ if (height < init) {
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+ goto exit;
+ }
+
+ cnt = mdss_mdp_pingpong_read
+ (mixer, MDSS_MDP_REG_PP_INT_COUNT_VAL) & 0xffff;
+
+ if (cnt < init) /* wrap around happened at height */
+ cnt += (height - init);
+ else
+ cnt -= init;
+
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+
+ pr_debug("cnt=%d init=%d height=%d\n", cnt, init, height);
+exit:
+ return cnt;
+}
+
/*
* TE configuration:
* dsi byte clock calculated base on 70 fps
@@ -146,6 +190,45 @@
return 0;
}
+static inline void mdss_mdp_cmd_clk_on(struct mdss_mdp_cmd_ctx *ctx)
+{
+ unsigned long flags;
+ mutex_lock(&ctx->clk_mtx);
+ if (!ctx->clk_enabled) {
+ ctx->clk_enabled = 1;
+ mdss_mdp_ctl_intf_event
+ (ctx->ctl, MDSS_EVENT_PANEL_CLK_CTRL, (void *)1);
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+ }
+ spin_lock_irqsave(&ctx->clk_lock, flags);
+ if (!ctx->rdptr_enabled)
+ mdss_mdp_irq_enable(MDSS_MDP_IRQ_PING_PONG_RD_PTR, ctx->pp_num);
+ ctx->rdptr_enabled = VSYNC_EXPIRE_TICK;
+ spin_unlock_irqrestore(&ctx->clk_lock, flags);
+ mutex_unlock(&ctx->clk_mtx);
+}
+
+static inline void mdss_mdp_cmd_clk_off(struct mdss_mdp_cmd_ctx *ctx)
+{
+ unsigned long flags;
+ int set_clk_off = 0;
+
+ mutex_lock(&ctx->clk_mtx);
+ spin_lock_irqsave(&ctx->clk_lock, flags);
+ if (!ctx->rdptr_enabled)
+ set_clk_off = 1;
+ spin_unlock_irqrestore(&ctx->clk_lock, flags);
+
+ if (ctx->clk_enabled && set_clk_off) {
+ ctx->clk_enabled = 0;
+ mdss_mdp_ctl_intf_event
+ (ctx->ctl, MDSS_EVENT_PANEL_CLK_CTRL, (void *)0);
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+ complete(&ctx->stop_comp);
+ }
+ mutex_unlock(&ctx->clk_mtx);
+}
+
static void mdss_mdp_cmd_readptr_done(void *arg)
{
struct mdss_mdp_ctl *ctl = arg;
@@ -157,11 +240,6 @@
return;
}
- complete_all(&ctx->vsync_comp);
-
- pr_debug("%s: num=%d ctx=%d expire=%d koff=%d\n", __func__, ctl->num,
- ctx->pp_num, ctx->expire, ctx->koff_cnt);
-
vsync_time = ktime_get();
ctl->vsync_cnt++;
@@ -169,18 +247,17 @@
if (ctx->send_vsync)
ctx->send_vsync(ctl, vsync_time);
- if (ctx->expire) {
- ctx->expire--;
- if (ctx->expire == 0) {
- if (ctx->koff_cnt <= 0) {
- ctx->clk_control = 1;
- schedule_work(&ctx->clk_work);
- } else {
- /* put off one vsync */
- ctx->expire += 1;
- }
- }
+ if (!ctx->vsync_enabled) {
+ if (ctx->rdptr_enabled)
+ ctx->rdptr_enabled--;
}
+
+ if (ctx->rdptr_enabled == 0) {
+ mdss_mdp_irq_disable_nosync
+ (MDSS_MDP_IRQ_PING_PONG_RD_PTR, ctx->pp_num);
+ schedule_work(&ctx->clk_work);
+ }
+
spin_unlock(&ctx->clk_lock);
}
@@ -199,8 +276,15 @@
complete_all(&ctx->pp_comp);
- if (ctx->koff_cnt)
+ if (ctx->koff_cnt) {
ctx->koff_cnt--;
+ if (ctx->koff_cnt) {
+ pr_err("%s: too many kickoffs=%d!\n", __func__,
+ ctx->koff_cnt);
+ ctx->koff_cnt = 0;
+ }
+ } else
+ pr_err("%s: should not have pingpong interrupt!\n", __func__);
pr_debug("%s: ctl_num=%d intf_num=%d ctx=%d kcnt=%d\n", __func__,
ctl->num, ctl->intf_num, ctx->pp_num, ctx->koff_cnt);
@@ -210,7 +294,6 @@
static void clk_ctrl_work(struct work_struct *work)
{
- unsigned long flags;
struct mdss_mdp_cmd_ctx *ctx =
container_of(work, typeof(*ctx), clk_work);
@@ -219,38 +302,14 @@
return;
}
- pr_debug("%s:ctx=%p num=%d\n", __func__, ctx, ctx->pp_num);
-
- mutex_lock(&ctx->clk_mtx);
- spin_lock_irqsave(&ctx->clk_lock, flags);
- if (ctx->clk_control && ctx->clk_enabled) {
- ctx->clk_enabled = 0;
- ctx->clk_control = 0;
- spin_unlock_irqrestore(&ctx->clk_lock, flags);
- /*
- * make sure dsi link is idle here
- */
- ctx->vsync_enabled = 0;
- mdss_mdp_irq_disable(MDSS_MDP_IRQ_PING_PONG_RD_PTR,
- ctx->pp_num);
- mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
- /* disable dsi clock */
- mdss_mdp_ctl_intf_event(ctx->ctl, MDSS_EVENT_PANEL_CLK_CTRL,
- (void *)0);
- complete(&ctx->stop_comp);
- pr_debug("%s: SET_CLK_OFF, pid=%d\n", __func__, current->pid);
- } else {
- spin_unlock_irqrestore(&ctx->clk_lock, flags);
- }
- mutex_unlock(&ctx->clk_mtx);
+ mdss_mdp_cmd_clk_off(ctx);
}
-static int mdss_mdp_cmd_vsync_ctrl(struct mdss_mdp_ctl *ctl,
- struct mdss_mdp_vsync_handler *handler)
+static int mdss_mdp_cmd_add_vsync_handler(struct mdss_mdp_ctl *ctl,
+ struct mdss_mdp_vsync_handler *handle)
{
struct mdss_mdp_cmd_ctx *ctx;
unsigned long flags;
- int enable;
ctx = (struct mdss_mdp_cmd_ctx *) ctl->priv_data;
if (!ctx) {
@@ -258,81 +317,44 @@
return -ENODEV;
}
- enable = (handler->vsync_handler != NULL);
-
- mutex_lock(&ctx->clk_mtx);
-
- pr_debug("%s: ctx=%p ctx=%d enabled=%d %d clk_enabled=%d clk_ctrl=%d\n",
- __func__, ctx, ctx->pp_num, ctx->vsync_enabled, enable,
- ctx->clk_enabled, ctx->clk_control);
-
- if (ctx->vsync_enabled == enable) {
- mutex_unlock(&ctx->clk_mtx);
+ spin_lock_irqsave(&ctx->clk_lock, flags);
+ if (ctx->vsync_enabled) {
+ spin_unlock_irqrestore(&ctx->clk_lock, flags);
return 0;
}
+ ctx->vsync_enabled = 1;
+ ctx->send_vsync = handle->vsync_handler;
+ spin_unlock_irqrestore(&ctx->clk_lock, flags);
- if (enable) {
- spin_lock_irqsave(&ctx->clk_lock, flags);
- ctx->clk_control = 0;
- ctx->expire = 0;
- ctx->send_vsync = handler->vsync_handler;
- spin_unlock_irqrestore(&ctx->clk_lock, flags);
- if (ctx->clk_enabled == 0) {
- mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_PANEL_CLK_CTRL,
- (void *)1);
- mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
- mdss_mdp_irq_enable(MDSS_MDP_IRQ_PING_PONG_RD_PTR,
- ctx->pp_num);
- ctx->vsync_enabled = 1;
- ctx->clk_enabled = 1;
- pr_debug("%s: SET_CLK_ON, pid=%d\n", __func__,
- current->pid);
- }
- } else {
- spin_lock_irqsave(&ctx->clk_lock, flags);
- ctx->expire = VSYNC_EXPIRE_TICK;
- spin_unlock_irqrestore(&ctx->clk_lock, flags);
- }
- mutex_unlock(&ctx->clk_mtx);
+ mdss_mdp_cmd_clk_on(ctx);
return 0;
}
-static void mdss_mdp_cmd_chk_clock(struct mdss_mdp_cmd_ctx *ctx)
+static int mdss_mdp_cmd_remove_vsync_handler(struct mdss_mdp_ctl *ctl,
+ struct mdss_mdp_vsync_handler *handle)
{
- unsigned long flags;
- int set_clk_on = 0;
+ struct mdss_mdp_cmd_ctx *ctx;
+ unsigned long flags;
+
+ ctx = (struct mdss_mdp_cmd_ctx *) ctl->priv_data;
if (!ctx) {
- pr_err("invalid ctx\n");
- return;
+ pr_err("%s: invalid ctx\n", __func__);
+ return -ENODEV;
}
- mutex_lock(&ctx->clk_mtx);
-
- pr_debug("%s: ctx=%p num=%d clk_enabled=%d\n", __func__,
- ctx, ctx->pp_num, ctx->clk_enabled);
spin_lock_irqsave(&ctx->clk_lock, flags);
- ctx->koff_cnt++;
- ctx->clk_control = 0;
- ctx->expire = VSYNC_EXPIRE_TICK;
- if (ctx->clk_enabled == 0) {
- set_clk_on++;
- ctx->clk_enabled = 1;
+ if (!ctx->vsync_enabled) {
+ spin_unlock_irqrestore(&ctx->clk_lock, flags);
+ return 0;
}
+ ctx->vsync_enabled = 0;
+ ctx->send_vsync = NULL;
+ ctx->rdptr_enabled = VSYNC_EXPIRE_TICK;
spin_unlock_irqrestore(&ctx->clk_lock, flags);
-
- if (set_clk_on) {
- mdss_mdp_ctl_intf_event(ctx->ctl, MDSS_EVENT_PANEL_CLK_CTRL,
- (void *)1);
- mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
- ctx->vsync_enabled = 1;
- mdss_mdp_irq_enable(MDSS_MDP_IRQ_PING_PONG_RD_PTR, ctx->pp_num);
- pr_debug("%s: ctx=%p num=%d SET_CLK_ON\n", __func__,
- ctx, ctx->pp_num);
- }
- mutex_unlock(&ctx->clk_mtx);
+ return 0;
}
static int mdss_mdp_cmd_wait4pingpong(struct mdss_mdp_ctl *ctl, void *arg)
@@ -374,6 +396,7 @@
int mdss_mdp_cmd_kickoff(struct mdss_mdp_ctl *ctl, void *arg)
{
struct mdss_mdp_cmd_ctx *ctx;
+ unsigned long flags;
int rc;
ctx = (struct mdss_mdp_cmd_ctx *) ctl->priv_data;
@@ -392,18 +415,19 @@
WARN(rc, "intf %d panel on error (%d)\n", ctl->intf_num, rc);
}
- mdss_mdp_cmd_chk_clock(ctx);
+ mdss_mdp_cmd_clk_on(ctx);
/*
* tx dcs command if had any
*/
mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_DSI_CMDLIST_KOFF, NULL);
- INIT_COMPLETION(ctx->vsync_comp);
INIT_COMPLETION(ctx->pp_comp);
mdss_mdp_irq_enable(MDSS_MDP_IRQ_PING_PONG_COMP, ctx->pp_num);
-
mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_START, 1);
+ spin_lock_irqsave(&ctx->clk_lock, flags);
+ ctx->koff_cnt++;
+ spin_unlock_irqrestore(&ctx->clk_lock, flags);
mb();
return 0;
@@ -412,8 +436,8 @@
int mdss_mdp_cmd_stop(struct mdss_mdp_ctl *ctl)
{
struct mdss_mdp_cmd_ctx *ctx;
+ unsigned long flags;
int need_wait = 0;
- struct mdss_mdp_vsync_handler null_handle;
int ret;
ctx = (struct mdss_mdp_cmd_ctx *) ctl->priv_data;
@@ -421,25 +445,27 @@
pr_err("invalid ctx\n");
return -ENODEV;
}
-
- pr_debug("%s:+ vaync_enable=%d expire=%d\n", __func__,
- ctx->vsync_enabled, ctx->expire);
-
- mutex_lock(&ctx->clk_mtx);
- if (ctx->vsync_enabled) {
+ spin_lock_irqsave(&ctx->clk_lock, flags);
+ if (ctx->rdptr_enabled) {
INIT_COMPLETION(ctx->stop_comp);
need_wait = 1;
}
- mutex_unlock(&ctx->clk_mtx);
+ if (ctx->vsync_enabled) {
+ pr_err("%s: vsync should be disabled\n", __func__);
+ ctx->vsync_enabled = 0;
+ }
+ spin_unlock_irqrestore(&ctx->clk_lock, flags);
- if (need_wait)
- wait_for_completion_interruptible(&ctx->stop_comp);
+ if (need_wait) {
+ if (wait_for_completion_timeout(&ctx->stop_comp, STOP_TIMEOUT)
+ <= 0) {
+ WARN(1, "stop cmd time out\n");
+ mdss_mdp_cmd_clk_off(ctx);
+ }
+ }
ctx->panel_on = 0;
- null_handle.vsync_handler = NULL;
- mdss_mdp_cmd_vsync_ctrl(ctl, &null_handle);
-
mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_PING_PONG_RD_PTR, ctx->pp_num,
NULL, NULL);
mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_PING_PONG_COMP, ctx->pp_num,
@@ -499,7 +525,6 @@
ctx->ctl = ctl;
ctx->pp_num = mixer->num;
- init_completion(&ctx->vsync_comp);
init_completion(&ctx->pp_comp);
init_completion(&ctx->stop_comp);
spin_lock_init(&ctx->clk_lock);
@@ -524,9 +549,9 @@
ctl->stop_fnc = mdss_mdp_cmd_stop;
ctl->display_fnc = mdss_mdp_cmd_kickoff;
ctl->wait_pingpong = mdss_mdp_cmd_wait4pingpong;
- ctl->add_vsync_handler = mdss_mdp_cmd_vsync_ctrl;
- ctl->remove_vsync_handler = mdss_mdp_cmd_vsync_ctrl;
-
+ ctl->add_vsync_handler = mdss_mdp_cmd_add_vsync_handler;
+ ctl->remove_vsync_handler = mdss_mdp_cmd_remove_vsync_handler;
+ ctl->read_line_cnt_fnc = mdss_mdp_cmd_line_count;
pr_debug("%s:-\n", __func__);
return 0;
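
Note on the rewrite above: it replaces the old expire/clk_control handshake with a single rdptr_enabled countdown — every kickoff (and every add_vsync_handler()) rearms it to VSYNC_EXPIRE_TICK, each read-pointer interrupt decrements it while no vsync client is attached, and at zero the interrupt is disabled and clk_work gates the clocks. A standalone userspace model of the countdown, assuming VSYNC_EXPIRE_TICK is 4 (the define sits outside this hunk):

#include <stdio.h>

#define VSYNC_EXPIRE_TICK 4     /* assumed value */

int main(void)
{
        int rdptr_enabled, vsync_enabled = 0, clk_enabled, tick;

        /* kickoff: mdss_mdp_cmd_clk_on() rearms the countdown */
        clk_enabled = 1;
        rdptr_enabled = VSYNC_EXPIRE_TICK;

        /* read-pointer irqs arrive with no vsync client attached */
        for (tick = 1; rdptr_enabled; tick++) {
                if (!vsync_enabled)
                        rdptr_enabled--;
                printf("irq %d: rdptr_enabled=%d\n", tick, rdptr_enabled);
        }

        /* countdown hit zero: clk_work runs mdss_mdp_cmd_clk_off() */
        clk_enabled = 0;
        printf("clocks gated (clk_enabled=%d)\n", clk_enabled);
        return 0;
}

Any new kickoff before the countdown reaches zero rearms rdptr_enabled, keeping the clocks on across back-to-back frames.
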
diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c
index a78a10a..780ff94 100644
--- a/drivers/video/msm/mdss/mdss_mdp_overlay.c
+++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c
@@ -288,7 +288,7 @@
req->id = rot->session_id;
} else {
pr_err("Unable to setup rotator session\n");
- mdss_mdp_rotator_release(rot->session_id);
+ mdss_mdp_rotator_release(rot);
}
return ret;
@@ -894,10 +894,17 @@
pr_debug("unset ndx=%x\n", ndx);
- if (ndx & MDSS_MDP_ROT_SESSION_MASK)
- ret = mdss_mdp_rotator_release(ndx);
- else
+ if (ndx & MDSS_MDP_ROT_SESSION_MASK) {
+ struct mdss_mdp_rotator_session *rot;
+ rot = mdss_mdp_rotator_session_get(ndx);
+ if (rot) {
+ mdss_mdp_overlay_free_buf(&rot->src_buf);
+ mdss_mdp_overlay_free_buf(&rot->dst_buf);
+ ret = mdss_mdp_rotator_release(rot);
+ }
+ } else {
ret = mdss_mdp_overlay_release(mfd, ndx);
+ }
done:
mutex_unlock(&mdp5_data->ov_lock);
@@ -958,7 +965,6 @@
struct msmfb_overlay_data *req)
{
struct mdss_mdp_rotator_session *rot;
- struct mdss_mdp_data src_data, dst_data;
int ret;
u32 flgs;
@@ -970,26 +976,26 @@
flgs = rot->flags & MDP_SECURE_OVERLAY_SESSION;
- ret = mdss_mdp_overlay_get_buf(mfd, &src_data, &req->data, 1, flgs);
+ mdss_mdp_overlay_free_buf(&rot->src_buf);
+ ret = mdss_mdp_overlay_get_buf(mfd, &rot->src_buf, &req->data, 1, flgs);
if (ret) {
pr_err("src_data pmem error\n");
return ret;
}
- ret = mdss_mdp_overlay_get_buf(mfd, &dst_data, &req->dst_data, 1, flgs);
+ mdss_mdp_overlay_free_buf(&rot->dst_buf);
+ ret = mdss_mdp_overlay_get_buf(mfd, &rot->dst_buf,
+ &req->dst_data, 1, flgs);
if (ret) {
pr_err("dst_data pmem error\n");
goto dst_buf_fail;
}
- ret = mdss_mdp_rotator_queue(rot, &src_data, &dst_data);
+ ret = mdss_mdp_rotator_queue(rot, &rot->src_buf, &rot->dst_buf);
if (ret)
pr_err("rotator queue error session id=%x\n", req->id);
- mdss_mdp_overlay_free_buf(&dst_data);
dst_buf_fail:
- mdss_mdp_overlay_free_buf(&src_data);
-
return ret;
}
diff --git a/drivers/video/msm/mdss/mdss_mdp_pp.c b/drivers/video/msm/mdss/mdss_mdp_pp.c
index 75b6056..4dd9dcb 100644
--- a/drivers/video/msm/mdss/mdss_mdp_pp.c
+++ b/drivers/video/msm/mdss/mdss_mdp_pp.c
@@ -3329,6 +3329,14 @@
ret = 1;
else if (ptr >= 0x3200 || ptr == 0x100)
ret = 1;
+ else if (ptr == 0x104 || ptr == 0x614 || ptr == 0x714 ||
+ ptr == 0x814 || ptr == 0x914 || ptr == 0xa14)
+ ret = 1;
+ else if (ptr == 0x618 || ptr == 0x718 || ptr == 0x818 ||
+ ptr == 0x918 || ptr == 0xa18)
+ ret = 1;
+ else if (ptr == 0x2234 || ptr == 0x1e34 || ptr == 0x2634)
+ ret = 1;
}
end:
return ret;
diff --git a/drivers/video/msm/mdss/mdss_mdp_rotator.c b/drivers/video/msm/mdss/mdss_mdp_rotator.c
index f9894cc..8c45df8 100644
--- a/drivers/video/msm/mdss/mdss_mdp_rotator.c
+++ b/drivers/video/msm/mdss/mdss_mdp_rotator.c
@@ -366,18 +366,12 @@
return 0;
}
-int mdss_mdp_rotator_release(u32 ndx)
+int mdss_mdp_rotator_release(struct mdss_mdp_rotator_session *rot)
{
int rc = 0;
- struct mdss_mdp_rotator_session *rot;
+
mutex_lock(&rotator_lock);
- rot = mdss_mdp_rotator_session_get(ndx);
- if (rot) {
- mdss_mdp_rotator_finish(rot);
- } else {
- pr_warn("unknown session id=%x\n", ndx);
- rc = -ENOENT;
- }
+ rc = mdss_mdp_rotator_finish(rot);
mutex_unlock(&rotator_lock);
return rc;
diff --git a/drivers/video/msm/mdss/mdss_mdp_rotator.h b/drivers/video/msm/mdss/mdss_mdp_rotator.h
index 3401fe8..43c9e6a 100644
--- a/drivers/video/msm/mdss/mdss_mdp_rotator.h
+++ b/drivers/video/msm/mdss/mdss_mdp_rotator.h
@@ -39,6 +39,9 @@
u8 busy;
u8 no_wait;
+ struct mdss_mdp_data src_buf;
+ struct mdss_mdp_data dst_buf;
+
struct list_head head;
struct mdss_mdp_rotator_session *next;
};
@@ -67,7 +70,7 @@
struct mdss_mdp_data *src_data,
struct mdss_mdp_data *dst_data);
-int mdss_mdp_rotator_release(u32 ndx);
+int mdss_mdp_rotator_release(struct mdss_mdp_rotator_session *rot);
int mdss_mdp_rotator_release_all(void);
#endif /* MDSS_MDP_ROTATOR_H */
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 51a90b7..ee1c2ff 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -22,8 +22,9 @@
extern int fragmentation_index(struct zone *zone, unsigned int order);
extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
int order, gfp_t gfp_mask, nodemask_t *mask,
- bool sync);
+ bool sync, bool *contended);
extern int compact_pgdat(pg_data_t *pgdat, int order);
+extern void reset_isolation_suitable(pg_data_t *pgdat);
extern unsigned long compaction_suitable(struct zone *zone, int order);
/* Do not skip compaction more than 64 times */
@@ -61,10 +62,20 @@
return zone->compact_considered < (1UL << zone->compact_defer_shift);
}
+/* Returns true if restarting compaction after many failures */
+static inline bool compaction_restarting(struct zone *zone, int order)
+{
+ if (order < zone->compact_order_failed)
+ return false;
+
+ return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
+ zone->compact_considered >= 1UL << zone->compact_defer_shift;
+}
+
#else
static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
int order, gfp_t gfp_mask, nodemask_t *nodemask,
- bool sync)
+ bool sync, bool *contended)
{
return COMPACT_CONTINUE;
}
@@ -74,6 +85,10 @@
return COMPACT_CONTINUE;
}
+static inline void reset_isolation_suitable(pg_data_t *pgdat)
+{
+}
+
static inline unsigned long compaction_suitable(struct zone *zone, int order)
{
return COMPACT_SKIPPED;
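
Note on compaction_restarting() above: it pairs with the new reset_isolation_suitable() — once a zone has been deferred all the way to COMPACT_MAX_DEFER_SHIFT and the considered counter has caught up, the next attempt is treated as a restart and the per-pageblock skip hints are cleared. A hedged numeric walk-through, assuming COMPACT_MAX_DEFER_SHIFT == 6 to match the "64 times" comment above:

/*
 * compact_defer_shift == 6 && compact_considered >= 64
 *     -> true: this order has failed repeatedly; restart from scratch
 *        (including a blockskip flush) rather than defer again.
 * compact_defer_shift < 6, or compact_considered still below 1 << shift
 *     -> false: keep deferring normally.
 * order < zone->compact_order_failed
 *     -> false: smaller orders never hit the failure, no restart needed.
 */
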
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
index 73b94af..384f37a 100644
--- a/include/linux/diagchar.h
+++ b/include/linux/diagchar.h
@@ -118,10 +118,10 @@
/* This needs to be modified manually now, when we add
a new RANGE of SSIDs to the msg_mask_tbl */
#define MSG_MASK_TBL_CNT 24
-#define EVENT_LAST_ID 0x09B2
+#define EVENT_LAST_ID 0x09BE
#define MSG_SSID_0 0
-#define MSG_SSID_0_LAST 97
+#define MSG_SSID_0_LAST 96
#define MSG_SSID_1 500
#define MSG_SSID_1_LAST 506
#define MSG_SSID_2 1000
@@ -137,7 +137,7 @@
#define MSG_SSID_7 4600
#define MSG_SSID_7_LAST 4613
#define MSG_SSID_8 5000
-#define MSG_SSID_8_LAST 5029
+#define MSG_SSID_8_LAST 5030
#define MSG_SSID_9 5500
#define MSG_SSID_9_LAST 5516
#define MSG_SSID_10 6000
@@ -178,7 +178,7 @@
static const uint32_t msg_bld_masks_0[] = {
MSG_LVL_LOW,
MSG_LVL_MED,
- MSG_LVL_MED,
+ MSG_LVL_LOW,
MSG_LVL_ERROR,
MSG_LVL_LOW,
MSG_LVL_MED,
@@ -186,20 +186,20 @@
MSG_LVL_HIGH,
MSG_LVL_ERROR,
MSG_LVL_LOW,
- MSG_LVL_ERROR,
+ MSG_LVL_LOW,
MSG_LVL_ERROR,
MSG_LVL_MED,
MSG_LVL_MED,
MSG_LVL_MED,
MSG_LVL_HIGH,
MSG_LVL_HIGH,
- MSG_LVL_HIGH,
+ MSG_LVL_LOW,
MSG_LVL_LOW,
MSG_LVL_ERROR,
MSG_LVL_LOW,
MSG_LVL_MED,
MSG_LVL_MED,
- MSG_LVL_MED,
+ MSG_LVL_LOW,
MSG_LVL_MED,
MSG_LVL_LOW,
MSG_LVL_MED,
@@ -214,7 +214,7 @@
MSG_MASK_6|MSG_MASK_7|MSG_MASK_8|MSG_MASK_9|MSG_MASK_10| \
MSG_MASK_11|MSG_MASK_12|MSG_MASK_13|MSG_MASK_14| \
MSG_MASK_15|MSG_MASK_16|MSG_MASK_17,
- MSG_LVL_MED,
+ MSG_LVL_LOW,
MSG_LVL_MED,
MSG_LVL_HIGH,
MSG_LVL_HIGH,
@@ -238,7 +238,7 @@
MSG_LVL_MED|MSG_MASK_5 | \
MSG_MASK_6|MSG_MASK_7|MSG_MASK_8|MSG_MASK_9|MSG_MASK_10,
MSG_LVL_MED,
- MSG_LVL_MED,
+ MSG_LVL_LOW,
MSG_LVL_LOW,
MSG_LVL_MED,
MSG_LVL_LOW,
@@ -290,7 +290,6 @@
MSG_LVL_LOW,
MSG_LVL_LOW,
MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
- MSG_LVL_LOW
};
static const uint32_t msg_bld_masks_1[] = {
@@ -436,6 +435,7 @@
MSG_LVL_MED,
MSG_LVL_MED,
MSG_LVL_MED,
+ MSG_LVL_MED,
MSG_LVL_MED
};
@@ -725,7 +725,7 @@
/* LOG CODES */
#define LOG_0 0x0
-#define LOG_1 0x17FA
+#define LOG_1 0x1807
#define LOG_2 0x0
#define LOG_3 0x0
#define LOG_4 0x4910
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index f94efd2..14d38a5 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -66,7 +66,6 @@
struct lruvec *mem_cgroup_lru_add_list(struct zone *, struct page *,
enum lru_list);
void mem_cgroup_lru_del_list(struct page *, enum lru_list);
-void mem_cgroup_lru_del(struct page *);
struct lruvec *mem_cgroup_lru_move_lists(struct zone *, struct page *,
enum lru_list, enum lru_list);
@@ -79,6 +78,8 @@
extern void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
int order);
+bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
+ struct mem_cgroup *memcg);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg);
extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
@@ -92,10 +93,13 @@
int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
{
struct mem_cgroup *memcg;
+ int match;
+
rcu_read_lock();
memcg = mem_cgroup_from_task(rcu_dereference((mm)->owner));
+ match = __mem_cgroup_same_or_subtree(cgroup, memcg);
rcu_read_unlock();
- return cgroup == memcg;
+ return match;
}
extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);
@@ -262,10 +266,6 @@
{
}
-static inline void mem_cgroup_lru_del(struct page *page)
-{
-}
-
static inline struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
struct page *page,
enum lru_list from,
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 3cc3062..b98b4b9 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -344,17 +344,6 @@
/* Architecture-specific MM context */
mm_context_t context;
- /* Swap token stuff */
- /*
- * Last value of global fault stamp as seen by this process.
- * In other words, this value gives an indication of how long
- * it has been since this task got the token.
- * Look at mm/thrash.c
- */
- unsigned int faultstamp;
- unsigned int token_priority;
- unsigned int last_interval;
-
unsigned long flags; /* Must use atomic bitops to access the bits */
struct core_state *core_state; /* coredumping support */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index d63232a..7539e03 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -204,16 +204,14 @@
#define LRU_ALL_EVICTABLE (LRU_ALL_FILE | LRU_ALL_ANON)
#define LRU_ALL ((1 << NR_LRU_LISTS) - 1)
-/* Isolate inactive pages */
-#define ISOLATE_INACTIVE ((__force isolate_mode_t)0x1)
-/* Isolate active pages */
-#define ISOLATE_ACTIVE ((__force isolate_mode_t)0x2)
/* Isolate clean file */
-#define ISOLATE_CLEAN ((__force isolate_mode_t)0x4)
+#define ISOLATE_CLEAN ((__force isolate_mode_t)0x1)
/* Isolate unmapped file */
-#define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x8)
+#define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
-#define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x10)
+#define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x4)
+/* Isolate unevictable pages */
+#define ISOLATE_UNEVICTABLE ((__force isolate_mode_t)0x8)
/* LRU Isolation modes. */
typedef unsigned __bitwise__ isolate_mode_t;
@@ -378,6 +376,14 @@
*/
spinlock_t lock;
int all_unreclaimable; /* All pages pinned */
+#if defined CONFIG_COMPACTION || defined CONFIG_CMA
+ /* Set to true when the PG_migrate_skip bits should be cleared */
+ bool compact_blockskip_flush;
+
+ /* pfns where compaction scanners should start */
+ unsigned long compact_cached_free_pfn;
+ unsigned long compact_cached_migrate_pfn;
+#endif
#ifdef CONFIG_MEMORY_HOTPLUG
/* see spanned/present_pages for more description */
seqlock_t span_seqlock;
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index c88d2a9..86e4c91 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -108,6 +108,7 @@
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
PG_compound_lock,
#endif
+ PG_readahead, /* page in a readahead window */
__NR_PAGEFLAGS,
/* Filesystems */
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
index 19ef95d..eed27f4 100644
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -30,6 +30,9 @@
PB_migrate,
PB_migrate_end = PB_migrate + 3 - 1,
/* 3 bits required for migrate types */
+#ifdef CONFIG_COMPACTION
+ PB_migrate_skip,/* If set the block is skipped by compaction */
+#endif /* CONFIG_COMPACTION */
NR_PAGEBLOCK_BITS
};
@@ -65,10 +68,22 @@
void set_pageblock_flags_group(struct page *page, unsigned long flags,
int start_bitidx, int end_bitidx);
+#ifdef CONFIG_COMPACTION
+#define get_pageblock_skip(page) \
+ get_pageblock_flags_group(page, PB_migrate_skip, \
+ PB_migrate_skip + 1)
+#define clear_pageblock_skip(page) \
+ set_pageblock_flags_group(page, 0, PB_migrate_skip, \
+ PB_migrate_skip + 1)
+#define set_pageblock_skip(page) \
+ set_pageblock_flags_group(page, 1, PB_migrate_skip, \
+ PB_migrate_skip + 1)
+#endif /* CONFIG_COMPACTION */
+
#define get_pageblock_flags(page) \
- get_pageblock_flags_group(page, 0, NR_PAGEBLOCK_BITS-1)
+ get_pageblock_flags_group(page, 0, PB_migrate_end)
#define set_pageblock_flags(page, flags) \
set_pageblock_flags_group(page, flags, \
- 0, NR_PAGEBLOCK_BITS-1)
+ 0, PB_migrate_end)
#endif /* PAGEBLOCK_FLAGS_H */
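
Note on the new macros above: the skip bit gives compaction a persistent per-pageblock hint — set when a scan of the block isolates nothing, tested before the next scan, and cleared wholesale by __reset_isolation_suitable(). A minimal round-trip sketch; the helper names are illustrative, and page is assumed to be the first page of its pageblock:

static bool sketch_should_scan(struct page *page)
{
        if (get_pageblock_skip(page))   /* marked empty by a prior pass */
                return false;
        return true;
}

static void sketch_mark_empty(struct page *page)
{
        set_pageblock_skip(page);       /* nothing isolated here; skip next time */
}
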
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index fd07c45..a5ddc60 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -67,6 +67,17 @@
struct list_head same_anon_vma; /* locked by anon_vma->mutex */
};
+enum ttu_flags {
+ TTU_UNMAP = 0, /* unmap mode */
+ TTU_MIGRATION = 1, /* migration mode */
+ TTU_MUNLOCK = 2, /* munlock mode */
+ TTU_ACTION_MASK = 0xff,
+
+ TTU_IGNORE_MLOCK = (1 << 8), /* ignore mlock */
+ TTU_IGNORE_ACCESS = (1 << 9), /* don't age */
+ TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
+};
+
#ifdef CONFIG_MMU
static inline void get_anon_vma(struct anon_vma *anon_vma)
{
@@ -161,16 +172,6 @@
int page_referenced_one(struct page *, struct vm_area_struct *,
unsigned long address, unsigned int *mapcount, unsigned long *vm_flags);
-enum ttu_flags {
- TTU_UNMAP = 0, /* unmap mode */
- TTU_MIGRATION = 1, /* migration mode */
- TTU_MUNLOCK = 2, /* munlock mode */
- TTU_ACTION_MASK = 0xff,
-
- TTU_IGNORE_MLOCK = (1 << 8), /* ignore mlock */
- TTU_IGNORE_ACCESS = (1 << 9), /* don't age */
- TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
-};
#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
bool is_vma_temporary_stack(struct vm_area_struct *vma);
diff --git a/include/linux/swap.h b/include/linux/swap.h
index b1fd5c7..d5bd6ee 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -251,7 +251,7 @@
/* linux/mm/vmscan.c */
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
gfp_t gfp_mask, nodemask_t *mask);
-extern int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file);
+extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
gfp_t gfp_mask, bool noswap);
extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
@@ -355,23 +355,6 @@
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
-/* linux/mm/thrash.c */
-extern struct mm_struct *swap_token_mm;
-extern void grab_swap_token(struct mm_struct *);
-extern void __put_swap_token(struct mm_struct *);
-extern void disable_swap_token(struct mem_cgroup *memcg);
-
-static inline int has_swap_token(struct mm_struct *mm)
-{
- return (mm == swap_token_mm);
-}
-
-static inline void put_swap_token(struct mm_struct *mm)
-{
- if (has_swap_token(mm))
- __put_swap_token(mm);
-}
-
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
extern void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout);
@@ -476,24 +459,6 @@
return entry;
}
-/* linux/mm/thrash.c */
-static inline void put_swap_token(struct mm_struct *mm)
-{
-}
-
-static inline void grab_swap_token(struct mm_struct *mm)
-{
-}
-
-static inline int has_swap_token(struct mm_struct *mm)
-{
- return 0;
-}
-
-static inline void disable_swap_token(struct mem_cgroup *memcg)
-{
-}
-
static inline void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
{
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 06f8e38..8bbb324 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -37,8 +37,12 @@
KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
KSWAPD_SKIP_CONGESTION_WAIT,
PAGEOUTRUN, ALLOCSTALL, PGROTATED,
+#ifdef CONFIG_MIGRATION
+ PGMIGRATE_SUCCESS, PGMIGRATE_FAIL,
+#endif
#ifdef CONFIG_COMPACTION
- COMPACTBLOCKS, COMPACTPAGES, COMPACTPAGEFAILED,
+ COMPACTMIGRATE_SCANNED, COMPACTFREE_SCANNED,
+ COMPACTISOLATED,
COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS,
#endif
#ifdef CONFIG_HUGETLB_PAGE
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index f64560e..bab3b87 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -13,7 +13,7 @@
#define RECLAIM_WB_ANON 0x0001u
#define RECLAIM_WB_FILE 0x0002u
#define RECLAIM_WB_MIXED 0x0010u
-#define RECLAIM_WB_SYNC 0x0004u
+#define RECLAIM_WB_SYNC 0x0004u /* Unused, all reclaim async */
#define RECLAIM_WB_ASYNC 0x0008u
#define show_reclaim_flags(flags) \
@@ -25,15 +25,15 @@
{RECLAIM_WB_ASYNC, "RECLAIM_WB_ASYNC"} \
) : "RECLAIM_WB_NONE"
-#define trace_reclaim_flags(page, sync) ( \
+#define trace_reclaim_flags(page) ( \
(page_is_file_cache(page) ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \
- (sync & RECLAIM_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \
+ (RECLAIM_WB_ASYNC) \
)
-#define trace_shrink_flags(file, sync) ( \
- (sync & RECLAIM_MODE_SYNC ? RECLAIM_WB_MIXED : \
- (file ? RECLAIM_WB_FILE : RECLAIM_WB_ANON)) | \
- (sync & RECLAIM_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \
+#define trace_shrink_flags(file) \
+ ( \
+ (file ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \
+ (RECLAIM_WB_ASYNC) \
)
TRACE_EVENT(mm_vmscan_kswapd_sleep,
@@ -263,22 +263,16 @@
unsigned long nr_requested,
unsigned long nr_scanned,
unsigned long nr_taken,
- unsigned long nr_lumpy_taken,
- unsigned long nr_lumpy_dirty,
- unsigned long nr_lumpy_failed,
isolate_mode_t isolate_mode,
int file),
- TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode, file),
+ TP_ARGS(order, nr_requested, nr_scanned, nr_taken, isolate_mode, file),
TP_STRUCT__entry(
__field(int, order)
__field(unsigned long, nr_requested)
__field(unsigned long, nr_scanned)
__field(unsigned long, nr_taken)
- __field(unsigned long, nr_lumpy_taken)
- __field(unsigned long, nr_lumpy_dirty)
- __field(unsigned long, nr_lumpy_failed)
__field(isolate_mode_t, isolate_mode)
__field(int, file)
),
@@ -288,22 +282,16 @@
__entry->nr_requested = nr_requested;
__entry->nr_scanned = nr_scanned;
__entry->nr_taken = nr_taken;
- __entry->nr_lumpy_taken = nr_lumpy_taken;
- __entry->nr_lumpy_dirty = nr_lumpy_dirty;
- __entry->nr_lumpy_failed = nr_lumpy_failed;
__entry->isolate_mode = isolate_mode;
__entry->file = file;
),
- TP_printk("isolate_mode=%d order=%d nr_requested=%lu nr_scanned=%lu nr_taken=%lu contig_taken=%lu contig_dirty=%lu contig_failed=%lu file=%d",
+ TP_printk("isolate_mode=%d order=%d nr_requested=%lu nr_scanned=%lu nr_taken=%lu file=%d",
__entry->isolate_mode,
__entry->order,
__entry->nr_requested,
__entry->nr_scanned,
__entry->nr_taken,
- __entry->nr_lumpy_taken,
- __entry->nr_lumpy_dirty,
- __entry->nr_lumpy_failed,
__entry->file)
);
@@ -313,13 +301,10 @@
unsigned long nr_requested,
unsigned long nr_scanned,
unsigned long nr_taken,
- unsigned long nr_lumpy_taken,
- unsigned long nr_lumpy_dirty,
- unsigned long nr_lumpy_failed,
isolate_mode_t isolate_mode,
int file),
- TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode, file)
+ TP_ARGS(order, nr_requested, nr_scanned, nr_taken, isolate_mode, file)
);
@@ -329,13 +314,10 @@
unsigned long nr_requested,
unsigned long nr_scanned,
unsigned long nr_taken,
- unsigned long nr_lumpy_taken,
- unsigned long nr_lumpy_dirty,
- unsigned long nr_lumpy_failed,
isolate_mode_t isolate_mode,
int file),
- TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode, file)
+ TP_ARGS(order, nr_requested, nr_scanned, nr_taken, isolate_mode, file)
);
@@ -395,88 +377,6 @@
show_reclaim_flags(__entry->reclaim_flags))
);
-TRACE_EVENT(replace_swap_token,
- TP_PROTO(struct mm_struct *old_mm,
- struct mm_struct *new_mm),
-
- TP_ARGS(old_mm, new_mm),
-
- TP_STRUCT__entry(
- __field(struct mm_struct*, old_mm)
- __field(unsigned int, old_prio)
- __field(struct mm_struct*, new_mm)
- __field(unsigned int, new_prio)
- ),
-
- TP_fast_assign(
- __entry->old_mm = old_mm;
- __entry->old_prio = old_mm ? old_mm->token_priority : 0;
- __entry->new_mm = new_mm;
- __entry->new_prio = new_mm->token_priority;
- ),
-
- TP_printk("old_token_mm=%p old_prio=%u new_token_mm=%p new_prio=%u",
- __entry->old_mm, __entry->old_prio,
- __entry->new_mm, __entry->new_prio)
-);
-
-DECLARE_EVENT_CLASS(put_swap_token_template,
- TP_PROTO(struct mm_struct *swap_token_mm),
-
- TP_ARGS(swap_token_mm),
-
- TP_STRUCT__entry(
- __field(struct mm_struct*, swap_token_mm)
- ),
-
- TP_fast_assign(
- __entry->swap_token_mm = swap_token_mm;
- ),
-
- TP_printk("token_mm=%p", __entry->swap_token_mm)
-);
-
-DEFINE_EVENT(put_swap_token_template, put_swap_token,
- TP_PROTO(struct mm_struct *swap_token_mm),
- TP_ARGS(swap_token_mm)
-);
-
-DEFINE_EVENT_CONDITION(put_swap_token_template, disable_swap_token,
- TP_PROTO(struct mm_struct *swap_token_mm),
- TP_ARGS(swap_token_mm),
- TP_CONDITION(swap_token_mm != NULL)
-);
-
-TRACE_EVENT_CONDITION(update_swap_token_priority,
- TP_PROTO(struct mm_struct *mm,
- unsigned int old_prio,
- struct mm_struct *swap_token_mm),
-
- TP_ARGS(mm, old_prio, swap_token_mm),
-
- TP_CONDITION(mm->token_priority != old_prio),
-
- TP_STRUCT__entry(
- __field(struct mm_struct*, mm)
- __field(unsigned int, old_prio)
- __field(unsigned int, new_prio)
- __field(struct mm_struct*, swap_token_mm)
- __field(unsigned int, swap_token_prio)
- ),
-
- TP_fast_assign(
- __entry->mm = mm;
- __entry->old_prio = old_prio;
- __entry->new_prio = mm->token_priority;
- __entry->swap_token_mm = swap_token_mm;
- __entry->swap_token_prio = swap_token_mm ? swap_token_mm->token_priority : 0;
- ),
-
- TP_printk("mm=%p old_prio=%u new_prio=%u swap_token_mm=%p token_prio=%u",
- __entry->mm, __entry->old_prio, __entry->new_prio,
- __entry->swap_token_mm, __entry->swap_token_prio)
-);
-
#endif /* _TRACE_VMSCAN_H */
/* This part must be outside protection */
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 2f0d7542..01f68c4 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -4902,7 +4902,7 @@
* @root: the css supposed to be an ancestor of the child.
*
* Returns true if "root" is an ancestor of "child" in its hierarchy. Because
- * this function reads css->id, this use rcu_dereference() and rcu_read_lock().
+ * this function reads css->id, the caller must hold rcu_read_lock().
* But, considering usual usage, the csses should be valid objects after test.
* Assuming that the caller will do some action to the child if this
* returns true, the caller must take "child"'s reference count.
@@ -4914,18 +4914,18 @@
{
struct css_id *child_id;
struct css_id *root_id;
- bool ret = true;
- rcu_read_lock();
child_id = rcu_dereference(child->id);
+ if (!child_id)
+ return false;
root_id = rcu_dereference(root->id);
- if (!child_id
- || !root_id
- || (child_id->depth < root_id->depth)
- || (child_id->stack[root_id->depth] != root_id->id))
- ret = false;
- rcu_read_unlock();
- return ret;
+ if (!root_id)
+ return false;
+ if (child_id->depth < root_id->depth)
+ return false;
+ if (child_id->stack[root_id->depth] != root_id->id)
+ return false;
+ return true;
}
void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css)
diff --git a/kernel/fork.c b/kernel/fork.c
index 0de735c..a8bf721 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -597,7 +597,6 @@
list_del(&mm->mmlist);
spin_unlock(&mmlist_lock);
}
- put_swap_token(mm);
if (mm->binfmt)
module_put(mm->binfmt->module);
mmdrop(mm);
@@ -815,10 +814,6 @@
memcpy(mm, oldmm, sizeof(*mm));
mm_init_cpumask(mm);
- /* Initializing for Swap token stuff */
- mm->token_priority = 0;
- mm->last_interval = 0;
-
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
mm->pmd_huge_pte = NULL;
#endif
@@ -896,10 +891,6 @@
goto fail_nomem;
good_mm:
- /* Initializing for Swap token stuff */
- mm->token_priority = 0;
- mm->last_interval = 0;
-
tsk->mm = mm;
tsk->active_mm = mm;
return 0;
diff --git a/mm/Makefile b/mm/Makefile
index 8aada89..ccecbf9 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -25,7 +25,7 @@
obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o
obj-$(CONFIG_BOUNCE) += bounce.o
-obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o thrash.o
+obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o
obj-$(CONFIG_HAS_DMA) += dmapool.o
obj-$(CONFIG_HUGETLBFS) += hugetlb.o
obj-$(CONFIG_NUMA) += mempolicy.o
diff --git a/mm/compaction.c b/mm/compaction.c
index 353f1c5..673142d 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -16,6 +16,21 @@
#include <linux/sysfs.h>
#include "internal.h"
+#ifdef CONFIG_COMPACTION
+static inline void count_compact_event(enum vm_event_item item)
+{
+ count_vm_event(item);
+}
+
+static inline void count_compact_events(enum vm_event_item item, long delta)
+{
+ count_vm_events(item, delta);
+}
+#else
+#define count_compact_event(item) do { } while (0)
+#define count_compact_events(item, delta) do { } while (0)
+#endif
+
#if defined CONFIG_COMPACTION || defined CONFIG_CMA
#define CREATE_TRACE_POINTS
@@ -50,44 +65,228 @@
return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
}
+#ifdef CONFIG_COMPACTION
+/* Returns true if the pageblock should be scanned for pages to isolate. */
+static inline bool isolation_suitable(struct compact_control *cc,
+ struct page *page)
+{
+ if (cc->ignore_skip_hint)
+ return true;
+
+ return !get_pageblock_skip(page);
+}
+
+/*
+ * This function is called to clear all cached information on pageblocks that
+ * should be skipped for page isolation when the migrate and free page scanner
+ * meet.
+ */
+static void __reset_isolation_suitable(struct zone *zone)
+{
+ unsigned long start_pfn = zone->zone_start_pfn;
+ unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+ unsigned long pfn;
+
+ zone->compact_cached_migrate_pfn = start_pfn;
+ zone->compact_cached_free_pfn = end_pfn;
+ zone->compact_blockskip_flush = false;
+
+ /* Walk the zone and mark every pageblock as suitable for isolation */
+ for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
+ struct page *page;
+
+ cond_resched();
+
+ if (!pfn_valid(pfn))
+ continue;
+
+ page = pfn_to_page(pfn);
+ if (zone != page_zone(page))
+ continue;
+
+ clear_pageblock_skip(page);
+ }
+}
+
+void reset_isolation_suitable(pg_data_t *pgdat)
+{
+ int zoneid;
+
+ for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
+ struct zone *zone = &pgdat->node_zones[zoneid];
+ if (!populated_zone(zone))
+ continue;
+
+ /* Only flush if a full compaction finished recently */
+ if (zone->compact_blockskip_flush)
+ __reset_isolation_suitable(zone);
+ }
+}
+
+/*
+ * If no pages were isolated then mark this pageblock to be skipped in the
+ * future. The information is later cleared by __reset_isolation_suitable().
+ */
+static void update_pageblock_skip(struct compact_control *cc,
+ struct page *page, unsigned long nr_isolated,
+ bool migrate_scanner)
+{
+ struct zone *zone = cc->zone;
+ if (!page)
+ return;
+
+ if (!nr_isolated) {
+ unsigned long pfn = page_to_pfn(page);
+ set_pageblock_skip(page);
+
+ /* Update where compaction should restart */
+ if (migrate_scanner) {
+ if (!cc->finished_update_migrate &&
+ pfn > zone->compact_cached_migrate_pfn)
+ zone->compact_cached_migrate_pfn = pfn;
+ } else {
+ if (!cc->finished_update_free &&
+ pfn < zone->compact_cached_free_pfn)
+ zone->compact_cached_free_pfn = pfn;
+ }
+ }
+}
+#else
+static inline bool isolation_suitable(struct compact_control *cc,
+ struct page *page)
+{
+ return true;
+}
+
+static void update_pageblock_skip(struct compact_control *cc,
+ struct page *page, unsigned long nr_isolated,
+ bool migrate_scanner)
+{
+}
+#endif /* CONFIG_COMPACTION */
+
+static inline bool should_release_lock(spinlock_t *lock)
+{
+ return need_resched() || spin_is_contended(lock);
+}
+
+/*
+ * Compaction requires the taking of some coarse locks that are potentially
+ * very heavily contended. Check if the process needs to be scheduled or
+ * if the lock is contended. For async compaction, back out in the event
+ * if contention is severe. For sync compaction, schedule.
+ *
+ * Returns true if the lock is held.
+ * Returns false if the lock is released and compaction should abort
+ */
+static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
+ bool locked, struct compact_control *cc)
+{
+ if (should_release_lock(lock)) {
+ if (locked) {
+ spin_unlock_irqrestore(lock, *flags);
+ locked = false;
+ }
+
+ /* async aborts if taking too long or contended */
+ if (!cc->sync) {
+ cc->contended = true;
+ return false;
+ }
+
+ cond_resched();
+ }
+
+ if (!locked)
+ spin_lock_irqsave(lock, *flags);
+ return true;
+}
+
+static inline bool compact_trylock_irqsave(spinlock_t *lock,
+ unsigned long *flags, struct compact_control *cc)
+{
+ return compact_checklock_irqsave(lock, flags, false, cc);
+}
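
compact_checklock_irqsave() encodes the back-off policy: drop a contended lock, abort async compaction, reschedule sync compaction. Below is a rough userspace analogue using a pthread mutex in place of the zone spinlock; the simplified retry logic and the checklock()/contended names are assumptions of this sketch, not the kernel implementation (which also re-checks contention while the lock is held).

    #include <pthread.h>
    #include <sched.h>
    #include <stdbool.h>

    static bool checklock(pthread_mutex_t *lock, bool locked, bool sync,
                          bool *contended)
    {
        if (locked)
            return true;                  /* already held, nothing to do */

        if (pthread_mutex_trylock(lock) == 0)
            return true;                  /* uncontended fast path */

        if (!sync) {
            *contended = true;            /* async: record and abort */
            return false;
        }

        sched_yield();                    /* sync: give others a chance... */
        pthread_mutex_lock(lock);         /* ...then block for the lock */
        return true;
    }
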
+
+/* Returns true if the page is within a block suitable as a migration target */
+static bool suitable_migration_target(struct page *page)
+{
+ int migratetype = get_pageblock_migratetype(page);
+
+ /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
+ if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
+ return false;
+
+ /* If the page is a large free page, then allow migration */
+ if (PageBuddy(page) && page_order(page) >= pageblock_order)
+ return true;
+
+ /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
+ if (migrate_async_suitable(migratetype))
+ return true;
+
+ /* Otherwise skip the block */
+ return false;
+}
+
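suitable_migration_target() is a pure predicate over the pageblock's migratetype and buddy order. A compact model of the same decision, with a made-up blocktype enum standing in for the kernel's migratetype constants:

    #include <stdbool.h>

    enum blocktype { MOVABLE, RECLAIMABLE, CMA, ISOLATE, RESERVE };

    static bool block_is_target(enum blocktype type, bool whole_block_free)
    {
        if (type == ISOLATE || type == RESERVE)
            return false;            /* leave hotplug/reserve blocks alone */
        if (whole_block_free)
            return true;             /* a whole free pageblock is always fine */
        return type == MOVABLE || type == CMA;   /* async-suitable types */
    }
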
/*
* Isolate free pages onto a private freelist; zone->lock is taken as needed.
* If @strict is true, will abort returning 0 on any invalid PFNs or non-free
* pages inside of the pageblock (even though it may still end up isolating
* some pages).
*/
-static unsigned long isolate_freepages_block(unsigned long blockpfn,
+static unsigned long isolate_freepages_block(struct compact_control *cc,
+ unsigned long blockpfn,
unsigned long end_pfn,
struct list_head *freelist,
bool strict)
{
int nr_scanned = 0, total_isolated = 0;
- struct page *cursor;
+ struct page *cursor, *valid_page = NULL;
+ unsigned long nr_strict_required = end_pfn - blockpfn;
+ unsigned long flags;
+ bool locked = false;
cursor = pfn_to_page(blockpfn);
- /* Isolate free pages. This assumes the block is valid */
+ /* Isolate free pages. */
for (; blockpfn < end_pfn; blockpfn++, cursor++) {
int isolated, i;
struct page *page = cursor;
- if (!pfn_valid_within(blockpfn)) {
- if (strict)
- return 0;
- continue;
- }
nr_scanned++;
-
- if (!PageBuddy(page)) {
- if (strict)
- return 0;
+ if (!pfn_valid_within(blockpfn))
continue;
- }
+ if (!valid_page)
+ valid_page = page;
+ if (!PageBuddy(page))
+ continue;
+
+ /*
+ * The zone lock must be held to isolate freepages.
+ * Unfortunately this is a very coarse lock and can be
+ * heavily contended if there are parallel allocations
+ * or parallel compactions. For async compaction, do not
+ * spin on the lock; acquire it as late as
+ * possible.
+ */
+ locked = compact_checklock_irqsave(&cc->zone->lock, &flags,
+ locked, cc);
+ if (!locked)
+ break;
+
+ /* Recheck this is a suitable migration target under lock */
+ if (!strict && !suitable_migration_target(page))
+ break;
+
+ /* Recheck this is a buddy page under lock */
+ if (!PageBuddy(page))
+ continue;
/* Found a free page, break it into order-0 pages */
isolated = split_free_page(page);
if (!isolated && strict)
- return 0;
+ break;
total_isolated += isolated;
for (i = 0; i < isolated; i++) {
list_add(&page->lru, freelist);
@@ -102,6 +301,25 @@
}
trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
+
+ /*
+ * If strict isolation is requested by CMA then check that all the
+ * pages requested were isolated. If there were any failures, 0 is
+ * returned and CMA will fail.
+ */
+ if (strict && nr_strict_required > total_isolated)
+ total_isolated = 0;
+
+ if (locked)
+ spin_unlock_irqrestore(&cc->zone->lock, flags);
+
+ /* Update the pageblock-skip if the whole pageblock was scanned */
+ if (blockpfn == end_pfn)
+ update_pageblock_skip(cc, valid_page, total_isolated, false);
+
+ count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
+ if (total_isolated)
+ count_compact_events(COMPACTISOLATED, total_isolated);
return total_isolated;
}
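
The strict path gives CMA all-or-nothing semantics: a partial result is as useless as none, so the whole count is discarded. The contract can be stated in a few lines (finish_isolation() is a hypothetical name):

    #include <stdbool.h>
    #include <stddef.h>

    static size_t finish_isolation(size_t required, size_t isolated,
                                   bool strict)
    {
        if (strict && isolated < required)
            return 0;       /* all-or-nothing for contiguous allocations */
        return isolated;    /* best-effort for regular compaction */
    }
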
@@ -119,17 +337,14 @@
* a free page).
*/
unsigned long
-isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
+isolate_freepages_range(struct compact_control *cc,
+ unsigned long start_pfn, unsigned long end_pfn)
{
- unsigned long isolated, pfn, block_end_pfn, flags;
- struct zone *zone = NULL;
+ unsigned long isolated, pfn, block_end_pfn;
LIST_HEAD(freelist);
- if (pfn_valid(start_pfn))
- zone = page_zone(pfn_to_page(start_pfn));
-
for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
- if (!pfn_valid(pfn) || zone != page_zone(pfn_to_page(pfn)))
+ if (!pfn_valid(pfn) || cc->zone != page_zone(pfn_to_page(pfn)))
break;
/*
@@ -139,10 +354,8 @@
block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
block_end_pfn = min(block_end_pfn, end_pfn);
- spin_lock_irqsave(&zone->lock, flags);
- isolated = isolate_freepages_block(pfn, block_end_pfn,
+ isolated = isolate_freepages_block(cc, pfn, block_end_pfn,
&freelist, true);
- spin_unlock_irqrestore(&zone->lock, flags);
/*
* In strict mode, isolate_freepages_block() returns 0 if
@@ -173,7 +386,7 @@
}
/* Update the number of anon and file isolated pages in the zone */
-static void acct_isolated(struct zone *zone, struct compact_control *cc)
+static void acct_isolated(struct zone *zone, bool locked, struct compact_control *cc)
{
struct page *page;
unsigned int count[2] = { 0, };
@@ -181,8 +394,14 @@
list_for_each_entry(page, &cc->migratepages, lru)
count[!!page_is_file_cache(page)]++;
- __mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
- __mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
+ /* If locked we can use the interrupt unsafe versions */
+ if (locked) {
+ __mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
+ __mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
+ } else {
+ mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
+ mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
+ }
}
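
The per-type counting above relies on the count[!!flag]++ idiom, where !! folds any truth value to 0 or 1 so a boolean can index a two-bucket array. A self-contained demonstration with fabricated page attributes:

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        bool is_file[] = { true, false, true, true };  /* fake attributes */
        unsigned int count[2] = { 0, 0 };              /* [0]=anon [1]=file */
        unsigned int i;

        for (i = 0; i < 4; i++)
            count[!!is_file[i]]++;    /* !! mirrors the kernel idiom */

        printf("anon=%u file=%u\n", count[0], count[1]);
        return 0;
    }
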
/* Similar to reclaim, but different enough that they don't share logic */
@@ -206,6 +425,7 @@
* @cc: Compaction control structure.
* @low_pfn: The first PFN of the range.
* @end_pfn: The one-past-the-last PFN of the range.
+ * @unevictable: true if unevictable pages may be isolated
*
* Isolate all pages that can be migrated from the range specified by
* [low_pfn, end_pfn). Returns zero if there is a fatal signal
@@ -221,12 +441,15 @@
*/
unsigned long
isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
- unsigned long low_pfn, unsigned long end_pfn)
+ unsigned long low_pfn, unsigned long end_pfn, bool unevictable)
{
unsigned long last_pageblock_nr = 0, pageblock_nr;
unsigned long nr_scanned = 0, nr_isolated = 0;
struct list_head *migratelist = &cc->migratepages;
- isolate_mode_t mode = ISOLATE_ACTIVE|ISOLATE_INACTIVE;
+ isolate_mode_t mode = 0;
+ unsigned long flags;
+ bool locked = false;
+ struct page *page = NULL, *valid_page = NULL;
/*
* Ensure that there are not too many pages isolated from the LRU
@@ -246,25 +469,14 @@
/* Time to isolate some pages for migration */
cond_resched();
- spin_lock_irq(&zone->lru_lock);
for (; low_pfn < end_pfn; low_pfn++) {
- struct page *page;
- bool locked = true;
-
/* give a chance to irqs before checking need_resched() */
- if (!((low_pfn+1) % SWAP_CLUSTER_MAX)) {
- spin_unlock_irq(&zone->lru_lock);
- locked = false;
+ if (locked && !((low_pfn+1) % SWAP_CLUSTER_MAX)) {
+ if (should_release_lock(&zone->lru_lock)) {
+ spin_unlock_irqrestore(&zone->lru_lock, flags);
+ locked = false;
+ }
}
- if (need_resched() || spin_is_contended(&zone->lru_lock)) {
- if (locked)
- spin_unlock_irq(&zone->lru_lock);
- cond_resched();
- spin_lock_irq(&zone->lru_lock);
- if (fatal_signal_pending(current))
- break;
- } else if (!locked)
- spin_lock_irq(&zone->lru_lock);
/*
* migrate_pfn does not necessarily start aligned to a
@@ -293,6 +505,14 @@
if (page_zone(page) != zone)
continue;
+ if (!valid_page)
+ valid_page = page;
+
+ /* If isolation recently failed, do not retry */
+ pageblock_nr = low_pfn >> pageblock_order;
+ if (!isolation_suitable(cc, page))
+ goto next_pageblock;
+
/* Skip if free */
if (PageBuddy(page))
continue;
@@ -302,24 +522,43 @@
* migration is optimistic to see if the minimum amount of work
* satisfies the allocation
*/
- pageblock_nr = low_pfn >> pageblock_order;
if (!cc->sync && last_pageblock_nr != pageblock_nr &&
!migrate_async_suitable(get_pageblock_migratetype(page))) {
- low_pfn += pageblock_nr_pages;
- low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
- last_pageblock_nr = pageblock_nr;
- continue;
+ cc->finished_update_migrate = true;
+ goto next_pageblock;
}
+ /* The check may be lockless but that's ok as we recheck later */
if (!PageLRU(page))
continue;
/*
- * PageLRU is set, and lru_lock excludes isolation,
- * splitting and collapsing (collapsing has already
- * happened if PageLRU is set).
+ * PageLRU is set. lru_lock normally excludes isolation,
+ * splitting and collapsing (collapsing has already happened
+ * if PageLRU is set) but the lock is not necessarily taken
+ * here and it is wasteful to take it just to check transhuge.
+ * Check TransHuge without lock and skip the whole pageblock if
+ * it's either a transhuge or hugetlbfs page, as calling
+ * compound_order() without preventing THP from splitting the
+ * page underneath us may return surprising results.
*/
if (PageTransHuge(page)) {
+ if (!locked)
+ goto next_pageblock;
+ low_pfn += (1 << compound_order(page)) - 1;
+ continue;
+ }
+
+ /* Check if it is ok to still hold the lock */
+ locked = compact_checklock_irqsave(&zone->lru_lock, &flags,
+ locked, cc);
+ if (!locked || fatal_signal_pending(current))
+ break;
+
+ /* Recheck PageLRU and PageTransHuge under lock */
+ if (!PageLRU(page))
+ continue;
+ if (PageTransHuge(page)) {
low_pfn += (1 << compound_order(page)) - 1;
continue;
}
@@ -327,13 +566,17 @@
if (!cc->sync)
mode |= ISOLATE_ASYNC_MIGRATE;
+ if (unevictable)
+ mode |= ISOLATE_UNEVICTABLE;
+
/* Try isolate the page */
- if (__isolate_lru_page(page, mode, 0) != 0)
+ if (__isolate_lru_page(page, mode) != 0)
continue;
VM_BUG_ON(PageTransCompound(page));
/* Successfully isolated */
+ cc->finished_update_migrate = true;
del_page_from_lru_list(zone, page, page_lru(page));
list_add(&page->lru, migratelist);
cc->nr_migratepages++;
@@ -344,42 +587,35 @@
++low_pfn;
break;
}
+
+ continue;
+
+next_pageblock:
+ low_pfn += pageblock_nr_pages;
+ low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
+ last_pageblock_nr = pageblock_nr;
}
- acct_isolated(zone, cc);
+ acct_isolated(zone, locked, cc);
- spin_unlock_irq(&zone->lru_lock);
+ if (locked)
+ spin_unlock_irqrestore(&zone->lru_lock, flags);
+
+ /* Update the pageblock-skip if the whole pageblock was scanned */
+ if (low_pfn == end_pfn)
+ update_pageblock_skip(cc, valid_page, nr_isolated, true);
trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
+ count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
+ if (nr_isolated)
+ count_compact_events(COMPACTISOLATED, nr_isolated);
+
return low_pfn;
}
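
The next_pageblock label advances low_pfn to the last pfn of the current pageblock so that the for-loop's increment lands exactly on the next block boundary. The arithmetic, shown for a block-aligned starting pfn with an ALIGN_UP macro mirroring the kernel's ALIGN():

    #include <stdio.h>

    /* Round x up to a multiple of a (a power of two). */
    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

    int main(void)
    {
        unsigned long block = 512;      /* stand-in pageblock_nr_pages */
        unsigned long low_pfn = 1024;   /* first pfn of block 2 */

        /* next_pageblock: step to the last pfn of this block... */
        low_pfn += block;
        low_pfn = ALIGN_UP(low_pfn, block) - 1;

        /* ...so the loop's low_pfn++ starts exactly at block 3. */
        printf("resume at pfn %lu\n", low_pfn + 1);  /* prints 1536 */
        return 0;
    }
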
#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION
-
-/* Returns true if the page is within a block suitable for migration to */
-static bool suitable_migration_target(struct page *page)
-{
-
- int migratetype = get_pageblock_migratetype(page);
-
- /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
- if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
- return false;
-
- /* If the page is a large free page, then allow migration */
- if (PageBuddy(page) && page_order(page) >= pageblock_order)
- return true;
-
- /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
- if (migrate_async_suitable(migratetype))
- return true;
-
- /* Otherwise skip the block */
- return false;
-}
-
/*
* Based on information in the current compact_control, find blocks
* suitable for isolating free pages from and then isolate them.
@@ -389,7 +625,6 @@
{
struct page *page;
unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn;
- unsigned long flags;
int nr_freepages = cc->nr_freepages;
struct list_head *freelist = &cc->freepages;
@@ -437,29 +672,34 @@
if (!suitable_migration_target(page))
continue;
- /*
- * Found a block suitable for isolating free pages from. Now
- * we disabled interrupts, double check things are ok and
- * isolate the pages. This is to minimise the time IRQs
- * are disabled
- */
+ /* If isolation recently failed, do not retry */
+ if (!isolation_suitable(cc, page))
+ continue;
+
+ /* Found a block suitable for isolating free pages from */
isolated = 0;
- spin_lock_irqsave(&zone->lock, flags);
- if (suitable_migration_target(page)) {
- end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
- isolated = isolate_freepages_block(pfn, end_pfn,
- freelist, false);
- nr_freepages += isolated;
- }
- spin_unlock_irqrestore(&zone->lock, flags);
+
+ /*
+ * As pfn may not start aligned, pfn+pageblock_nr_pages
+ * may cross a MAX_ORDER_NR_PAGES boundary and miss
+ * a pfn_valid check. Ensure isolate_freepages_block()
+ * only scans within a pageblock
+ */
+ end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
+ end_pfn = min(end_pfn, zone_end_pfn);
+ isolated = isolate_freepages_block(cc, pfn, end_pfn,
+ freelist, false);
+ nr_freepages += isolated;
/*
* Record the highest PFN we isolated pages from. When next
* looking for free pages, the search will restart here as
* page migration may have returned some pages to the allocator
*/
- if (isolated)
+ if (isolated) {
+ cc->finished_update_free = true;
high_pfn = max(high_pfn, pfn);
+ }
}
/* split_free_page does not map the pages */
@@ -544,8 +784,8 @@
}
/* Perform the isolation */
- low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn);
- if (!low_pfn)
+ low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn, false);
+ if (!low_pfn || cc->contended)
return ISOLATE_ABORT;
cc->migrate_pfn = low_pfn;
@@ -563,8 +803,18 @@
return COMPACT_PARTIAL;
/* Compaction run completes if the migrate and free scanner meet */
- if (cc->free_pfn <= cc->migrate_pfn)
+ if (cc->free_pfn <= cc->migrate_pfn) {
+ /*
+ * Mark that the PG_migrate_skip information should be cleared
+ * by kswapd when it goes to sleep. kswapd does not set the
+ * flag itself as the decision to clear it should be based
+ * directly on an allocation request.
+ */
+ if (!current_is_kswapd())
+ zone->compact_blockskip_flush = true;
+
return COMPACT_COMPLETE;
+ }
/*
* order == -1 is expected when compacting via
@@ -582,12 +832,14 @@
/* Direct compactor: Is a suitable page free? */
for (order = cc->order; order < MAX_ORDER; order++) {
+ struct free_area *area = &zone->free_area[order];
+
/* Job done if page is free of the right migratetype */
- if (!list_empty(&zone->free_area[order].free_list[cc->migratetype]))
+ if (!list_empty(&area->free_list[cc->migratetype]))
return COMPACT_PARTIAL;
/* Job done if allocation would set block type */
- if (order >= pageblock_order && zone->free_area[order].nr_free)
+ if (cc->order >= pageblock_order && area->nr_free)
return COMPACT_PARTIAL;
}
@@ -647,6 +899,8 @@
static int compact_zone(struct zone *zone, struct compact_control *cc)
{
int ret;
+ unsigned long start_pfn = zone->zone_start_pfn;
+ unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages;
ret = compaction_suitable(zone, cc->order);
switch (ret) {
@@ -659,10 +913,29 @@
;
}
- /* Setup to move all movable pages to the end of the zone */
- cc->migrate_pfn = zone->zone_start_pfn;
- cc->free_pfn = cc->migrate_pfn + zone->spanned_pages;
- cc->free_pfn &= ~(pageblock_nr_pages-1);
+ /*
+ * Set up to move all movable pages to the end of the zone. Use cached
+ * information on where the scanners should start but check that it
+ * is initialised by ensuring the values are within zone boundaries.
+ */
+ cc->migrate_pfn = zone->compact_cached_migrate_pfn;
+ cc->free_pfn = zone->compact_cached_free_pfn;
+ if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
+ cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
+ zone->compact_cached_free_pfn = cc->free_pfn;
+ }
+ if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
+ cc->migrate_pfn = start_pfn;
+ zone->compact_cached_migrate_pfn = cc->migrate_pfn;
+ }
+
+ /*
+ * Clear pageblock skip if there were failures recently and compaction
+ * is about to be retried after being deferred. kswapd does not do
+ * this reset as it'll reset the cached information when going to sleep.
+ */
+ if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
+ __reset_isolation_suitable(zone);
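
Resuming from cached positions is safe only because the values are bounds-checked first: a stale or uninitialised pfn falls back to the zone edge and the cache is repaired in the same step. The validate-or-reset pattern, with an illustrative zone_model struct:

    /*
     * Validate-or-reset pattern for resuming from cached scanner positions;
     * the struct and field names are illustrative, not the kernel's.
     */
    struct zone_model {
        unsigned long start, end;        /* zone pfn range */
        unsigned long cached_migrate;    /* may be stale or never set */
        unsigned long cached_free;
    };

    static void resume_positions(struct zone_model *z,
                                 unsigned long *migrate_pfn,
                                 unsigned long *free_pfn)
    {
        *migrate_pfn = z->cached_migrate;
        *free_pfn = z->cached_free;

        if (*free_pfn < z->start || *free_pfn > z->end) {
            *free_pfn = z->end;          /* fall back to the zone edge */
            z->cached_free = *free_pfn;  /* and repair the cache */
        }
        if (*migrate_pfn < z->start || *migrate_pfn > z->end) {
            *migrate_pfn = z->start;
            z->cached_migrate = *migrate_pfn;
        }
    }
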
migrate_prep_local();
@@ -673,6 +946,8 @@
switch (isolate_migratepages(zone, cc)) {
case ISOLATE_ABORT:
ret = COMPACT_PARTIAL;
+ putback_lru_pages(&cc->migratepages);
+ cc->nr_migratepages = 0;
goto out;
case ISOLATE_NONE:
continue;
@@ -687,10 +962,6 @@
update_nr_listpages(cc);
nr_remaining = cc->nr_migratepages;
- count_vm_event(COMPACTBLOCKS);
- count_vm_events(COMPACTPAGES, nr_migrate - nr_remaining);
- if (nr_remaining)
- count_vm_events(COMPACTPAGEFAILED, nr_remaining);
trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
nr_remaining);
@@ -698,8 +969,11 @@
if (err) {
putback_lru_pages(&cc->migratepages);
cc->nr_migratepages = 0;
+ if (err == -ENOMEM) {
+ ret = COMPACT_PARTIAL;
+ goto out;
+ }
}
-
}
out:
@@ -712,8 +986,9 @@
static unsigned long compact_zone_order(struct zone *zone,
int order, gfp_t gfp_mask,
- bool sync)
+ bool sync, bool *contended)
{
+ unsigned long ret;
struct compact_control cc = {
.nr_freepages = 0,
.nr_migratepages = 0,
@@ -725,7 +1000,13 @@
INIT_LIST_HEAD(&cc.freepages);
INIT_LIST_HEAD(&cc.migratepages);
- return compact_zone(zone, &cc);
+ ret = compact_zone(zone, &cc);
+
+ VM_BUG_ON(!list_empty(&cc.freepages));
+ VM_BUG_ON(!list_empty(&cc.migratepages));
+
+ *contended = cc.contended;
+ return ret;
}
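
Propagating cc.contended through an out-parameter lets callers distinguish "compaction tried and failed" from "compaction could not even get its locks". A caller-side sketch of the pattern; run_compaction() and the status codes are stand-ins, not kernel symbols:

    #include <stdbool.h>

    /* Hypothetical status codes standing in for the COMPACT_* results. */
    enum { SKIPPED, PARTIAL, COMPLETE };

    /* Modelled callee: reports contention through the out-parameter. */
    static int run_compaction(bool sync, bool *contended)
    {
        (void)sync;
        *contended = false;    /* stub: real work would happen here */
        return SKIPPED;
    }

    /* Caller-side pattern: consult the status and the contended flag. */
    static int allocate_high_order(void)
    {
        bool contended = false;
        int status = run_compaction(false /* async */, &contended);

        if (contended)
            return -1;    /* locks were hot: back off, do not reclaim */
        return status == SKIPPED ? -1 : 0;
    }
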
int sysctl_extfrag_threshold = 500;
@@ -737,12 +1018,14 @@
* @gfp_mask: The GFP mask of the current allocation
* @nodemask: The allowed nodes to allocate from
* @sync: Whether migration is synchronous or not
+ * @contended: Return value that is true if compaction was aborted due to lock contention
*
* This is the main entry point for direct page compaction.
*/
unsigned long try_to_compact_pages(struct zonelist *zonelist,
int order, gfp_t gfp_mask, nodemask_t *nodemask,
- bool sync)
+ bool sync, bool *contended)
{
enum zone_type high_zoneidx = gfp_zone(gfp_mask);
int may_enter_fs = gfp_mask & __GFP_FS;
@@ -752,15 +1035,11 @@
int rc = COMPACT_SKIPPED;
int alloc_flags = 0;
- /*
- * Check whether it is worth even starting compaction. The order check is
- * made because an assumption is made that the page allocator can satisfy
- * the "cheaper" orders without taking special steps
- */
+ /* Check if the GFP flags allow compaction */
if (!order || !may_enter_fs || !may_perform_io)
return rc;
- count_vm_event(COMPACTSTALL);
+ count_compact_event(COMPACTSTALL);
#ifdef CONFIG_CMA
if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
@@ -771,7 +1050,8 @@
nodemask) {
int status;
- status = compact_zone_order(zone, order, gfp_mask, sync);
+ status = compact_zone_order(zone, order, gfp_mask, sync,
+ contended);
rc = max(status, rc);
/* If a normal allocation would succeed, stop compacting */
@@ -808,7 +1088,7 @@
if (cc->order > 0) {
int ok = zone_watermark_ok(zone, cc->order,
low_wmark_pages(zone), 0, 0);
- if (ok && cc->order > zone->compact_order_failed)
+ if (ok && cc->order >= zone->compact_order_failed)
zone->compact_order_failed = cc->order + 1;
/* Currently async compaction is never deferred. */
else if (!ok && cc->sync)
@@ -843,7 +1123,7 @@
}
/* Compact all nodes in the system */
-static int compact_nodes(void)
+static void compact_nodes(void)
{
int nid;
@@ -852,8 +1132,6 @@
for_each_online_node(nid)
compact_node(nid);
-
- return COMPACT_COMPLETE;
}
/* The written value is actually unused, all memory is compacted */
@@ -864,7 +1142,7 @@
void __user *buffer, size_t *length, loff_t *ppos)
{
if (write)
- return compact_nodes();
+ compact_nodes();
return 0;
}
diff --git a/mm/internal.h b/mm/internal.h
index 8c6fd44..3439ef4 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -120,17 +120,24 @@
unsigned long free_pfn; /* isolate_freepages search base */
unsigned long migrate_pfn; /* isolate_migratepages search base */
bool sync; /* Synchronous migration */
+ bool ignore_skip_hint; /* Scan blocks even if marked skip */
+ bool finished_update_free; /* True when the zone cached pfns are
+ * no longer being updated
+ */
+ bool finished_update_migrate;
int order; /* order a direct compactor needs */
int migratetype; /* MOVABLE, RECLAIMABLE etc */
struct zone *zone;
+ bool contended; /* True if a lock was contended */
};
unsigned long
-isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn);
+isolate_freepages_range(struct compact_control *cc,
+ unsigned long start_pfn, unsigned long end_pfn);
unsigned long
isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
- unsigned long low_pfn, unsigned long end_pfn);
+ unsigned long low_pfn, unsigned long end_pfn, bool unevictable);
#endif
@@ -356,3 +363,6 @@
#define ALLOC_HIGH 0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET 0x40 /* check for correct cpuset */
#define ALLOC_CMA 0x80 /* allow allocations from CMA areas */
+
+unsigned long reclaim_clean_pages_from_list(struct zone *zone,
+ struct list_head *page_list);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7685d4a..d436634 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1116,11 +1116,6 @@
mz->lru_size[lru] -= 1 << compound_order(page);
}
-void mem_cgroup_lru_del(struct page *page)
-{
- mem_cgroup_lru_del_list(page, page_lru(page));
-}
-
/**
* mem_cgroup_lru_move_lists - account for moving a page between lrus
* @zone: zone of the page
@@ -1149,15 +1144,25 @@
* Checks whether given mem is same or in the root_mem_cgroup's
* hierarchy subtree
*/
-static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
- struct mem_cgroup *memcg)
+bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
+ struct mem_cgroup *memcg)
{
- if (root_memcg != memcg) {
- return (root_memcg->use_hierarchy &&
- css_is_ancestor(&memcg->css, &root_memcg->css));
- }
+ if (root_memcg == memcg)
+ return true;
+ if (!root_memcg->use_hierarchy)
+ return false;
+ return css_is_ancestor(&memcg->css, &root_memcg->css);
+}
- return true;
+static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
+ struct mem_cgroup *memcg)
+{
+ bool ret;
+
+ rcu_read_lock();
+ ret = __mem_cgroup_same_or_subtree(root_memcg, memcg);
+ rcu_read_unlock();
+ return ret;
}
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg)
@@ -5610,7 +5615,6 @@
if (mm) {
if (mc.to)
mem_cgroup_move_charge(mm);
- put_swap_token(mm);
mmput(mm);
}
if (mc.to)
diff --git a/mm/memory.c b/mm/memory.c
index 2111354..b6ab040 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2935,7 +2935,6 @@
delayacct_set_flag(DELAYACCT_PF_SWAPIN);
page = lookup_swap_cache(entry);
if (!page) {
- grab_swap_token(mm); /* Contend for token _before_ read-in */
page = swapin_readahead(entry,
GFP_HIGHUSER_MOVABLE, vma, address);
if (!page) {
@@ -2965,6 +2964,7 @@
}
locked = lock_page_or_retry(page, mm, flags);
+
delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
if (!locked) {
ret |= VM_FAULT_RETRY;
diff --git a/mm/migrate.c b/mm/migrate.c
index 79a791f..4f5c02e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -969,6 +969,7 @@
{
int retry = 1;
int nr_failed = 0;
+ int nr_succeeded = 0;
int pass = 0;
struct page *page;
struct page *page2;
@@ -997,6 +998,7 @@
trace_migrate_retry(retry);
break;
case 0:
+ nr_succeeded++;
break;
default:
/* Permanent failure */
@@ -1007,6 +1009,10 @@
}
rc = 0;
out:
+ if (nr_succeeded)
+ count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
+ if (nr_failed)
+ count_vm_events(PGMIGRATE_FAIL, nr_failed);
if (!swapwrite)
current->flags &= ~PF_SWAPWRITE;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8a93508..9cc2f45 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1441,6 +1441,45 @@
set_page_refcounted(page + i);
}
+static int __isolate_free_page(struct page *page, unsigned int order)
+{
+ unsigned long watermark;
+ struct zone *zone;
+ int mt;
+
+ BUG_ON(!PageBuddy(page));
+
+ zone = page_zone(page);
+ mt = get_pageblock_migratetype(page);
+
+ if (mt != MIGRATE_ISOLATE && !is_migrate_cma(mt)) {
+ /* Obey watermarks as if the page was being allocated */
+ watermark = low_wmark_pages(zone) + (1 << order);
+ if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
+ return 0;
+
+ __mod_zone_freepage_state(zone, -(1UL << order), mt);
+ }
+
+ /* Remove page from free list */
+ list_del(&page->lru);
+ zone->free_area[order].nr_free--;
+ rmv_page_order(page);
+
+ /* Set the pageblock if the isolated page is at least a pageblock */
+ if (order >= pageblock_order - 1) {
+ struct page *endpage = page + (1 << order) - 1;
+ for (; page < endpage; page += pageblock_nr_pages) {
+ mt = get_pageblock_migratetype(page);
+ if (mt != MIGRATE_ISOLATE && !is_migrate_cma(mt))
+ set_pageblock_migratetype(page,
+ MIGRATE_MOVABLE);
+ }
+ }
+
+ return 1UL << order;
+}
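
__isolate_free_page() obeys the low watermark as if the pages were being allocated, except for CMA and isolated blocks, which carry their own accounting. The guard reduces to a few lines; the zone_model fields here are invented:

    #include <stdbool.h>

    /* Watermark-guard sketch modelled on the function above. */
    struct zone_model {
        unsigned long free_pages;
        unsigned long low_wmark;
    };

    static bool may_take_free_pages(struct zone_model *z, unsigned int order)
    {
        /* Behave as if the pages were being allocated right now. */
        unsigned long needed = z->low_wmark + (1UL << order);

        if (z->free_pages < needed)
            return false;        /* would dip below the watermark */

        z->free_pages -= 1UL << order;
        return true;
    }
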
+
/*
* Similar to split_page except the page is already free. As this is only
* being used for migration, the migratetype of the block also changes.
@@ -1454,45 +1493,18 @@
int split_free_page(struct page *page)
{
unsigned int order;
- unsigned long watermark;
- struct zone *zone;
- int mt;
+ int nr_pages;
- BUG_ON(!PageBuddy(page));
-
- zone = page_zone(page);
order = page_order(page);
- mt = get_pageblock_migratetype(page);
- /* Obey watermarks as if the page was being allocated */
- watermark = low_wmark_pages(zone) + (1 << order);
- if (!is_migrate_cma(mt) && mt != MIGRATE_ISOLATE &&
- !zone_watermark_ok(zone, 0, watermark, 0, 0))
+ nr_pages = __isolate_free_page(page, order);
+ if (!nr_pages)
return 0;
- /* Remove page from free list */
- list_del(&page->lru);
- zone->free_area[order].nr_free--;
- rmv_page_order(page);
-
- if (unlikely(mt != MIGRATE_ISOLATE))
- __mod_zone_freepage_state(zone, -(1UL << order), mt);
-
/* Split into individual pages */
set_page_refcounted(page);
split_page(page, order);
-
- if (order >= pageblock_order - 1) {
- struct page *endpage = page + (1 << order) - 1;
- for (; page < endpage; page += pageblock_nr_pages) {
- mt = get_pageblock_migratetype(page);
- if (mt != MIGRATE_ISOLATE && !is_migrate_cma(mt))
- set_pageblock_migratetype(page,
- MIGRATE_MOVABLE);
- }
- }
-
- return 1 << order;
+ return nr_pages;
}
/*
@@ -2134,11 +2146,9 @@
struct zonelist *zonelist, enum zone_type high_zoneidx,
nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
int migratetype, bool sync_migration,
- bool *deferred_compaction,
+ bool *contended_compaction, bool *deferred_compaction,
unsigned long *did_some_progress)
{
- struct page *page;
-
if (!order)
return NULL;
@@ -2149,9 +2159,12 @@
current->flags |= PF_MEMALLOC;
*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
- nodemask, sync_migration);
+ nodemask, sync_migration,
+ contended_compaction);
current->flags &= ~PF_MEMALLOC;
+
if (*did_some_progress != COMPACT_SKIPPED) {
+ struct page *page;
/* Page migration frees to the PCP lists but we want merging */
drain_pages(get_cpu());
@@ -2162,6 +2175,7 @@
alloc_flags, preferred_zone,
migratetype);
if (page) {
+ preferred_zone->compact_blockskip_flush = false;
preferred_zone->compact_considered = 0;
preferred_zone->compact_defer_shift = 0;
if (order >= preferred_zone->compact_order_failed)
@@ -2195,7 +2209,7 @@
struct zonelist *zonelist, enum zone_type high_zoneidx,
nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
int migratetype, bool sync_migration,
- bool *deferred_compaction,
+ bool *contended_compaction, bool *deferred_compaction,
unsigned long *did_some_progress)
{
return NULL;
@@ -2362,6 +2376,7 @@
unsigned long did_some_progress;
bool sync_migration = false;
bool deferred_compaction = false;
+ bool contended_compaction = false;
/*
* In the slowpath, we sanity check order to avoid ever trying to
@@ -2443,6 +2458,7 @@
nodemask,
alloc_flags, preferred_zone,
migratetype, sync_migration,
+ &contended_compaction,
&deferred_compaction,
&did_some_progress);
if (page)
@@ -2452,10 +2468,11 @@
/*
* If compaction is deferred for high-order allocations, it is because
* sync compaction recently failed. If this is the case and the caller
- * has requested the system not be heavily disrupted, fail the
- * allocation now instead of entering direct reclaim
+ * requested a movable allocation that does not heavily disrupt the
+ * system then fail the allocation instead of entering direct reclaim.
*/
- if (deferred_compaction && (gfp_mask & __GFP_NO_KSWAPD))
+ if ((deferred_compaction || contended_compaction) &&
+ (gfp_mask & __GFP_NO_KSWAPD))
goto nopage;
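
The gate reads as a single boolean: bail out before direct reclaim when compaction was deferred or contended and the caller opted out of disruption. A sketch with a stand-in flag for __GFP_NO_KSWAPD:

    #include <stdbool.h>

    #define GFP_NO_KSWAPD_MODEL 0x1u    /* stand-in for __GFP_NO_KSWAPD */

    static bool should_fail_allocation(bool deferred, bool contended,
                                       unsigned int gfp_flags)
    {
        return (deferred || contended) && (gfp_flags & GFP_NO_KSWAPD_MODEL);
    }
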
/* Try direct reclaim and then allocating */
@@ -2526,6 +2543,7 @@
nodemask,
alloc_flags, preferred_zone,
migratetype, sync_migration,
+ &contended_compaction,
&deferred_compaction,
&did_some_progress);
if (page)
@@ -5592,26 +5610,28 @@
}
/*
- * This is designed as sub function...plz see page_isolation.c also.
- * set/clear page block's type to be ISOLATE.
- * page allocater never alloc memory from ISOLATE block.
+ * This function checks whether the pageblock includes unmovable pages or not.
+ * If @count is not zero, it is okay to include fewer than @count unmovable pages.
+ *
+ * The PageLRU check without isolation or lru_lock could race so that a
+ * MIGRATE_MOVABLE block might include unmovable pages. It means you can't
+ * expect this function to be exact.
*/
-
-static int
-__count_immobile_pages(struct zone *zone, struct page *page, int count)
+static bool
+__has_unmovable_pages(struct zone *zone, struct page *page, int count)
{
unsigned long pfn, iter, found;
int mt;
/*
* To avoid noisy data, lru_add_drain_all() should be called first.
- * If ZONE_MOVABLE, the zone never contains immobile pages
+ * If ZONE_MOVABLE, the zone never contains unmovable pages
*/
if (zone_idx(zone) == ZONE_MOVABLE)
- return true;
+ return false;
mt = get_pageblock_migratetype(page);
if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
- return true;
+ return false;
pfn = page_to_pfn(page);
for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
@@ -5621,11 +5641,18 @@
continue;
page = pfn_to_page(check);
- if (!page_count(page)) {
+ /*
+ * We can't use page_count without pinning the page
+ * because another CPU can free the compound page.
+ * This check already skips compound tails of THP
+ * because their page->_count is zero at all times.
+ */
+ if (!atomic_read(&page->_count)) {
if (PageBuddy(page))
iter += (1 << page_order(page)) - 1;
continue;
}
+
if (!PageLRU(page))
found++;
/*
@@ -5642,9 +5669,9 @@
* page at boot.
*/
if (found > count)
- return false;
+ return true;
}
- return true;
+ return false;
}
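
Renaming __count_immobile_pages() to __has_unmovable_pages() flips the predicate's polarity, which is why every call site now negates the result. In miniature (hypothetical helpers):

    #include <stdbool.h>

    /* The new predicate answers the opposite question from the old one. */
    static bool has_unmovable(int unmovable_found, int tolerated)
    {
        return unmovable_found > tolerated;
    }

    /* Callers that used to ask "is it all movable?" now negate the result. */
    static bool pageblock_is_removable(int unmovable_found)
    {
        return !has_unmovable(unmovable_found, 0);
    }
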
bool is_pageblock_removable_nolock(struct page *page)
@@ -5668,7 +5695,7 @@
zone->zone_start_pfn + zone->spanned_pages <= pfn)
return false;
- return __count_immobile_pages(zone, page, 0);
+ return !__has_unmovable_pages(zone, page, 0);
}
int set_migratetype_isolate(struct page *page)
@@ -5707,12 +5734,12 @@
* FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
* We just check MOVABLE pages.
*/
- if (__count_immobile_pages(zone, page, arg.pages_found))
+ if (!__has_unmovable_pages(zone, page, arg.pages_found))
ret = 0;
-
/*
- * immobile means "not-on-lru" paes. If immobile is larger than
- * removable-by-driver pages reported by notifier, we'll fail.
+ * Unmovable means "not-on-lru" pages. If unmovable pages are
+ * larger than removable-by-driver pages reported by notifier,
+ * we'll fail.
*/
out:
@@ -5775,34 +5802,27 @@
}
/* [start, end) must belong to a single zone. */
-static int __alloc_contig_migrate_range(unsigned long start, unsigned long end)
+static int __alloc_contig_migrate_range(struct compact_control *cc,
+ unsigned long start, unsigned long end)
{
/* This function is based on compact_zone() from compaction.c. */
-
+ unsigned long nr_reclaimed;
unsigned long pfn = start;
unsigned int tries = 0;
int ret = 0;
- struct compact_control cc = {
- .nr_migratepages = 0,
- .order = -1,
- .zone = page_zone(pfn_to_page(start)),
- .sync = true,
- };
- INIT_LIST_HEAD(&cc.migratepages);
-
migrate_prep();
- while (pfn < end || !list_empty(&cc.migratepages)) {
+ while (pfn < end || !list_empty(&cc->migratepages)) {
if (fatal_signal_pending(current)) {
ret = -EINTR;
break;
}
- if (list_empty(&cc.migratepages)) {
- cc.nr_migratepages = 0;
- pfn = isolate_migratepages_range(cc.zone, &cc,
- pfn, end);
+ if (list_empty(&cc->migratepages)) {
+ cc->nr_migratepages = 0;
+ pfn = isolate_migratepages_range(cc->zone, cc,
+ pfn, end, true);
if (!pfn) {
ret = -EINTR;
break;
@@ -5813,12 +5833,16 @@
break;
}
- ret = migrate_pages(&cc.migratepages,
+ nr_reclaimed = reclaim_clean_pages_from_list(cc->zone, &cc->migratepages);
+
+ cc->nr_migratepages -= nr_reclaimed;
+
+ ret = migrate_pages(&cc->migratepages,
__alloc_contig_migrate_alloc,
0, false, MIGRATE_SYNC);
}
- putback_lru_pages(&cc.migratepages);
+ putback_lru_pages(&cc->migratepages);
return ret > 0 ? 0 : ret;
}
@@ -5849,6 +5873,15 @@
unsigned long outer_start, outer_end;
int ret = 0, order;
+ struct compact_control cc = {
+ .nr_migratepages = 0,
+ .order = -1,
+ .zone = page_zone(pfn_to_page(start)),
+ .sync = true,
+ .ignore_skip_hint = true,
+ };
+ INIT_LIST_HEAD(&cc.migratepages);
+
/*
* What we do here is we mark all pageblocks in range as
* MIGRATE_ISOLATE. Because pageblock and max order pages may
@@ -5880,7 +5913,7 @@
zone->cma_alloc = 1;
- ret = __alloc_contig_migrate_range(start, end);
+ ret = __alloc_contig_migrate_range(&cc, start, end);
if (ret)
goto done;
@@ -5924,7 +5957,7 @@
/* Grab isolated pages from freelists. */
- outer_end = isolate_freepages_range(outer_start, end);
+ outer_end = isolate_freepages_range(&cc, outer_start, end);
if (!outer_end) {
ret = -EBUSY;
goto done;
@@ -5945,8 +5978,15 @@
void free_contig_range(unsigned long pfn, unsigned nr_pages)
{
- for (; nr_pages--; ++pfn)
- __free_page(pfn_to_page(pfn));
+ unsigned int count = 0;
+
+ for (; nr_pages--; pfn++) {
+ struct page *page = pfn_to_page(pfn);
+
+ count += page_count(page) != 1;
+ __free_page(page);
+ }
+ WARN(count != 0, "%d pages are still in use!\n", count);
}
#endif
@@ -6053,6 +6093,7 @@
#ifdef CONFIG_MEMORY_FAILURE
{1UL << PG_hwpoison, "hwpoison" },
#endif
+ {1UL << PG_readahead, "PG_readahead" },
{-1UL, NULL },
};
diff --git a/mm/readahead.c b/mm/readahead.c
index 728a7a3..56f8a24 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -184,6 +184,9 @@
if (!page)
break;
page->index = page_offset;
+
+ page->flags |= (1L << PG_readahead);
+
list_add(&page->lru, &page_pool);
if (page_idx == nr_to_read - lookahead_size)
SetPageReadahead(page);
diff --git a/mm/rmap.c b/mm/rmap.c
index 5b5ad58..0f3b7cd 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -755,12 +755,6 @@
pte_unmap_unlock(pte, ptl);
}
- /* Pretend the page is referenced if the task has the
- swap token and is in the middle of a page fault. */
- if (mm != current->mm && has_swap_token(mm) &&
- rwsem_is_locked(&mm->mmap_sem))
- referenced++;
-
(*mapcount)--;
if (referenced)
diff --git a/mm/thrash.c b/mm/thrash.c
deleted file mode 100644
index 57ad495..0000000
--- a/mm/thrash.c
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * mm/thrash.c
- *
- * Copyright (C) 2004, Red Hat, Inc.
- * Copyright (C) 2004, Rik van Riel <riel@redhat.com>
- * Released under the GPL, see the file COPYING for details.
- *
- * Simple token based thrashing protection, using the algorithm
- * described in: http://www.cse.ohio-state.edu/hpcs/WWW/HTML/publications/abs05-1.html
- *
- * Sep 2006, Ashwin Chaugule <ashwin.chaugule@celunite.com>
- * Improved algorithm to pass token:
- * Each task has a priority which is incremented if it contended
- * for the token in an interval less than its previous attempt.
- * If the token is acquired, that task's priority is boosted to prevent
- * the token from bouncing around too often and to let the task make
- * some progress in its execution.
- */
-
-#include <linux/jiffies.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/swap.h>
-#include <linux/memcontrol.h>
-
-#include <trace/events/vmscan.h>
-
-#define TOKEN_AGING_INTERVAL (0xFF)
-
-static DEFINE_SPINLOCK(swap_token_lock);
-struct mm_struct *swap_token_mm;
-static struct mem_cgroup *swap_token_memcg;
-
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
-static struct mem_cgroup *swap_token_memcg_from_mm(struct mm_struct *mm)
-{
- struct mem_cgroup *memcg;
-
- memcg = try_get_mem_cgroup_from_mm(mm);
- if (memcg)
- css_put(mem_cgroup_css(memcg));
-
- return memcg;
-}
-#else
-static struct mem_cgroup *swap_token_memcg_from_mm(struct mm_struct *mm)
-{
- return NULL;
-}
-#endif
-
-void grab_swap_token(struct mm_struct *mm)
-{
- int current_interval;
- unsigned int old_prio = mm->token_priority;
- static unsigned int global_faults;
- static unsigned int last_aging;
-
- global_faults++;
-
- current_interval = global_faults - mm->faultstamp;
-
- if (!spin_trylock(&swap_token_lock))
- return;
-
- /* First come first served */
- if (!swap_token_mm)
- goto replace_token;
-
- /*
- * Usually, we don't need priority aging because long interval faults
- * makes priority decrease quickly. But there is one exception. If the
- * token owner task is sleeping, it never make long interval faults.
- * Thus, we need a priority aging mechanism instead. The requirements
- * of priority aging are
- * 1) An aging interval is reasonable enough long. Too short aging
- * interval makes quick swap token lost and decrease performance.
- * 2) The swap token owner task have to get priority aging even if
- * it's under sleep.
- */
- if ((global_faults - last_aging) > TOKEN_AGING_INTERVAL) {
- swap_token_mm->token_priority /= 2;
- last_aging = global_faults;
- }
-
- if (mm == swap_token_mm) {
- mm->token_priority += 2;
- goto update_priority;
- }
-
- if (current_interval < mm->last_interval)
- mm->token_priority++;
- else {
- if (likely(mm->token_priority > 0))
- mm->token_priority--;
- }
-
- /* Check if we deserve the token */
- if (mm->token_priority > swap_token_mm->token_priority)
- goto replace_token;
-
-update_priority:
- trace_update_swap_token_priority(mm, old_prio, swap_token_mm);
-
-out:
- mm->faultstamp = global_faults;
- mm->last_interval = current_interval;
- spin_unlock(&swap_token_lock);
- return;
-
-replace_token:
- mm->token_priority += 2;
- trace_replace_swap_token(swap_token_mm, mm);
- swap_token_mm = mm;
- swap_token_memcg = swap_token_memcg_from_mm(mm);
- last_aging = global_faults;
- goto out;
-}
-
-/* Called on process exit. */
-void __put_swap_token(struct mm_struct *mm)
-{
- spin_lock(&swap_token_lock);
- if (likely(mm == swap_token_mm)) {
- trace_put_swap_token(swap_token_mm);
- swap_token_mm = NULL;
- swap_token_memcg = NULL;
- }
- spin_unlock(&swap_token_lock);
-}
-
-static bool match_memcg(struct mem_cgroup *a, struct mem_cgroup *b)
-{
- if (!a)
- return true;
- if (!b)
- return true;
- if (a == b)
- return true;
- return false;
-}
-
-void disable_swap_token(struct mem_cgroup *memcg)
-{
- /* memcg reclaim don't disable unrelated mm token. */
- if (match_memcg(memcg, swap_token_memcg)) {
- spin_lock(&swap_token_lock);
- if (match_memcg(memcg, swap_token_memcg)) {
- trace_disable_swap_token(swap_token_mm);
- swap_token_mm = NULL;
- swap_token_memcg = NULL;
- }
- spin_unlock(&swap_token_lock);
- }
-}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 33dc256..c69f5e2 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -53,24 +53,6 @@
#define CREATE_TRACE_POINTS
#include <trace/events/vmscan.h>
-/*
- * reclaim_mode determines how the inactive list is shrunk
- * RECLAIM_MODE_SINGLE: Reclaim only order-0 pages
- * RECLAIM_MODE_ASYNC: Do not block
- * RECLAIM_MODE_SYNC: Allow blocking e.g. call wait_on_page_writeback
- * RECLAIM_MODE_LUMPYRECLAIM: For high-order allocations, take a reference
- * page from the LRU and reclaim all pages within a
- * naturally aligned range
- * RECLAIM_MODE_COMPACTION: For high-order allocations, reclaim a number of
- * order-0 pages and then compact the zone
- */
-typedef unsigned __bitwise__ reclaim_mode_t;
-#define RECLAIM_MODE_SINGLE ((__force reclaim_mode_t)0x01u)
-#define RECLAIM_MODE_ASYNC ((__force reclaim_mode_t)0x02u)
-#define RECLAIM_MODE_SYNC ((__force reclaim_mode_t)0x04u)
-#define RECLAIM_MODE_LUMPYRECLAIM ((__force reclaim_mode_t)0x08u)
-#define RECLAIM_MODE_COMPACTION ((__force reclaim_mode_t)0x10u)
-
struct scan_control {
/* Incremented by the number of inactive pages that were scanned */
unsigned long nr_scanned;
@@ -96,11 +78,8 @@
int order;
- /*
- * Intend to reclaim enough continuous memory rather than reclaim
- * enough amount of memory. i.e, mode for high order allocation.
- */
- reclaim_mode_t reclaim_mode;
+ /* Scan (total_size >> priority) pages at once */
+ int priority;
/*
* The memory cgroup that hit its limit and as a result is the
@@ -164,26 +143,16 @@
{
return !sc->target_mem_cgroup;
}
-
-static bool scanning_global_lru(struct mem_cgroup_zone *mz)
-{
- return !mz->mem_cgroup;
-}
#else
static bool global_reclaim(struct scan_control *sc)
{
return true;
}
-
-static bool scanning_global_lru(struct mem_cgroup_zone *mz)
-{
- return true;
-}
#endif
static struct zone_reclaim_stat *get_reclaim_stat(struct mem_cgroup_zone *mz)
{
- if (!scanning_global_lru(mz))
+ if (!mem_cgroup_disabled())
return mem_cgroup_get_reclaim_stat(mz->mem_cgroup, mz->zone);
return &mz->zone->reclaim_stat;
@@ -192,7 +161,7 @@
static unsigned long zone_nr_lru_pages(struct mem_cgroup_zone *mz,
enum lru_list lru)
{
- if (!scanning_global_lru(mz))
+ if (!mem_cgroup_disabled())
return mem_cgroup_zone_nr_lru_pages(mz->mem_cgroup,
zone_to_nid(mz->zone),
zone_idx(mz->zone),
@@ -364,39 +333,6 @@
return ret;
}
-static void set_reclaim_mode(int priority, struct scan_control *sc,
- bool sync)
-{
- reclaim_mode_t syncmode = sync ? RECLAIM_MODE_SYNC : RECLAIM_MODE_ASYNC;
-
- /*
- * Initially assume we are entering either lumpy reclaim or
- * reclaim/compaction.Depending on the order, we will either set the
- * sync mode or just reclaim order-0 pages later.
- */
- if (COMPACTION_BUILD)
- sc->reclaim_mode = RECLAIM_MODE_COMPACTION;
- else
- sc->reclaim_mode = RECLAIM_MODE_LUMPYRECLAIM;
-
- /*
- * Avoid using lumpy reclaim or reclaim/compaction if possible by
- * restricting when its set to either costly allocations or when
- * under memory pressure
- */
- if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
- sc->reclaim_mode |= syncmode;
- else if (sc->order && priority < DEF_PRIORITY - 2)
- sc->reclaim_mode |= syncmode;
- else
- sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;
-}
-
-static void reset_reclaim_mode(struct scan_control *sc)
-{
- sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;
-}
-
static inline int is_page_cache_freeable(struct page *page)
{
/*
@@ -416,10 +352,6 @@
return 1;
if (bdi == current->backing_dev_info)
return 1;
-
- /* lumpy reclaim for hugepage often need a lot of write */
- if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
- return 1;
return 0;
}
@@ -523,8 +455,7 @@
/* synchronous write or broken a_ops? */
ClearPageReclaim(page);
}
- trace_mm_vmscan_writepage(page,
- trace_reclaim_flags(page, sc->reclaim_mode));
+ trace_mm_vmscan_writepage(page, trace_reclaim_flags(page));
inc_zone_page_state(page, NR_VMSCAN_WRITE);
return PAGE_SUCCESS;
}
@@ -701,19 +632,15 @@
};
static enum page_references page_check_references(struct page *page,
- struct mem_cgroup_zone *mz,
struct scan_control *sc)
{
int referenced_ptes, referenced_page;
unsigned long vm_flags;
- referenced_ptes = page_referenced(page, 1, mz->mem_cgroup, &vm_flags);
+ referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
+ &vm_flags);
referenced_page = TestClearPageReferenced(page);
- /* Lumpy reclaim - ignore references */
- if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM)
- return PAGEREF_RECLAIM;
-
/*
* Mlock lost the isolation race with us. Let try_to_unmap()
* move the page to the unevictable list.
@@ -763,11 +690,12 @@
* shrink_page_list() returns the number of reclaimed pages
*/
static unsigned long shrink_page_list(struct list_head *page_list,
- struct mem_cgroup_zone *mz,
+ struct zone *zone,
struct scan_control *sc,
- int priority,
+ enum ttu_flags ttu_flags,
unsigned long *ret_nr_dirty,
- unsigned long *ret_nr_writeback)
+ unsigned long *ret_nr_writeback,
+ bool force_reclaim)
{
LIST_HEAD(ret_pages);
LIST_HEAD(free_pages);
@@ -780,10 +708,10 @@
cond_resched();
while (!list_empty(page_list)) {
- enum page_references references;
struct address_space *mapping;
struct page *page;
int may_enter_fs;
+ enum page_references references = PAGEREF_RECLAIM_CLEAN;
cond_resched();
@@ -794,7 +722,7 @@
goto keep;
VM_BUG_ON(PageActive(page));
- VM_BUG_ON(page_zone(page) != mz->zone);
+ VM_BUG_ON(page_zone(page) != zone);
sc->nr_scanned++;
@@ -813,22 +741,13 @@
if (PageWriteback(page)) {
nr_writeback++;
- /*
- * Synchronous reclaim cannot queue pages for
- * writeback due to the possibility of stack overflow
- * but if it encounters a page under writeback, wait
- * for the IO to complete.
- */
- if ((sc->reclaim_mode & RECLAIM_MODE_SYNC) &&
- may_enter_fs)
- wait_on_page_writeback(page);
- else {
- unlock_page(page);
- goto keep_lumpy;
- }
+ unlock_page(page);
+ goto keep;
}
- references = page_check_references(page, mz, sc);
+ if (!force_reclaim)
+ references = page_check_references(page, sc);
+
switch (references) {
case PAGEREF_ACTIVATE:
goto activate_locked;
@@ -858,7 +777,7 @@
* processes. Try to unmap it here.
*/
if (page_mapped(page) && mapping) {
- switch (try_to_unmap(page, TTU_UNMAP)) {
+ switch (try_to_unmap(page, ttu_flags)) {
case SWAP_FAIL:
goto activate_locked;
case SWAP_AGAIN:
@@ -879,7 +798,8 @@
* unless under significant pressure.
*/
if (page_is_file_cache(page) &&
- (!current_is_kswapd() || priority >= DEF_PRIORITY - 2)) {
+ (!current_is_kswapd() ||
+ sc->priority >= DEF_PRIORITY - 2)) {
/*
* Immediately reclaim when written back.
* Similar in principle to deactivate_page()
@@ -908,7 +828,7 @@
goto activate_locked;
case PAGE_SUCCESS:
if (PageWriteback(page))
- goto keep_lumpy;
+ goto keep;
if (PageDirty(page))
goto keep;
@@ -994,7 +914,6 @@
try_to_free_swap(page);
unlock_page(page);
putback_lru_page(page);
- reset_reclaim_mode(sc);
continue;
activate_locked:
@@ -1007,8 +926,6 @@
keep_locked:
unlock_page(page);
keep:
- reset_reclaim_mode(sc);
-keep_lumpy:
list_add(&page->lru, &ret_pages);
VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
}
@@ -1020,7 +937,7 @@
* will encounter the same problem
*/
if (nr_dirty && nr_dirty == nr_congested && global_reclaim(sc))
- zone_set_flag(mz->zone, ZONE_CONGESTED);
+ zone_set_flag(zone, ZONE_CONGESTED);
free_hot_cold_page_list(&free_pages, 1);
@@ -1031,6 +948,33 @@
return nr_reclaimed;
}
+unsigned long reclaim_clean_pages_from_list(struct zone *zone,
+ struct list_head *page_list)
+{
+ struct scan_control sc = {
+ .gfp_mask = GFP_KERNEL,
+ .priority = DEF_PRIORITY,
+ .may_unmap = 1,
+ };
+ unsigned long ret, dummy1, dummy2;
+ struct page *page, *next;
+ LIST_HEAD(clean_pages);
+
+ list_for_each_entry_safe(page, next, page_list, lru) {
+ if (page_is_file_cache(page) && !PageDirty(page)) {
+ ClearPageActive(page);
+ list_move(&page->lru, &clean_pages);
+ }
+ }
+
+ ret = shrink_page_list(&clean_pages, zone, &sc,
+ TTU_UNMAP|TTU_IGNORE_ACCESS,
+ &dummy1, &dummy2, true);
+ list_splice(&clean_pages, page_list);
+ __mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret);
+ return ret;
+}
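
reclaim_clean_pages_from_list() carves the clean file-backed pages out of an isolated list, reclaims only those, and splices the survivors back, since clean page cache can be dropped without writeback or swap. A minimal array-based model of the filter; fake_page and all fields are fabricated:

    #include <stdbool.h>
    #include <stdio.h>

    /* Minimal model of reclaim_clean_pages_from_list(); not kernel code. */
    struct fake_page {
        bool file_backed;
        bool dirty;
        bool reclaimed;
    };

    static unsigned long reclaim_clean(struct fake_page *pages, unsigned int n)
    {
        unsigned long freed = 0;
        unsigned int i;

        for (i = 0; i < n; i++) {
            /* Only clean file cache is cheap to drop without writeback. */
            if (pages[i].file_backed && !pages[i].dirty) {
                pages[i].reclaimed = true;
                freed++;
            }
        }
        return freed;  /* caller adjusts isolated-page accounting by this */
    }

    int main(void)
    {
        struct fake_page pages[] = {
            { .file_backed = true,  .dirty = false },  /* reclaimable */
            { .file_backed = true,  .dirty = true  },  /* needs writeback */
            { .file_backed = false, .dirty = false },  /* anon: needs swap */
        };
        printf("freed %lu of 3\n", reclaim_clean(pages, 3));
        return 0;
    }
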
+
/*
* Attempt to remove the specified page from its LRU. Only take this page
* if it is of the appropriate PageActive status. Pages which are being
@@ -1041,35 +985,16 @@
*
* returns 0 on success, -ve errno on failure.
*/
-int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file)
+int __isolate_lru_page(struct page *page, isolate_mode_t mode)
{
- bool all_lru_mode;
int ret = -EINVAL;
/* Only take pages on the LRU. */
if (!PageLRU(page))
return ret;
- all_lru_mode = (mode & (ISOLATE_ACTIVE|ISOLATE_INACTIVE)) ==
- (ISOLATE_ACTIVE|ISOLATE_INACTIVE);
-
- /*
- * When checking the active state, we need to be sure we are
- * dealing with comparible boolean values. Take the logical not
- * of each.
- */
- if (!all_lru_mode && !PageActive(page) != !(mode & ISOLATE_ACTIVE))
- return ret;
-
- if (!all_lru_mode && !!page_is_file_cache(page) != file)
- return ret;
-
- /*
- * When this function is being called for lumpy reclaim, we
- * initially look into all LRU pages, active, inactive and
- * unevictable; only give shrink_page_list evictable pages.
- */
- if (PageUnevictable(page))
+ /* Compaction should not handle unevictable pages but CMA can do so */
+ if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
return ret;
ret = -EBUSY;
@@ -1135,52 +1060,38 @@
* Appropriate locks must be held before calling this function.
*
* @nr_to_scan: The number of pages to look through on the list.
- * @mz: The mem_cgroup_zone to pull pages from.
+ * @lruvec: The LRU vector to pull pages from.
* @dst: The temp list to put pages on to.
* @nr_scanned: The number of pages that were scanned.
* @sc: The scan_control struct for this reclaim session
* @mode: One of the LRU isolation modes
- * @active: True [1] if isolating active pages
- * @file: True [1] if isolating file [!anon] pages
+ * @lru: LRU list id for isolating
*
* returns how many pages were moved onto *@dst.
*/
static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
- struct mem_cgroup_zone *mz, struct list_head *dst,
+ struct lruvec *lruvec, struct list_head *dst,
unsigned long *nr_scanned, struct scan_control *sc,
- isolate_mode_t mode, int active, int file)
+ isolate_mode_t mode, enum lru_list lru)
{
- struct lruvec *lruvec;
struct list_head *src;
unsigned long nr_taken = 0;
- unsigned long nr_lumpy_taken = 0;
- unsigned long nr_lumpy_dirty = 0;
- unsigned long nr_lumpy_failed = 0;
unsigned long scan;
- int lru = LRU_BASE;
+ int file = is_file_lru(lru);
- lruvec = mem_cgroup_zone_lruvec(mz->zone, mz->mem_cgroup);
- if (active)
- lru += LRU_ACTIVE;
- if (file)
- lru += LRU_FILE;
src = &lruvec->lists[lru];
for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
struct page *page;
- unsigned long pfn;
- unsigned long end_pfn;
- unsigned long page_pfn;
- int zone_id;
page = lru_to_page(src);
prefetchw_prev_lru_page(page, src, flags);
VM_BUG_ON(!PageLRU(page));
- switch (__isolate_lru_page(page, mode, file)) {
+ switch (__isolate_lru_page(page, mode)) {
case 0:
- mem_cgroup_lru_del(page);
+ mem_cgroup_lru_del_list(page, lru);
list_move(&page->lru, dst);
nr_taken += hpage_nr_pages(page);
break;
@@ -1193,84 +1104,6 @@
default:
BUG();
}
-
- if (!sc->order || !(sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM))
- continue;
-
- /*
- * Attempt to take all pages in the order aligned region
- * surrounding the tag page. Only take those pages of
- * the same active state as that tag page. We may safely
- * round the target page pfn down to the requested order
- * as the mem_map is guaranteed valid out to MAX_ORDER,
- * where that page is in a different zone we will detect
- * it from its zone id and abort this block scan.
- */
- zone_id = page_zone_id(page);
- page_pfn = page_to_pfn(page);
- pfn = page_pfn & ~((1 << sc->order) - 1);
- end_pfn = pfn + (1 << sc->order);
- for (; pfn < end_pfn; pfn++) {
- struct page *cursor_page;
-
- /* The target page is in the block, ignore it. */
- if (unlikely(pfn == page_pfn))
- continue;
-
- /* Avoid holes within the zone. */
- if (unlikely(!pfn_valid_within(pfn)))
- break;
-
- cursor_page = pfn_to_page(pfn);
-
- /* Check that we have not crossed a zone boundary. */
- if (unlikely(page_zone_id(cursor_page) != zone_id))
- break;
-
- /*
- * If we don't have enough swap space, reclaiming of
- * anon page which don't already have a swap slot is
- * pointless.
- */
- if (nr_swap_pages <= 0 && PageSwapBacked(cursor_page) &&
- !PageSwapCache(cursor_page))
- break;
-
- if (__isolate_lru_page(cursor_page, mode, file) == 0) {
- unsigned int isolated_pages;
-
- mem_cgroup_lru_del(cursor_page);
- list_move(&cursor_page->lru, dst);
- isolated_pages = hpage_nr_pages(cursor_page);
- nr_taken += isolated_pages;
- nr_lumpy_taken += isolated_pages;
- if (PageDirty(cursor_page))
- nr_lumpy_dirty += isolated_pages;
- scan++;
- pfn += isolated_pages - 1;
- } else {
- /*
- * Check if the page is freed already.
- *
- * We can't use page_count() as that
- * requires compound_head and we don't
- * have a pin on the page here. If a
- * page is tail, we may or may not
- * have isolated the head, so assume
- * it's not free, it'd be tricky to
- * track the head status without a
- * page pin.
- */
- if (!PageTail(cursor_page) &&
- !atomic_read(&cursor_page->_count))
- continue;
- break;
- }
- }
-
- /* If we break out of the loop above, lumpy reclaim failed */
- if (pfn < end_pfn)
- nr_lumpy_failed++;
}
*nr_scanned = scan;
@@ -1278,7 +1111,6 @@
trace_mm_vmscan_lru_isolate(sc->order,
nr_to_scan, scan,
nr_taken,
- nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed,
mode, file);
return nr_taken;
}
@@ -1407,112 +1239,25 @@
list_splice(&pages_to_free, page_list);
}
-static noinline_for_stack void
-update_isolated_counts(struct mem_cgroup_zone *mz,
- struct list_head *page_list,
- unsigned long *nr_anon,
- unsigned long *nr_file)
-{
- struct zone *zone = mz->zone;
- unsigned int count[NR_LRU_LISTS] = { 0, };
- unsigned long nr_active = 0;
- struct page *page;
- int lru;
-
- /*
- * Count pages and clear active flags
- */
- list_for_each_entry(page, page_list, lru) {
- int numpages = hpage_nr_pages(page);
- lru = page_lru_base_type(page);
- if (PageActive(page)) {
- lru += LRU_ACTIVE;
- ClearPageActive(page);
- nr_active += numpages;
- }
- count[lru] += numpages;
- }
-
- preempt_disable();
- __count_vm_events(PGDEACTIVATE, nr_active);
-
- __mod_zone_page_state(zone, NR_ACTIVE_FILE,
- -count[LRU_ACTIVE_FILE]);
- __mod_zone_page_state(zone, NR_INACTIVE_FILE,
- -count[LRU_INACTIVE_FILE]);
- __mod_zone_page_state(zone, NR_ACTIVE_ANON,
- -count[LRU_ACTIVE_ANON]);
- __mod_zone_page_state(zone, NR_INACTIVE_ANON,
- -count[LRU_INACTIVE_ANON]);
-
- *nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
- *nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
-
- __mod_zone_page_state(zone, NR_ISOLATED_ANON, *nr_anon);
- __mod_zone_page_state(zone, NR_ISOLATED_FILE, *nr_file);
- preempt_enable();
-}
-
-/*
- * Returns true if a direct reclaim should wait on pages under writeback.
- *
- * If we are direct reclaiming for contiguous pages and we do not reclaim
- * everything in the list, try again and wait for writeback IO to complete.
- * This will stall high-order allocations noticeably. Only do that when really
- * need to free the pages under high memory pressure.
- */
-static inline bool should_reclaim_stall(unsigned long nr_taken,
- unsigned long nr_freed,
- int priority,
- struct scan_control *sc)
-{
- int lumpy_stall_priority;
-
- /* kswapd should not stall on sync IO */
- if (current_is_kswapd())
- return false;
-
- /* Only stall on lumpy reclaim */
- if (sc->reclaim_mode & RECLAIM_MODE_SINGLE)
- return false;
-
- /* If we have reclaimed everything on the isolated list, no stall */
- if (nr_freed == nr_taken)
- return false;
-
- /*
- * For high-order allocations, there are two stall thresholds.
- * High-cost allocations stall immediately where as lower
- * order allocations such as stacks require the scanning
- * priority to be much higher before stalling.
- */
- if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
- lumpy_stall_priority = DEF_PRIORITY;
- else
- lumpy_stall_priority = DEF_PRIORITY / 3;
-
- return priority <= lumpy_stall_priority;
-}
-
/*
* shrink_inactive_list() is a helper for shrink_zone(). It returns the number
* of reclaimed pages
*/
static noinline_for_stack unsigned long
shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
- struct scan_control *sc, int priority, int file)
+ struct scan_control *sc, enum lru_list lru)
{
LIST_HEAD(page_list);
unsigned long nr_scanned;
unsigned long nr_reclaimed = 0;
unsigned long nr_taken;
- unsigned long nr_anon;
- unsigned long nr_file;
unsigned long nr_dirty = 0;
unsigned long nr_writeback = 0;
- isolate_mode_t isolate_mode = ISOLATE_INACTIVE;
+ isolate_mode_t isolate_mode = 0;
+ int file = is_file_lru(lru);
struct zone *zone = mz->zone;
struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
+ struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, mz->mem_cgroup);
while (unlikely(too_many_isolated(zone, file, sc))) {
congestion_wait(BLK_RW_ASYNC, HZ/10);
@@ -1522,10 +1267,6 @@
return SWAP_CLUSTER_MAX;
}
- set_reclaim_mode(priority, sc, false);
- if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM)
- isolate_mode |= ISOLATE_ACTIVE;
-
lru_add_drain();
if (!sc->may_unmap)
@@ -1535,8 +1276,12 @@
spin_lock_irq(&zone->lru_lock);
- nr_taken = isolate_lru_pages(nr_to_scan, mz, &page_list, &nr_scanned,
- sc, isolate_mode, 0, file);
+ nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
+ &nr_scanned, sc, isolate_mode, lru);
+
+ __mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken);
+ __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
+
if (global_reclaim(sc)) {
zone->pages_scanned += nr_scanned;
if (current_is_kswapd())
@@ -1551,22 +1296,12 @@
if (nr_taken == 0)
return 0;
- update_isolated_counts(mz, &page_list, &nr_anon, &nr_file);
-
- nr_reclaimed = shrink_page_list(&page_list, mz, sc, priority,
- &nr_dirty, &nr_writeback);
-
- /* Check if we should synchronously wait for writeback */
- if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) {
- set_reclaim_mode(priority, sc, true);
- nr_reclaimed += shrink_page_list(&page_list, mz, sc,
- priority, &nr_dirty, &nr_writeback);
- }
+ nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP,
+ &nr_dirty, &nr_writeback, false);
spin_lock_irq(&zone->lru_lock);
- reclaim_stat->recent_scanned[0] += nr_anon;
- reclaim_stat->recent_scanned[1] += nr_file;
+ reclaim_stat->recent_scanned[file] += nr_taken;
if (global_reclaim(sc)) {
if (current_is_kswapd())
@@ -1579,8 +1314,7 @@
putback_inactive_pages(mz, &page_list);
- __mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
- __mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);
+ __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
spin_unlock_irq(&zone->lru_lock);
@@ -1609,14 +1343,15 @@
* DEF_PRIORITY-6 For SWAP_CLUSTER_MAX isolated pages, throttle if any
* isolated page is PageWriteback
*/
- if (nr_writeback && nr_writeback >= (nr_taken >> (DEF_PRIORITY-priority)))
+ if (nr_writeback && nr_writeback >=
+ (nr_taken >> (DEF_PRIORITY - sc->priority)))
wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);
trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
zone_idx(zone),
nr_scanned, nr_reclaimed,
- priority,
- trace_shrink_flags(file, sc->reclaim_mode));
+ sc->priority,
+ trace_shrink_flags(file));
return nr_reclaimed;
}
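
The rewritten throttle check scales with sc->priority: the threshold nr_taken >> (DEF_PRIORITY - sc->priority) halves with each priority drop, so rising pressure makes direct reclaimers wait on writeback sooner. A small sketch of the arithmetic, assuming DEF_PRIORITY == 12 and a SWAP_CLUSTER_MAX (32-page) isolation batch:

#include <stdio.h>

#define DEF_PRIORITY		12
#define SWAP_CLUSTER_MAX	32

int main(void)
{
	unsigned long nr_taken = SWAP_CLUSTER_MAX;
	int priority;

	/* At DEF_PRIORITY every isolated page must be under writeback to
	 * throttle; by DEF_PRIORITY - 6 the threshold reaches 0, i.e. a
	 * single writeback page triggers wait_iff_congested(). */
	for (priority = DEF_PRIORITY; priority >= DEF_PRIORITY - 6; priority--)
		printf("priority %2d: throttle when nr_writeback >= %lu\n",
		       priority, nr_taken >> (DEF_PRIORITY - priority));
	return 0;
}
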
@@ -1679,7 +1414,7 @@
static void shrink_active_list(unsigned long nr_to_scan,
struct mem_cgroup_zone *mz,
struct scan_control *sc,
- int priority, int file)
+ enum lru_list lru)
{
unsigned long nr_taken;
unsigned long nr_scanned;
@@ -1690,13 +1425,13 @@
struct page *page;
struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
unsigned long nr_rotated = 0;
- isolate_mode_t isolate_mode = ISOLATE_ACTIVE;
+ isolate_mode_t isolate_mode = 0;
+ int file = is_file_lru(lru);
struct zone *zone = mz->zone;
+ struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, mz->mem_cgroup);
lru_add_drain();
- reset_reclaim_mode(sc);
-
if (!sc->may_unmap)
isolate_mode |= ISOLATE_UNMAPPED;
if (!sc->may_writepage)
@@ -1704,18 +1439,15 @@
spin_lock_irq(&zone->lru_lock);
- nr_taken = isolate_lru_pages(nr_to_scan, mz, &l_hold, &nr_scanned, sc,
- isolate_mode, 1, file);
+ nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
+ &nr_scanned, sc, isolate_mode, lru);
if (global_reclaim(sc))
zone->pages_scanned += nr_scanned;
reclaim_stat->recent_scanned[file] += nr_taken;
__count_zone_vm_events(PGREFILL, zone, nr_scanned);
- if (file)
- __mod_zone_page_state(zone, NR_ACTIVE_FILE, -nr_taken);
- else
- __mod_zone_page_state(zone, NR_ACTIVE_ANON, -nr_taken);
+ __mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken);
__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
spin_unlock_irq(&zone->lru_lock);
@@ -1737,7 +1469,8 @@
}
}
- if (page_referenced(page, 0, mz->mem_cgroup, &vm_flags)) {
+ if (page_referenced(page, 0, sc->target_mem_cgroup,
+ &vm_flags)) {
nr_rotated += hpage_nr_pages(page);
/*
* Identify referenced, file-backed active pages and
@@ -1770,10 +1503,8 @@
*/
reclaim_stat->recent_rotated[file] += nr_rotated;
- move_active_pages_to_lru(zone, &l_active, &l_hold,
- LRU_ACTIVE + file * LRU_FILE);
- move_active_pages_to_lru(zone, &l_inactive, &l_hold,
- LRU_BASE + file * LRU_FILE);
+ move_active_pages_to_lru(zone, &l_active, &l_hold, lru);
+ move_active_pages_to_lru(zone, &l_inactive, &l_hold, lru - LRU_ACTIVE);
__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
spin_unlock_irq(&zone->lru_lock);
@@ -1811,7 +1542,7 @@
if (!total_swap_pages)
return 0;
- if (!scanning_global_lru(mz))
+ if (!mem_cgroup_disabled())
return mem_cgroup_inactive_anon_is_low(mz->mem_cgroup,
mz->zone);
@@ -1850,7 +1581,7 @@
*/
static int inactive_file_is_low(struct mem_cgroup_zone *mz)
{
- if (!scanning_global_lru(mz))
+ if (!mem_cgroup_disabled())
return mem_cgroup_inactive_file_is_low(mz->mem_cgroup,
mz->zone);
@@ -1867,25 +1598,24 @@
static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
struct mem_cgroup_zone *mz,
- struct scan_control *sc, int priority)
+ struct scan_control *sc)
{
int file = is_file_lru(lru);
if (is_active_lru(lru)) {
if (inactive_list_is_low(mz, file))
- shrink_active_list(nr_to_scan, mz, sc, priority, file);
+ shrink_active_list(nr_to_scan, mz, sc, lru);
return 0;
}
- return shrink_inactive_list(nr_to_scan, mz, sc, priority, file);
+ return shrink_inactive_list(nr_to_scan, mz, sc, lru);
}
-static int vmscan_swappiness(struct mem_cgroup_zone *mz,
- struct scan_control *sc)
+static int vmscan_swappiness(struct scan_control *sc)
{
if (global_reclaim(sc))
return vm_swappiness;
- return mem_cgroup_swappiness(mz->mem_cgroup);
+ return mem_cgroup_swappiness(sc->target_mem_cgroup);
}
/*
@@ -1897,7 +1627,7 @@
* nr[0] = anon pages to scan; nr[1] = file pages to scan
*/
static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
- unsigned long *nr, int priority)
+ unsigned long *nr)
{
unsigned long anon, file, free;
unsigned long anon_prio, file_prio;
@@ -1953,8 +1683,8 @@
* With swappiness at 100, anonymous and file have the same priority.
* This scanning priority is essentially the inverse of IO cost.
*/
- anon_prio = vmscan_swappiness(mz, sc);
- file_prio = 200 - vmscan_swappiness(mz, sc);
+ anon_prio = vmscan_swappiness(sc);
+ file_prio = 200 - vmscan_swappiness(sc);
/*
* OK, so we have swap space and a fair amount of page cache
@@ -1983,10 +1713,10 @@
* proportional to the fraction of recently scanned pages on
* each list that were recently referenced and in active use.
*/
- ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1);
+ ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1);
ap /= reclaim_stat->recent_rotated[0] + 1;
- fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
+ fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
fp /= reclaim_stat->recent_rotated[1] + 1;
spin_unlock_irq(&mz->zone->lru_lock);
@@ -1999,8 +1729,8 @@
unsigned long scan;
scan = zone_nr_lru_pages(mz, lru);
- if (priority || noswap) {
- scan >>= priority;
+ if (sc->priority || noswap || !vmscan_swappiness(sc)) {
+ scan >>= sc->priority;
if (!scan && force_scan)
scan = SWAP_CLUSTER_MAX;
scan = div64_u64(scan * fraction[file], denominator);
@@ -2009,12 +1739,23 @@
}
}
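
Two behavioural notes on get_scan_count() here: dropping the +1 from (anon_prio + 1) means swappiness 0 now zeroes ap outright, and the added !vmscan_swappiness(sc) test forces the proportional path so anon scanning is suppressed entirely. A worked userspace example of the split, with an assumed (hypothetical) recent_scanned/recent_rotated history and the default vm_swappiness of 60:

#include <stdio.h>

int main(void)
{
	unsigned long swappiness = 60;	/* vm_swappiness default */
	unsigned long anon_prio = swappiness;
	unsigned long file_prio = 200 - swappiness;
	/* Assumed history: anon rarely rotated, file heavily rotated. */
	unsigned long scanned[2] = { 1000, 4000 };	/* [0]=anon, [1]=file */
	unsigned long rotated[2] = { 100, 2000 };
	unsigned long long ap, fp;

	ap = (unsigned long long)anon_prio * (scanned[0] + 1) / (rotated[0] + 1);
	fp = (unsigned long long)file_prio * (scanned[1] + 1) / (rotated[1] + 1);

	/* fraction[anon] = ap / (ap + fp), applied after scan >>= priority */
	printf("ap=%llu fp=%llu -> anon share %.1f%%\n",
	       ap, fp, 100.0 * ap / (ap + fp));
	return 0;
}
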
+/* Use reclaim/compaction for costly allocs or under memory pressure */
+static bool in_reclaim_compaction(struct scan_control *sc)
+{
+ if (COMPACTION_BUILD && sc->order &&
+ (sc->order > PAGE_ALLOC_COSTLY_ORDER ||
+ sc->priority < DEF_PRIORITY - 2))
+ return true;
+
+ return false;
+}
+
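
in_reclaim_compaction() replaces the RECLAIM_MODE_COMPACTION flag with a direct predicate: a high-order request qualifies if it is costly (order > PAGE_ALLOC_COSTLY_ORDER) or once priority has dropped below DEF_PRIORITY - 2. A tiny truth-table sketch, assuming those constants are 3 and 12 and COMPACTION_BUILD is true:

#include <stdbool.h>
#include <stdio.h>

#define DEF_PRIORITY		12
#define PAGE_ALLOC_COSTLY_ORDER	3

static bool in_reclaim_compaction(int order, int priority)
{
	return order && (order > PAGE_ALLOC_COSTLY_ORDER ||
			 priority < DEF_PRIORITY - 2);
}

int main(void)
{
	int order, prio;

	for (order = 0; order <= 4; order += 2)
		for (prio = 12; prio >= 8; prio -= 2)
			printf("order %d prio %2d -> %d\n", order, prio,
			       in_reclaim_compaction(order, prio));
	return 0;
}
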
/*
- * Reclaim/compaction depends on a number of pages being freed. To avoid
- * disruption to the system, a small number of order-0 pages continue to be
- * rotated and reclaimed in the normal fashion. However, by the time we get
- * back to the allocator and call try_to_compact_zone(), we ensure that
- * there are enough free pages for it to be likely successful
+ * Reclaim/compaction is used for high-order allocation requests. It reclaims
+ * order-0 pages before compacting the zone. should_continue_reclaim() returns
+ * true if more pages should be reclaimed so that when the page allocator
+ * calls try_to_compact_zone() it will have enough free pages to succeed.
+ * It will give up earlier than that if there is difficulty reclaiming pages.
*/
static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz,
unsigned long nr_reclaimed,
@@ -2025,7 +1766,7 @@
unsigned long inactive_lru_pages;
/* If not in reclaim/compaction mode, stop */
- if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION))
+ if (!in_reclaim_compaction(sc))
return false;
/* Consider stopping depending on scan and reclaim activity */
@@ -2076,7 +1817,7 @@
/*
* This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
*/
-static void shrink_mem_cgroup_zone(int priority, struct mem_cgroup_zone *mz,
+static void shrink_mem_cgroup_zone(struct mem_cgroup_zone *mz,
struct scan_control *sc)
{
unsigned long nr[NR_LRU_LISTS];
@@ -2089,7 +1830,7 @@
restart:
nr_reclaimed = 0;
nr_scanned = sc->nr_scanned;
- get_scan_count(mz, sc, nr, priority);
+ get_scan_count(mz, sc, nr);
blk_start_plug(&plug);
while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
@@ -2101,7 +1842,7 @@
nr[lru] -= nr_to_scan;
nr_reclaimed += shrink_list(lru, nr_to_scan,
- mz, sc, priority);
+ mz, sc);
}
}
/*
@@ -2112,7 +1853,8 @@
* with multiple processes reclaiming pages, the total
* freeing target can get unreasonably large.
*/
- if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY)
+ if (nr_reclaimed >= nr_to_reclaim &&
+ sc->priority < DEF_PRIORITY)
break;
}
blk_finish_plug(&plug);
@@ -2123,23 +1865,23 @@
* rebalance the anon lru active/inactive ratio.
*/
if (inactive_anon_is_low(mz))
- shrink_active_list(SWAP_CLUSTER_MAX, mz, sc, priority, 0);
+ shrink_active_list(SWAP_CLUSTER_MAX, mz,
+ sc, LRU_ACTIVE_ANON);
/* reclaim/compaction might need reclaim to continue */
if (should_continue_reclaim(mz, nr_reclaimed,
- sc->nr_scanned - nr_scanned, sc))
+ sc->nr_scanned - nr_scanned, sc))
goto restart;
throttle_vm_writeout(sc->gfp_mask);
}
-static void shrink_zone(int priority, struct zone *zone,
- struct scan_control *sc)
+static void shrink_zone(struct zone *zone, struct scan_control *sc)
{
struct mem_cgroup *root = sc->target_mem_cgroup;
struct mem_cgroup_reclaim_cookie reclaim = {
.zone = zone,
- .priority = priority,
+ .priority = sc->priority,
};
struct mem_cgroup *memcg;
@@ -2150,7 +1892,7 @@
.zone = zone,
};
- shrink_mem_cgroup_zone(priority, &mz, sc);
+ shrink_mem_cgroup_zone(&mz, sc);
/*
* Limit reclaim has historically picked one memcg and
* scanned it with decreasing priority levels until
@@ -2226,8 +1968,7 @@
* the caller that it should consider retrying the allocation instead of
* further reclaim.
*/
-static bool shrink_zones(int priority, struct zonelist *zonelist,
- struct scan_control *sc)
+static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
{
struct zoneref *z;
struct zone *zone;
@@ -2254,7 +1995,8 @@
if (global_reclaim(sc)) {
if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
continue;
- if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+ if (zone->all_unreclaimable &&
+ sc->priority != DEF_PRIORITY)
continue; /* Let kswapd poll it */
if (COMPACTION_BUILD) {
/*
@@ -2286,7 +2028,7 @@
/* need some check to avoid more shrink_zone() */
}
- shrink_zone(priority, zone, sc);
+ shrink_zone(zone, sc);
}
return aborted_reclaim;
@@ -2337,7 +2079,6 @@
struct scan_control *sc,
struct shrink_control *shrink)
{
- int priority;
unsigned long total_scanned = 0;
struct reclaim_state *reclaim_state = current->reclaim_state;
struct zoneref *z;
@@ -2350,11 +2091,9 @@
if (global_reclaim(sc))
count_vm_event(ALLOCSTALL);
- for (priority = DEF_PRIORITY; priority >= 0; priority--) {
+ do {
sc->nr_scanned = 0;
- if (!priority)
- disable_swap_token(sc->target_mem_cgroup);
- aborted_reclaim = shrink_zones(priority, zonelist, sc);
+ aborted_reclaim = shrink_zones(zonelist, sc);
/*
* Don't shrink slabs when reclaiming memory from
@@ -2396,7 +2135,7 @@
/* Take a nap, wait for some writeback to complete */
if (!sc->hibernation_mode && sc->nr_scanned &&
- priority < DEF_PRIORITY - 2) {
+ sc->priority < DEF_PRIORITY - 2) {
struct zone *preferred_zone;
first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask),
@@ -2404,7 +2143,7 @@
&preferred_zone);
wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10);
}
- }
+ } while (--sc->priority >= 0);
out:
delayacct_freepages_end();
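
Moving priority into scan_control keeps the same thirteen passes (DEF_PRIORITY down to 0) while sparing every callee an extra argument. A minimal sketch of the loop equivalence, with a stub scan_control:

#include <stdio.h>

#define DEF_PRIORITY	12

struct scan_control {
	int priority;	/* now carried in the control struct */
};

int main(void)
{
	struct scan_control sc = { .priority = DEF_PRIORITY };
	int passes = 0;

	do {
		passes++;	/* one shrink_zones() pass per priority */
	} while (--sc.priority >= 0);

	printf("%d passes, same as for (prio = 12; prio >= 0; prio--)\n",
	       passes);
	return 0;
}
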
@@ -2442,6 +2181,7 @@
.may_unmap = 1,
.may_swap = 1,
.order = order,
+ .priority = DEF_PRIORITY,
.target_mem_cgroup = NULL,
.nodemask = nodemask,
};
@@ -2474,6 +2214,7 @@
.may_unmap = 1,
.may_swap = !noswap,
.order = 0,
+ .priority = 0,
.target_mem_cgroup = memcg,
};
struct mem_cgroup_zone mz = {
@@ -2484,7 +2225,7 @@
sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
- trace_mm_vmscan_memcg_softlimit_reclaim_begin(0,
+ trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
sc.may_writepage,
sc.gfp_mask);
@@ -2495,7 +2236,7 @@
* will pick up pages from other mem cgroups as well. We hack
* the priority and make it zero.
*/
- shrink_mem_cgroup_zone(0, &mz, &sc);
+ shrink_mem_cgroup_zone(&mz, &sc);
trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
@@ -2516,6 +2257,7 @@
.may_swap = !noswap,
.nr_to_reclaim = SWAP_CLUSTER_MAX,
.order = 0,
+ .priority = DEF_PRIORITY,
.target_mem_cgroup = memcg,
.nodemask = NULL, /* we don't care the placement */
.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
@@ -2546,8 +2288,7 @@
}
#endif
-static void age_active_anon(struct zone *zone, struct scan_control *sc,
- int priority)
+static void age_active_anon(struct zone *zone, struct scan_control *sc)
{
struct mem_cgroup *memcg;
@@ -2563,7 +2304,7 @@
if (inactive_anon_is_low(&mz))
shrink_active_list(SWAP_CLUSTER_MAX, &mz,
- sc, priority, 0);
+ sc, LRU_ACTIVE_ANON);
memcg = mem_cgroup_iter(NULL, memcg, NULL);
} while (memcg);
@@ -2672,7 +2413,6 @@
{
int all_zones_ok;
unsigned long balanced;
- int priority;
int i;
int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
unsigned long total_scanned;
@@ -2696,18 +2436,15 @@
};
loop_again:
total_scanned = 0;
+ sc.priority = DEF_PRIORITY;
sc.nr_reclaimed = 0;
sc.may_writepage = !laptop_mode;
count_vm_event(PAGEOUTRUN);
- for (priority = DEF_PRIORITY; priority >= 0; priority--) {
+ do {
unsigned long lru_pages = 0;
int has_under_min_watermark_zone = 0;
- /* The swap token gets in the way of swapout... */
- if (!priority)
- disable_swap_token(NULL);
-
all_zones_ok = 1;
balanced = 0;
@@ -2721,14 +2458,15 @@
if (!populated_zone(zone))
continue;
- if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+ if (zone->all_unreclaimable &&
+ sc.priority != DEF_PRIORITY)
continue;
/*
* Do some background aging of the anon list, to give
* pages a chance to be referenced before reclaiming.
*/
- age_active_anon(zone, &sc, priority);
+ age_active_anon(zone, &sc);
/*
* If the number of buffer_heads in the machine
@@ -2776,7 +2514,8 @@
if (!populated_zone(zone))
continue;
- if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+ if (zone->all_unreclaimable &&
+ sc.priority != DEF_PRIORITY)
continue;
sc.nr_scanned = 0;
@@ -2820,7 +2559,7 @@
!zone_watermark_ok_safe(zone, testorder,
high_wmark_pages(zone) + balance_gap,
end_zone, 0)) {
- shrink_zone(priority, zone, &sc);
+ shrink_zone(zone, &sc);
reclaim_state->reclaimed_slab = 0;
nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages);
@@ -2877,7 +2616,7 @@
* OK, kswapd is getting into trouble. Take a nap, then take
* another pass across the zones.
*/
- if (total_scanned && (priority < DEF_PRIORITY - 2)) {
+ if (total_scanned && (sc.priority < DEF_PRIORITY - 2)) {
if (has_under_min_watermark_zone)
count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT);
else
@@ -2892,7 +2631,7 @@
*/
if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX)
break;
- }
+ } while (--sc.priority >= 0);
out:
/*
@@ -2942,7 +2681,8 @@
if (!populated_zone(zone))
continue;
- if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+ if (zone->all_unreclaimable &&
+ sc.priority != DEF_PRIORITY)
continue;
/* Would compaction fail due to lack of free memory? */
@@ -3013,7 +2753,18 @@
* them before going back to sleep.
*/
set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
- schedule();
+
+ /*
+ * Compaction records which page blocks it recently failed to
+ * isolate pages from and skips them in future scanning.
+ * When kswapd is going to sleep, it is reasonable to assume
+ * that reclaim and compaction may succeed, so reset the cache.
+ */
+ reset_isolation_suitable(pgdat);
+
+ if (!kthread_should_stop())
+ schedule();
+
set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
} else {
if (remaining)
@@ -3209,6 +2960,7 @@
.nr_to_reclaim = nr_to_reclaim,
.hibernation_mode = 1,
.order = 0,
+ .priority = DEF_PRIORITY,
};
struct shrink_control shrink = {
.gfp_mask = sc.gfp_mask,
@@ -3386,7 +3138,6 @@
const unsigned long nr_pages = 1 << order;
struct task_struct *p = current;
struct reclaim_state reclaim_state;
- int priority;
struct scan_control sc = {
.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
.may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
@@ -3395,6 +3146,7 @@
SWAP_CLUSTER_MAX),
.gfp_mask = gfp_mask,
.order = order,
+ .priority = ZONE_RECLAIM_PRIORITY,
};
struct shrink_control shrink = {
.gfp_mask = sc.gfp_mask,
@@ -3417,11 +3169,9 @@
* Free memory by calling shrink zone with increasing
* priorities until we have enough memory freed.
*/
- priority = ZONE_RECLAIM_PRIORITY;
do {
- shrink_zone(priority, zone, &sc);
- priority--;
- } while (priority >= 0 && sc.nr_reclaimed < nr_pages);
+ shrink_zone(zone, &sc);
+ } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
}
nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 8e18d6b..959a558 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -761,10 +761,14 @@
"pgrotated",
+#ifdef CONFIG_MIGRATION
+ "pgmigrate_success",
+ "pgmigrate_fail",
+#endif
#ifdef CONFIG_COMPACTION
- "compact_blocks_moved",
- "compact_pages_moved",
- "compact_pagemigrate_failed",
+ "compact_migrate_scanned",
+ "compact_free_scanned",
+ "compact_isolated",
"compact_stall",
"compact_fail",
"compact_success",
diff --git a/sound/soc/codecs/wcd9306.c b/sound/soc/codecs/wcd9306.c
index 09f2a51..b31c7c9 100644
--- a/sound/soc/codecs/wcd9306.c
+++ b/sound/soc/codecs/wcd9306.c
@@ -306,18 +306,18 @@
return 0;
}
- WCD9XXX_BCL_LOCK(&priv->resmgr);
+ codec = priv->codec;
+ mutex_lock(&codec->mutex);
old = spkr_drv_wrnd;
ret = param_set_int(val, kp);
if (ret) {
- WCD9XXX_BCL_UNLOCK(&priv->resmgr);
+ mutex_unlock(&codec->mutex);
return ret;
}
- codec = priv->codec;
dev_dbg(codec->dev, "%s: spkr_drv_wrnd %d -> %d\n",
__func__, old, spkr_drv_wrnd);
- if (old == 0 && spkr_drv_wrnd == 1) {
+ if ((old == -1 || old == 0) && spkr_drv_wrnd == 1) {
WCD9XXX_BG_CLK_LOCK(&priv->resmgr);
wcd9xxx_resmgr_get_bandgap(&priv->resmgr,
WCD9XXX_BANDGAP_AUDIO_MODE);
@@ -332,8 +332,8 @@
snd_soc_update_bits(codec, TAPAN_A_SPKR_DRV_EN, 0x80,
0x00);
}
+ mutex_unlock(&codec->mutex);
- WCD9XXX_BCL_UNLOCK(&priv->resmgr);
return 0;
}
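
The pattern applied to both wcd9306 and wcd9320 is the same: serialize the module-parameter write and the resulting register work under codec->mutex instead of the resmgr BCL lock, and treat the initial -1 ("never set") value like 0 when deciding whether the workaround is being enabled. A userspace analog of that pattern with a pthread mutex, for illustration only:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int spkr_drv_wrnd = -1;	/* -1: never set, 0: off, 1: on */

/* Analog of the param-set handler: read the old value, store the new
 * one and act on the off->on transition, all in one critical section. */
static void set_spkr_drv_wrnd(int val)
{
	int old;

	pthread_mutex_lock(&lock);
	old = spkr_drv_wrnd;
	spkr_drv_wrnd = val;
	if ((old == -1 || old == 0) && val == 1)
		printf("enable speaker-driver workaround\n");
	else if (old == 1 && val == 0)
		printf("disable speaker-driver workaround\n");
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	set_spkr_drv_wrnd(1);	/* -1 -> 1: enables, like the first set */
	set_spkr_drv_wrnd(0);	/* 1 -> 0: disables */
	return 0;
}
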
@@ -1701,7 +1701,6 @@
struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
dev_dbg(codec->dev, "%s: %s %d\n", __func__, w->name, event);
- WCD9XXX_BCL_LOCK(&tapan->resmgr);
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
tapan->spkr_pa_widget_on = true;
@@ -1712,7 +1711,6 @@
snd_soc_update_bits(codec, TAPAN_A_SPKR_DRV_EN, 0x80, 0x00);
break;
}
- WCD9XXX_BCL_UNLOCK(&tapan->resmgr);
return 0;
}
@@ -4537,7 +4535,6 @@
codec = (struct snd_soc_codec *)(wcd9xxx->ssr_priv);
tapan = snd_soc_codec_get_drvdata(codec);
mutex_lock(&codec->mutex);
- WCD9XXX_BCL_LOCK(&tapan->resmgr);
if (codec->reg_def_copy) {
pr_debug("%s: Update ASOC cache", __func__);
@@ -4546,7 +4543,6 @@
codec->reg_size, GFP_KERNEL);
if (!codec->reg_cache) {
pr_err("%s: Cache update failed!\n", __func__);
- WCD9XXX_BCL_UNLOCK(&tapan->resmgr);
mutex_unlock(&codec->mutex);
return -ENOMEM;
}
@@ -4555,7 +4551,6 @@
wcd9xxx_resmgr_post_ssr(&tapan->resmgr);
if (spkr_drv_wrnd == 1)
snd_soc_update_bits(codec, TAPAN_A_SPKR_DRV_EN, 0x80, 0x80);
- WCD9XXX_BCL_UNLOCK(&tapan->resmgr);
tapan_update_reg_defaults(codec);
tapan_update_reg_mclk_rate(wcd9xxx);
diff --git a/sound/soc/codecs/wcd9320.c b/sound/soc/codecs/wcd9320.c
index 2f3ad73..516ac4f 100644
--- a/sound/soc/codecs/wcd9320.c
+++ b/sound/soc/codecs/wcd9320.c
@@ -604,18 +604,17 @@
return 0;
}
- WCD9XXX_BCL_LOCK(&priv->resmgr);
+ codec = priv->codec;
+ mutex_lock(&codec->mutex);
old = spkr_drv_wrnd;
ret = param_set_int(val, kp);
if (ret) {
- WCD9XXX_BCL_UNLOCK(&priv->resmgr);
+ mutex_unlock(&codec->mutex);
return ret;
}
- WCD9XXX_BCL_UNLOCK(&priv->resmgr);
pr_debug("%s: spkr_drv_wrnd %d -> %d\n", __func__, old, spkr_drv_wrnd);
- codec = priv->codec;
- if (old == 0 && spkr_drv_wrnd == 1) {
+ if ((old == -1 || old == 0) && spkr_drv_wrnd == 1) {
WCD9XXX_BG_CLK_LOCK(&priv->resmgr);
wcd9xxx_resmgr_get_bandgap(&priv->resmgr,
WCD9XXX_BANDGAP_AUDIO_MODE);
@@ -630,6 +629,7 @@
snd_soc_update_bits(codec, TAIKO_A_SPKR_DRV_EN, 0x80,
0x00);
}
+ mutex_unlock(&codec->mutex);
return 0;
}
@@ -2420,7 +2420,6 @@
struct taiko_priv *taiko = snd_soc_codec_get_drvdata(codec);
pr_debug("%s: %d %s\n", __func__, event, w->name);
- WCD9XXX_BCL_LOCK(&taiko->resmgr);
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
taiko->spkr_pa_widget_on = true;
@@ -2431,7 +2430,6 @@
snd_soc_update_bits(codec, TAIKO_A_SPKR_DRV_EN, 0x80, 0x00);
break;
}
- WCD9XXX_BCL_UNLOCK(&taiko->resmgr);
return 0;
}
@@ -6189,7 +6187,6 @@
codec = (struct snd_soc_codec *)(wcd9xxx->ssr_priv);
taiko = snd_soc_codec_get_drvdata(codec);
mutex_lock(&codec->mutex);
- WCD9XXX_BCL_LOCK(&taiko->resmgr);
if (codec->reg_def_copy) {
pr_debug("%s: Update ASOC cache", __func__);
@@ -6201,7 +6198,6 @@
wcd9xxx_resmgr_post_ssr(&taiko->resmgr);
if (spkr_drv_wrnd == 1)
snd_soc_update_bits(codec, TAIKO_A_SPKR_DRV_EN, 0x80, 0x80);
- WCD9XXX_BCL_UNLOCK(&taiko->resmgr);
taiko_update_reg_defaults(codec);
taiko_codec_init_reg(codec);
diff --git a/sound/soc/codecs/wcd9xxx-resmgr.c b/sound/soc/codecs/wcd9xxx-resmgr.c
index 84f236e..be11e53 100644
--- a/sound/soc/codecs/wcd9xxx-resmgr.c
+++ b/sound/soc/codecs/wcd9xxx-resmgr.c
@@ -209,8 +209,8 @@
int old_clk_rco_users, old_clk_mclk_users;
pr_debug("%s: enter\n", __func__);
- WCD9XXX_BCL_ASSERT_LOCKED(resmgr);
+ WCD9XXX_BG_CLK_LOCK(resmgr);
old_bg_audio_users = resmgr->bg_audio_users;
old_bg_mbhc_users = resmgr->bg_mbhc_users;
old_clk_rco_users = resmgr->clk_rco_users;
@@ -243,6 +243,7 @@
while (old_clk_rco_users--)
wcd9xxx_resmgr_get_clk_block(resmgr, WCD9XXX_CLK_RCO);
}
+ WCD9XXX_BG_CLK_UNLOCK(resmgr);
pr_debug("%s: leave\n", __func__);
}
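
This resmgr change is what lets the codec drivers above drop their BCL locking: wcd9xxx_resmgr_post_ssr() no longer asserts that the caller holds the lock, it takes and releases the bandgap/clock lock itself. A sketch of the callee-owns-the-lock pattern using pthread stand-ins, not the driver API:

#include <pthread.h>

static pthread_mutex_t bg_clk_lock = PTHREAD_MUTEX_INITIALIZER;

/* Before: post_ssr() asserted that its caller already held the lock.
 * After: the callee owns the whole critical section itself. */
static void post_ssr(void)
{
	pthread_mutex_lock(&bg_clk_lock);
	/* restore bandgap and RCO/MCLK user counts here */
	pthread_mutex_unlock(&bg_clk_lock);
}

int main(void)
{
	post_ssr();	/* callers no longer take the lock first */
	return 0;
}
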