Merge "input: touchscreen: power off touch panel during suspend"
diff --git a/Documentation/devicetree/bindings/nfc/nfc-nci.txt b/Documentation/devicetree/bindings/nfc/nfc-nci.txt
index cdd1e68..5c53470 100644
--- a/Documentation/devicetree/bindings/nfc/nfc-nci.txt
+++ b/Documentation/devicetree/bindings/nfc/nfc-nci.txt
@@ -10,23 +10,26 @@
- qcom,irq-gpio: specific gpio for read interrupt.
- qcom,clk-src: nfc clock source ("BBCLK2", "RFCLK3", "GPCLK", ...)
- qcom,clk-en-gpio: msm gpio clock, used only if clock source is msm gpio
+- vlogic-supply: LDO regulator for the power supply
- interrupt-parent: Should be phandle for the interrupt controller
that services interrupts for this device.
- interrupts: should contain the NFC interrupt. NFC has one read interrupt.
- qcom,clk-gpio: pmic gpio on which bbclk2 signal is coming.
-Example:
+LDO example:
- i2c@f9925000 { /* BLSP1 QUP3 */
- nfc-nci@0e {
- compatible = "qcom,nfc-nci";
- reg = <0x0e>;
- qcom,irq-gpio = <&msmgpio 21 0x00>;
- qcom,dis-gpio = <&msmgpio 20 0x00>;
- qcom,clk-src = "BBCLK2";
- qcom,clk-en-gpio = <&msmgpio 0 0x00>;
- interrupt-parent = <&msmgpio>;
- interrupts = <21 0>;
- qcom,clk-gpio = <&pm8226_gpios 3 0>;
- };
+ i2c@f9925000 { /* BLSP-1 QUP-3 */
+ nfc-nci@e {
+ compatible = "qcom,nfc-nci";
+ reg = <0x0e>;
+ qcom,irq-gpio = <&msmgpio 77 0x00>;
+ qcom,dis-gpio = <&msmgpio 93 0x00>;
+ qcom,clk-en-gpio = <&msmgpio 78 0x00>;
+ qcom,clk-src = "GPCLK";
+ interrupt-parent = <&msmgpio>;
+ interrupts = <77 0>;
+ qcom,clk-gpio = <&msmgpio 75 0x00>;
+ vlogic-supply = <&pm8110_l14>;
+ };
};
+
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index 5e686a4f..56bdc59 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -407,6 +407,17 @@
Required properties:
- compatible : "qcom,msm8974-audio-taiko"
- qcom,model : The user-visible name of this sound card.
+- reg : Offset and length of the register region(s) for MI2S/PCM MUX
+- reg-names : Register region name(s) referenced in reg above
+ Required register resource entries are:
+ "lpaif_pri_mode_muxsel": Physical address of MUX to select between
+ Primary PCM and Primary MI2S
+ "lpaif_sec_mode_muxsel": Physical address of MUX to select between
+ Secondary PCM and Secondary MI2S
+ "lpaif_tert_mode_muxsel": Physical address of MUX to select between
+ Primary PCM and Tertiary MI2S
+ "lpaif_quat_mode_muxsel": Physical address of MUX to select between
Secondary PCM and Quaternary MI2S
- qcom,audio-routing : A list of the connections between audio components.
Each entry is a pair of strings, the first being the connection's sink,
the second being the connection's source.
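
For reference, a sound node using these new properties appears in the msm8974.dtsi hunk later in this patch; a minimal sketch (register addresses are the ones from that hunk and are target-specific) looks like:

	sound {
		compatible = "qcom,msm8974-audio-taiko";
		qcom,model = "msm8974-taiko-snd-card";
		/* one 4-byte MUX select register per interface */
		reg = <0xfe02b000 0x4>,
		      <0xfe02c000 0x4>,
		      <0xfe02d000 0x4>,
		      <0xfe02e000 0x4>;
		reg-names = "lpaif_pri_mode_muxsel",
			    "lpaif_sec_mode_muxsel",
			    "lpaif_tert_mode_muxsel",
			    "lpaif_quat_mode_muxsel";
	};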
diff --git a/arch/arm/boot/dts/dsi-panel-hx8379a-wvga-video.dtsi b/arch/arm/boot/dts/dsi-panel-hx8379a-wvga-video.dtsi
index 23b65f3..92e6fc1 100644
--- a/arch/arm/boot/dts/dsi-panel-hx8379a-wvga-video.dtsi
+++ b/arch/arm/boot/dts/dsi-panel-hx8379a-wvga-video.dtsi
@@ -81,14 +81,47 @@
00 00 00 00
39 01 00 00 00 00 24
E0 79 05 0F
- 14 26 20 3F
- 2A 43 04 0C
- 11 15 17 15
- 15 10 13 05
- 0F 14 26 20
- 3F 2A 43 04
- 0C 11 15 17
- 15 15 10 13
+ 14 23 24 3F
+ 30 46 06 10
+ 13 16 17 16
+ 16 13 18 05
+ 0F 14 23 24
+ 3F 30 46 06
+ 10 13 16 17
+ 16 16 13 18
+ 39 01 00 00 00 00 80
+ C1 01 00 07
+ 10 17 1D 2A
+ 33 3A 43 4A
+ 52 5B 64 6D
+ 78 7F 88 90
+ 98 A0 A9 B2
+ B9 C1 C9 D1
+ D7 DF E6 ED
+ F4 FA FD 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 08 10 18
+ 20 28 30 38
+ 40 47 4F 58
+ 60 68 70 78
+ 80 88 90 98
+ A0 A9 B1 B9
+ C1 C9 D1 D8
+ E0 E8 F0 F9
+ FE 00 00 00
+ 00 00 00 00
+ 00 00 00 08
+ 10 18 1E 26
+ 2E 34 3A 41
+ 49 4F 58 5E
+ 67 6F 77 80
+ 88 8F 97 9F
+ A7 AF B8 BF
+ C7 D1 D8 E3
+ EA F6 FF 00
+ 00 00 00 00
+ 00 00 00 00
23 01 00 00 00 00 02
cc 02
39 01 00 00 00 00 05
diff --git a/arch/arm/boot/dts/msm-pm8226.dtsi b/arch/arm/boot/dts/msm-pm8226.dtsi
index f65e282..b9bcd0c 100644
--- a/arch/arm/boot/dts/msm-pm8226.dtsi
+++ b/arch/arm/boot/dts/msm-pm8226.dtsi
@@ -433,6 +433,16 @@
qcom,hw-settle-time = <0>;
qcom,fast-avg-setup = <0>;
};
+ chan@1 {
+ label = "external_rsense";
+ reg = <1>;
+ qcom,decimation = <0>;
+ qcom,pre-div-channel-scaling = <1>;
+ qcom,calibration-type = "absolute";
+ qcom,scale-function = <0>;
+ qcom,hw-settle-time = <0>;
+ qcom,fast-avg-setup = <0>;
+ };
};
pm8226_adc_tm: vadc@3400 {
diff --git a/arch/arm/boot/dts/msm8226-bus.dtsi b/arch/arm/boot/dts/msm8226-bus.dtsi
index d87aa3e..74b4a30 100644
--- a/arch/arm/boot/dts/msm8226-bus.dtsi
+++ b/arch/arm/boot/dts/msm8226-bus.dtsi
@@ -78,7 +78,7 @@
mas-vfe {
cell-id = <29>;
label = "mas-vfe";
- qcom,masterp = <16>;
+ qcom,masterp = <7>;
qcom,tier = <2>;
qcom,hw-sel = "NoC";
qcom,perm-mode = "Bypass";
diff --git a/arch/arm/boot/dts/msm8226-regulator.dtsi b/arch/arm/boot/dts/msm8226-regulator.dtsi
index 23a2158..9764982 100644
--- a/arch/arm/boot/dts/msm8226-regulator.dtsi
+++ b/arch/arm/boot/dts/msm8226-regulator.dtsi
@@ -336,7 +336,7 @@
pm8226_l16: regulator-l16 {
regulator-name = "8226_l16";
regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3300000>;
+ regulator-max-microvolt = <3350000>;
qcom,init-voltage = <3300000>;
status = "okay";
};
@@ -423,7 +423,7 @@
pm8226_l24: regulator-l24 {
regulator-name = "8226_l24";
regulator-min-microvolt = <1300000>;
- regulator-max-microvolt = <1300000>;
+ regulator-max-microvolt = <1350000>;
qcom,init-voltage = <1300000>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/msm8226.dtsi b/arch/arm/boot/dts/msm8226.dtsi
index 151b508..abe5ff3 100644
--- a/arch/arm/boot/dts/msm8226.dtsi
+++ b/arch/arm/boot/dts/msm8226.dtsi
@@ -183,8 +183,8 @@
<268000 348000>,
<505000 657000>;
qcom,buffer-type-tz-usage-table = <0x1 0x1>,
- <0x2 0x2>,
- <0x1f0 0x3>;
+ <0x6 0x2>,
+ <0x7C0 0x3>;
qcom,max-hw-load = <352800>; /* 720p @ 30 + 1080p @ 30 */
qcom,vidc-iommu-domains {
qcom,domain-ns {
diff --git a/arch/arm/boot/dts/msm8610-cdp.dtsi b/arch/arm/boot/dts/msm8610-cdp.dtsi
index 9920f77..d63c6e5 100644
--- a/arch/arm/boot/dts/msm8610-cdp.dtsi
+++ b/arch/arm/boot/dts/msm8610-cdp.dtsi
@@ -20,7 +20,7 @@
compatible = "atmel,mxt-ts";
reg = <0x4a>;
interrupt-parent = <&msmgpio>;
- interrupts = <1 0x2>;
+ interrupts = <1 0x2002>;
vdd_ana-supply = <&pm8110_l19>;
vcc_i2c-supply = <&pm8110_l14>;
atmel,reset-gpio = <&msmgpio 0 0x00>;
@@ -76,6 +76,21 @@
];
};
};
+
+ synaptics@20 {
+ compatible = "synaptics,rmi4";
+ reg = <0x20>;
+ interrupt-parent = <&msmgpio>;
+ interrupts = <1 0x2002>;
+ vdd-supply = <&pm8110_l19>;
+ vcc_i2c-supply = <&pm8110_l14>;
+ synaptics,reset-gpio = <&msmgpio 0 0x00>;
+ synaptics,irq-gpio = <&msmgpio 1 0x2008>;
+ synaptics,button-map = <139 102 158>;
+ synaptics,i2c-pull-up;
+ synaptics,power-down;
+ synaptics,disable-gpios;
+ };
};
gen-vkeys {
diff --git a/arch/arm/boot/dts/msm8610-mtp.dtsi b/arch/arm/boot/dts/msm8610-mtp.dtsi
index 0244b89..6ce0109 100644
--- a/arch/arm/boot/dts/msm8610-mtp.dtsi
+++ b/arch/arm/boot/dts/msm8610-mtp.dtsi
@@ -20,7 +20,7 @@
compatible = "atmel,mxt-ts";
reg = <0x4a>;
interrupt-parent = <&msmgpio>;
- interrupts = <1 0x2>;
+ interrupts = <1 0x2002>;
vdd_ana-supply = <&pm8110_l19>;
vcc_i2c-supply = <&pm8110_l14>;
atmel,reset-gpio = <&msmgpio 0 0x00>;
@@ -76,6 +76,21 @@
];
};
};
+
+ synaptics@20 {
+ compatible = "synaptics,rmi4";
+ reg = <0x20>;
+ interrupt-parent = <&msmgpio>;
+ interrupts = <1 0x2002>;
+ vdd-supply = <&pm8110_l19>;
+ vcc_i2c-supply = <&pm8110_l14>;
+ synaptics,reset-gpio = <&msmgpio 0 0x00>;
+ synaptics,irq-gpio = <&msmgpio 1 0x2008>;
+ synaptics,button-map = <139 102 158>;
+ synaptics,i2c-pull-up;
+ synaptics,power-down;
+ synaptics,disable-gpios;
+ };
};
i2c@f9925000 {
diff --git a/arch/arm/boot/dts/msm8610-qrd-skuaa.dtsi b/arch/arm/boot/dts/msm8610-qrd-skuaa.dtsi
index 86f1210..bb866b2 100644
--- a/arch/arm/boot/dts/msm8610-qrd-skuaa.dtsi
+++ b/arch/arm/boot/dts/msm8610-qrd-skuaa.dtsi
@@ -32,6 +32,21 @@
qcom,hsusb-otg-phy-init-seq =
<0x44 0x80 0x6a 0x81 0x34 0x82 0x13 0x83 0xffffffff>;
};
+
+ i2c@f9925000 { /* BLSP-1 QUP-3 */
+ nfc-nci@e {
+ compatible = "qcom,nfc-nci";
+ reg = <0x0e>;
+ qcom,irq-gpio = <&msmgpio 77 0x00>;
+ qcom,dis-gpio = <&msmgpio 93 0x00>;
+ qcom,clk-en-gpio = <&msmgpio 78 0x00>;
+ qcom,clk-src = "GPCLK";
+ interrupt-parent = <&msmgpio>;
+ interrupts = <77 0>;
+ qcom,clk-gpio = <&msmgpio 75 0x00>;
+ vlogic-supply = <&pm8110_l14>;
+ };
+ };
};
/ {
@@ -41,6 +56,7 @@
/include/ "batterydata-qrd-4v2-1300mah.dtsi"
};
+
};
&pm8110_bms {
@@ -70,3 +86,13 @@
&dsi_hx8379a_wvga_vid {
qcom,cont-splash-enabled;
};
+
+&pm8110_gpios {
+ gpio@c000 { /* GPIO 1 */
+ qcom,mode = <0>; /* QPNP_PIN_MODE_DIG_IN */
+ qcom,pull = <5>; /* QPNP_PIN_PULL_NO */
+ qcom,vin-sel = <2>; /* QPNP_PIN_VIN2 */
+ qcom,src-sel = <2>; /* QPNP_PIN_SEL_FUNC_1 */
+ qcom,master-en = <1>;
+ };
+};
diff --git a/arch/arm/boot/dts/msm8610-qrd-skuab.dtsi b/arch/arm/boot/dts/msm8610-qrd-skuab.dtsi
index 08e9be5..a22958a 100644
--- a/arch/arm/boot/dts/msm8610-qrd-skuab.dtsi
+++ b/arch/arm/boot/dts/msm8610-qrd-skuab.dtsi
@@ -67,6 +67,7 @@
goodix,display-coords = <0 0 540 960>;
goodix,button-map= <139 102 158>;
goodix,product-id = "915";
+ goodix,enable-power-off;
goodix,cfg-data0 = [
46 1C 02 C0 03 0A 05 11 01 08
14 3B 46 32 03 05 00 00 00 00
diff --git a/arch/arm/boot/dts/msm8610-qrd.dtsi b/arch/arm/boot/dts/msm8610-qrd.dtsi
index d578ef6..023371b 100644
--- a/arch/arm/boot/dts/msm8610-qrd.dtsi
+++ b/arch/arm/boot/dts/msm8610-qrd.dtsi
@@ -287,7 +287,7 @@
qcom,pad-drv-on = <0x4 0x4 0x4>; /* 10mA, 10mA, 10mA */
qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
- qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
+ qcom,clk-rates = <400000 25000000 50000000>;
#address-cells = <0>;
interrupt-parent = <&sdhc_2>;
diff --git a/arch/arm/boot/dts/msm8610-regulator.dtsi b/arch/arm/boot/dts/msm8610-regulator.dtsi
index f97d991..7eb6a22 100644
--- a/arch/arm/boot/dts/msm8610-regulator.dtsi
+++ b/arch/arm/boot/dts/msm8610-regulator.dtsi
@@ -215,7 +215,7 @@
status = "okay";
pm8110_l5: regulator-l5 {
regulator-min-microvolt = <1300000>;
- regulator-max-microvolt = <1300000>;
+ regulator-max-microvolt = <1350000>;
qcom,init-voltage = <1300000>;
status = "okay";
};
@@ -325,7 +325,7 @@
status = "okay";
pm8110_l16: regulator-l16 {
regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3000000>;
+ regulator-max-microvolt = <3350000>;
qcom,init-voltage = <3000000>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/msm8610.dtsi b/arch/arm/boot/dts/msm8610.dtsi
index f6ced20..133757a 100644
--- a/arch/arm/boot/dts/msm8610.dtsi
+++ b/arch/arm/boot/dts/msm8610.dtsi
@@ -182,7 +182,7 @@
compatible = "qcom,msm-vidc";
qcom,vidc-ns-map = <0x40000000 0x40000000>;
qcom,buffer-type-tz-usage-map = <0x1 0x1>,
- <0x1fe 0x2>;
+ <0x7fe 0x2>;
qcom,hfi = "q6";
qcom,max-hw-load = <244800>; /* 1080p @ 30 * 1 */
qcom,vidc-iommu-domains {
diff --git a/arch/arm/boot/dts/msm8974-regulator.dtsi b/arch/arm/boot/dts/msm8974-regulator.dtsi
index 344c26f..9b9202e 100644
--- a/arch/arm/boot/dts/msm8974-regulator.dtsi
+++ b/arch/arm/boot/dts/msm8974-regulator.dtsi
@@ -289,7 +289,7 @@
status = "okay";
pm8941_l11: regulator-l11 {
regulator-min-microvolt = <1300000>;
- regulator-max-microvolt = <1300000>;
+ regulator-max-microvolt = <1350000>;
qcom,init-voltage = <1300000>;
status = "okay";
};
@@ -376,7 +376,7 @@
status = "okay";
pm8941_l19: regulator-l19 {
regulator-min-microvolt = <2900000>;
- regulator-max-microvolt = <3300000>;
+ regulator-max-microvolt = <3350000>;
qcom,init-voltage = <2900000>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/msm8974-v1.dtsi b/arch/arm/boot/dts/msm8974-v1.dtsi
index 249c963..556e912 100644
--- a/arch/arm/boot/dts/msm8974-v1.dtsi
+++ b/arch/arm/boot/dts/msm8974-v1.dtsi
@@ -134,7 +134,7 @@
qcom,iommu-groups = <&venus_domain_ns &venus_domain_cp>;
qcom,iommu-group-buffer-types = <0xfff 0x1ff>;
qcom,buffer-type-tz-usage-table = <0x1 0x1>,
- <0x1fe 0x2>;
+ <0x7fe 0x2>;
};
&sfpb_spinlock {
diff --git a/arch/arm/boot/dts/msm8974-v2.dtsi b/arch/arm/boot/dts/msm8974-v2.dtsi
index 55e18f0..0da5658 100644
--- a/arch/arm/boot/dts/msm8974-v2.dtsi
+++ b/arch/arm/boot/dts/msm8974-v2.dtsi
@@ -136,9 +136,9 @@
<3240000 1600000>,
<4048000 1600000>,
<4264000 1600000>;
- qcom,buffer-type-tz-usage-table = <0x91 0x1>,
- <0x42 0x2>,
- <0x120 0x3>;
+ qcom,buffer-type-tz-usage-table = <0x241 0x1>,
+ <0x106 0x2>,
+ <0x480 0x3>;
qcom,vidc-iommu-domains {
qcom,domain-ns {
qcom,vidc-domain-phandle = <&venus_domain_ns>;
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index 311944c..e612df7 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -741,6 +741,14 @@
sound {
compatible = "qcom,msm8974-audio-taiko";
qcom,model = "msm8974-taiko-snd-card";
+ reg = <0xfe02b000 0x4>,
+ <0xfe02c000 0x4>,
+ <0xfe02d000 0x4>,
+ <0xfe02e000 0x4>;
+ reg-names = "lpaif_pri_mode_muxsel",
+ "lpaif_sec_mode_muxsel",
+ "lpaif_tert_mode_muxsel",
+ "lpaif_quat_mode_muxsel";
qcom,audio-routing =
"RX_BIAS", "MCLK",
diff --git a/arch/arm/boot/dts/msm8974pro.dtsi b/arch/arm/boot/dts/msm8974pro.dtsi
index bd58653..1395573 100644
--- a/arch/arm/boot/dts/msm8974pro.dtsi
+++ b/arch/arm/boot/dts/msm8974pro.dtsi
@@ -39,6 +39,17 @@
/delete-property/ qcom,pmic-sw-mode-temp-hysteresis;
/delete-property/ qcom,pmic-sw-mode-regs;
};
+
+ sound {
+ reg = <0xfe02c000 0x4>,
+ <0xfe02d000 0x4>,
+ <0xfe02e000 0x4>,
+ <0xfe02f000 0x4>;
+ reg-names = "lpaif_pri_mode_muxsel",
+ "lpaif_sec_mode_muxsel",
+ "lpaif_tert_mode_muxsel",
+ "lpaif_quat_mode_muxsel";
+ };
};
/* GPU overrides */
@@ -208,9 +219,9 @@
<4048000 1600000>,
<4264000 1600000>;
qcom,max-hw-load = <1281600>; /* max(4k X 2304 @ 24, 4k X 2160 @ 30) + 1080p @ 30 */
- qcom,buffer-type-tz-usage-table = <0x91 0x1>,
- <0x42 0x2>,
- <0x120 0x3>;
+ qcom,buffer-type-tz-usage-table = <0x241 0x1>,
+ <0x106 0x2>,
+ <0x480 0x3>;
qcom,vidc-iommu-domains {
qcom,domain-ns {
qcom,vidc-domain-phandle = <&venus_domain_ns>;
diff --git a/arch/arm/configs/msm8610-perf_defconfig b/arch/arm/configs/msm8610-perf_defconfig
index 7ea1bd5..2160c79 100644
--- a/arch/arm/configs/msm8610-perf_defconfig
+++ b/arch/arm/configs/msm8610-perf_defconfig
@@ -313,7 +313,6 @@
CONFIG_MSM_KGSL=y
CONFIG_KGSL_PER_PROCESS_PAGE_TABLE=y
CONFIG_FB=y
-CONFIG_FB_VIRTUAL=y
CONFIG_FB_MSM=y
# CONFIG_FB_MSM_BACKLIGHT is not set
CONFIG_FB_MSM_MDSS=y
diff --git a/arch/arm/mach-msm/board-8226.c b/arch/arm/mach-msm/board-8226.c
index aff2d75..5244918 100644
--- a/arch/arm/mach-msm/board-8226.c
+++ b/arch/arm/mach-msm/board-8226.c
@@ -82,6 +82,8 @@
"msm_sdcc.2", NULL),
OF_DEV_AUXDATA("qcom,sdhci-msm", 0xF9864900, \
"msm_sdcc.3", NULL),
+ OF_DEV_AUXDATA("qcom,hsic-host", 0xF9A00000, "msm_hsic_host", NULL),
+
{}
};
diff --git a/arch/arm/mach-msm/msm_watchdog_v2.c b/arch/arm/mach-msm/msm_watchdog_v2.c
index 52e94e6..ead2e95 100644
--- a/arch/arm/mach-msm/msm_watchdog_v2.c
+++ b/arch/arm/mach-msm/msm_watchdog_v2.c
@@ -68,7 +68,7 @@
/*
* On the kernel command line specify
- * msm_watchdog.enable=1 to enable the watchdog
+ * msm_watchdog_v2.enable=1 to enable the watchdog
* By default watchdog is turned on
*/
static int enable = 1;
@@ -76,7 +76,7 @@
/*
* On the kernel command line specify
- * msm_watchdog.WDT_HZ=<clock val in HZ> to set Watchdog
+ * msm_watchdog_v2.WDT_HZ=<clock val in HZ> to set Watchdog
* ticks. By default it is set to 32765.
*/
static long WDT_HZ = 32765;
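
Taken together, a kernel command line that enables the watchdog with the default tick rate would read, for example:

	msm_watchdog_v2.enable=1 msm_watchdog_v2.WDT_HZ=32765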
diff --git a/arch/arm/mach-msm/ocmem.c b/arch/arm/mach-msm/ocmem.c
index d31f3c4..99e54b7 100644
--- a/arch/arm/mach-msm/ocmem.c
+++ b/arch/arm/mach-msm/ocmem.c
@@ -836,7 +836,7 @@
return -EBUSY;
if (ocmem_debugfs_init(pdev))
- return -EBUSY;
+ dev_err(dev, "ocmem: No debugfs node available\n");
if (ocmem_core_init(pdev))
return -EBUSY;
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_utils.c b/arch/arm/mach-msm/qdsp6v2/audio_utils.c
index 85af4a7..ccacd3e 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_utils.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_utils.c
@@ -99,7 +99,7 @@
int audio_in_disable(struct q6audio_in *audio)
{
int rc = 0;
- if (audio->opened) {
+ if (!audio->stopped) {
audio->enabled = 0;
audio->opened = 0;
pr_debug("%s:session id %d: inbytes[%d] insamples[%d]\n",
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index 97dc26d..91b90a8 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -419,6 +419,8 @@
/* Notify the DCI process that the peripheral DCI Channel is up */
for (i = 0; i < MAX_DCI_CLIENTS; i++) {
+ if (!driver->dci_client_tbl[i].client)
+ continue;
if (driver->dci_client_tbl[i].list & peripheral_mask) {
info.si_signo = driver->dci_client_tbl[i].signal_type;
stat = send_sig_info(
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index 74a252f..57aa835 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -192,6 +192,10 @@
struct qup_i2c_clk_path_vote clk_path_vote;
};
+#ifdef CONFIG_PM
+static int i2c_qup_pm_resume_runtime(struct device *device);
+#endif
+
#ifdef DEBUG
static void
qup_print_status(struct qup_i2c_dev *dev)
@@ -944,7 +948,13 @@
long timeout;
int err;
- pm_runtime_get_sync(dev->dev);
+ /* Fall back to a direct resume if runtime PM is disabled */
+ if (!pm_runtime_enabled(dev->dev)) {
+ dev_dbg(dev->dev, "Runtime PM is disabled\n");
+ i2c_qup_pm_resume_runtime(dev->dev);
+ } else {
+ pm_runtime_get_sync(dev->dev);
+ }
mutex_lock(&dev->mlock);
if (dev->suspended) {
@@ -1754,22 +1764,24 @@
if (!pm_runtime_enabled(device) || !pm_runtime_suspended(device)) {
dev_dbg(device, "system suspend");
i2c_qup_pm_suspend_runtime(device);
+ /*
+ * Synchronize the device's runtime PM status to 'suspended',
+ * since the suspend above was forced outside the framework.
+ */
+ pm_runtime_disable(device);
+ pm_runtime_set_suspended(device);
+ pm_runtime_enable(device);
}
return 0;
}
static int qup_i2c_resume(struct device *device)
{
- int ret = 0;
- if (!pm_runtime_enabled(device) || !pm_runtime_suspended(device)) {
- dev_dbg(device, "system resume");
- ret = i2c_qup_pm_resume_runtime(device);
- if (!ret) {
- pm_runtime_mark_last_busy(device);
- pm_request_autosuspend(device);
- }
- return ret;
- }
+ /*
+ * Rely on runtime PM to invoke resume if it is enabled; even if
+ * it is not, the first client transaction will turn the clocks
+ * on and configure the GPIOs.
+ */
+ dev_dbg(device, "system resume");
return 0;
}
#endif /* CONFIG_PM */
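
The disable/set_suspended/enable sequence in the system-suspend path above is the usual idiom for resynchronizing runtime PM bookkeeping after a device is powered down outside the framework. In isolation, a sketch of that idiom (any struct device works; this is illustrative, not part of the patch):

	#include <linux/pm_runtime.h>

	/* Sketch: after manually suspending a device behind the
	 * runtime PM framework's back, force its runtime status to
	 * RPM_SUSPENDED so the bookkeeping matches the hardware. */
	static void sync_runtime_suspended(struct device *dev)
	{
		pm_runtime_disable(dev);       /* block status changes */
		pm_runtime_set_suspended(dev); /* record "suspended" */
		pm_runtime_enable(dev);        /* re-enable runtime PM */
	}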
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
index 5f36a4a..fb1caea 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
@@ -513,7 +513,9 @@
}
lo_tbl_ptr = cfg_data +
reg_cfg_cmd->u.dmi_info.lo_tbl_offset/4;
-
+ if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_64BIT)
+ reg_cfg_cmd->u.dmi_info.len =
+ reg_cfg_cmd->u.dmi_info.len / 2;
for (i = 0; i < reg_cfg_cmd->u.dmi_info.len/4; i++) {
lo_val = *lo_tbl_ptr++;
if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_16BIT) {
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
index e3a539c..d3b9c0a 100644
--- a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
@@ -4036,6 +4036,13 @@
__func__);
return ret;
}
+
+ if (mpq_feed->sdmx_filter_handle ==
+ SDMX_INVALID_FILTER_HANDLE) {
+ MPQ_DVB_DBG_PRINT("%s: filter was stopped\n",
+ __func__);
+ return -ENODEV;
+ }
}
if (mpq_feed->sdmx_buf.pread + header->payload_length <
@@ -4089,6 +4096,13 @@
mutex_lock(&mpq_demux->mutex);
+ if (mpq_feed->sdmx_filter_handle ==
+ SDMX_INVALID_FILTER_HANDLE) {
+ MPQ_DVB_DBG_PRINT("%s: filter was stopped\n",
+ __func__);
+ return -ENODEV;
+ }
+
return ret;
}
@@ -4456,6 +4470,14 @@
return;
}
+ if (!header.payload_length) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: warnning - video frame with 0 length, dropping\n",
+ __func__);
+ spin_unlock(&mpq_feed->video_info.video_buffer_lock);
+ continue;
+ }
+
packet.raw_data_len = header.payload_length;
packet.user_data_len = sizeof(meta_data);
mpq_streambuffer_get_buffer_handle(sbuf, 0,
diff --git a/drivers/media/platform/msm/vcap/Kconfig b/drivers/media/platform/msm/vcap/Kconfig
index 2bdcfbd..db3bc47 100644
--- a/drivers/media/platform/msm/vcap/Kconfig
+++ b/drivers/media/platform/msm/vcap/Kconfig
@@ -1,7 +1,6 @@
config MSM_VCAP
tristate "Qualcomm MSM VCAP"
depends on VIDEO_DEV && VIDEO_V4L2
- default y
---help---
Enables VCAP driver. This device allows for video capture and
video processing using the v4l2 api
diff --git a/drivers/media/platform/msm/vidc/msm_smem.h b/drivers/media/platform/msm/vidc/msm_smem.h
index dc384bc..7bd6443 100644
--- a/drivers/media/platform/msm/vidc/msm_smem.h
+++ b/drivers/media/platform/msm/vidc/msm_smem.h
@@ -28,6 +28,10 @@
SMEM_SECURE = ION_FLAG_SECURE,
};
+/* NOTE: if you change this enum you MUST update the
+ * "buffer-type-tz-usage-table" for any affected target
+ * in arch/arm/boot/dts/<arch>.dtsi
+ */
enum hal_buffer {
HAL_BUFFER_INPUT = 0x1,
HAL_BUFFER_OUTPUT = 0x2,
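
The many buffer-type-tz-usage-table updates elsewhere in this patch (0x1fe -> 0x7fe, 0x120 -> 0x480, and so on) follow from this coupling: each table entry's mask is a bitwise OR of hal_buffer bits, so adding or reordering enum members changes which bit each buffer type occupies and every mask must be recomputed. A standalone sketch of the arithmetic (only INPUT and OUTPUT are visible in the hunk above; the higher members are elided there, so the comments are illustrative):

	#include <stdio.h>

	enum hal_buffer {
		HAL_BUFFER_INPUT  = 0x1,
		HAL_BUFFER_OUTPUT = 0x2,
		/* further buffer types occupy the higher bits */
	};

	int main(void)
	{
		unsigned int mask = HAL_BUFFER_INPUT | HAL_BUFFER_OUTPUT;
		/* 0x120 -> 0x480 in the dtsi changes is the same two bits
		 * shifted up by two positions, consistent with two members
		 * being inserted into this enum. */
		printf("mask = %#x\n", mask); /* prints mask = 0x3 */
		return 0;
	}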
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 293033b..995d9e4 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -744,7 +744,11 @@
*/
mmc_update_clk_scaling(host);
err = mmc_stop_request(host);
- if (err && !context_info->is_done_rcv) {
+ if (err == MMC_BLK_NO_REQ_TO_STOP) {
+ pending_is_urgent = true;
+ /* wait for done/new/urgent event */
+ continue;
+ } else if (err && !context_info->is_done_rcv) {
err = MMC_BLK_ABORT;
break;
}
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 758a79e..b01abe7 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -27,6 +27,7 @@
#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
#include "sdhci.h"
@@ -2223,10 +2224,13 @@
struct sdhci_host *host = mmc_priv(mmc);
unsigned long flags;
struct mmc_data *data;
+ int ret = 0;
spin_lock_irqsave(&host->lock, flags);
- if (!host->mrq || !host->data)
+ if (!host->mrq || !host->data) {
+ ret = MMC_BLK_NO_REQ_TO_STOP;
goto out;
+ }
data = host->data;
@@ -2252,7 +2256,7 @@
host->data = NULL;
out:
spin_unlock_irqrestore(&host->lock, flags);
- return 0;
+ return ret;
}
static unsigned int sdhci_get_xfer_remain(struct mmc_host *mmc)
diff --git a/drivers/nfc/nfc-nci.c b/drivers/nfc/nfc-nci.c
index 67b057c..043f9bc 100644
--- a/drivers/nfc/nfc-nci.c
+++ b/drivers/nfc/nfc-nci.c
@@ -923,6 +923,19 @@
goto err_misc_register;
}
+ regulators.regulator = regulator_get(&client->dev, regulators.name);
+ if (IS_ERR(regulators.regulator)) {
+ r = PTR_ERR(regulators.regulator);
+ pr_err("regulator get of %s failed (%d)\n", regulators.name, r);
+ } else {
+ /* Enable the regulator */
+ r = regulator_enable(regulators.regulator);
+ if (r) {
+ pr_err("vreg %s enable failed (%d)\n",
+ regulators.name, r);
+ }
+ }
+
logging_level = 0;
/* request irq. The irq is set whenever the chip has data available
* for reading. It is cleared when all data has been read.
diff --git a/drivers/nfc/nfc-nci.h b/drivers/nfc/nfc-nci.h
index 81f2521..9bfb77d 100644
--- a/drivers/nfc/nfc-nci.h
+++ b/drivers/nfc/nfc-nci.h
@@ -223,3 +223,9 @@
unsigned int reg;
};
#endif
+/* enable LDO */
+struct vregs_info {
+ const char * const name;
+ struct regulator *regulator;
+};
+struct vregs_info regulators = {"vlogic", NULL};
diff --git a/drivers/of/of_batterydata.c b/drivers/of/of_batterydata.c
index 2061408..b0d40f1 100644
--- a/drivers/of/of_batterydata.c
+++ b/drivers/of/of_batterydata.c
@@ -244,9 +244,17 @@
{
int64_t resistor_value_kohm, denom;
+ if (batt_id_uv == 0) {
+ /* vadc not correct or batt id line grounded, report 0 kohms */
+ return 0;
+ }
/* calculate the battery id resistance reported via ADC */
denom = div64_s64(vadc_vdd * 1000000LL, batt_id_uv) - 1000000LL;
+ if (denom == 0) {
+ /* batt id connector might be open, return 0 kohms */
+ return 0;
+ }
resistor_value_kohm = div64_s64(rpull_up * 1000000LL + denom/2, denom);
pr_debug("batt id voltage = %d, resistor value = %lld\n",
diff --git a/drivers/power/qpnp-charger.c b/drivers/power/qpnp-charger.c
index 7e40e80..a65ac5b 100644
--- a/drivers/power/qpnp-charger.c
+++ b/drivers/power/qpnp-charger.c
@@ -2128,6 +2128,8 @@
if (chip->prev_usb_max_ma == ret.intval)
goto skip_set_iusb_max;
+ chip->prev_usb_max_ma = ret.intval;
+
if (ret.intval <= 2 && !chip->use_default_batt_values &&
get_prop_batt_present(chip)) {
qpnp_chg_usb_suspend_enable(chip, 1);
@@ -2156,7 +2158,6 @@
schedule_work(&chip->reduce_power_stage_work);
}
}
- chip->prev_usb_max_ma = ret.intval;
}
skip_set_iusb_max:
diff --git a/drivers/staging/zram/Kconfig b/drivers/staging/zram/Kconfig
index 9d11a4c..983314c 100644
--- a/drivers/staging/zram/Kconfig
+++ b/drivers/staging/zram/Kconfig
@@ -1,9 +1,6 @@
config ZRAM
tristate "Compressed RAM block device support"
- # X86 dependency is because zsmalloc uses non-portable pte/tlb
- # functions
- depends on BLOCK && SYSFS && X86
- select ZSMALLOC
+ depends on BLOCK && SYSFS && ZSMALLOC
select LZO_COMPRESS
select LZO_DECOMPRESS
default n
@@ -17,7 +14,7 @@
disks and maybe many more.
See zram.txt for more information.
- Project home: http://compcache.googlecode.com/
+ Project home: <https://compcache.googlecode.com/>
config ZRAM_DEBUG
bool "Compressed RAM block device debug support"
diff --git a/drivers/staging/zram/Makefile b/drivers/staging/zram/Makefile
index 7f4a301..cb0f9ce 100644
--- a/drivers/staging/zram/Makefile
+++ b/drivers/staging/zram/Makefile
@@ -1,3 +1,3 @@
-zram-y := zram_drv.o zram_sysfs.o
+zram-y := zram_drv.o
obj-$(CONFIG_ZRAM) += zram.o
diff --git a/drivers/staging/zram/zram.txt b/drivers/staging/zram/zram.txt
index 5f75d29..765d790 100644
--- a/drivers/staging/zram/zram.txt
+++ b/drivers/staging/zram/zram.txt
@@ -23,17 +23,17 @@
This creates 4 devices: /dev/zram{0,1,2,3}
(num_devices parameter is optional. Default: 1)
-2) Set Disksize (Optional):
- Set disk size by writing the value to sysfs node 'disksize'
- (in bytes). If disksize is not given, default value of 25%
- of RAM is used.
+2) Set Disksize
+ Set disk size by writing the value to sysfs node 'disksize'.
+ The value can be given in bytes or by using memory suffixes (K, M, G).
+ Examples:
+ # Initialize /dev/zram0 with 50MB disksize
+ echo $((50*1024*1024)) > /sys/block/zram0/disksize
- # Initialize /dev/zram0 with 50MB disksize
- echo $((50*1024*1024)) > /sys/block/zram0/disksize
-
- NOTE: disksize cannot be changed if the disk contains any
- data. So, for such a disk, you need to issue 'reset' (see below)
- before you can change its disksize.
+ # Using mem suffixes
+ echo 256K > /sys/block/zram0/disksize
+ echo 512M > /sys/block/zram0/disksize
+ echo 1G > /sys/block/zram0/disksize
3) Activate:
mkswap /dev/zram0
@@ -65,8 +65,9 @@
echo 1 > /sys/block/zram0/reset
echo 1 > /sys/block/zram1/reset
- (This frees all the memory allocated for the given device).
-
+ This frees all the memory allocated for the given device and
+ resets the disksize to zero. You must set the disksize again
+ before reusing the device.
Please report any problems at:
- Mailing list: linux-mm-cc at laptop dot org
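
The reset flow described above then looks like the following for a device that was in use as swap (a sketch; device name and size as in the earlier examples):

	swapoff /dev/zram0
	echo 1 > /sys/block/zram0/reset
	# disksize is now zero; set it again before reuse
	echo 256M > /sys/block/zram0/disksize
	mkswap /dev/zram0
	swapon /dev/zram0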
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 685d612..44949d2 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -37,56 +37,216 @@
/* Globals */
static int zram_major;
-struct zram *zram_devices;
+static struct zram *zram_devices;
/* Module params (documentation at end) */
-static unsigned int num_devices;
+static unsigned int num_devices = 1;
-static void zram_stat_inc(u32 *v)
+static inline struct zram *dev_to_zram(struct device *dev)
{
- *v = *v + 1;
+ return (struct zram *)dev_to_disk(dev)->private_data;
}
-static void zram_stat_dec(u32 *v)
+static ssize_t disksize_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- *v = *v - 1;
+ struct zram *zram = dev_to_zram(dev);
+
+ return sprintf(buf, "%llu\n", zram->disksize);
}
-static void zram_stat64_add(struct zram *zram, u64 *v, u64 inc)
+static ssize_t initstate_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- spin_lock(&zram->stat64_lock);
- *v = *v + inc;
- spin_unlock(&zram->stat64_lock);
+ struct zram *zram = dev_to_zram(dev);
+
+ return sprintf(buf, "%u\n", zram->init_done);
}
-static void zram_stat64_sub(struct zram *zram, u64 *v, u64 dec)
+static ssize_t num_reads_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- spin_lock(&zram->stat64_lock);
- *v = *v - dec;
- spin_unlock(&zram->stat64_lock);
+ struct zram *zram = dev_to_zram(dev);
+
+ return sprintf(buf, "%llu\n",
+ (u64)atomic64_read(&zram->stats.num_reads));
}
-static void zram_stat64_inc(struct zram *zram, u64 *v)
+static ssize_t num_writes_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- zram_stat64_add(zram, v, 1);
+ struct zram *zram = dev_to_zram(dev);
+
+ return sprintf(buf, "%llu\n",
+ (u64)atomic64_read(&zram->stats.num_writes));
}
-static int zram_test_flag(struct zram *zram, u32 index,
+static ssize_t invalid_io_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct zram *zram = dev_to_zram(dev);
+
+ return sprintf(buf, "%llu\n",
+ (u64)atomic64_read(&zram->stats.invalid_io));
+}
+
+static ssize_t notify_free_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct zram *zram = dev_to_zram(dev);
+
+ return sprintf(buf, "%llu\n",
+ (u64)atomic64_read(&zram->stats.notify_free));
+}
+
+static ssize_t zero_pages_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct zram *zram = dev_to_zram(dev);
+
+ return sprintf(buf, "%u\n", zram->stats.pages_zero);
+}
+
+static ssize_t orig_data_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct zram *zram = dev_to_zram(dev);
+
+ return sprintf(buf, "%llu\n",
+ (u64)(zram->stats.pages_stored) << PAGE_SHIFT);
+}
+
+static ssize_t compr_data_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct zram *zram = dev_to_zram(dev);
+
+ return sprintf(buf, "%llu\n",
+ (u64)atomic64_read(&zram->stats.compr_size));
+}
+
+static ssize_t mem_used_total_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u64 val = 0;
+ struct zram *zram = dev_to_zram(dev);
+ struct zram_meta *meta = zram->meta;
+
+ down_read(&zram->init_lock);
+ if (zram->init_done)
+ val = zs_get_total_size_bytes(meta->mem_pool);
+ up_read(&zram->init_lock);
+
+ return sprintf(buf, "%llu\n", val);
+}
+
+static int zram_test_flag(struct zram_meta *meta, u32 index,
enum zram_pageflags flag)
{
- return zram->table[index].flags & BIT(flag);
+ return meta->table[index].flags & BIT(flag);
}
-static void zram_set_flag(struct zram *zram, u32 index,
+static void zram_set_flag(struct zram_meta *meta, u32 index,
enum zram_pageflags flag)
{
- zram->table[index].flags |= BIT(flag);
+ meta->table[index].flags |= BIT(flag);
}
-static void zram_clear_flag(struct zram *zram, u32 index,
+static void zram_clear_flag(struct zram_meta *meta, u32 index,
enum zram_pageflags flag)
{
- zram->table[index].flags &= ~BIT(flag);
+ meta->table[index].flags &= ~BIT(flag);
+}
+
+static inline int is_partial_io(struct bio_vec *bvec)
+{
+ return bvec->bv_len != PAGE_SIZE;
+}
+
+/*
+ * Check if request is within bounds and aligned on zram logical blocks.
+ */
+static inline int valid_io_request(struct zram *zram, struct bio *bio)
+{
+ u64 start, end, bound;
+
+ /* unaligned request */
+ if (unlikely(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
+ return 0;
+ if (unlikely(bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
+ return 0;
+
+ start = bio->bi_sector;
+ end = start + (bio->bi_size >> SECTOR_SHIFT);
+ bound = zram->disksize >> SECTOR_SHIFT;
+	/* out of range */
+ if (unlikely(start >= bound || end > bound || start > end))
+ return 0;
+
+ /* I/O request is valid */
+ return 1;
+}
+
+static void zram_meta_free(struct zram_meta *meta)
+{
+ zs_destroy_pool(meta->mem_pool);
+ kfree(meta->compress_workmem);
+ free_pages((unsigned long)meta->compress_buffer, 1);
+ vfree(meta->table);
+ kfree(meta);
+}
+
+static struct zram_meta *zram_meta_alloc(u64 disksize)
+{
+ size_t num_pages;
+ struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
+ if (!meta)
+ goto out;
+
+ meta->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
+ if (!meta->compress_workmem)
+ goto free_meta;
+
+ meta->compress_buffer =
+ (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
+ if (!meta->compress_buffer) {
+ pr_err("Error allocating compressor buffer space\n");
+ goto free_workmem;
+ }
+
+ num_pages = disksize >> PAGE_SHIFT;
+ meta->table = vzalloc(num_pages * sizeof(*meta->table));
+ if (!meta->table) {
+ pr_err("Error allocating zram address table\n");
+ goto free_buffer;
+ }
+
+ meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
+ if (!meta->mem_pool) {
+ pr_err("Error creating memory pool\n");
+ goto free_table;
+ }
+
+ return meta;
+
+free_table:
+ vfree(meta->table);
+free_buffer:
+ free_pages((unsigned long)meta->compress_buffer, 1);
+free_workmem:
+ kfree(meta->compress_workmem);
+free_meta:
+ kfree(meta);
+ meta = NULL;
+out:
+ return meta;
+}
+
+static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
+{
+ if (*offset + bvec->bv_len >= PAGE_SIZE)
+ (*index)++;
+ *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}
static int page_zero_filled(void *ptr)
@@ -104,349 +264,244 @@
return 1;
}
-static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
-{
- if (!zram->disksize) {
- pr_info(
- "disk size not provided. You can use disksize_kb module "
- "param to specify size.\nUsing default: (%u%% of RAM).\n",
- default_disksize_perc_ram
- );
- zram->disksize = default_disksize_perc_ram *
- (totalram_bytes / 100);
- }
-
- if (zram->disksize > 2 * (totalram_bytes)) {
- pr_info(
- "There is little point creating a zram of greater than "
- "twice the size of memory since we expect a 2:1 compression "
- "ratio. Note that zram uses about 0.1%% of the size of "
- "the disk when not in use so a huge zram is "
- "wasteful.\n"
- "\tMemory Size: %zu kB\n"
- "\tSize you selected: %llu kB\n"
- "Continuing anyway ...\n",
- totalram_bytes >> 10, zram->disksize
- );
- }
-
- zram->disksize &= PAGE_MASK;
-}
-
-static void zram_free_page(struct zram *zram, size_t index)
-{
- void *handle = zram->table[index].handle;
-
- if (unlikely(!handle)) {
- /*
- * No memory is allocated for zero filled pages.
- * Simply clear zero page flag.
- */
- if (zram_test_flag(zram, index, ZRAM_ZERO)) {
- zram_clear_flag(zram, index, ZRAM_ZERO);
- zram_stat_dec(&zram->stats.pages_zero);
- }
- return;
- }
-
- if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
- __free_page(handle);
- zram_clear_flag(zram, index, ZRAM_UNCOMPRESSED);
- zram_stat_dec(&zram->stats.pages_expand);
- goto out;
- }
-
- zs_free(zram->mem_pool, handle);
-
- if (zram->table[index].size <= PAGE_SIZE / 2)
- zram_stat_dec(&zram->stats.good_compress);
-
-out:
- zram_stat64_sub(zram, &zram->stats.compr_size,
- zram->table[index].size);
- zram_stat_dec(&zram->stats.pages_stored);
-
- zram->table[index].handle = NULL;
- zram->table[index].size = 0;
-}
-
static void handle_zero_page(struct bio_vec *bvec)
{
struct page *page = bvec->bv_page;
void *user_mem;
user_mem = kmap_atomic(page);
- memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
+ if (is_partial_io(bvec))
+ memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
+ else
+ clear_page(user_mem);
kunmap_atomic(user_mem);
flush_dcache_page(page);
}
-static void handle_uncompressed_page(struct zram *zram, struct bio_vec *bvec,
- u32 index, int offset)
+static void zram_free_page(struct zram *zram, size_t index)
{
- struct page *page = bvec->bv_page;
- unsigned char *user_mem, *cmem;
+ struct zram_meta *meta = zram->meta;
+ unsigned long handle = meta->table[index].handle;
+ u16 size = meta->table[index].size;
- user_mem = kmap_atomic(page);
- cmem = kmap_atomic(zram->table[index].handle);
+ if (unlikely(!handle)) {
+ /*
+ * No memory is allocated for zero filled pages.
+ * Simply clear zero page flag.
+ */
+ if (zram_test_flag(meta, index, ZRAM_ZERO)) {
+ zram_clear_flag(meta, index, ZRAM_ZERO);
+ zram->stats.pages_zero--;
+ }
+ return;
+ }
- memcpy(user_mem + bvec->bv_offset, cmem + offset, bvec->bv_len);
- kunmap_atomic(cmem);
- kunmap_atomic(user_mem);
+ if (unlikely(size > max_zpage_size))
+ zram->stats.bad_compress--;
- flush_dcache_page(page);
+ zs_free(meta->mem_pool, handle);
+
+ if (size <= PAGE_SIZE / 2)
+ zram->stats.good_compress--;
+
+ atomic64_sub(meta->table[index].size, &zram->stats.compr_size);
+ zram->stats.pages_stored--;
+
+ meta->table[index].handle = 0;
+ meta->table[index].size = 0;
}
-static inline int is_partial_io(struct bio_vec *bvec)
+static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
- return bvec->bv_len != PAGE_SIZE;
+ int ret = LZO_E_OK;
+ size_t clen = PAGE_SIZE;
+ unsigned char *cmem;
+ struct zram_meta *meta = zram->meta;
+ unsigned long handle = meta->table[index].handle;
+
+ if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
+ clear_page(mem);
+ return 0;
+ }
+
+ cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
+ if (meta->table[index].size == PAGE_SIZE)
+ copy_page(mem, cmem);
+ else
+ ret = lzo1x_decompress_safe(cmem, meta->table[index].size,
+ mem, &clen);
+ zs_unmap_object(meta->mem_pool, handle);
+
+ /* Should NEVER happen. Return bio error if it does. */
+ if (unlikely(ret != LZO_E_OK)) {
+ pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
+ atomic64_inc(&zram->stats.failed_reads);
+ return ret;
+ }
+
+ return 0;
}
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
u32 index, int offset, struct bio *bio)
{
int ret;
- size_t clen;
struct page *page;
- struct zobj_header *zheader;
- unsigned char *user_mem, *cmem, *uncmem = NULL;
-
+ unsigned char *user_mem, *uncmem = NULL;
+ struct zram_meta *meta = zram->meta;
page = bvec->bv_page;
- if (zram_test_flag(zram, index, ZRAM_ZERO)) {
+ if (unlikely(!meta->table[index].handle) ||
+ zram_test_flag(meta, index, ZRAM_ZERO)) {
handle_zero_page(bvec);
return 0;
}
- /* Requested page is not present in compressed area */
- if (unlikely(!zram->table[index].handle)) {
- pr_debug("Read before write: sector=%lu, size=%u",
- (ulong)(bio->bi_sector), bio->bi_size);
- handle_zero_page(bvec);
- return 0;
- }
-
- /* Page is stored uncompressed since it's incompressible */
- if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
- handle_uncompressed_page(zram, bvec, index, offset);
- return 0;
- }
-
- if (is_partial_io(bvec)) {
+ if (is_partial_io(bvec))
/* Use a temporary buffer to decompress the page */
- uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!uncmem) {
- pr_info("Error allocating temp memory!\n");
- return -ENOMEM;
- }
- }
+ uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
user_mem = kmap_atomic(page);
if (!is_partial_io(bvec))
uncmem = user_mem;
- clen = PAGE_SIZE;
- cmem = zs_map_object(zram->mem_pool, zram->table[index].handle);
-
- ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
- zram->table[index].size,
- uncmem, &clen);
-
- if (is_partial_io(bvec)) {
- memcpy(user_mem + bvec->bv_offset, uncmem + offset,
- bvec->bv_len);
- kfree(uncmem);
+ if (!uncmem) {
+ pr_info("Unable to allocate temp memory\n");
+ ret = -ENOMEM;
+ goto out_cleanup;
}
- zs_unmap_object(zram->mem_pool, zram->table[index].handle);
- kunmap_atomic(user_mem);
-
+ ret = zram_decompress_page(zram, uncmem, index);
/* Should NEVER happen. Return bio error if it does. */
- if (unlikely(ret != LZO_E_OK)) {
- pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
- zram_stat64_inc(zram, &zram->stats.failed_reads);
- return ret;
- }
+ if (unlikely(ret != LZO_E_OK))
+ goto out_cleanup;
+
+ if (is_partial_io(bvec))
+ memcpy(user_mem + bvec->bv_offset, uncmem + offset,
+ bvec->bv_len);
flush_dcache_page(page);
-
- return 0;
-}
-
-static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
-{
- int ret;
- size_t clen = PAGE_SIZE;
- struct zobj_header *zheader;
- unsigned char *cmem;
-
- if (zram_test_flag(zram, index, ZRAM_ZERO) ||
- !zram->table[index].handle) {
- memset(mem, 0, PAGE_SIZE);
- return 0;
- }
-
- cmem = zs_map_object(zram->mem_pool, zram->table[index].handle);
-
- /* Page is stored uncompressed since it's incompressible */
- if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
- memcpy(mem, cmem, PAGE_SIZE);
- kunmap_atomic(cmem);
- return 0;
- }
-
- ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
- zram->table[index].size,
- mem, &clen);
- zs_unmap_object(zram->mem_pool, zram->table[index].handle);
-
- /* Should NEVER happen. Return bio error if it does. */
- if (unlikely(ret != LZO_E_OK)) {
- pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
- zram_stat64_inc(zram, &zram->stats.failed_reads);
- return ret;
- }
-
- return 0;
+ ret = 0;
+out_cleanup:
+ kunmap_atomic(user_mem);
+ if (is_partial_io(bvec))
+ kfree(uncmem);
+ return ret;
}
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
int offset)
{
- int ret;
- u32 store_offset;
+ int ret = 0;
size_t clen;
- void *handle;
- struct zobj_header *zheader;
- struct page *page, *page_store;
+ unsigned long handle;
+ struct page *page;
unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
+ struct zram_meta *meta = zram->meta;
page = bvec->bv_page;
- src = zram->compress_buffer;
+ src = meta->compress_buffer;
if (is_partial_io(bvec)) {
/*
* This is a partial IO. We need to read the full page
* before to write the changes.
*/
- uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
if (!uncmem) {
- pr_info("Error allocating temp memory!\n");
ret = -ENOMEM;
goto out;
}
- ret = zram_read_before_write(zram, uncmem, index);
- if (ret) {
- kfree(uncmem);
+ ret = zram_decompress_page(zram, uncmem, index);
+ if (ret)
goto out;
- }
}
- /*
- * System overwrites unused sectors. Free memory associated
- * with this sector now.
- */
- if (zram->table[index].handle ||
- zram_test_flag(zram, index, ZRAM_ZERO))
- zram_free_page(zram, index);
-
user_mem = kmap_atomic(page);
- if (is_partial_io(bvec))
+ if (is_partial_io(bvec)) {
memcpy(uncmem + offset, user_mem + bvec->bv_offset,
bvec->bv_len);
- else
+ kunmap_atomic(user_mem);
+ user_mem = NULL;
+ } else {
uncmem = user_mem;
+ }
if (page_zero_filled(uncmem)) {
kunmap_atomic(user_mem);
- if (is_partial_io(bvec))
- kfree(uncmem);
- zram_stat_inc(&zram->stats.pages_zero);
- zram_set_flag(zram, index, ZRAM_ZERO);
+ /* Free memory associated with this sector now. */
+ zram_free_page(zram, index);
+
+ zram->stats.pages_zero++;
+ zram_set_flag(meta, index, ZRAM_ZERO);
ret = 0;
goto out;
}
ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
- zram->compress_workmem);
+ meta->compress_workmem);
- kunmap_atomic(user_mem);
- if (is_partial_io(bvec))
- kfree(uncmem);
+ if (!is_partial_io(bvec)) {
+ kunmap_atomic(user_mem);
+ user_mem = NULL;
+ uncmem = NULL;
+ }
if (unlikely(ret != LZO_E_OK)) {
pr_err("Compression failed! err=%d\n", ret);
goto out;
}
- /*
- * Page is incompressible. Store it as-is (uncompressed)
- * since we do not want to return too many disk write
- * errors which has side effect of hanging the system.
- */
if (unlikely(clen > max_zpage_size)) {
+ zram->stats.bad_compress++;
clen = PAGE_SIZE;
- page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
- if (unlikely(!page_store)) {
- pr_info("Error allocating memory for "
- "incompressible page: %u\n", index);
- ret = -ENOMEM;
- goto out;
- }
-
- store_offset = 0;
- zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
- zram_stat_inc(&zram->stats.pages_expand);
- handle = page_store;
- src = kmap_atomic(page);
- cmem = kmap_atomic(page_store);
- goto memstore;
+ src = NULL;
+ if (is_partial_io(bvec))
+ src = uncmem;
}
- handle = zs_malloc(zram->mem_pool, clen + sizeof(*zheader));
+ handle = zs_malloc(meta->mem_pool, clen);
if (!handle) {
- pr_info("Error allocating memory for compressed "
- "page: %u, size=%zu\n", index, clen);
+ pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
+ index, clen);
ret = -ENOMEM;
goto out;
}
- cmem = zs_map_object(zram->mem_pool, handle);
+ cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);
-memstore:
-#if 0
- /* Back-reference needed for memory defragmentation */
- if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
- zheader = (struct zobj_header *)cmem;
- zheader->table_idx = index;
- cmem += sizeof(*zheader);
- }
-#endif
-
- memcpy(cmem, src, clen);
-
- if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
- kunmap_atomic(cmem);
+ if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
+ src = kmap_atomic(page);
+ copy_page(cmem, src);
kunmap_atomic(src);
} else {
- zs_unmap_object(zram->mem_pool, handle);
+ memcpy(cmem, src, clen);
}
- zram->table[index].handle = handle;
- zram->table[index].size = clen;
+ zs_unmap_object(meta->mem_pool, handle);
+
+ /*
+ * Free memory associated with this sector
+ * before overwriting unused sectors.
+ */
+ zram_free_page(zram, index);
+
+ meta->table[index].handle = handle;
+ meta->table[index].size = clen;
/* Update stats */
- zram_stat64_add(zram, &zram->stats.compr_size, clen);
- zram_stat_inc(&zram->stats.pages_stored);
+ atomic64_add(clen, &zram->stats.compr_size);
+ zram->stats.pages_stored++;
if (clen <= PAGE_SIZE / 2)
- zram_stat_inc(&zram->stats.good_compress);
-
- return 0;
+ zram->stats.good_compress++;
out:
+ if (is_partial_io(bvec))
+ kfree(uncmem);
+
if (ret)
- zram_stat64_inc(zram, &zram->stats.failed_writes);
+ atomic64_inc(&zram->stats.failed_writes);
return ret;
}
@@ -468,11 +523,121 @@
return ret;
}
-static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
+static void zram_reset_device(struct zram *zram)
{
- if (*offset + bvec->bv_len >= PAGE_SIZE)
- (*index)++;
- *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
+ size_t index;
+ struct zram_meta *meta;
+
+ down_write(&zram->init_lock);
+ if (!zram->init_done) {
+ up_write(&zram->init_lock);
+ return;
+ }
+
+ meta = zram->meta;
+ zram->init_done = 0;
+
+ /* Free all pages that are still in this zram device */
+ for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
+ unsigned long handle = meta->table[index].handle;
+ if (!handle)
+ continue;
+
+ zs_free(meta->mem_pool, handle);
+ }
+
+ zram_meta_free(zram->meta);
+ zram->meta = NULL;
+ /* Reset stats */
+ memset(&zram->stats, 0, sizeof(zram->stats));
+
+ zram->disksize = 0;
+ set_capacity(zram->disk, 0);
+ up_write(&zram->init_lock);
+}
+
+static void zram_init_device(struct zram *zram, struct zram_meta *meta)
+{
+ if (zram->disksize > 2 * (totalram_pages << PAGE_SHIFT)) {
+ pr_info(
+ "There is little point creating a zram of greater than "
+ "twice the size of memory since we expect a 2:1 compression "
+ "ratio. Note that zram uses about 0.1%% of the size of "
+ "the disk when not in use so a huge zram is "
+ "wasteful.\n"
+ "\tMemory Size: %lu kB\n"
+ "\tSize you selected: %llu kB\n"
+ "Continuing anyway ...\n",
+ (totalram_pages << PAGE_SHIFT) >> 10, zram->disksize >> 10
+ );
+ }
+
+ /* zram devices sort of resembles non-rotational disks */
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
+
+ zram->meta = meta;
+ zram->init_done = 1;
+
+ pr_debug("Initialization done!\n");
+}
+
+static ssize_t disksize_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ u64 disksize;
+ struct zram_meta *meta;
+ struct zram *zram = dev_to_zram(dev);
+
+ disksize = memparse(buf, NULL);
+ if (!disksize)
+ return -EINVAL;
+
+ disksize = PAGE_ALIGN(disksize);
+ meta = zram_meta_alloc(disksize);
+ down_write(&zram->init_lock);
+ if (zram->init_done) {
+ up_write(&zram->init_lock);
+ zram_meta_free(meta);
+ pr_info("Cannot change disksize for initialized device\n");
+ return -EBUSY;
+ }
+
+ zram->disksize = disksize;
+ set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
+ zram_init_device(zram, meta);
+ up_write(&zram->init_lock);
+
+ return len;
+}
+
+static ssize_t reset_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ int ret;
+ unsigned short do_reset;
+ struct zram *zram;
+ struct block_device *bdev;
+
+ zram = dev_to_zram(dev);
+ bdev = bdget_disk(zram->disk, 0);
+
+ /* Do not reset an active device! */
+ if (bdev->bd_holders)
+ return -EBUSY;
+
+ ret = kstrtou16(buf, 10, &do_reset);
+ if (ret)
+ return ret;
+
+ if (!do_reset)
+ return -EINVAL;
+
+ /* Make sure all pending I/O is finished */
+ if (bdev)
+ fsync_bdev(bdev);
+
+ zram_reset_device(zram);
+ return len;
}
static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
@@ -483,10 +648,10 @@
switch (rw) {
case READ:
- zram_stat64_inc(zram, &zram->stats.num_reads);
+ atomic64_inc(&zram->stats.num_reads);
break;
case WRITE:
- zram_stat64_inc(zram, &zram->stats.num_writes);
+ atomic64_inc(&zram->stats.num_writes);
break;
}
@@ -531,39 +696,19 @@
}
/*
- * Check if request is within bounds and aligned on zram logical blocks.
- */
-static inline int valid_io_request(struct zram *zram, struct bio *bio)
-{
- if (unlikely(
- (bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) ||
- (bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)) ||
- (bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))) {
-
- return 0;
- }
-
- /* I/O request is valid */
- return 1;
-}
-
-/*
* Handler function for all zram I/O requests.
*/
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
struct zram *zram = queue->queuedata;
- if (unlikely(!zram->init_done) && zram_init_device(zram))
- goto error;
-
down_read(&zram->init_lock);
if (unlikely(!zram->init_done))
- goto error_unlock;
+ goto error;
if (!valid_io_request(zram, bio)) {
- zram_stat64_inc(zram, &zram->stats.invalid_io);
- goto error_unlock;
+ atomic64_inc(&zram->stats.invalid_io);
+ goto error;
}
__zram_make_request(zram, bio, bio_data_dir(bio));
@@ -571,129 +716,21 @@
return;
-error_unlock:
- up_read(&zram->init_lock);
error:
+ up_read(&zram->init_lock);
bio_io_error(bio);
}
-void __zram_reset_device(struct zram *zram)
-{
- size_t index;
-
- zram->init_done = 0;
-
- /* Free various per-device buffers */
- kfree(zram->compress_workmem);
- free_pages((unsigned long)zram->compress_buffer, 1);
-
- zram->compress_workmem = NULL;
- zram->compress_buffer = NULL;
-
- /* Free all pages that are still in this zram device */
- for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
- void *handle = zram->table[index].handle;
- if (!handle)
- continue;
-
- if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
- __free_page(handle);
- else
- zs_free(zram->mem_pool, handle);
- }
-
- vfree(zram->table);
- zram->table = NULL;
-
- zs_destroy_pool(zram->mem_pool);
- zram->mem_pool = NULL;
-
- /* Reset stats */
- memset(&zram->stats, 0, sizeof(zram->stats));
-
- zram->disksize = 0;
-}
-
-void zram_reset_device(struct zram *zram)
-{
- down_write(&zram->init_lock);
- __zram_reset_device(zram);
- up_write(&zram->init_lock);
-}
-
-int zram_init_device(struct zram *zram)
-{
- int ret;
- size_t num_pages;
-
- down_write(&zram->init_lock);
-
- if (zram->init_done) {
- up_write(&zram->init_lock);
- return 0;
- }
-
- zram_set_disksize(zram, totalram_pages << PAGE_SHIFT);
-
- zram->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
- if (!zram->compress_workmem) {
- pr_err("Error allocating compressor working memory!\n");
- ret = -ENOMEM;
- goto fail_no_table;
- }
-
- zram->compress_buffer =
- (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
- if (!zram->compress_buffer) {
- pr_err("Error allocating compressor buffer space\n");
- ret = -ENOMEM;
- goto fail_no_table;
- }
-
- num_pages = zram->disksize >> PAGE_SHIFT;
- zram->table = vzalloc(num_pages * sizeof(*zram->table));
- if (!zram->table) {
- pr_err("Error allocating zram address table\n");
- ret = -ENOMEM;
- goto fail_no_table;
- }
-
- set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
-
- /* zram devices sort of resembles non-rotational disks */
- queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
-
- zram->mem_pool = zs_create_pool("zram", GFP_NOIO | __GFP_HIGHMEM);
- if (!zram->mem_pool) {
- pr_err("Error creating memory pool\n");
- ret = -ENOMEM;
- goto fail;
- }
-
- zram->init_done = 1;
- up_write(&zram->init_lock);
-
- pr_debug("Initialization done!\n");
- return 0;
-
-fail_no_table:
- /* To prevent accessing table entries during cleanup */
- zram->disksize = 0;
-fail:
- __zram_reset_device(zram);
- up_write(&zram->init_lock);
- pr_err("Initialization failed: err=%d\n", ret);
- return ret;
-}
-
static void zram_slot_free_notify(struct block_device *bdev,
unsigned long index)
{
struct zram *zram;
zram = bdev->bd_disk->private_data;
+ down_write(&zram->lock);
zram_free_page(zram, index);
- zram_stat64_inc(zram, &zram->stats.notify_free);
+ up_write(&zram->lock);
+ atomic64_inc(&zram->stats.notify_free);
}
static const struct block_device_operations zram_devops = {
@@ -701,19 +738,49 @@
.owner = THIS_MODULE
};
+static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
+ disksize_show, disksize_store);
+static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
+static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
+static DEVICE_ATTR(num_reads, S_IRUGO, num_reads_show, NULL);
+static DEVICE_ATTR(num_writes, S_IRUGO, num_writes_show, NULL);
+static DEVICE_ATTR(invalid_io, S_IRUGO, invalid_io_show, NULL);
+static DEVICE_ATTR(notify_free, S_IRUGO, notify_free_show, NULL);
+static DEVICE_ATTR(zero_pages, S_IRUGO, zero_pages_show, NULL);
+static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
+static DEVICE_ATTR(compr_data_size, S_IRUGO, compr_data_size_show, NULL);
+static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);
+
+static struct attribute *zram_disk_attrs[] = {
+ &dev_attr_disksize.attr,
+ &dev_attr_initstate.attr,
+ &dev_attr_reset.attr,
+ &dev_attr_num_reads.attr,
+ &dev_attr_num_writes.attr,
+ &dev_attr_invalid_io.attr,
+ &dev_attr_notify_free.attr,
+ &dev_attr_zero_pages.attr,
+ &dev_attr_orig_data_size.attr,
+ &dev_attr_compr_data_size.attr,
+ &dev_attr_mem_used_total.attr,
+ NULL,
+};
+
+static struct attribute_group zram_disk_attr_group = {
+ .attrs = zram_disk_attrs,
+};
+
static int create_device(struct zram *zram, int device_id)
{
- int ret = 0;
+ int ret = -ENOMEM;
init_rwsem(&zram->lock);
init_rwsem(&zram->init_lock);
- spin_lock_init(&zram->stat64_lock);
zram->queue = blk_alloc_queue(GFP_KERNEL);
if (!zram->queue) {
pr_err("Error allocating disk queue for device %d\n",
device_id);
- ret = -ENOMEM;
goto out;
}
@@ -723,11 +790,9 @@
/* gendisk structure */
zram->disk = alloc_disk(1);
if (!zram->disk) {
- blk_cleanup_queue(zram->queue);
- pr_warning("Error allocating disk structure for device %d\n",
+ pr_warn("Error allocating disk structure for device %d\n",
device_id);
- ret = -ENOMEM;
- goto out;
+ goto out_free_queue;
}
zram->disk->major = zram_major;
@@ -755,12 +820,18 @@
ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
&zram_disk_attr_group);
if (ret < 0) {
- pr_warning("Error creating sysfs group");
- goto out;
+ pr_warn("Error creating sysfs group");
+ goto out_free_disk;
}
zram->init_done = 0;
+ return 0;
+out_free_disk:
+ del_gendisk(zram->disk);
+ put_disk(zram->disk);
+out_free_queue:
+ blk_cleanup_queue(zram->queue);
out:
return ret;
}
@@ -779,17 +850,12 @@
blk_cleanup_queue(zram->queue);
}
-unsigned int zram_get_num_devices(void)
-{
- return num_devices;
-}
-
static int __init zram_init(void)
{
int ret, dev_id;
if (num_devices > max_num_devices) {
- pr_warning("Invalid value for num_devices: %u\n",
+ pr_warn("Invalid value for num_devices: %u\n",
num_devices);
ret = -EINVAL;
goto out;
@@ -797,18 +863,12 @@
zram_major = register_blkdev(0, "zram");
if (zram_major <= 0) {
- pr_warning("Unable to get major number\n");
+ pr_warn("Unable to get major number\n");
ret = -EBUSY;
goto out;
}
- if (!num_devices) {
- pr_info("num_devices not specified. Using default: 1\n");
- num_devices = 1;
- }
-
/* Allocate the device array and initialize each one */
- pr_info("Creating %u devices ...\n", num_devices);
zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
if (!zram_devices) {
ret = -ENOMEM;
@@ -821,6 +881,8 @@
goto free_devices;
}
+ pr_info("Created %u device(s) ...\n", num_devices);
+
return 0;
free_devices:
@@ -841,9 +903,10 @@
for (i = 0; i < num_devices; i++) {
zram = &zram_devices[i];
+ get_disk(zram->disk);
destroy_device(zram);
- if (zram->init_done)
- zram_reset_device(zram);
+ zram_reset_device(zram);
+ put_disk(zram->disk);
}
unregister_blkdev(zram_major, "zram");
@@ -852,12 +915,13 @@
pr_debug("Cleanup done!\n");
}
-module_param(num_devices, uint, 0);
-MODULE_PARM_DESC(num_devices, "Number of zram devices");
-
module_init(zram_init);
module_exit(zram_exit);
+module_param(num_devices, uint, 0);
+MODULE_PARM_DESC(num_devices, "Number of zram devices");
+
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");
+MODULE_ALIAS("devname:zram");
diff --git a/drivers/staging/zram/zram_drv.h b/drivers/staging/zram/zram_drv.h
index fbe8ac9..9e57bfb 100644
--- a/drivers/staging/zram/zram_drv.h
+++ b/drivers/staging/zram/zram_drv.h
@@ -26,23 +26,8 @@
*/
static const unsigned max_num_devices = 32;
-/*
- * Stored at beginning of each compressed object.
- *
- * It stores back-reference to table entry which points to this
- * object. This is required to support memory defragmentation.
- */
-struct zobj_header {
-#if 0
- u32 table_idx;
-#endif
-};
-
/*-- Configurable parameters */
-/* Default zram disk size: 25% of total RAM */
-static const unsigned default_disksize_perc_ram = 25;
-
/*
* Pages that compress to size greater than this are stored
* uncompressed in memory.
@@ -51,8 +36,8 @@
/*
* NOTE: max_zpage_size must be less than or equal to:
- * ZS_MAX_ALLOC_SIZE - sizeof(struct zobj_header)
- * otherwise, xv_malloc() would always return failure.
+ * ZS_MAX_ALLOC_SIZE. Otherwise, zs_malloc() would
+ * always return failure.
*/
/*-- End of configurable params */
@@ -68,9 +53,6 @@
/* Flags for zram pages (table[page_no].flags) */
enum zram_pageflags {
- /* Page is stored uncompressed */
- ZRAM_UNCOMPRESSED,
-
/* Page consists entirely of zeros */
ZRAM_ZERO,
@@ -81,34 +63,42 @@
/* Allocated for each disk page */
struct table {
- void *handle;
+ unsigned long handle;
u16 size; /* object size (excluding header) */
u8 count; /* object ref count (not yet used) */
u8 flags;
-} __attribute__((aligned(4)));
+} __aligned(4);
+/*
+ * All 64bit fields should only be manipulated by 64bit atomic accessors.
+ * All modifications to 32bit counters should be protected by zram->lock.
+ */
struct zram_stats {
- u64 compr_size; /* compressed size of pages stored */
- u64 num_reads; /* failed + successful */
- u64 num_writes; /* --do-- */
- u64 failed_reads; /* should NEVER! happen */
- u64 failed_writes; /* can happen when memory is too low */
- u64 invalid_io; /* non-page-aligned I/O requests */
- u64 notify_free; /* no. of swap slot free notifications */
+ atomic64_t compr_size; /* compressed size of pages stored */
+ atomic64_t num_reads; /* failed + successful */
+ atomic64_t num_writes; /* --do-- */
+ atomic64_t failed_reads; /* should NEVER! happen */
+ atomic64_t failed_writes; /* can happen when memory is too low */
+ atomic64_t invalid_io; /* non-page-aligned I/O requests */
+ atomic64_t notify_free; /* no. of swap slot free notifications */
u32 pages_zero; /* no. of zero filled pages */
u32 pages_stored; /* no. of pages currently stored */
u32 good_compress; /* % of pages with compression ratio<=50% */
- u32 pages_expand; /* % of incompressible pages */
+ u32 bad_compress; /* % of pages with compression ratio>=75% */
};
-struct zram {
- struct zs_pool *mem_pool;
+struct zram_meta {
void *compress_workmem;
void *compress_buffer;
struct table *table;
- spinlock_t stat64_lock; /* protect 64-bit stats */
- struct rw_semaphore lock; /* protect compression buffers and table
- * against concurrent read and writes */
+ struct zs_pool *mem_pool;
+};
+
+struct zram {
+ struct zram_meta *meta;
+ struct rw_semaphore lock; /* protect compression buffers, table,
+ * 32bit stat counters against concurrent
+ * notifications, reads and writes */
struct request_queue *queue;
struct gendisk *disk;
int init_done;
@@ -122,14 +112,4 @@
struct zram_stats stats;
};
-
-extern struct zram *zram_devices;
-unsigned int zram_get_num_devices(void);
-#ifdef CONFIG_SYSFS
-extern struct attribute_group zram_disk_attr_group;
-#endif
-
-extern int zram_init_device(struct zram *zram);
-extern void __zram_reset_device(struct zram *zram);
-
#endif
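
The zram_stats conversion above drops the stat64_lock in favor of atomic64_t, so 64-bit counters can be updated and read without a spinlock even on 32-bit machines. A hedged sketch of how such a counter is typically bumped and read (illustrative only; the real accessors live in zram_drv.c):

    #include <linux/atomic.h>

    static atomic64_t num_reads;    /* stands in for zram->stats.num_reads */

    static void record_read(void)
    {
            atomic64_inc(&num_reads);       /* lock-free, safe on 32-bit too */
    }

    static u64 read_count(void)
    {
            return atomic64_read(&num_reads);
    }
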
diff --git a/drivers/staging/zram/zram_sysfs.c b/drivers/staging/zram/zram_sysfs.c
deleted file mode 100644
index a7f3771..0000000
--- a/drivers/staging/zram/zram_sysfs.c
+++ /dev/null
@@ -1,227 +0,0 @@
-/*
- * Compressed RAM block device
- *
- * Copyright (C) 2008, 2009, 2010 Nitin Gupta
- *
- * This code is released using a dual license strategy: BSD/GPL
- * You can choose the licence that better fits your requirements.
- *
- * Released under the terms of 3-clause BSD License
- * Released under the terms of GNU General Public License Version 2.0
- *
- * Project home: http://compcache.googlecode.com/
- */
-
-#include <linux/device.h>
-#include <linux/genhd.h>
-#include <linux/mm.h>
-
-#include "zram_drv.h"
-
-static u64 zram_stat64_read(struct zram *zram, u64 *v)
-{
- u64 val;
-
- spin_lock(&zram->stat64_lock);
- val = *v;
- spin_unlock(&zram->stat64_lock);
-
- return val;
-}
-
-static struct zram *dev_to_zram(struct device *dev)
-{
- int i;
- struct zram *zram = NULL;
-
- for (i = 0; i < zram_get_num_devices(); i++) {
- zram = &zram_devices[i];
- if (disk_to_dev(zram->disk) == dev)
- break;
- }
-
- return zram;
-}
-
-static ssize_t disksize_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zram *zram = dev_to_zram(dev);
-
- return sprintf(buf, "%llu\n", zram->disksize);
-}
-
-static ssize_t disksize_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
-{
- int ret;
- u64 disksize;
- struct zram *zram = dev_to_zram(dev);
-
- ret = kstrtoull(buf, 10, &disksize);
- if (ret)
- return ret;
-
- down_write(&zram->init_lock);
- if (zram->init_done) {
- up_write(&zram->init_lock);
- pr_info("Cannot change disksize for initialized device\n");
- return -EBUSY;
- }
-
- zram->disksize = PAGE_ALIGN(disksize);
- set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
- up_write(&zram->init_lock);
-
- return len;
-}
-
-static ssize_t initstate_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zram *zram = dev_to_zram(dev);
-
- return sprintf(buf, "%u\n", zram->init_done);
-}
-
-static ssize_t reset_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
-{
- int ret;
- unsigned short do_reset;
- struct zram *zram;
- struct block_device *bdev;
-
- zram = dev_to_zram(dev);
- bdev = bdget_disk(zram->disk, 0);
-
- /* Do not reset an active device! */
- if (bdev->bd_holders)
- return -EBUSY;
-
- ret = kstrtou16(buf, 10, &do_reset);
- if (ret)
- return ret;
-
- if (!do_reset)
- return -EINVAL;
-
- /* Make sure all pending I/O is finished */
- if (bdev)
- fsync_bdev(bdev);
-
- down_write(&zram->init_lock);
- if (zram->init_done)
- __zram_reset_device(zram);
- up_write(&zram->init_lock);
-
- return len;
-}
-
-static ssize_t num_reads_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zram *zram = dev_to_zram(dev);
-
- return sprintf(buf, "%llu\n",
- zram_stat64_read(zram, &zram->stats.num_reads));
-}
-
-static ssize_t num_writes_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zram *zram = dev_to_zram(dev);
-
- return sprintf(buf, "%llu\n",
- zram_stat64_read(zram, &zram->stats.num_writes));
-}
-
-static ssize_t invalid_io_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zram *zram = dev_to_zram(dev);
-
- return sprintf(buf, "%llu\n",
- zram_stat64_read(zram, &zram->stats.invalid_io));
-}
-
-static ssize_t notify_free_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zram *zram = dev_to_zram(dev);
-
- return sprintf(buf, "%llu\n",
- zram_stat64_read(zram, &zram->stats.notify_free));
-}
-
-static ssize_t zero_pages_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zram *zram = dev_to_zram(dev);
-
- return sprintf(buf, "%u\n", zram->stats.pages_zero);
-}
-
-static ssize_t orig_data_size_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zram *zram = dev_to_zram(dev);
-
- return sprintf(buf, "%llu\n",
- (u64)(zram->stats.pages_stored) << PAGE_SHIFT);
-}
-
-static ssize_t compr_data_size_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zram *zram = dev_to_zram(dev);
-
- return sprintf(buf, "%llu\n",
- zram_stat64_read(zram, &zram->stats.compr_size));
-}
-
-static ssize_t mem_used_total_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- u64 val = 0;
- struct zram *zram = dev_to_zram(dev);
-
- if (zram->init_done) {
- val = zs_get_total_size_bytes(zram->mem_pool) +
- ((u64)(zram->stats.pages_expand) << PAGE_SHIFT);
- }
-
- return sprintf(buf, "%llu\n", val);
-}
-
-static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
- disksize_show, disksize_store);
-static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
-static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
-static DEVICE_ATTR(num_reads, S_IRUGO, num_reads_show, NULL);
-static DEVICE_ATTR(num_writes, S_IRUGO, num_writes_show, NULL);
-static DEVICE_ATTR(invalid_io, S_IRUGO, invalid_io_show, NULL);
-static DEVICE_ATTR(notify_free, S_IRUGO, notify_free_show, NULL);
-static DEVICE_ATTR(zero_pages, S_IRUGO, zero_pages_show, NULL);
-static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
-static DEVICE_ATTR(compr_data_size, S_IRUGO, compr_data_size_show, NULL);
-static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);
-
-static struct attribute *zram_disk_attrs[] = {
- &dev_attr_disksize.attr,
- &dev_attr_initstate.attr,
- &dev_attr_reset.attr,
- &dev_attr_num_reads.attr,
- &dev_attr_num_writes.attr,
- &dev_attr_invalid_io.attr,
- &dev_attr_notify_free.attr,
- &dev_attr_zero_pages.attr,
- &dev_attr_orig_data_size.attr,
- &dev_attr_compr_data_size.attr,
- &dev_attr_mem_used_total.attr,
- NULL,
-};
-
-struct attribute_group zram_disk_attr_group = {
- .attrs = zram_disk_attrs,
-};
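
The deleted file above is not lost functionality: its DEVICE_ATTR definitions and attribute group were folded into zram_drv.c (see the additions at the top of this section). The pattern itself is the standard sysfs one — a show callback, a DEVICE_ATTR wrapper, and a group registered against the disk's kobject. A minimal sketch with a hypothetical attribute named foo:

    #include <linux/device.h>
    #include <linux/sysfs.h>

    static ssize_t foo_show(struct device *dev,
                    struct device_attribute *attr, char *buf)
    {
            return sprintf(buf, "%d\n", 42);        /* hypothetical value */
    }

    static DEVICE_ATTR(foo, S_IRUGO, foo_show, NULL);

    static struct attribute *foo_attrs[] = {
            &dev_attr_foo.attr,
            NULL,
    };

    static struct attribute_group foo_attr_group = {
            .attrs = foo_attrs,
    };

    /* registered with: sysfs_create_group(&dev->kobj, &foo_attr_group); */
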
diff --git a/drivers/staging/zsmalloc/Kconfig b/drivers/staging/zsmalloc/Kconfig
index a5ab720..7fab032 100644
--- a/drivers/staging/zsmalloc/Kconfig
+++ b/drivers/staging/zsmalloc/Kconfig
@@ -1,9 +1,5 @@
config ZSMALLOC
- tristate "Memory allocator for compressed pages"
- # X86 dependency is because of the use of __flush_tlb_one and set_pte
- # in zsmalloc-main.c.
- # TODO: convert these to portable functions
- depends on X86
+ bool "Memory allocator for compressed pages"
default n
help
zsmalloc is a slab-based memory allocator designed to store
diff --git a/drivers/staging/zsmalloc/zsmalloc-main.c b/drivers/staging/zsmalloc/zsmalloc-main.c
index 917461c..1a67537 100644
--- a/drivers/staging/zsmalloc/zsmalloc-main.c
+++ b/drivers/staging/zsmalloc/zsmalloc-main.c
@@ -10,6 +10,54 @@
* Released under the terms of GNU General Public License Version 2.0
*/
+
+/*
+ * This allocator is designed for use with zcache and zram. Thus, the
+ * allocator is supposed to work well under low memory conditions. In
+ * particular, it never attempts higher order page allocation which is
+ * very likely to fail under memory pressure. On the other hand, if we
+ * just use single (0-order) pages, it would suffer from very high
+ * fragmentation -- any object of size PAGE_SIZE/2 or larger would occupy
+ * an entire page. This was one of the major issues with its predecessor
+ * (xvmalloc).
+ *
+ * To overcome these issues, zsmalloc allocates a bunch of 0-order pages
+ * and links them together using various 'struct page' fields. These linked
+ * pages act as a single higher-order page i.e. an object can span 0-order
+ * page boundaries. The code refers to these linked pages as a single entity
+ * called zspage.
+ *
+ * Following is how we use various fields and flags of underlying
+ * struct page(s) to form a zspage.
+ *
+ * Usage of struct page fields:
+ * page->first_page: points to the first component (0-order) page
+ * page->index (union with page->freelist): offset of the first object
+ * starting in this page. For the first page, this is
+ * always 0, so we use this field (aka freelist) to point
+ * to the first free object in zspage.
+ * page->lru: links together all component pages (except the first page)
+ * of a zspage
+ *
+ * For _first_ page only:
+ *
+ * page->private (union with page->first_page): refers to the
+ * component page after the first page
+ * page->freelist: points to the first free object in zspage.
+ * Free objects are linked together using in-place
+ * metadata.
+ * page->objects: maximum number of objects we can store in this
+ * zspage (class->zspage_order * PAGE_SIZE / class->size)
+ * page->lru: links together first pages of various zspages.
+ * Basically forming list of zspages in a fullness group.
+ * page->mapping: class index and fullness group of the zspage
+ *
+ * Usage of struct page flags:
+ * PG_private: identifies the first component page
+ * PG_private2: identifies the last component page
+ *
+ */
+
#ifdef CONFIG_ZSMALLOC_DEBUG
#define DEBUG
#endif
@@ -27,9 +75,139 @@
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/vmalloc.h>
+#include <linux/hardirq.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
#include "zsmalloc.h"
-#include "zsmalloc_int.h"
+
+/*
+ * This must be a power of 2 and greater than or equal to sizeof(link_free).
+ * These two conditions ensure that any 'struct link_free' itself doesn't
+ * span more than 1 page, which avoids the complex case of mapping 2 pages
+ * simply to restore link_free pointer values.
+ */
+#define ZS_ALIGN 8
+
+/*
+ * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single)
+ * pages. ZS_MAX_ZSPAGE_ORDER defines upper limit on N.
+ */
+#define ZS_MAX_ZSPAGE_ORDER 2
+#define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)
+
+/*
+ * Object location (<PFN>, <obj_idx>) is encoded
+ * as a single (void *) handle value.
+ *
+ * Note that object index <obj_idx> is relative to system
+ * page <PFN> it is stored in, so for each sub-page belonging
+ * to a zspage, obj_idx starts with 0.
+ *
+ * This is made more complicated by various memory models and PAE.
+ */
+
+#ifndef MAX_PHYSMEM_BITS
+#ifdef CONFIG_HIGHMEM64G
+#define MAX_PHYSMEM_BITS 36
+#else /* !CONFIG_HIGHMEM64G */
+/*
+ * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just
+ * be PAGE_SHIFT
+ */
+#define MAX_PHYSMEM_BITS BITS_PER_LONG
+#endif
+#endif
+#define _PFN_BITS (MAX_PHYSMEM_BITS - PAGE_SHIFT)
+#define OBJ_INDEX_BITS (BITS_PER_LONG - _PFN_BITS)
+#define OBJ_INDEX_MASK ((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
+
+#define MAX(a, b) ((a) >= (b) ? (a) : (b))
+/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
+#define ZS_MIN_ALLOC_SIZE \
+ MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
+#define ZS_MAX_ALLOC_SIZE PAGE_SIZE
+
+/*
+ * On systems with 4K page size, this gives 254 size classes! There is a
+ * trade-off here:
+ * - Large number of size classes is potentially wasteful as free pages are
+ * spread across these classes
+ * - Small number of size classes causes large internal fragmentation
+ * - Probably it's better to use specific size classes (empirically
+ * determined). NOTE: all those class sizes must be set as multiple of
+ * ZS_ALIGN to make sure link_free itself never has to span 2 pages.
+ *
+ * ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
+ * (reason above)
+ */
+#define ZS_SIZE_CLASS_DELTA (PAGE_SIZE >> 8)
+#define ZS_SIZE_CLASSES ((ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) / \
+ ZS_SIZE_CLASS_DELTA + 1)
+
+/*
+ * We do not maintain any list for completely empty or full pages
+ */
+enum fullness_group {
+ ZS_ALMOST_FULL,
+ ZS_ALMOST_EMPTY,
+ _ZS_NR_FULLNESS_GROUPS,
+
+ ZS_EMPTY,
+ ZS_FULL
+};
+
+/*
+ * We assign a page to ZS_ALMOST_EMPTY fullness group when:
+ * n <= N / f, where
+ * n = number of allocated objects
+ * N = total number of objects zspage can store
+ * f = 1/fullness_threshold_frac
+ *
+ * Similarly, we assign zspage to:
+ * ZS_ALMOST_FULL when n > N / f
+ * ZS_EMPTY when n == 0
+ * ZS_FULL when n == N
+ *
+ * (see: fix_fullness_group())
+ */
+static const int fullness_threshold_frac = 4;
+
+struct size_class {
+ /*
+ * Size of objects stored in this class. Must be multiple
+ * of ZS_ALIGN.
+ */
+ int size;
+ unsigned int index;
+
+ /* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
+ int pages_per_zspage;
+
+ spinlock_t lock;
+
+ /* stats */
+ u64 pages_allocated;
+
+ struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
+};
+
+/*
+ * Placed within free objects to form a singly linked list.
+ * For every zspage, first_page->freelist gives head of this list.
+ *
+ * This must be a power of 2 and less than or equal to ZS_ALIGN
+ */
+struct link_free {
+ /* Handle of next free chunk (encodes <PFN, obj_idx>) */
+ void *next;
+};
+
+struct zs_pool {
+ struct size_class size_class[ZS_SIZE_CLASSES];
+
+ gfp_t flags; /* allocation flags used when growing pool */
+};
/*
* A zspage's class index and fullness group
@@ -40,17 +218,39 @@
#define CLASS_IDX_MASK ((1 << CLASS_IDX_BITS) - 1)
#define FULLNESS_MASK ((1 << FULLNESS_BITS) - 1)
+/*
+ * By default, zsmalloc uses a copy-based object mapping method to access
+ * allocations that span two pages. However, if a particular architecture
+ * performs VM mapping faster than copying, then it should be added here
+ * so that USE_PGTABLE_MAPPING is defined. This causes zsmalloc to use
+ * page table mapping rather than copying for object mapping.
+ */
+#if defined(CONFIG_ARM) && !defined(MODULE)
+#define USE_PGTABLE_MAPPING
+#endif
+
+struct mapping_area {
+#ifdef USE_PGTABLE_MAPPING
+ struct vm_struct *vm; /* vm area for mapping objects that span pages */
+#else
+ char *vm_buf; /* copy buffer for objects that span pages */
+#endif
+ char *vm_addr; /* address of kmap_atomic()'ed pages */
+ enum zs_mapmode vm_mm; /* mapping mode */
+};
+
+
/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
static int is_first_page(struct page *page)
{
- return test_bit(PG_private, &page->flags);
+ return PagePrivate(page);
}
static int is_last_page(struct page *page)
{
- return test_bit(PG_private_2, &page->flags);
+ return PagePrivate2(page);
}
static void get_zspage_mapping(struct page *page, unsigned int *class_idx,
@@ -180,7 +380,7 @@
* link together 3 PAGE_SIZE sized pages to form a zspage
* since then we can perfectly fit in 8 such objects.
*/
-static int get_zspage_order(int class_size)
+static int get_pages_per_zspage(int class_size)
{
int i, max_usedpc = 0;
/* zspage order which gives maximum used size per KB */
@@ -223,7 +423,7 @@
if (is_last_page(page))
next = NULL;
else if (is_first_page(page))
- next = (struct page *)page->private;
+ next = (struct page *)page_private(page);
else
next = list_entry(page->lru.next, struct page, lru);
@@ -247,13 +447,11 @@
}
/* Decode <page, obj_idx> pair from the given object handle */
-static void obj_handle_to_location(void *handle, struct page **page,
+static void obj_handle_to_location(unsigned long handle, struct page **page,
unsigned long *obj_idx)
{
- unsigned long hval = (unsigned long)handle;
-
- *page = pfn_to_page(hval >> OBJ_INDEX_BITS);
- *obj_idx = hval & OBJ_INDEX_MASK;
+ *page = pfn_to_page(handle >> OBJ_INDEX_BITS);
+ *obj_idx = handle & OBJ_INDEX_MASK;
}
static unsigned long obj_idx_to_offset(struct page *page,
@@ -274,7 +472,7 @@
set_page_private(page, 0);
page->mapping = NULL;
page->freelist = NULL;
- reset_page_mapcount(page);
+ page_mapcount_reset(page);
}
static void free_zspage(struct page *first_page)
@@ -354,7 +552,7 @@
static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
{
int i, error;
- struct page *first_page = NULL;
+ struct page *first_page = NULL, *uninitialized_var(prev_page);
/*
* Allocate individual pages and link them together as:
@@ -368,8 +566,8 @@
* identify the last page.
*/
error = -ENOMEM;
- for (i = 0; i < class->zspage_order; i++) {
- struct page *page, *prev_page;
+ for (i = 0; i < class->pages_per_zspage; i++) {
+ struct page *page;
page = alloc_page(flags);
if (!page)
@@ -377,20 +575,19 @@
INIT_LIST_HEAD(&page->lru);
if (i == 0) { /* first page */
- set_bit(PG_private, &page->flags);
+ SetPagePrivate(page);
set_page_private(page, 0);
first_page = page;
first_page->inuse = 0;
}
if (i == 1)
- first_page->private = (unsigned long)page;
+ set_page_private(first_page, (unsigned long)page);
if (i >= 1)
page->first_page = first_page;
if (i >= 2)
list_add(&page->lru, &prev_page->lru);
- if (i == class->zspage_order - 1) /* last page */
- set_bit(PG_private_2, &page->flags);
-
+ if (i == class->pages_per_zspage - 1) /* last page */
+ SetPagePrivate2(page);
prev_page = page;
}
@@ -398,7 +595,7 @@
first_page->freelist = obj_location_to_handle(first_page, 0);
/* Maximum number of objects we can store in this zspage */
- first_page->objects = class->zspage_order * PAGE_SIZE / class->size;
+ first_page->objects = class->pages_per_zspage * PAGE_SIZE / class->size;
error = 0; /* Success */
@@ -425,34 +622,141 @@
return page;
}
+#ifdef USE_PGTABLE_MAPPING
+static inline int __zs_cpu_up(struct mapping_area *area)
+{
+ /*
+ * Make sure we don't leak memory if a cpu UP notification
+ * and zs_init() race and both call zs_cpu_up() on the same cpu
+ */
+ if (area->vm)
+ return 0;
+ area->vm = alloc_vm_area(PAGE_SIZE * 2, NULL);
+ if (!area->vm)
+ return -ENOMEM;
+ return 0;
+}
-/*
- * If this becomes a separate module, register zs_init() with
- * module_init(), zs_exit with module_exit(), and remove zs_initialized
-*/
-static int zs_initialized;
+static inline void __zs_cpu_down(struct mapping_area *area)
+{
+ if (area->vm)
+ free_vm_area(area->vm);
+ area->vm = NULL;
+}
+
+static inline void *__zs_map_object(struct mapping_area *area,
+ struct page *pages[2], int off, int size)
+{
+ BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, &pages));
+ area->vm_addr = area->vm->addr;
+ return area->vm_addr + off;
+}
+
+static inline void __zs_unmap_object(struct mapping_area *area,
+ struct page *pages[2], int off, int size)
+{
+ unsigned long addr = (unsigned long)area->vm_addr;
+
+ unmap_kernel_range(addr, PAGE_SIZE * 2);
+}
+
+#else /* USE_PGTABLE_MAPPING */
+
+static inline int __zs_cpu_up(struct mapping_area *area)
+{
+ /*
+ * Make sure we don't leak memory if a cpu UP notification
+ * and zs_init() race and both call zs_cpu_up() on the same cpu
+ */
+ if (area->vm_buf)
+ return 0;
+ area->vm_buf = (char *)__get_free_page(GFP_KERNEL);
+ if (!area->vm_buf)
+ return -ENOMEM;
+ return 0;
+}
+
+static inline void __zs_cpu_down(struct mapping_area *area)
+{
+ if (area->vm_buf)
+ free_page((unsigned long)area->vm_buf);
+ area->vm_buf = NULL;
+}
+
+static void *__zs_map_object(struct mapping_area *area,
+ struct page *pages[2], int off, int size)
+{
+ int sizes[2];
+ void *addr;
+ char *buf = area->vm_buf;
+
+ /* disable page faults to match kmap_atomic() return conditions */
+ pagefault_disable();
+
+ /* no read fastpath */
+ if (area->vm_mm == ZS_MM_WO)
+ goto out;
+
+ sizes[0] = PAGE_SIZE - off;
+ sizes[1] = size - sizes[0];
+
+ /* copy object to per-cpu buffer */
+ addr = kmap_atomic(pages[0]);
+ memcpy(buf, addr + off, sizes[0]);
+ kunmap_atomic(addr);
+ addr = kmap_atomic(pages[1]);
+ memcpy(buf + sizes[0], addr, sizes[1]);
+ kunmap_atomic(addr);
+out:
+ return area->vm_buf;
+}
+
+static void __zs_unmap_object(struct mapping_area *area,
+ struct page *pages[2], int off, int size)
+{
+ int sizes[2];
+ void *addr;
+ char *buf = area->vm_buf;
+
+ /* no write fastpath */
+ if (area->vm_mm == ZS_MM_RO)
+ goto out;
+
+ sizes[0] = PAGE_SIZE - off;
+ sizes[1] = size - sizes[0];
+
+ /* copy per-cpu buffer to object */
+ addr = kmap_atomic(pages[0]);
+ memcpy(addr + off, buf, sizes[0]);
+ kunmap_atomic(addr);
+ addr = kmap_atomic(pages[1]);
+ memcpy(addr, buf + sizes[0], sizes[1]);
+ kunmap_atomic(addr);
+
+out:
+ /* enable page faults to match kunmap_atomic() return conditions */
+ pagefault_enable();
+}
+
+#endif /* USE_PGTABLE_MAPPING */
static int zs_cpu_notifier(struct notifier_block *nb, unsigned long action,
void *pcpu)
{
- int cpu = (long)pcpu;
+ int ret, cpu = (long)pcpu;
struct mapping_area *area;
switch (action) {
case CPU_UP_PREPARE:
area = &per_cpu(zs_map_area, cpu);
- if (area->vm)
- break;
- area->vm = alloc_vm_area(2 * PAGE_SIZE, area->vm_ptes);
- if (!area->vm)
- return notifier_from_errno(-ENOMEM);
+ ret = __zs_cpu_up(area);
+ if (ret)
+ return notifier_from_errno(ret);
break;
case CPU_DEAD:
case CPU_UP_CANCELED:
area = &per_cpu(zs_map_area, cpu);
- if (area->vm)
- free_vm_area(area->vm);
- area->vm = NULL;
+ __zs_cpu_down(area);
break;
}
@@ -488,14 +792,21 @@
return notifier_to_errno(ret);
}
-struct zs_pool *zs_create_pool(const char *name, gfp_t flags)
+/**
+ * zs_create_pool - Creates an allocation pool to work from.
+ * @flags: allocation flags used to allocate pool metadata
+ *
+ * This function must be called before anything else when using
+ * the zsmalloc allocator.
+ *
+ * On success, a pointer to the newly created pool is returned,
+ * otherwise NULL.
+ */
+struct zs_pool *zs_create_pool(gfp_t flags)
{
- int i, error, ovhd_size;
+ int i, ovhd_size;
struct zs_pool *pool;
- if (!name)
- return NULL;
-
ovhd_size = roundup(sizeof(*pool), PAGE_SIZE);
pool = kzalloc(ovhd_size, GFP_KERNEL);
if (!pool)
@@ -513,31 +824,11 @@
class->size = size;
class->index = i;
spin_lock_init(&class->lock);
- class->zspage_order = get_zspage_order(size);
+ class->pages_per_zspage = get_pages_per_zspage(size);
}
- /*
- * If this becomes a separate module, register zs_init with
- * module_init, and remove this block
- */
- if (!zs_initialized) {
- error = zs_init();
- if (error)
- goto cleanup;
- zs_initialized = 1;
- }
-
pool->flags = flags;
- pool->name = name;
-
- error = 0; /* Success */
-
-cleanup:
- if (error) {
- zs_destroy_pool(pool);
- pool = NULL;
- }
return pool;
}
@@ -553,8 +844,7 @@
for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) {
if (class->fullness_list[fg]) {
- pr_info("Freeing non-empty class with size "
- "%db, fullness group %d\n",
+ pr_info("Freeing non-empty class with size %db, fullness group %d\n",
class->size, fg);
}
}
@@ -567,18 +857,14 @@
* zs_malloc - Allocate block of given size from pool.
* @pool: pool to allocate from
* @size: size of block to allocate
- * @page: page no. that holds the object
- * @offset: location of object within page
*
- * On success, <page, offset> identifies block allocated
- * and 0 is returned. On failure, <page, offset> is set to
- * 0 and -ENOMEM is returned.
- *
+ * On success, handle to the allocated object is returned,
+ * otherwise 0.
* Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
*/
-void *zs_malloc(struct zs_pool *pool, size_t size)
+unsigned long zs_malloc(struct zs_pool *pool, size_t size)
{
- void *obj;
+ unsigned long obj;
struct link_free *link;
int class_idx;
struct size_class *class;
@@ -587,7 +873,7 @@
unsigned long m_objidx, m_offset;
if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
- return NULL;
+ return 0;
class_idx = get_size_class_index(size);
class = &pool->size_class[class_idx];
@@ -600,14 +886,14 @@
spin_unlock(&class->lock);
first_page = alloc_zspage(class, pool->flags);
if (unlikely(!first_page))
- return NULL;
+ return 0;
set_zspage_mapping(first_page, class->index, ZS_EMPTY);
spin_lock(&class->lock);
- class->pages_allocated += class->zspage_order;
+ class->pages_allocated += class->pages_per_zspage;
}
- obj = first_page->freelist;
+ obj = (unsigned long)first_page->freelist;
obj_handle_to_location(obj, &m_page, &m_objidx);
m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);
@@ -626,7 +912,7 @@
}
EXPORT_SYMBOL_GPL(zs_malloc);
-void zs_free(struct zs_pool *pool, void *obj)
+void zs_free(struct zs_pool *pool, unsigned long obj)
{
struct link_free *link;
struct page *first_page, *f_page;
@@ -653,13 +939,13 @@
+ f_offset);
link->next = first_page->freelist;
kunmap_atomic(link);
- first_page->freelist = obj;
+ first_page->freelist = (void *)obj;
first_page->inuse--;
fullness = fix_fullness_group(pool, first_page);
if (fullness == ZS_EMPTY)
- class->pages_allocated -= class->zspage_order;
+ class->pages_allocated -= class->pages_per_zspage;
spin_unlock(&class->lock);
@@ -668,7 +954,22 @@
}
EXPORT_SYMBOL_GPL(zs_free);
-void *zs_map_object(struct zs_pool *pool, void *handle)
+/**
+ * zs_map_object - get address of allocated object from handle.
+ * @pool: pool from which the object was allocated
+ * @handle: handle returned from zs_malloc
+ *
+ * Before using an object allocated from zs_malloc, it must be mapped using
+ * this function. When done with the object, it must be unmapped using
+ * zs_unmap_object.
+ *
+ * Only one object can be mapped per cpu at a time. There is no protection
+ * against nested mappings.
+ *
+ * This function returns with preemption and page faults disabled.
+ */
+void *zs_map_object(struct zs_pool *pool, unsigned long handle,
+ enum zs_mapmode mm)
{
struct page *page;
unsigned long obj_idx, off;
@@ -677,38 +978,40 @@
enum fullness_group fg;
struct size_class *class;
struct mapping_area *area;
+ struct page *pages[2];
BUG_ON(!handle);
+ /*
+ * Because we use per-cpu mapping areas shared among the
+ * pools/users, we can't allow mapping in interrupt context
+ * because it can corrupt another user's mappings.
+ */
+ BUG_ON(in_interrupt());
+
obj_handle_to_location(handle, &page, &obj_idx);
get_zspage_mapping(get_first_page(page), &class_idx, &fg);
class = &pool->size_class[class_idx];
off = obj_idx_to_offset(page, obj_idx, class->size);
area = &get_cpu_var(zs_map_area);
+ area->vm_mm = mm;
if (off + class->size <= PAGE_SIZE) {
/* this object is contained entirely within a page */
area->vm_addr = kmap_atomic(page);
- } else {
- /* this object spans two pages */
- struct page *nextp;
-
- nextp = get_next_page(page);
- BUG_ON(!nextp);
-
-
- set_pte(area->vm_ptes[0], mk_pte(page, PAGE_KERNEL));
- set_pte(area->vm_ptes[1], mk_pte(nextp, PAGE_KERNEL));
-
- /* We pre-allocated VM area so mapping can never fail */
- area->vm_addr = area->vm->addr;
+ return area->vm_addr + off;
}
- return area->vm_addr + off;
+ /* this object spans two pages */
+ pages[0] = page;
+ pages[1] = get_next_page(page);
+ BUG_ON(!pages[1]);
+
+ return __zs_map_object(area, pages, off, class->size);
}
EXPORT_SYMBOL_GPL(zs_map_object);
-void zs_unmap_object(struct zs_pool *pool, void *handle)
+void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
{
struct page *page;
unsigned long obj_idx, off;
@@ -726,13 +1029,16 @@
off = obj_idx_to_offset(page, obj_idx, class->size);
area = &__get_cpu_var(zs_map_area);
- if (off + class->size <= PAGE_SIZE) {
+ if (off + class->size <= PAGE_SIZE)
kunmap_atomic(area->vm_addr);
- } else {
- set_pte(area->vm_ptes[0], __pte(0));
- set_pte(area->vm_ptes[1], __pte(0));
- __flush_tlb_one((unsigned long)area->vm_addr);
- __flush_tlb_one((unsigned long)area->vm_addr + PAGE_SIZE);
+ else {
+ struct page *pages[2];
+
+ pages[0] = page;
+ pages[1] = get_next_page(page);
+ BUG_ON(!pages[1]);
+
+ __zs_unmap_object(area, pages, off, class->size);
}
put_cpu_var(zs_map_area);
}
@@ -749,3 +1055,9 @@
return npages << PAGE_SHIFT;
}
EXPORT_SYMBOL_GPL(zs_get_total_size_bytes);
+
+module_init(zs_init);
+module_exit(zs_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
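
The obj_handle_to_location() change above makes the handle an unsigned long outright. As the encoding comment in this file explains, the page frame number sits in the high bits and the object index in the low OBJ_INDEX_BITS, so decode is a shift and a mask. A self-contained sketch of the same arithmetic (plain C, using an illustrative constant rather than the kernel's MAX_PHYSMEM_BITS derivation):

    #include <stdio.h>

    #define OBJ_INDEX_BITS  12UL    /* illustrative; the kernel derives this */
    #define OBJ_INDEX_MASK  ((1UL << OBJ_INDEX_BITS) - 1)

    static unsigned long encode(unsigned long pfn, unsigned long obj_idx)
    {
            return (pfn << OBJ_INDEX_BITS) | obj_idx;  /* <PFN, obj_idx> -> handle */
    }

    static void decode(unsigned long handle, unsigned long *pfn,
                       unsigned long *obj_idx)
    {
            *pfn = handle >> OBJ_INDEX_BITS;        /* high bits: page frame number */
            *obj_idx = handle & OBJ_INDEX_MASK;     /* low bits: index within zspage */
    }

    int main(void)
    {
            unsigned long pfn, idx;

            decode(encode(0x1234, 7), &pfn, &idx);
            printf("pfn=%#lx idx=%lu\n", pfn, idx); /* pfn=0x1234 idx=7 */
            return 0;
    }
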
diff --git a/drivers/staging/zsmalloc/zsmalloc.h b/drivers/staging/zsmalloc/zsmalloc.h
index 949384e..fbe6bec 100644
--- a/drivers/staging/zsmalloc/zsmalloc.h
+++ b/drivers/staging/zsmalloc/zsmalloc.h
@@ -15,16 +15,28 @@
#include <linux/types.h>
+/*
+ * zsmalloc mapping modes
+ *
+ * NOTE: These only make a difference when a mapped object spans pages
+ */
+enum zs_mapmode {
+ ZS_MM_RW, /* normal read-write mapping */
+ ZS_MM_RO, /* read-only (no copy-out at unmap time) */
+ ZS_MM_WO /* write-only (no copy-in at map time) */
+};
+
struct zs_pool;
-struct zs_pool *zs_create_pool(const char *name, gfp_t flags);
+struct zs_pool *zs_create_pool(gfp_t flags);
void zs_destroy_pool(struct zs_pool *pool);
-void *zs_malloc(struct zs_pool *pool, size_t size);
-void zs_free(struct zs_pool *pool, void *obj);
+unsigned long zs_malloc(struct zs_pool *pool, size_t size);
+void zs_free(struct zs_pool *pool, unsigned long obj);
-void *zs_map_object(struct zs_pool *pool, void *handle);
-void zs_unmap_object(struct zs_pool *pool, void *handle);
+void *zs_map_object(struct zs_pool *pool, unsigned long handle,
+ enum zs_mapmode mm);
+void zs_unmap_object(struct zs_pool *pool, unsigned long handle);
u64 zs_get_total_size_bytes(struct zs_pool *pool);
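
Taken together, the header changes define the new calling convention: pools are created without a name, zs_malloc() hands back an opaque unsigned long, and every access goes through a map/unmap pair with an explicit mapmode. A hedged usage sketch against this header (error handling abbreviated; store_object is a hypothetical caller):

    #include <linux/gfp.h>
    #include <linux/string.h>
    #include <linux/errno.h>
    #include "zsmalloc.h"

    static int store_object(const char *src, size_t len)
    {
            struct zs_pool *pool;
            unsigned long handle;
            void *dst;

            pool = zs_create_pool(GFP_KERNEL);      /* no name argument anymore */
            if (!pool)
                    return -ENOMEM;

            handle = zs_malloc(pool, len);          /* 0 (not NULL) signals failure */
            if (!handle) {
                    zs_destroy_pool(pool);
                    return -ENOMEM;
            }

            /* ZS_MM_WO skips the copy-in, since we overwrite the whole object */
            dst = zs_map_object(pool, handle, ZS_MM_WO);
            memcpy(dst, src, len);
            zs_unmap_object(pool, handle);

            zs_free(pool, handle);
            zs_destroy_pool(pool);
            return 0;
    }
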
diff --git a/drivers/staging/zsmalloc/zsmalloc_int.h b/drivers/staging/zsmalloc/zsmalloc_int.h
deleted file mode 100644
index 92eefc6..0000000
--- a/drivers/staging/zsmalloc/zsmalloc_int.h
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * zsmalloc memory allocator
- *
- * Copyright (C) 2011 Nitin Gupta
- *
- * This code is released using a dual license strategy: BSD/GPL
- * You can choose the license that better fits your requirements.
- *
- * Released under the terms of 3-clause BSD License
- * Released under the terms of GNU General Public License Version 2.0
- */
-
-#ifndef _ZS_MALLOC_INT_H_
-#define _ZS_MALLOC_INT_H_
-
-#include <linux/kernel.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-
-/*
- * This must be power of 2 and greater than of equal to sizeof(link_free).
- * These two conditions ensure that any 'struct link_free' itself doesn't
- * span more than 1 page which avoids complex case of mapping 2 pages simply
- * to restore link_free pointer values.
- */
-#define ZS_ALIGN 8
-
-/*
- * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single)
- * pages. ZS_MAX_ZSPAGE_ORDER defines upper limit on N.
- */
-#define ZS_MAX_ZSPAGE_ORDER 2
-#define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)
-
-/*
- * Object location (<PFN>, <obj_idx>) is encoded as
- * as single (void *) handle value.
- *
- * Note that object index <obj_idx> is relative to system
- * page <PFN> it is stored in, so for each sub-page belonging
- * to a zspage, obj_idx starts with 0.
- *
- * This is made more complicated by various memory models and PAE.
- */
-
-#ifndef MAX_PHYSMEM_BITS
-#ifdef CONFIG_HIGHMEM64G
-#define MAX_PHYSMEM_BITS 36
-#else /* !CONFIG_HIGHMEM64G */
-/*
- * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just
- * be PAGE_SHIFT
- */
-#define MAX_PHYSMEM_BITS BITS_PER_LONG
-#endif
-#endif
-#define _PFN_BITS (MAX_PHYSMEM_BITS - PAGE_SHIFT)
-#define OBJ_INDEX_BITS (BITS_PER_LONG - _PFN_BITS)
-#define OBJ_INDEX_MASK ((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
-
-#define MAX(a, b) ((a) >= (b) ? (a) : (b))
-/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
-#define ZS_MIN_ALLOC_SIZE \
- MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
-#define ZS_MAX_ALLOC_SIZE PAGE_SIZE
-
-/*
- * On systems with 4K page size, this gives 254 size classes! There is a
- * trader-off here:
- * - Large number of size classes is potentially wasteful as free page are
- * spread across these classes
- * - Small number of size classes causes large internal fragmentation
- * - Probably its better to use specific size classes (empirically
- * determined). NOTE: all those class sizes must be set as multiple of
- * ZS_ALIGN to make sure link_free itself never has to span 2 pages.
- *
- * ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
- * (reason above)
- */
-#define ZS_SIZE_CLASS_DELTA 16
-#define ZS_SIZE_CLASSES ((ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) / \
- ZS_SIZE_CLASS_DELTA + 1)
-
-/*
- * We do not maintain any list for completely empty or full pages
- */
-enum fullness_group {
- ZS_ALMOST_FULL,
- ZS_ALMOST_EMPTY,
- _ZS_NR_FULLNESS_GROUPS,
-
- ZS_EMPTY,
- ZS_FULL
-};
-
-/*
- * We assign a page to ZS_ALMOST_EMPTY fullness group when:
- * n <= N / f, where
- * n = number of allocated objects
- * N = total number of objects zspage can store
- * f = 1/fullness_threshold_frac
- *
- * Similarly, we assign zspage to:
- * ZS_ALMOST_FULL when n > N / f
- * ZS_EMPTY when n == 0
- * ZS_FULL when n == N
- *
- * (see: fix_fullness_group())
- */
-static const int fullness_threshold_frac = 4;
-
-struct mapping_area {
- struct vm_struct *vm;
- pte_t *vm_ptes[2];
- char *vm_addr;
-};
-
-struct size_class {
- /*
- * Size of objects stored in this class. Must be multiple
- * of ZS_ALIGN.
- */
- int size;
- unsigned int index;
-
- /* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
- int zspage_order;
-
- spinlock_t lock;
-
- /* stats */
- u64 pages_allocated;
-
- struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
-};
-
-/*
- * Placed within free objects to form a singly linked list.
- * For every zspage, first_page->freelist gives head of this list.
- *
- * This must be power of 2 and less than or equal to ZS_ALIGN
- */
-struct link_free {
- /* Handle of next free chunk (encodes <PFN, obj_idx>) */
- void *next;
-};
-
-struct zs_pool {
- struct size_class size_class[ZS_SIZE_CLASSES];
-
- gfp_t flags; /* allocation flags used when growing pool */
- const char *name;
-};
-
-#endif
diff --git a/drivers/usb/gadget/f_qdss.c b/drivers/usb/gadget/f_qdss.c
index f649248..dcfa2bc 100644
--- a/drivers/usb/gadget/f_qdss.c
+++ b/drivers/usb/gadget/f_qdss.c
@@ -605,7 +605,7 @@
spin_lock_irqsave(&d_lock, flags);
list_for_each_entry(ch, &usb_qdss_ch_list, list) {
- if (!strncmp(name, ch->name, sizeof(ch->name))) {
+ if (!strcmp(name, ch->name)) {
found = 1;
break;
}
@@ -767,7 +767,7 @@
spin_lock_irqsave(&d_lock, flags);
/* Check if we already have a channel with this name */
list_for_each_entry(ch, &usb_qdss_ch_list, list) {
- if (!strncmp(name, ch->name, sizeof(ch->name))) {
+ if (!strcmp(name, ch->name)) {
found = 1;
break;
}
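
The strncmp-to-strcmp change above matters because ch->name is a pointer, so sizeof(ch->name) is the size of the pointer (4 or 8 bytes), not the string length — two channel names sharing a prefix of that many characters would compare equal. A small demonstration of the pitfall (channel names hypothetical, assuming a pointer member as the fix implies):

    #include <stdio.h>
    #include <string.h>

    struct ch { const char *name; };

    int main(void)
    {
            struct ch c = { .name = "qdss_mdm_extra" };
            const char *lookup = "qdss_mdm";

            /* sizeof(c.name) is pointer size (8 on 64-bit): only 8 chars compare,
             * so this reports a false match (prints 0) */
            printf("strncmp: %d\n", strncmp(lookup, c.name, sizeof(c.name)));
            /* strcmp compares the full strings and correctly returns non-zero */
            printf("strcmp:  %d\n", strcmp(lookup, c.name));
            return 0;
    }
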
diff --git a/drivers/usb/host/ehci-msm-hsic.c b/drivers/usb/host/ehci-msm-hsic.c
index 5d58f16..5f20ad1 100644
--- a/drivers/usb/host/ehci-msm-hsic.c
+++ b/drivers/usb/host/ehci-msm-hsic.c
@@ -1947,7 +1947,6 @@
if (pdev->dev.of_node) {
dev_dbg(&pdev->dev, "device tree enabled\n");
pdev->dev.platform_data = msm_hsic_dt_to_pdata(pdev);
- dev_set_name(&pdev->dev, ehci_msm_hsic_driver.driver.name);
} else {
/* explicitly pass wakeup_irq flag for !DT */
wakeup_irq_flags = IRQF_TRIGGER_HIGH;
diff --git a/drivers/video/msm/mdss/mdss_dsi.c b/drivers/video/msm/mdss/mdss_dsi.c
index 865775a..927cfa9 100644
--- a/drivers/video/msm/mdss/mdss_dsi.c
+++ b/drivers/video/msm/mdss/mdss_dsi.c
@@ -1216,7 +1216,8 @@
}
}
- if (gpio_is_valid(ctrl_pdata->disp_te_gpio)) {
+ if (gpio_is_valid(ctrl_pdata->disp_te_gpio) &&
+ pinfo->type == MIPI_CMD_PANEL) {
rc = gpio_request(ctrl_pdata->disp_te_gpio, "disp_te");
if (rc) {
pr_err("request TE gpio failed, rc=%d\n",
diff --git a/drivers/video/msm/mdss/mdss_mdp_ctl.c b/drivers/video/msm/mdss/mdss_mdp_ctl.c
index 82937e3..4f9045e 100644
--- a/drivers/video/msm/mdss/mdss_mdp_ctl.c
+++ b/drivers/video/msm/mdss/mdss_mdp_ctl.c
@@ -28,7 +28,7 @@
#define MDSS_MDP_BUS_FUDGE_FACTOR_IB(val) (((val) / 2) * 3)
#define MDSS_MDP_BUS_FUDGE_FACTOR_HIGH_IB(val) (val << 1)
#define MDSS_MDP_BUS_FUDGE_FACTOR_AB(val) (val << 1)
-#define MDSS_MDP_BUS_FLOOR_BW (3200000000ULL >> MDSS_MDP_BUS_FACTOR_SHIFT)
+#define MDSS_MDP_BUS_FLOOR_BW (1600000000ULL >> MDSS_MDP_BUS_FACTOR_SHIFT)
/* 1.25 clock fudge factor */
#define MDSS_MDP_CLK_FUDGE_FACTOR(val) (((val) * 5) / 4)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a5fa304..2218ac4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -806,6 +806,17 @@
return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
}
+extern struct address_space *__page_file_mapping(struct page *);
+
+static inline
+struct address_space *page_file_mapping(struct page *page)
+{
+ if (unlikely(PageSwapCache(page)))
+ return __page_file_mapping(page);
+
+ return page->mapping;
+}
+
static inline int PageAnon(struct page *page)
{
return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
@@ -822,6 +833,20 @@
return page->index;
}
+extern pgoff_t __page_file_index(struct page *page);
+
+/*
+ * Return the file index of the page. Regular pagecache pages use ->index
+ * whereas swapcache pages use swp_offset(->private)
+ */
+static inline pgoff_t page_file_index(struct page *page)
+{
+ if (unlikely(PageSwapCache(page)))
+ return __page_file_index(page);
+
+ return page->index;
+}
+
/*
* Return true if this page is mapped into pagetables.
*/
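
These helpers let filesystem and block code treat swapcache pages uniformly with pagecache pages: a normal page falls through to ->mapping and ->index, while a PageSwapCache page is redirected to the swap file's mapping and its swp_offset. The same pattern underlies page_file_offset() added to pagemap.h below. A hedged sketch of a caller that works for both kinds of page (backing_offset is a hypothetical helper):

    #include <linux/mm.h>
    #include <linux/pagemap.h>

    /* byte offset of @page within its backing file, valid for both
     * pagecache and swapcache pages */
    static loff_t backing_offset(struct page *page,
                                 struct address_space **mapping)
    {
            *mapping = page_file_mapping(page);  /* swap file mapping if swapcache */
            return (loff_t)page_file_index(page) << PAGE_CACHE_SHIFT;
    }
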
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index d0ad3e4..1bcfd28 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -219,6 +219,7 @@
MMC_BLK_NEW_REQUEST,
MMC_BLK_URGENT,
MMC_BLK_URGENT_DONE,
+ MMC_BLK_NO_REQ_TO_STOP,
};
struct mmc_wr_pack_stats {
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 0844dc3..696ca39 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -295,6 +295,11 @@
return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}
+static inline loff_t page_file_offset(struct page *page)
+{
+ return ((loff_t)page_file_index(page)) << PAGE_CACHE_SHIFT;
+}
+
extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
unsigned long address);
diff --git a/include/linux/swap.h b/include/linux/swap.h
index d5bd6ee..036b107 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -351,6 +351,8 @@
extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
+extern int page_swapcount(struct page *);
+extern struct swap_info_struct *page_swap_info(struct page *);
extern int reuse_swap_page(struct page *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index fafc26d..a6c07fd 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -31,6 +31,7 @@
#include <linux/memcontrol.h>
#include <linux/poll.h>
#include <linux/oom.h>
+#include <linux/export.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
@@ -601,7 +602,7 @@
* This does not give an exact answer when swap count is continued,
* but does include the high COUNT_CONTINUED flag to allow for that.
*/
-static inline int page_swapcount(struct page *page)
+int page_swapcount(struct page *page)
{
int count = 0;
struct swap_info_struct *p;
@@ -2292,6 +2293,31 @@
return __swap_duplicate(entry, SWAP_HAS_CACHE);
}
+struct swap_info_struct *page_swap_info(struct page *page)
+{
+ swp_entry_t swap = { .val = page_private(page) };
+ BUG_ON(!PageSwapCache(page));
+ return swap_info[swp_type(swap)];
+}
+
+/*
+ * out-of-line __page_file_ methods to avoid include hell.
+ */
+struct address_space *__page_file_mapping(struct page *page)
+{
+ VM_BUG_ON(!PageSwapCache(page));
+ return page_swap_info(page)->swap_file->f_mapping;
+}
+EXPORT_SYMBOL_GPL(__page_file_mapping);
+
+pgoff_t __page_file_index(struct page *page)
+{
+ swp_entry_t swap = { .val = page_private(page) };
+ VM_BUG_ON(!PageSwapCache(page));
+ return swp_offset(swap);
+}
+EXPORT_SYMBOL_GPL(__page_file_index);
+
/*
* add_swap_count_continuation - called when a swap count is duplicated
* beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's
diff --git a/sound/soc/codecs/wcd9306.c b/sound/soc/codecs/wcd9306.c
index da99254..aaa132e 100644
--- a/sound/soc/codecs/wcd9306.c
+++ b/sound/soc/codecs/wcd9306.c
@@ -3154,13 +3154,27 @@
struct snd_soc_dai *dai)
{
struct wcd9xxx *tapan_core = dev_get_drvdata(dai->codec->dev->parent);
+ struct tapan_priv *tapan = snd_soc_codec_get_drvdata(dai->codec);
+ u32 active = 0;
+
dev_dbg(dai->codec->dev, "%s(): substream = %s stream = %d\n",
__func__, substream->name, substream->stream);
+
+ if (dai->id <= NUM_CODEC_DAIS) {
+ if (tapan->dai[dai->id].ch_mask) {
+ active = 1;
+ dev_dbg(dai->codec->dev, "%s(): Codec DAI: chmask[%d] = 0x%lx\n",
+ __func__, dai->id,
+ tapan->dai[dai->id].ch_mask);
+ }
+ }
if ((tapan_core != NULL) &&
(tapan_core->dev != NULL) &&
- (tapan_core->dev->parent != NULL)) {
+ (tapan_core->dev->parent != NULL) &&
+ (active == 0)) {
pm_runtime_mark_last_busy(tapan_core->dev->parent);
pm_runtime_put(tapan_core->dev->parent);
+ dev_dbg(dai->codec->dev, "%s: unvote requested", __func__);
}
}
@@ -3920,6 +3934,13 @@
dev_dbg(codec->dev, "%s: Disconnect RX port, ret = %d\n",
__func__, ret);
}
+ if ((core != NULL) &&
+ (core->dev != NULL) &&
+ (core->dev->parent != NULL)) {
+ pm_runtime_mark_last_busy(core->dev->parent);
+ pm_runtime_put(core->dev->parent);
+ dev_dbg(codec->dev, "%s: unvote requested", __func__);
+ }
break;
}
return ret;
@@ -3967,6 +3988,13 @@
dev_dbg(codec->dev, "%s: Disconnect RX port, ret = %d\n",
__func__, ret);
}
+ if ((core != NULL) &&
+ (core->dev != NULL) &&
+ (core->dev->parent != NULL)) {
+ pm_runtime_mark_last_busy(core->dev->parent);
+ pm_runtime_put(core->dev->parent);
+ dev_dbg(codec->dev, "%s: unvote requested", __func__);
+ }
break;
}
return ret;
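
The pattern being added in the codec shutdown paths above is the usual runtime-PM "unvote": mark the device busy so the autosuspend timer restarts from now, then drop the usage count. A generic hedged sketch of the pairing (function names hypothetical; in the patch the device is the codec core's parent):

    #include <linux/pm_runtime.h>

    static void audio_path_get(struct device *dev)
    {
            pm_runtime_get_sync(dev);               /* vote: keep device powered */
    }

    static void audio_path_put(struct device *dev)
    {
            pm_runtime_mark_last_busy(dev);         /* restart autosuspend timer */
            pm_runtime_put(dev);                    /* unvote: allow suspend */
    }
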
diff --git a/sound/soc/codecs/wcd9320.c b/sound/soc/codecs/wcd9320.c
index 9cad1e5..bc02513 100644
--- a/sound/soc/codecs/wcd9320.c
+++ b/sound/soc/codecs/wcd9320.c
@@ -4974,12 +4974,6 @@
/*Enable spkr VI clocks*/
snd_soc_update_bits(codec,
TAIKO_A_CDC_CLK_TX_CLK_EN_B2_CTL, 0xC, 0xC);
- /*Enable Voltage Decimator*/
- snd_soc_update_bits(codec,
- TAIKO_A_CDC_CONN_TX_SB_B9_CTL, 0x1F, 0x12);
- /*Enable Current Decimator*/
- snd_soc_update_bits(codec,
- TAIKO_A_CDC_CONN_TX_SB_B10_CTL, 0x1F, 0x13);
(void) taiko_codec_enable_slim_chmask(dai, true);
ret = wcd9xxx_cfg_slim_sch_tx(core, &dai->wcd9xxx_ch_list,
dai->rate, dai->bit_width,
@@ -4991,13 +4985,6 @@
if (ret)
pr_err("%s error in close_slim_sch_tx %d\n",
__func__, ret);
- /*Disable Voltage decimator*/
- snd_soc_update_bits(codec,
- TAIKO_A_CDC_CONN_TX_SB_B9_CTL, 0x1F, 0x0);
- /*Disable Current decimator*/
- snd_soc_update_bits(codec,
- TAIKO_A_CDC_CONN_TX_SB_B10_CTL, 0x1F, 0x0);
- /*Disable spkr VI clocks*/
snd_soc_update_bits(codec, TAIKO_A_CDC_CLK_TX_CLK_EN_B2_CTL,
0xC, 0x0);
/*Disable V&I sensing*/
diff --git a/sound/soc/codecs/wcd9xxx-mbhc.c b/sound/soc/codecs/wcd9xxx-mbhc.c
index 8d6c4bc..66a07f4 100644
--- a/sound/soc/codecs/wcd9xxx-mbhc.c
+++ b/sound/soc/codecs/wcd9xxx-mbhc.c
@@ -222,6 +222,19 @@
pr_debug("Polling is not active, do not start polling\n");
return;
}
+
+ /*
+ * setup internal micbias if codec uses internal micbias for
+ * headset detection
+ */
+ if (mbhc->mbhc_cfg->use_int_rbias && !mbhc->int_rbias_on) {
+ if (mbhc->mbhc_cb && mbhc->mbhc_cb->setup_int_rbias)
+ mbhc->mbhc_cb->setup_int_rbias(codec, true);
+ else
+ pr_err("%s: internal bias requested but codec did not provide callback\n",
+ __func__);
+ }
+
snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0x04);
if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mux_bias_block)
mbhc->mbhc_cb->enable_mux_bias_block(codec);
@@ -1090,12 +1103,12 @@
* setup internal micbias if codec uses internal micbias for
* headset detection
*/
- if (mbhc->mbhc_cfg->use_int_rbias) {
+ if (mbhc->mbhc_cfg->use_int_rbias && !mbhc->int_rbias_on) {
if (mbhc->mbhc_cb && mbhc->mbhc_cb->setup_int_rbias)
mbhc->mbhc_cb->setup_int_rbias(codec, true);
else
- pr_err("%s: internal bias is requested but codec did not provide callback\n",
- __func__);
+ pr_err("%s: internal bias requested but codec did not provide callback\n",
+ __func__);
}
snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1, 0x05, 0x01);
@@ -1289,6 +1302,7 @@
const struct wcd9xxx_mbhc_plug_type_cfg *plug_type =
WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(mbhc->mbhc_cfg->calibration);
s16 hs_max, no_mic, dce_z;
+ int highhph_cnt = 0;
pr_debug("%s: enter\n", __func__);
pr_debug("%s: event_state 0x%lx\n", __func__, event_state);
@@ -1313,9 +1327,10 @@
d->_vdces = vdce;
if (d->_vdces < no_mic)
d->_type = PLUG_TYPE_HEADPHONE;
- else if (d->_vdces >= hs_max)
+ else if (d->_vdces >= hs_max) {
d->_type = PLUG_TYPE_HIGH_HPH;
- else
+ highhph_cnt++;
+ } else
d->_type = PLUG_TYPE_HEADSET;
pr_debug("%s: DCE #%d, %04x, V %04d(%04d), HPHL %d TYPE %d\n",
@@ -1350,7 +1365,8 @@
goto exit;
}
- delta_thr = highhph ? WCD9XXX_MB_MEAS_DELTA_MAX_MV :
+ delta_thr = ((highhph_cnt == sz) || highhph) ?
+ WCD9XXX_MB_MEAS_DELTA_MAX_MV :
WCD9XXX_CS_MEAS_DELTA_MAX_MV;
for (i = 0, d = dt; i < sz; i++, d++) {
@@ -3171,6 +3187,19 @@
goto done;
}
+ /*
+ * setup internal micbias if codec uses internal micbias for
+ * headset detection
+ */
+ if (mbhc->mbhc_cfg->use_int_rbias && !mbhc->int_rbias_on) {
+ if (mbhc->mbhc_cb && mbhc->mbhc_cb->setup_int_rbias)
+ mbhc->mbhc_cb->setup_int_rbias(codec, true);
+ else
+ pr_err("%s: internal bias requested but codec did not provide callback\n",
+ __func__);
+ }
+
+
/* Measure scaled HW DCE */
vddio = (mbhc->mbhc_data.micb_mv != VDDIO_MICBIAS_MV &&
mbhc->mbhc_micbias_switched);
@@ -3991,11 +4020,13 @@
* headset detection
*/
if (mbhc->mbhc_cfg->use_int_rbias) {
- if (mbhc->mbhc_cb && mbhc->mbhc_cb->setup_int_rbias)
+ if (mbhc->mbhc_cb && mbhc->mbhc_cb->setup_int_rbias) {
mbhc->mbhc_cb->setup_int_rbias(codec, true);
- else
- pr_info("%s: internal bias is requested but codec did not provide callback\n",
+ mbhc->int_rbias_on = true;
+ } else {
+ pr_info("%s: internal bias requested but codec did not provide callback\n",
__func__);
+ }
}
/*
@@ -4150,6 +4181,7 @@
case WCD9XXX_EVENT_PRE_MICBIAS_2_ON:
case WCD9XXX_EVENT_PRE_MICBIAS_3_ON:
case WCD9XXX_EVENT_PRE_MICBIAS_4_ON:
+ mbhc->int_rbias_on = true;
if (mbhc->mbhc_cfg && mbhc->mbhc_cfg->micbias ==
wcd9xxx_event_to_micbias(event)) {
wcd9xxx_switch_micbias(mbhc, 0);
@@ -4177,6 +4209,7 @@
case WCD9XXX_EVENT_POST_MICBIAS_2_OFF:
case WCD9XXX_EVENT_POST_MICBIAS_3_OFF:
case WCD9XXX_EVENT_POST_MICBIAS_4_OFF:
+ mbhc->int_rbias_on = false;
if (mbhc->mbhc_cfg && mbhc->mbhc_cfg->micbias ==
wcd9xxx_event_to_micbias(event)) {
if (mbhc->event_state &
@@ -4472,6 +4505,7 @@
mbhc->mbhc_cb = mbhc_cb;
mbhc->intr_ids = mbhc_cdc_intr_ids;
mbhc->impedance_detect = impedance_det_en;
+ mbhc->int_rbias_on = false;
if (mbhc->intr_ids == NULL) {
pr_err("%s: Interrupt mapping not provided\n", __func__);
diff --git a/sound/soc/codecs/wcd9xxx-mbhc.h b/sound/soc/codecs/wcd9xxx-mbhc.h
index 9d0afe9..7fe9538 100644
--- a/sound/soc/codecs/wcd9xxx-mbhc.h
+++ b/sound/soc/codecs/wcd9xxx-mbhc.h
@@ -336,7 +336,7 @@
u32 rco_clk_rate;
bool update_z;
-
+ bool int_rbias_on;
/* Holds codec specific interrupt mapping */
const struct wcd9xxx_mbhc_intr *intr_ids;
diff --git a/sound/soc/msm/msm8974.c b/sound/soc/msm/msm8974.c
index d7ddca2..7f8736c 100644
--- a/sound/soc/msm/msm8974.c
+++ b/sound/soc/msm/msm8974.c
@@ -58,16 +58,9 @@
#define LO_2_SPK_AMP 0x4
#define LO_4_SPK_AMP 0x8
-#define LPAIF_OFFSET 0xFE000000
-#define LPAIF_PRI_MODE_MUXSEL (LPAIF_OFFSET + 0x2B000)
-#define LPAIF_SEC_MODE_MUXSEL (LPAIF_OFFSET + 0x2C000)
-#define LPAIF_TER_MODE_MUXSEL (LPAIF_OFFSET + 0x2D000)
-#define LPAIF_QUAD_MODE_MUXSEL (LPAIF_OFFSET + 0x2E000)
-
#define I2S_PCM_SEL 1
#define I2S_PCM_SEL_OFFSET 1
-
#define WCD9XXX_MBHC_DEF_BUTTONS 8
#define WCD9XXX_MBHC_DEF_RLOADS 5
#define TAIKO_EXT_CLK_RATE 9600000
@@ -146,6 +139,7 @@
struct msm_auxpcm_ctrl {
struct msm_auxpcm_gpio *pin_data;
u32 cnt;
+ void __iomem *mux;
};
struct msm8974_asoc_mach_data {
@@ -173,9 +167,6 @@
{"SEC_AUXPCM_DOUT", "qcom,sec-auxpcm-gpio-dout"},
};
-void *lpaif_pri_muxsel_virt_addr;
-void *lpaif_sec_muxsel_virt_addr;
-
struct msm8974_liquid_dock_dev {
int dock_plug_gpio;
int dock_plug_irq;
@@ -1192,12 +1183,14 @@
goto err;
}
if (atomic_inc_return(&prim_auxpcm_rsc_ref) == 1) {
- if (lpaif_pri_muxsel_virt_addr != NULL)
+ if (auxpcm_ctrl->mux != NULL) {
iowrite32(I2S_PCM_SEL << I2S_PCM_SEL_OFFSET,
- lpaif_pri_muxsel_virt_addr);
- else
- pr_err("%s lpaif_pri_muxsel_virt_addr is NULL\n",
- __func__);
+ auxpcm_ctrl->mux);
+ } else {
+ pr_err("%s: Pri AUXPCM MUX addr is NULL\n", __func__);
+ ret = -EINVAL;
+ goto err;
+ }
ret = msm_aux_pcm_get_gpios(auxpcm_ctrl);
}
if (ret < 0) {
@@ -1247,12 +1240,14 @@
goto err;
}
if (atomic_inc_return(&sec_auxpcm_rsc_ref) == 1) {
- if (lpaif_sec_muxsel_virt_addr != NULL)
+ if (auxpcm_ctrl->mux != NULL) {
iowrite32(I2S_PCM_SEL << I2S_PCM_SEL_OFFSET,
- lpaif_sec_muxsel_virt_addr);
- else
- pr_err("%s lpaif_sec_muxsel_virt_addr is NULL\n",
- __func__);
+ auxpcm_ctrl->mux);
+ } else {
+ pr_err("%s Sec AUXPCM MUX addr is NULL\n", __func__);
+ ret = -EINVAL;
+ goto err;
+ }
ret = msm_aux_pcm_get_gpios(auxpcm_ctrl);
}
if (ret < 0) {
@@ -2667,6 +2662,8 @@
int ret;
const char *auxpcm_pri_gpio_set = NULL;
const char *prop_name_ult_lo_gpio = "qcom,ext-ult-lo-amp-gpio";
+ struct resource *pri_muxsel;
+ struct resource *sec_muxsel;
if (!pdev->dev.of_node) {
dev_err(&pdev->dev, "No platform supplied from device tree\n");
@@ -2794,7 +2791,6 @@
}
}
-
pdata->us_euro_gpio = of_get_named_gpio(pdev->dev.of_node,
"qcom,us-euro-gpios", 0);
if (pdata->us_euro_gpio < 0) {
@@ -2821,28 +2817,49 @@
goto err1;
}
if (!strcmp(auxpcm_pri_gpio_set, "prim-gpio-prim")) {
- lpaif_pri_muxsel_virt_addr = ioremap(LPAIF_PRI_MODE_MUXSEL, 4);
+ pri_muxsel = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "lpaif_pri_mode_muxsel");
} else if (!strcmp(auxpcm_pri_gpio_set, "prim-gpio-tert")) {
- lpaif_pri_muxsel_virt_addr = ioremap(LPAIF_TER_MODE_MUXSEL, 4);
+ pri_muxsel = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "lpaif_tert_mode_muxsel");
} else {
dev_err(&pdev->dev, "Invalid value %s for AUXPCM GPIO set\n",
auxpcm_pri_gpio_set);
ret = -EINVAL;
goto err1;
}
- if (lpaif_pri_muxsel_virt_addr == NULL) {
- pr_err("%s Pri muxsel virt addr is null\n", __func__);
- ret = -EINVAL;
- goto err1;
+ if (!pri_muxsel) {
+ dev_err(&pdev->dev, "MUX addr invalid for primary AUXPCM\n");
+ ret = -ENODEV;
+ goto err1;
+ } else {
+ pdata->pri_auxpcm_ctrl->mux = ioremap(pri_muxsel->start,
+ resource_size(pri_muxsel));
+ if (pdata->pri_auxpcm_ctrl->mux == NULL) {
+ pr_err("%s Pri muxsel virt addr is null\n", __func__);
+ ret = -EINVAL;
+ goto err1;
+ }
}
- lpaif_sec_muxsel_virt_addr = ioremap(LPAIF_SEC_MODE_MUXSEL, 4);
- if (lpaif_sec_muxsel_virt_addr == NULL) {
+
+ sec_muxsel = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "lpaif_sec_mode_muxsel");
+ if (!sec_muxsel) {
+ dev_err(&pdev->dev, "MUX addr invalid for secondary AUXPCM\n");
+ ret = -ENODEV;
+ goto err2;
+ }
+ pdata->sec_auxpcm_ctrl->mux = ioremap(sec_muxsel->start,
+ resource_size(sec_muxsel));
+ if (pdata->sec_auxpcm_ctrl->mux == NULL) {
pr_err("%s Sec muxsel virt addr is null\n", __func__);
ret = -EINVAL;
- goto err1;
+ goto err2;
}
return 0;
+err2:
+ iounmap(pdata->pri_auxpcm_ctrl->mux);
err1:
if (ext_ult_lo_amp_gpio >= 0)
gpio_free(ext_ult_lo_amp_gpio);
@@ -2896,8 +2913,8 @@
msm8974_liquid_dock_dev = NULL;
}
- iounmap(lpaif_pri_muxsel_virt_addr);
- iounmap(lpaif_sec_muxsel_virt_addr);
+ iounmap(pdata->pri_auxpcm_ctrl->mux);
+ iounmap(pdata->sec_auxpcm_ctrl->mux);
snd_soc_unregister_card(card);
return 0;
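
The machine-driver change above stops hardcoding LPAIF MUX physical addresses and instead pulls them from named reg entries in the device tree, which is why the sound-card bindings now require reg/reg-names. The lookup-and-map idiom, sketched generically (helper name hypothetical):

    #include <linux/platform_device.h>
    #include <linux/io.h>

    static void __iomem *map_named_reg(struct platform_device *pdev,
                                       const char *name)
    {
            struct resource *res;

            res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
            if (!res)
                    return NULL;    /* DT did not provide this reg-names entry */

            return ioremap(res->start, resource_size(res));
    }

    /* usage: mux = map_named_reg(pdev, "lpaif_pri_mode_muxsel"); */
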
diff --git a/sound/soc/msm/qdsp6v2/audio_ocmem.c b/sound/soc/msm/qdsp6v2/audio_ocmem.c
index 93b3597..4a25606 100644
--- a/sound/soc/msm/qdsp6v2/audio_ocmem.c
+++ b/sound/soc/msm/qdsp6v2/audio_ocmem.c
@@ -879,6 +879,15 @@
pr_debug("%s\n", __func__);
+ audio_ocmem_lcl.audio_hdl = ocmem_notifier_register(OCMEM_LP_AUDIO,
+ &audio_ocmem_client_nb);
+ if (PTR_RET(audio_ocmem_lcl.audio_hdl) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ else if (!audio_ocmem_lcl.audio_hdl) {
+ pr_err("%s: Failed to get ocmem handle %d\n", __func__,
+ OCMEM_LP_AUDIO);
+ return -ENODEV;
+ }
subsys_notif_register_notifier("adsp", &anb);
audio_ocmem_lcl.ocmem_dump_addr =
@@ -944,12 +953,6 @@
ret = -EFAULT;
goto destroy_voice_wq;
}
- audio_ocmem_lcl.audio_hdl = ocmem_notifier_register(OCMEM_LP_AUDIO,
- &audio_ocmem_client_nb);
- if (audio_ocmem_lcl.audio_hdl == NULL) {
- pr_err("%s: Failed to get ocmem handle %d\n", __func__,
- OCMEM_LP_AUDIO);
- }
audio_ocmem_lcl.lp_memseg_ptr = NULL;
return 0;
destroy_voice_wq:
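
Moving ocmem_notifier_register() to the top of probe lets the driver propagate -EPROBE_DEFER before any other setup has been done, so the probe can be retried cleanly once the OCMEM provider appears. A hedged sketch of the defer-first ordering (get_provider_handle is hypothetical; PTR_RET, later renamed PTR_ERR_OR_ZERO, returns 0 for valid pointers and the error for ERR_PTR values):

    #include <linux/err.h>
    #include <linux/errno.h>

    extern void *get_provider_handle(void);  /* hypothetical provider lookup */

    static int example_probe(void)
    {
            void *hdl = get_provider_handle();  /* may be ERR_PTR(-EPROBE_DEFER) */

            if (PTR_RET(hdl) == -EPROBE_DEFER)
                    return -EPROBE_DEFER;   /* retry later; nothing to undo yet */
            if (!hdl)
                    return -ENODEV;         /* provider present, registration failed */

            /* ...only now allocate workqueues, dump buffers, etc. ... */
            return 0;
    }
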