Merge "msm-fb: Fix sysfs node creation in mdp_probe for MDP3 targets"
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_watchdog.txt b/Documentation/devicetree/bindings/arm/msm/msm_watchdog.txt
index 9f0c922..a665431 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm_watchdog.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm_watchdog.txt
@@ -12,14 +12,17 @@
The device tree parameters for the watchdog are:
-Required parameters:
+Required properties:
- compatible : "qcom,msm-watchdog"
- reg : offset and length of the register set for the watchdog block.
- interrupts : should contain bark and bite irq numbers
- qcom,pet-time : Non zero time interval at which watchdog should be pet in ms.
- qcom,bark-time : Non zero timeout value for a watchdog bark in ms.
-- qcom,ipi-ping : send keep alive ping to other cpus if set to 1 else set to 0.
+
+Optional properties:
+
+- qcom,ipi-ping : (boolean) send a keep-alive ping to the other CPUs if this property is present
Example:
@@ -29,5 +32,5 @@
interrupts = <0 3 0 0 4 0>;
qcom,bark-time = <11000>;
qcom,pet-time = <10000>;
- qcom,ipi-ping = <1>;
+ qcom,ipi-ping;
};
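
The driver side of this change is a single boolean lookup; a minimal sketch of the parse path (mirroring the msm_watchdog_v2.c hunk later in this series — the helper name and surrounding probe code are illustrative):

	#include <linux/of.h>
	#include <linux/platform_device.h>

	/* Sketch: the property's mere presence enables the IPI ping. */
	static void wdog_parse_ipi_ping(struct platform_device *pdev, bool *do_ipi_ping)
	{
		struct device_node *node = pdev->dev.of_node;

		*do_ipi_ping = of_property_read_bool(node, "qcom,ipi-ping");
	}
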
diff --git a/Documentation/devicetree/bindings/crypto/msm/qcedev.txt b/Documentation/devicetree/bindings/crypto/msm/qcedev.txt
index c50a6c3..2cc2696 100644
--- a/Documentation/devicetree/bindings/crypto/msm/qcedev.txt
+++ b/Documentation/devicetree/bindings/crypto/msm/qcedev.txt
@@ -3,15 +3,16 @@
Required properties:
- compatible : should be "qcom,qcedev"
- reg : should contain crypto, BAM register map.
+ - reg-names : should contain the crypto and BAM base register names.
- interrupts : should contain crypto BAM interrupt.
- qcom,bam-pipe-pair : should contain crypto BAM pipe pair index.
Example:
- qcom,qcedev@fd440000 {
+ qcom,qcedev@fd440000 {
compatible = "qcom,qcedev";
reg = <0xfd440000 0x20000>,
- <0xfd444000 0x8000>;
+ <0xfd444000 0x8000>;
reg-names = "crypto-base","crypto-bam-base";
interrupts = <0 235 0>;
qcom,bam-pipe-pair = <0>;
diff --git a/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt b/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
index 1b0f703..4f9dd06 100644
--- a/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
+++ b/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
@@ -3,6 +3,7 @@
Required properties:
- compatible : should be "qcom,qcrypto"
- reg : should contain crypto, BAM register map.
+ - reg-names : should contain the crypto and BAM base register names.
- interrupts : should contain crypto BAM interrupt.
- qcom,bam-pipe-pair : should contain crypto BAM pipe pair.
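
With reg-names in place, the qcedev/qcrypto probe code can look the two register blocks up by name instead of by index; an illustrative sketch (the helper name is an assumption, the resource names are the ones required by this binding):

	#include <linux/errno.h>
	#include <linux/ioport.h>
	#include <linux/platform_device.h>

	/* Sketch: fetch the blocks declared via "reg"/"reg-names". */
	static int crypto_get_resources(struct platform_device *pdev,
					struct resource **crypto_res,
					struct resource **bam_res)
	{
		*crypto_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
							   "crypto-base");
		*bam_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
							"crypto-bam-base");
		return (*crypto_res && *bam_res) ? 0 : -ENODEV;
	}
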
diff --git a/Documentation/devicetree/bindings/iommu/msm_iommu.txt b/Documentation/devicetree/bindings/iommu/msm_iommu.txt
index 7872280..f093f51 100644
--- a/Documentation/devicetree/bindings/iommu/msm_iommu.txt
+++ b/Documentation/devicetree/bindings/iommu/msm_iommu.txt
@@ -7,6 +7,8 @@
Optional properties:
- qcom,iommu-secure-id : Secure identifier for the IOMMU block
+- qcom,secure-context : boolean indicating that a context is secure and
+ programmed by the secure environment.
- List of sub nodes, one for each of the translation context banks supported.
Each sub node has the following required properties:
diff --git a/Documentation/devicetree/bindings/misc/isa1200.txt b/Documentation/devicetree/bindings/misc/isa1200.txt
new file mode 100644
index 0000000..b30782d
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/isa1200.txt
@@ -0,0 +1,65 @@
+Imagis ISA1200 Haptics Vibrator
+
+Required properties:
+ - reg: slave address of bus
+ - compatible: should be "imagis,isa1200"
+ - label: name for vibrator directory
+ - imagis,hap-en-gpio: haptic enable gpio
+ - imagis,mode-ctrl: mode of the controller, 0 = POWER_DOWN_MODE,
+ 1 = PWM_INPUT_MODE, 2 = PWM_GEN_MODE, 3 = WAVE_GEN_MODE
+ - imagis,max-timeout: maximum timeout for vibration
+ - imagis,chip-en: chip enable
+ - vcc_i2c-supply: regulator supplying i2c bus
+
+Regulator described as a child of the main device:
+ - regulator-name: A string used as a descriptive name for regulator outputs,
+ should match vcc_i2c above
+ - regulator-min-microvolt: smallest voltage consumers may set
+ - regulator-max-microvolt: largest voltage consumers may set
+ - regulator-max-microamp: largest current consumers may set
+
+Optional properties:
+ - imagis,smart-en: automatically control haptic power based on pwm/clk signal
+ - imagis,is-erm: the actuator is a DC (ERM) motor, use the ERM driving method
+ - imagis,overdrive-high: overdrive high
+ - imagis,overdrive-en: enable overdrive
+ - imagis,pwm-freq: pwm frequency (Hz)
+ - imagis,pwm-ch-id: pwm channel id
+ - imagis,pwm-div: pwm division to be used for vibration
+ - imagis,need-pwm-clk: the driver needs the "pwm_clk" clock
+ - imagis,hap-len-gpio: haptic ldo enable gpio
+ - imagis,ext-clk-en: use external clock
+ - xyz-supply: to be used if additional regulators are required beyond
+ "imagis,regulator" above
+
+Any additional regulators are described as child nodes of main device:
+ - regulator-name: A string used as a descriptive name for regulator outputs,
+ should match supply "xyz"
+ - regulator-min-microvolt: smallest voltage consumers may set
+ - regulator-max-microvolt: largest voltage consumers may set
+ - regulator-max-microamp: largest current consumers may set
+
+Example:
+ i2c@f9967000 {
+ isa1200@48 {
+ status = "okay";
+ reg = <0x48>;
+ vcc_i2c-supply = <&pm8941_s3>;
+ compatible = "imagis,isa1200";
+ label = "vibrator";
+ imagis,chip-en;
+ imagis,smart-en;
+ imagis,need-pwm-clk;
+ imagis,ext-clk-en;
+ imagis,hap-en-gpio = <&msmgpio 86 0x00>;
+ imagis,max-timeout = <15000>;
+ imagis,pwm-div = <256>;
+ imagis,mode-ctrl = <2>;
+ imagis,regulator {
+ regulator-name = "vcc_i2c";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-max-microamp = <9360>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/tty/serial/msm_serial.txt b/Documentation/devicetree/bindings/tty/serial/msm_serial.txt
index ae7d736..82faa7e 100644
--- a/Documentation/devicetree/bindings/tty/serial/msm_serial.txt
+++ b/Documentation/devicetree/bindings/tty/serial/msm_serial.txt
@@ -34,6 +34,17 @@
- reg : offset and length of the register set for the device.
- interrupts : should contain the uart interrupt.
+Optional properties:
+- qcom,config-gpio : Set this value if the UART GPIOs need to be configured by the driver.
+Set 4 if a 4-wire UART is used (Tx, Rx, CTS, RFR GPIOs).
+Set 1 if a 2-wire UART is used (Tx, Rx GPIOs).
+- qcom,<gpio-name>-gpio : handle to the GPIO node, see "gpios property" in
+Documentation/devicetree/bindings/gpio/gpio.txt.
+"gpio-name" can be "tx", "rx", "cts" and "rfr", depending on the number of UART
+GPIOs that need to be configured.
+- qcom,use-pm : If present, this property will cause the device to prevent system
+suspend as long as the port remains open.
+
Aliases:
An alias may optionally be used to bind the serial device to a tty device
(ttyHSLx) with a given line number. Aliases are of the form serial<n> where <n>
@@ -50,4 +61,11 @@
compatible = "qcom,msm-lsuart-v14"
reg = <0x19c40000 0x1000">;
interrupts = <195>;
+
+ qcom,config-gpio = <4>;
+ qcom,tx-gpio = <&msmgpio 41 0x00>;
+ qcom,rx-gpio = <&msmgpio 42 0x00>;
+ qcom,cts-gpio = <&msmgpio 43 0x00>;
+ qcom,rfr-gpio = <&msmgpio 44 0x00>;
+ qcom,use-pm;
};
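
A DT-aware driver maps these optional properties onto the platform data fields documented in msm_serial_hs_lite.h later in this series; a minimal sketch of the parsing, assuming the property names above (the helper name is illustrative):

	#include <linux/of.h>
	#include <linux/of_gpio.h>
	#include <linux/platform_device.h>

	/* Sketch: read the optional UART GPIO configuration from the node. */
	static void msm_serial_parse_dt_gpios(struct platform_device *pdev)
	{
		struct device_node *node = pdev->dev.of_node;
		u32 config_gpio = 0;
		int tx, rx, cts, rfr;
		bool use_pm;

		of_property_read_u32(node, "qcom,config-gpio", &config_gpio);
		tx  = of_get_named_gpio(node, "qcom,tx-gpio", 0);
		rx  = of_get_named_gpio(node, "qcom,rx-gpio", 0);
		cts = of_get_named_gpio(node, "qcom,cts-gpio", 0);
		rfr = of_get_named_gpio(node, "qcom,rfr-gpio", 0);
		use_pm = of_property_read_bool(node, "qcom,use-pm");

		dev_dbg(&pdev->dev, "config=%u tx=%d rx=%d cts=%d rfr=%d pm=%d\n",
			config_gpio, tx, rx, cts, rfr, use_pm);
	}
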
diff --git a/arch/arm/boot/dts/msm-iommu.dtsi b/arch/arm/boot/dts/msm-iommu.dtsi
index 839199a..8343c7a 100755
--- a/arch/arm/boot/dts/msm-iommu.dtsi
+++ b/arch/arm/boot/dts/msm-iommu.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -134,6 +134,7 @@
interrupts = <0 47 0>;
qcom,iommu-ctx-sids = <1>;
label = "mdp_1";
+ qcom,secure-context;
};
};
@@ -210,6 +211,7 @@
interrupts = <0 42 0>;
qcom,iommu-ctx-sids = <0x80 0x81 0x82 0x83 0x84 0x85>;
label = "venus_cp";
+ qcom,secure-context;
};
qcom,iommu-ctx@fdc8e000 {
@@ -217,6 +219,7 @@
interrupts = <0 42 0>;
qcom,iommu-ctx-sids = <0xc0 0xc6>;
label = "venus_fw";
+ qcom,secure-context;
};
};
diff --git a/arch/arm/boot/dts/msm8226.dtsi b/arch/arm/boot/dts/msm8226.dtsi
index 475ed40..3c8ea84 100644
--- a/arch/arm/boot/dts/msm8226.dtsi
+++ b/arch/arm/boot/dts/msm8226.dtsi
@@ -83,13 +83,23 @@
compatible = "qcom,android-usb";
};
+ slim@fe12f000 {
+ cell-index = <1>;
+ compatible = "qcom,slim-ngd";
+ reg = <0xfe12f000 0x35000>,
+ <0xfe104000 0x20000>;
+ reg-names = "slimbus_physical", "slimbus_bam_physical";
+ interrupts = <0 163 0>, <0 164 0>;
+ interrupt-names = "slimbus_irq", "slimbus_bam_irq";
+ };
+
qcom,wdt@f9017000 {
compatible = "qcom,msm-watchdog";
reg = <0xf9017000 0x1000>;
interrupts = <0 3 0>, <0 4 0>;
qcom,bark-time = <11000>;
qcom,pet-time = <10000>;
- qcom,ipi-ping = <1>;
+ qcom,ipi-ping;
};
qcom,smem@fa00000 {
diff --git a/arch/arm/boot/dts/msm8910.dtsi b/arch/arm/boot/dts/msm8910.dtsi
index 8ad4eda..61f1dcd 100644
--- a/arch/arm/boot/dts/msm8910.dtsi
+++ b/arch/arm/boot/dts/msm8910.dtsi
@@ -214,7 +214,7 @@
interrupts = <0 3 0>, <0 4 0>;
qcom,bark-time = <11000>;
qcom,pet-time = <10000>;
- qcom,ipi-ping = <1>;
+ qcom,ipi-ping;
};
spmi_bus: qcom,spmi@fc4c0000 {
diff --git a/arch/arm/boot/dts/msm8974-cdp.dtsi b/arch/arm/boot/dts/msm8974-cdp.dtsi
index 6aaf677..5e65ca4 100644
--- a/arch/arm/boot/dts/msm8974-cdp.dtsi
+++ b/arch/arm/boot/dts/msm8974-cdp.dtsi
@@ -51,16 +51,16 @@
/* Object 6, Instance = 0 */
00 00 00 00 00 00
/* Object 38, Instance = 0 */
- 15 00 02 10 08 0C 00 00
+ 15 01 00 03 0A 0C 00 00
/* Object 7, Instance = 0 */
- FF FF 32 03
+ 20 08 32 03
/* Object 8, Instance = 0 */
- 0F 00 0A 0A 00 00 0A 00 00 00
+ 0F 00 0A 0A 00 00 0A 0A 00 00
/* Object 9, Instance = 0 */
- 83 00 00 18 0E 00 70 32 02 01
- 00 03 01 01 05 0A 0A 0A 90 05
- F8 02 00 00 0F 0F 00 00 48 2D
- 07 0C 00 00 00 00
+ 83 00 00 18 0E 00 70 46 02 01
+ 00 0A 03 31 04 05 0A 0A 90 05
+ F8 02 05 F1 F1 0F 00 00 08 2D
+ 12 06 00 00 00 01
/* Object 15, Instance = 0 */
00 00 00 00 00 00 00 00 00 00
00
@@ -77,7 +77,7 @@
/* Object 40, Instance = 0 */
00 00 00 00 00
/* Object 42, Instance = 0 */
- 00 00 00 00 00 00 00 00 00 00
+ 33 1E 19 10 80 00 00 00 FF 00
/* Object 46, Instance = 0 */
00 00 10 10 00 00 03 00 00 01
/* Object 47, Instance = 0 */
@@ -86,7 +86,7 @@
/* Object 55, Instance = 0 */
00 00 00 00 00 00
/* Object 56, Instance = 0 */
- 03 00 01 18 05 05 05 05 05 05
+ 00 00 00 18 05 05 05 05 05 05
05 05 05 05 05 05 05 05 05 05
05 05 05 05 05 05 05 05 00 00
00 00 00 00 00 00 00 00 00 00
@@ -95,20 +95,42 @@
00 00 00
/* Object 61, Instance = 0 */
00 00 00 00 00
- /* Object 61, Instance = 1 */
- 00 00 00 00 00
/* Object 62, Instance = 0 */
- 7F 03 00 16 00 00 00 00 00 00
- 04 08 10 18 05 00 0A 05 05 50
- 14 19 34 1A 64 00 00 04 40 00
- 00 00 00 00 30 32 02 00 01 00
- 05 00 00 00 00 00 00 00 00 00
- 00 00 0C 00
+ 01 2A 00 16 00 00 00 00 0B 01
+ 02 03 04 08 00 00 08 10 18 05
+ 00 0A 05 05 50 14 19 34 1A 7F
+ 00 00 00 00 00 00 00 00 00 30
+ 05 02 00 01 00 05 00 00 00 00
+ 00 00 00 00
];
};
};
};
+ i2c@f9967000 {
+ isa1200@48 {
+ status = "okay";
+ reg = <0x48>;
+ vcc_i2c-supply = <&pm8941_s3>;
+ compatible = "imagis,isa1200";
+ label = "vibrator";
+ imagis,chip-en;
+ imagis,smart-en;
+ imagis,need-pwm-clk;
+ imagis,ext-clk-en;
+ imagis,hap-en-gpio = <&msmgpio 86 0x00>;
+ imagis,max-timeout = <15000>;
+ imagis,pwm-div = <256>;
+ imagis,mode-ctrl = <2>;
+ imagis,regulator {
+ regulator-name = "vcc_i2c";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-max-microamp = <9360>;
+ };
+ };
+ };
+
gpio_keys {
compatible = "gpio-keys";
input-name = "gpio-keys";
@@ -339,6 +361,13 @@
};
gpio@cf00 { /* GPIO 16 */
+ qcom,mode = <1>;
+ qcom,output-type = <0>;
+ qcom,pull = <2>;
+ qcom,vin-sel = <2>;
+ qcom,out-strength = <2>;
+ qcom,src-sel = <2>;
+ qcom,master-en = <1>;
};
gpio@d000 { /* GPIO 17 */
diff --git a/arch/arm/boot/dts/msm8974-fluid.dtsi b/arch/arm/boot/dts/msm8974-fluid.dtsi
index a027a1e..d8e15b8 100644
--- a/arch/arm/boot/dts/msm8974-fluid.dtsi
+++ b/arch/arm/boot/dts/msm8974-fluid.dtsi
@@ -51,16 +51,16 @@
/* Object 6, Instance = 0 */
00 00 00 00 00 00
/* Object 38, Instance = 0 */
- 15 00 02 10 08 0C 00 00
+ 15 01 00 03 0A 0C 00 00
/* Object 7, Instance = 0 */
- FF FF 32 03
+ 20 08 32 03
/* Object 8, Instance = 0 */
- 0F 00 0A 0A 00 00 0A 00 00 00
+ 0F 00 0A 0A 00 00 0A 0A 00 00
/* Object 9, Instance = 0 */
- 83 00 00 18 0E 00 70 32 02 01
- 00 03 01 01 05 0A 0A 0A 90 05
- F8 02 00 00 0F 0F 00 00 48 2D
- 07 0C 00 00 00 00
+ 83 00 00 18 0E 00 70 46 02 01
+ 00 0A 03 31 04 05 0A 0A 90 05
+ F8 02 05 F1 F1 0F 00 00 08 2D
+ 12 06 00 00 00 01
/* Object 15, Instance = 0 */
00 00 00 00 00 00 00 00 00 00
00
@@ -77,7 +77,7 @@
/* Object 40, Instance = 0 */
00 00 00 00 00
/* Object 42, Instance = 0 */
- 00 00 00 00 00 00 00 00 00 00
+ 33 1E 19 10 80 00 00 00 FF 00
/* Object 46, Instance = 0 */
00 00 10 10 00 00 03 00 00 01
/* Object 47, Instance = 0 */
@@ -86,7 +86,7 @@
/* Object 55, Instance = 0 */
00 00 00 00 00 00
/* Object 56, Instance = 0 */
- 03 00 01 18 05 05 05 05 05 05
+ 00 00 00 18 05 05 05 05 05 05
05 05 05 05 05 05 05 05 05 05
05 05 05 05 05 05 05 05 00 00
00 00 00 00 00 00 00 00 00 00
@@ -95,15 +95,13 @@
00 00 00
/* Object 61, Instance = 0 */
00 00 00 00 00
- /* Object 61, Instance = 1 */
- 00 00 00 00 00
/* Object 62, Instance = 0 */
- 7F 03 00 16 00 00 00 00 00 00
- 04 08 10 18 05 00 0A 05 05 50
- 14 19 34 1A 64 00 00 04 40 00
- 00 00 00 00 30 32 02 00 01 00
- 05 00 00 00 00 00 00 00 00 00
- 00 00 0C 00
+ 01 2A 00 16 00 00 00 00 0B 01
+ 02 03 04 08 00 00 08 10 18 05
+ 00 0A 05 05 50 14 19 34 1A 7F
+ 00 00 00 00 00 00 00 00 00 30
+ 05 02 00 01 00 05 00 00 00 00
+ 00 00 00 00
];
};
};
@@ -123,6 +121,27 @@
smps3a-supply = <&pm8941_s3>;
vdda-supply = <&pm8941_l12>;
};
+
+ isa1200@48 {
+ status = "okay";
+ reg = <0x48>;
+ vcc_i2c-supply = <&pm8941_s3>;
+ compatible = "imagis,isa1200";
+ label = "vibrator";
+ imagis,chip-en;
+ imagis,need-pwm-clk;
+ imagis,ext-clk-en;
+ imagis,hap-en-gpio = <&msmgpio 86 0x00>;
+ imagis,max-timeout = <15000>;
+ imagis,pwm-div = <256>;
+ imagis,mode-ctrl = <2>;
+ imagis,regulator {
+ regulator-name = "vcc_i2c";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-max-microamp = <9360>;
+ };
+ };
};
gpio_keys {
@@ -364,6 +383,13 @@
};
gpio@cf00 { /* GPIO 16 */
+ qcom,mode = <1>;
+ qcom,output-type = <0>;
+ qcom,pull = <5>;
+ qcom,vin-sel = <2>;
+ qcom,out-strength = <3>;
+ qcom,src-sel = <2>;
+ qcom,master-en = <1>;
};
gpio@d000 { /* GPIO 17 */
diff --git a/arch/arm/boot/dts/msm8974-mtp.dtsi b/arch/arm/boot/dts/msm8974-mtp.dtsi
index 48bb5ba..c295353 100644
--- a/arch/arm/boot/dts/msm8974-mtp.dtsi
+++ b/arch/arm/boot/dts/msm8974-mtp.dtsi
@@ -51,16 +51,16 @@
/* Object 6, Instance = 0 */
00 00 00 00 00 00
/* Object 38, Instance = 0 */
- 15 00 02 10 08 0C 00 00
+ 15 01 00 03 0A 0C 00 00
/* Object 7, Instance = 0 */
- FF FF 32 03
+ 20 08 32 03
/* Object 8, Instance = 0 */
- 0F 00 0A 0A 00 00 0A 00 00 00
+ 0F 00 0A 0A 00 00 0A 0A 00 00
/* Object 9, Instance = 0 */
- 83 00 00 18 0E 00 70 32 02 01
- 00 03 01 01 05 0A 0A 0A 90 05
- F8 02 00 00 0F 0F 00 00 48 2D
- 07 0C 00 00 00 00
+ 83 00 00 18 0E 00 70 46 02 01
+ 00 0A 03 31 04 05 0A 0A 90 05
+ F8 02 05 F1 F1 0F 00 00 08 2D
+ 12 06 00 00 00 01
/* Object 15, Instance = 0 */
00 00 00 00 00 00 00 00 00 00
00
@@ -77,7 +77,7 @@
/* Object 40, Instance = 0 */
00 00 00 00 00
/* Object 42, Instance = 0 */
- 00 00 00 00 00 00 00 00 00 00
+ 33 1E 19 10 80 00 00 00 FF 00
/* Object 46, Instance = 0 */
00 00 10 10 00 00 03 00 00 01
/* Object 47, Instance = 0 */
@@ -86,7 +86,7 @@
/* Object 55, Instance = 0 */
00 00 00 00 00 00
/* Object 56, Instance = 0 */
- 03 00 01 18 05 05 05 05 05 05
+ 00 00 00 18 05 05 05 05 05 05
05 05 05 05 05 05 05 05 05 05
05 05 05 05 05 05 05 05 00 00
00 00 00 00 00 00 00 00 00 00
@@ -95,20 +95,41 @@
00 00 00
/* Object 61, Instance = 0 */
00 00 00 00 00
- /* Object 61, Instance = 1 */
- 00 00 00 00 00
/* Object 62, Instance = 0 */
- 7F 03 00 16 00 00 00 00 00 00
- 04 08 10 18 05 00 0A 05 05 50
- 14 19 34 1A 64 00 00 04 40 00
- 00 00 00 00 30 32 02 00 01 00
- 05 00 00 00 00 00 00 00 00 00
- 00 00 0C 00
+ 01 2A 00 16 00 00 00 00 0B 01
+ 02 03 04 08 00 00 08 10 18 05
+ 00 0A 05 05 50 14 19 34 1A 7F
+ 00 00 00 00 00 00 00 00 00 30
+ 05 02 00 01 00 05 00 00 00 00
+ 00 00 00 00
];
};
};
};
+ i2c@f9967000 {
+ isa1200@48 {
+ status = "okay";
+ reg = <0x48>;
+ vcc_i2c-supply = <&pm8941_s3>;
+ compatible = "imagis,isa1200";
+ label = "vibrator";
+ imagis,chip-en;
+ imagis,need-pwm-clk;
+ imagis,ext-clk-en;
+ imagis,hap-en-gpio = <&msmgpio 86 0x00>;
+ imagis,max-timeout = <15000>;
+ imagis,pwm-div = <256>;
+ imagis,mode-ctrl = <2>;
+ imagis,regulator {
+ regulator-name = "vcc_i2c";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-max-microamp = <9360>;
+ };
+ };
+ };
+
gpio_keys {
compatible = "gpio-keys";
input-name = "gpio-keys";
@@ -344,6 +365,13 @@
};
gpio@cf00 { /* GPIO 16 */
+ qcom,mode = <1>;
+ qcom,output-type = <0>;
+ qcom,pull = <5>;
+ qcom,vin-sel = <2>;
+ qcom,out-strength = <3>;
+ qcom,src-sel = <2>;
+ qcom,master-en = <1>;
};
gpio@d000 { /* GPIO 17 */
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index 74b6521..bd0711c 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -998,7 +998,7 @@
interrupts = <0 3 0 0 4 0>;
qcom,bark-time = <11000>;
qcom,pet-time = <10000>;
- qcom,ipi-ping = <1>;
+ qcom,ipi-ping;
};
qcom,tz-log@fc03000 {
diff --git a/arch/arm/boot/dts/msm9625.dtsi b/arch/arm/boot/dts/msm9625.dtsi
index 6fee4e6..bf66de6 100644
--- a/arch/arm/boot/dts/msm9625.dtsi
+++ b/arch/arm/boot/dts/msm9625.dtsi
@@ -170,7 +170,6 @@
interrupts = <1 2 0>, <1 1 0>;
qcom,bark-time = <11000>;
qcom,pet-time = <10000>;
- qcom,ipi-ping = <0>;
};
rpm_bus: qcom,rpm-smd {
@@ -577,6 +576,25 @@
qcom,irq-no-suspend;
};
};
+
+ qcom,qcedev@fd400000 {
+ compatible = "qcom,qcedev";
+ reg = <0xfd400000 0x20000>,
+ <0xfd404000 0x8000>;
+ reg-names = "crypto-base","crypto-bam-base";
+ interrupts = <0 207 0>;
+ qcom,bam-pipe-pair = <1>;
+ };
+
+ qcom,qcrypto@fd440000 {
+ compatible = "qcom,qcrypto";
+ reg = <0xfd400000 0x20000>,
+ <0xfd404000 0x8000>;
+ reg-names = "crypto-base","crypto-bam-base";
+ interrupts = <0 207 0>;
+ qcom,bam-pipe-pair = <2>;
+ };
+
};
/include/ "msm-pm8019-rpm-regulator.dtsi"
diff --git a/arch/arm/configs/msm8910_defconfig b/arch/arm/configs/msm8910_defconfig
index 0fcfa97..67fabd4 100644
--- a/arch/arm/configs/msm8910_defconfig
+++ b/arch/arm/configs/msm8910_defconfig
@@ -2,6 +2,7 @@
CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
+CONFIG_RCU_FAST_NO_HZ=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_CGROUPS=y
diff --git a/arch/arm/configs/msm8960-perf_defconfig b/arch/arm/configs/msm8960-perf_defconfig
index c95472f..1aaf7ea 100644
--- a/arch/arm/configs/msm8960-perf_defconfig
+++ b/arch/arm/configs/msm8960-perf_defconfig
@@ -2,6 +2,7 @@
CONFIG_EXPERIMENTAL=y
CONFIG_LOCALVERSION="-perf"
CONFIG_SYSVIPC=y
+CONFIG_RCU_FAST_NO_HZ=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_CGROUPS=y
@@ -122,15 +123,16 @@
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
CONFIG_INET=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_MULTIPLE_TABLES=y
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
# CONFIG_INET_LRO is not set
CONFIG_IPV6=y
CONFIG_IPV6_PRIVACY=y
@@ -502,6 +504,11 @@
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_USER=y
CONFIG_PID_IN_CONTEXTIDR=y
+CONFIG_CRYPTO_NULL=y
+CONFIG_CRYPTO_CRYPTD=y
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_CTR=y
+CONFIG_CRYPTO_CTS=y
CONFIG_CRYPTO_TWOFISH=y
CONFIG_CRYPTO_DEV_QCRYPTO=m
CONFIG_CRYPTO_DEV_QCE=m
diff --git a/arch/arm/configs/msm8960_defconfig b/arch/arm/configs/msm8960_defconfig
index 1842b6e..94d12e0 100644
--- a/arch/arm/configs/msm8960_defconfig
+++ b/arch/arm/configs/msm8960_defconfig
@@ -1,6 +1,7 @@
# CONFIG_ARM_PATCH_PHYS_VIRT is not set
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
+CONFIG_RCU_FAST_NO_HZ=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_CGROUPS=y
@@ -127,15 +128,16 @@
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
CONFIG_INET=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_MULTIPLE_TABLES=y
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
# CONFIG_INET_LRO is not set
CONFIG_IPV6=y
CONFIG_IPV6_PRIVACY=y
@@ -519,6 +521,11 @@
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_USER=y
CONFIG_PID_IN_CONTEXTIDR=y
+CONFIG_CRYPTO_NULL=y
+CONFIG_CRYPTO_CRYPTD=y
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_CTR=y
+CONFIG_CRYPTO_CTS=y
CONFIG_CRYPTO_TWOFISH=y
CONFIG_CRYPTO_DEV_QCRYPTO=m
CONFIG_CRYPTO_DEV_QCE=m
diff --git a/arch/arm/configs/msm8974-perf_defconfig b/arch/arm/configs/msm8974-perf_defconfig
index ad6dc6a..8debb93 100644
--- a/arch/arm/configs/msm8974-perf_defconfig
+++ b/arch/arm/configs/msm8974-perf_defconfig
@@ -3,6 +3,7 @@
CONFIG_LOCALVERSION="-perf"
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
+CONFIG_RCU_FAST_NO_HZ=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_CGROUPS=y
@@ -222,6 +223,7 @@
CONFIG_GENLOCK_MISCDEVICE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
+CONFIG_HAPTIC_ISA1200=y
CONFIG_QSEECOM=y
CONFIG_USB_HSIC_SMSC_HUB=y
CONFIG_TI_DRV2667=y
diff --git a/arch/arm/configs/msm8974_defconfig b/arch/arm/configs/msm8974_defconfig
index ca60319..0f8c469 100644
--- a/arch/arm/configs/msm8974_defconfig
+++ b/arch/arm/configs/msm8974_defconfig
@@ -2,6 +2,7 @@
CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
+CONFIG_RCU_FAST_NO_HZ=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_CGROUPS=y
@@ -224,6 +225,7 @@
CONFIG_GENLOCK_MISCDEVICE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
+CONFIG_HAPTIC_ISA1200=y
CONFIG_QSEECOM=y
CONFIG_USB_HSIC_SMSC_HUB=y
CONFIG_TI_DRV2667=y
diff --git a/arch/arm/configs/msm9625_defconfig b/arch/arm/configs/msm9625_defconfig
index a3e7b98..cbdcc5b 100644
--- a/arch/arm/configs/msm9625_defconfig
+++ b/arch/arm/configs/msm9625_defconfig
@@ -257,6 +257,8 @@
CONFIG_CRYPTO_DES=y
CONFIG_CRYPTO_TWOFISH=y
CONFIG_CRYPTO_DEFLATE=y
-# CONFIG_CRYPTO_HW is not set
+CONFIG_CRYPTO_DEV_QCRYPTO=m
+CONFIG_CRYPTO_DEV_QCE=m
+CONFIG_CRYPTO_DEV_QCEDEV=m
CONFIG_CRC_CCITT=y
CONFIG_LIBCRC32C=y
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
index e32194f..bdfdfa0 100644
--- a/arch/arm/include/asm/arch_timer.h
+++ b/arch/arm/include/asm/arch_timer.h
@@ -3,6 +3,7 @@
#include <linux/ioport.h>
#include <linux/clocksource.h>
+#include <asm/errno.h>
struct arch_timer {
struct resource res[3];
diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h
index 5c6b9a3..ab98fdd 100644
--- a/arch/arm/include/asm/delay.h
+++ b/arch/arm/include/asm/delay.h
@@ -6,9 +6,27 @@
#ifndef __ASM_ARM_DELAY_H
#define __ASM_ARM_DELAY_H
+#include <asm/memory.h>
#include <asm/param.h> /* HZ */
-extern void __delay(unsigned long loops);
+#define MAX_UDELAY_MS 2
+#define UDELAY_MULT ((UL(2199023) * HZ) >> 11)
+#define UDELAY_SHIFT 30
+
+#ifndef __ASSEMBLY__
+
+struct delay_timer {
+ unsigned long (*read_current_timer)(void);
+ unsigned long freq;
+};
+
+extern struct arm_delay_ops {
+ void (*delay)(unsigned long);
+ void (*const_udelay)(unsigned long);
+ void (*udelay)(unsigned long);
+} arm_delay_ops;
+
+#define __delay(n) arm_delay_ops.delay(n)
/*
* This function intentionally does not exist; if you see references to
@@ -23,25 +41,31 @@
* division by multiplication: you don't have to worry about
* loss of precision.
*
- * Use only for very small delays ( < 1 msec). Should probably use a
+ * Use only for very small delays ( < 2 msec). Should probably use a
* lookup table, really, as the multiplications take much too long with
* short delays. This is a "reasonable" implementation, though (and the
* first constant multiplications gets optimized away if the delay is
* a constant)
*/
-extern void __udelay(unsigned long usecs);
-extern void __const_udelay(unsigned long);
-
-#define MAX_UDELAY_MS 2
+#define __udelay(n) arm_delay_ops.udelay(n)
+#define __const_udelay(n) arm_delay_ops.const_udelay(n)
#define udelay(n) \
(__builtin_constant_p(n) ? \
((n) > (MAX_UDELAY_MS * 1000) ? __bad_udelay() : \
- __const_udelay((n) * ((2199023U*HZ)>>11))) : \
+ __const_udelay((n) * UDELAY_MULT)) : \
__udelay(n))
-extern void set_delay_fn(void (*fn)(unsigned long));
-extern void read_current_timer_delay_loop(unsigned long loops);
+/* Loop-based definitions for assembly code. */
+extern void __loop_delay(unsigned long loops);
+extern void __loop_udelay(unsigned long usecs);
+extern void __loop_const_udelay(unsigned long);
+
+/* Delay-loop timer registration. */
+#define ARCH_HAS_READ_CURRENT_TIMER
+extern void register_current_timer_delay(const struct delay_timer *timer);
+
+#endif /* __ASSEMBLY__ */
#endif /* defined(_ARM_DELAY_H) */
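
A platform with a fixed-frequency free-running counter switches the delay loop over to it by registering a struct delay_timer; a minimal sketch of that pattern (the same one the arch_timer.c and mach-msm/timer.c hunks below follow — the read function, initcall name and counter frequency here are stand-ins):

	#include <linux/init.h>
	#include <asm/delay.h>

	/* Stand-in: would return the current hardware counter value. */
	static unsigned long example_read_current_timer(void)
	{
		return 0;
	}

	static struct delay_timer example_delay_timer = {
		.read_current_timer	= example_read_current_timer,
		.freq			= 19200000,	/* assumed counter rate in Hz */
	};

	static void __init example_timer_init(void)
	{
		/* Moves __delay()/__udelay() from the loop to the timer backend. */
		register_current_timer_delay(&example_delay_timer);
	}
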
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index 926ac0e..5dc9a66 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -69,6 +69,7 @@
#define L2X0_CACHE_ID_REV_MASK (0x3f)
#define L2X0_CACHE_ID_PART_MASK (0xf << 6)
#define L2X0_CACHE_ID_PART_L210 (1 << 6)
+#define L2X0_CACHE_ID_PART_L220 (2 << 6)
#define L2X0_CACHE_ID_PART_L310 (3 << 6)
#define L2X0_CACHE_ID_RTL_MASK 0x3f
#define L2X0_CACHE_ID_RTL_R0P0 0x0
@@ -104,6 +105,7 @@
#define L2X0_LATENCY_CTRL_WR_SHIFT 8
#define L2X0_ADDR_FILTER_EN 1
+#define L2X0_INTR_MASK_ECNTR 1
#define REV_PL310_R2P0 4
@@ -146,6 +148,49 @@
extern struct l2x0_regs l2x0_saved_regs;
+#ifdef CONFIG_HW_PERF_EVENTS
+/* L220/PL310 Event control register values */
+#define L2X0_EVENT_CNT_ENABLE_MASK 1
+#define L2X0_EVENT_CNT_ENABLE 1
+#define L2X0_EVENT_CNT_RESET(x) (1 << (x+1))
+
+/* Bit-shifted event counter config values */
+enum l2x0_perf_types {
+ L2X0_EVENT_CNT_CFG_DISABLED = 0x0,
+ L2X0_EVENT_CNT_CFG_CO = 0x1,
+ L2X0_EVENT_CNT_CFG_DRHIT = 0x2,
+ L2X0_EVENT_CNT_CFG_DRREQ = 0x3,
+ L2X0_EVENT_CNT_CFG_DWHIT = 0x4,
+ L2X0_EVENT_CNT_CFG_DWREQ = 0x5,
+ L2X0_EVENT_CNT_CFG_DWTREQ = 0x6,
+ L2X0_EVENT_CNT_CFG_IRHIT = 0x7,
+ L2X0_EVENT_CNT_CFG_IRREQ = 0x8,
+ L2X0_EVENT_CNT_CFG_WA = 0x9,
+
+ /* PL310 only */
+ L2X0_EVENT_CNT_CFG_IPFALLOC = 0xA,
+ L2X0_EVENT_CNT_CFG_EPFHIT = 0xB,
+ L2X0_EVENT_CNT_CFG_EPFALLOC = 0xC,
+ L2X0_EVENT_CNT_CFG_SRRCVD = 0xD,
+ L2X0_EVENT_CNT_CFG_SRCONF = 0xE,
+ L2X0_EVENT_CNT_CFG_EPFRCVD = 0xF,
+};
+
+#define L220_EVENT_CNT_CFG_MAX L2X0_EVENT_CNT_CFG_WA
+#define PL310_EVENT_CNT_CFG_MAX L2X0_EVENT_CNT_CFG_EPFRCVD
+
+#define L2X0_EVENT_CNT_CFG_SHIFT 2
+#define L2X0_EVENT_CNT_CFG_MASK (0xF << 2)
+
+#define L2X0_EVENT_CNT_CFG_INTR_MASK 0x3
+#define L2X0_EVENT_CNT_CFG_INTR_DISABLED 0x0
+#define L2X0_EVENT_CNT_CFG_INTR_INCREMENT 0x1
+#define L2X0_EVENT_CNT_CFG_INTR_OVERFLOW 0x2
+
+#define L2X0_NUM_COUNTERS 2
+
+#endif /* CONFIG_HW_PERF_EVENTS */
+
#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index a40f81e..4f41fd6 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -22,6 +22,7 @@
ARM_PERF_PMU_ID_CA9,
ARM_PERF_PMU_ID_CA5,
ARM_PERF_PMU_ID_CA15,
+ ARM_PERF_PMU_ID_L2X0,
ARM_PERF_PMU_ID_CA7,
ARM_PERF_PMU_ID_SCORPION,
ARM_PERF_PMU_ID_SCORPIONMP,
diff --git a/arch/arm/include/asm/timex.h b/arch/arm/include/asm/timex.h
index 3be8de3..9acc135 100644
--- a/arch/arm/include/asm/timex.h
+++ b/arch/arm/include/asm/timex.h
@@ -15,10 +15,6 @@
#include <mach/timex.h>
typedef unsigned long cycles_t;
-
-static inline cycles_t get_cycles (void)
-{
- return 0;
-}
+#define get_cycles() ({ cycles_t c; read_current_timer(&c) ? 0 : c; })
#endif
diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c
index 2455d1f..2b3667f 100644
--- a/arch/arm/kernel/arch_timer.c
+++ b/arch/arm/kernel/arch_timer.c
@@ -25,6 +25,7 @@
#include <linux/export.h>
#include <asm/cputype.h>
+#include <asm/delay.h>
#include <asm/localtimer.h>
#include <asm/arch_timer.h>
#include <asm/sched_clock.h>
@@ -72,6 +73,8 @@
static struct arch_timer_operations *arch_specific_timer = &arch_timer_ops_cp15;
+static struct delay_timer arch_delay_timer;
+
/*
* Architected system timer support.
*/
@@ -331,13 +334,10 @@
return arch_counter_get_cntpct();
}
-#ifdef ARCH_HAS_READ_CURRENT_TIMER
-int read_current_timer(unsigned long *timer_val)
+static unsigned long arch_timer_read_current_timer(void)
{
- *timer_val = (unsigned long)arch_specific_timer->get_cntpct();
- return 0;
+ return arch_counter_get_cntpct();
}
-#endif
static struct clocksource clocksource_counter = {
.name = "arch_sys_counter",
@@ -402,10 +402,6 @@
setup_sched_clock(arch_timer_update_sched_clock, 32, arch_timer_rate);
-#ifdef ARCH_HAS_READ_CURRENT_TIMER
- set_delay_fn(read_current_timer_delay_loop);
-#endif
-
if (is_irq_percpu)
err = request_percpu_irq(arch_timer_ppi, arch_timer_handler,
"arch_timer", arch_timer_evt);
@@ -439,6 +435,10 @@
goto out_free_irq;
percpu_timer_setup();
+ /* Use the architected timer for the delay loop. */
+ arch_delay_timer.read_current_timer = &arch_timer_read_current_timer;
+ arch_delay_timer.freq = arch_timer_rate;
+ register_current_timer_delay(&arch_delay_timer);
return 0;
out_free_irq:
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index f1a50f3..7196228 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -48,6 +48,9 @@
extern void fpundefinstr(void);
+ /* platform dependent support */
+EXPORT_SYMBOL(arm_delay_ops);
+
/* networking */
EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(csum_partial_copy_from_user);
diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile
index 0ade0ac..8ade75d 100644
--- a/arch/arm/lib/Makefile
+++ b/arch/arm/lib/Makefile
@@ -6,7 +6,7 @@
lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \
csumpartialcopy.o csumpartialcopyuser.o clearbit.o \
- delay.o findbit.o memchr.o memcpy.o \
+ delay.o delay-loop.o findbit.o memchr.o memcpy.o \
memmove.o memset.o memzero.o setbit.o \
strncpy_from_user.o strnlen_user.o \
strchr.o strrchr.o \
diff --git a/arch/arm/lib/delay-loop.S b/arch/arm/lib/delay-loop.S
new file mode 100644
index 0000000..36b668d
--- /dev/null
+++ b/arch/arm/lib/delay-loop.S
@@ -0,0 +1,67 @@
+/*
+ * linux/arch/arm/lib/delay.S
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/delay.h>
+ .text
+
+.LC0: .word loops_per_jiffy
+.LC1: .word UDELAY_MULT
+
+/*
+ * r0 <= 2000
+ * lpj <= 0x01ffffff (max. 3355 bogomips)
+ * HZ <= 1000
+ */
+
+ENTRY(__loop_udelay)
+ ldr r2, .LC1
+ mul r0, r2, r0
+ENTRY(__loop_const_udelay) @ 0 <= r0 <= 0x7fffff06
+ mov r1, #-1
+ ldr r2, .LC0
+ ldr r2, [r2] @ max = 0x01ffffff
+ add r0, r0, r1, lsr #32-14
+ mov r0, r0, lsr #14 @ max = 0x0001ffff
+ add r2, r2, r1, lsr #32-10
+ mov r2, r2, lsr #10 @ max = 0x00007fff
+ mul r0, r2, r0 @ max = 2^32-1
+ add r0, r0, r1, lsr #32-6
+ movs r0, r0, lsr #6
+ moveq pc, lr
+
+/*
+ * loops = r0 * HZ * loops_per_jiffy / 1000000
+ */
+
+@ Delay routine
+ENTRY(__loop_delay)
+ subs r0, r0, #1
+#if 0
+ movls pc, lr
+ subs r0, r0, #1
+ movls pc, lr
+ subs r0, r0, #1
+ movls pc, lr
+ subs r0, r0, #1
+ movls pc, lr
+ subs r0, r0, #1
+ movls pc, lr
+ subs r0, r0, #1
+ movls pc, lr
+ subs r0, r0, #1
+ movls pc, lr
+ subs r0, r0, #1
+#endif
+ bhi __loop_delay
+ mov pc, lr
+ENDPROC(__loop_udelay)
+ENDPROC(__loop_const_udelay)
+ENDPROC(__loop_delay)
diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
index fc9a37c..0dc5385 100644
--- a/arch/arm/lib/delay.c
+++ b/arch/arm/lib/delay.c
@@ -1,90 +1,90 @@
/*
- * Originally from linux/arch/arm/lib/delay.S
+ * Delay loops based on the OpenRISC implementation.
*
- * Copyright (C) 1995, 1996 Russell King
- * Copyright (c) 2010, Code Aurora Forum. All rights reserved.
- * Copyright (C) 1993 Linus Torvalds
- * Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
- * Copyright (C) 2005-2006 Atmel Corporation
+ * Copyright (C) 2012 ARM Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
*/
-#include <linux/module.h>
+
#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/timex.h>
/*
- * Oh, if only we had a cycle counter...
+ * Default to the loop-based delay implementation.
*/
-void delay_loop(unsigned long loops)
+struct arm_delay_ops arm_delay_ops = {
+ .delay = __loop_delay,
+ .const_udelay = __loop_const_udelay,
+ .udelay = __loop_udelay,
+};
+
+static const struct delay_timer *delay_timer;
+static bool delay_calibrated;
+
+int read_current_timer(unsigned long *timer_val)
{
- asm volatile(
- "1: subs %0, %0, #1 \n"
- " bhi 1b \n"
- : /* No output */
- : "r" (loops)
- );
+ if (!delay_timer)
+ return -ENXIO;
+
+ *timer_val = delay_timer->read_current_timer();
+ return 0;
+}
+EXPORT_SYMBOL_GPL(read_current_timer);
+
+static void __timer_delay(unsigned long cycles)
+{
+ cycles_t start = get_cycles();
+
+ while ((get_cycles() - start) < cycles)
+ cpu_relax();
}
-#ifdef ARCH_HAS_READ_CURRENT_TIMER
-/*
- * Assuming read_current_timer() is monotonically increasing
- * across calls.
- */
-void read_current_timer_delay_loop(unsigned long loops)
+static void __timer_const_udelay(unsigned long xloops)
{
- unsigned long bclock, now;
-
- read_current_timer(&bclock);
- do {
- read_current_timer(&now);
- } while ((now - bclock) < loops);
-}
-#endif
-
-static void (*delay_fn)(unsigned long) = delay_loop;
-
-void set_delay_fn(void (*fn)(unsigned long))
-{
- delay_fn = fn;
+ unsigned long long loops = xloops;
+ loops *= loops_per_jiffy;
+ __timer_delay(loops >> UDELAY_SHIFT);
}
-/*
- * loops = usecs * HZ * loops_per_jiffy / 1000000
- */
-void __delay(unsigned long loops)
+static void __timer_udelay(unsigned long usecs)
{
- delay_fn(loops);
+ __timer_const_udelay(usecs * UDELAY_MULT);
}
-EXPORT_SYMBOL(__delay);
-/*
- * 0 <= xloops <= 0x7fffff06
- * loops_per_jiffy <= 0x01ffffff (max. 3355 bogomips)
- */
-void __const_udelay(unsigned long xloops)
+void __init register_current_timer_delay(const struct delay_timer *timer)
{
- unsigned long lpj;
- unsigned long loops;
-
- xloops >>= 14; /* max = 0x01ffffff */
- lpj = loops_per_jiffy >> 10; /* max = 0x0001ffff */
- loops = lpj * xloops; /* max = 0x00007fff */
- loops >>= 6; /* max = 2^32-1 */
-
- if (loops)
- __delay(loops);
+ if (!delay_calibrated) {
+ pr_info("Switching to timer-based delay loop\n");
+ delay_timer = timer;
+ lpj_fine = timer->freq / HZ;
+ loops_per_jiffy = lpj_fine;
+ arm_delay_ops.delay = __timer_delay;
+ arm_delay_ops.const_udelay = __timer_const_udelay;
+ arm_delay_ops.udelay = __timer_udelay;
+ delay_calibrated = true;
+ } else {
+ pr_info("Ignoring duplicate/late registration of read_current_timer delay\n");
+ }
}
-EXPORT_SYMBOL(__const_udelay);
-/*
- * usecs <= 2000
- * HZ <= 1000
- */
-void __udelay(unsigned long usecs)
+unsigned long __cpuinit calibrate_delay_is_known(void)
{
- __const_udelay(usecs * ((2199023UL*HZ)>>11));
+ delay_calibrated = true;
+ return lpj_fine;
}
-EXPORT_SYMBOL(__udelay);
diff --git a/arch/arm/mach-msm/acpuclock-8974.c b/arch/arm/mach-msm/acpuclock-8974.c
index f929943..94a4d3e 100644
--- a/arch/arm/mach-msm/acpuclock-8974.c
+++ b/arch/arm/mach-msm/acpuclock-8974.c
@@ -142,7 +142,7 @@
};
static struct acpu_level acpu_freq_tbl[] __initdata = {
- { 1, { 300000, PLL_0, 0, 0 }, L2(0), 850000, 100000 },
+ { 1, { 300000, PLL_0, 0, 0 }, L2(0), 850000, 400000 },
{ 0, { 345600, HFPLL, 2, 36 }, L2(0), 850000, 3200000 },
{ 1, { 422400, HFPLL, 2, 44 }, L2(0), 850000, 3200000 },
{ 0, { 499200, HFPLL, 2, 52 }, L2(0), 850000, 3200000 },
diff --git a/arch/arm/mach-msm/bam_dmux.c b/arch/arm/mach-msm/bam_dmux.c
index c475e2d..c5f9e76 100644
--- a/arch/arm/mach-msm/bam_dmux.c
+++ b/arch/arm/mach-msm/bam_dmux.c
@@ -494,7 +494,7 @@
kfree(info);
fail:
- if (rx_len_cached == 0) {
+ if (rx_len_cached == 0 && !in_global_reset) {
DMUX_LOG_KERR("%s: rescheduling\n", __func__);
schedule_delayed_work(&queue_rx_work, msecs_to_jiffies(100));
}
diff --git a/arch/arm/mach-msm/board-8930-gpu.c b/arch/arm/mach-msm/board-8930-gpu.c
index 3eb7d8a..c8e493c 100644
--- a/arch/arm/mach-msm/board-8930-gpu.c
+++ b/arch/arm/mach-msm/board-8930-gpu.c
@@ -173,8 +173,10 @@
if (cpu_is_msm8930aa())
kgsl_3d0_pdata.pwrlevel[0].gpu_freq = 450000000;
- else if (cpu_is_msm8930ab())
+ else if (cpu_is_msm8930ab()) {
kgsl_3d0_pdata.pwrlevel[0].gpu_freq = 500000000;
+ grp3d_max_vectors[0].ib = KGSL_CONVERT_TO_MBPS(4800);
+ }
/* Set up the chip ID based on the SoC version */
diff --git a/arch/arm/mach-msm/board-8974-gpiomux.c b/arch/arm/mach-msm/board-8974-gpiomux.c
index 3245ff8..4bd100d 100644
--- a/arch/arm/mach-msm/board-8974-gpiomux.c
+++ b/arch/arm/mach-msm/board-8974-gpiomux.c
@@ -140,6 +140,26 @@
.drv = GPIOMUX_DRV_2MA,
.pull = GPIOMUX_PULL_NONE,
};
+static struct gpiomux_setting hap_lvl_shft_suspended_config = {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_2MA,
+ .pull = GPIOMUX_PULL_DOWN,
+};
+
+static struct gpiomux_setting hap_lvl_shft_active_config = {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_8MA,
+ .pull = GPIOMUX_PULL_UP,
+};
+static struct msm_gpiomux_config hap_lvl_shft_config[] __initdata = {
+ {
+ .gpio = 86,
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &hap_lvl_shft_suspended_config,
+ [GPIOMUX_ACTIVE] = &hap_lvl_shft_active_config,
+ },
+ },
+};
static struct msm_gpiomux_config msm_touch_configs[] __initdata = {
{
@@ -859,6 +879,8 @@
ARRAY_SIZE(msm8974_slimbus_config));
msm_gpiomux_install(msm_touch_configs, ARRAY_SIZE(msm_touch_configs));
+ msm_gpiomux_install(hap_lvl_shft_config,
+ ARRAY_SIZE(hap_lvl_shft_config));
msm_gpiomux_install(msm_sensor_configs, ARRAY_SIZE(msm_sensor_configs));
diff --git a/arch/arm/mach-msm/clock-8974.c b/arch/arm/mach-msm/clock-8974.c
index 760587f..1b0ac74 100644
--- a/arch/arm/mach-msm/clock-8974.c
+++ b/arch/arm/mach-msm/clock-8974.c
@@ -5134,6 +5134,7 @@
CLK_LOOKUP("cal_clk", gcc_usb_hsic_io_cal_clk.c, "msm_hsic_host"),
CLK_LOOKUP("core_clk", gcc_usb_hsic_system_clk.c, "msm_hsic_host"),
CLK_LOOKUP("ref_clk", div_clk2.c, "msm_smsc_hub"),
+ CLK_LOOKUP("pwm_clk", div_clk2.c, "0-0048"),
/* Multimedia clocks */
CLK_LOOKUP("bus_clk_src", axi_clk_src.c, ""),
diff --git a/arch/arm/mach-msm/clock-9625.c b/arch/arm/mach-msm/clock-9625.c
index 2e85006..ccf6755 100644
--- a/arch/arm/mach-msm/clock-9625.c
+++ b/arch/arm/mach-msm/clock-9625.c
@@ -2104,6 +2104,16 @@
CLK_LOOKUP("alt_core_clk", gcc_usb_hsic_xcvr_fs_clk.c,
"f9a15000.hsic"),
+ CLK_LOOKUP("core_clk", gcc_ce1_clk.c, "fd400000.qcom,qcedev"),
+ CLK_LOOKUP("iface_clk", gcc_ce1_ahb_clk.c, "fd400000.qcom,qcedev"),
+ CLK_LOOKUP("bus_clk", gcc_ce1_axi_clk.c, "fd400000.qcom,qcedev"),
+ CLK_LOOKUP("core_clk_src", ce1_clk_src.c, "fd400000.qcom,qcedev"),
+
+ CLK_LOOKUP("core_clk", gcc_ce1_clk.c, "fd400000.qcom,qcrypto"),
+ CLK_LOOKUP("iface_clk", gcc_ce1_ahb_clk.c, "fd400000.qcom,qcrypto"),
+ CLK_LOOKUP("bus_clk", gcc_ce1_axi_clk.c, "fd400000.qcom,qcrypto"),
+ CLK_LOOKUP("core_clk_src", ce1_clk_src.c, "fd400000.qcom,qcrypto"),
+
/* LPASS clocks */
CLK_LOOKUP("core_clk", audio_core_slimbus_core_clk.c, "fe12f000.slim"),
CLK_LOOKUP("iface_clk", audio_core_slimbus_lfabif_clk.c, ""),
diff --git a/arch/arm/mach-msm/include/mach/iommu.h b/arch/arm/mach-msm/include/mach/iommu.h
index 975b12c..8ebd0cf 100644
--- a/arch/arm/mach-msm/include/mach/iommu.h
+++ b/arch/arm/mach-msm/include/mach/iommu.h
@@ -127,6 +127,9 @@
* @name Human-readable name of this context device
* @sids List of Stream IDs mapped to this context
* @nsid Number of Stream IDs mapped to this context
+ * @secure_context true if this is a secure context programmed by
+ *	the secure environment, false otherwise
+ * @asid ASID used with this context.
*
* A msm_iommu_ctx_drvdata holds the driver data for a single context bank
* within each IOMMU hardware instance
@@ -139,6 +142,8 @@
const char *name;
u32 sids[MAX_NUM_SMR];
unsigned int nsid;
+ unsigned int secure_context;
+ int asid;
};
/*
diff --git a/arch/arm/mach-msm/include/mach/msm_serial_hs_lite.h b/arch/arm/mach-msm/include/mach/msm_serial_hs_lite.h
index 577a097..d40b2f6 100644
--- a/arch/arm/mach-msm/include/mach/msm_serial_hs_lite.h
+++ b/arch/arm/mach-msm/include/mach/msm_serial_hs_lite.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -13,11 +13,26 @@
#ifndef __ASM_ARCH_MSM_SERIAL_HS_LITE_H
#define __ASM_ARCH_MSM_SERIAL_HS_LITE_H
-
+/**
+ * struct msm_serial_hslite_platform_data - platform device data
+ * for msm_hs_lite.
+ * @config_gpio: Select GPIOs to configure.
+ * Set 4 if 4-wire UART used (for Tx, Rx, CTS, RFR GPIOs).
+ * Set 1 if 2-wire UART used (for Tx, Rx GPIOs).
+ * @uart_tx_gpio: GPIO number for UART Tx Line.
+ * @uart_rx_gpio: GPIO number for UART Rx Line.
+ * @uart_cts_gpio: GPIO number for UART CTS Line.
+ * @uart_rfr_gpio: GPIO number for UART RFR Line.
+ * @use_pm: if true, prevent system suspend while the port remains open.
+ * @line: Used to set UART Port number.
+ */
struct msm_serial_hslite_platform_data {
unsigned config_gpio;
unsigned uart_tx_gpio;
unsigned uart_rx_gpio;
+ unsigned uart_cts_gpio;
+ unsigned uart_rfr_gpio;
+ bool use_pm;
int line;
};
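
On a non-DT board the same knobs travel through this platform data; a hedged sketch of a board-file instance (the variable name is illustrative and the GPIO numbers simply reuse the ones from the msm_serial binding example, i.e. they are placeholders):

	#include <mach/msm_serial_hs_lite.h>

	/* Sketch: platform data for a 4-wire UART port. */
	static struct msm_serial_hslite_platform_data example_uart_pdata = {
		.config_gpio	= 4,	/* configure Tx, Rx, CTS and RFR */
		.uart_tx_gpio	= 41,
		.uart_rx_gpio	= 42,
		.uart_cts_gpio	= 43,
		.uart_rfr_gpio	= 44,
		.use_pm		= true,	/* block system suspend while the port is open */
		.line		= 0,
	};
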
diff --git a/arch/arm/mach-msm/msm_watchdog_v2.c b/arch/arm/mach-msm/msm_watchdog_v2.c
index f88c611..2a4422e 100644
--- a/arch/arm/mach-msm/msm_watchdog_v2.c
+++ b/arch/arm/mach-msm/msm_watchdog_v2.c
@@ -50,7 +50,7 @@
unsigned int bark_time;
unsigned int bark_irq;
unsigned int bite_irq;
- unsigned int do_ipi_ping;
+ bool do_ipi_ping;
unsigned long long last_pet;
unsigned min_slack_ticks;
unsigned long long min_slack_ns;
@@ -488,11 +488,7 @@
dev_err(&pdev->dev, "reading pet time failed\n");
return -ENXIO;
}
- ret = of_property_read_u32(node, "qcom,ipi-ping", &pdata->do_ipi_ping);
- if (ret) {
- dev_err(&pdev->dev, "reading do ipi failed\n");
- return -ENXIO;
- }
+ pdata->do_ipi_ping = of_property_read_bool(node, "qcom,ipi-ping");
if (!pdata->bark_time) {
dev_err(&pdev->dev, "%s watchdog bark time not setup\n",
__func__);
@@ -503,11 +499,6 @@
__func__);
return -ENXIO;
}
- if (pdata->do_ipi_ping > 1) {
- dev_err(&pdev->dev, "%s invalid watchdog ipi value\n",
- __func__);
- return -ENXIO;
- }
pdata->irq_ppi = irq_is_per_cpu(pdata->bark_irq);
dump_pdata(pdata);
return 0;
diff --git a/arch/arm/mach-msm/pil-pronto.c b/arch/arm/mach-msm/pil-pronto.c
index b457599..cfcf5dc 100644
--- a/arch/arm/mach-msm/pil-pronto.c
+++ b/arch/arm/mach-msm/pil-pronto.c
@@ -329,6 +329,7 @@
disable_irq_nosync(drv->irq);
drv->restart_inprogress = true;
+ wcnss_pronto_log_debug_regs();
restart_wcnss(drv);
return IRQ_HANDLED;
diff --git a/arch/arm/mach-msm/pil-riva.c b/arch/arm/mach-msm/pil-riva.c
index 96b9882..0f7bc6e 100644
--- a/arch/arm/mach-msm/pil-riva.c
+++ b/arch/arm/mach-msm/pil-riva.c
@@ -349,6 +349,7 @@
panic("Watchdog bite received from Riva");
drv->rst_in_progress = 1;
+ wcnss_riva_log_debug_regs();
subsystem_restart_dev(drv->subsys);
return IRQ_HANDLED;
diff --git a/arch/arm/mach-msm/qdsp5/snd_cad.c b/arch/arm/mach-msm/qdsp5/snd_cad.c
index c0efa3b..1b85b92 100644
--- a/arch/arm/mach-msm/qdsp5/snd_cad.c
+++ b/arch/arm/mach-msm/qdsp5/snd_cad.c
@@ -285,8 +285,9 @@
vmsg.args.device.rx_device = cpu_to_be32(dev.device.rx_device);
vmsg.args.device.tx_device = cpu_to_be32(dev.device.tx_device);
vmsg.args.method = cpu_to_be32(vol.method);
- if (vol.method != SND_METHOD_VOICE) {
- MM_ERR("set volume: invalid method\n");
+ if (vol.method != SND_METHOD_VOICE &&
+ vol.method != SND_METHOD_MIDI) {
+ MM_ERR("set volume: invalid method %d\n", vol.method);
rc = -EINVAL;
break;
}
@@ -437,7 +438,7 @@
vmsg.args.device.rx_device = cpu_to_be32(vol.device.rx_device);
vmsg.args.device.tx_device = cpu_to_be32(vol.device.tx_device);
vmsg.args.method = cpu_to_be32(vol.method);
- if (vol.method != SND_METHOD_VOICE) {
+ if (vol.method != SND_METHOD_VOICE && vol.method != SND_METHOD_MIDI) {
MM_ERR("snd_cad_ioctl set volume: invalid method\n");
rc = -EINVAL;
return rc;
@@ -448,7 +449,7 @@
vmsg.args.client_data = 0;
MM_DBG("snd_cad_set_volume %d %d %d %d\n", vol.device.rx_device,
- vol.device.rx_device, vol.method, vol.volume);
+ vol.device.tx_device, vol.method, vol.volume);
rc = msm_rpc_call(snd_cad_sys->ept,
SND_CAD_SET_VOLUME_PROC,
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c
index e360906..161611c 100644
--- a/arch/arm/mach-msm/timer.c
+++ b/arch/arm/mach-msm/timer.c
@@ -942,11 +942,12 @@
return cs->read(NULL);
}
-int read_current_timer(unsigned long *timer_val)
+static struct delay_timer msm_delay_timer;
+
+static unsigned long msm_read_current_timer(void)
{
struct msm_clock *dgt = &msm_clocks[MSM_CLOCK_DGT];
- *timer_val = msm_read_timer_count(dgt, GLOBAL_TIMER);
- return 0;
+ return msm_read_timer_count(dgt, GLOBAL_TIMER);
}
static void __init msm_sched_clock_init(void)
@@ -1184,13 +1185,13 @@
}
}
-#ifdef ARCH_HAS_READ_CURRENT_TIMER
if (is_smp()) {
__raw_writel(1,
msm_clocks[MSM_CLOCK_DGT].regbase + TIMER_ENABLE);
- set_delay_fn(read_current_timer_delay_loop);
+ msm_delay_timer.freq = dgt->freq;
+ msm_delay_timer.read_current_timer = &msm_read_current_timer;
+ register_current_timer_delay(&msm_delay_timer);
}
-#endif
#ifdef CONFIG_LOCAL_TIMERS
local_timer_register(&msm_lt_ops);
diff --git a/arch/arm/mach-sa1100/sleep.S b/arch/arm/mach-sa1100/sleep.S
index 30cc672..8586374 100644
--- a/arch/arm/mach-sa1100/sleep.S
+++ b/arch/arm/mach-sa1100/sleep.S
@@ -38,9 +38,9 @@
orr r4, r4, #MDREFR_K1DB2
ldr r5, =PPCR
- @ Pre-load __udelay into the I-cache
+ @ Pre-load __loop_udelay into the I-cache
mov r0, #1
- bl __udelay
+ bl __loop_udelay
mov r0, r0
@ The following must all exist in a single cache line to
@@ -53,11 +53,11 @@
@ delay 90us and set CPU PLL to lowest speed
@ fixes resume problem on high speed SA1110
mov r0, #90
- bl __udelay
+ bl __loop_udelay
mov r1, #0
str r1, [r5]
mov r0, #90
- bl __udelay
+ bl __loop_udelay
/*
* SA1110 SDRAM controller workaround. register values:
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index bb4da0f..adab76d 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -23,9 +23,12 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/platform_device.h>
#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
+#include <asm/irq_regs.h>
+#include <asm/pmu.h>
#define CACHE_LINE_SIZE 32
@@ -653,3 +656,315 @@
pl310_resume();
dmb();
}
+
+#ifdef CONFIG_HW_PERF_EVENTS
+/*
+ * L220/PL310 PMU-specific functionality.
+ * TODO: Put this in a separate file and get the l2x0 driver to register
+ * the PMU from l2x0_{of}_init.
+ */
+
+static struct arm_pmu l2x0_pmu;
+
+static u64 l2x0pmu_max_event_id;
+
+static struct perf_event *events[2];
+static unsigned long used_mask[BITS_TO_LONGS(2)];
+static struct pmu_hw_events hw_events = {
+ .events = events,
+ .used_mask = used_mask,
+ .pmu_lock = __RAW_SPIN_LOCK_UNLOCKED(l2x0pmu_hw_events.pmu_lock),
+};
+
+#define COUNTER_CFG_ADDR(idx) (l2x0_base + L2X0_EVENT_CNT0_CFG - 4*idx)
+
+#define COUNTER_CTRL_ADDR (l2x0_base + L2X0_EVENT_CNT_CTRL)
+
+#define COUNTER_ADDR(idx) (l2x0_base + L2X0_EVENT_CNT0_VAL - 4*idx)
+
+static u32 l2x0_read_intr_mask(void)
+{
+ return readl_relaxed(l2x0_base + L2X0_INTR_MASK);
+}
+
+static void l2x0_write_intr_mask(u32 val)
+{
+ writel_relaxed(val, l2x0_base + L2X0_INTR_MASK);
+}
+
+static void l2x0_enable_counter_interrupt(void)
+{
+ u32 intr_mask = l2x0_read_intr_mask();
+ intr_mask |= L2X0_INTR_MASK_ECNTR;
+ l2x0_write_intr_mask(intr_mask);
+}
+
+static void l2x0_disable_counter_interrupt(void)
+{
+ u32 intr_mask = l2x0_read_intr_mask();
+ intr_mask &= ~L2X0_INTR_MASK_ECNTR;
+ l2x0_write_intr_mask(intr_mask);
+}
+
+static void l2x0_clear_interrupts(u32 flags)
+{
+ writel_relaxed(flags, l2x0_base + L2X0_INTR_CLEAR);
+}
+
+static struct pmu_hw_events *l2x0pmu_get_hw_events(void)
+{
+ return &hw_events;
+}
+
+static u32 l2x0pmu_read_ctrl(void)
+{
+ return readl_relaxed(COUNTER_CTRL_ADDR);
+}
+
+static void l2x0pmu_write_ctrl(u32 val)
+{
+ writel_relaxed(val, COUNTER_CTRL_ADDR);
+}
+
+static u32 l2x0pmu_read_cfg(int idx)
+{
+ return readl_relaxed(COUNTER_CFG_ADDR(idx));
+}
+
+static void l2x0pmu_write_cfg(u32 val, int idx)
+{
+ writel_relaxed(val, COUNTER_CFG_ADDR(idx));
+}
+
+static void l2x0pmu_enable_counter(u32 cfg, int idx)
+{
+ cfg |= L2X0_EVENT_CNT_CFG_INTR_OVERFLOW;
+ l2x0pmu_write_cfg(cfg, idx);
+}
+
+static u32 l2x0pmu_disable_counter(int idx)
+{
+ u32 cfg, oldcfg;
+
+ cfg = oldcfg = l2x0pmu_read_cfg(idx);
+ cfg &= ~L2X0_EVENT_CNT_CFG_MASK;
+ cfg &= ~L2X0_EVENT_CNT_CFG_INTR_MASK;
+ l2x0pmu_write_cfg(cfg, idx);
+
+ return oldcfg;
+}
+
+static u32 l2x0pmu_read_counter(int idx)
+{
+ return readl_relaxed(COUNTER_ADDR(idx));
+}
+
+static void l2x0pmu_write_counter(int idx, u32 val)
+{
+ /*
+ * L2X0 counters can only be written to when they are disabled.
+ * As perf core does not disable counters before writing to them
+ * under interrupts, we must do so here.
+ */
+ u32 cfg = l2x0pmu_disable_counter(idx);
+ writel_relaxed(val, COUNTER_ADDR(idx));
+ l2x0pmu_write_cfg(cfg, idx);
+}
+
+static int counter_is_saturated(int idx)
+{
+ return l2x0pmu_read_counter(idx) == 0xFFFFFFFF;
+}
+
+static void l2x0pmu_start(void)
+{
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&hw_events.pmu_lock, flags);
+
+ l2x0_enable_counter_interrupt();
+
+ val = l2x0pmu_read_ctrl();
+ val |= L2X0_EVENT_CNT_ENABLE;
+ l2x0pmu_write_ctrl(val);
+
+ raw_spin_unlock_irqrestore(&hw_events.pmu_lock, flags);
+}
+
+static void l2x0pmu_stop(void)
+{
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&hw_events.pmu_lock, flags);
+
+ val = l2x0pmu_read_ctrl();
+ val &= ~L2X0_EVENT_CNT_ENABLE_MASK;
+ l2x0pmu_write_ctrl(val);
+
+ l2x0_disable_counter_interrupt();
+
+ raw_spin_unlock_irqrestore(&hw_events.pmu_lock, flags);
+}
+
+static void l2x0pmu_enable(struct hw_perf_event *event, int idx, int cpu)
+{
+ unsigned long flags;
+ u32 cfg;
+
+ raw_spin_lock_irqsave(&hw_events.pmu_lock, flags);
+
+ cfg = (event->config_base << L2X0_EVENT_CNT_CFG_SHIFT) &
+ L2X0_EVENT_CNT_CFG_MASK;
+ l2x0pmu_enable_counter(cfg, idx);
+
+ raw_spin_unlock_irqrestore(&hw_events.pmu_lock, flags);
+}
+
+static void l2x0pmu_disable(struct hw_perf_event *event, int idx)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&hw_events.pmu_lock, flags);
+ l2x0pmu_disable_counter(idx);
+ raw_spin_unlock_irqrestore(&hw_events.pmu_lock, flags);
+}
+
+static int l2x0pmu_get_event_idx(struct pmu_hw_events *events,
+ struct hw_perf_event *hwc)
+{
+ int idx;
+
+ /* Counters are identical. Just grab a free one. */
+ for (idx = 0; idx < L2X0_NUM_COUNTERS; ++idx) {
+ if (!test_and_set_bit(idx, hw_events.used_mask))
+ return idx;
+ }
+
+ return -EAGAIN;
+}
+
+/*
+ * As System PMUs are affine to CPU0, the fact that interrupts are disabled
+ * during interrupt handling is enough to serialise our actions and make this
+ * safe. We do not need to grab our pmu_lock here.
+ */
+static irqreturn_t l2x0pmu_handle_irq(int irq, void *dev)
+{
+ irqreturn_t status = IRQ_NONE;
+ struct perf_sample_data data;
+ struct pt_regs *regs;
+ int idx;
+
+ regs = get_irq_regs();
+
+ for (idx = 0; idx < L2X0_NUM_COUNTERS; ++idx) {
+ struct perf_event *event = hw_events.events[idx];
+ struct hw_perf_event *hwc;
+
+ if (!counter_is_saturated(idx))
+ continue;
+
+ status = IRQ_HANDLED;
+
+ hwc = &event->hw;
+
+ /*
+ * The armpmu_* functions expect counters to overflow, but
+ * L220/PL310 counters saturate instead. Fake the overflow
+ * here so the hardware is in sync with what the framework
+ * expects.
+ */
+ l2x0pmu_write_counter(idx, 0);
+
+ armpmu_event_update(event, hwc, idx);
+ data.period = event->hw.last_period;
+
+ if (!armpmu_event_set_period(event, hwc, idx))
+ continue;
+
+ if (perf_event_overflow(event, &data, regs))
+ l2x0pmu_disable_counter(idx);
+ }
+
+ l2x0_clear_interrupts(L2X0_INTR_MASK_ECNTR);
+
+ irq_work_run();
+
+ return status;
+}
+
+static int map_l2x0_raw_event(u64 config)
+{
+ return (config <= l2x0pmu_max_event_id) ? config : -ENOENT;
+}
+
+static int l2x0pmu_map_event(struct perf_event *event)
+{
+ u64 config = event->attr.config;
+ u64 supported_samples = (PERF_SAMPLE_TIME |
+ PERF_SAMPLE_ID |
+ PERF_SAMPLE_PERIOD |
+ PERF_SAMPLE_STREAM_ID |
+ PERF_SAMPLE_RAW);
+
+ if (event->attr.type != l2x0_pmu.pmu.type)
+ return -ENOENT;
+
+ /*
+ * L2x0 counters are global across CPUs.
+ * If userspace asks perf to monitor from multiple CPUs, each CPU will
+ * report the shared total. When summed, this will be the actual value
+ * multiplied by the number of CPUs. We limit monitoring to a single
+ * CPU (0) to prevent confusion stemming from this.
+ */
+ if (event->cpu != 0)
+ return -ENOENT;
+
+ if (event->attr.sample_type & ~supported_samples)
+ return -ENOENT;
+
+ return map_l2x0_raw_event(config);
+}
+
+static struct arm_pmu l2x0_pmu = {
+ .id = ARM_PERF_PMU_ID_L2X0,
+ .type = ARM_PMU_DEVICE_L2CC,
+ .name = "ARM L220/PL310 L2 Cache controller",
+ .start = l2x0pmu_start,
+ .stop = l2x0pmu_stop,
+ .handle_irq = l2x0pmu_handle_irq,
+ .enable = l2x0pmu_enable,
+ .disable = l2x0pmu_disable,
+ .get_event_idx = l2x0pmu_get_event_idx,
+ .read_counter = l2x0pmu_read_counter,
+ .write_counter = l2x0pmu_write_counter,
+ .map_event = l2x0pmu_map_event,
+ .num_events = 2,
+ .max_period = 0xFFFFFFFF,
+ .get_hw_events = l2x0pmu_get_hw_events,
+};
+
+static int __devinit l2x0pmu_device_probe(struct platform_device *pdev)
+{
+ l2x0_pmu.plat_device = pdev;
+ /* FIXME: return code? */
+ armpmu_register(&l2x0_pmu, "l2x0", -1);
+ return 0;
+}
+
+static struct platform_driver l2x0pmu_driver = {
+ .driver = {
+ .name = "l2x0-pmu",
+ },
+ .probe = l2x0pmu_device_probe,
+};
+
+static int __init register_pmu_driver(void)
+{
+ return platform_driver_register(&l2x0pmu_driver);
+}
+device_initcall(register_pmu_driver);
+
+#endif /* CONFIG_HW_PERF_EVENTS */
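
The PMU driver above only probes if a matching platform device exists; a minimal sketch of how a board or SoC setup file might register one (the initcall name is illustrative, the device name must match the driver's "l2x0-pmu"):

	#include <linux/err.h>
	#include <linux/init.h>
	#include <linux/platform_device.h>

	/* Sketch: create the "l2x0-pmu" device so l2x0pmu_device_probe() runs. */
	static int __init example_l2x0_pmu_device_init(void)
	{
		struct platform_device *pdev;

		pdev = platform_device_register_simple("l2x0-pmu", -1, NULL, 0);
		return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
	}
	device_initcall(example_l2x0_pmu_device_init);
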
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index 2b31f47..2d1494d 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -1190,6 +1190,7 @@
mpq8064_cdp MACH_MPQ8064_CDP MPQ8064_CDP 3993
mpq8064_hrd MACH_MPQ8064_HRD MPQ8064_HRD 3994
mpq8064_dtv MACH_MPQ8064_DTV MPQ8064_DTV 3995
+fsm8064_ep MACH_FSM8064_EP FSM8064_EP 3996
msm7627a_qrd3 MACH_MSM7627A_QRD3 MSM7627A_QRD3 4005
msm8625_surf MACH_MSM8625_SURF MSM8625_SURF 4037
msm8625_evb MACH_MSM8625_EVB MSM8625_EVB 4042
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index 5751d28..5fd98ea 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -15,7 +15,7 @@
config IOSCHED_TEST
tristate "Test I/O scheduler"
depends on DEBUG_FS
- default y
+ default m
---help---
The test I/O scheduler is a duplicate of the noop scheduler with
addition of test utlity.
diff --git a/block/test-iosched.c b/block/test-iosched.c
index 71e8669..d2716c84 100644
--- a/block/test-iosched.c
+++ b/block/test-iosched.c
@@ -43,18 +43,7 @@
static LIST_HEAD(blk_dev_test_list);
static struct test_data *ptd;
-/* Get the request after `test_rq' in the test requests list */
-static struct test_request *
-latter_test_request(struct request_queue *q,
- struct test_request *test_rq)
-{
- struct test_data *td = q->elevator->elevator_data;
- if (test_rq->queuelist.next == &td->test_queue)
- return NULL;
- return list_entry(test_rq->queuelist.next, struct test_request,
- queuelist);
-}
/**
* test_iosched_get_req_queue() - returns the request queue
@@ -77,6 +66,10 @@
{
if (!ptd)
return;
+ test_pr_info("%s: mark test is completed, test_count=%d,",
+ __func__, ptd->test_count);
+ test_pr_info("%s: reinsert_count=%d, dispatched_count=%d",
+ __func__, ptd->reinsert_count, ptd->dispatched_count);
ptd->test_state = TEST_COMPLETED;
wake_up(&ptd->wait_q);
@@ -87,18 +80,32 @@
static void check_test_completion(void)
{
struct test_request *test_rq;
- struct request *rq;
- list_for_each_entry(test_rq, &ptd->test_queue, queuelist) {
- rq = test_rq->rq;
+ if (!ptd)
+ return;
+
+ list_for_each_entry(test_rq, &ptd->dispatched_queue, queuelist)
if (!test_rq->req_completed)
return;
+
+ if (!list_empty(&ptd->test_queue)
+ || !list_empty(&ptd->reinsert_queue)
+ || !list_empty(&ptd->urgent_queue)) {
+ test_pr_info("%s: Test still not completed,", __func__);
+ test_pr_info("%s: test_count=%d, reinsert_count=%d",
+ __func__, ptd->test_count, ptd->reinsert_count);
+ test_pr_info("%s: dispatched_count=%d, urgent_count=%d",
+ __func__, ptd->dispatched_count, ptd->urgent_count);
+ return;
}
ptd->test_info.test_duration = jiffies -
ptd->test_info.test_duration;
- test_pr_info("%s: Test is completed", __func__);
+ test_pr_info("%s: Test is completed, test_count=%d, reinsert_count=%d,",
+ __func__, ptd->test_count, ptd->reinsert_count);
+ test_pr_info("%s: dispatched_count=%d",
+ __func__, ptd->dispatched_count);
test_iosched_mark_test_completion();
}
@@ -111,7 +118,6 @@
{
if (err)
clear_bit(BIO_UPTODATE, &bio->bi_flags);
-
bio_put(bio);
}
@@ -221,7 +227,10 @@
"%s: added request %d to the test requests list, type = %d",
__func__, test_rq->req_id, req_unique);
+ spin_lock_irq(ptd->req_q->queue_lock);
list_add_tail(&test_rq->queuelist, &ptd->test_queue);
+ ptd->test_count++;
+ spin_unlock_irq(ptd->req_q->queue_lock);
return 0;
}
@@ -253,8 +262,7 @@
}
/**
- * test_iosched_add_wr_rd_test_req() - Create and queue a
- * read/write request.
+ * test_iosched_create_test_req() - Create a read/write request.
* @is_err_expcted: A flag to indicate if this request
* should succeed or not
* @direction: READ/WRITE
@@ -278,34 +286,33 @@
* request memory is freed at the end of the test and the
* allocated BIO memory is freed by end_test_bio.
*/
-int test_iosched_add_wr_rd_test_req(int is_err_expcted,
+struct test_request *test_iosched_create_test_req(int is_err_expcted,
int direction, int start_sec,
int num_bios, int pattern, rq_end_io_fn *end_req_io)
{
- struct request *rq = NULL;
- struct test_request *test_rq = NULL;
- int rw_flags = 0;
- int buf_size = 0;
- int ret = 0, i = 0;
+ struct request *rq;
+ struct test_request *test_rq;
+ int rw_flags, buf_size;
+ int ret = 0, i;
unsigned int *bio_ptr = NULL;
struct bio *bio = NULL;
if (!ptd)
- return -ENODEV;
+ return NULL;
rw_flags = direction;
rq = blk_get_request(ptd->req_q, rw_flags, GFP_KERNEL);
if (!rq) {
test_pr_err("%s: Failed to allocate a request", __func__);
- return -ENODEV;
+ return NULL;
}
test_rq = kzalloc(sizeof(struct test_request), GFP_KERNEL);
if (!test_rq) {
test_pr_err("%s: Failed to allocate test request", __func__);
blk_put_request(rq);
- return -ENODEV;
+ return NULL;
}
buf_size = sizeof(unsigned int) * BIO_U32_SIZE * num_bios;
@@ -341,6 +348,7 @@
rq->end_io = end_test_req;
rq->__sector = start_sec;
rq->cmd_type |= REQ_TYPE_FS;
+ rq->cmd_flags |= REQ_SORTED; /* do we need this? */
if (rq->bio) {
rq->bio->bi_sector = start_sec;
@@ -359,16 +367,61 @@
test_rq->is_err_expected = is_err_expcted;
rq->elv.priv[0] = (void *)test_rq;
- test_pr_debug(
- "%s: added request %d to the test requests list, buf_size=%d",
- __func__, test_rq->req_id, buf_size);
+ test_pr_debug("%s: created test request %d, buf_size=%d",
+ __func__, test_rq->req_id, buf_size);
- list_add_tail(&test_rq->queuelist, &ptd->test_queue);
-
- return 0;
+ return test_rq;
err:
blk_put_request(rq);
kfree(test_rq->bios_buffer);
+ return NULL;
+}
+EXPORT_SYMBOL(test_iosched_create_test_req);
+
+
+/**
+ * test_iosched_add_wr_rd_test_req() - Create and queue a
+ * read/write request.
+ * @is_err_expcted: A flag to indicate if this request
+ * should succeed or not
+ * @direction: READ/WRITE
+ * @start_sec: start address of the first bio
+ * @num_bios: number of BIOs to be allocated for the
+ * request
+ * @pattern: A pattern, to be written into the write
+ * requests data buffer. In case of READ
+ * request, the given pattern is kept as
+ * the expected pattern. The expected
+ * pattern will be compared in the test
+ * check result function. If no comparison
+ * is required, set pattern to
+ * TEST_NO_PATTERN.
+ * @end_req_io: specific completion callback. When not
+ * set, the default callback will be used
+ *
+ * This function allocates the test request and the block
+ * request and calls blk_rq_map_kern which allocates the
+ * required BIO. Upon success the new request is added to the
+ * test_queue. The allocated test request and the block request
+ * memory is freed at the end of the test and the allocated BIO
+ * memory is freed by end_test_bio.
+ */
+int test_iosched_add_wr_rd_test_req(int is_err_expcted,
+ int direction, int start_sec,
+ int num_bios, int pattern, rq_end_io_fn *end_req_io)
+{
+ struct test_request *test_rq = NULL;
+
+ test_rq = test_iosched_create_test_req(is_err_expcted,
+ direction, start_sec,
+ num_bios, pattern, end_req_io);
+ if (test_rq) {
+ spin_lock_irq(ptd->req_q->queue_lock);
+ list_add_tail(&test_rq->queuelist, &ptd->test_queue);
+ ptd->test_count++;
+ spin_unlock_irq(ptd->req_q->queue_lock);
+ return 0;
+ }
return -ENODEV;
}
EXPORT_SYMBOL(test_iosched_add_wr_rd_test_req);
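As a rough illustration of the API documented above, the following sketch shows how a block test utility might queue a few write requests; the helper name, sector offsets and bio counts are assumptions, and the header path is a guess at where the test-iosched prototypes live.

	#include <linux/blkdev.h>
	#include <linux/test-iosched.h>	/* assumed location of the prototypes */

	/* Queue four write requests of two BIOs each, no data-pattern check. */
	static int queue_sample_write_requests(void)
	{
		int i, ret;

		for (i = 0; i < 4; i++) {
			ret = test_iosched_add_wr_rd_test_req(0 /* no error expected */,
					WRITE, i * 16 /* start_sec */,
					2 /* num_bios */, TEST_NO_PATTERN,
					NULL /* use the default completion callback */);
			if (ret)
				return ret;
		}
		return 0;
	}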
@@ -431,12 +484,18 @@
static int check_test_result(struct test_data *td)
{
struct test_request *test_rq;
- struct request *rq;
int res = 0;
static int run;
- list_for_each_entry(test_rq, &ptd->test_queue, queuelist) {
- rq = test_rq->rq;
+ if (!ptd)
+ goto err;
+
+ list_for_each_entry(test_rq, &ptd->dispatched_queue, queuelist) {
+ if (!test_rq->rq) {
+ test_pr_info("%s: req_id %d contains an empty req",
+ __func__, test_rq->req_id);
+ continue;
+ }
if (!test_rq->req_completed) {
test_pr_err("%s: rq %d not completed", __func__,
test_rq->req_id);
@@ -509,27 +568,25 @@
return ret;
}
- /*
- * Set the next_req pointer to the first request in the test requests
- * list
- */
- if (!list_empty(&td->test_queue))
- td->next_req = list_entry(td->test_queue.next,
- struct test_request, queuelist);
__blk_run_queue(td->req_q);
return 0;
}
-/* Free the allocated test requests, their requests and BIOs buffer */
-static void free_test_requests(struct test_data *td)
+/*
+ * free_test_queue() - Free all allocated test requests in the given test_queue:
+ * free their requests and BIOs buffers
+ * @test_queue: the test queue to be freed
+ */
+static void free_test_queue(struct list_head *test_queue)
{
struct test_request *test_rq;
struct bio *bio;
- while (!list_empty(&td->test_queue)) {
- test_rq = list_entry(td->test_queue.next, struct test_request,
- queuelist);
+ while (!list_empty(test_queue)) {
+ test_rq = list_entry(test_queue->next, struct test_request,
+ queuelist);
+
list_del_init(&test_rq->queuelist);
/*
* If the request was not completed we need to free its BIOs
@@ -538,7 +595,7 @@
if (!test_rq->req_completed) {
test_pr_info(
"%s: Freeing memory of an uncompleted request",
- __func__);
+ __func__);
list_del_init(&test_rq->rq->queuelist);
while ((bio = test_rq->rq->bio) != NULL) {
test_rq->rq->bio = bio->bi_next;
@@ -552,8 +609,39 @@
}
/*
- * Do post test operations.
- * Free the allocated test requests, their requests and BIOs buffer.
+ * free_test_requests() - Free all allocated test requests in
+ * all test queues in the given test_data.
+ * @td: The test_data struct whose test requests will be
+ * freed.
+ */
+static void free_test_requests(struct test_data *td)
+{
+ if (!td)
+ return;
+
+ if (td->urgent_count) {
+ free_test_queue(&td->urgent_queue);
+ td->urgent_count = 0;
+ }
+ if (td->test_count) {
+ free_test_queue(&td->test_queue);
+ td->test_count = 0;
+ }
+ if (td->dispatched_count) {
+ free_test_queue(&td->dispatched_queue);
+ td->dispatched_count = 0;
+ }
+ if (td->reinsert_count) {
+ free_test_queue(&td->reinsert_queue);
+ td->reinsert_count = 0;
+ }
+}
+
+/*
+ * post_test() - Do post test operations. Free the allocated
+ * test requests, their requests and BIOs buffer.
+ * @td: The test_data struct for the test that has
+ * ended.
*/
static int post_test(struct test_data *td)
{
@@ -641,7 +729,6 @@
memcpy(&ptd->test_info, t_info, sizeof(struct test_info));
- ptd->next_req = NULL;
ptd->test_result = TEST_NO_RESULT;
ptd->num_of_write_bios = 0;
@@ -886,6 +973,45 @@
{
list_del_init(&next->queuelist);
}
+/*
+ * test_dispatch_from(): Dispatch a request from @queue to the @dispatched_queue.
+ * Also update the dispatched_count counter.
+ */
+static int test_dispatch_from(struct request_queue *q,
+ struct list_head *queue, unsigned int *count)
+{
+ struct test_request *test_rq;
+ struct request *rq;
+ int ret = 0;
+
+ if (!ptd)
+ goto err;
+
+ spin_lock_irq(&ptd->lock);
+ if (!list_empty(queue)) {
+ test_rq = list_entry(queue->next, struct test_request,
+ queuelist);
+ rq = test_rq->rq;
+ if (!rq) {
+ pr_err("%s: null request, returning", __func__);
+ spin_unlock_irq(&ptd->lock);
+ goto err;
+ }
+ list_move_tail(&test_rq->queuelist, &ptd->dispatched_queue);
+ ptd->dispatched_count++;
+ (*count)--;
+ spin_unlock_irq(&ptd->lock);
+
+ print_req(rq);
+ elv_dispatch_sort(q, rq);
+ ret = 1;
+ goto err;
+ }
+ spin_unlock_irq(&ptd->lock);
+
+err:
+ return ret;
+}
/*
* Dispatch a test request in case there is a running test. Otherwise, dispatch
@@ -895,6 +1021,7 @@
{
struct test_data *td = q->elevator->elevator_data;
struct request *rq = NULL;
+ int ret = 0;
switch (td->test_state) {
case TEST_IDLE:
@@ -903,27 +1030,39 @@
queuelist);
list_del_init(&rq->queuelist);
elv_dispatch_sort(q, rq);
- return 1;
+ ret = 1;
+ goto exit;
}
break;
case TEST_RUNNING:
- if (td->next_req) {
- rq = td->next_req->rq;
- td->next_req =
- latter_test_request(td->req_q, td->next_req);
- if (!rq)
- return 0;
- print_req(rq);
- elv_dispatch_sort(q, rq);
- return 1;
+ if (test_dispatch_from(q, &td->urgent_queue,
+ &td->urgent_count)) {
+ test_pr_debug("%s: Dispatched from urgent_queue, urgent_count=%d",
+ __func__, ptd->urgent_count);
+ ret = 1;
+ goto exit;
+ }
+ if (test_dispatch_from(q, &td->reinsert_queue,
+ &td->reinsert_count)) {
+ test_pr_debug("%s: Dispatched from reinsert_queue, reinsert_count=%d",
+ __func__, ptd->reinsert_count);
+ ret = 1;
+ goto exit;
+ }
+ if (test_dispatch_from(q, &td->test_queue, &td->test_count)) {
+ test_pr_debug("%s: Dispatched from test_queue, test_count=%d",
+ __func__, ptd->test_count);
+ ret = 1;
+ goto exit;
}
break;
case TEST_COMPLETED:
default:
- return 0;
+ break;
}
- return 0;
+exit:
+ return ret;
}
static void test_add_request(struct request_queue *q, struct request *rq)
@@ -976,6 +1115,9 @@
memset((void *)ptd, 0, sizeof(struct test_data));
INIT_LIST_HEAD(&ptd->queue);
INIT_LIST_HEAD(&ptd->test_queue);
+ INIT_LIST_HEAD(&ptd->dispatched_queue);
+ INIT_LIST_HEAD(&ptd->reinsert_queue);
+ INIT_LIST_HEAD(&ptd->urgent_queue);
init_waitqueue_head(&ptd->wait_q);
ptd->req_q = q;
@@ -1010,7 +1152,79 @@
kfree(td);
}
+/**
+ * test_get_test_data() - Returns a pointer to the test_data
+ * struct which keeps the current test data.
+ *
+ */
+struct test_data *test_get_test_data(void)
+{
+ return ptd;
+}
+EXPORT_SYMBOL(test_get_test_data);
+
+static bool test_urgent_pending(struct request_queue *q)
+{
+ return !list_empty(&ptd->urgent_queue);
+}
+
+/**
+ * test_iosched_add_urgent_req() - Add an urgent test_request.
+ * First mark the request as urgent, then add it to the
+ * urgent_queue test queue.
+ * @test_rq: pointer to the urgent test_request to be
+ * added.
+ *
+ */
+void test_iosched_add_urgent_req(struct test_request *test_rq)
+{
+ spin_lock_irq(&ptd->lock);
+ blk_mark_rq_urgent(test_rq->rq);
+ list_add_tail(&test_rq->queuelist, &ptd->urgent_queue);
+ ptd->urgent_count++;
+ spin_unlock_irq(&ptd->lock);
+}
+EXPORT_SYMBOL(test_iosched_add_urgent_req);
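To show how the two new exports combine, here is a hedged sketch that creates a request without queueing it and then pushes it through the urgent path; the helper name and the READ parameters are illustrative.

	/* Create a single-BIO read request and insert it via the urgent queue. */
	static int queue_sample_urgent_read(void)
	{
		struct test_request *test_rq;

		test_rq = test_iosched_create_test_req(0 /* no error expected */,
				READ, 512 /* start_sec */,
				1 /* num_bios */, TEST_NO_PATTERN, NULL);
		if (!test_rq)
			return -ENODEV;

		test_iosched_add_urgent_req(test_rq);
		return 0;
	}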
+
+/**
+ * test_reinsert_req() - Moves the @rq request from
+ * @dispatched_queue into @reinsert_queue.
+ * The @rq must be in @dispatched_queue.
+ * @q: request queue
+ * @rq: request to be reinserted
+ *
+ */
+static int test_reinsert_req(struct request_queue *q,
+ struct request *rq)
+{
+ struct test_request *test_rq;
+ int ret = -EINVAL;
+
+ if (!ptd)
+ goto exit;
+
+ if (list_empty(&ptd->dispatched_queue)) {
+ test_pr_err("%s: dispatched_queue is empty", __func__);
+ goto exit;
+ }
+
+ list_for_each_entry(test_rq, &ptd->dispatched_queue, queuelist) {
+ if (test_rq->rq == rq) {
+ list_move(&test_rq->queuelist, &ptd->reinsert_queue);
+ ptd->dispatched_count--;
+ ptd->reinsert_count++;
+ ret = 0;
+ break;
+ }
+ }
+
+exit:
+ return ret;
+}
+
static struct elevator_type elevator_test_iosched = {
+
.ops = {
.elevator_merge_req_fn = test_merged_requests,
.elevator_dispatch_fn = test_dispatch_requests,
@@ -1019,6 +1233,8 @@
.elevator_latter_req_fn = test_latter_request,
.elevator_init_fn = test_init_queue,
.elevator_exit_fn = test_exit_queue,
+ .elevator_is_urgent_fn = test_urgent_pending,
+ .elevator_reinsert_req_fn = test_reinsert_req,
},
.elevator_name = "test-iosched",
.elevator_owner = THIS_MODULE,
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 4657c37..c758b3a 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -306,7 +306,7 @@
config CRYPTO_DEV_QCE
tristate "Qualcomm Crypto Engine (QCE) module"
select CRYPTO_DEV_QCE40 if ARCH_MSM8960 || ARCH_MSM9615
- select CRYPTO_DEV_QCE50 if ARCH_MSM8974
+ select CRYPTO_DEV_QCE50 if ARCH_MSM8974 || ARCH_MSM9625
default n
help
This driver supports Qualcomm Crypto Engine in MSM7x30, MSM8660
diff --git a/drivers/gpu/ion/ion_iommu_heap.c b/drivers/gpu/ion/ion_iommu_heap.c
index 5483054..0e47d54 100644
--- a/drivers/gpu/ion/ion_iommu_heap.c
+++ b/drivers/gpu/ion/ion_iommu_heap.c
@@ -138,7 +138,7 @@
return NULL;
if (!ION_IS_CACHED(buffer->flags))
- page_prot = pgprot_noncached(page_prot);
+ page_prot = pgprot_writecombine(page_prot);
buffer->vaddr = vmap(data->pages, data->nrpages, VM_IOREMAP, page_prot);
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index d07fb96..a6eae7e 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -32,7 +32,7 @@
#include <linux/err.h>
static struct kset *iommu_group_kset;
-static struct ida iommu_group_ida;
+static struct idr iommu_group_idr;
static struct mutex iommu_group_mutex;
struct iommu_group {
@@ -126,7 +126,7 @@
group->iommu_data_release(group->iommu_data);
mutex_lock(&iommu_group_mutex);
- ida_remove(&iommu_group_ida, group->id);
+ idr_remove(&iommu_group_idr, group->id);
mutex_unlock(&iommu_group_mutex);
kfree(group->name);
@@ -167,22 +167,27 @@
mutex_lock(&iommu_group_mutex);
again:
- if (unlikely(0 == ida_pre_get(&iommu_group_ida, GFP_KERNEL))) {
+ if (unlikely(0 == idr_pre_get(&iommu_group_idr, GFP_KERNEL))) {
kfree(group);
mutex_unlock(&iommu_group_mutex);
return ERR_PTR(-ENOMEM);
}
- if (-EAGAIN == ida_get_new(&iommu_group_ida, &group->id))
+ ret = idr_get_new_above(&iommu_group_idr, group, 1, &group->id);
+ if (ret == -EAGAIN)
goto again;
-
mutex_unlock(&iommu_group_mutex);
+ if (ret == -ENOSPC) {
+ kfree(group);
+ return ERR_PTR(ret);
+ }
+
ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
NULL, "%d", group->id);
if (ret) {
mutex_lock(&iommu_group_mutex);
- ida_remove(&iommu_group_ida, group->id);
+ idr_remove(&iommu_group_idr, group->id);
mutex_unlock(&iommu_group_mutex);
kfree(group);
return ERR_PTR(ret);
@@ -426,6 +431,37 @@
EXPORT_SYMBOL_GPL(iommu_group_get);
/**
+ * iommu_group_find - Find and return the group based on the group name.
+ * Also increment the reference count.
+ * @name: the name of the group
+ *
+ * This function is called by iommu drivers and clients to get the group
+ * by the specified name. If found, the group is returned with its
+ * reference count incremented; otherwise NULL is returned.
+ */
+struct iommu_group *iommu_group_find(const char *name)
+{
+ struct iommu_group *group;
+ int next = 0;
+
+ mutex_lock(&iommu_group_mutex);
+ while ((group = idr_get_next(&iommu_group_idr, &next))) {
+ if (group->name) {
+ if (strcmp(group->name, name) == 0)
+ break;
+ }
+ ++next;
+ }
+ mutex_unlock(&iommu_group_mutex);
+
+ if (group)
+ kobject_get(group->devices_kobj);
+
+ return group;
+}
+EXPORT_SYMBOL_GPL(iommu_group_find);
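A minimal sketch of a client using the new lookup, assuming a group was previously created with the name "lpass_iommu"; the group name, device pointer and helper name are placeholders. The reference taken by iommu_group_find() is dropped with iommu_group_put().

	#include <linux/iommu.h>

	static int add_dev_to_named_group(struct device *dev)
	{
		struct iommu_group *group;
		int ret;

		group = iommu_group_find("lpass_iommu");	/* takes a reference */
		if (!group)
			return -ENODEV;

		ret = iommu_group_add_device(group, dev);

		iommu_group_put(group);		/* drop the reference from the find */
		return ret;
	}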
+
+/**
* iommu_group_put - Decrement group reference
* @group: the group to use
*
@@ -888,7 +924,7 @@
{
iommu_group_kset = kset_create_and_add("iommu_groups",
NULL, kernel_kobj);
- ida_init(&iommu_group_ida);
+ idr_init(&iommu_group_idr);
mutex_init(&iommu_group_mutex);
BUG_ON(!iommu_group_kset);
diff --git a/drivers/iommu/msm_iommu-v2.c b/drivers/iommu/msm_iommu-v2.c
index 567b9ba..e3aa30c 100644
--- a/drivers/iommu/msm_iommu-v2.c
+++ b/drivers/iommu/msm_iommu-v2.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012 Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -225,13 +225,79 @@
SET_SMR_VALID(base, i, 0);
}
-static void __program_context(void __iomem *base, int ctx, int ncb,
- phys_addr_t pgtable, int redirect,
- u32 *sids, int len, bool is_secure)
+static void msm_iommu_assign_ASID(const struct msm_iommu_drvdata *iommu_drvdata,
+ struct msm_iommu_ctx_drvdata *curr_ctx,
+ phys_addr_t pgtable)
+{
+ struct platform_device *pdev;
+ struct device_node *child;
+ struct msm_iommu_ctx_drvdata *ctx;
+ unsigned int found = 0;
+ void __iomem *base = iommu_drvdata->base;
+ struct device_node *iommu_node = iommu_drvdata->dev->of_node;
+ unsigned int asid;
+ unsigned int ncb = iommu_drvdata->ncb;
+
+ /* Find if this page table is used elsewhere, and re-use ASID */
+ for_each_child_of_node(iommu_node, child) {
+ pdev = of_find_device_by_node(child);
+ ctx = dev_get_drvdata(&pdev->dev);
+
+ if (ctx->secure_context) {
+ of_dev_put(pdev);
+ continue;
+ }
+
+ if ((ctx != curr_ctx) &&
+ (GET_CB_TTBR0_ADDR(base, ctx->num) == pgtable)) {
+ SET_CB_CONTEXTIDR_ASID(base, curr_ctx->num, ctx->asid);
+ curr_ctx->asid = ctx->asid;
+ found = 1;
+ of_dev_put(pdev);
+ of_node_put(child);
+ break;
+ }
+ of_dev_put(pdev);
+ }
+
+ /* If page table is new, find an unused ASID */
+ if (!found) {
+ for (asid = 1; asid < ncb + 1; ++asid) {
+ found = 0;
+ for_each_child_of_node(iommu_node, child) {
+ pdev = of_find_device_by_node(child);
+ ctx = dev_get_drvdata(&pdev->dev);
+
+ if (ctx != curr_ctx && ctx->asid == asid) {
+ found = 1;
+ of_dev_put(pdev);
+ of_node_put(child);
+ break;
+ }
+ of_dev_put(pdev);
+ }
+ if (!found) {
+ SET_CB_CONTEXTIDR_ASID(base, curr_ctx->num,
+ asid);
+ curr_ctx->asid = asid;
+ break;
+ }
+ }
+ BUG_ON(found);
+ }
+}
+
+static void __program_context(struct msm_iommu_drvdata *iommu_drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata,
+ phys_addr_t pgtable, int redirect, bool is_secure)
{
unsigned int prrr, nmrr;
unsigned int pn;
- int i, j, found, num = 0, smt_size;
+ int num = 0, i, smt_size;
+ void __iomem *base = iommu_drvdata->base;
+ unsigned int ctx = ctx_drvdata->num;
+ u32 *sids = ctx_drvdata->sids;
+ int len = ctx_drvdata->nsid;
__reset_context(base, ctx);
@@ -308,33 +374,7 @@
}
- /* Find if this page table is used elsewhere, and re-use ASID */
- found = 0;
- for (i = 0; i < ncb; i++)
- if ((GET_CB_TTBR0_ADDR(base, i) == pn) && (i != ctx)) {
- SET_CB_CONTEXTIDR_ASID(base, ctx, \
- GET_CB_CONTEXTIDR_ASID(base, i));
- found = 1;
- break;
- }
-
- /* If page table is new, find an unused ASID */
- if (!found) {
- for (i = 0; i < ncb; i++) {
- found = 0;
- for (j = 0; j < ncb; j++) {
- if (GET_CB_CONTEXTIDR_ASID(base, j) == i &&
- j != ctx)
- found = 1;
- }
-
- if (!found) {
- SET_CB_CONTEXTIDR_ASID(base, ctx, i);
- break;
- }
- }
- BUG_ON(found);
- }
+ msm_iommu_assign_ASID(iommu_drvdata, ctx_drvdata, pn);
/* Enable the MMU */
SET_CB_SCTLR_M(base, ctx, 1);
@@ -391,9 +431,11 @@
ctx = dev_get_drvdata(&pdev->dev);
if (ctx->attached_domain) {
+ of_dev_put(pdev);
of_node_put(child);
return 1;
}
+ of_dev_put(pdev);
}
return 0;
@@ -461,10 +503,9 @@
}
}
- __program_context(iommu_drvdata->base, ctx_drvdata->num,
- iommu_drvdata->ncb, __pa(priv->pt.fl_table),
- priv->pt.redirect, ctx_drvdata->sids, ctx_drvdata->nsid,
- is_secure);
+ __program_context(iommu_drvdata, ctx_drvdata, __pa(priv->pt.fl_table),
+ priv->pt.redirect, is_secure);
+
__disable_clocks(iommu_drvdata);
list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
@@ -500,8 +541,8 @@
is_secure = iommu_drvdata->sec_id != -1;
- SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num,
- GET_CB_CONTEXTIDR_ASID(iommu_drvdata->base, ctx_drvdata->num));
+ SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num, ctx_drvdata->asid);
+ ctx_drvdata->asid = -1;
__reset_context(iommu_drvdata->base, ctx_drvdata->num);
if (!is_secure)
diff --git a/drivers/iommu/msm_iommu_dev-v2.c b/drivers/iommu/msm_iommu_dev-v2.c
index cf30500..7961280 100644
--- a/drivers/iommu/msm_iommu_dev-v2.c
+++ b/drivers/iommu/msm_iommu_dev-v2.c
@@ -244,6 +244,9 @@
}
ctx_drvdata->nsid = nsid;
+ ctx_drvdata->secure_context = of_property_read_bool(pdev->dev.of_node,
+ "qcom,secure-context");
+ ctx_drvdata->asid = -1;
return 0;
}
diff --git a/drivers/media/dvb/dvb-core/demux.h b/drivers/media/dvb/dvb-core/demux.h
index f802a38..ed657d7 100644
--- a/drivers/media/dvb/dvb-core/demux.h
+++ b/drivers/media/dvb/dvb-core/demux.h
@@ -188,6 +188,7 @@
struct dmx_demux *parent; /* Back-pointer */
struct data_buffer buffer;
void *priv; /* Pointer to private data of the API client */
+ struct dmx_decoder_buffers *decoder_buffers;
int (*set) (struct dmx_ts_feed *feed,
u16 pid,
int type,
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
index 507c014..e956170 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/drivers/media/dvb/dvb-core/dmxdev.c
@@ -31,7 +31,7 @@
#include <linux/ioctl.h>
#include <linux/wait.h>
#include <linux/mm.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include "dmxdev.h"
@@ -41,6 +41,8 @@
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
+#define DMX_DEFAULT_DECODER_BUFFER_SIZE (32768)
+
#define dprintk if (debug) printk
static int dvb_dmxdev_buffer_write(struct dvb_ringbuffer *buf,
@@ -1351,18 +1353,27 @@
return 0;
}
-static int dvb_dmxdev_set_pes_buffer_size(struct dmxdev_filter *dmxdevfilter,
- unsigned long size)
+static int dvb_dmxdev_set_decoder_buffer_size(
+ struct dmxdev_filter *dmxdevfilter,
+ unsigned long size)
{
- if (dmxdevfilter->pes_buffer_size == size)
- return 0;
- if (!size)
+ if (0 == size)
return -EINVAL;
+
+ if (dmxdevfilter->decoder_buffers.buffers_size == size)
+ return 0;
+
if (dmxdevfilter->state >= DMXDEV_STATE_GO)
return -EBUSY;
- dmxdevfilter->pes_buffer_size = size;
-
+ /*
+ * In case decoder buffers were already set before to some external
+ * buffers, setting the decoder buffer size alone implies transition
+ * to internal buffer mode.
+ */
+ dmxdevfilter->decoder_buffers.buffers_size = size;
+ dmxdevfilter->decoder_buffers.buffers_num = 0;
+ dmxdevfilter->decoder_buffers.is_linear = 0;
return 0;
}
@@ -1524,8 +1535,6 @@
{
struct dvb_ringbuffer *buf = &dmxdevfilter->buffer;
- if (!buf->data)
- return -EINVAL;
spin_lock_irq(&dmxdevfilter->dev->lock);
@@ -1550,6 +1559,11 @@
return ret;
}
+ if (!buf->data) {
+ spin_unlock_irq(&dmxdevfilter->dev->lock);
+ return -EINVAL;
+ }
+
dmx_buffer_status->error = buf->error;
if (buf->error) {
if (buf->error == -EOVERFLOW) {
@@ -2253,6 +2267,9 @@
if (!dmxdev->dvr_feeds_count)
dmxdev->dvr_feed = filter;
dmxdev->dvr_feeds_count++;
+ } else if (filter->params.pes.output == DMX_OUT_DECODER) {
+ tsfeed->decoder_buffers = &filter->decoder_buffers;
+ tsfeed->buffer.priv_handle = filter->priv_buff_handle;
} else {
tsfeed->buffer.ringbuff = &filter->buffer;
tsfeed->buffer.priv_handle = filter->priv_buff_handle;
@@ -2269,7 +2286,8 @@
ret = tsfeed->set(tsfeed, feed->pid,
ts_type, ts_pes,
- filter->pes_buffer_size, timeout);
+ filter->decoder_buffers.buffers_size,
+ timeout);
if (ret < 0) {
dmxdev->demux->release_ts_feed(dmxdev->demux, tsfeed);
return ret;
@@ -2477,6 +2495,11 @@
mutex_init(&dmxdevfilter->mutex);
file->private_data = dmxdevfilter;
+ memset(&dmxdevfilter->decoder_buffers,
+ 0,
+ sizeof(dmxdevfilter->decoder_buffers));
+ dmxdevfilter->decoder_buffers.buffers_size =
+ DMX_DEFAULT_DECODER_BUFFER_SIZE;
dmxdevfilter->buffer_mode = DMX_BUFFER_MODE_INTERNAL;
dmxdevfilter->priv_buff_handle = NULL;
dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192);
@@ -2486,10 +2509,7 @@
dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED);
init_timer(&dmxdevfilter->timer);
- dmxdevfilter->pes_buffer_size = 32768;
-
dmxdevfilter->dmx_tsp_format = DMX_TSP_FORMAT_188;
-
dvbdev->users++;
mutex_unlock(&dmxdev->mutex);
@@ -2515,7 +2535,10 @@
vfree(mem);
}
- if ((dmxdevfilter->buffer_mode == DMX_BUFFER_MODE_EXTERNAL) &&
+ /* Decoder filters do not map buffers via priv_buff_handle */
+ if ((DMXDEV_TYPE_PES == dmxdevfilter->type) &&
+ (DMX_OUT_DECODER != dmxdevfilter->params.pes.output) &&
+ (dmxdevfilter->buffer_mode == DMX_BUFFER_MODE_EXTERNAL) &&
(dmxdevfilter->priv_buff_handle)) {
dmxdev->demux->unmap_buffer(dmxdev->demux,
dmxdevfilter->priv_buff_handle);
@@ -2653,6 +2676,47 @@
return 0;
}
+static int dvb_dmxdev_set_decoder_buffer(struct dmxdev *dmxdev,
+ struct dmxdev_filter *filter,
+ struct dmx_decoder_buffers *buffs)
+{
+ int i;
+ struct dmx_decoder_buffers *dec_buffs;
+ struct dmx_caps caps;
+
+ if (NULL == dmxdev || NULL == filter || NULL == buffs)
+ return -EINVAL;
+
+ dec_buffs = &filter->decoder_buffers;
+ dmxdev->demux->get_caps(dmxdev->demux, &caps);
+
+ if (0 == buffs->buffers_size ||
+ (buffs->is_linear && buffs->buffers_num <= 1))
+ return -EINVAL;
+
+ if (0 == buffs->buffers_num) {
+ /* Internal mode - linear buffers not supported in this mode */
+ if (!(caps.decoder.flags & DMX_BUFFER_INTERNAL_SUPPORT) ||
+ buffs->is_linear)
+ return -EINVAL;
+ } else {
+ /* External buffer(s) mode */
+ if ((!(caps.decoder.flags & DMX_BUFFER_LINEAR_GROUP_SUPPORT) &&
+ buffs->buffers_num > 1) ||
+ !(caps.decoder.flags & DMX_BUFFER_EXTERNAL_SUPPORT) ||
+ buffs->buffers_num > caps.decoder.max_buffer_num)
+ return -EINVAL;
+
+ dec_buffs->is_linear = buffs->is_linear;
+ dec_buffs->buffers_num = buffs->buffers_num;
+ dec_buffs->buffers_size = buffs->buffers_size;
+ for (i = 0; i < dec_buffs->buffers_num; i++)
+ dec_buffs->handles[i] = buffs->handles[i];
+ }
+
+ return 0;
+}
+
static ssize_t dvb_dmxdev_read_sec(struct dmxdev_filter *dfil,
struct file *file, char __user *buf,
size_t count, loff_t *ppos)
@@ -2896,7 +2960,7 @@
return -ERESTARTSYS;
}
- ret = dvb_dmxdev_set_pes_buffer_size(dmxdevfilter, arg);
+ ret = dvb_dmxdev_set_decoder_buffer_size(dmxdevfilter, arg);
mutex_unlock(&dmxdevfilter->mutex);
break;
@@ -2944,6 +3008,15 @@
mutex_unlock(&dmxdevfilter->mutex);
break;
+ case DMX_SET_DECODER_BUFFER:
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ ret = dvb_dmxdev_set_decoder_buffer(dmxdev, dmxdevfilter, parg);
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
+
default:
ret = -EINVAL;
break;
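From user space the new ioctl is driven roughly as below; this is a sketch assuming the dmx_decoder_buffers layout added by this change and two previously allocated ION buffer fds, and the helper name is illustrative.

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/dvb/dmx.h>

	/* Configure a decoder filter with two external linear ION buffers. */
	static int set_external_decoder_buffers(int demux_fd, int ion_fd[2],
						unsigned int buf_size)
	{
		struct dmx_decoder_buffers bufs;

		memset(&bufs, 0, sizeof(bufs));
		bufs.is_linear = 1;		/* linear group of buffers */
		bufs.buffers_num = 2;		/* must be > 1 when is_linear is set */
		bufs.buffers_size = buf_size;
		bufs.handles[0] = ion_fd[0];
		bufs.handles[1] = ion_fd[1];

		return ioctl(demux_fd, DMX_SET_DECODER_BUFFER, &bufs);
	}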
diff --git a/drivers/media/dvb/dvb-core/dmxdev.h b/drivers/media/dvb/dvb-core/dmxdev.h
index d1c1cc3..0f7da1b 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.h
+++ b/drivers/media/dvb/dvb-core/dmxdev.h
@@ -119,9 +119,6 @@
struct mutex mutex;
- /* relevent for decoder PES */
- unsigned long pes_buffer_size;
-
/* for recording output */
enum dmx_tsp_format_t dmx_tsp_format;
u32 rec_chunk_size;
@@ -130,6 +127,9 @@
struct timer_list timer;
int todo;
u8 secheader[3];
+
+ /* Decoder buffer(s) related */
+ struct dmx_decoder_buffers decoder_buffers;
};
struct dmxdev {
diff --git a/drivers/media/dvb/mpq/adapter/mpq_stream_buffer.c b/drivers/media/dvb/mpq/adapter/mpq_stream_buffer.c
index f779851..6840858 100644
--- a/drivers/media/dvb/mpq/adapter/mpq_stream_buffer.c
+++ b/drivers/media/dvb/mpq/adapter/mpq_stream_buffer.c
@@ -526,3 +526,34 @@
}
EXPORT_SYMBOL(mpq_streambuffer_data_avail);
+int mpq_streambuffer_get_data_rw_offset(
+ struct mpq_streambuffer *sbuff,
+ u32 *read_offset,
+ u32 *write_offset)
+{
+ if (NULL == sbuff)
+ return -EINVAL;
+
+ if (MPQ_STREAMBUFFER_BUFFER_MODE_RING == sbuff->mode) {
+ if (read_offset)
+ *read_offset = sbuff->raw_data.pread;
+ if (write_offset)
+ *write_offset = sbuff->raw_data.pwrite;
+ } else {
+ struct mpq_streambuffer_buffer_desc *desc;
+
+ if (read_offset) {
+ desc = (struct mpq_streambuffer_buffer_desc *)
+ &sbuff->raw_data.data[sbuff->raw_data.pread];
+ *read_offset = desc->read_ptr;
+ }
+ if (write_offset) {
+ desc = (struct mpq_streambuffer_buffer_desc *)
+ &sbuff->raw_data.data[sbuff->raw_data.pwrite];
+ *write_offset = desc->write_ptr;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mpq_streambuffer_get_data_rw_offset);
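A short sketch of a caller that only needs the write offset; either output pointer may be NULL, which is how the demux plugin below uses it. The helper name is an assumption.

	#include "mpq_stream_buffer.h"	/* assumed header with the prototypes */

	static int report_write_offset(struct mpq_streambuffer *sbuff)
	{
		u32 write_offset;
		int ret;

		/* Not interested in the read offset, so pass NULL for it */
		ret = mpq_streambuffer_get_data_rw_offset(sbuff, NULL, &write_offset);
		if (ret)
			return ret;

		pr_debug("stream buffer write offset = %u\n", write_offset);
		return 0;
	}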
diff --git a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.c b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.c
index 2a60840..d766862 100644
--- a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.c
+++ b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.c
@@ -39,6 +39,11 @@
static int mpq_demux_device_num = CONFIG_DVB_MPQ_NUM_DMX_DEVICES;
module_param(mpq_demux_device_num, int, S_IRUGO);
+/* ION heap ID to be used when calling ion_alloc for video decoder buffer */
+static int video_ion_alloc_heap = ION_CP_MM_HEAP_ID;
+module_param(video_ion_alloc_heap, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(video_ion_alloc_heap, "ION heap ID for allocation");
+
/**
* Maximum allowed framing pattern size
*/
@@ -295,8 +300,7 @@
(patterns[j].size - current_size))) {
MPQ_DVB_DBG_PRINT(
- "%s: Found matching pattern"
- "using prefix of size %d\n",
+ "%s: Found matching pattern using prefix of size %d\n",
__func__, current_size);
/*
* pattern found using prefix at the
@@ -787,14 +791,81 @@
}
EXPORT_SYMBOL(mpq_dmx_set_source);
+/**
+ * Takes an ION allocated buffer's file descriptor and handles the details of
+ * mapping it into kernel memory and obtaining an ION handle for it.
+ * Internal helper function.
+ *
+ * @client: ION client
+ * @handle: ION file descriptor to map
+ * @priv_handle: returned ION handle. Must be freed when no longer needed
+ * @kernel_mem: returned kernel mapped pointer
+ *
+ * Note: mapping might not be possible in secured heaps/buffers, and so NULL
+ * might be returned in kernel_mem
+ *
+ * Return error status
+ */
+static int mpq_map_buffer_to_kernel(
+ struct ion_client *client,
+ int handle,
+ struct ion_handle **priv_handle,
+ void **kernel_mem)
+{
+ struct ion_handle *ion_handle;
+ unsigned long ionflag = 0;
+ int ret;
+
+ if (NULL == client || priv_handle == NULL || kernel_mem == NULL) {
+ MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+ return -EINVAL;
+ }
+
+ ion_handle = ion_import_dma_buf(client, handle);
+ if (IS_ERR_OR_NULL(ion_handle)) {
+ ret = PTR_ERR(ion_handle);
+ MPQ_DVB_ERR_PRINT("%s: ion_import_dma_buf failed %d\n",
+ __func__, ret);
+ if (!ret)
+ ret = -ENOMEM;
+
+ goto map_buffer_failed;
+ }
+
+ ret = ion_handle_get_flags(client, ion_handle, &ionflag);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT("%s: ion_handle_get_flags failed %d\n",
+ __func__, ret);
+ goto map_buffer_failed_free_buff;
+ }
+
+ if (ionflag & ION_SECURE) {
+ MPQ_DVB_DBG_PRINT("%s: secured buffer\n", __func__);
+ *kernel_mem = NULL;
+ } else {
+ *kernel_mem = ion_map_kernel(client, ion_handle);
+ if (*kernel_mem == NULL) {
+ MPQ_DVB_ERR_PRINT("%s: ion_map_kernel failed\n",
+ __func__);
+ ret = -ENOMEM;
+ goto map_buffer_failed_free_buff;
+ }
+ }
+
+ *priv_handle = ion_handle;
+ return 0;
+
+map_buffer_failed_free_buff:
+ ion_free(client, ion_handle);
+map_buffer_failed:
+ return ret;
+}
+
int mpq_dmx_map_buffer(struct dmx_demux *demux, struct dmx_buffer *dmx_buffer,
void **priv_handle, void **kernel_mem)
{
struct dvb_demux *dvb_demux = demux->priv;
struct mpq_demux *mpq_demux;
- struct ion_handle *ion_handle;
- unsigned long ionflag = 0;
- int ret;
if ((mpq_dmx_info.devices == NULL) || (dvb_demux == NULL) ||
(priv_handle == NULL) || (kernel_mem == NULL)) {
@@ -808,47 +879,10 @@
return -EINVAL;
}
- ion_handle = ion_import_dma_buf(mpq_demux->ion_client,
- dmx_buffer->handle);
- if (IS_ERR_OR_NULL(ion_handle)) {
- ret = PTR_ERR(ion_handle);
- if (!ret)
- ret = -ENOMEM;
-
- MPQ_DVB_ERR_PRINT("%s: ion_import_dma_buf failed %d\n",
- __func__, ret);
- goto map_buffer_failed;
- }
-
- ret = ion_handle_get_flags(mpq_demux->ion_client, ion_handle, &ionflag);
- if (ret) {
- MPQ_DVB_ERR_PRINT("%s: ion_handle_get_flags failed %d\n",
- __func__, ret);
- goto map_buffer_failed_free_buff;
- }
-
- if (ionflag & ION_SECURE) {
- MPQ_DVB_DBG_PRINT("%s: secured buffer\n", __func__);
- /* TBD: Set buffer as secured */
- *kernel_mem = NULL;
- } else {
- *kernel_mem = ion_map_kernel(mpq_demux->ion_client,
- ion_handle);
- if (*kernel_mem == NULL) {
- MPQ_DVB_ERR_PRINT("%s: ion_map_kernel failed\n",
- __func__);
- ret = -ENOMEM;
- goto map_buffer_failed_free_buff;
- }
- }
-
- *priv_handle = (void *)ion_handle;
- return 0;
-
-map_buffer_failed_free_buff:
- ion_free(mpq_demux->ion_client, ion_handle);
-map_buffer_failed:
- return ret;
+ return mpq_map_buffer_to_kernel(
+ mpq_demux->ion_client,
+ dmx_buffer->handle,
+ (struct ion_handle **)priv_handle, kernel_mem);
}
EXPORT_SYMBOL(mpq_dmx_map_buffer);
@@ -889,18 +923,256 @@
}
EXPORT_SYMBOL(mpq_dmx_unmap_buffer);
+/**
+ * Handles the details of internal decoder buffer allocation via ION.
+ * Internal helper function.
+ * @feed_data: decoder feed object
+ * @dec_buffs: buffer information
+ * @client: ION client
+ *
+ * Return error status
+ */
+static int mpq_dmx_init_internal_buffers(
+ struct mpq_video_feed_info *feed_data,
+ struct dmx_decoder_buffers *dec_buffs,
+ struct ion_client *client)
+{
+ struct ion_handle *temp_handle = NULL;
+ void *payload_buffer = NULL;
+ int actual_buffer_size = 0;
+ int ret = 0;
+
+ MPQ_DVB_DBG_PRINT("%s: Internal decoder buffer allocation\n", __func__);
+
+ actual_buffer_size = dec_buffs->buffers_size;
+ actual_buffer_size += (SZ_4K - 1);
+ actual_buffer_size &= ~(SZ_4K - 1);
+
+ temp_handle = ion_alloc(client, actual_buffer_size, SZ_4K,
+ ION_HEAP(video_ion_alloc_heap), ION_FLAG_CACHED);
+
+ if (IS_ERR_OR_NULL(temp_handle)) {
+ ret = PTR_ERR(temp_handle);
+ MPQ_DVB_ERR_PRINT("%s: FAILED to allocate payload buffer %d\n",
+ __func__, ret);
+ if (!ret)
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ payload_buffer = ion_map_kernel(client, temp_handle);
+
+ if (IS_ERR_OR_NULL(payload_buffer)) {
+ ret = PTR_ERR(payload_buffer);
+ MPQ_DVB_ERR_PRINT(
+ "%s: FAILED to map payload buffer %d\n",
+ __func__, ret);
+ if (!ret)
+ ret = -ENOMEM;
+ goto init_failed_free_payload_buffer;
+ }
+ feed_data->buffer_desc.decoder_buffers_num = 1;
+ feed_data->buffer_desc.ion_handle[0] = temp_handle;
+ feed_data->buffer_desc.desc[0].base = payload_buffer;
+ feed_data->buffer_desc.desc[0].size = actual_buffer_size;
+ feed_data->buffer_desc.desc[0].read_ptr = 0;
+ feed_data->buffer_desc.desc[0].write_ptr = 0;
+ feed_data->buffer_desc.desc[0].handle =
+ ion_share_dma_buf(
+ client,
+ temp_handle);
+ if (IS_ERR_VALUE(feed_data->buffer_desc.desc[0].handle)) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: FAILED to share payload buffer %d\n",
+ __func__, ret);
+ ret = -ENOMEM;
+ goto init_failed_unmap_payload_buffer;
+ }
+
+ return 0;
+
+init_failed_unmap_payload_buffer:
+ ion_unmap_kernel(client, temp_handle);
+ feed_data->buffer_desc.desc[0].base = NULL;
+init_failed_free_payload_buffer:
+ ion_free(client, temp_handle);
+ feed_data->buffer_desc.ion_handle[0] = NULL;
+ feed_data->buffer_desc.desc[0].size = 0;
+ feed_data->buffer_desc.decoder_buffers_num = 0;
+end:
+ return ret;
+}
+
+/**
+ * Handles the details of external decoder buffers allocated by user.
+ * Each buffer is mapped into kernel memory and an ION handle is obtained, and
+ * decoder feed object is updated with related information.
+ * Internal helper function.
+ * @feed_data: decoder feed object
+ * @dec_buffs: buffer information
+ * @client: ION client
+ *
+ * Return error status
+ */
+static int mpq_dmx_init_external_buffers(
+ struct mpq_video_feed_info *feed_data,
+ struct dmx_decoder_buffers *dec_buffs,
+ struct ion_client *client)
+{
+ struct ion_handle *temp_handle = NULL;
+ void *payload_buffer = NULL;
+ int actual_buffer_size = 0;
+ int ret = 0;
+ int i;
+
+ /*
+ * Payload buffer was allocated externally (through ION).
+ * Map the ion handles to kernel memory
+ */
+ MPQ_DVB_DBG_PRINT("%s: External decoder buffer allocation\n", __func__);
+
+ actual_buffer_size = dec_buffs->buffers_size;
+ if (!dec_buffs->is_linear) {
+ MPQ_DVB_DBG_PRINT("%s: Ex. Ring-buffer\n", __func__);
+ feed_data->buffer_desc.decoder_buffers_num = 1;
+ } else {
+ MPQ_DVB_DBG_PRINT("%s: Ex. Linear\n", __func__);
+ feed_data->buffer_desc.decoder_buffers_num =
+ dec_buffs->buffers_num;
+ }
+
+ for (i = 0; i < feed_data->buffer_desc.decoder_buffers_num; i++) {
+ ret = mpq_map_buffer_to_kernel(
+ client,
+ dec_buffs->handles[i],
+ &temp_handle,
+ &payload_buffer);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: Failed mapping buffer %d\n",
+ __func__, i);
+ goto init_failed;
+ }
+ feed_data->buffer_desc.ion_handle[i] = temp_handle;
+ feed_data->buffer_desc.desc[i].base = payload_buffer;
+ feed_data->buffer_desc.desc[i].handle =
+ dec_buffs->handles[i];
+ feed_data->buffer_desc.desc[i].size =
+ dec_buffs->buffers_size;
+ feed_data->buffer_desc.desc[i].read_ptr = 0;
+ feed_data->buffer_desc.desc[i].write_ptr = 0;
+
+ MPQ_DVB_DBG_PRINT(
+ "%s: Buffer #%d: base=0x%p, handle=%d, size=%d\n",
+ __func__, i,
+ feed_data->buffer_desc.desc[i].base,
+ feed_data->buffer_desc.desc[i].handle,
+ feed_data->buffer_desc.desc[i].size);
+ }
+
+ return 0;
+
+init_failed:
+ for (i = 0; i < feed_data->buffer_desc.decoder_buffers_num; i++) {
+ if (feed_data->buffer_desc.ion_handle[i]) {
+ if (feed_data->buffer_desc.desc[i].base) {
+ ion_unmap_kernel(client,
+ feed_data->buffer_desc.ion_handle[i]);
+ feed_data->buffer_desc.desc[i].base = NULL;
+ }
+ ion_free(client, feed_data->buffer_desc.ion_handle[i]);
+ feed_data->buffer_desc.ion_handle[i] = NULL;
+ feed_data->buffer_desc.desc[i].size = 0;
+ }
+ }
+ return ret;
+}
+
+/**
+ * Handles the details of initializing the mpq_streambuffer object according
+ * to the user decoder buffer configuration: External/Internal buffers and
+ * ring/linear buffering mode.
+ * Internal helper function.
+ * @feed: dvb demux feed object, contains the buffers configuration
+ * @feed_data: decoder feed object
+ * @stream_buffer: stream buffer object to initialize
+ *
+ * Return error status
+ */
+static int mpq_dmx_init_streambuffer(
+ struct dvb_demux_feed *feed,
+ struct mpq_video_feed_info *feed_data,
+ struct mpq_streambuffer *stream_buffer)
+{
+ int ret;
+ void *packet_buffer = NULL;
+ struct mpq_demux *mpq_demux = feed->demux->priv;
+ struct ion_client *client = mpq_demux->ion_client;
+ struct dmx_decoder_buffers *dec_buffs = NULL;
+ enum mpq_streambuffer_mode mode;
+
+ dec_buffs = feed->feed.ts.decoder_buffers;
+
+ /* Allocate packet buffer holding the meta-data */
+ packet_buffer = vmalloc(VIDEO_META_DATA_BUFFER_SIZE);
+
+ if (packet_buffer == NULL) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: FAILED to allocate packets buffer\n",
+ __func__);
+
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ MPQ_DVB_DBG_PRINT("%s: dec_buffs: num=%d, size=%d, linear=%d\n",
+ __func__,
+ dec_buffs->buffers_num,
+ dec_buffs->buffers_size,
+ dec_buffs->is_linear);
+
+ feed_data->buffer_desc.decoder_buffers_num = dec_buffs->buffers_num;
+ if (0 == dec_buffs->buffers_num)
+ ret = mpq_dmx_init_internal_buffers(
+ feed_data, dec_buffs, client);
+ else
+ ret = mpq_dmx_init_external_buffers(
+ feed_data, dec_buffs, client);
+
+ if (ret != 0)
+ goto init_failed_free_packet_buffer;
+
+ mode = dec_buffs->is_linear ? MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR :
+ MPQ_STREAMBUFFER_BUFFER_MODE_RING;
+ ret = mpq_streambuffer_init(
+ feed_data->video_buffer,
+ mode,
+ feed_data->buffer_desc.desc,
+ feed_data->buffer_desc.decoder_buffers_num,
+ packet_buffer,
+ VIDEO_META_DATA_BUFFER_SIZE);
+
+ if (ret != 0)
+ goto init_failed_free_packet_buffer;
+
+ goto end;
+
+
+init_failed_free_packet_buffer:
+ vfree(packet_buffer);
+end:
+ return ret;
+}
+
int mpq_dmx_init_video_feed(struct dvb_demux_feed *feed)
{
int ret;
- void *packet_buffer;
- void *payload_buffer;
struct mpq_video_feed_info *feed_data;
struct mpq_demux *mpq_demux = feed->demux->priv;
struct mpq_streambuffer *stream_buffer;
- int actual_buffer_size;
/* Allocate memory for private feed data */
- feed_data = vmalloc(sizeof(struct mpq_video_feed_info));
+ feed_data = vzalloc(sizeof(struct mpq_video_feed_info));
if (feed_data == NULL) {
MPQ_DVB_ERR_PRINT(
@@ -925,78 +1197,6 @@
}
}
- /* Allocate packet buffer holding the meta-data */
- packet_buffer = vmalloc(VIDEO_META_DATA_BUFFER_SIZE);
-
- if (packet_buffer == NULL) {
- MPQ_DVB_ERR_PRINT(
- "%s: FAILED to allocate packets buffer\n",
- __func__);
-
- ret = -ENOMEM;
- goto init_failed_free_priv_data;
- }
-
- /*
- * Allocate payload buffer through ION.
- * TODO: for scrambling support, need to check if the
- * stream is scrambled and allocate the buffer with secure
- * flag set.
- */
-
- actual_buffer_size = feed->buffer_size;
-
- actual_buffer_size += (SZ_4K - 1);
- actual_buffer_size &= ~(SZ_4K - 1);
-
- feed_data->payload_buff_handle =
- ion_alloc(mpq_demux->ion_client,
- actual_buffer_size,
- SZ_4K,
- ION_HEAP(ION_CP_MM_HEAP_ID),
- ION_FLAG_CACHED);
-
- if (IS_ERR_OR_NULL(feed_data->payload_buff_handle)) {
- ret = PTR_ERR(feed_data->payload_buff_handle);
-
- MPQ_DVB_ERR_PRINT(
- "%s: FAILED to allocate payload buffer %d\n",
- __func__, ret);
-
- if (!ret)
- ret = -ENOMEM;
- goto init_failed_free_packet_buffer;
- }
-
- payload_buffer =
- ion_map_kernel(mpq_demux->ion_client,
- feed_data->payload_buff_handle);
-
- if (IS_ERR_OR_NULL(payload_buffer)) {
- ret = PTR_ERR(payload_buffer);
-
- MPQ_DVB_ERR_PRINT(
- "%s: FAILED to map payload buffer %d\n",
- __func__, ret);
-
- if (!ret)
- ret = -ENOMEM;
- goto init_failed_free_payload_buffer;
- }
-
- feed_data->buffer_desc.read_ptr = 0;
- feed_data->buffer_desc.write_ptr = 0;
- feed_data->buffer_desc.base = payload_buffer;
- feed_data->buffer_desc.size = actual_buffer_size;
- feed_data->buffer_desc.handle =
- ion_share_dma_buf(
- mpq_demux->ion_client,
- feed_data->payload_buff_handle);
- if (feed_data->buffer_desc.handle < 0) {
- ret = -EFAULT;
- goto init_failed_unmap_payload_buffer;
- }
-
/* Register the new stream-buffer interface to MPQ adapter */
switch (feed->pes_type) {
case DMX_TS_PES_VIDEO0:
@@ -1025,7 +1225,7 @@
__func__,
feed->pes_type);
ret = -EINVAL;
- goto init_failed_unshare_payload_buffer;
+ goto init_failed_free_priv_data;
}
/* make sure not occupied already */
@@ -1039,26 +1239,12 @@
__func__,
feed_data->stream_interface);
ret = -EBUSY;
- goto init_failed_unshare_payload_buffer;
+ goto init_failed_free_priv_data;
}
feed_data->video_buffer =
&mpq_dmx_info.decoder_buffers[feed_data->stream_interface];
- ret = mpq_streambuffer_init(
- feed_data->video_buffer,
- MPQ_STREAMBUFFER_BUFFER_MODE_RING,
- &feed_data->buffer_desc,
- 1,
- packet_buffer,
- VIDEO_META_DATA_BUFFER_SIZE);
- if (ret < 0) {
- MPQ_DVB_ERR_PRINT(
- "%s: mpq_streambuffer_init failed, err = %d\n",
- __func__, ret);
- goto init_failed_unshare_payload_buffer;
- }
-
ret = mpq_adapter_register_stream_if(
feed_data->stream_interface,
feed_data->video_buffer);
@@ -1068,10 +1254,18 @@
"%s: mpq_adapter_register_stream_if failed, "
"err = %d\n",
__func__, ret);
- goto init_failed_unshare_payload_buffer;
+ goto init_failed_free_priv_data;
}
- feed->buffer_size = actual_buffer_size;
+ ret = mpq_dmx_init_streambuffer(
+ feed, feed_data, feed_data->video_buffer);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_dmx_init_streambuffer failed, err = %d\n",
+ __func__, ret);
+ goto init_failed_unregister_stream_if;
+ }
+
feed_data->pes_payload_address =
(u32)feed_data->video_buffer->raw_data.data;
@@ -1085,7 +1279,6 @@
feed_data->found_sequence_header_pattern = 0;
memset(&feed_data->prefix_size, 0,
sizeof(struct mpq_framing_prefix_size_masks));
- feed_data->first_pattern_offset = 0;
feed_data->first_prefix_size = 0;
feed_data->saved_pts_dts_info.pts_exist = 0;
feed_data->saved_pts_dts_info.dts_exist = 0;
@@ -1101,16 +1294,8 @@
return 0;
-init_failed_unshare_payload_buffer:
- put_unused_fd(feed_data->buffer_desc.handle);
-init_failed_unmap_payload_buffer:
- ion_unmap_kernel(mpq_demux->ion_client,
- feed_data->payload_buff_handle);
-init_failed_free_payload_buffer:
- ion_free(mpq_demux->ion_client,
- feed_data->payload_buff_handle);
-init_failed_free_packet_buffer:
- vfree(packet_buffer);
+init_failed_unregister_stream_if:
+ mpq_adapter_unregister_stream_if(feed_data->stream_interface);
init_failed_free_priv_data:
vfree(feed_data);
feed->priv = NULL;
@@ -1120,6 +1305,41 @@
}
EXPORT_SYMBOL(mpq_dmx_init_video_feed);
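+/**
+ * mpq_dmx_release_streambuffer() - Free the decoder buffer(s) used by the
+ * video feed: unregister the stream interface, free the meta-data packet
+ * buffer, unmap and free the ION buffers, and release the shared handle
+ * only when it was created internally by the kernel.
+ * @feed: dvb demux feed object
+ * @feed_data: decoder feed object
+ * @client: ION client used for buffer allocation/mapping
+ */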
+void mpq_dmx_release_streambuffer(
+ struct dvb_demux_feed *feed,
+ struct mpq_video_feed_info *feed_data,
+ struct ion_client *client)
+{
+ int buf_num = 0;
+ struct dmx_decoder_buffers *dec_buffs = feed->feed.ts.decoder_buffers;
+ int i;
+
+ mpq_adapter_unregister_stream_if(feed_data->stream_interface);
+
+ vfree(feed_data->video_buffer->packet_data.data);
+
+ buf_num = feed_data->buffer_desc.decoder_buffers_num;
+ for (i = 0; i < buf_num; i++) {
+ if (feed_data->buffer_desc.ion_handle[i]) {
+ if (feed_data->buffer_desc.desc[i].base) {
+ ion_unmap_kernel(client,
+ feed_data->buffer_desc.ion_handle[i]);
+ feed_data->buffer_desc.desc[i].base = NULL;
+ }
+ ion_free(client, feed_data->buffer_desc.ion_handle[i]);
+ feed_data->buffer_desc.ion_handle[i] = NULL;
+ feed_data->buffer_desc.desc[i].size = 0;
+
+ /*
+ * Call put_unused_fd only if the kernel is the one that
+ * shared the buffer handle.
+ */
+ if (0 == dec_buffs->buffers_num)
+ put_unused_fd(
+ feed_data->buffer_desc.desc[i].handle);
+ }
+ }
+}
int mpq_dmx_terminate_video_feed(struct dvb_demux_feed *feed)
{
@@ -1143,17 +1363,7 @@
wake_up_all(&feed_data->video_buffer->raw_data.queue);
- mpq_adapter_unregister_stream_if(feed_data->stream_interface);
-
- vfree(feed_data->video_buffer->packet_data.data);
-
- put_unused_fd(feed_data->buffer_desc.handle);
-
- ion_unmap_kernel(mpq_demux->ion_client,
- feed_data->payload_buff_handle);
-
- ion_free(mpq_demux->ion_client,
- feed_data->payload_buff_handle);
+ mpq_dmx_release_streambuffer(feed, feed_data, mpq_demux->ion_client);
vfree(feed_data);
@@ -1196,78 +1406,98 @@
}
EXPORT_SYMBOL(mpq_dmx_decoder_fullness_init);
+
+static inline int mpq_dmx_check_decoder_fullness(
+ struct mpq_streambuffer *sbuff,
+ size_t required_space)
+{
+ u32 free = mpq_streambuffer_data_free(sbuff);
+ MPQ_DVB_DBG_PRINT("%s: stream buffer free = %d, required = %d\n",
+ __func__, free, required_space);
+
+ /*
+ * For linear buffers, verify there's enough space for this TSP
+ * and an additional buffer is free, as framing might require one
+ * more buffer to be available.
+ */
+ if (MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR == sbuff->mode)
+ return (free >= required_space &&
+ sbuff->pending_buffers_count < sbuff->buffers_num-1);
+ else
+ /* Ring buffer mode */
+ return (free >= required_space);
+}
+
int mpq_dmx_decoder_fullness_wait(
struct dvb_demux_feed *feed,
size_t required_space)
{
struct mpq_demux *mpq_demux = feed->demux->priv;
+ struct mpq_streambuffer *sbuff = NULL;
+ struct mpq_video_feed_info *feed_data;
+ int ret = 0;
- if (mpq_dmx_is_video_feed(feed)) {
- int ret;
- struct mpq_video_feed_info *feed_data;
- struct dvb_ringbuffer *video_buff;
-
- spin_lock(&mpq_demux->feed_lock);
-
- if (feed->priv == NULL) {
- spin_unlock(&mpq_demux->feed_lock);
- return -EINVAL;
- }
-
- feed_data = feed->priv;
- video_buff = &feed_data->video_buffer->raw_data;
-
- ret = 0;
- if ((feed_data != NULL) &&
- (!feed_data->fullness_wait_cancel) &&
- (dvb_ringbuffer_free(video_buff) < required_space)) {
- DEFINE_WAIT(__wait);
- for (;;) {
- prepare_to_wait(
- &video_buff->queue,
- &__wait,
- TASK_INTERRUPTIBLE);
-
- if ((feed->priv == NULL) ||
- (feed_data->fullness_wait_cancel) ||
- (dvb_ringbuffer_free(video_buff) >=
- required_space))
- break;
-
- if (!signal_pending(current)) {
- spin_unlock(&mpq_demux->feed_lock);
- schedule();
- spin_lock(&mpq_demux->feed_lock);
- continue;
- }
- ret = -ERESTARTSYS;
- break;
- }
- finish_wait(&video_buff->queue, &__wait);
- }
-
- if (ret < 0) {
- spin_unlock(&mpq_demux->feed_lock);
- return ret;
- }
-
- if ((feed->priv == NULL) ||
- (feed_data->fullness_wait_cancel)) {
- spin_unlock(&mpq_demux->feed_lock);
- return -EINVAL;
- }
-
- spin_unlock(&mpq_demux->feed_lock);
- return 0;
+ if (!mpq_dmx_is_video_feed(feed)) {
+ MPQ_DVB_DBG_PRINT("%s: Invalid feed type %d\n",
+ __func__,
+ feed->pes_type);
+ return -EINVAL;
}
- /* else */
- MPQ_DVB_DBG_PRINT(
- "%s: Invalid feed type %d\n",
- __func__,
- feed->pes_type);
+ spin_lock(&mpq_demux->feed_lock);
+ if (feed->priv == NULL) {
+ spin_unlock(&mpq_demux->feed_lock);
+ return -EINVAL;
+ }
+ feed_data = feed->priv;
+ sbuff = feed_data->video_buffer;
+ if (sbuff == NULL) {
+ spin_unlock(&mpq_demux->feed_lock);
+ MPQ_DVB_ERR_PRINT("%s: mpq_streambuffer object is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
- return -EINVAL;
+ if ((feed_data != NULL) &&
+ (!feed_data->fullness_wait_cancel) &&
+ (!mpq_dmx_check_decoder_fullness(sbuff, required_space))) {
+ DEFINE_WAIT(__wait);
+ for (;;) {
+ prepare_to_wait(&sbuff->raw_data.queue,
+ &__wait,
+ TASK_INTERRUPTIBLE);
+
+ if ((feed->priv == NULL) ||
+ feed_data->fullness_wait_cancel ||
+ mpq_dmx_check_decoder_fullness(sbuff,
+ required_space))
+ break;
+
+ if (!signal_pending(current)) {
+ spin_unlock(&mpq_demux->feed_lock);
+ schedule();
+ spin_lock(&mpq_demux->feed_lock);
+ continue;
+ }
+
+ ret = -ERESTARTSYS;
+ break;
+ }
+ finish_wait(&sbuff->raw_data.queue, &__wait);
+ }
+
+ if (ret < 0) {
+ spin_unlock(&mpq_demux->feed_lock);
+ return ret;
+ }
+
+ if ((feed->priv == NULL) || (feed_data->fullness_wait_cancel)) {
+ spin_unlock(&mpq_demux->feed_lock);
+ return -EINVAL;
+ }
+
+ spin_unlock(&mpq_demux->feed_lock);
+ return 0;
}
EXPORT_SYMBOL(mpq_dmx_decoder_fullness_wait);
@@ -1572,17 +1802,6 @@
pes_header = &feed_data->pes_header;
- /* MPQ_DVB_DBG_PRINT("TS packet: %X %X %X %X %X%X %X %X %X\n",
- ts_header->sync_byte,
- ts_header->transport_error_indicator,
- ts_header->payload_unit_start_indicator,
- ts_header->transport_priority,
- ts_header->pid_msb,
- ts_header->pid_lsb,
- ts_header->transport_scrambling_control,
- ts_header->adaptation_field_control,
- ts_header->continuity_counter); */
-
/* Make sure this TS packet has a payload and not scrambled */
if ((ts_header->sync_byte != 0x47) ||
(ts_header->adaptation_field_control == 0) ||
@@ -1752,8 +1971,11 @@
feed->peslen += bytes_avail;
meta_data.packet_type = DMX_FRAMING_INFO_PACKET;
- packet.raw_data_handle = feed_data->buffer_desc.handle;
- packet.raw_data_offset = 0;
+ packet.raw_data_handle = feed_data->buffer_desc.desc[0].handle;
+ mpq_streambuffer_get_data_rw_offset(
+ stream_buffer,
+ &packet.raw_data_offset,
+ NULL);
packet.user_data_len =
sizeof(struct mpq_adapter_video_meta_data);
@@ -1874,17 +2096,6 @@
pes_header = &feed_data->pes_header;
- /* MPQ_DVB_DBG_PRINT("TS packet: %X %X %X %X %X%X %X %X %X\n",
- ts_header->sync_byte,
- ts_header->transport_error_indicator,
- ts_header->payload_unit_start_indicator,
- ts_header->transport_priority,
- ts_header->pid_msb,
- ts_header->pid_lsb,
- ts_header->transport_scrambling_control,
- ts_header->adaptation_field_control,
- ts_header->continuity_counter); */
-
/* Make sure this TS packet has a payload and not scrambled */
if ((ts_header->sync_byte != 0x47) ||
(ts_header->adaptation_field_control == 0) ||
@@ -1910,8 +2121,11 @@
if (0 == feed_data->pes_header_left_bytes) {
packet.raw_data_len = feed->peslen;
packet.raw_data_handle =
- feed_data->buffer_desc.handle;
- packet.raw_data_offset = 0;
+ feed_data->buffer_desc.desc[0].handle;
+ mpq_streambuffer_get_data_rw_offset(
+ stream_buffer,
+ &packet.raw_data_offset,
+ NULL);
packet.user_data_len =
sizeof(struct
mpq_adapter_video_meta_data);
@@ -2014,43 +2228,57 @@
struct dmx_buffer_status *dmx_buffer_status)
{
struct mpq_demux *mpq_demux = feed->demux->priv;
+ struct mpq_video_feed_info *feed_data;
+ struct mpq_streambuffer *video_buff;
- if (mpq_dmx_is_video_feed(feed)) {
- struct mpq_video_feed_info *feed_data;
- struct dvb_ringbuffer *video_buff;
-
- spin_lock(&mpq_demux->feed_lock);
-
- if (feed->priv == NULL) {
- MPQ_DVB_ERR_PRINT(
- "%s: invalid feed, feed->priv is NULL\n",
- __func__);
- spin_unlock(&mpq_demux->feed_lock);
- return -EINVAL;
- }
-
- feed_data = feed->priv;
- video_buff = &feed_data->video_buffer->raw_data;
-
- dmx_buffer_status->error = video_buff->error;
- dmx_buffer_status->fullness = dvb_ringbuffer_avail(video_buff);
- dmx_buffer_status->free_bytes = dvb_ringbuffer_free(video_buff);
- dmx_buffer_status->read_offset = video_buff->pread;
- dmx_buffer_status->write_offset = video_buff->pwrite;
- dmx_buffer_status->size = video_buff->size;
-
- spin_unlock(&mpq_demux->feed_lock);
-
- return 0;
+ if (!mpq_dmx_is_video_feed(feed)) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: Invalid feed type %d\n",
+ __func__,
+ feed->pes_type);
+ return -EINVAL;
}
- /* else */
- MPQ_DVB_ERR_PRINT(
- "%s: Invalid feed type %d\n",
- __func__,
- feed->pes_type);
+ spin_lock(&mpq_demux->feed_lock);
- return -EINVAL;
+ if (feed->priv == NULL) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: invalid feed, feed->priv is NULL\n",
+ __func__);
+ spin_unlock(&mpq_demux->feed_lock);
+ return -EINVAL;
+ }
+ feed_data = feed->priv;
+ video_buff = feed_data->video_buffer;
+
+ dmx_buffer_status->error = video_buff->raw_data.error;
+ if (MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR == video_buff->mode) {
+ dmx_buffer_status->fullness =
+ video_buff->buffers[0].size *
+ video_buff->pending_buffers_count;
+ dmx_buffer_status->free_bytes =
+ video_buff->buffers[0].size *
+ (video_buff->buffers_num -
+ video_buff->pending_buffers_count);
+ dmx_buffer_status->size =
+ video_buff->buffers[0].size *
+ video_buff->buffers_num;
+ } else {
+ dmx_buffer_status->fullness =
+ mpq_streambuffer_data_avail(video_buff);
+ dmx_buffer_status->free_bytes =
+ mpq_streambuffer_data_free(video_buff);
+ dmx_buffer_status->size = video_buff->buffers[0].size;
+ }
+
+ mpq_streambuffer_get_data_rw_offset(
+ video_buff,
+ &dmx_buffer_status->read_offset,
+ &dmx_buffer_status->write_offset);
+
+ spin_unlock(&mpq_demux->feed_lock);
+
+ return 0;
}
EXPORT_SYMBOL(mpq_dmx_decoder_buffer_status);
diff --git a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.h b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.h
index f7af1ef..daf8aa9 100644
--- a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.h
+++ b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.h
@@ -269,6 +269,24 @@
u32 size_mask[MPQ_MAX_FOUND_PATTERNS];
};
+/**
+ * mpq_decoder_buffers_desc - decoder buffer(s) management information.
+ *
+ * @desc: Array of buffer descriptors as they are passed to mpq_streambuffer
+ * upon its initialization. These descriptors must remain valid as long as
+ * the mpq_streambuffer object is used.
+ * @ion_handle: Array of ION handles, one for each decoder buffer, used for
+ * kernel memory mapping or allocation. Handles are saved in order to release
+ * resources properly later on.
+ * @decoder_buffers_num: number of buffers that are managed, either externally
+ * or internally by the mpq_streambuffer object
+ */
+struct mpq_decoder_buffers_desc {
+ struct mpq_streambuffer_buffer_desc desc[DMX_MAX_DECODER_BUFFER_NUM];
+ struct ion_handle *ion_handle[DMX_MAX_DECODER_BUFFER_NUM];
+ u32 decoder_buffers_num;
+};
+
/*
* mpq_video_feed_info - private data used for video feed.
*
@@ -286,8 +304,7 @@
* decoder's fullness.
* @pes_payload_address: Used for feeds that output data to decoder,
* holds current PES payload start address.
- * @payload_buff_handle: ION handle for the allocated payload buffer
- * @stream_interface: The ID of the video stream interface registered
+ * @stream_interface: The ID of the video stream interface registered
* with this stream buffer.
* @patterns: pointer to the framing patterns to look for.
* @patterns_num: number of framing patterns.
@@ -320,13 +337,12 @@
struct mpq_video_feed_info {
void *plugin_data;
struct mpq_streambuffer *video_buffer;
- struct mpq_streambuffer_buffer_desc buffer_desc;
+ struct mpq_decoder_buffers_desc buffer_desc;
struct pes_packet_header pes_header;
u32 pes_header_left_bytes;
u32 pes_header_offset;
u32 pes_payload_address;
int fullness_wait_cancel;
- struct ion_handle *payload_buff_handle;
enum mpq_adapter_stream_if stream_interface;
const struct mpq_framing_pattern_lookup_params *patterns;
int patterns_num;
diff --git a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tsif.c b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tsif.c
index bbf9d0a..c5c3518 100644
--- a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tsif.c
+++ b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tsif.c
@@ -572,24 +572,53 @@
caps->max_bitrate = 144;
caps->demod_input_max_bitrate = 72;
caps->memory_input_max_bitrate = 72;
- caps->section.flags = 0;
+
+ /* Buffer requirements */
+ caps->section.flags =
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT;
+ caps->section.max_buffer_num = 1;
caps->section.max_size = 0xFFFFFFFF;
caps->section.size_alignment = 0;
- caps->pes.flags = 0;
+ caps->pes.flags =
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT;
+ caps->pes.max_buffer_num = 1;
caps->pes.max_size = 0xFFFFFFFF;
caps->pes.size_alignment = 0;
- caps->recording_188_tsp.flags = 0;
+ caps->recording_188_tsp.flags =
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT;
+ caps->recording_188_tsp.max_buffer_num = 1;
caps->recording_188_tsp.max_size = 0xFFFFFFFF;
caps->recording_188_tsp.size_alignment = 0;
- caps->recording_192_tsp.flags = 0;
+ caps->recording_192_tsp.flags =
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT;
+ caps->recording_192_tsp.max_buffer_num = 1;
caps->recording_192_tsp.max_size = 0xFFFFFFFF;
caps->recording_192_tsp.size_alignment = 0;
- caps->playback_188_tsp.flags = 0;
+ caps->playback_188_tsp.flags =
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT;
+ caps->playback_188_tsp.max_buffer_num = 1;
caps->playback_188_tsp.max_size = 0xFFFFFFFF;
caps->playback_188_tsp.size_alignment = 0;
- caps->playback_192_tsp.flags = 0;
+ caps->playback_192_tsp.flags =
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT;
+ caps->playback_192_tsp.max_buffer_num = 1;
caps->playback_192_tsp.max_size = 0xFFFFFFFF;
caps->playback_192_tsp.size_alignment = 0;
+ caps->decoder.flags =
+ DMX_BUFFER_CONTIGUOUS_MEM |
+ DMX_BUFFER_SECURED_IF_DECRYPTED |
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT |
+ DMX_BUFFER_LINEAR_GROUP_SUPPORT;
+ caps->decoder.max_buffer_num = DMX_MAX_DECODER_BUFFER_NUM;
+ caps->decoder.max_size = 0xFFFFFFFF;
+ caps->decoder.size_alignment = SZ_4K;
return 0;
}
diff --git a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v1.c b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v1.c
index ac03e43..61c1761 100644
--- a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v1.c
+++ b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v1.c
@@ -868,24 +868,53 @@
caps->max_bitrate = 144;
caps->demod_input_max_bitrate = 72;
caps->memory_input_max_bitrate = 72;
- caps->section.flags = 0;
+
+ /* Buffer requirements */
+ caps->section.flags =
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT;
+ caps->section.max_buffer_num = 1;
caps->section.max_size = 0xFFFFFFFF;
caps->section.size_alignment = 0;
- caps->pes.flags = 0;
+ caps->pes.flags =
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT;
+ caps->pes.max_buffer_num = 1;
caps->pes.max_size = 0xFFFFFFFF;
caps->pes.size_alignment = 0;
- caps->recording_188_tsp.flags = 0;
+ caps->recording_188_tsp.flags =
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT;
+ caps->recording_188_tsp.max_buffer_num = 1;
caps->recording_188_tsp.max_size = 0xFFFFFFFF;
caps->recording_188_tsp.size_alignment = 0;
- caps->recording_192_tsp.flags = 0;
+ caps->recording_192_tsp.flags =
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT;
+ caps->recording_192_tsp.max_buffer_num = 1;
caps->recording_192_tsp.max_size = 0xFFFFFFFF;
caps->recording_192_tsp.size_alignment = 0;
- caps->playback_188_tsp.flags = 0;
+ caps->playback_188_tsp.flags =
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT;
+ caps->playback_188_tsp.max_buffer_num = 1;
caps->playback_188_tsp.max_size = 0xFFFFFFFF;
caps->playback_188_tsp.size_alignment = 0;
- caps->playback_192_tsp.flags = 0;
+ caps->playback_192_tsp.flags =
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT;
+ caps->playback_192_tsp.max_buffer_num = 1;
caps->playback_192_tsp.max_size = 0xFFFFFFFF;
caps->playback_192_tsp.size_alignment = 0;
+ caps->decoder.flags =
+ DMX_BUFFER_CONTIGUOUS_MEM |
+ DMX_BUFFER_SECURED_IF_DECRYPTED |
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT |
+ DMX_BUFFER_LINEAR_GROUP_SUPPORT;
+ caps->decoder.max_buffer_num = DMX_MAX_DECODER_BUFFER_NUM;
+ caps->decoder.max_size = 0xFFFFFFFF;
+ caps->decoder.size_alignment = SZ_4K;
return 0;
}
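Both plugins now advertise identical decoder buffer capabilities. As an illustration only (the struct, helper and flag values below are hypothetical placeholders; just the flag names, buffer count limit and 4K alignment come from the hunks above), a client-side check of a requested buffer layout against those capabilities could look like this:

#include <stdbool.h>
#include <stddef.h>

/* Placeholder flag values; the real definitions live in the demux API. */
#define DMX_BUFFER_EXTERNAL_SUPPORT     0x1
#define DMX_BUFFER_INTERNAL_SUPPORT     0x2
#define DMX_BUFFER_LINEAR_GROUP_SUPPORT 0x4

/* Hypothetical mirror of the decoder capability fields set above. */
struct decoder_caps {
	unsigned int flags;
	unsigned int max_buffer_num;	/* DMX_MAX_DECODER_BUFFER_NUM above */
	unsigned int size_alignment;	/* SZ_4K above */
};

static bool decoder_layout_supported(const struct decoder_caps *caps,
				     bool external, unsigned int num_buffers,
				     size_t buffer_size)
{
	if (external && !(caps->flags & DMX_BUFFER_EXTERNAL_SUPPORT))
		return false;
	if (!external && !(caps->flags & DMX_BUFFER_INTERNAL_SUPPORT))
		return false;
	if (num_buffers > 1 && !(caps->flags & DMX_BUFFER_LINEAR_GROUP_SUPPORT))
		return false;
	if (num_buffers == 0 || num_buffers > caps->max_buffer_num)
		return false;
	if (caps->size_alignment && (buffer_size % caps->size_alignment))
		return false;
	return true;
}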
diff --git a/drivers/media/dvb/mpq/include/mpq_stream_buffer.h b/drivers/media/dvb/mpq/include/mpq_stream_buffer.h
index 9476c73..e5ba635 100644
--- a/drivers/media/dvb/mpq/include/mpq_stream_buffer.h
+++ b/drivers/media/dvb/mpq/include/mpq_stream_buffer.h
@@ -413,7 +413,21 @@
mpq_streambuffer_pkt_dispose_cb cb_func,
void *user_data);
-
+/**
+ * mpq_streambuffer_get_data_rw_offset - returns read/write offsets of the
+ * current data buffer.
+ * @sbuff: The stream buffer object
+ * @read_offset: returned read offset
+ * @write_offset: returned write offset
+ *
+ * Note: read offset or write offset may be NULL if not required.
+ * Returns 0 on success,
+ * -EINVAL if arguments are invalid
+ */
+int mpq_streambuffer_get_data_rw_offset(
+ struct mpq_streambuffer *sbuff,
+ u32 *read_offset,
+ u32 *write_offset);
#endif /* _MPQ_STREAM_BUFFER_H */
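A hedged usage sketch for the accessor declared above, assuming the header is included under the path shown and that the function returns 0 on success and -EINVAL for bad arguments as the kernel-doc states. Only the write offset is needed in this example, so NULL is passed for the read offset, which the comment explicitly permits.

#include <linux/kernel.h>
#include "mpq_stream_buffer.h"	/* assumed include path for the header above */

static int report_write_offset(struct mpq_streambuffer *sbuff, u32 *offset)
{
	int ret;

	if (!sbuff || !offset)
		return -EINVAL;

	/* Read offset is not needed here, so NULL is passed for it. */
	ret = mpq_streambuffer_get_data_rw_offset(sbuff, NULL, offset);
	if (ret)
		pr_err("%s: failed to query write offset (%d)\n",
			__func__, ret);

	return ret;
}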
diff --git a/drivers/media/video/msm_vidc/vidc_hal.c b/drivers/media/video/msm_vidc/vidc_hal.c
index 10ab0dd..8283fac 100644
--- a/drivers/media/video/msm_vidc/vidc_hal.c
+++ b/drivers/media/video/msm_vidc/vidc_hal.c
@@ -712,6 +712,7 @@
pkt->rg_property_data[0] = HFI_PROPERTY_SYS_DEBUG_CONFIG;
hfi = (struct hfi_debug_config *) &pkt->rg_property_data[1];
hfi->debug_config = debug;
+ hfi->debug_mode = HFI_DEBUG_MODE_QUEUE;
if (vidc_hal_iface_cmdq_write(device, pkt))
return -ENOTEMPTY;
return 0;
diff --git a/drivers/misc/isa1200.c b/drivers/misc/isa1200.c
index 604ffd7..df4a241 100644
--- a/drivers/misc/isa1200.c
+++ b/drivers/misc/isa1200.c
@@ -23,6 +23,7 @@
#include <linux/clk.h>
#include <linux/i2c/isa1200.h>
#include "../staging/android/timed_output.h"
+#include <linux/of_gpio.h>
#define ISA1200_HCTRL0 0x30
#define ISA1200_HCTRL1 0x31
@@ -121,7 +122,7 @@
/* vote for clock */
if (haptic->pdata->need_pwm_clk && !haptic->clk_on) {
- rc = clk_enable(haptic->pwm_clk);
+ rc = clk_prepare_enable(haptic->pwm_clk);
if (rc < 0) {
pr_err("%s: clk enable failed\n",
__func__);
@@ -406,10 +407,10 @@
for (i = 0; i < num_reg; i++) {
haptic->regs[i] = regulator_get(&haptic->client->dev,
- reg_info[i].name);
+ reg_info[i].name);
if (IS_ERR(haptic->regs[i])) {
rc = PTR_ERR(haptic->regs[i]);
- pr_err("%s:regulator get failed(%d)\n", __func__, rc);
+ pr_err("%s:regulator get failed(%d)\n", __func__, rc);
goto put_regs;
}
@@ -438,6 +439,138 @@
return rc;
}
+#ifdef CONFIG_OF
+static int isa1200_parse_dt(struct device *dev,
+ struct isa1200_platform_data *pdata)
+{
+ struct device_node *temp, *np = dev->of_node;
+ struct isa1200_regulator *reg_info;
+ enum of_gpio_flags hap_en_flags = OF_GPIO_ACTIVE_LOW;
+ enum of_gpio_flags hap_len_flags = OF_GPIO_ACTIVE_LOW;
+ int rc = 0;
+ u32 temp_val;
+ const char *temp_string;
+
+ rc = of_property_read_string(np, "label", &pdata->name);
+ if (rc) {
+ dev_err(dev, "Unable to read device name\n");
+ return rc;
+ }
+
+ pdata->chip_en = of_property_read_bool(np, "imagis,chip-en");
+ pdata->ext_clk_en = of_property_read_bool(np, "imagis,ext-clk-en");
+ pdata->is_erm = of_property_read_bool(np, "imagis,is-erm");
+ pdata->overdrive_high =
+ of_property_read_bool(np, "imagis,overdrive-high");
+ pdata->overdrive_en = of_property_read_bool(np, "imagis,overdrive-en");
+ pdata->smart_en = of_property_read_bool(np, "imagis,smart-en");
+ pdata->need_pwm_clk = of_property_read_bool(np, "imagis,need-pwm-clk");
+
+ pdata->hap_en_gpio = of_get_named_gpio_flags(np,
+ "imagis,hap-en-gpio", 0, &hap_en_flags);
+ pdata->hap_len_gpio = of_get_named_gpio_flags(np,
+ "imagis,hap-len-gpio", 0, &hap_len_flags);
+
+ rc = of_property_read_u32(np, "imagis,max-timeout",
+ &pdata->max_timeout);
+ if (rc) {
+ dev_err(dev, "Unable to read max timeout\n");
+ return rc;
+ }
+
+ rc = of_property_read_u32(np, "imagis,pwm-div", &pdata->pwm_fd.pwm_div);
+ if (rc && (rc != -EINVAL)) {
+ dev_err(dev, "Unable to read pwm division\n");
+ return rc;
+ }
+
+ rc = of_property_read_u32(np, "imagis,pwm-freq",
+ &pdata->pwm_fd.pwm_freq);
+ if (rc && (rc != -EINVAL)) {
+ dev_err(dev, "Unable to read pwm frequency\n");
+ return rc;
+ }
+
+ rc = of_property_read_u32(np, "imagis,pwm-ch-id", &pdata->pwm_ch_id);
+ if (rc && (rc != -EINVAL)) {
+ dev_err(dev, "Unable to read pwm channel id\n");
+ return rc;
+ }
+
+ rc = of_property_read_u32(np, "imagis,mode-ctrl", &pdata->mode_ctrl);
+ if (rc) {
+ dev_err(dev, "Unable to read control mode\n");
+ return rc;
+ }
+
+ rc = of_property_read_u32(np, "imagis,duty", &pdata->duty);
+ if (rc && (rc != -EINVAL)) {
+ dev_err(dev, "Unable to read duty cycle\n");
+ return rc;
+ }
+
+ pdata->num_regulators = 0;
+ temp = NULL;
+ while ((temp = of_get_next_child(np, temp)))
+ pdata->num_regulators++;
+
+ if (!pdata->num_regulators)
+ return 0;
+
+ reg_info = devm_kzalloc(dev, pdata->num_regulators *
+ sizeof(struct isa1200_regulator), GFP_KERNEL);
+ if (!reg_info)
+ return -ENOMEM;
+
+ pdata->regulator_info = reg_info;
+
+ for_each_child_of_node(np, temp) {
+ rc = of_property_read_string(temp,
+ "regulator-name", &temp_string);
+ if (rc) {
+ dev_err(dev, "Unable to read regulator name\n");
+ return rc;
+ } else
+ reg_info->name = temp_string;
+
+ rc = of_property_read_u32(temp, "regulator-max-microvolt",
+ &temp_val);
+ if (rc) {
+ dev_err(dev, "Unable to read max uV\n");
+ return rc;
+ } else
+ reg_info->max_uV = temp_val;
+
+ rc = of_property_read_u32(temp, "regulator-min-microvolt",
+ &temp_val);
+ if (rc) {
+ dev_err(dev, "Unable to read min uV\n");
+ return rc;
+ } else
+ reg_info->min_uV = temp_val;
+
+ rc = of_property_read_u32(temp, "regulator-max-microamp",
+ &temp_val);
+ if (rc) {
+ dev_err(dev, "Unable to read load uA\n");
+ return rc;
+ } else
+ reg_info->load_uA = temp_val;
+
+ reg_info++;
+ }
+
+ return 0;
+}
+#else
+static int isa1200_parse_dt(struct device *dev,
+ struct isa1200_platform_data *pdata)
+{
+ return -ENODEV;
+}
+#endif
+
+
static int __devinit isa1200_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -452,7 +585,22 @@
return -EIO;
}
- pdata = client->dev.platform_data;
+ if (client->dev.of_node) {
+ pdata = devm_kzalloc(&client->dev,
+ sizeof(struct isa1200_platform_data), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(&client->dev, "Failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ ret = isa1200_parse_dt(&client->dev, pdata);
+ if (ret) {
+ dev_err(&client->dev, "Parsing DT failed(%d)", ret);
+ return ret;
+ }
+ } else
+ pdata = client->dev.platform_data;
+
if (!pdata) {
dev_err(&client->dev, "%s: no platform data\n", __func__);
return -EINVAL;
@@ -714,10 +862,19 @@
{ },
};
MODULE_DEVICE_TABLE(i2c, isa1200_id);
+#ifdef CONFIG_OF
+static struct of_device_id isa1200_match_table[] = {
+ { .compatible = "imagis,isa1200",},
+ { },
+};
+#else
+#define isa1200_match_table NULL
+#endif
static struct i2c_driver isa1200_driver = {
.driver = {
.name = "isa1200",
+ .of_match_table = isa1200_match_table,
},
.probe = isa1200_probe,
.remove = __devexit_p(isa1200_remove),
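The DT parser added above treats -EINVAL from of_property_read_u32() as "property absent" for optional entries such as imagis,pwm-div, and fails on any other error. A small illustrative helper (not part of the driver) capturing that pattern:

#include <linux/device.h>
#include <linux/of.h>

/*
 * Illustrative helper only: tolerate a missing optional property
 * (-EINVAL) but report and propagate any other parse error.
 */
static int read_optional_u32(struct device *dev, struct device_node *np,
				const char *prop, u32 *out)
{
	int rc = of_property_read_u32(np, prop, out);

	if (rc == -EINVAL)
		return 0;	/* property absent: keep the caller's default */
	if (rc)
		dev_err(dev, "Unable to read %s (%d)\n", prop, rc);

	return rc;
}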
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index a1bea00..33f0600 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -80,7 +80,6 @@
config MMC_BLOCK_TEST
tristate "MMC block test"
depends on MMC_BLOCK && IOSCHED_TEST
- default y
help
MMC block test can be used with test iosched to test the MMC block
device.
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 0b5449e..b748228 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1557,6 +1557,39 @@
}
EXPORT_SYMBOL(mmc_blk_init_packed_statistics);
+/**
+ * mmc_blk_init_async_event_statistics() - Init async event
+ * statistics data
+ * @card: The mmc_card in which the async_event_stats
+ * struct is a member
+ *
+ * Initialize counters for the new request feature, and mark the
+ * statistics as enabled.
+ */
+void mmc_blk_init_async_event_statistics(struct mmc_card *card)
+{
+ if (!card)
+ return;
+
+ /* init async event test stats */
+ memset(&card->async_event_stats, 0,
+ sizeof(struct mmc_async_event_stats));
+ card->async_event_stats.null_fetched = 0;
+ card->async_event_stats.wakeup_new = 0;
+ card->async_event_stats.new_request_flag = 0;
+ card->async_event_stats.q_no_waiting = 0;
+ card->async_event_stats.enabled = true;
+ card->async_event_stats.no_mmc_request_action = 0;
+ card->async_event_stats.wakeup_mq_thread = 0;
+ card->async_event_stats.fetch_due_to_new_req = 0;
+ card->async_event_stats.returned_new_req = 0;
+ card->async_event_stats.done_flag = 0;
+ card->async_event_stats.cmd_retry = 0;
+ card->async_event_stats.done_when_new_req_event_on = 0;
+ card->async_event_stats.new_req_when_new_marked = 0;
+}
+EXPORT_SYMBOL(mmc_blk_init_async_event_statistics);
+
static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
{
struct request_queue *q = mq->queue;
@@ -1569,7 +1602,12 @@
u8 put_back = 0;
u8 max_packed_rw = 0;
u8 reqs = 0;
- struct mmc_wr_pack_stats *stats = &card->wr_pack_stats;
+ struct mmc_wr_pack_stats *stats;
+
+ if (!card)
+ goto no_packed;
+
+ stats = &card->wr_pack_stats;
mmc_blk_clear_packed(mq->mqrq_cur);
@@ -1896,6 +1934,7 @@
struct mmc_async_req *areq;
const u8 packed_num = 2;
u8 reqs = 0;
+ struct mmc_async_event_stats *stats = &card->async_event_stats;
if (!rqc && !mq->mqrq_prev->req)
return 0;
@@ -1917,8 +1956,15 @@
} else
areq = NULL;
areq = mmc_start_req(card->host, areq, (int *) &status);
- if (!areq)
+ if (!areq) {
+ if (status == MMC_BLK_NEW_REQUEST && stats) {
+ if (stats->enabled)
+ stats->returned_new_req++;
+
+ mq->flags |= MMC_QUEUE_NEW_REQUEST;
+ }
return 0;
+ }
mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
brq = &mq_rq->brq;
@@ -1927,6 +1973,8 @@
mmc_queue_bounce_post(mq_rq);
switch (status) {
+ case MMC_BLK_NEW_REQUEST:
+ BUG(); /* should never get here */
case MMC_BLK_SUCCESS:
case MMC_BLK_PARTIAL:
/*
@@ -2077,6 +2125,7 @@
mmc_blk_write_packing_control(mq, req);
+ mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
if (req && req->cmd_flags & REQ_SANITIZE) {
/* complete ongoing async transfer before issuing sanitize */
if (card->host && card->host->areq)
@@ -2101,7 +2150,7 @@
}
out:
- if (!req)
+ if (!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST))
/* release host only when there are no more requests */
mmc_release_host(card->host);
return ret;
diff --git a/drivers/mmc/card/mmc_block_test.c b/drivers/mmc/card/mmc_block_test.c
index 610a822..08c75a0 100644
--- a/drivers/mmc/card/mmc_block_test.c
+++ b/drivers/mmc/card/mmc_block_test.c
@@ -65,9 +65,10 @@
#define test_pr_err(fmt, args...) pr_err("%s: "fmt"\n", MODULE_NAME, args)
#define SANITIZE_TEST_TIMEOUT 240000
+#define NEW_REQ_TEST_SLEEP_TIME 1
+#define NEW_REQ_TEST_NUM_BIOS 64
#define TEST_REQUEST_NUM_OF_BIOS 3
-
#define CHECK_BKOPS_STATS(stats, exp_bkops, exp_hpi, exp_suspend) \
((stats.bkops != exp_bkops) || \
(stats.hpi != exp_hpi) || \
@@ -153,6 +154,8 @@
TEST_LONG_SEQUENTIAL_READ,
TEST_LONG_SEQUENTIAL_WRITE,
+
+ TEST_NEW_REQ_NOTIFICATION,
};
enum mmc_block_test_group {
@@ -163,6 +166,7 @@
TEST_SEND_INVALID_GROUP,
TEST_PACKING_CONTROL_GROUP,
TEST_BKOPS_GROUP,
+ TEST_NEW_NOTIFICATION_GROUP,
};
enum bkops_test_stages {
@@ -182,6 +186,7 @@
struct dentry *bkops_test;
struct dentry *long_sequential_read_test;
struct dentry *long_sequential_write_test;
+ struct dentry *new_req_notification_test;
};
struct mmc_block_test_data {
@@ -217,6 +222,8 @@
enum bkops_test_stages bkops_stage;
/* A wait queue for BKOPs tests */
wait_queue_head_t bkops_wait_q;
+
+ unsigned int completed_req_count;
};
static struct mmc_block_test_data *mbtd;
@@ -278,6 +285,50 @@
spin_unlock(&card->wr_pack_stats.lock);
}
+/**
+ * mmc_print_async_event_stats() - Print async event statistics
+ * @card: The mmc_card in which the async_event_stats
+ * struct is a member
+ */
+void mmc_print_async_event_stats(struct mmc_card *card)
+{
+ struct mmc_async_event_stats *s;
+
+ if (!card)
+ return;
+
+ s = &card->async_event_stats;
+ if (!s)
+ return;
+
+ pr_info("%s: new notification & req statistics:\n",
+ mmc_hostname(card->host));
+ pr_info("%s: done_flag:%d", mmc_hostname(card->host),
+ s->done_flag);
+ pr_info("%s: cmd_retry:%d", mmc_hostname(card->host),
+ s->cmd_retry);
+ pr_info("%s: NULL fetched:%d", mmc_hostname(card->host),
+ s->null_fetched);
+ pr_info("%s: wake up new:%d", mmc_hostname(card->host),
+ s->wakeup_new);
+ pr_info("%s: new_request_flag:%d", mmc_hostname(card->host),
+ s->new_request_flag);
+ pr_info("%s: no waiting:%d\n", mmc_hostname(card->host),
+ s->q_no_waiting);
+ pr_info("%s: no_mmc_request_action:%d", mmc_hostname(card->host),
+ s->no_mmc_request_action);
+ pr_info("%s: wakeup_mq_thread:%d", mmc_hostname(card->host),
+ s->wakeup_mq_thread);
+ pr_info("%s: fetch_due_to_new_req:%d", mmc_hostname(card->host),
+ s->fetch_due_to_new_req);
+ pr_info("%s: returned_new_req:%d", mmc_hostname(card->host),
+ s->returned_new_req);
+ pr_info("%s: done_when_new_req_event_on:%d", mmc_hostname(card->host),
+ s->done_when_new_req_event_on);
+ pr_info("%s: new_req_when_new_marked:%d", mmc_hostname(card->host),
+ s->new_req_when_new_marked);
+}
+
/*
* A callback assigned to the packed_test_fn field.
* Called from block layer in mmc_blk_packed_hdr_wrq_prep.
@@ -651,6 +702,8 @@
return "\"long sequential read\"";
case TEST_LONG_SEQUENTIAL_WRITE:
return "\"long sequential write\"";
+ case TEST_NEW_REQ_NOTIFICATION:
+ return "\"new request notification test\"";
default:
return " Unknown testcase";
}
@@ -1877,8 +1930,6 @@
break;
}
- td->next_req = list_entry(td->test_queue.prev,
- struct test_request, queuelist);
__blk_run_queue(q);
wait_event(mbtd->bkops_wait_q,
mbtd->bkops_stage == BKOPS_STAGE_4);
@@ -1908,8 +1959,6 @@
break;
}
- td->next_req = list_entry(td->test_queue.prev,
- struct test_request, queuelist);
__blk_run_queue(q);
wait_event(mbtd->bkops_wait_q,
mbtd->bkops_stage == BKOPS_STAGE_4);
@@ -1939,8 +1988,6 @@
break;
}
- td->next_req = list_entry(td->test_queue.prev,
- struct test_request, queuelist);
__blk_run_queue(q);
wait_event(mbtd->bkops_wait_q,
mbtd->bkops_stage == BKOPS_STAGE_2);
@@ -1958,8 +2005,6 @@
break;
}
- td->next_req = list_entry(td->test_queue.prev,
- struct test_request, queuelist);
__blk_run_queue(q);
wait_event(mbtd->bkops_wait_q,
@@ -1999,8 +2044,6 @@
break;
}
- td->next_req = list_entry(td->test_queue.next,
- struct test_request, queuelist);
__blk_run_queue(q);
wait_event(mbtd->bkops_wait_q,
mbtd->bkops_stage == BKOPS_STAGE_2);
@@ -2018,8 +2061,6 @@
break;
}
- td->next_req = list_entry(td->test_queue.prev,
- struct test_request, queuelist);
__blk_run_queue(q);
wait_event(mbtd->bkops_wait_q,
@@ -2037,6 +2078,170 @@
return ret;
}
+/*
+ * new_req_post_test() - Do post-test operations for the
+ * new_req_notification test: disable the statistics and clear
+ * the feature flags.
+ * @td: The test_data for the new_req test that has
+ * ended.
+ */
+static int new_req_post_test(struct test_data *td)
+{
+ struct mmc_queue *mq;
+
+ if (!td || !td->req_q)
+ goto exit;
+
+ mq = (struct mmc_queue *)td->req_q->queuedata;
+
+ if (!mq || !mq->card)
+ goto exit;
+
+ /* disable async_event test stats */
+ mq->card->async_event_stats.enabled = false;
+ mmc_print_async_event_stats(mq->card);
+ test_pr_info("Completed %d requests",
+ mbtd->completed_req_count);
+
+exit:
+ return 0;
+}
+
+/*
+ * check_new_req_result() - Print out the number of completed
+ * requests. Assigned to the check_test_result_fn pointer,
+ * therefore the name.
+ * @td: The test_data for the new_req test that has
+ * ended.
+ */
+static int check_new_req_result(struct test_data *td)
+{
+ test_pr_info("%s: Test results: Completed %d requests",
+ __func__, mbtd->completed_req_count);
+ return 0;
+}
+
+/*
+ * new_req_free_end_io_fn() - Remove the request from the queuelist and
+ * free the request's allocated memory. Used as a callback
+ * assigned to the end_io member in the request struct.
+ * @rq: The request to be freed
+ * @err: Unused
+ */
+static void new_req_free_end_io_fn(struct request *rq, int err)
+{
+ struct test_request *test_rq =
+ (struct test_request *)rq->elv.priv[0];
+ struct test_data *ptd = test_get_test_data();
+
+ BUG_ON(!test_rq);
+
+ spin_lock_irq(&ptd->lock);
+ list_del_init(&test_rq->queuelist);
+ ptd->dispatched_count--;
+ spin_unlock_irq(&ptd->lock);
+
+ __blk_put_request(ptd->req_q, test_rq->rq);
+ kfree(test_rq->bios_buffer);
+ kfree(test_rq);
+ mbtd->completed_req_count++;
+}
+
+static int prepare_new_req(struct test_data *td)
+{
+ struct request_queue *q = td->req_q;
+ struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
+
+ mmc_blk_init_packed_statistics(mq->card);
+ mmc_blk_init_async_event_statistics(mq->card);
+
+ mbtd->completed_req_count = 0;
+
+ return 0;
+}
+
+static int test_new_req_notification(struct test_data *ptd)
+{
+ int ret = 0;
+ int i;
+ unsigned int requests_count = 2;
+ unsigned int bio_num;
+ struct test_request *test_rq = NULL;
+
+ while (1) {
+ for (i = 0; i < requests_count; i++) {
+ bio_num = TEST_MAX_BIOS_PER_REQ;
+ test_rq = test_iosched_create_test_req(0, READ,
+ ptd->start_sector,
+ bio_num, TEST_PATTERN_5A,
+ new_req_free_end_io_fn);
+ if (test_rq) {
+ spin_lock_irq(ptd->req_q->queue_lock);
+ list_add_tail(&test_rq->queuelist,
+ &ptd->test_queue);
+ ptd->test_count++;
+ spin_unlock_irq(ptd->req_q->queue_lock);
+ } else {
+ test_pr_err("%s: failed to create read request",
+ __func__);
+ ret = -ENODEV;
+ break;
+ }
+ }
+
+ __blk_run_queue(ptd->req_q);
+ /* wait until the mmc layer has issued all requests in test_queue */
+ while (!list_empty(&ptd->test_queue))
+ msleep(NEW_REQ_TEST_SLEEP_TIME);
+
+ /* test finish criteria */
+ if (mbtd->completed_req_count > 1000) {
+ if (ptd->dispatched_count)
+ continue;
+ else
+ break;
+ }
+
+ for (i = 0; i < requests_count; i++) {
+ bio_num = NEW_REQ_TEST_NUM_BIOS;
+ test_rq = test_iosched_create_test_req(0, READ,
+ ptd->start_sector,
+ bio_num, TEST_PATTERN_5A,
+ new_req_free_end_io_fn);
+ if (test_rq) {
+ spin_lock_irq(ptd->req_q->queue_lock);
+ list_add_tail(&test_rq->queuelist,
+ &ptd->test_queue);
+ ptd->test_count++;
+ spin_unlock_irq(ptd->req_q->queue_lock);
+ } else {
+ test_pr_err("%s: failed to create read request",
+ __func__);
+ ret = -ENODEV;
+ break;
+ }
+ }
+ __blk_run_queue(ptd->req_q);
+ }
+
+ test_iosched_mark_test_completion();
+ test_pr_info("%s: EXIT: %d code", __func__, ret);
+
+ return ret;
+}
+
+static int run_new_req(struct test_data *td)
+{
+ int ret = 0;
+ struct request_queue *q = td->req_q;
+ struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
+
+ mmc_blk_init_async_event_statistics(mq->card);
+ ret = test_new_req_notification(td);
+
+ return ret;
+}
+
static bool message_repeat;
static int test_open(struct inode *inode, struct file *file)
{
@@ -2757,6 +2962,73 @@
.read = long_sequential_write_test_read,
};
+static ssize_t new_req_notification_test_write(struct file *file,
+ const char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ int ret = 0;
+ int i = 0;
+ int number = -1;
+
+ test_pr_info("%s: -- new_req_notification TEST --", __func__);
+
+ sscanf(buf, "%d", &number);
+
+ if (number <= 0)
+ number = 1;
+
+ mbtd->test_group = TEST_NEW_NOTIFICATION_GROUP;
+
+ memset(&mbtd->test_info, 0, sizeof(struct test_info));
+
+ mbtd->test_info.data = mbtd;
+ mbtd->test_info.prepare_test_fn = prepare_new_req;
+ mbtd->test_info.check_test_result_fn = check_new_req_result;
+ mbtd->test_info.get_test_case_str_fn = get_test_case_str;
+ mbtd->test_info.run_test_fn = run_new_req;
+ mbtd->test_info.timeout_msec = 10 * 60 * 1000; /* 10 min */
+ mbtd->test_info.post_test_fn = new_req_post_test;
+
+ for (i = 0 ; i < number ; ++i) {
+ test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
+ test_pr_info("%s: ===================", __func__);
+ test_pr_info("%s: start test case TEST_NEW_REQ_NOTIFICATION",
+ __func__);
+ mbtd->test_info.testcase = TEST_NEW_REQ_NOTIFICATION;
+ ret = test_iosched_start_test(&mbtd->test_info);
+ if (ret) {
+ test_pr_info("%s: break from new_req tests loop",
+ __func__);
+ break;
+ }
+ }
+ return count;
+}
+
+static ssize_t new_req_notification_test_read(struct file *file,
+ char __user *buffer,
+ size_t count,
+ loff_t *offset)
+{
+ memset((void *)buffer, 0, count);
+
+ snprintf(buffer, count,
+ "\nnew_req_notification_test\n========================\n"
+ "Description:\n"
+ "This test checks following scenarious\n"
+ "- new request arrives after a NULL request was sent to the "
+ "mmc_queue,\n"
+ "which is waiting for completion of a former request\n");
+
+ return strnlen(buffer, count);
+}
+
+const struct file_operations new_req_notification_test_ops = {
+ .open = test_open,
+ .write = new_req_notification_test_write,
+ .read = new_req_notification_test_read,
+};
static void mmc_block_test_debugfs_cleanup(void)
{
@@ -2769,6 +3041,7 @@
debugfs_remove(mbtd->debug.bkops_test);
debugfs_remove(mbtd->debug.long_sequential_read_test);
debugfs_remove(mbtd->debug.long_sequential_write_test);
+ debugfs_remove(mbtd->debug.new_req_notification_test);
}
static int mmc_block_test_debugfs_init(void)
@@ -2848,6 +3121,16 @@
NULL,
&bkops_test_ops);
+ mbtd->debug.new_req_notification_test =
+ debugfs_create_file("new_req_notification_test",
+ S_IRUGO | S_IWUGO,
+ tests_root,
+ NULL,
+ &new_req_notification_test_ops);
+
+ if (!mbtd->debug.new_req_notification_test)
+ goto err_nomem;
+
if (!mbtd->debug.bkops_test)
goto err_nomem;
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 8eb787d..7b80dfb 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -22,7 +22,6 @@
#define MMC_QUEUE_BOUNCESZ 65536
-#define MMC_QUEUE_SUSPENDED (1 << 0)
/*
* Based on benchmark tests the default num of requests to trigger the write
@@ -60,18 +59,33 @@
struct request_queue *q = mq->queue;
struct request *req;
struct mmc_card *card = mq->card;
+ struct mmc_async_event_stats *stats;
+ struct mmc_queue_req *tmp;
+
+ if (!card)
+ return 0;
+
+ stats = &mq->card->async_event_stats;
current->flags |= PF_MEMALLOC;
down(&mq->thread_sem);
do {
- struct mmc_queue_req *tmp;
+
req = NULL; /* Must be set to NULL at each iteration */
spin_lock_irq(q->queue_lock);
set_current_state(TASK_INTERRUPTIBLE);
req = blk_fetch_request(q);
mq->mqrq_cur->req = req;
+ if (!req && mq->mqrq_prev->req &&
+ !(mq->mqrq_prev->req->cmd_flags & REQ_SANITIZE) &&
+ !(mq->mqrq_prev->req->cmd_flags & REQ_FLUSH) &&
+ !(mq->mqrq_prev->req->cmd_flags & REQ_DISCARD)) {
+ card->host->context_info.is_waiting_last_req = true;
+ if (stats && stats->enabled)
+ stats->null_fetched++;
+ }
spin_unlock_irq(q->queue_lock);
if (req || mq->mqrq_prev->req) {
@@ -88,6 +102,12 @@
set_current_state(TASK_RUNNING);
mq->issue_fn(mq, req);
+ if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
+ mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
+ if (stats && stats->enabled)
+ stats->fetch_due_to_new_req++;
+ continue; /* fetch again */
+ }
} else {
if (kthread_should_stop()) {
set_current_state(TASK_RUNNING);
@@ -120,7 +140,10 @@
static void mmc_request(struct request_queue *q)
{
struct mmc_queue *mq = q->queuedata;
+ struct mmc_async_event_stats *stats;
struct request *req;
+ unsigned long flags;
+ struct mmc_context_info *cntx;
if (!mq) {
while ((req = blk_fetch_request(q)) != NULL) {
@@ -129,9 +152,39 @@
}
return;
}
+ if (mq->card) {
+ cntx = &mq->card->host->context_info;
+ stats = &mq->card->async_event_stats;
+ } else
+ return;
- if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
+ cntx = &mq->card->host->context_info;
+ stats = &mq->card->async_event_stats;
+ if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
+ /*
+ * A new MMC request arrived while the MMC thread may be
+ * blocked waiting for the previous request to complete,
+ * with no current request fetched
+ */
+
+ spin_lock_irqsave(&cntx->lock, flags);
+ if (cntx->is_waiting_last_req) {
+ if (stats && stats->enabled)
+ stats->wakeup_new++;
+ if (cntx->is_new_req)
+ if (stats->enabled)
+ stats->new_req_when_new_marked++;
+ cntx->is_new_req = true;
+ wake_up_interruptible(&cntx->wait);
+ } else if (stats->enabled)
+ stats->q_no_waiting++;
+ spin_unlock_irqrestore(&cntx->lock, flags);
+ } else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req) {
wake_up_process(mq->thread);
+ if (stats->enabled)
+ stats->wakeup_mq_thread++;
+ } else if (stats->enabled)
+ stats->no_mmc_request_action++;
}
static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
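The mmc_request() change above wakes the waiting core context when a new request arrives while only the previous request is still in flight, and wakes the queue thread when nothing is in flight at all. A condensed, illustrative-only sketch of that decision with a simplified context struct (names mirror the diff; locking and statistics are omitted):

#include <linux/sched.h>
#include <linux/types.h>
#include <linux/wait.h>

/* Simplified stand-in for struct mmc_context_info. */
struct demo_context_info {
	bool is_waiting_last_req;	/* core is blocked on the previous request */
	bool is_new_req;		/* a new request has been queued */
	wait_queue_head_t wait;
};

static void demo_notify_new_request(struct demo_context_info *cntx,
				    struct task_struct *queue_thread,
				    bool cur_empty, bool prev_busy)
{
	if (cur_empty && prev_busy && cntx->is_waiting_last_req) {
		/* core is parked in its wait loop: flag the event and wake it */
		cntx->is_new_req = true;
		wake_up_interruptible(&cntx->wait);
	} else if (cur_empty && !prev_busy) {
		/* nothing in flight: wake the queue thread to fetch requests */
		wake_up_process(queue_thread);
	}
}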
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index a8c104e..0a72372 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -12,17 +12,6 @@
struct mmc_data data;
};
-enum mmc_blk_status {
- MMC_BLK_SUCCESS = 0,
- MMC_BLK_PARTIAL,
- MMC_BLK_CMD_ERR,
- MMC_BLK_RETRY,
- MMC_BLK_ABORT,
- MMC_BLK_DATA_ERR,
- MMC_BLK_ECC_ERR,
- MMC_BLK_NOMEDIUM,
-};
-
enum mmc_packed_cmd {
MMC_PACKED_NONE = 0,
MMC_PACKED_WRITE,
@@ -50,6 +39,9 @@
struct task_struct *thread;
struct semaphore thread_sem;
unsigned int flags;
+#define MMC_QUEUE_SUSPENDED (1 << 0)
+#define MMC_QUEUE_NEW_REQUEST (1 << 1)
+
int (*issue_fn)(struct mmc_queue *, struct request *);
void *data;
struct request_queue *queue;
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index b24620b..2f27407 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -328,6 +328,7 @@
#ifdef CONFIG_DEBUG_FS
mmc_add_card_debugfs(card);
#endif
+ mmc_init_context_info(card->host);
ret = device_add(&card->dev);
if (ret)
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 89f834a..1ea580e 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -564,12 +564,44 @@
mmc_start_bkops(card, false);
}
EXPORT_SYMBOL(mmc_start_idle_time_bkops);
+/*
+ * mmc_wait_data_done() - done callback for data request
+ * @mrq: done data request
+ *
+ * Wakes up the waiting MMC context; passed as a callback to the host controller driver
+ */
+static void mmc_wait_data_done(struct mmc_request *mrq)
+{
+ mrq->host->context_info.is_done_rcv = true;
+ wake_up_interruptible(&mrq->host->context_info.wait);
+}
static void mmc_wait_done(struct mmc_request *mrq)
{
complete(&mrq->completion);
}
+/*
+ * __mmc_start_data_req() - starts a data request
+ * @host: MMC host to start the request
+ * @mrq: data request to start
+ *
+ * Sets the done callback to be called when the request is completed by the
+ * card. Starts execution of the MMC data request.
+ */
+static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ mrq->done = mmc_wait_data_done;
+ mrq->host = host;
+ if (mmc_card_removed(host->card)) {
+ mrq->cmd->error = -ENOMEDIUM;
+ return -ENOMEDIUM;
+ }
+ mmc_start_request(host, mrq);
+
+ return 0;
+}
+
static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
init_completion(&mrq->completion);
@@ -583,6 +615,67 @@
return 0;
}
+/*
+ * mmc_wait_for_data_req_done() - wait for request completion
+ * @host: MMC host to prepare the command.
+ * @mrq: MMC request to wait for
+ *
+ * Blocks the MMC context until the host controller acknowledges the end of
+ * data request execution, or until a new request notification arrives from
+ * the block layer. Handles command retries.
+ *
+ * Returns enum mmc_blk_status after checking errors.
+ */
+static int mmc_wait_for_data_req_done(struct mmc_host *host,
+ struct mmc_request *mrq)
+{
+ struct mmc_command *cmd;
+ struct mmc_context_info *context_info = &host->context_info;
+ int err;
+ unsigned long flags;
+ struct mmc_async_event_stats *stats = &host->card->async_event_stats;
+
+ while (1) {
+ wait_io_event_interruptible(context_info->wait,
+ (context_info->is_done_rcv ||
+ context_info->is_new_req));
+ spin_lock_irqsave(&context_info->lock, flags);
+ context_info->is_waiting_last_req = false;
+ spin_unlock_irqrestore(&context_info->lock, flags);
+ if (context_info->is_done_rcv) {
+ context_info->is_done_rcv = false;
+ context_info->is_new_req = false;
+ cmd = mrq->cmd;
+ if (stats->enabled) {
+ stats->done_flag++;
+ if (context_info->is_new_req)
+ stats->done_when_new_req_event_on++;
+ }
+ if (!cmd->error || !cmd->retries ||
+ mmc_card_removed(host->card)) {
+ err = host->areq->err_check(host->card,
+ host->areq);
+ break; /* return err */
+ } else {
+ pr_info("%s: req failed (CMD%u):%d, retrying\n",
+ mmc_hostname(host),
+ cmd->opcode, cmd->error);
+ cmd->retries--;
+ cmd->error = 0;
+ host->ops->request(host, mrq);
+ continue; /* wait for done/new event again */
+ }
+ } else if (context_info->is_new_req) {
+ context_info->is_new_req = false;
+ if (stats->enabled)
+ stats->new_request_flag++;
+ err = MMC_BLK_NEW_REQUEST;
+ break; /* return err */
+ }
+ } /* while */
+ return err;
+}
+
static void mmc_wait_for_req_done(struct mmc_host *host,
struct mmc_request *mrq)
{
@@ -672,8 +765,21 @@
mmc_pre_req(host, areq->mrq, !host->areq);
if (host->areq) {
- mmc_wait_for_req_done(host, host->areq->mrq);
- err = host->areq->err_check(host->card, host->areq);
+ err = mmc_wait_for_data_req_done(host, host->areq->mrq);
+ if (err == MMC_BLK_NEW_REQUEST) {
+ if (areq) {
+ pr_err("%s: new request while areq = %p",
+ mmc_hostname(host), areq);
+ BUG_ON(1);
+ }
+ if (error)
+ *error = err;
+ /*
+ * The previous request was not completed,
+ * nothing to return
+ */
+ return NULL;
+ }
/*
* Check BKOPS urgency for each R1 response
*/
@@ -688,7 +794,7 @@
}
if (!err && areq)
- start_err = __mmc_start_req(host, areq->mrq);
+ start_err = __mmc_start_data_req(host, areq->mrq);
if (host->areq)
mmc_post_req(host, host->areq->mrq, 0);
@@ -3199,6 +3305,15 @@
EXPORT_SYMBOL(mmc_set_embedded_sdio_data);
#endif
+void mmc_init_context_info(struct mmc_host *host)
+{
+ spin_lock_init(&host->context_info.lock);
+ host->context_info.is_new_req = false;
+ host->context_info.is_done_rcv = false;
+ host->context_info.is_waiting_last_req = false;
+ init_waitqueue_head(&host->context_info.wait);
+}
+
static int __init mmc_init(void)
{
int ret;
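On the core side, mmc_wait_for_data_req_done() above sleeps until either the completion callback sets is_done_rcv or the block layer sets is_new_req. Below is a simplified, illustrative-only sketch of that two-event wait loop, with retry handling and statistics dropped, wait_event_interruptible() standing in for wait_io_event_interruptible(), and -EAGAIN standing in for MMC_BLK_NEW_REQUEST:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/wait.h>

/* Simplified stand-in for struct mmc_context_info. */
struct demo_wait_context {
	bool is_done_rcv;
	bool is_new_req;
	bool is_waiting_last_req;
	wait_queue_head_t wait;
};

static int demo_wait_done_or_new(struct demo_wait_context *cntx)
{
	while (1) {
		wait_event_interruptible(cntx->wait,
				cntx->is_done_rcv || cntx->is_new_req);
		cntx->is_waiting_last_req = false;

		if (cntx->is_done_rcv) {
			/* the previous data request completed */
			cntx->is_done_rcv = false;
			cntx->is_new_req = false;
			return 0;
		}
		if (cntx->is_new_req) {
			/* block layer queued new work before completion */
			cntx->is_new_req = false;
			return -EAGAIN;
		}
	}
}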
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index c85f5aa..6fa51e0 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -87,5 +87,5 @@
extern void mmc_exit_clk_scaling(struct mmc_host *host);
extern void mmc_reset_clk_scale_stats(struct mmc_host *host);
extern unsigned long mmc_get_max_frequency(struct mmc_host *host);
+void mmc_init_context_info(struct mmc_host *host);
#endif
-
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 84a26a1..9642a06 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -528,6 +528,141 @@
.write = mmc_wr_pack_stats_write,
};
+static int mmc_new_req_stats_open(struct inode *inode, struct file *filp)
+{
+ struct mmc_card *card = inode->i_private;
+
+ filp->private_data = card;
+ card->async_event_stats.print_in_read = 1;
+ return 0;
+}
+
+static ssize_t mmc_new_req_stats_read(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct mmc_card *card = filp->private_data;
+ struct mmc_async_event_stats *s;
+ char *temp_buf;
+
+ if (!card)
+ return cnt;
+
+ s = &card->async_event_stats;
+
+ if (!card->async_event_stats.enabled) {
+ pr_info("%s: New Request statistics are disabled\n",
+ mmc_hostname(card->host));
+ goto exit;
+ }
+
+ temp_buf = kmalloc(2 * TEMP_BUF_SIZE, GFP_KERNEL);
+ if (!temp_buf)
+ goto exit;
+
+ memset(ubuf, 0, cnt);
+ memset(temp_buf, 0, 2 * TEMP_BUF_SIZE);
+
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: new notification & req statistics:\n",
+ mmc_hostname(card->host));
+ strlcat(ubuf, temp_buf, cnt);
+
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: done_flag:%d\n", mmc_hostname(card->host), s->done_flag);
+ strlcat(ubuf, temp_buf, cnt);
+
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: cmd_retry:%d\n", mmc_hostname(card->host), s->cmd_retry);
+ strlcat(ubuf, temp_buf, cnt);
+
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: NULL fetched:%d\n", mmc_hostname(card->host),
+ s->null_fetched);
+ strlcat(ubuf, temp_buf, cnt);
+
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: wake up new:%d\n",
+ mmc_hostname(card->host), s->wakeup_new);
+ strlcat(ubuf, temp_buf, cnt);
+
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: new_request_flag:%d\n", mmc_hostname(card->host),
+ s->new_request_flag);
+ strlcat(ubuf, temp_buf, cnt);
+
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: no waiting:%d\n", mmc_hostname(card->host),
+ s->q_no_waiting);
+ strlcat(ubuf, temp_buf, cnt);
+
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: no_mmc_request_action:%d\n", mmc_hostname(card->host),
+ s->no_mmc_request_action);
+ strlcat(ubuf, temp_buf, cnt);
+
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: wakeup_mq_thread:%d\n", mmc_hostname(card->host),
+ s->wakeup_mq_thread);
+ strlcat(ubuf, temp_buf, cnt);
+
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: fetch_due_to_new_req:%d\n", mmc_hostname(card->host),
+ s->fetch_due_to_new_req);
+ strlcat(ubuf, temp_buf, cnt);
+
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: returned_new_req:%d\n", mmc_hostname(card->host),
+ s->returned_new_req);
+ strlcat(ubuf, temp_buf, cnt);
+
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: done_when_new_req_event_on:%d\n",
+ mmc_hostname(card->host), s->done_when_new_req_event_on);
+ strlcat(ubuf, temp_buf, cnt);
+
+ kfree(temp_buf);
+
+ pr_info("%s", ubuf);
+
+exit:
+ if (card->async_event_stats.print_in_read == 1) {
+ card->async_event_stats.print_in_read = 0;
+ return strnlen(ubuf, cnt);
+ }
+
+ return 0;
+}
+
+static ssize_t mmc_new_req_stats_write(struct file *filp,
+ const char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ struct mmc_card *card = filp->private_data;
+ int value;
+
+ if (!card)
+ return cnt;
+
+ sscanf(ubuf, "%d", &value);
+ if (value) {
+ mmc_blk_init_async_event_statistics(card);
+ pr_info("%s: %s: New request statistics are enabled",
+ mmc_hostname(card->host), __func__);
+ } else {
+ card->async_event_stats.enabled = false;
+ pr_info("%s: %s: New request statistics are disabled",
+ mmc_hostname(card->host), __func__);
+ }
+
+ return cnt;
+}
+
+static const struct file_operations mmc_dbg_new_req_stats_fops = {
+ .open = mmc_new_req_stats_open,
+ .read = mmc_new_req_stats_read,
+ .write = mmc_new_req_stats_write,
+};
+
static int mmc_bkops_stats_open(struct inode *inode, struct file *filp)
{
struct mmc_card *card = inode->i_private;
@@ -673,6 +808,10 @@
&mmc_dbg_wr_pack_stats_fops))
goto err;
+ if (!debugfs_create_file("new_req_stats", S_IRUSR, root, card,
+ &mmc_dbg_new_req_stats_fops))
+ goto err;
+
if (mmc_card_mmc(card) && (card->ext_csd.rev >= 6) &&
card->ext_csd.bkops_en)
if (!debugfs_create_file("bkops_stats", S_IRUSR, root, card,
diff --git a/drivers/net/usb/rmnet_usb_ctrl.c b/drivers/net/usb/rmnet_usb_ctrl.c
index 1476bb3..c69bdc2 100644
--- a/drivers/net/usb/rmnet_usb_ctrl.c
+++ b/drivers/net/usb/rmnet_usb_ctrl.c
@@ -338,15 +338,6 @@
return retval;
}
-int rmnet_usb_ctrl_suspend(struct rmnet_ctrl_dev *dev)
-{
- if (work_busy(&dev->get_encap_work))
- return -EBUSY;
-
- usb_kill_anchored_urbs(&dev->rx_submitted);
-
- return 0;
-}
static int rmnet_usb_ctrl_alloc_rx(struct rmnet_ctrl_dev *dev)
{
int retval = -ENOMEM;
diff --git a/drivers/net/usb/rmnet_usb_data.c b/drivers/net/usb/rmnet_usb_data.c
index fdfe468..4a1423d 100644
--- a/drivers/net/usb/rmnet_usb_data.c
+++ b/drivers/net/usb/rmnet_usb_data.c
@@ -87,65 +87,35 @@
{
struct usbnet *unet;
struct rmnet_ctrl_dev *dev;
- int retval = 0;
unet = usb_get_intfdata(iface);
- if (!unet) {
- pr_err("%s:data device not found\n", __func__);
- retval = -ENODEV;
- goto fail;
- }
dev = (struct rmnet_ctrl_dev *)unet->data[1];
- if (!dev) {
- dev_err(&iface->dev, "%s: ctrl device not found\n",
- __func__);
- retval = -ENODEV;
- goto fail;
- }
- retval = usbnet_suspend(iface, message);
- if (!retval) {
- retval = rmnet_usb_ctrl_suspend(dev);
- iface->dev.power.power_state.event = message.event;
- } else {
- dev_dbg(&iface->dev,
- "%s: device is busy can not suspend\n", __func__);
- }
+ if (work_busy(&dev->get_encap_work))
+ return -EBUSY;
-fail:
- return retval;
+ if (usbnet_suspend(iface, message))
+ return -EBUSY;
+
+ usb_kill_anchored_urbs(&dev->rx_submitted);
+
+ return 0;
}
static int rmnet_usb_resume(struct usb_interface *iface)
{
int retval = 0;
- int oldstate;
struct usbnet *unet;
struct rmnet_ctrl_dev *dev;
unet = usb_get_intfdata(iface);
- if (!unet) {
- pr_err("%s:data device not found\n", __func__);
- retval = -ENODEV;
- goto fail;
- }
dev = (struct rmnet_ctrl_dev *)unet->data[1];
- if (!dev) {
- dev_err(&iface->dev, "%s: ctrl device not found\n", __func__);
- retval = -ENODEV;
- goto fail;
- }
- oldstate = iface->dev.power.power_state.event;
- iface->dev.power.power_state.event = PM_EVENT_ON;
- retval = usbnet_resume(iface);
- if (!retval) {
- if (oldstate & PM_EVENT_SUSPEND)
- retval = rmnet_usb_ctrl_start_rx(dev);
- }
-fail:
+ usbnet_resume(iface);
+ retval = rmnet_usb_ctrl_start_rx(dev);
+
return retval;
}
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 740c717..e9130f6 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -333,10 +333,12 @@
void usbnet_defer_kevent (struct usbnet *dev, int work)
{
set_bit (work, &dev->flags);
- if (!schedule_work (&dev->kevent))
- netdev_err(dev->net, "kevent %d may have been dropped\n", work);
- else
+ if (!schedule_work (&dev->kevent)) {
+ if (net_ratelimit())
+ netdev_err(dev->net, "kevent %d may have been dropped\n", work);
+ } else {
netdev_dbg(dev->net, "kevent %d scheduled\n", work);
+ }
}
EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
diff --git a/drivers/net/wireless/wcnss/wcnss_wlan.c b/drivers/net/wireless/wcnss/wcnss_wlan.c
index 71a9860..457cbb3 100644
--- a/drivers/net/wireless/wcnss/wcnss_wlan.c
+++ b/drivers/net/wireless/wcnss/wcnss_wlan.c
@@ -26,6 +26,7 @@
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/clk.h>
+#include <linux/ratelimit.h>
#include <mach/msm_smd.h>
#include <mach/msm_iomap.h>
@@ -61,6 +62,11 @@
#define CCU_LAST_ADDR1_OFFSET 0x108
#define CCU_LAST_ADDR2_OFFSET 0x10c
+#define MSM_PRONTO_A2XB_BASE 0xfb100400
+#define A2XB_CFG_OFFSET 0x00
+#define A2XB_INT_SRC_OFFSET 0x0c
+#define A2XB_ERR_INFO_OFFSET 0x1c
+
#define WCNSS_CTRL_CHANNEL "WCNSS_CTRL"
#define WCNSS_MAX_FRAME_SIZE 500
#define WCNSS_VERSION_LEN 30
@@ -182,7 +188,7 @@
/* wcnss_reset_intr() is invoked when host drivers fails to
* communicate with WCNSS over SMD; so logging these registers
* helps to know WCNSS failure reason */
-static void wcnss_log_ccpu_regs(void)
+void wcnss_riva_log_debug_regs(void)
{
void __iomem *ccu_base;
void __iomem *ccu_reg;
@@ -196,31 +202,62 @@
ccu_reg = ccu_base + CCU_INVALID_ADDR_OFFSET;
reg = readl_relaxed(ccu_reg);
- pr_info("%s: CCU_CCPU_INVALID_ADDR %08x\n", __func__, reg);
+ pr_info_ratelimited("%s: CCU_CCPU_INVALID_ADDR %08x\n", __func__, reg);
ccu_reg = ccu_base + CCU_LAST_ADDR0_OFFSET;
reg = readl_relaxed(ccu_reg);
- pr_info("%s: CCU_CCPU_LAST_ADDR0 %08x\n", __func__, reg);
+ pr_info_ratelimited("%s: CCU_CCPU_LAST_ADDR0 %08x\n", __func__, reg);
ccu_reg = ccu_base + CCU_LAST_ADDR1_OFFSET;
reg = readl_relaxed(ccu_reg);
- pr_info("%s: CCU_CCPU_LAST_ADDR1 %08x\n", __func__, reg);
+ pr_info_ratelimited("%s: CCU_CCPU_LAST_ADDR1 %08x\n", __func__, reg);
ccu_reg = ccu_base + CCU_LAST_ADDR2_OFFSET;
reg = readl_relaxed(ccu_reg);
- pr_info("%s: CCU_CCPU_LAST_ADDR2 %08x\n", __func__, reg);
+ pr_info_ratelimited("%s: CCU_CCPU_LAST_ADDR2 %08x\n", __func__, reg);
iounmap(ccu_base);
}
+EXPORT_SYMBOL(wcnss_riva_log_debug_regs);
-/* interface to reset Riva by sending the reset interrupt */
+/* Log pronto debug registers before sending reset interrupt */
+void wcnss_pronto_log_debug_regs(void)
+{
+ void __iomem *a2xb_base;
+ void __iomem *reg_addr;
+ u32 reg = 0;
+
+ a2xb_base = ioremap(MSM_PRONTO_A2XB_BASE, SZ_512);
+ if (!a2xb_base) {
+ pr_err("%s: ioremap WCNSS A2XB reg failed\n", __func__);
+ return;
+ }
+
+ reg_addr = a2xb_base + A2XB_CFG_OFFSET;
+ reg = readl_relaxed(reg_addr);
+ pr_info_ratelimited("%s: A2XB_CFG_OFFSET %08x\n", __func__, reg);
+
+ reg_addr = a2xb_base + A2XB_INT_SRC_OFFSET;
+ reg = readl_relaxed(reg_addr);
+ pr_info_ratelimited("%s: A2XB_INT_SRC_OFFSET %08x\n", __func__, reg);
+
+ reg_addr = a2xb_base + A2XB_ERR_INFO_OFFSET;
+ reg = readl_relaxed(reg_addr);
+ pr_info_ratelimited("%s: A2XB_ERR_INFO_OFFSET %08x\n", __func__, reg);
+
+ iounmap(a2xb_base);
+}
+EXPORT_SYMBOL(wcnss_pronto_log_debug_regs);
+
+/* interface to reset wcnss by sending the reset interrupt */
void wcnss_reset_intr(void)
{
- if (wcnss_hardware_type() != WCNSS_RIVA_HW) {
+ if (wcnss_hardware_type() == WCNSS_PRONTO_HW) {
+ wcnss_pronto_log_debug_regs();
pr_err("%s: reset interrupt not supported\n", __func__);
return;
}
- wcnss_log_ccpu_regs();
+ wcnss_riva_log_debug_regs();
wmb();
__raw_writel(1 << 24, MSM_APCS_GCC_BASE + 0x8);
}
diff --git a/drivers/power/msm_battery.c b/drivers/power/msm_battery.c
index f8186b1..3365811 100644
--- a/drivers/power/msm_battery.c
+++ b/drivers/power/msm_battery.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -503,13 +503,13 @@
}
if (msm_batt_info.charger_type != charger_type) {
- if (charger_type == CHARGER_TYPE_USB_WALL ||
- charger_type == CHARGER_TYPE_USB_PC ||
+ if (charger_type == CHARGER_TYPE_USB_PC ||
charger_type == CHARGER_TYPE_USB_CARKIT) {
DBG_LIMIT("BATT: USB charger plugged in\n");
msm_batt_info.current_chg_source = USB_CHG;
supp = &msm_psy_usb;
- } else if (charger_type == CHARGER_TYPE_WALL) {
+ } else if (charger_type == CHARGER_TYPE_WALL ||
+ charger_type == CHARGER_TYPE_USB_WALL) {
DBG_LIMIT("BATT: AC Wall changer plugged in\n");
msm_batt_info.current_chg_source = AC_CHG;
supp = &msm_psy_ac;
diff --git a/drivers/power/pm8921-bms.c b/drivers/power/pm8921-bms.c
index a8d52b5..12bdf30 100644
--- a/drivers/power/pm8921-bms.c
+++ b/drivers/power/pm8921-bms.c
@@ -524,6 +524,7 @@
int rc;
uint16_t msw, lsw;
+ *result = 0;
rc = pm_bms_read_output_data(chip, CC_LSB, &lsw);
if (rc) {
pr_err("fail to read CC_LSB rc = %d\n", rc);
@@ -1434,6 +1435,7 @@
chg_soc = linear_interpolate(chip->soc_at_cv, chip->ibat_at_cv_ua,
100, -1 * chip->chg_term_ua,
ibat_ua);
+ chg_soc = bound_soc(chg_soc);
/* always report a higher soc */
if (chg_soc > chip->prev_chg_soc) {
@@ -2171,7 +2173,9 @@
int pm8921_bms_get_battery_current(int *result_ua)
{
int vsense_uv;
+ int rc = 0;
+ *result_ua = 0;
if (!the_chip) {
pr_err("called before initialization\n");
return -EINVAL;
@@ -2183,14 +2187,20 @@
mutex_lock(&the_chip->bms_output_lock);
pm_bms_lock_output_data(the_chip);
- read_vsense_avg(the_chip, &vsense_uv);
+ rc = read_vsense_avg(the_chip, &vsense_uv);
pm_bms_unlock_output_data(the_chip);
mutex_unlock(&the_chip->bms_output_lock);
+ if (rc) {
+ pr_err("Unable to read vsense average\n");
+ goto error_vsense;
+ }
pr_debug("vsense=%duV\n", vsense_uv);
/* cast for signed division */
*result_ua = div_s64(vsense_uv * 1000000LL, the_chip->r_sense_uohm);
pr_debug("ibat=%duA\n", *result_ua);
- return 0;
+
+error_vsense:
+ return rc;
}
EXPORT_SYMBOL(pm8921_bms_get_battery_current);
@@ -3093,10 +3103,13 @@
calculate_soc_work(&(chip->calculate_soc_delayed_work.work));
- get_battery_uvolts(chip, &vbatt);
- pr_info("OK battery_capacity_at_boot=%d volt = %d ocv = %d\n",
+ rc = get_battery_uvolts(chip, &vbatt);
+ if (!rc)
+ pr_info("OK battery_capacity_at_boot=%d volt = %d ocv = %d\n",
pm8921_bms_get_percent_charge(),
vbatt, chip->last_ocv_uv);
+ else
+ pr_info("Unable to read battery voltage at boot\n");
return 0;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 52b96e8..58f4ba6 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -232,11 +232,11 @@
* the host controller
* @reg_hcs - host controller status register value
*
- * Returns 0 if device present, non-zero if no device detected
+ * Returns 1 if device present, 0 if no device detected
*/
static inline int ufshcd_is_device_present(u32 reg_hcs)
{
- return (DEVICE_PRESENT & reg_hcs) ? 0 : -1;
+ return (DEVICE_PRESENT & reg_hcs) ? 1 : 0;
}
/**
@@ -911,7 +911,7 @@
/* check if device present */
reg = readl((hba->mmio_base + REG_CONTROLLER_STATUS));
- if (ufshcd_is_device_present(reg)) {
+ if (!ufshcd_is_device_present(reg)) {
dev_err(&hba->pdev->dev, "cc: Device not present\n");
err = -ENXIO;
goto out;
@@ -1032,11 +1032,11 @@
return -EIO;
/* Configure UTRL and UTMRL base address registers */
- writel(hba->utrdl_dma_addr,
- (hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_BASE_L));
writel(lower_32_bits(hba->utrdl_dma_addr),
+ (hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_BASE_L));
+ writel(upper_32_bits(hba->utrdl_dma_addr),
(hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_BASE_H));
- writel(hba->utmrdl_dma_addr,
+ writel(lower_32_bits(hba->utmrdl_dma_addr),
(hba->mmio_base + REG_UTP_TASK_REQ_LIST_BASE_L));
writel(upper_32_bits(hba->utmrdl_dma_addr),
(hba->mmio_base + REG_UTP_TASK_REQ_LIST_BASE_H));
@@ -1160,9 +1160,11 @@
task_result = be32_to_cpu(task_rsp_upiup->header.dword_1);
task_result = ((task_result & MASK_TASK_RESPONSE) >> 8);
- if (task_result != UPIU_TASK_MANAGEMENT_FUNC_COMPL ||
+ if (task_result != UPIU_TASK_MANAGEMENT_FUNC_COMPL &&
task_result != UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED)
task_result = FAILED;
+ else
+ task_result = SUCCESS;
} else {
task_result = FAILED;
dev_err(&hba->pdev->dev,
@@ -1556,7 +1558,7 @@
goto out;
}
clear_bit(free_slot, &hba->tm_condition);
- return ufshcd_task_req_compl(hba, free_slot);
+ err = ufshcd_task_req_compl(hba, free_slot);
out:
return err;
}
@@ -1580,7 +1582,7 @@
tag = cmd->request->tag;
err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_LOGICAL_RESET);
- if (err)
+ if (err == FAILED)
goto out;
for (pos = 0; pos < hba->nutrs; pos++) {
@@ -1620,7 +1622,7 @@
if (hba->ufshcd_state == UFSHCD_STATE_RESET)
return SUCCESS;
- return (ufshcd_do_reset(hba) == SUCCESS) ? SUCCESS : FAILED;
+ return ufshcd_do_reset(hba);
}
/**
@@ -1652,7 +1654,7 @@
spin_unlock_irqrestore(host->host_lock, flags);
err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_ABORT_TASK);
- if (err)
+ if (err == FAILED)
goto out;
scsi_dma_unmap(cmd);
@@ -1836,7 +1838,7 @@
err = pci_request_regions(pdev, UFSHCD);
if (err < 0) {
dev_err(&pdev->dev, "request regions failed\n");
- goto out_disable;
+ goto out_host_put;
}
hba->mmio_base = pci_ioremap_bar(pdev, 0);
@@ -1925,8 +1927,9 @@
iounmap(hba->mmio_base);
out_release_regions:
pci_release_regions(pdev);
-out_disable:
+out_host_put:
scsi_host_put(host);
+out_disable:
pci_clear_master(pdev);
pci_disable_device(pdev);
out_error:
@@ -1952,24 +1955,7 @@
#endif
};
-/**
- * ufshcd_init - Driver registration routine
- */
-static int __init ufshcd_init(void)
-{
- return pci_register_driver(&ufshcd_pci_driver);
-}
-module_init(ufshcd_init);
-
-/**
- * ufshcd_exit - Driver exit clean-up routine
- */
-static void __exit ufshcd_exit(void)
-{
- pci_unregister_driver(&ufshcd_pci_driver);
-}
-module_exit(ufshcd_exit);
-
+module_pci_driver(ufshcd_pci_driver);
MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>, "
"Vinayak Holikatti <h.vinayak@samsung.com>");
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index 7b8788d..7d8defd 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -62,6 +62,60 @@
printk(x); \
} while (0)
+
+static int can_use_cma_pages(struct zone *zone, gfp_t gfp_mask)
+{
+ int can_use = 0;
+ int mtype = allocflags_to_migratetype(gfp_mask);
+ int i = 0;
+ int *mtype_fallbacks = get_migratetype_fallbacks(mtype);
+
+ if (is_migrate_cma(mtype)) {
+ can_use = 1;
+ } else {
+ for (i = 0;; i++) {
+ int fallbacktype = mtype_fallbacks[i];
+
+ if (is_migrate_cma(fallbacktype)) {
+ can_use = 1;
+ break;
+ }
+
+ if (fallbacktype == MIGRATE_RESERVE)
+ break;
+ }
+ }
+ return can_use;
+}
+
+
+static int nr_free_zone_pages(struct zone *zone, gfp_t gfp_mask)
+{
+ int sum = zone_page_state(zone, NR_FREE_PAGES);
+
+ if (!can_use_cma_pages(zone, gfp_mask))
+ sum -= zone_page_state(zone, NR_FREE_CMA_PAGES);
+
+ return sum;
+}
+
+
+static int nr_free_pages(gfp_t gfp_mask)
+{
+ struct zoneref *z;
+ struct zone *zone;
+ int sum = 0;
+
+ struct zonelist *zonelist = node_zonelist(numa_node_id(), gfp_mask);
+
+ for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
+ sum += nr_free_zone_pages(zone, gfp_mask);
+ }
+
+ return sum;
+}
+
+
static int test_task_flag(struct task_struct *p, int flag)
{
struct task_struct *t = p;
@@ -93,6 +147,15 @@
int other_file = global_page_state(NR_FILE_PAGES) -
global_page_state(NR_SHMEM);
+ if (sc->nr_to_scan > 0 && other_free > other_file) {
+ /*
+ * If the number of free pages is going to affect the decision
+ * of which process is selected then ensure only free pages
+ * which can satisfy the request are considered.
+ */
+ other_free = nr_free_pages(sc->gfp_mask);
+ }
+
if (lowmem_adj_size < array_size)
array_size = lowmem_adj_size;
if (lowmem_minfree_size < array_size)
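Worked example for the adjustment above, with made-up numbers: if the zones report 10000 free pages of which 4000 are CMA pages, an allocation that cannot fall back to MIGRATE_CMA effectively has only 6000 usable pages, so other_free is recomputed from nr_free_pages(sc->gfp_mask) rather than taken from the raw global free count. A one-line illustrative helper:

#include <stdbool.h>

static int usable_free_pages(int nr_free, int nr_free_cma, bool can_use_cma)
{
	/* e.g. usable_free_pages(10000, 4000, false) == 6000 */
	return can_use_cma ? nr_free : nr_free - nr_free_cma;
}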
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index 7a0e32b..4ffe0d8 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -754,6 +754,8 @@
uport->ignore_status_mask = termios->c_iflag & INPCK;
uport->ignore_status_mask |= termios->c_iflag & IGNPAR;
+ uport->ignore_status_mask |= termios->c_iflag & IGNBRK;
+
uport->read_status_mask = (termios->c_cflag & CREAD);
msm_hs_write(uport, UARTDM_IMR_ADDR, 0);
@@ -1031,15 +1033,29 @@
if (unlikely(status & UARTDM_SR_PAR_FRAME_BMSK)) {
/* Can not tell difference between parity & frame error */
+ if (hs_serial_debug_mask)
+ printk(KERN_WARNING "msm_serial_hs: parity error\n");
uport->icount.parity++;
error_f = 1;
- if (uport->ignore_status_mask & IGNPAR) {
+ if (!(uport->ignore_status_mask & IGNPAR)) {
retval = tty_insert_flip_char(tty, 0, TTY_PARITY);
if (!retval)
msm_uport->rx.buffer_pending |= TTY_PARITY;
}
}
+ if (unlikely(status & UARTDM_SR_RX_BREAK_BMSK)) {
+ if (hs_serial_debug_mask)
+ printk(KERN_WARNING "msm_serial_hs: Rx break\n");
+ uport->icount.brk++;
+ error_f = 1;
+ if (!(uport->ignore_status_mask & IGNBRK)) {
+ retval = tty_insert_flip_char(tty, 0, TTY_BREAK);
+ if (!retval)
+ msm_uport->rx.buffer_pending |= TTY_BREAK;
+ }
+ }
+
if (error_f)
msm_hs_write(uport, UARTDM_CR_ADDR, RESET_ERROR_STATUS);
diff --git a/drivers/tty/serial/msm_serial_hs_lite.c b/drivers/tty/serial/msm_serial_hs_lite.c
index 72a12d1..39c1801 100644
--- a/drivers/tty/serial/msm_serial_hs_lite.c
+++ b/drivers/tty/serial/msm_serial_hs_lite.c
@@ -45,11 +45,33 @@
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/wakelock.h>
#include <mach/board.h>
#include <mach/msm_serial_hs_lite.h>
#include <asm/mach-types.h>
#include "msm_serial_hs_hwreg.h"
+/*
+ * There are 3 different kinds of UART core available on MSM:
+ * High Speed UART (i.e. Legacy HSUART), GSBI based HSUART
+ * and BLSP based HSUART.
+ */
+enum uart_core_type {
+ LEGACY_HSUART,
+ GSBI_HSUART,
+ BLSP_HSUART,
+};
+
+/*
+ * UART can be used in 2-wire or 4-wire mode.
+ * Use uart_func_mode to set 2-wire or 4-wire mode.
+ */
+enum uart_func_mode {
+ UART_TWO_WIRE, /* can't support HW Flow control. */
+ UART_FOUR_WIRE, /* can support HW Flow control. */
+};
+
struct msm_hsl_port {
struct uart_port uart;
char name[16];
@@ -60,11 +82,13 @@
unsigned int *uart_csr_code;
unsigned int *gsbi_mapbase;
unsigned int *mapped_gsbi;
- int is_uartdm;
unsigned int old_snap_state;
unsigned int ver_id;
int tx_timeout;
struct mutex clk_mutex;
+ enum uart_core_type uart_type;
+ enum uart_func_mode func_mode;
+ struct wake_lock port_open_wake_lock;
};
#define UARTDM_VERSION_11_13 0
@@ -147,13 +171,191 @@
static unsigned int msm_serial_hsl_has_gsbi(struct uart_port *port)
{
- return UART_TO_MSM(port)->is_uartdm;
+ return (UART_TO_MSM(port)->uart_type == GSBI_HSUART);
}
+/**
+ * set_gsbi_uart_func_mode: Check the currently used GSBI UART mode
+ * and set the new required GSBI UART Mode if it is different.
+ * @port: uart port
+ */
+static void set_gsbi_uart_func_mode(struct uart_port *port)
+{
+ struct msm_hsl_port *msm_hsl_port = UART_TO_MSM(port);
+ unsigned int set_gsbi_uart_mode = GSBI_PROTOCOL_I2C_UART;
+ unsigned int cur_gsbi_uart_mode;
+
+ if (msm_hsl_port->func_mode == UART_FOUR_WIRE)
+ set_gsbi_uart_mode = GSBI_PROTOCOL_UART;
+
+ if (msm_hsl_port->pclk)
+ clk_prepare_enable(msm_hsl_port->pclk);
+
+ /* Read current used GSBI UART Mode and set only if it is different. */
+ cur_gsbi_uart_mode = ioread32(msm_hsl_port->mapped_gsbi +
+ GSBI_CONTROL_ADDR);
+ if ((cur_gsbi_uart_mode & set_gsbi_uart_mode) != set_gsbi_uart_mode)
+ /*
+ * Program the GSBI based UART protocol mode, i.e. I2C/UART
+ * shared mode or UART mode.
+ */
+ iowrite32(set_gsbi_uart_mode,
+ msm_hsl_port->mapped_gsbi + GSBI_CONTROL_ADDR);
+
+ if (msm_hsl_port->pclk)
+ clk_disable_unprepare(msm_hsl_port->pclk);
+}
+
+/**
+ * msm_hsl_config_uart_tx_rx_gpios - Configures UART Tx and RX GPIOs
+ * @port: uart port
+ */
+static int msm_hsl_config_uart_tx_rx_gpios(struct uart_port *port)
+{
+ struct platform_device *pdev = to_platform_device(port->dev);
+ const struct msm_serial_hslite_platform_data *pdata =
+ pdev->dev.platform_data;
+ int ret;
+
+ if (pdata) {
+ ret = gpio_request(pdata->uart_tx_gpio,
+ "UART_TX_GPIO");
+ if (unlikely(ret)) {
+ pr_err("gpio request failed for:%d\n",
+ pdata->uart_tx_gpio);
+ goto exit_uart_config;
+ }
+
+ ret = gpio_request(pdata->uart_rx_gpio, "UART_RX_GPIO");
+ if (unlikely(ret)) {
+ pr_err("gpio request failed for:%d\n",
+ pdata->uart_rx_gpio);
+ gpio_free(pdata->uart_tx_gpio);
+ goto exit_uart_config;
+ }
+ } else {
+ pr_err("Pdata is NULL.\n");
+ ret = -EINVAL;
+ }
+
+exit_uart_config:
+ return ret;
+}
+
+/**
+ * msm_hsl_unconfig_uart_tx_rx_gpios: Unconfigures UART Tx and RX GPIOs
+ * @port: uart port
+ */
+static void msm_hsl_unconfig_uart_tx_rx_gpios(struct uart_port *port)
+{
+ struct platform_device *pdev = to_platform_device(port->dev);
+ const struct msm_serial_hslite_platform_data *pdata =
+ pdev->dev.platform_data;
+
+ if (pdata) {
+ gpio_free(pdata->uart_tx_gpio);
+ gpio_free(pdata->uart_rx_gpio);
+ } else {
+ pr_err("Error:Pdata is NULL.\n");
+ }
+}
+
+/**
+ * msm_hsl_config_uart_hwflow_gpios: Configures UART HWFlow GPIOs
+ * @port: uart port
+ */
+static int msm_hsl_config_uart_hwflow_gpios(struct uart_port *port)
+{
+ struct platform_device *pdev = to_platform_device(port->dev);
+ const struct msm_serial_hslite_platform_data *pdata =
+ pdev->dev.platform_data;
+ int ret = -EINVAL;
+
+ if (pdata) {
+ ret = gpio_request(pdata->uart_cts_gpio,
+ "UART_CTS_GPIO");
+ if (unlikely(ret)) {
+ pr_err("gpio request failed for:%d\n",
+ pdata->uart_cts_gpio);
+ goto exit_config_uart;
+ }
+
+ ret = gpio_request(pdata->uart_rfr_gpio,
+ "UART_RFR_GPIO");
+ if (unlikely(ret)) {
+ pr_err("gpio request failed for:%d\n",
+ pdata->uart_rfr_gpio);
+ gpio_free(pdata->uart_cts_gpio);
+ goto exit_config_uart;
+ }
+ } else {
+ pr_err("Error: Pdata is NULL.\n");
+ }
+
+exit_config_uart:
+ return ret;
+}
+
+/**
+ * msm_hsl_unconfig_uart_hwflow_gpios: Unconfigures UART HWFlow GPIOs
+ * @port: uart port
+ */
+static void msm_hsl_unconfig_uart_hwflow_gpios(struct uart_port *port)
+{
+ struct platform_device *pdev = to_platform_device(port->dev);
+ const struct msm_serial_hslite_platform_data *pdata =
+ pdev->dev.platform_data;
+
+ if (pdata) {
+ gpio_free(pdata->uart_cts_gpio);
+ gpio_free(pdata->uart_rfr_gpio);
+ } else {
+ pr_err("Error: Pdata is NULL.\n");
+ }
+
+}
+
+/**
+ * msm_hsl_config_uart_gpios: Configures UART GPIOs and returns success or
+ * failure
+ * @port: uart port
+ */
+static int msm_hsl_config_uart_gpios(struct uart_port *port)
+{
+ struct msm_hsl_port *msm_hsl_port = UART_TO_MSM(port);
+ int ret;
+
+ /* Configure UART Tx and Rx GPIOs */
+ ret = msm_hsl_config_uart_tx_rx_gpios(port);
+ if (!ret) {
+ if (msm_hsl_port->func_mode == UART_FOUR_WIRE) {
+			/* if 4-wire UART, configure CTS and RFR GPIOs */
+ ret = msm_hsl_config_uart_hwflow_gpios(port);
+ if (ret)
+ msm_hsl_unconfig_uart_tx_rx_gpios(port);
+ }
+ } else {
+ msm_hsl_unconfig_uart_tx_rx_gpios(port);
+ }
+
+ return ret;
+}
+
+/**
+ * msm_hsl_unconfig_uart_gpios: Unconfigures UART GPIOs
+ * @port: uart port
+ */
+static void msm_hsl_unconfig_uart_gpios(struct uart_port *port)
+{
+ struct msm_hsl_port *msm_hsl_port = UART_TO_MSM(port);
+
+ msm_hsl_unconfig_uart_tx_rx_gpios(port);
+ if (msm_hsl_port->func_mode == UART_FOUR_WIRE)
+ msm_hsl_unconfig_uart_hwflow_gpios(port);
+}
static int get_line(struct platform_device *pdev)
{
struct msm_hsl_port *msm_hsl_port = platform_get_drvdata(pdev);
-
return msm_hsl_port->uart.line;
}
@@ -745,28 +947,16 @@
(port->cons && (!(port->cons->flags & CON_ENABLED)))) {
if (msm_serial_hsl_has_gsbi(port))
- if ((ioread32(msm_hsl_port->mapped_gsbi +
- GSBI_CONTROL_ADDR) & GSBI_PROTOCOL_I2C_UART)
- != GSBI_PROTOCOL_I2C_UART)
- iowrite32(GSBI_PROTOCOL_I2C_UART,
- msm_hsl_port->mapped_gsbi +
- GSBI_CONTROL_ADDR);
+ set_gsbi_uart_func_mode(port);
+
+ if (pdata && pdata->use_pm)
+ wake_lock(&msm_hsl_port->port_open_wake_lock);
if (pdata && pdata->config_gpio) {
- ret = gpio_request(pdata->uart_tx_gpio,
- "UART_TX_GPIO");
- if (unlikely(ret)) {
- pr_err("gpio request failed for:%d\n",
- pdata->uart_tx_gpio);
- return ret;
- }
-
- ret = gpio_request(pdata->uart_rx_gpio, "UART_RX_GPIO");
- if (unlikely(ret)) {
- pr_err("gpio request failed for:%d\n",
- pdata->uart_rx_gpio);
- gpio_free(pdata->uart_tx_gpio);
- return ret;
+ ret = msm_hsl_config_uart_gpios(port);
+ if (ret) {
+ msm_hsl_unconfig_uart_gpios(port);
+ goto release_wakelock;
}
}
}
@@ -800,9 +990,17 @@
msm_hsl_port->name, port);
if (unlikely(ret)) {
pr_err("failed to request_irq\n");
- return ret;
+ msm_hsl_unconfig_uart_gpios(port);
+ goto release_wakelock;
}
- return 0;
+
+ return ret;
+
+release_wakelock:
+ if (pdata && pdata->use_pm)
+ wake_unlock(&msm_hsl_port->port_open_wake_lock);
+
+ return ret;
}
static void msm_hsl_shutdown(struct uart_port *port)
@@ -824,10 +1022,12 @@
pm_runtime_put_sync(port->dev);
if (!(is_console(port)) || (!port->cons) ||
(port->cons && (!(port->cons->flags & CON_ENABLED)))) {
- if (pdata && pdata->config_gpio) {
- gpio_free(pdata->uart_tx_gpio);
- gpio_free(pdata->uart_rx_gpio);
- }
+ /* Free UART GPIOs */
+ if (pdata && pdata->config_gpio)
+ msm_hsl_unconfig_uart_gpios(port);
+
+ if (pdata && pdata->use_pm)
+ wake_unlock(&msm_hsl_port->port_open_wake_lock);
}
}
@@ -1009,22 +1209,15 @@
static void msm_hsl_config_port(struct uart_port *port, int flags)
{
- struct msm_hsl_port *msm_hsl_port = UART_TO_MSM(port);
if (flags & UART_CONFIG_TYPE) {
port->type = PORT_MSM;
if (msm_hsl_request_port(port))
return;
}
- if (msm_serial_hsl_has_gsbi(port)) {
- if (msm_hsl_port->pclk)
- clk_prepare_enable(msm_hsl_port->pclk);
- if ((ioread32(msm_hsl_port->mapped_gsbi + GSBI_CONTROL_ADDR) &
- GSBI_PROTOCOL_I2C_UART) != GSBI_PROTOCOL_I2C_UART)
- iowrite32(GSBI_PROTOCOL_I2C_UART,
- msm_hsl_port->mapped_gsbi + GSBI_CONTROL_ADDR);
- if (msm_hsl_port->pclk)
- clk_disable_unprepare(msm_hsl_port->pclk);
- }
+
+ /* Configure required GSBI based UART protocol. */
+ if (msm_serial_hsl_has_gsbi(port))
+ set_gsbi_uart_func_mode(port);
}
static int msm_hsl_verify_port(struct uart_port *port,
@@ -1397,6 +1590,56 @@
.cons = MSM_HSL_CONSOLE,
};
+static struct msm_serial_hslite_platform_data
+ *msm_hsl_dt_to_pdata(struct platform_device *pdev)
+{
+ int ret;
+ struct device_node *node = pdev->dev.of_node;
+ struct msm_serial_hslite_platform_data *pdata;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ pr_err("unable to allocate memory for platform data\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = of_property_read_u32(node, "qcom,config-gpio",
+ &pdata->config_gpio);
+ if (ret && ret != -EINVAL) {
+ pr_err("Error with config_gpio property.\n");
+ return ERR_PTR(ret);
+ }
+
+ if (pdata->config_gpio) {
+ pdata->uart_tx_gpio = of_get_named_gpio(node,
+ "qcom,tx-gpio", 0);
+ if (pdata->uart_tx_gpio < 0)
+ return ERR_PTR(pdata->uart_tx_gpio);
+
+ pdata->uart_rx_gpio = of_get_named_gpio(node,
+ "qcom,rx-gpio", 0);
+ if (pdata->uart_rx_gpio < 0)
+ return ERR_PTR(pdata->uart_rx_gpio);
+
+ /* check if 4-wire UART, then get cts/rfr GPIOs. */
+ if (pdata->config_gpio == 4) {
+ pdata->uart_cts_gpio = of_get_named_gpio(node,
+ "qcom,cts-gpio", 0);
+ if (pdata->uart_cts_gpio < 0)
+ return ERR_PTR(pdata->uart_cts_gpio);
+
+ pdata->uart_rfr_gpio = of_get_named_gpio(node,
+ "qcom,rfr-gpio", 0);
+ if (pdata->uart_rfr_gpio < 0)
+ return ERR_PTR(pdata->uart_rfr_gpio);
+ }
+ }
+
+ pdata->use_pm = of_property_read_bool(node, "qcom,use-pm");
+
+ return pdata;
+}
+
static atomic_t msm_serial_hsl_next_id = ATOMIC_INIT(0);
static int __devinit msm_serial_hsl_probe(struct platform_device *pdev)
@@ -1405,7 +1648,7 @@
struct resource *uart_resource;
struct resource *gsbi_resource;
struct uart_port *port;
- const struct msm_serial_hslite_platform_data *pdata;
+ struct msm_serial_hslite_platform_data *pdata;
const struct of_device_id *match;
u32 line;
int ret;
@@ -1422,9 +1665,16 @@
/* Use line number from device tree alias if present */
if (pdev->dev.of_node) {
+ dev_dbg(&pdev->dev, "device tree enabled\n");
ret = of_alias_get_id(pdev->dev.of_node, "serial");
if (ret >= 0)
line = ret;
+
+ pdata = msm_hsl_dt_to_pdata(pdev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+
+ pdev->dev.platform_data = pdata;
}
if (unlikely(line < 0 || line >= UART_NR))
@@ -1437,11 +1687,23 @@
port->uartclk = 7372800;
msm_hsl_port = UART_TO_MSM(port);
- match = of_match_device(msm_hsl_match_table, &pdev->dev);
- if (!match)
- msm_hsl_port->ver_id = UARTDM_VERSION_11_13;
+ /* Identify UART functional mode as 2-wire or 4-wire. */
+ if (pdata && pdata->config_gpio == 4)
+ msm_hsl_port->func_mode = UART_FOUR_WIRE;
else
+ msm_hsl_port->func_mode = UART_TWO_WIRE;
+
+ match = of_match_device(msm_hsl_match_table, &pdev->dev);
+ if (!match) {
+ msm_hsl_port->ver_id = UARTDM_VERSION_11_13;
+ } else {
msm_hsl_port->ver_id = (unsigned int)match->data;
+ /*
+ * BLSP based UART configuration is available with
+		 * UARTDM v14 revision. Hence set uart_type to BLSP_HSUART.
+ */
+ msm_hsl_port->uart_type = BLSP_HSUART;
+ }
gsbi_resource = platform_get_resource_byname(pdev,
IORESOURCE_MEM,
@@ -1452,9 +1714,9 @@
msm_hsl_port->pclk = clk_get(&pdev->dev, "iface_clk");
if (gsbi_resource)
- msm_hsl_port->is_uartdm = 1;
+ msm_hsl_port->uart_type = GSBI_HSUART;
else
- msm_hsl_port->is_uartdm = 0;
+ msm_hsl_port->uart_type = LEGACY_HSUART;
if (unlikely(IS_ERR(msm_hsl_port->clk))) {
pr_err("Error getting clk\n");
@@ -1492,6 +1754,10 @@
#endif
msm_hsl_debugfs_init(msm_hsl_port, get_line(pdev));
mutex_init(&msm_hsl_port->clk_mutex);
+ if (pdata && pdata->use_pm)
+ wake_lock_init(&msm_hsl_port->port_open_wake_lock,
+ WAKE_LOCK_SUSPEND,
+ "msm_serial_hslite_port_open");
/* Temporarily increase the refcount on the GSBI clock to avoid a race
* condition with the earlyprintk handover mechanism.
@@ -1507,6 +1773,8 @@
static int __devexit msm_serial_hsl_remove(struct platform_device *pdev)
{
struct msm_hsl_port *msm_hsl_port = platform_get_drvdata(pdev);
+ const struct msm_serial_hslite_platform_data *pdata =
+ pdev->dev.platform_data;
struct uart_port *port;
port = get_port_from_line(get_line(pdev));
@@ -1516,6 +1784,9 @@
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ if (pdata && pdata->use_pm)
+ wake_lock_destroy(&msm_hsl_port->port_open_wake_lock);
+
device_set_wakeup_capable(&pdev->dev, 0);
platform_set_drvdata(pdev, NULL);
mutex_destroy(&msm_hsl_port->clk_mutex);
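
For boards that do not boot with device tree, here is a minimal sketch of the platform data that msm_hsl_dt_to_pdata() above derives from the qcom,* properties. The GPIO numbers and the variable name are placeholders; only the struct and field names are taken from the driver changes above.

#include <mach/msm_serial_hs_lite.h>

/* Hypothetical board-file fragment; GPIO numbers are placeholders. */
static struct msm_serial_hslite_platform_data uart3_pdata = {
	.config_gpio   = 4,	/* 4-wire mode: Tx/Rx plus CTS/RFR */
	.uart_tx_gpio  = 53,
	.uart_rx_gpio  = 54,
	.uart_cts_gpio = 55,
	.uart_rfr_gpio = 56,
	.use_pm        = true,	/* hold a wakelock while the port is open */
};
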
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index bcacf7a..f577fdb 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -128,6 +128,7 @@
#define CHARGING_DET_OUTPUT_REG (QSCRATCH_REG_OFFSET + 0x1C)
#define ALT_INTERRUPT_EN_REG (QSCRATCH_REG_OFFSET + 0x20)
#define HS_PHY_IRQ_STAT_REG (QSCRATCH_REG_OFFSET + 0x24)
+#define CGCTL_REG (QSCRATCH_REG_OFFSET + 0x28)
#define SS_PHY_CTRL_REG (QSCRATCH_REG_OFFSET + 0x30)
#define SS_CR_PROTOCOL_DATA_IN_REG (QSCRATCH_REG_OFFSET + 0x3C)
#define SS_CR_PROTOCOL_DATA_OUT_REG (QSCRATCH_REG_OFFSET + 0x40)
@@ -1204,6 +1205,13 @@
/* Disable (bypass) VBUS and ID filters */
dwc3_msm_write_reg(msm->base, QSCRATCH_GENERAL_CFG, 0x78);
+ /* Enable master clock for RAMs to allow BAM to access RAMs when
+ * RAM clock gating is enabled via DWC3's GCTL. Otherwise, issues
+ * are seen where RAM clocks get turned OFF in SS mode
+ */
+ dwc3_msm_write_reg(msm->base, CGCTL_REG,
+ dwc3_msm_read_reg(msm->base, CGCTL_REG) | 0x18);
+
/*
* WORKAROUND: There is SSPHY suspend bug due to which USB enumerates
* in HS mode instead of SS mode. Workaround it by asserting
diff --git a/drivers/usb/dwc3/dwc3_otg.c b/drivers/usb/dwc3/dwc3_otg.c
index 136cc5d..361aa32 100644
--- a/drivers/usb/dwc3/dwc3_otg.c
+++ b/drivers/usb/dwc3/dwc3_otg.c
@@ -337,7 +337,6 @@
struct dwc3_ext_xceiv *ext_xceiv = dotg->ext_xceiv;
struct usb_phy *phy = dotg->otg.phy;
int ret = 0;
- int work = 0;
if (event == DWC3_EVENT_PHY_RESUME) {
if (!pm_runtime_status_suspended(phy->dev)) {
@@ -358,27 +357,19 @@
}
} else if (event == DWC3_EVENT_XCEIV_STATE) {
if (ext_xceiv->id == DWC3_ID_FLOAT) {
- if (!test_and_set_bit(ID, &dotg->inputs)) {
- dev_dbg(phy->dev, "XCVR: ID set\n");
- work = 1;
- }
+ dev_dbg(phy->dev, "XCVR: ID set\n");
+ set_bit(ID, &dotg->inputs);
} else {
- if (test_and_clear_bit(ID, &dotg->inputs)) {
- dev_dbg(phy->dev, "XCVR: ID clear\n");
- work = 1;
- }
+ dev_dbg(phy->dev, "XCVR: ID clear\n");
+ clear_bit(ID, &dotg->inputs);
}
if (ext_xceiv->bsv) {
- if (!test_and_set_bit(B_SESS_VLD, &dotg->inputs)) {
- dev_dbg(phy->dev, "XCVR: BSV set\n");
- work = 1;
- }
+ dev_dbg(phy->dev, "XCVR: BSV set\n");
+ set_bit(B_SESS_VLD, &dotg->inputs);
} else {
- if (test_and_clear_bit(B_SESS_VLD, &dotg->inputs)) {
- dev_dbg(phy->dev, "XCVR: BSV clear\n");
- work = 1;
- }
+ dev_dbg(phy->dev, "XCVR: BSV clear\n");
+ clear_bit(B_SESS_VLD, &dotg->inputs);
}
if (!init) {
@@ -387,8 +378,8 @@
dev_dbg(phy->dev, "XCVR: BSV init complete\n");
return;
}
- if (work)
- schedule_work(&dotg->sm_work);
+
+ schedule_work(&dotg->sm_work);
}
}
@@ -714,7 +705,8 @@
phy->state = OTG_STATE_B_IDLE;
work = 1;
} else {
- if (dwc3_otg_start_host(&dotg->otg, 1)) {
+ phy->state = OTG_STATE_A_HOST;
+ if (dwc3_otg_start_host(&dotg->otg, 1)) {
/*
* Probably set_host was not called yet.
* We will re-try as soon as it will be called
@@ -725,7 +717,6 @@
pm_runtime_put_sync(phy->dev);
return;
}
- phy->state = OTG_STATE_A_HOST;
}
break;
diff --git a/drivers/usb/gadget/f_mbim.c b/drivers/usb/gadget/f_mbim.c
index 65b4890..98c6dbc 100644
--- a/drivers/usb/gadget/f_mbim.c
+++ b/drivers/usb/gadget/f_mbim.c
@@ -1659,7 +1659,7 @@
atomic_read(&dev->error)));
if (ret < 0) {
mbim_unlock(&dev->read_excl);
- return 0;
+ return -ERESTARTSYS;
}
}
@@ -1675,7 +1675,7 @@
if (ret < 0) {
pr_err("Waiting failed\n");
mbim_unlock(&dev->read_excl);
- return 0;
+ return -ERESTARTSYS;
}
pr_debug("Received request packet\n");
}
@@ -1697,7 +1697,7 @@
ret = copy_to_user(buf, cpkt->buf, cpkt->len);
if (ret) {
pr_err("copy_to_user failed: err %d\n", ret);
- ret = 0;
+ ret = -ENOMEM;
} else {
pr_debug("copied %d bytes to user\n", cpkt->len);
ret = cpkt->len;
diff --git a/drivers/usb/misc/ks_bridge.c b/drivers/usb/misc/ks_bridge.c
index 86c59e7..dab6e7f 100644
--- a/drivers/usb/misc/ks_bridge.c
+++ b/drivers/usb/misc/ks_bridge.c
@@ -74,7 +74,6 @@
struct usb_anchor submitted;
unsigned long flags;
- unsigned int alloced_read_pkts;
#define DBG_MSG_LEN 40
#define DBG_MAX_MSG 500
@@ -141,6 +140,8 @@
}
+static void
+submit_one_urb(struct ks_bridge *ksb, gfp_t flags, struct data_pkt *pkt);
static ssize_t ksb_fs_read(struct file *fp, char __user *buf,
size_t count, loff_t *pos)
{
@@ -179,7 +180,6 @@
if (ret) {
pr_err("copy_to_user failed err:%d\n", ret);
ksb_free_data_pkt(pkt);
- ksb->alloced_read_pkts--;
return ret;
}
@@ -189,9 +189,16 @@
spin_lock_irqsave(&ksb->lock, flags);
if (pkt->n_read == pkt->len) {
+ /*
+ * re-init the packet and queue it
+ * for more data.
+ */
list_del_init(&pkt->list);
- ksb_free_data_pkt(pkt);
- ksb->alloced_read_pkts--;
+ pkt->n_read = 0;
+ pkt->len = MAX_DATA_PKT_SIZE;
+ spin_unlock_irqrestore(&ksb->lock, flags);
+ submit_one_urb(ksb, GFP_KERNEL, pkt);
+ spin_lock_irqsave(&ksb->lock, flags);
}
}
spin_unlock_irqrestore(&ksb->lock, flags);
@@ -410,25 +417,18 @@
MODULE_DEVICE_TABLE(usb, ksb_usb_ids);
static void ksb_rx_cb(struct urb *urb);
-static void submit_one_urb(struct ks_bridge *ksb)
+static void
+submit_one_urb(struct ks_bridge *ksb, gfp_t flags, struct data_pkt *pkt)
{
- struct data_pkt *pkt;
struct urb *urb;
int ret;
- pkt = ksb_alloc_data_pkt(MAX_DATA_PKT_SIZE, GFP_ATOMIC, ksb);
- if (IS_ERR(pkt)) {
- pr_err("unable to allocate data pkt");
- return;
- }
-
- urb = usb_alloc_urb(0, GFP_ATOMIC);
+ urb = usb_alloc_urb(0, flags);
if (!urb) {
pr_err("unable to allocate urb");
ksb_free_data_pkt(pkt);
return;
}
- ksb->alloced_read_pkts++;
usb_fill_bulk_urb(urb, ksb->udev, ksb->in_pipe,
pkt->buf, pkt->len,
@@ -439,18 +439,16 @@
usb_unanchor_urb(urb);
usb_free_urb(urb);
ksb_free_data_pkt(pkt);
- ksb->alloced_read_pkts--;
return;
}
atomic_inc(&ksb->rx_pending_cnt);
- ret = usb_submit_urb(urb, GFP_ATOMIC);
+ ret = usb_submit_urb(urb, flags);
if (ret) {
pr_err("in urb submission failed");
usb_unanchor_urb(urb);
usb_free_urb(urb);
ksb_free_data_pkt(pkt);
- ksb->alloced_read_pkts--;
atomic_dec(&ksb->rx_pending_cnt);
wake_up(&ksb->pending_urb_wait);
return;
@@ -479,14 +477,12 @@
pr_err_ratelimited("urb failed with err:%d",
urb->status);
ksb_free_data_pkt(pkt);
- ksb->alloced_read_pkts--;
goto done;
}
if (urb->actual_length == 0) {
- ksb_free_data_pkt(pkt);
- ksb->alloced_read_pkts--;
- goto resubmit_urb;
+ submit_one_urb(ksb, GFP_ATOMIC, pkt);
+ goto done;
}
add_to_list:
@@ -498,8 +494,6 @@
/* wake up read thread */
wake_up(&ksb->ks_wait_q);
-resubmit_urb:
- submit_one_urb(ksb);
done:
atomic_dec(&ksb->rx_pending_cnt);
wake_up(&ksb->pending_urb_wait);
@@ -539,7 +533,6 @@
ksb_free_data_pkt(pkt);
return;
}
- ksb->alloced_read_pkts++;
usb_fill_bulk_urb(urb, ksb->udev, ksb->in_pipe,
pkt->buf, pkt->len,
@@ -555,7 +548,6 @@
usb_unanchor_urb(urb);
usb_free_urb(urb);
ksb_free_data_pkt(pkt);
- ksb->alloced_read_pkts--;
usb_autopm_put_interface(ksb->ifc);
atomic_dec(&ksb->rx_pending_cnt);
wake_up(&ksb->pending_urb_wait);
@@ -588,6 +580,7 @@
break;
case 0x9048:
case 0x904C:
+ case 0x9075:
if (ifc_num != 2)
return -ENODEV;
ksb = __ksb[EFS_BRIDGE_INDEX];
@@ -639,7 +632,6 @@
struct data_pkt, list);
list_del_init(&pkt->list);
ksb_free_data_pkt(pkt);
- ksb->alloced_read_pkts--;
}
while (!list_empty(&ksb->to_mdm_list)) {
pkt = list_first_entry(&ksb->to_mdm_list,
@@ -666,8 +658,6 @@
dbg_log_event(ksb, "SUSPEND", 0, 0);
- pr_debug("read cnt: %d", ksb->alloced_read_pkts);
-
usb_kill_anchored_urbs(&ksb->submitted);
return 0;
@@ -714,7 +704,6 @@
struct data_pkt, list);
list_del_init(&pkt->list);
ksb_free_data_pkt(pkt);
- ksb->alloced_read_pkts--;
}
while (!list_empty(&ksb->to_mdm_list)) {
pkt = list_first_entry(&ksb->to_mdm_list,
diff --git a/drivers/video/msm/mdss/mhl_sii8334.c b/drivers/video/msm/mdss/mhl_sii8334.c
index 6a63964..2982aaa 100644
--- a/drivers/video/msm/mdss/mhl_sii8334.c
+++ b/drivers/video/msm/mdss/mhl_sii8334.c
@@ -18,6 +18,7 @@
#include <linux/of_address.h>
#include <linux/of_gpio.h>
#include <linux/types.h>
+#include <linux/usb/msm_hsusb.h>
#include <linux/mhl_8334.h>
#include "mdss_fb.h"
@@ -58,6 +59,9 @@
uint8_t cur_state;
uint8_t chip_rev_id;
int mhl_mode;
+ struct completion rgnd_done;
+ void (*notify_usb_online)(int online);
+ struct usb_ext_notification *mhl_info;
};
@@ -196,6 +200,52 @@
return 0;
}
+/* USB_HANDSHAKING FUNCTIONS */
+static int mhl_sii_device_discovery(void *data, int id,
+ void (*usb_notify_cb)(int online))
+{
+ int timeout, rc;
+ struct mhl_tx_ctrl *mhl_ctrl = data;
+ struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+ if (id) {
+ /* When MHL cable is disconnected we get a sii8334
+ * mhl_disconnect interrupt which is handled separately.
+ */
+ pr_debug("%s: USB ID pin high\n", __func__);
+ return id;
+ }
+
+ if (!mhl_ctrl || !usb_notify_cb) {
+ pr_warn("%s: cb || ctrl is NULL\n", __func__);
+ /* return "USB" so caller can proceed */
+ return -EINVAL;
+ }
+
+ if (!mhl_ctrl->notify_usb_online)
+ mhl_ctrl->notify_usb_online = usb_notify_cb;
+
+ MHL_SII_REG_NAME_WR(REG_DISC_CTRL1, 0x27);
+ msleep(50);
+ if (mhl_ctrl->cur_state == POWER_STATE_D3) {
+ /* give MHL driver chance to handle RGND interrupt */
+ INIT_COMPLETION(mhl_ctrl->rgnd_done);
+ timeout = wait_for_completion_interruptible_timeout
+ (&mhl_ctrl->rgnd_done, HZ/2);
+ if (!timeout) {
+ /* most likely nothing plugged in USB */
+ /* USB HOST connected or already in USB mode */
+ pr_debug("Timedout Returning from discovery mode\n");
+			pr_debug("Timed out, returning from discovery mode\n");
+ }
+ rc = mhl_ctrl->mhl_mode ? 0 : 1;
+ } else {
+ /* not in D3. already in MHL mode */
+ rc = 0;
+ }
+ return rc;
+}
+
static void cbus_reset(struct i2c_client *client)
{
uint8_t i;
@@ -545,14 +595,15 @@
if (0x02 == rgnd_imp) {
pr_debug("%s: mhl sink\n", __func__);
- MHL_SII_REG_NAME_MOD(REG_DISC_CTRL9, BIT0, BIT0);
mhl_ctrl->mhl_mode = 1;
+ if (mhl_ctrl->notify_usb_online)
+ mhl_ctrl->notify_usb_online(1);
} else {
pr_debug("%s: non-mhl sink\n", __func__);
mhl_ctrl->mhl_mode = 0;
- MHL_SII_REG_NAME_MOD(REG_DISC_CTRL9, BIT3, BIT3);
switch_mode(mhl_ctrl, POWER_STATE_D3);
}
+ complete(&mhl_ctrl->rgnd_done);
return mhl_ctrl->mhl_mode ?
MHL_DISCOVERY_RESULT_MHL : MHL_DISCOVERY_RESULT_USB;
}
@@ -651,6 +702,8 @@
reg = MHL_SII_REG_NAME_RD(REG_INTR4);
MHL_SII_REG_NAME_WR(REG_INTR4, reg);
mhl_msm_disconnection(mhl_ctrl);
+ if (mhl_ctrl->notify_usb_online)
+ mhl_ctrl->notify_usb_online(0);
}
if ((mhl_ctrl->cur_state != POWER_STATE_D0_MHL) &&\
@@ -1048,6 +1101,7 @@
int rc = 0;
struct mhl_tx_platform_data *pdata = NULL;
struct mhl_tx_ctrl *mhl_ctrl;
+ struct usb_ext_notification *mhl_info = NULL;
mhl_ctrl = devm_kzalloc(&client->dev, sizeof(*mhl_ctrl), GFP_KERNEL);
if (!mhl_ctrl) {
@@ -1107,6 +1161,8 @@
goto failed_probe;
}
+ init_completion(&mhl_ctrl->rgnd_done);
+
pr_debug("%s: IRQ from GPIO INTR = %d\n",
__func__, mhl_ctrl->i2c_handle->irq);
pr_debug("%s: Driver name = [%s]\n", __func__,
@@ -1123,8 +1179,25 @@
pr_debug("request_threaded_irq succeeded\n");
}
pr_debug("%s: i2c client addr is [%x]\n", __func__, client->addr);
+
+ mhl_info = devm_kzalloc(&client->dev, sizeof(*mhl_info), GFP_KERNEL);
+ if (!mhl_info) {
+ pr_err("%s: alloc mhl info failed\n", __func__);
+ goto failed_probe;
+ }
+
+ mhl_info->ctxt = mhl_ctrl;
+ mhl_info->notify = mhl_sii_device_discovery;
+ if (msm_register_usb_ext_notification(mhl_info)) {
+ pr_err("%s: register for usb notifcn failed\n", __func__);
+ goto failed_probe;
+ }
+ mhl_ctrl->mhl_info = mhl_info;
return 0;
failed_probe:
+ /* do not deep-free */
+ if (mhl_info)
+ devm_kfree(&client->dev, mhl_info);
failed_dt_data:
if (pdata)
devm_kfree(&client->dev, pdata);
@@ -1148,6 +1221,8 @@
free_irq(mhl_ctrl->i2c_handle->irq, mhl_ctrl);
mhl_gpio_config(mhl_ctrl, 0);
mhl_vreg_config(mhl_ctrl, 0);
+ if (mhl_ctrl->mhl_info)
+ devm_kfree(&client->dev, mhl_ctrl->mhl_info);
if (mhl_ctrl->pdata)
devm_kfree(&client->dev, mhl_ctrl->pdata);
devm_kfree(&client->dev, mhl_ctrl);
diff --git a/include/linux/dvb/dmx.h b/include/linux/dvb/dmx.h
index 257e069..fd4447f 100644
--- a/include/linux/dvb/dmx.h
+++ b/include/linux/dvb/dmx.h
@@ -39,6 +39,9 @@
/* Min recording chunk upon which event is generated */
#define DMX_REC_BUFF_CHUNK_MIN_SIZE (100*188)
+/* Decoder buffers are usually large ~1MB, 10 should suffice */
+#define DMX_MAX_DECODER_BUFFER_NUM (10)
+
typedef enum
{
DMX_OUT_DECODER, /* Streaming directly to decoder. */
@@ -314,13 +317,27 @@
/* Maximum buffer size allowed */
__u32 max_size;
+
+ /* Maximum number of linear buffers handled by demux */
+ __u32 max_buffer_num;
+
+ /* Feature support bitmap as detailed below */
__u32 flags;
-/* Buffer allocated as physically contiguous memory */
-#define DMX_BUFFER_CONTIGEOUS_MEM 0x1
+/* Buffer must be allocated as physically contiguous memory */
+#define DMX_BUFFER_CONTIGUOUS_MEM 0x1
/* If the filter's data is decrypted, the buffer should be secured one */
#define DMX_BUFFER_SECURED_IF_DECRYPTED 0x2
+
+/* Buffer can be allocated externally */
+#define DMX_BUFFER_EXTERNAL_SUPPORT 0x4
+
+/* Buffer can be allocated internally */
+#define DMX_BUFFER_INTERNAL_SUPPORT 0x8
+
+/* Filter output can be output to a linear buffer group */
+#define DMX_BUFFER_LINEAR_GROUP_SUPPORT 0x10
};
typedef struct dmx_caps {
@@ -385,6 +402,9 @@
/* For PES not sent to decoder */
struct dmx_buffer_requirement pes;
+ /* For PES sent to decoder */
+ struct dmx_buffer_requirement decoder;
+
/* Recording buffer for recording of 188 bytes packets */
struct dmx_buffer_requirement recording_188_tsp;
@@ -438,9 +458,9 @@
};
struct dmx_stc {
- unsigned int num; /* input : which STC? 0..N */
- unsigned int base; /* output: divisor for stc to get 90 kHz clock */
- __u64 stc; /* output: stc in 'base'*90 kHz units */
+ unsigned int num; /* input : which STC? 0..N */
+ unsigned int base; /* output: divisor for stc to get 90 kHz clock */
+ __u64 stc; /* output: stc in 'base'*90 kHz units */
};
enum dmx_buffer_mode {
@@ -466,6 +486,27 @@
int handle;
};
+
+struct dmx_decoder_buffers {
+ /*
+ * Specify if linear buffer support is requested. If set, buffers_num
+ * must be greater than 1
+ */
+ int is_linear;
+
+ /*
+	 * Specify the number of external buffers allocated by the user.
+	 * A value of 0 means internal buffer allocation is requested
+ */
+ __u32 buffers_num;
+
+ /* Specify buffer size, either external or internal */
+ __u32 buffers_size;
+
+ /* Array of externally allocated buffer handles */
+ int handles[DMX_MAX_DECODER_BUFFER_NUM];
+};
+
#define DMX_START _IO('o', 41)
#define DMX_STOP _IO('o', 42)
#define DMX_SET_FILTER _IOW('o', 43, struct dmx_sct_filter_params)
@@ -487,5 +528,7 @@
#define DMX_GET_EVENT _IOR('o', 60, struct dmx_filter_event)
#define DMX_SET_BUFFER_MODE _IOW('o', 61, enum dmx_buffer_mode)
#define DMX_SET_BUFFER _IOW('o', 62, struct dmx_buffer)
+#define DMX_SET_DECODER_BUFFER _IOW('o', 63, struct dmx_decoder_buffers)
+
#endif /*_DVBDMX_H_*/
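
A minimal user-space sketch of the new DMX_SET_DECODER_BUFFER ioctl follows; it is not part of the patch, and the device path, buffer size and error handling are illustrative assumptions.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/dvb/dmx.h>

/* Request an internally allocated, non-linear decoder buffer. */
int request_decoder_buffer(void)
{
	struct dmx_decoder_buffers bufs;
	int fd = open("/dev/dvb/adapter0/demux0", O_RDWR);

	if (fd < 0)
		return -1;

	memset(&bufs, 0, sizeof(bufs));
	bufs.is_linear = 0;		/* single decoder buffer, not a group */
	bufs.buffers_num = 0;		/* 0 => demux allocates internally */
	bufs.buffers_size = 1024 * 1024;	/* requested size in bytes */

	if (ioctl(fd, DMX_SET_DECODER_BUFFER, &bufs) < 0)
		perror("DMX_SET_DECODER_BUFFER");

	return fd;
}
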
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index d4fa3aa..97b8e0b 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -138,6 +138,7 @@
extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
int (*fn)(struct device *, void *));
extern struct iommu_group *iommu_group_get(struct device *dev);
+extern struct iommu_group *iommu_group_find(const char *name);
extern void iommu_group_put(struct iommu_group *group);
extern int iommu_group_register_notifier(struct iommu_group *group,
struct notifier_block *nb);
@@ -317,6 +318,11 @@
return NULL;
}
+static inline struct iommu_group *iommu_group_find(const char *name)
+{
+ return NULL;
+}
+
static inline void iommu_group_put(struct iommu_group *group)
{
}
diff --git a/include/linux/mhl_8334.h b/include/linux/mhl_8334.h
index c9f57c5..d3597dc 100644
--- a/include/linux/mhl_8334.h
+++ b/include/linux/mhl_8334.h
@@ -44,7 +44,7 @@
/* USB driver interface */
-#ifdef CONFIG_FB_MSM_HDMI_MHL_8334
+#if defined(CONFIG_FB_MSM_HDMI_MHL_8334)
/* mhl_device_discovery */
extern int mhl_device_discovery(const char *name, int *result);
@@ -70,7 +70,6 @@
}
#endif
-
struct msc_cmd_envelope {
/*
* this list head is for list APIs
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index ec1d619..d2f8faf 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -201,6 +201,18 @@
MAX_REASONS,
};
+enum mmc_blk_status {
+ MMC_BLK_SUCCESS = 0,
+ MMC_BLK_PARTIAL,
+ MMC_BLK_CMD_ERR,
+ MMC_BLK_RETRY,
+ MMC_BLK_ABORT,
+ MMC_BLK_DATA_ERR,
+ MMC_BLK_ECC_ERR,
+ MMC_BLK_NOMEDIUM,
+ MMC_BLK_NEW_REQUEST,
+};
+
struct mmc_wr_pack_stats {
u32 *packing_events;
u32 pack_stop_reason[MAX_REASONS];
@@ -290,6 +302,31 @@
#define BKOPS_MIN_SECTORS_TO_QUEUE_DELAYED_WORK 204800 /* 100MB */
};
+/**
+ * struct mmc_async_event_stats - async events stats data
+ *
+ * @enabled:	A boolean indicating whether the stats are initialized
+ *		and enabled
+ * The rest of the members in this struct are counters which are
+ * incremented at strategic locations in the async events flows.
+ */
+struct mmc_async_event_stats {
+ bool enabled;
+ u32 cmd_retry;
+ u32 new_request_flag;
+ u32 null_fetched;
+ u32 wakeup_new;
+ u32 q_no_waiting;
+ u32 done_flag;
+ u32 no_mmc_request_action;
+ u32 wakeup_mq_thread;
+ u32 fetch_due_to_new_req;
+ u32 returned_new_req;
+ u32 done_when_new_req_event_on;
+ u32 new_req_when_new_marked;
+ bool print_in_read;
+};
+
/*
* MMC device
*/
@@ -363,6 +400,8 @@
struct mmc_wr_pack_stats wr_pack_stats; /* packed commands stats*/
struct mmc_bkops_info bkops_info;
+ /* async events flow stats */
+ struct mmc_async_event_stats async_event_stats;
};
/*
@@ -591,5 +630,5 @@
extern struct mmc_wr_pack_stats *mmc_blk_get_packed_statistics(
struct mmc_card *card);
extern void mmc_blk_init_packed_statistics(struct mmc_card *card);
-
+extern void mmc_blk_init_async_event_statistics(struct mmc_card *card);
#endif /* LINUX_MMC_CARD_H */
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 24b9790..83cc723 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -124,6 +124,7 @@
bool fault_injected; /* fault injected */
};
+struct mmc_host;
struct mmc_request {
struct mmc_command *sbc; /* SET_BLOCK_COUNT for multiblock */
struct mmc_command *cmd;
@@ -132,9 +133,9 @@
struct completion completion;
void (*done)(struct mmc_request *);/* completion function */
+ struct mmc_host *host;
};
-struct mmc_host;
struct mmc_card;
struct mmc_async_req;
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 6c43ec7..6982c45 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -155,6 +155,22 @@
int (*err_check) (struct mmc_card *, struct mmc_async_req *);
};
+/**
+ * mmc_context_info - synchronization details for mmc context
+ * @is_done_rcv:	wake up reason was done request
+ * @is_new_req:		wake up reason was new request
+ * @is_waiting_last_req: mmc context waiting for single running request
+ * @wait:		wait queue
+ * @lock:		lock to protect data fields
+ */
+struct mmc_context_info {
+ bool is_done_rcv;
+ bool is_new_req;
+ bool is_waiting_last_req;
+ wait_queue_head_t wait;
+ spinlock_t lock;
+};
+
struct mmc_hotplug {
unsigned int irq;
void *handler_priv;
@@ -327,6 +343,7 @@
struct dentry *debugfs_root;
struct mmc_async_req *areq; /* active async req */
+ struct mmc_context_info context_info; /* async synchronization info */
#ifdef CONFIG_FAIL_MMC_REQUEST
struct fault_attr fail_mmc_request;
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 7bdd3f2..90980b7 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -61,6 +61,14 @@
MIGRATE_TYPES
};
+/*
+ * Returns the list of migrate types to which an allocation
+ * falls back when the free list for the migrate type mtype
+ * is depleted.
+ * The end of the list is delimited by the type MIGRATE_RESERVE.
+ */
+extern int *get_migratetype_fallbacks(int mtype);
+
#ifdef CONFIG_CMA
bool is_cma_pageblock(struct page *page);
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
@@ -144,6 +152,7 @@
NUMA_OTHER, /* allocation from other node */
#endif
NR_ANON_TRANSPARENT_HUGEPAGES,
+ NR_FREE_CMA_PAGES,
NR_VM_ZONE_STAT_ITEMS };
/*
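
A minimal kernel-side sketch of walking the exported fallback list, relying only on the contract documented above (the list is terminated by MIGRATE_RESERVE); the caller and its name are hypothetical.

#include <linux/kernel.h>
#include <linux/mmzone.h>

static void dump_movable_fallbacks(void)
{
	int *fallback = get_migratetype_fallbacks(MIGRATE_MOVABLE);
	int i;

	for (i = 0; fallback[i] != MIGRATE_RESERVE; i++)
		pr_info("MIGRATE_MOVABLE fallback[%d] = %d\n", i, fallback[i]);
}
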
diff --git a/include/linux/msm_audio.h b/include/linux/msm_audio.h
index 4eb8a65..9173d54 100644
--- a/include/linux/msm_audio.h
+++ b/include/linux/msm_audio.h
@@ -245,6 +245,7 @@
#define CAD_SET_DEVICE _IOW(SND_IOCTL_MAGIC, 2, struct msm_cad_device_config *)
#define SND_METHOD_VOICE 0
+#define SND_METHOD_MIDI 4
struct msm_snd_volume_config {
uint32_t device;
diff --git a/include/linux/test-iosched.h b/include/linux/test-iosched.h
index b52762c..b933069 100644
--- a/include/linux/test-iosched.h
+++ b/include/linux/test-iosched.h
@@ -161,8 +161,17 @@
* struct test_data - global test iosched data
* @queue: The test IO scheduler requests list
* @test_queue: The test requests list
- * @next_req: Points to the next request to be
- * dispatched from the test requests list
+ * @dispatched_queue:	The queue holds requests dispatched
+ *			from @test_queue
+ * @reinsert_queue:	The queue holds requests reinserted
+ *			by the underlying driver
+ * @urgent_queue:	The queue holds requests marked for urgent delivery.
+ *			These requests are delivered before @test_queue
+ *			and @reinsert_queue requests
+ * @test_count: Number of requests in the @test_queue
+ * @dispatched_count: Number of requests in the @dispatched_queue
+ * @reinsert_count: Number of requests in the @reinsert_queue
+ * @urgent_count: Number of requests in the @urgent_queue
* @wait_q: A wait queue for waiting for the test
* requests completion
* @test_state: Indicates if there is a running test.
@@ -195,7 +204,13 @@
struct test_data {
struct list_head queue;
struct list_head test_queue;
- struct test_request *next_req;
+ struct list_head dispatched_queue;
+ struct list_head reinsert_queue;
+ struct list_head urgent_queue;
+ unsigned int test_count;
+ unsigned int dispatched_count;
+ unsigned int reinsert_count;
+ unsigned int urgent_count;
wait_queue_head_t wait_q;
enum test_state test_state;
enum test_results test_result;
@@ -220,6 +235,9 @@
extern int test_iosched_add_wr_rd_test_req(int is_err_expcted,
int direction, int start_sec,
int num_bios, int pattern, rq_end_io_fn *end_req_io);
+extern struct test_request *test_iosched_create_test_req(int is_err_expcted,
+ int direction, int start_sec,
+ int num_bios, int pattern, rq_end_io_fn *end_req_io);
extern struct dentry *test_iosched_get_debugfs_tests_root(void);
extern struct dentry *test_iosched_get_debugfs_utils_root(void);
@@ -234,4 +252,9 @@
void test_iosched_unregister(struct blk_dev_test_type *bdt);
+extern struct test_data *test_get_test_data(void);
+
+void test_iosched_add_urgent_req(struct test_request *test_rq);
+
+int test_is_req_urgent(struct request *rq);
#endif /* _LINUX_TEST_IOSCHED_H */
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 65efb92..1d10474 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -258,6 +258,13 @@
#endif /* CONFIG_SMP */
+static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
+ int migratetype)
+{
+ __mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
+ if (is_migrate_cma(migratetype))
+ __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
+}
extern const char * const vmstat_text[];
#endif /* _LINUX_VMSTAT_H */
diff --git a/include/linux/wcnss_wlan.h b/include/linux/wcnss_wlan.h
index 6d2eee4..f58c7a7 100644
--- a/include/linux/wcnss_wlan.h
+++ b/include/linux/wcnss_wlan.h
@@ -63,6 +63,8 @@
void wcnss_reset_intr(void);
void wcnss_suspend_notify(void);
void wcnss_resume_notify(void);
+void wcnss_riva_log_debug_regs(void);
+void wcnss_pronto_log_debug_regs(void);
#define wcnss_wlan_get_drvdata(dev) dev_get_drvdata(dev)
#define wcnss_wlan_set_drvdata(dev, data) dev_set_drvdata((dev), (data))
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index bd3f0f3..7dc95af 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -561,7 +561,8 @@
if (page_is_guard(buddy)) {
clear_page_guard_flag(buddy);
set_page_private(page, 0);
- __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
+ __mod_zone_freepage_state(zone, 1 << order,
+ migratetype);
} else {
list_del(&buddy->lru);
zone->free_area[order].nr_free--;
@@ -643,6 +644,7 @@
int migratetype = 0;
int batch_free = 0;
int to_free = count;
+ int mt = 0;
spin_lock(&zone->lock);
zone->all_unreclaimable = 0;
@@ -672,11 +674,15 @@
do {
page = list_entry(list->prev, struct page, lru);
+ mt = get_pageblock_migratetype(page);
/* must delete as __free_one_page list manipulates */
list_del(&page->lru);
/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
__free_one_page(page, zone, 0, page_private(page));
trace_mm_page_pcpu_drain(page, 0, page_private(page));
+ if (is_migrate_cma(mt))
+ __mod_zone_page_state(zone,
+ NR_FREE_CMA_PAGES, 1);
} while (--to_free && --batch_free && !list_empty(list));
}
__mod_zone_page_state(zone, NR_FREE_PAGES, count);
@@ -691,7 +697,8 @@
zone->pages_scanned = 0;
__free_one_page(page, zone, order, migratetype);
- __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
+ if (unlikely(migratetype != MIGRATE_ISOLATE))
+ __mod_zone_freepage_state(zone, 1 << order, migratetype);
spin_unlock(&zone->lock);
}
@@ -819,7 +826,8 @@
set_page_guard_flag(&page[size]);
set_page_private(&page[size], high);
/* Guard pages are not available for any usage */
- __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << high));
+ __mod_zone_freepage_state(zone, -(1 << high),
+ migratetype);
continue;
}
#endif
@@ -918,6 +926,11 @@
[MIGRATE_ISOLATE] = { MIGRATE_RESERVE }, /* Never used */
};
+int *get_migratetype_fallbacks(int mtype)
+{
+ return fallbacks[mtype];
+}
+
/*
* Move the free pages in a range to the free lists of the requested type.
* Note that start_page and end_pages are not aligned on a pageblock
@@ -1145,6 +1158,9 @@
}
set_page_private(page, mt);
list = &page->lru;
+ if (is_migrate_cma(mt))
+ __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
+ -(1 << order));
}
__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
spin_unlock(&zone->lock);
@@ -1418,7 +1434,9 @@
list_del(&page->lru);
zone->free_area[order].nr_free--;
rmv_page_order(page);
- __mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order));
+
+ if (unlikely(mt != MIGRATE_ISOLATE))
+ __mod_zone_freepage_state(zone, -(1UL << order), mt);
/* Split into individual pages */
set_page_refcounted(page);
@@ -1493,7 +1511,8 @@
spin_unlock(&zone->lock);
if (!page)
goto failed;
- __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
+ __mod_zone_freepage_state(zone, -(1 << order),
+ get_pageblock_migratetype(page));
}
__count_zone_vm_events(PGALLOC, zone, 1 << order);
@@ -2806,7 +2825,8 @@
" unevictable:%lu"
" dirty:%lu writeback:%lu unstable:%lu\n"
" free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
- " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n",
+ " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
+ " free_cma:%lu\n",
global_page_state(NR_ACTIVE_ANON),
global_page_state(NR_INACTIVE_ANON),
global_page_state(NR_ISOLATED_ANON),
@@ -2823,7 +2843,8 @@
global_page_state(NR_FILE_MAPPED),
global_page_state(NR_SHMEM),
global_page_state(NR_PAGETABLE),
- global_page_state(NR_BOUNCE));
+ global_page_state(NR_BOUNCE),
+ global_page_state(NR_FREE_CMA_PAGES));
for_each_populated_zone(zone) {
int i;
@@ -2855,6 +2876,7 @@
" pagetables:%lukB"
" unstable:%lukB"
" bounce:%lukB"
+ " free_cma:%lukB"
" writeback_tmp:%lukB"
" pages_scanned:%lu"
" all_unreclaimable? %s"
@@ -2884,6 +2906,7 @@
K(zone_page_state(zone, NR_PAGETABLE)),
K(zone_page_state(zone, NR_UNSTABLE_NFS)),
K(zone_page_state(zone, NR_BOUNCE)),
+ K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
zone->pages_scanned,
(zone->all_unreclaimable ? "yes" : "no")
@@ -5615,8 +5638,13 @@
out:
if (!ret) {
+ unsigned long nr_pages;
+ int migratetype = get_pageblock_migratetype(page);
+
set_pageblock_migratetype(page, MIGRATE_ISOLATE);
- move_freepages_block(zone, page, MIGRATE_ISOLATE);
+ nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);
+
+ __mod_zone_freepage_state(zone, -nr_pages, migratetype);
}
spin_unlock_irqrestore(&zone->lock, flags);
@@ -5628,13 +5656,15 @@
void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
struct zone *zone;
- unsigned long flags;
+ unsigned long flags, nr_pages;
+
zone = page_zone(page);
spin_lock_irqsave(&zone->lock, flags);
if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
goto out;
+ nr_pages = move_freepages_block(zone, page, migratetype);
+ __mod_zone_freepage_state(zone, nr_pages, migratetype);
set_pageblock_migratetype(page, migratetype);
- move_freepages_block(zone, page, migratetype);
out:
spin_unlock_irqrestore(&zone->lock, flags);
}
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 0dad31dc..8e18d6b 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -722,6 +722,7 @@
"numa_other",
#endif
"nr_anon_transparent_hugepages",
+ "nr_free_cma",
"nr_dirty_threshold",
"nr_dirty_background_threshold",
diff --git a/sound/soc/msm/msm7201.c b/sound/soc/msm/msm7201.c
index 2a73fd6..49687ab 100644
--- a/sound/soc/msm/msm7201.c
+++ b/sound/soc/msm/msm7201.c
@@ -25,6 +25,7 @@
#include <linux/time.h>
#include <linux/wait.h>
#include <linux/platform_device.h>
+#include <linux/msm_audio.h>
#include <sound/core.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>
@@ -46,10 +47,27 @@
struct msm_snd_rpc_ids {
unsigned long prog;
unsigned long vers;
- unsigned long vers2;
unsigned long rpc_set_snd_device;
unsigned long rpc_set_device_vol;
- int device;
+ struct cad_devices_type device;
+};
+
+struct rpc_cad_set_device_args {
+ struct cad_devices_type device;
+ uint32_t ear_mute;
+ uint32_t mic_mute;
+
+ uint32_t cb_func;
+ uint32_t client_data;
+};
+
+struct rpc_cad_set_volume_args {
+ struct cad_devices_type device;
+ uint32_t method;
+ uint32_t volume;
+
+ uint32_t cb_func;
+ uint32_t client_data;
};
static struct msm_snd_rpc_ids snd_rpc_ids;
@@ -97,7 +115,7 @@
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
- uinfo->count = 3; /* Device */
+ uinfo->count = 4; /* Device */
/*
* The number of devices supported is 26 (0 to 25)
@@ -110,23 +128,25 @@
static int snd_msm_device_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- ucontrol->value.integer.value[0] = (uint32_t)snd_rpc_ids.device;
- ucontrol->value.integer.value[1] = snd_mute_ear_mute;
- ucontrol->value.integer.value[2] = snd_mute_mic_mute;
+ ucontrol->value.integer.value[0]
+ = (uint32_t)snd_rpc_ids.device.rx_device;
+ ucontrol->value.integer.value[1]
+ = (uint32_t)snd_rpc_ids.device.tx_device;
+ ucontrol->value.integer.value[2] = snd_mute_ear_mute;
+ ucontrol->value.integer.value[3] = snd_mute_mic_mute;
return 0;
}
int msm_snd_init_rpc_ids(void)
{
snd_rpc_ids.prog = 0x30000002;
- snd_rpc_ids.vers = 0x00020001;
- snd_rpc_ids.vers2 = 0x00030001;
+ snd_rpc_ids.vers = 0x00030003;
/*
* The magic number 2 corresponds to the rpc call
* index for snd_set_device
*/
- snd_rpc_ids.rpc_set_snd_device = 2;
- snd_rpc_ids.rpc_set_device_vol = 3;
+ snd_rpc_ids.rpc_set_snd_device = 40;
+ snd_rpc_ids.rpc_set_device_vol = 39;
return 0;
}
@@ -139,7 +159,7 @@
/* Initialize rpc ids */
if (msm_snd_init_rpc_ids()) {
- printk(KERN_ERR "%s: snd rpc ids initialization failed\n"
+ pr_err("%s: snd rpc ids initialization failed\n"
, __func__);
return -ENODATA;
}
@@ -147,16 +167,8 @@
snd_ep = msm_rpc_connect_compatible(snd_rpc_ids.prog,
snd_rpc_ids.vers, 0);
if (IS_ERR(snd_ep)) {
- printk(KERN_DEBUG "%s failed (compatible VERS = %ld) \
- trying again with another API\n",
+ pr_err("%s: failed (compatible VERS = %ld)\n",
__func__, snd_rpc_ids.vers);
- snd_ep =
- msm_rpc_connect_compatible(snd_rpc_ids.prog,
- snd_rpc_ids.vers2, 0);
- }
- if (IS_ERR(snd_ep)) {
- printk(KERN_ERR "%s: failed (compatible VERS = %ld)\n",
- __func__, snd_rpc_ids.vers2);
snd_ep = NULL;
return -EAGAIN;
}
@@ -168,7 +180,7 @@
int rc = 0;
if (IS_ERR(snd_ep)) {
- printk(KERN_ERR "%s: snd handle unavailable, rc = %ld\n",
+ pr_err("%s: snd handle unavailable, rc = %ld\n",
__func__, PTR_ERR(snd_ep));
return -EAGAIN;
}
@@ -177,7 +189,7 @@
snd_ep = NULL;
if (rc < 0) {
- printk(KERN_ERR "%s: close rpc failed! rc = %d\n",
+ pr_err("%s: close rpc failed! rc = %d\n",
__func__, rc);
return -EAGAIN;
} else
@@ -190,47 +202,46 @@
struct snd_ctl_elem_value *ucontrol)
{
int rc = 0;
- struct snd_start_req {
+ struct snd_cad_set_device_msg {
struct rpc_request_hdr hdr;
- uint32_t rpc_snd_device;
- uint32_t snd_mute_ear_mute;
- uint32_t snd_mute_mic_mute;
- uint32_t callback_ptr;
- uint32_t client_data;
- } req;
+ struct rpc_cad_set_device_args args;
+ } dmsg;
- snd_rpc_ids.device = (int)ucontrol->value.integer.value[0];
+ snd_rpc_ids.device.rx_device
+ = (int)ucontrol->value.integer.value[0];
+ snd_rpc_ids.device.tx_device
+ = (int)ucontrol->value.integer.value[1];
+ snd_rpc_ids.device.pathtype = CAD_DEVICE_PATH_RX_TX;
- if (ucontrol->value.integer.value[1] > 1)
- ucontrol->value.integer.value[1] = 1;
- if (ucontrol->value.integer.value[2] > 1)
- ucontrol->value.integer.value[2] = 1;
-
- req.hdr.type = 0;
- req.hdr.rpc_vers = 2;
-
- req.rpc_snd_device = cpu_to_be32(snd_rpc_ids.device);
- req.snd_mute_ear_mute =
- cpu_to_be32((int)ucontrol->value.integer.value[1]);
- req.snd_mute_mic_mute =
- cpu_to_be32((int)ucontrol->value.integer.value[2]);
- req.callback_ptr = -1;
- req.client_data = cpu_to_be32(0);
-
- req.hdr.prog = snd_rpc_ids.prog;
- req.hdr.vers = snd_rpc_ids.vers;
+ dmsg.args.device.rx_device
+ = cpu_to_be32(snd_rpc_ids.device.rx_device);
+ dmsg.args.device.tx_device
+ = cpu_to_be32(snd_rpc_ids.device.tx_device);
+ dmsg.args.device.pathtype = cpu_to_be32(CAD_DEVICE_PATH_RX_TX);
+ dmsg.args.ear_mute = cpu_to_be32(ucontrol->value.integer.value[2]);
+ dmsg.args.mic_mute = cpu_to_be32(ucontrol->value.integer.value[3]);
+ if (!(dmsg.args.ear_mute == SND_MUTE_MUTED ||
+ dmsg.args.ear_mute == SND_MUTE_UNMUTED) ||
+ (!(dmsg.args.mic_mute == SND_MUTE_MUTED ||
+		dmsg.args.mic_mute == SND_MUTE_UNMUTED))) {
+ pr_err("snd_cad_ioctl set device: invalid mute status\n");
+ rc = -EINVAL;
+ return rc;
+ }
+ dmsg.args.cb_func = -1;
+ dmsg.args.client_data = 0;
rc = msm_rpc_call(snd_ep, snd_rpc_ids.rpc_set_snd_device ,
- &req, sizeof(req), 5 * HZ);
+ &dmsg, sizeof(dmsg), 5 * HZ);
if (rc < 0) {
- printk(KERN_ERR "%s: snd rpc call failed! rc = %d\n",
+ pr_err("%s: snd rpc call failed! rc = %d\n",
__func__, rc);
} else {
printk(KERN_INFO "snd device connected\n");
- snd_mute_ear_mute = ucontrol->value.integer.value[1];
- snd_mute_mic_mute = ucontrol->value.integer.value[2];
- printk(KERN_ERR "%s: snd_mute_ear_mute =%d, snd_mute_mic_mute = %d\n",
+ snd_mute_ear_mute = ucontrol->value.integer.value[2];
+ snd_mute_mic_mute = ucontrol->value.integer.value[3];
+ pr_err("%s: snd_mute_ear_mute =%d, snd_mute_mic_mute = %d\n",
__func__, snd_mute_ear_mute, snd_mute_mic_mute);
}
@@ -241,13 +252,13 @@
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
- uinfo->count = 2; /* Device/Volume */
+ uinfo->count = 1; /* Device/Volume */
/*
- * The number of devices supported is 37 (0 to 36)
+ * The volume ranges from (0 to 6)
*/
uinfo->value.integer.min = 0;
- uinfo->value.integer.max = 36;
+ uinfo->value.integer.max = 6;
return 0;
}
@@ -255,44 +266,34 @@
struct snd_ctl_elem_value *ucontrol)
{
int rc = 0;
- struct snd_vol_req {
+
+ struct snd_cad_set_volume_msg {
struct rpc_request_hdr hdr;
- uint32_t device;
- uint32_t method;
- uint32_t volume;
- uint32_t cb_func;
- uint32_t client_data;
- } req;
+ struct rpc_cad_set_volume_args args;
+ } vmsg;
- snd_rpc_ids.device = (int)ucontrol->value.integer.value[0];
-
- if ((ucontrol->value.integer.value[1] < 0) ||
- (ucontrol->value.integer.value[1] > 6)) {
- pr_err("Device volume should be in range of 1 to 6\n");
- return -EINVAL;
- }
- if ((ucontrol->value.integer.value[0] > 36) ||
- (ucontrol->value.integer.value[0] < 0)) {
- pr_err("Device range supported is 0 to 36\n");
- return -EINVAL;
- }
-
- req.device = cpu_to_be32((int)ucontrol->value.integer.value[0]);
- req.method = cpu_to_be32(0);
- req.volume = cpu_to_be32((int)ucontrol->value.integer.value[1]);
- req.cb_func = -1;
- req.client_data = cpu_to_be32(0);
+ vmsg.args.device.rx_device
+ = cpu_to_be32(snd_rpc_ids.device.rx_device);
+ vmsg.args.device.tx_device
+ = cpu_to_be32(snd_rpc_ids.device.tx_device);
+ vmsg.args.method = cpu_to_be32(SND_METHOD_VOICE);
+ vmsg.args.volume = cpu_to_be32(ucontrol->value.integer.value[0]);
+ vmsg.args.cb_func = -1;
+ vmsg.args.client_data = 0;
rc = msm_rpc_call(snd_ep, snd_rpc_ids.rpc_set_device_vol ,
- &req, sizeof(req), 5 * HZ);
+ &vmsg, sizeof(vmsg), 5 * HZ);
if (rc < 0) {
- printk(KERN_ERR "%s: snd rpc call failed! rc = %d\n",
+ pr_err("%s: snd rpc call failed! rc = %d\n",
__func__, rc);
} else {
- printk(KERN_ERR "%s: device [%d] volume set to [%d]\n",
- __func__, (int)ucontrol->value.integer.value[0],
- (int)ucontrol->value.integer.value[1]);
+		pr_debug("%s: rx device [%d]\n", __func__,
+			snd_rpc_ids.device.rx_device);
+		pr_debug("%s: tx device [%d]\n", __func__,
+			snd_rpc_ids.device.tx_device);
+		pr_debug("%s: volume set to [%ld]\n", __func__,
+			ucontrol->value.integer.value[0]);
}
return rc;
@@ -349,7 +350,7 @@
struct snd_soc_pcm_runtime *rtd)
{
int ret = 0;
- struct snd_soc_codec *codec = rtd->codec;
+ struct snd_soc_codec *codec = rtd->codec;
mutex_init(&the_locks.lock);
mutex_init(&the_locks.write_lock);
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-afe-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-afe-v2.c
index 1aa12e3..dcf5cfa 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-afe-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-afe-v2.c
@@ -33,8 +33,8 @@
#include <mach/msm_subsystem_map.h>
#include "msm-pcm-afe-v2.h"
-#define MIN_PERIOD_SIZE (128 * 2 * 8)
-#define MAX_PERIOD_SIZE (128 * 2 * 2 * 6 * 8)
+#define MIN_PERIOD_SIZE (128 * 2)
+#define MAX_PERIOD_SIZE (128 * 2 * 2 * 6)
#define MAX_NUM_PERIODS 384
#define MIN_NUM_PERIODS 32
static struct snd_pcm_hardware msm_afe_hardware = {
@@ -49,8 +49,8 @@
.rate_min = 8000,
.rate_max = 48000,
.channels_min = 1,
- .channels_max = 8,
- .buffer_bytes_max = MAX_PERIOD_SIZE * 32,
+ .channels_max = 6,
+ .buffer_bytes_max = MAX_PERIOD_SIZE * MIN_NUM_PERIODS,
.period_bytes_min = MIN_PERIOD_SIZE,
.period_bytes_max = MAX_PERIOD_SIZE,
.periods_min = MIN_NUM_PERIODS,
@@ -355,17 +355,6 @@
if (ret < 0)
pr_err("snd_pcm_hw_constraint_integer failed\n");
- if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
- ret = snd_pcm_hw_constraint_minmax(runtime,
- SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
- MAX_NUM_PERIODS * MIN_PERIOD_SIZE,
- MIN_NUM_PERIODS * MAX_PERIOD_SIZE);
- if (ret < 0) {
- pr_err("constraint for buffer bytes min max ret = %d\n",
- ret);
- }
- }
-
return 0;
}
@@ -510,8 +499,8 @@
dir = OUT;
rc = q6afe_audio_client_buf_alloc_contiguous(dir,
prtd->audio_client,
- (params_buffer_bytes(params) / params_periods(params)),
- params_periods(params));
+ runtime->hw.period_bytes_min,
+ runtime->hw.periods_max);
if (rc < 0) {
pr_err("Audio Start: Buffer Allocation failed rc = %d\n", rc);
mutex_unlock(&prtd->lock);
@@ -530,14 +519,14 @@
dma_buf->private_data = NULL;
dma_buf->area = buf[0].data;
dma_buf->addr = buf[0].phys;
- dma_buf->bytes = params_buffer_bytes(params);
+ dma_buf->bytes = runtime->hw.buffer_bytes_max;
if (!dma_buf->area) {
pr_err("%s:MSM AFE physical memory allocation failed\n",
__func__);
mutex_unlock(&prtd->lock);
return -ENOMEM;
}
- memset(dma_buf->area, 0, params_buffer_bytes(params));
+ memset(dma_buf->area, 0, runtime->hw.buffer_bytes_max);
prtd->dma_addr = (u32) dma_buf->addr;
mutex_unlock(&prtd->lock);