Merge changes I2bb0954d,Icdbbebb9,I659b133b,Id51bc882,I59917adc,Ia67eaee0,I8e82434b,I1f112fa3 into msm-3.4
* changes:
mm: mmzone: MIGRATE_CMA migration type added
mm: page_alloc: change fallbacks array handling
mm: page_alloc: introduce alloc_contig_range()
mm: compaction: export some of the functions
mm: compaction: introduce isolate_freepages_range()
mm: compaction: introduce map_pages()
mm: compaction: introduce isolate_migratepages_range()
mm: page_alloc: remove trailing whitespace
diff --git a/Documentation/block/test-iosched.txt b/Documentation/block/test-iosched.txt
new file mode 100644
index 0000000..75d8134
--- /dev/null
+++ b/Documentation/block/test-iosched.txt
@@ -0,0 +1,39 @@
+Test IO scheduler
+==================
+
+The test scheduler allows testing a block device by dispatching
+specific requests according to the test case and declaring PASS/FAIL
+according to the requests' completion error codes.
+
+The test IO scheduler implements the no-op scheduler operations, and uses
+them to dispatch the non-test requests when no test is running.
+This allows normal FS operation to continue in parallel with the test
+capability.
+The test IO scheduler keeps two different queues, one for real-world requests
+(inserted by the FS) and the other for the test requests.
+The test IO scheduler chooses the queue to dispatch from according to the
+test state (IDLE/RUNNING).
+
+The test IO scheduler is compiled by default as a dynamic module and enabled
+only if CONFIG_DEBUG_FS is defined.
+
+Each block device test utility that would like to use the test-iosched test
+services should register as a blk_dev_test_type and supply init and exit
+callbacks. Those callbacks are called upon selection (or removal) of
+test-iosched as the active scheduler. From that point the block device test
+can start a test and supply its own callbacks for preparing, running, result
+checking and cleanup of the test.
+
+Each test is exposed via debugfs and can be triggered by writing to
+its debugfs file. To add a new test, one should expose a new debugfs
+file for that test.
+
+Selecting IO schedulers
+-----------------------
+Refer to Documentation/block/switching-sched.txt for information on
+selecting an io scheduler on a per-device basis.
+
+
+May 10 2012, Maya Erez <merez@codeaurora.org>
+
+
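A minimal registration sketch for the blk_dev_test_type flow described in
Documentation/block/test-iosched.txt above. The struct fields (type, init_fn,
exit_fn), the header location and the test_iosched_register()/
test_iosched_unregister() helpers are assumptions taken from the description
and should be checked against the test-iosched header in this tree.

	#include <linux/module.h>
	#include <linux/test-iosched.h>	/* assumed header location */

	/*
	 * Called when test-iosched becomes (init) or stops being (exit) the
	 * active scheduler for the device under test; this is where the test
	 * utility would create/remove its debugfs trigger files.
	 */
	static void my_blk_test_init(void)
	{
	}

	static void my_blk_test_exit(void)
	{
	}

	/* Field names are assumptions based on the text above. */
	static struct blk_dev_test_type my_blk_test = {
		.type	 = "my_blk_test",
		.init_fn = my_blk_test_init,
		.exit_fn = my_blk_test_exit,
	};

	static int __init my_blk_test_setup(void)
	{
		test_iosched_register(&my_blk_test);
		return 0;
	}

	static void __exit my_blk_test_teardown(void)
	{
		test_iosched_unregister(&my_blk_test);
	}

	module_init(my_blk_test_setup);
	module_exit(my_blk_test_teardown);
	MODULE_LICENSE("GPL v2");
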
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_watchdog.txt b/Documentation/devicetree/bindings/arm/msm/msm_watchdog.txt
new file mode 100644
index 0000000..9f0c922
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/msm_watchdog.txt
@@ -0,0 +1,33 @@
+* Qualcomm MSM Watchdog
+
+The watchdog timer is configured with a bark and a bite time.
+If the watchdog is not "pet" at regular intervals, the system
+is assumed to have become unresponsive and needs to be reset.
+A warning in the form of a bark timeout leads to a bark interrupt
+and a kernel panic. If the watchdog timer is still not reset,
+a bite timeout occurs, which is an interrupt in the secure mode
+that leads to a reset of the SoC via the secure watchdog. The
+driver needs the pet time and the bark timeout to be programmed
+into the watchdog, as well as the bark and bite IRQs.
+
+The device tree parameters for the watchdog are:
+
+Required parameters:
+
+- compatible : "qcom,msm-watchdog"
+- reg : offset and length of the register set for the watchdog block.
+- interrupts : should contain bark and bite irq numbers
+- qcom,pet-time : Non-zero interval, in ms, at which the watchdog should be pet.
+- qcom,bark-time : Non-zero timeout value, in ms, for a watchdog bark.
+- qcom,ipi-ping : Set to 1 to send a keep-alive ping to other CPUs, 0 otherwise.
+
+Example:
+
+ qcom,wdt@f9017000 {
+ compatible = "qcom,msm-watchdog";
+ reg = <0xf9017000 0x1000>;
+ interrupts = <0 3 0 0 4 0>;
+ qcom,bark-time = <11000>;
+ qcom,pet-time = <10000>;
+ qcom,ipi-ping = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/msm/rpm-regulator-smd.txt b/Documentation/devicetree/bindings/arm/msm/rpm-regulator-smd.txt
index 786635f..82935ed 100644
--- a/Documentation/devicetree/bindings/arm/msm/rpm-regulator-smd.txt
+++ b/Documentation/devicetree/bindings/arm/msm/rpm-regulator-smd.txt
@@ -54,6 +54,14 @@
- parent-supply: phandle to the parent supply/regulator node
- qcom,system-load: Load in uA present on regulator that is not
captured by any consumer request
+- qcom,use-voltage-corner: Flag that signifies whether regulator_set_voltage
+ calls should modify the corner parameter instead
+ of the voltage parameter. When used, voltages
+ specified inside of the regulator framework
+ represent corners that have been incremented by
+ 1. This value shift is necessary to work around
+ limitations in the regulator framework which
+ treat 0 uV as an error.
The following properties specify initial values for parameters to be sent to the
RPM in regulator requests.
- qcom,init-enable: 0 = regulator disabled
@@ -120,6 +128,24 @@
2 = GPS
4 = WLAN
8 = WAN
+- qcom,init-voltage-corner: Performance corner to use in order to determine
+ voltage set point. This value corresponds to
+ the actual value that will be sent and is not
+ incremented by 1 like the values used inside of
+ the regulator framework. The meaning of corner
+ values is set by the RPM. It is possible that
+ different regulators on a given platform or
+ similar regulators on different platforms will
+ utilize different corner values. These are
+ corner values supported on MSM8974 for PMIC
+ PM8841 SMPS 2 (VDD_Dig); nominal voltages for
+ these corners are also shown:
+ 0 = Retention (0.5000 V)
+ 1 = SVS Krait (0.7250 V)
+ 2 = SVS SOC (0.8125 V)
+ 3 = Normal (0.9000 V)
+ 4 = Turbo (0.9875 V)
+ 5 = Super Turbo (1.0500 V)
All properties specified within the core regulator framework can also be used in
second level nodes. These bindings can be found in:
@@ -150,4 +176,13 @@
regulator-max-microvolt = <1150000>;
compatible = "qcom,rpm-regulator-smd";
};
+ pm8841_s1_corner: regulator-s1-corner {
+ regulator-name = "8841_s1_corner";
+ qcom,set = <3>;
+ regulator-min-microvolt = <1>;
+ regulator-max-microvolt = <6>;
+ qcom,init-voltage-corner = <3>;
+ qcom,use-voltage-corner;
+ compatible = "qcom,rpm-regulator-smd";
+ };
};
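A hedged consumer-side sketch of the voltage-corner convention documented
above: because the regulator framework treats 0 uV as an error, corner N is
requested as N + 1 through the standard regulator API. The supply name
"vdd-corner" and the probe context are illustrative only, not part of the
binding.

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/regulator/consumer.h>

	/*
	 * Vote for the "Normal" corner (3) on a qcom,use-voltage-corner
	 * regulator such as 8841_s1_corner in the example above.  Corner
	 * values are shifted up by 1 inside the regulator framework.
	 */
	static int vote_normal_corner(struct device *dev)
	{
		struct regulator *vdd;
		int corner = 3;		/* Normal, nominally 0.9000 V */
		int ret;

		vdd = regulator_get(dev, "vdd-corner"); /* illustrative name */
		if (IS_ERR(vdd))
			return PTR_ERR(vdd);

		ret = regulator_set_voltage(vdd, corner + 1, corner + 1);
		if (!ret)
			ret = regulator_enable(vdd);
		if (ret)
			regulator_put(vdd);

		return ret;
	}
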
diff --git a/Documentation/devicetree/bindings/gpio/qpnp-gpio.txt b/Documentation/devicetree/bindings/gpio/qpnp-gpio.txt
deleted file mode 100644
index 7cab09b..0000000
--- a/Documentation/devicetree/bindings/gpio/qpnp-gpio.txt
+++ /dev/null
@@ -1,143 +0,0 @@
-* msm-qpnp-gpio
-
-msm-qpnp-gpio is a GPIO chip driver for the MSM SPMI implementation.
-It creates a spmi_device for every spmi-dev-container block of device_nodes.
-These device_nodes contained within specify the PMIC GPIO number associated
-with each GPIO chip. The driver will map these to Linux GPIO numbers.
-
-[PMIC GPIO Device Declarations]
-
--Root Node-
-
-Required properties :
- - spmi-dev-container : Used to specify the following child nodes as part of the
- same SPMI device.
- - gpio-controller : Specify as gpio-contoller. All child nodes will belong to this
- gpio_chip.
- - #gpio-cells: We encode a PMIC GPIO number and a 32-bit flag field to
- specify the gpio configuration. This must be set to '2'.
- - #address-cells: Specify one address field. This must be set to '1'.
- - #size-cells: Specify one size-cell. This must be set to '1'.
- - compatible = "qcom,qpnp-gpio" : Specify driver matching for this driver.
-
--Child Nodes-
-
-Required properties :
- - reg : Specify the spmi offset and size for this gpio device.
- - qcom,gpio-num : Specify the PMIC GPIO number for this gpio device.
-
-Optional configuration properties :
- - qcom,direction: indicates whether the gpio should be input, output, or
- both.
- QPNP_GPIO_DIR_IN = 0,
- QPNP_GPIO_DIR_OUT = 1,
- QPNP_GPIO_DIR_BOTH = 2
-
- - qcom,output-type: indicates gpio should be configured as CMOS or open
- drain.
- QPNP_GPIO_OUT_BUF_CMOS = 0
- QPNP_GPIO_OUT_BUF_OPEN_DRAIN_NMOS = 1,
- QPNP_GPIO_OUT_BUF_OPEN_DRAIN_PMOS = 2,
-
- - qcom,invert: Invert the signal of the gpio line -
- QPNP_GPIO_INVERT_DISABLE = 0
- QPNP_GPIO_INVERT_ENABLE = 1
-
- - qcom,pull: Indicates whether a pull up or pull down should be
- applied. If a pullup is required the current strength
- needs to be specified. Current values of 30uA, 1.5uA,
- 31.5uA, 1.5uA with 30uA boost are supported.
- QPNP_GPIO_PULL_UP_30 = 0,
- QPNP_GPIO_PULL_UP_1P5 = 1,
- QPNP_GPIO_PULL_UP_31P5 = 2,
- QPNP_GPIO_PULL_UP_1P5_30 = 3,
- QPNP_GPIO_PULL_DN = 4,
- QPNP_GPIO_PULL_NO = 5
-
- - qcom,vin-sel: specifies the voltage level when the output is set to 1.
- For an input gpio specifies the voltage level at which
- the input is interpreted as a logical 1.
- QPNP_GPIO_VIN0 = 0,
- QPNP_GPIO_VIN1 = 1,
- QPNP_GPIO_VIN2 = 2,
- QPNP_GPIO_VIN3 = 3,
- QPNP_GPIO_VIN4 = 4,
- QPNP_GPIO_VIN5 = 5,
- QPNP_GPIO_VIN6 = 6,
- QPNP_GPIO_VIN7 = 7
-
- - qcom,out-strength: the amount of current supplied for an output gpio.
- QPNP_GPIO_OUT_STRENGTH_LOW = 1
- QPNP_GPIO_OUT_STRENGTH_MED = 2,
- QPNP_GPIO_OUT_STRENGTH_HIGH = 3,
-
- - qcom,source-sel: choose alternate function for the gpio. Certain gpios
- can be paired (shorted) with each other. Some gpio pin
- can act as alternate functions.
- QPNP_GPIO_FUNC_NORMAL = 0,
- QPNP_GPIO_FUNC_PAIRED = 1
- QPNP_GPIO_FUNC_1 = 2,
- QPNP_GPIO_FUNC_2 = 3,
- QPNP_GPIO_DTEST1 = 4,
- QPNP_GPIO_DTEST2 = 5,
- QPNP_GPIO_DTEST3 = 6,
- QPNP_GPIO_DTEST4 = 7
-
- - qcom,master-en: 1 = Enable features within the
- GPIO block based on configurations.
- 0 = Completely disable the GPIO
- block and let the pin float with high impedance
- regardless of other settings.
-
-*Note: If any of the configuration properties are not specified, then the
- qpnp-gpio driver will not modify that respective configuration in
- hardware.
-
-[PMIC GPIO clients]
-
-Required properties :
- - gpios : Contains 3 fields of the form <&gpio_controller pmic_gpio_num flags>
-
-[Example]
-
-qpnp: qcom,spmi@fc4c0000 {
- #address-cells = <1>;
- #size-cells = <0>;
- interrupt-controller;
- #interrupt-cells = <3>;
-
- qcom,pm8941@0 {
- spmi-slave-container;
- reg = <0x0>;
- #address-cells = <1>;
- #size-cells = <1>;
-
- pm8941_gpios: gpios {
- spmi-dev-container;
- compatible = "qcom,qpnp-gpio";
- gpio-controller;
- #gpio-cells = <2>;
- #address-cells = <1>;
- #size-cells = <1>;
-
- gpio@c000 {
- reg = <0xc000 0x100>;
- qcom,gpio-num = <62>;
- };
-
- gpio@c100 {
- reg = <0xc100 0x100>;
- qcom,gpio-num = <20>;
- qcom,source_sel = <2>;
- qcom,pull = <5>;
- };
- };
-
- qcom,testgpio@1000 {
- compatible = "qcom,qpnp-testgpio";
- reg = <0x1000 0x1000>;
- gpios = <&pm8941_gpios 62 0x0 &pm8941_gpios 20 0x1>;
- };
- };
- };
-};
diff --git a/Documentation/devicetree/bindings/gpio/qpnp-pin.txt b/Documentation/devicetree/bindings/gpio/qpnp-pin.txt
new file mode 100644
index 0000000..c58e073
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/qpnp-pin.txt
@@ -0,0 +1,198 @@
+* msm-qpnp-pin
+
+msm-qpnp-pin is a GPIO chip driver for the MSM SPMI implementation.
+It creates a spmi_device for every spmi-dev-container block of device_nodes.
+These device_nodes contained within specify the PMIC pin number associated
+with each gpio chip. The driver will map these to Linux GPIO numbers.
+
+[PMIC GPIO Device Declarations]
+
+-Root Node-
+
+Required properties :
+ - spmi-dev-container : Used to specify the following child nodes as part of the
+ same SPMI device.
+ - gpio-controller : Specify as gpio-controller. All child nodes will belong to
+ this gpio_chip.
+ - #gpio-cells: We encode a PMIC pin number and a 32-bit flag field to
+ specify the gpio configuration. This must be set to '2'.
+ - #address-cells: Specify one address field. This must be set to '1'.
+ - #size-cells: Specify one size-cell. This must be set to '1'.
+ - compatible = "qcom,qpnp-pin" : Specify driver matching for this driver.
+ - label: String giving the name for the gpio_chip device. This name
+ should be unique on the system and portray the specifics of the device.
+
+-Child Nodes-
+
+Required properties :
+ - reg : Specify the spmi offset and size for this pin device.
+ - qcom,pin-num : Specify the PMIC pin number for this device.
+
+Optional configuration properties :
+ - qcom,mode: indicates whether the pin should be input, output, or
+			both for gpios. MPP pins also support bidirectional,
+ analog in, analog out and current sink.
+ QPNP_PIN_MODE_DIG_IN = 0, (GPIO/MPP)
+ QPNP_PIN_MODE_DIG_OUT = 1, (GPIO/MPP)
+ QPNP_PIN_MODE_DIG_IN_OUT = 2, (GPIO/MPP)
+ QPNP_PIN_MODE_BIDIR = 3, (MPP)
+ QPNP_PIN_MODE_AIN = 4, (MPP)
+ QPNP_PIN_MODE_AOUT = 5, (MPP)
+ QPNP_PIN_MODE_SINK = 6 (MPP)
+
+ - qcom,output-type: indicates gpio should be configured as CMOS or open
+ drain.
+ QPNP_PIN_OUT_BUF_CMOS = 0, (GPIO)
+ QPNP_PIN_OUT_BUF_OPEN_DRAIN_NMOS = 1, (GPIO)
+ QPNP_PIN_OUT_BUF_OPEN_DRAIN_PMOS = 2 (GPIO)
+
+ - qcom,invert: Invert the signal of the gpio line -
+ QPNP_PIN_INVERT_DISABLE = 0 (GPIO/MPP)
+ QPNP_PIN_INVERT_ENABLE = 1 (GPIO/MPP)
+
+ - qcom,pull: This parameter should be programmed to different values
+ depending on whether it's GPIO or MPP.
+ For GPIO, it indicates whether a pull up or pull down
+ should be applied. If a pullup is required the
+ current strength needs to be specified.
+ Current values of 30uA, 1.5uA, 31.5uA, 1.5uA with 30uA
+ boost are supported. This value should be one of
+ the QPNP_PIN_GPIO_PULL_*. Note that the hardware ignores
+ this configuration if the GPIO is not set to input or
+ output open-drain mode.
+ QPNP_PIN_PULL_UP_30 = 0, (GPIO)
+ QPNP_PIN_PULL_UP_1P5 = 1, (GPIO)
+ QPNP_PIN_PULL_UP_31P5 = 2, (GPIO)
+ QPNP_PIN_PULL_UP_1P5_30 = 3, (GPIO)
+ QPNP_PIN_PULL_DN = 4, (GPIO)
+ QPNP_PIN_PULL_NO = 5 (GPIO)
+
+ For MPP, it indicates whether a pullup should be
+			applied for bidirectional mode only. The hardware
+ ignores the configuration when operating in other modes.
+ This value should be one of the QPNP_PIN_MPP_PULL_*.
+
+ QPNP_PIN_MPP_PULL_UP_0P6KOHM = 0, (MPP)
+ QPNP_PIN_MPP_PULL_UP_OPEN = 1 (MPP)
+ QPNP_PIN_MPP_PULL_UP_10KOHM = 2, (MPP)
+ QPNP_PIN_MPP_PULL_UP_30KOHM = 3, (MPP)
+
+ - qcom,vin-sel: specifies the voltage level when the output is set to 1.
+ For an input gpio specifies the voltage level at which
+ the input is interpreted as a logical 1.
+ QPNP_PIN_VIN0 = 0, (GPIO/MPP)
+ QPNP_PIN_VIN1 = 1, (GPIO/MPP)
+ QPNP_PIN_VIN2 = 2, (GPIO/MPP)
+ QPNP_PIN_VIN3 = 3, (GPIO/MPP)
+ QPNP_PIN_VIN4 = 4, (GPIO/MPP)
+ QPNP_PIN_VIN5 = 5, (GPIO/MPP)
+ QPNP_PIN_VIN6 = 6, (GPIO/MPP)
+ QPNP_PIN_VIN7 = 7 (GPIO/MPP)
+
+ - qcom,out-strength: the amount of current supplied for an output gpio.
+ QPNP_PIN_OUT_STRENGTH_LOW = 1 (GPIO)
+ QPNP_PIN_OUT_STRENGTH_MED = 2, (GPIO)
+ QPNP_PIN_OUT_STRENGTH_HIGH = 3, (GPIO)
+
+ - qcom,select: select a function for the pin. Certain pins
+ can be paired (shorted) with each other. Some gpio pins
+ can act as alternate functions.
+ In the context of gpio, this acts as a source select.
+ For mpps, this is an enable select.
+ QPNP_PIN_SEL_FUNC_CONSTANT = 0, (GPIO/MPP)
+ QPNP_PIN_SEL_FUNC_PAIRED = 1, (GPIO/MPP)
+ QPNP_PIN_SEL_FUNC_1 = 2, (GPIO/MPP)
+ QPNP_PIN_SEL_FUNC_2 = 3, (GPIO/MPP)
+ QPNP_PIN_SEL_DTEST1 = 4, (GPIO/MPP)
+ QPNP_PIN_SEL_DTEST2 = 5, (GPIO/MPP)
+ QPNP_PIN_SEL_DTEST3 = 6, (GPIO/MPP)
+ QPNP_PIN_SEL_DTEST4 = 7 (GPIO/MPP)
+
+ - qcom,master-en: 1 = Enable features within the
+ pin block based on configurations. (GPIO/MPP)
+ 0 = Completely disable the block and
+ let the pin float with high impedance
+ regardless of other settings. (GPIO/MPP)
+ - qcom,aout-ref: set the analog output reference.
+
+ QPNP_PIN_AOUT_1V25 = 0, (MPP)
+ QPNP_PIN_AOUT_0V625 = 1, (MPP)
+ QPNP_PIN_AOUT_0V3125 = 2, (MPP)
+ QPNP_PIN_AOUT_MPP = 3, (MPP)
+ QPNP_PIN_AOUT_ABUS1 = 4, (MPP)
+ QPNP_PIN_AOUT_ABUS2 = 5, (MPP)
+ QPNP_PIN_AOUT_ABUS3 = 6, (MPP)
+ QPNP_PIN_AOUT_ABUS4 = 7 (MPP)
+
+ - qcom,ain-route: Set the destination for analog input.
+ QPNP_PIN_AIN_AMUX_CH5 = 0, (MPP)
+ QPNP_PIN_AIN_AMUX_CH6 = 1, (MPP)
+ QPNP_PIN_AIN_AMUX_CH7 = 2, (MPP)
+ QPNP_PIN_AIN_AMUX_CH8 = 3, (MPP)
+ QPNP_PIN_AIN_AMUX_ABUS1 = 4, (MPP)
+ QPNP_PIN_AIN_AMUX_ABUS2 = 5, (MPP)
+ QPNP_PIN_AIN_AMUX_ABUS3 = 6, (MPP)
+ QPNP_PIN_AIN_AMUX_ABUS4 = 7 (MPP)
+
+ - qcom,cs-out:		Set the amount of current to sink, in mA.
+ QPNP_PIN_CS_OUT_5MA = 0, (MPP)
+ QPNP_PIN_CS_OUT_10MA = 1, (MPP)
+ QPNP_PIN_CS_OUT_15MA = 2, (MPP)
+ QPNP_PIN_CS_OUT_20MA = 3, (MPP)
+ QPNP_PIN_CS_OUT_25MA = 4, (MPP)
+ QPNP_PIN_CS_OUT_30MA = 5, (MPP)
+ QPNP_PIN_CS_OUT_35MA = 6, (MPP)
+ QPNP_PIN_CS_OUT_40MA = 7 (MPP)
+
+*Note: If any of the configuration properties are not specified, then the
+ qpnp-pin driver will not modify that respective configuration in
+ hardware.
+
+[PMIC GPIO clients]
+
+Required properties :
+ - gpios : Contains 3 fields of the form <&gpio_controller pmic_pin_num flags>
+
+[Example]
+
+qpnp: qcom,spmi@fc4c0000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+
+ qcom,pm8941@0 {
+ spmi-slave-container;
+ reg = <0x0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ pm8941_gpios: gpios {
+ spmi-dev-container;
+ compatible = "qcom,qpnp-pin";
+ gpio-controller;
+ #gpio-cells = <2>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ gpio@c000 {
+ reg = <0xc000 0x100>;
+ qcom,pin-num = <62>;
+ };
+
+ gpio@c100 {
+ reg = <0xc100 0x100>;
+ qcom,pin-num = <20>;
+ qcom,source_sel = <2>;
+ qcom,pull = <5>;
+ };
+ };
+
+ qcom,testgpio@1000 {
+ compatible = "qcom,qpnp-testgpio";
+ reg = <0x1000 0x1000>;
+ gpios = <&pm8941_gpios 62 0x0 &pm8941_gpios 20 0x1>;
+ };
+ };
+ };
+};
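For the [PMIC GPIO clients] section above, a short sketch of how a client
driver might pick up its pin from the gpios property using the stock
3.4-era gpiolib/OF helpers; the node pointer, flags and label are
illustrative.

	#include <linux/gpio.h>
	#include <linux/of.h>
	#include <linux/of_gpio.h>

	/*
	 * Resolve the first "gpios" entry (e.g. <&pm8941_gpios 62 0x0> in
	 * the example above), claim it as an input and read its level.
	 */
	static int testgpio_read_pin(struct device_node *np)
	{
		int gpio, ret;

		gpio = of_get_gpio(np, 0);
		if (gpio < 0)
			return gpio;

		ret = gpio_request_one(gpio, GPIOF_IN, "qpnp-testgpio");
		if (ret)
			return ret;

		return gpio_get_value(gpio);
	}
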
diff --git a/Documentation/devicetree/bindings/spmi/msm-spmi.txt b/Documentation/devicetree/bindings/spmi/msm-spmi.txt
index d50037f..5e43ea6 100644
--- a/Documentation/devicetree/bindings/spmi/msm-spmi.txt
+++ b/Documentation/devicetree/bindings/spmi/msm-spmi.txt
@@ -36,6 +36,10 @@
number of interrupts.
- interrupt-parent : the phandle for the interrupt controller that
services interrupts for this device.
+ - reg-names : a list of strings that map in order to the list of addresses
+ specified above in the 'reg' property.
+ - interrupt-names : a list of strings that map in order to the list of
+ interrupts specified in the 'interrupts' property.
[Second Level Nodes]
@@ -60,6 +64,10 @@
- spmi-dev-container: This specifies that all the device nodes specified for
this slave_id should have their resources coalesced into only one
spmi_device.
+ - reg-names : a list of strings that map in order to the list of addresses
+ specified above in the 'reg' property.
+ - interrupt-names : a list of strings that map in order to the list of
+ interrupts specified in the 'interrupts' property.
[Third Level Nodes]
@@ -79,7 +87,14 @@
number of interrupts.
- interrupt-parent : the phandle for the interrupt controller that
services interrupts for this device.
-
+ - reg-names : a list of strings that map in order to the list of addresses
+ specified above in the 'reg' property.
+ - interrupt-names : a list of strings that map in order to the list of
+ interrupts specified in the 'interrupts' property.
+ - label: A single name that names the device. This name can be looked up
+ with spmi_get_node_byname(). This is mostly useful in spmi-dev-container
+ configurations where multiple device_nodes are associated with one spmi
+ device.
Notes :
- It is considered an error to include spmi-slave-dev at this level.
@@ -97,7 +112,7 @@
compatible = "qcom,qpnp-testint";
reg = <0xf>;
interrupts = <0x3 0x15 0x0 0x3 0x15 0x02 0x1 0x47 0x0>;
-
+ interrupt-names = "testint_0", "testint_1", "testint_err";
};
pm8941@0 {
@@ -108,22 +123,23 @@
pm8941_gpios: gpios {
spmi-dev-container;
- compatible = "qcom,qpnp-gpio";
+ compatible = "qcom,qpnp-pin";
gpio-controller;
#gpio-cells = <1>;
#address-cells = <1>;
#size-cells = <1>;
pm8941_gpio1@0xc000 {
- compatible = "qcom,qpnp-gpio";
+ compatible = "qcom,qpnp-pin";
reg = <0xc000 0x100>;
qcom,qpnp_gpio = <1>;
interrupt-parent = <&qpnp>;
interrupts = <0x3 0x15 0x02 0x1 0x47 0x0>;
+ label = "foo-dev";
};
pm8941_gpio2@0xc100 {
- compatible = "qcom,qpnp-gpio";
+ compatible = "qcom,qpnp-pin";
reg = <0xc100 0x100>;
qcom,qpnp_gpio = <2>;
interrupt-parent = <&qpnp>;
@@ -133,7 +149,8 @@
testgpio@0x1000 {
compatible = "qcom,qpnp-testgpio";
- reg = <0x1000 0x1000>;
+ reg = <0x1000 0x1000 0x2000 0x1000>;
+ reg-names = "foo", "bar";
qpnp-gpios = <&pm8941_gpios 0x0>;
};
};
@@ -143,7 +160,7 @@
#address-cells = <1>;
#size-cells = <1>;
spmi-dev-container;
- compatible = "qcom,qpnp-gpio";
+ compatible = "qcom,qpnp-pin";
pm8841_gpio1@0xc000 {
reg = <0xc000 0x100>;
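The label property added above is meant for by-name lookups from the driver.
A sketch under stated assumptions: spmi_get_node_byname() is only named, not
prototyped, in the binding text, so the argument and return types below are
guesses that must be checked against the SPMI header in this tree.

	#include <linux/errno.h>
	#include <linux/spmi.h>

	/*
	 * Assumed signature: look up the child node labelled "foo-dev"
	 * (see the example above) on this slave.  Verify the real
	 * prototype in include/linux/spmi.h before relying on this.
	 */
	static int find_foo_dev(struct spmi_device *spmi)
	{
		struct spmi_resource *node;

		node = spmi_get_node_byname(spmi, "foo-dev");
		if (!node)
			return -ENODEV;

		/* node->of_node (if present) can be parsed like any DT node. */
		return 0;
	}
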
diff --git a/arch/arm/boot/dts/msm-pm8841.dtsi b/arch/arm/boot/dts/msm-pm8841.dtsi
index b157e95..a586a90 100644
--- a/arch/arm/boot/dts/msm-pm8841.dtsi
+++ b/arch/arm/boot/dts/msm-pm8841.dtsi
@@ -17,6 +17,47 @@
interrupt-controller;
#interrupt-cells = <3>;
+ qcom,pm8841@4 {
+ spmi-slave-container;
+ reg = <0x4>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ pm8841_mpps {
+ spmi-dev-container;
+ compatible = "qcom,qpnp-pin";
+ gpio-controller;
+ #gpio-cells = <2>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ label = "pm8841-mpp";
+
+ mpp@a000 {
+ reg = <0xa000 0x100>;
+ qcom,pin-num = <1>;
+ status = "disabled";
+ };
+
+ mpp@a100 {
+ reg = <0xa100 0x100>;
+ qcom,pin-num = <2>;
+ status = "disabled";
+ };
+
+ mpp@a200 {
+ reg = <0xa200 0x100>;
+ qcom,pin-num = <3>;
+ status = "disabled";
+ };
+
+ mpp@a300 {
+ reg = <0xa300 0x100>;
+ qcom,pin-num = <4>;
+ status = "disabled";
+ };
+ };
+ };
+
qcom,pm8841@5 {
spmi-slave-container;
reg = <0x5>;
diff --git a/arch/arm/boot/dts/msm-pm8941.dtsi b/arch/arm/boot/dts/msm-pm8941.dtsi
index e62dfbd..2714d9e 100644
--- a/arch/arm/boot/dts/msm-pm8941.dtsi
+++ b/arch/arm/boot/dts/msm-pm8941.dtsi
@@ -25,225 +25,284 @@
pm8941_gpios {
spmi-dev-container;
- compatible = "qcom,qpnp-gpio";
+ compatible = "qcom,qpnp-pin";
gpio-controller;
#gpio-cells = <2>;
#address-cells = <1>;
#size-cells = <1>;
+ label = "pm8941-gpio";
gpio@c000 {
reg = <0xc000 0x100>;
- qcom,gpio-num = <1>;
+ qcom,pin-num = <1>;
status = "disabled";
};
gpio@c100 {
reg = <0xc100 0x100>;
- qcom,gpio-num = <2>;
+ qcom,pin-num = <2>;
status = "disabled";
};
gpio@c200 {
reg = <0xc200 0x100>;
- qcom,gpio-num = <3>;
+ qcom,pin-num = <3>;
status = "disabled";
};
gpio@c300 {
reg = <0xc300 0x100>;
- qcom,gpio-num = <4>;
+ qcom,pin-num = <4>;
status = "disabled";
};
gpio@c400 {
reg = <0xc400 0x100>;
- qcom,gpio-num = <5>;
+ qcom,pin-num = <5>;
status = "disabled";
};
gpio@c500 {
reg = <0xc500 0x100>;
- qcom,gpio-num = <6>;
+ qcom,pin-num = <6>;
status = "disabled";
};
gpio@c600 {
reg = <0xc600 0x100>;
- qcom,gpio-num = <7>;
+ qcom,pin-num = <7>;
status = "disabled";
};
gpio@c700 {
reg = <0xc700 0x100>;
- qcom,gpio-num = <8>;
+ qcom,pin-num = <8>;
status = "disabled";
};
gpio@c800 {
reg = <0xc800 0x100>;
- qcom,gpio-num = <9>;
+ qcom,pin-num = <9>;
status = "disabled";
};
gpio@c900 {
reg = <0xc900 0x100>;
- qcom,gpio-num = <10>;
+ qcom,pin-num = <10>;
status = "disabled";
};
gpio@ca00 {
reg = <0xca00 0x100>;
- qcom,gpio-num = <11>;
+ qcom,pin-num = <11>;
status = "disabled";
};
gpio@cb00 {
reg = <0xcb00 0x100>;
- qcom,gpio-num = <12>;
+ qcom,pin-num = <12>;
status = "disabled";
};
gpio@cc00 {
reg = <0xcc00 0x100>;
- qcom,gpio-num = <13>;
+ qcom,pin-num = <13>;
status = "disabled";
};
gpio@cd00 {
reg = <0xcd00 0x100>;
- qcom,gpio-num = <14>;
+ qcom,pin-num = <14>;
status = "disabled";
};
gpio@ce00 {
reg = <0xce00 0x100>;
- qcom,gpio-num = <15>;
+ qcom,pin-num = <15>;
status = "disabled";
};
gpio@cf00 {
reg = <0xcf00 0x100>;
- qcom,gpio-num = <16>;
+ qcom,pin-num = <16>;
status = "disabled";
};
gpio@d000 {
reg = <0xd000 0x100>;
- qcom,gpio-num = <17>;
+ qcom,pin-num = <17>;
status = "disabled";
};
gpio@d100 {
reg = <0xd100 0x100>;
- qcom,gpio-num = <18>;
+ qcom,pin-num = <18>;
status = "disabled";
};
gpio@d200 {
reg = <0xd200 0x100>;
- qcom,gpio-num = <19>;
+ qcom,pin-num = <19>;
status = "disabled";
};
gpio@d300 {
reg = <0xd300 0x100>;
- qcom,gpio-num = <20>;
+ qcom,pin-num = <20>;
status = "disabled";
};
gpio@d400 {
reg = <0xd400 0x100>;
- qcom,gpio-num = <21>;
+ qcom,pin-num = <21>;
status = "disabled";
};
gpio@d500 {
reg = <0xd500 0x100>;
- qcom,gpio-num = <22>;
+ qcom,pin-num = <22>;
status = "disabled";
};
gpio@d600 {
reg = <0xd600 0x100>;
- qcom,gpio-num = <23>;
+ qcom,pin-num = <23>;
status = "disabled";
};
gpio@d700 {
reg = <0xd700 0x100>;
- qcom,gpio-num = <24>;
+ qcom,pin-num = <24>;
status = "disabled";
};
gpio@d800 {
reg = <0xd800 0x100>;
- qcom,gpio-num = <25>;
+ qcom,pin-num = <25>;
status = "disabled";
};
gpio@d900 {
reg = <0xd900 0x100>;
- qcom,gpio-num = <26>;
+ qcom,pin-num = <26>;
status = "disabled";
};
gpio@da00 {
reg = <0xda00 0x100>;
- qcom,gpio-num = <27>;
+ qcom,pin-num = <27>;
status = "disabled";
};
gpio@db00 {
reg = <0xdb00 0x100>;
- qcom,gpio-num = <28>;
+ qcom,pin-num = <28>;
status = "disabled";
};
gpio@dc00 {
reg = <0xdc00 0x100>;
- qcom,gpio-num = <29>;
+ qcom,pin-num = <29>;
status = "disabled";
};
gpio@dd00 {
reg = <0xdd00 0x100>;
- qcom,gpio-num = <30>;
+ qcom,pin-num = <30>;
status = "disabled";
};
gpio@de00 {
reg = <0xde00 0x100>;
- qcom,gpio-num = <31>;
+ qcom,pin-num = <31>;
status = "disabled";
};
gpio@df00 {
reg = <0xdf00 0x100>;
- qcom,gpio-num = <32>;
+ qcom,pin-num = <32>;
status = "disabled";
};
gpio@e000 {
reg = <0xe000 0x100>;
- qcom,gpio-num = <33>;
+ qcom,pin-num = <33>;
status = "disabled";
};
gpio@e100 {
reg = <0xe100 0x100>;
- qcom,gpio-num = <34>;
+ qcom,pin-num = <34>;
status = "disabled";
};
gpio@e200 {
reg = <0xe200 0x100>;
- qcom,gpio-num = <35>;
+ qcom,pin-num = <35>;
status = "disabled";
};
gpio@e300 {
reg = <0xe300 0x100>;
- qcom,gpio-num = <36>;
+ qcom,pin-num = <36>;
+ status = "disabled";
+ };
+ };
+
+ pm8941_mpps {
+ spmi-dev-container;
+ compatible = "qcom,qpnp-pin";
+ gpio-controller;
+ #gpio-cells = <2>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ label = "pm8941-mpp";
+
+ mpp@a000 {
+ reg = <0xa000 0x100>;
+ qcom,pin-num = <1>;
+ status = "disabled";
+ };
+
+ mpp@a100 {
+ reg = <0xa100 0x100>;
+ qcom,pin-num = <2>;
+ status = "disabled";
+ };
+
+ mpp@a200 {
+ reg = <0xa200 0x100>;
+ qcom,pin-num = <3>;
+ status = "disabled";
+ };
+
+ mpp@a300 {
+ reg = <0xa300 0x100>;
+ qcom,pin-num = <4>;
+ status = "disabled";
+ };
+
+ mpp@a400 {
+ reg = <0xa400 0x100>;
+ qcom,pin-num = <5>;
+ status = "disabled";
+ };
+
+ mpp@a500 {
+ reg = <0xa500 0x100>;
+ qcom,pin-num = <6>;
+ status = "disabled";
+ };
+
+ mpp@a600 {
+ reg = <0xa600 0x100>;
+ qcom,pin-num = <7>;
+ status = "disabled";
+ };
+
+ mpp@a700 {
+ reg = <0xa700 0x100>;
+ qcom,pin-num = <8>;
status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/msmcopper-gpio.dtsi b/arch/arm/boot/dts/msmcopper-gpio.dtsi
index 7c3f5ce..59ad8db 100644
--- a/arch/arm/boot/dts/msmcopper-gpio.dtsi
+++ b/arch/arm/boot/dts/msmcopper-gpio.dtsi
@@ -18,197 +18,218 @@
pm8941_gpios: pm8941_gpios {
gpio@c000 {
- qcom,gpio-num = <1>;
status = "ok";
};
gpio@c100 {
- qcom,gpio-num = <2>;
status = "ok";
};
gpio@c200 {
- qcom,gpio-num = <3>;
status = "ok";
};
gpio@c300 {
- qcom,gpio-num = <4>;
status = "ok";
};
gpio@c400 {
- qcom,gpio-num = <5>;
status = "ok";
};
gpio@c500 {
- qcom,gpio-num = <6>;
status = "ok";
};
gpio@c600 {
- qcom,gpio-num = <7>;
status = "ok";
};
gpio@c700 {
- qcom,gpio-num = <8>;
status = "ok";
};
gpio@c800 {
- qcom,gpio-num = <9>;
status = "ok";
};
gpio@c900 {
- qcom,gpio-num = <10>;
status = "ok";
};
gpio@ca00 {
- qcom,gpio-num = <11>;
status = "ok";
};
gpio@cb00 {
- qcom,gpio-num = <12>;
status = "ok";
};
gpio@cc00 {
- qcom,gpio-num = <13>;
status = "ok";
};
gpio@cd00 {
- qcom,gpio-num = <14>;
status = "ok";
};
gpio@ce00 {
- qcom,gpio-num = <15>;
status = "ok";
};
gpio@cf00 {
- qcom,gpio-num = <16>;
status = "ok";
};
gpio@d000 {
- qcom,gpio-num = <17>;
status = "ok";
};
gpio@d100 {
- qcom,gpio-num = <18>;
status = "ok";
};
gpio@d200 {
- qcom,gpio-num = <19>;
status = "ok";
};
gpio@d300 {
- qcom,gpio-num = <20>;
status = "ok";
};
gpio@d400 {
- qcom,gpio-num = <21>;
status = "ok";
};
gpio@d500 {
- qcom,gpio-num = <22>;
status = "ok";
};
gpio@d600 {
- qcom,gpio-num = <23>;
status = "ok";
};
gpio@d700 {
- qcom,gpio-num = <24>;
status = "ok";
};
gpio@d800 {
- qcom,gpio-num = <25>;
qcom,out-strength = <1>;
status = "ok";
};
gpio@d900 {
- qcom,gpio-num = <26>;
qcom,out-strength = <1>;
status = "ok";
};
gpio@da00 {
- qcom,gpio-num = <27>;
qcom,out-strength = <1>;
status = "ok";
};
gpio@db00 {
- qcom,gpio-num = <28>;
qcom,out-strength = <1>;
status = "ok";
};
gpio@dc00 {
- qcom,gpio-num = <29>;
qcom,out-strength = <1>;
status = "ok";
};
gpio@dd00 {
- qcom,gpio-num = <30>;
qcom,out-strength = <1>;
status = "ok";
};
gpio@de00 {
- qcom,gpio-num = <31>;
qcom,out-strength = <1>;
status = "ok";
};
gpio@df00 {
- qcom,gpio-num = <32>;
qcom,out-strength = <1>;
status = "ok";
};
gpio@e000 {
- qcom,gpio-num = <33>;
qcom,out-strength = <1>;
status = "ok";
};
gpio@e100 {
- qcom,gpio-num = <34>;
qcom,out-strength = <1>;
status = "ok";
};
gpio@e200 {
- qcom,gpio-num = <35>;
qcom,out-strength = <1>;
status = "ok";
};
gpio@e300 {
- qcom,gpio-num = <36>;
qcom,out-strength = <1>;
status = "ok";
};
};
+
+ pm8941_mpps: pm8941_mpps {
+
+ mpp@a000 {
+ status = "ok";
+ };
+
+ mpp@a100 {
+ status = "ok";
+ };
+
+ mpp@a200 {
+ status = "ok";
+ };
+
+ mpp@a300 {
+ status = "ok";
+ };
+
+ mpp@a400 {
+ status = "ok";
+ };
+
+ mpp@a500 {
+ status = "ok";
+ };
+
+ mpp@a600 {
+ status = "ok";
+ };
+
+ mpp@a700 {
+ status = "ok";
+ };
+ };
+ };
+
+ qcom,pm8841@4 {
+
+ pm8841_mpps: pm8841_mpps {
+
+ mpp@a000 {
+ status = "ok";
+ };
+
+ mpp@a100 {
+ status = "ok";
+ };
+
+ mpp@a200 {
+ status = "ok";
+ };
+
+ mpp@a300 {
+ status = "ok";
+ };
+ };
};
};
};
diff --git a/arch/arm/boot/dts/msmcopper.dtsi b/arch/arm/boot/dts/msmcopper.dtsi
index 79d6814..bc7c5f0 100644
--- a/arch/arm/boot/dts/msmcopper.dtsi
+++ b/arch/arm/boot/dts/msmcopper.dtsi
@@ -270,6 +270,10 @@
qcom,acpuclk@f9000000 {
compatible = "qcom,acpuclk-copper";
+ krait0-supply = <&krait0_vreg>;
+ krait1-supply = <&krait1_vreg>;
+ krait2-supply = <&krait2_vreg>;
+ krait3-supply = <&krait3_vreg>;
};
qcom,ssusb@F9200000 {
@@ -398,6 +402,7 @@
compatible = "qcom,mdss_mdp";
reg = <0xfd900000 0x22100>;
interrupts = <0 72 0>;
+ vdd-supply = <&gdsc_mdss>;
};
qcom,mdss_wb_panel {
@@ -406,4 +411,13 @@
qcom,mdss_pan_res = <640 480>;
qcom,mdss_pan_bpp = <24>;
};
+
+ qcom,wdt@f9017000 {
+ compatible = "qcom,msm-watchdog";
+ reg = <0xf9017000 0x1000>;
+ interrupts = <0 3 0 0 4 0>;
+ qcom,bark-time = <11000>;
+ qcom,pet-time = <10000>;
+ qcom,ipi-ping = <1>;
+ };
};
diff --git a/arch/arm/common/cpaccess.c b/arch/arm/common/cpaccess.c
index e71e318..12e2c38 100644
--- a/arch/arm/common/cpaccess.c
+++ b/arch/arm/common/cpaccess.c
@@ -63,6 +63,8 @@
.name = "cpaccess",
};
+void cpaccess_dummy_inst(void);
+
#ifdef CONFIG_ARCH_MSM_KRAIT
/*
* do_read_il2 - Read indirect L2 registers
@@ -143,9 +145,12 @@
*/
static noinline unsigned long cpaccess_dummy(unsigned long write_val)
{
- asm("mrc p15, 0, r0, c0, c0, 0\n\t");
- asm("bx lr\n\t");
- return 0xBEEF;
+ unsigned long ret = 0xBEEF;
+
+ asm volatile (".globl cpaccess_dummy_inst\n"
+ "cpaccess_dummy_inst:\n\t"
+ "mrc p15, 0, %0, c0, c0, 0\n\t" : "=r" (ret));
+ return ret;
} __attribute__((aligned(32)))
/*
@@ -195,7 +200,7 @@
* Grab address of the Dummy function, write the MRC/MCR
* instruction, ensuring cache coherency.
*/
- p_opcode = (unsigned long *)&cpaccess_dummy;
+ p_opcode = (unsigned long *)&cpaccess_dummy_inst;
mem_text_write_kernel_word(p_opcode, opcode);
#ifdef CONFIG_SMP
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index b14ecf8..88c4862 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -64,9 +64,7 @@
u32 __percpu *saved_ppi_enable;
u32 __percpu *saved_ppi_conf;
#endif
-#ifdef CONFIG_IRQ_DOMAIN
- struct irq_domain domain;
-#endif
+ struct irq_domain *domain;
unsigned int gic_irqs;
#ifdef CONFIG_GIC_NON_BANKED
void __iomem *(*get_base)(union gic_base *);
@@ -447,7 +445,7 @@
irqnr = irqstat & ~0x1c00;
if (likely(irqnr > 15 && irqnr < 1021)) {
- irqnr = irq_domain_to_irq(&gic->domain, irqnr);
+ irqnr = irq_find_mapping(gic->domain, irqnr);
handle_IRQ(irqnr, regs);
continue;
}
@@ -485,8 +483,8 @@
if (gic_irq == 1023)
goto out;
- cascade_irq = irq_domain_to_irq(&chip_data->domain, gic_irq);
- if (unlikely(gic_irq < 32 || gic_irq > 1020 || cascade_irq >= NR_IRQS))
+ cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
+ if (unlikely(gic_irq < 32 || gic_irq > 1020))
do_bad_IRQ(cascade_irq, desc);
else
generic_handle_irq(cascade_irq);
@@ -520,10 +518,9 @@
static void __init gic_dist_init(struct gic_chip_data *gic)
{
- unsigned int i, irq;
+ unsigned int i;
u32 cpumask;
unsigned int gic_irqs = gic->gic_irqs;
- struct irq_domain *domain = &gic->domain;
void __iomem *base = gic_data_dist_base(gic);
u32 cpu = cpu_logical_map(smp_processor_id());
@@ -566,23 +563,6 @@
for (i = 32; i < gic_irqs; i += 32)
writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
- /*
- * Setup the Linux IRQ subsystem.
- */
- irq_domain_for_each_irq(domain, i, irq) {
- if (i < 32) {
- irq_set_percpu_devid(irq);
- irq_set_chip_and_handler(irq, &gic_chip,
- handle_percpu_devid_irq);
- set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
- } else {
- irq_set_chip_and_handler(irq, &gic_chip,
- handle_fasteoi_irq);
- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
- }
- irq_set_chip_data(irq, gic);
- }
-
gic->max_irq = gic_irqs;
if (is_cpu_secure())
@@ -831,13 +811,45 @@
static void __init gic_pm_init(struct gic_chip_data *gic)
{
}
+
+static void gic_cpu_restore(unsigned int gic_nr)
+{
+}
+
+static void gic_cpu_save(unsigned int gic_nr)
+{
+}
+
+static void gic_dist_restore(unsigned int gic_nr)
+{
+}
+
+static void gic_dist_save(unsigned int gic_nr)
+{
+}
#endif
-#ifdef CONFIG_OF
-static int gic_irq_domain_dt_translate(struct irq_domain *d,
- struct device_node *controller,
- const u32 *intspec, unsigned int intsize,
- unsigned long *out_hwirq, unsigned int *out_type)
+static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ if (hw < 32) {
+ irq_set_percpu_devid(irq);
+ irq_set_chip_and_handler(irq, &gic_chip,
+ handle_percpu_devid_irq);
+ set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
+ } else {
+ irq_set_chip_and_handler(irq, &gic_chip,
+ handle_fasteoi_irq);
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ }
+ irq_set_chip_data(irq, d->host_data);
+ return 0;
+}
+
+static int gic_irq_domain_xlate(struct irq_domain *d,
+ struct device_node *controller,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq, unsigned int *out_type)
{
if (d->of_node != controller)
return -EINVAL;
@@ -854,26 +866,23 @@
*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
return 0;
}
-#endif
const struct irq_domain_ops gic_irq_domain_ops = {
-#ifdef CONFIG_OF
- .dt_translate = gic_irq_domain_dt_translate,
-#endif
+ .map = gic_irq_domain_map,
+ .xlate = gic_irq_domain_xlate,
};
void __init gic_init_bases(unsigned int gic_nr, int irq_start,
void __iomem *dist_base, void __iomem *cpu_base,
- u32 percpu_offset)
+ u32 percpu_offset, struct device_node *node)
{
+ irq_hw_number_t hwirq_base;
struct gic_chip_data *gic;
- struct irq_domain *domain;
- int gic_irqs, rc;
+ int gic_irqs, irq_base;
BUG_ON(gic_nr >= MAX_GIC_NR);
gic = &gic_data[gic_nr];
- domain = &gic->domain;
#ifdef CONFIG_GIC_NON_BANKED
if (percpu_offset) { /* Frankein-GIC without banked registers... */
unsigned int cpu;
@@ -881,8 +890,11 @@
gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
if (WARN_ON(!gic->dist_base.percpu_base ||
- !gic->cpu_base.percpu_base))
- goto init_bases_err;
+ !gic->cpu_base.percpu_base)) {
+ free_percpu(gic->dist_base.percpu_base);
+ free_percpu(gic->cpu_base.percpu_base);
+ return;
+ }
for_each_possible_cpu(cpu) {
unsigned long offset = percpu_offset * cpu_logical_map(cpu);
@@ -906,13 +918,12 @@
* For primary GICs, skip over SGIs.
* For secondary GICs, skip over PPIs, too.
*/
- domain->hwirq_base = 32;
- if (gic_nr == 0) {
- if ((irq_start & 31) > 0) {
- domain->hwirq_base = 16;
- if (irq_start != -1)
- irq_start = (irq_start & ~31) + 16;
- }
+ if (gic_nr == 0 && (irq_start & 31) > 0) {
+ hwirq_base = 16;
+ if (irq_start != -1)
+ irq_start = (irq_start & ~31) + 16;
+ } else {
+ hwirq_base = 32;
}
/*
@@ -925,33 +936,22 @@
gic_irqs = 1020;
gic->gic_irqs = gic_irqs;
- domain->nr_irq = gic_irqs - domain->hwirq_base;
- domain->irq_base = irq_alloc_descs(irq_start, 16, domain->nr_irq,
- numa_node_id());
- if (IS_ERR_VALUE(domain->irq_base)) {
+ gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */
+ irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, numa_node_id());
+ if (IS_ERR_VALUE(irq_base)) {
WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
irq_start);
- domain->irq_base = irq_start;
+ irq_base = irq_start;
}
- domain->priv = gic;
- domain->ops = &gic_irq_domain_ops;
- rc = irq_domain_add(domain);
- if (rc) {
- WARN(1, "Unable to create irq_domain\n");
- goto init_bases_err;
- }
- irq_domain_register(domain);
+ gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
+ hwirq_base, &gic_irq_domain_ops, gic);
+ if (WARN_ON(!gic->domain))
+ return;
gic_chip.flags |= gic_arch_extn.flags;
gic_dist_init(gic);
gic_cpu_init(gic);
gic_pm_init(gic);
-
- return;
-
-init_bases_err:
- free_percpu(gic->dist_base.percpu_base);
- free_percpu(gic->cpu_base.percpu_base);
}
void __cpuinit gic_secondary_init(unsigned int gic_nr)
@@ -1036,7 +1036,6 @@
void __iomem *dist_base;
u32 percpu_offset;
int irq;
- struct irq_domain *domain = &gic_data[gic_cnt].domain;
if (WARN_ON(!node))
return -ENODEV;
@@ -1050,9 +1049,7 @@
if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
percpu_offset = 0;
- domain->of_node = of_node_get(node);
-
- gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset);
+ gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);
if (parent) {
irq = irq_of_parse_and_map(node, 0);
@@ -1152,6 +1149,7 @@
return 0;
}
+#endif
void msm_gic_save(bool modem_wake, int from_idle)
{
@@ -1218,4 +1216,3 @@
mb();
raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
}
-#endif
diff --git a/arch/arm/configs/msm-copper_defconfig b/arch/arm/configs/msm-copper_defconfig
index 78ab155..57cd263 100644
--- a/arch/arm/configs/msm-copper_defconfig
+++ b/arch/arm/configs/msm-copper_defconfig
@@ -136,10 +136,13 @@
CONFIG_SPMI=y
CONFIG_SPMI_MSM_PMIC_ARB=y
CONFIG_MSM_QPNP=y
+CONFIG_MSM_QPNP_INT=y
CONFIG_SLIMBUS=y
CONFIG_SLIMBUS_MSM_CTRL=y
CONFIG_DEBUG_GPIO=y
CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_QPNP_PIN=y
+CONFIG_GPIO_QPNP_PIN_DEBUG=y
CONFIG_POWER_SUPPLY=y
# CONFIG_BATTERY_MSM is not set
# CONFIG_HWMON is not set
@@ -170,6 +173,7 @@
CONFIG_MMC=y
CONFIG_MMC_PERF_PROFILING=y
CONFIG_MMC_UNSAFE_RESUME=y
+CONFIG_MMC_CLKGATE=y
CONFIG_MMC_PARANOID_SD_INIT=y
CONFIG_MMC_BLOCK_MINORS=32
# CONFIG_MMC_BLOCK_BOUNCE is not set
diff --git a/arch/arm/configs/msm8960-perf_defconfig b/arch/arm/configs/msm8960-perf_defconfig
index 57e644d..dd33d76 100644
--- a/arch/arm/configs/msm8960-perf_defconfig
+++ b/arch/arm/configs/msm8960-perf_defconfig
@@ -92,6 +92,8 @@
CONFIG_MSM_SLEEP_STATS=y
CONFIG_MSM_EBI_ERP=y
CONFIG_MSM_CACHE_ERP=y
+CONFIG_MSM_L1_ERR_PANIC=y
+CONFIG_MSM_L1_ERR_LOG=y
CONFIG_MSM_L2_ERP_2BIT_PANIC=y
CONFIG_MSM_DCVS=y
CONFIG_MSM_HSIC_SYSMON=y
diff --git a/arch/arm/configs/msm8960_defconfig b/arch/arm/configs/msm8960_defconfig
index ca8a909..c0bc02e 100644
--- a/arch/arm/configs/msm8960_defconfig
+++ b/arch/arm/configs/msm8960_defconfig
@@ -94,6 +94,7 @@
CONFIG_MSM_EBI_ERP=y
CONFIG_MSM_CACHE_ERP=y
CONFIG_MSM_L1_ERR_PANIC=y
+CONFIG_MSM_L1_ERR_LOG=y
CONFIG_MSM_L2_ERP_PRINT_ACCESS_ERRORS=y
CONFIG_MSM_L2_ERP_1BIT_PANIC=y
CONFIG_MSM_L2_ERP_2BIT_PANIC=y
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index a244039..926ac0e 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -90,6 +90,7 @@
#define L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT 16
#define L2X0_AUX_CTRL_WAY_SIZE_SHIFT 17
#define L2X0_AUX_CTRL_WAY_SIZE_MASK (0x7 << 17)
+#define L2X0_AUX_CTRL_EVNT_MON_BUS_EN_SHIFT 20
#define L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT 22
#define L2X0_AUX_CTRL_L2_FORCE_NWA_SHIFT 23
#define L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT 26
diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h
index 3783ff3..5078148 100644
--- a/arch/arm/include/asm/hardware/gic.h
+++ b/arch/arm/include/asm/hardware/gic.h
@@ -40,7 +40,7 @@
extern struct irq_chip gic_arch_extn;
void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
- u32 offset);
+ u32 offset, struct device_node *);
int gic_of_init(struct device_node *node, struct device_node *parent);
void gic_secondary_init(unsigned int);
void gic_handle_irq(struct pt_regs *regs);
@@ -56,12 +56,10 @@
static inline void gic_init(unsigned int nr, int start,
void __iomem *dist , void __iomem *cpu)
{
- gic_init_bases(nr, start, dist, cpu, 0);
+ gic_init_bases(nr, start, dist, cpu, 0, NULL);
}
void gic_set_irq_secure(unsigned int irq);
-#endif
-#ifdef CONFIG_ARCH_MSM8625
void msm_gic_save(bool modem_wake, int from_idle);
void msm_gic_restore(void);
void core1_gic_configure_and_raise(void);
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 0a45dee..669a626 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -22,7 +22,7 @@
const char *const *dt_compat; /* array of device tree
* 'compatible' strings */
- int nr_irqs; /* number of IRQs */
+ unsigned int nr_irqs; /* number of IRQs */
#ifdef CONFIG_ZONE_DMA
unsigned long dma_zone_size; /* size of DMA-able area */
diff --git a/arch/arm/include/asm/mach/mmc.h b/arch/arm/include/asm/mach/mmc.h
index 4bcbfc2..a734547 100644
--- a/arch/arm/include/asm/mach/mmc.h
+++ b/arch/arm/include/asm/mach/mmc.h
@@ -51,6 +51,15 @@
bool always_on;
/* is low power mode setting required for this regulator? */
bool lpm_sup;
+ /*
+	 * Used to indicate whether the regulator should be reset at boot time.
+	 * It's needed only when the SD card's vdd regulator is always on,
+	 * since always-on regulators don't get reset at boot time.
+	 *
+	 * It is needed for an SD 3.0 card to be detected as an SD 3.0 card
+ * on device reboot.
+ */
+ bool reset_at_init;
};
/*
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index bc48bff..65c8c0f 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -132,18 +132,8 @@
#ifdef CONFIG_SPARSE_IRQ
int __init arch_probe_nr_irqs(void)
{
- /*
- * machine_desc->nr_irqs < 0 is a special case that
- * specifies not to preallocate any irq_descs.
- */
- if (machine_desc->nr_irqs < 0) {
- nr_irqs = 0;
- return nr_irqs;
- } else {
- nr_irqs = machine_desc->nr_irqs ?
- machine_desc->nr_irqs : NR_IRQS;
- return nr_irqs;
- }
+ nr_irqs = machine_desc->nr_irqs ? machine_desc->nr_irqs : NR_IRQS;
+ return nr_irqs;
}
#endif
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index e37b28b..778128b 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -616,16 +616,14 @@
atomic_set(&armpmu->active_events, 0);
mutex_init(&armpmu->reserve_mutex);
- armpmu->pmu = (struct pmu) {
- .pmu_enable = armpmu_enable,
- .pmu_disable = armpmu_disable,
- .event_init = armpmu_event_init,
- .add = armpmu_add,
- .del = armpmu_del,
- .start = armpmu_start,
- .stop = armpmu_stop,
- .read = armpmu_read,
- };
+ armpmu->pmu.pmu_enable = armpmu_enable;
+ armpmu->pmu.pmu_disable = armpmu_disable;
+ armpmu->pmu.event_init = armpmu_event_init;
+ armpmu->pmu.add = armpmu_add;
+ armpmu->pmu.del = armpmu_del;
+ armpmu->pmu.start = armpmu_start;
+ armpmu->pmu.stop = armpmu_stop;
+ armpmu->pmu.read = armpmu_read;
}
int armpmu_register(struct arm_pmu *armpmu, char *name, int type)
@@ -857,14 +855,12 @@
case 0x02D0: /* 8x60 */
// fabricmon_pmu_init();
cpu_pmu = armv7_scorpionmp_pmu_init();
-// scorpionmp_l2_pmu_init();
break;
case 0x0490: /* 8960 sim */
case 0x04D0: /* 8960 */
case 0x06F0: /* 8064 */
// fabricmon_pmu_init();
cpu_pmu = armv7_krait_pmu_init();
-// krait_l2_pmu_init();
break;
}
}
diff --git a/arch/arm/kernel/perf_event_msm.c b/arch/arm/kernel/perf_event_msm.c
index 46fa8fe..90c9c9e 100644
--- a/arch/arm/kernel/perf_event_msm.c
+++ b/arch/arm/kernel/perf_event_msm.c
@@ -720,6 +720,8 @@
.start = armv7pmu_start,
.stop = armv7pmu_stop,
.reset = scorpion_pmu_reset,
+ .test_set_event_constraints = msm_test_set_ev_constraint,
+ .clear_event_constraints = msm_clear_ev_constraint,
.max_period = (1LLU << 32) - 1,
};
@@ -728,6 +730,7 @@
scorpion_pmu.id = ARM_PERF_PMU_ID_SCORPION;
scorpion_pmu.name = "ARMv7 Scorpion";
scorpion_pmu.num_events = armv7_read_num_pmnc_events();
+ scorpion_pmu.pmu.attr_groups = msm_l1_pmu_attr_grps;
scorpion_clear_pmuregs();
return &scorpion_pmu;
}
@@ -737,6 +740,7 @@
scorpion_pmu.id = ARM_PERF_PMU_ID_SCORPIONMP;
scorpion_pmu.name = "ARMv7 Scorpion-MP";
scorpion_pmu.num_events = armv7_read_num_pmnc_events();
+ scorpion_pmu.pmu.attr_groups = msm_l1_pmu_attr_grps;
scorpion_clear_pmuregs();
return &scorpion_pmu;
}
diff --git a/arch/arm/kernel/perf_event_msm_krait.c b/arch/arm/kernel/perf_event_msm_krait.c
index 1b115b4..8d8f47a 100644
--- a/arch/arm/kernel/perf_event_msm_krait.c
+++ b/arch/arm/kernel/perf_event_msm_krait.c
@@ -573,10 +573,10 @@
*/
static int msm_test_set_ev_constraint(struct perf_event *event)
{
- u32 krait_evt_type = event->attr.config & KRAIT_EVENT_MASK;
- u8 prefix = (krait_evt_type & 0xF0000) >> 16;
- u8 reg = (krait_evt_type & 0x0F000) >> 12;
- u8 group = krait_evt_type & 0x0000F;
+ u32 evt_type = event->attr.config & KRAIT_EVENT_MASK;
+ u8 prefix = (evt_type & 0xF0000) >> 16;
+ u8 reg = (evt_type & 0x0F000) >> 12;
+ u8 group = evt_type & 0x0000F;
u64 cpu_pmu_bitmap = __get_cpu_var(pmu_bitmap);
u64 bitmap_t;
@@ -598,10 +598,10 @@
static int msm_clear_ev_constraint(struct perf_event *event)
{
- u32 krait_evt_type = event->attr.config & KRAIT_EVENT_MASK;
- u8 prefix = (krait_evt_type & 0xF0000) >> 16;
- u8 reg = (krait_evt_type & 0x0F000) >> 12;
- u8 group = krait_evt_type & 0x0000F;
+ u32 evt_type = event->attr.config & KRAIT_EVENT_MASK;
+ u8 prefix = (evt_type & 0xF0000) >> 16;
+ u8 reg = (evt_type & 0x0F000) >> 12;
+ u8 group = evt_type & 0x0000F;
u64 cpu_pmu_bitmap = __get_cpu_var(pmu_bitmap);
u64 bitmap_t;
@@ -636,6 +636,34 @@
.max_period = (1LLU << 32) - 1,
};
+/* NRCCG format for perf RAW codes. */
+PMU_FORMAT_ATTR(prefix, "config:16-19");
+PMU_FORMAT_ATTR(reg, "config:12-15");
+PMU_FORMAT_ATTR(code, "config:4-11");
+PMU_FORMAT_ATTR(grp, "config:0-3");
+
+static struct attribute *msm_l1_ev_formats[] = {
+ &format_attr_prefix.attr,
+ &format_attr_reg.attr,
+ &format_attr_code.attr,
+ &format_attr_grp.attr,
+ NULL,
+};
+
+/*
+ * Format group is essential to access PMU's from userspace
+ * via their .name field.
+ */
+static struct attribute_group msm_pmu_format_group = {
+ .name = "format",
+ .attrs = msm_l1_ev_formats,
+};
+
+static const struct attribute_group *msm_l1_pmu_attr_grps[] = {
+ &msm_pmu_format_group,
+ NULL,
+};
+
int get_krait_ver(void)
{
int ver = 0;
@@ -655,6 +683,7 @@
krait_pmu.name = "ARMv7 Krait";
krait_pmu.map_event = krait_8960_map_event;
krait_pmu.num_events = armv7_read_num_pmnc_events();
+ krait_pmu.pmu.attr_groups = msm_l1_pmu_attr_grps;
krait_clear_pmuregs();
krait_ver = get_krait_ver();
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index 24c57ae..776bf40 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -38,6 +38,7 @@
select MSM_PM2 if PM
select MSM_RUN_QUEUE_STATS if MSM_SOC_REV_A
select DONT_MAP_HOLE_AFTER_MEMBANK0
+ select MIGHT_HAVE_CACHE_L2X0
config ARCH_MSM7X30
bool "MSM7x30"
@@ -288,6 +289,7 @@
select MULTI_IRQ_HANDLER
select ARM_TICKET_LOCKS
select MSM_RUN_QUEUE_STATS
+ select MIGHT_HAVE_CACHE_L2X0
config ARCH_MSM9625
bool "MSM9625"
@@ -345,12 +347,14 @@
select ARCH_MSM_SCORPION
select MSM_SMP
select HAVE_ARCH_HAS_CURRENT_TIMER
+ select MSM_JTAG if MSM_QDSS
bool
config ARCH_MSM_KRAITMP
select ARCH_MSM_KRAIT
select MSM_SMP
select HAVE_ARCH_HAS_CURRENT_TIMER
+ select MSM_JTAG if MSM_QDSS
bool
select HAVE_HW_BRKPT_RESERVED_RW_ACCESS
@@ -369,6 +373,7 @@
select MULTI_IRQ_HANDLER
select ARM_GIC
select ARCH_MSM_CORTEXMP
+ select MIGHT_HAVE_CACHE_L2X0
config MSM_VIC
bool
@@ -2071,9 +2076,15 @@
enabled via another mechanism.
config MSM_JTAG
- bool "JTAG debug and trace support"
+ bool "JTAG and kernel debug and trace support across power collapse"
help
- Add additional support for JTAG kernel debugging and tracing.
+ Enables support for kernel debugging (specifically breakpoints) and
+	  processor tracing across power collapse, both for JTAG and OS-hosted
+ software running on the target. Enabling this will ensure debug
+ and ETM registers are saved and restored across power collapse.
+
+ For production builds, you should probably say 'N' here to avoid
+ potential power, performance and memory penalty.
config MSM_ETM
tristate "Enable MSM ETM and ETB"
@@ -2082,30 +2093,6 @@
help
Enables embedded trace collection on MSM8660
-config MSM_QDSS
- bool "Qualcomm Debug Subsystem"
- select MSM_JTAG
- help
- Enables support for Qualcomm Debug Subsystem.
-
-config MSM_QDSS_STM_DEFAULT_ENABLE
- bool "Turn on QDSS STM Tracing by Default"
- depends on MSM_QDSS
- help
- Turns on QDSS STM tracing (hardware assisted software
- instrumentation based tracing) by default. Otherwise, tracing is
- disabled by default but can be enabled via sysfs.
-
- For production builds, you should probably say 'N' here to avoid
- potential power, performance and memory penalty.
-
-config MSM_QDSS_ETM_DEFAULT_ENABLE
- bool "Turn on QDSS ETM Tracing by Default"
- depends on MSM_QDSS
- help
- Turns on QDSS ETM tracing by default. Otherwise, tracing is
- disabled by default but can be enabled by other means.
-
config MSM_SLEEP_STATS
bool "Enable exporting of MSM sleep stats to userspace"
depends on CPU_IDLE
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 8315d70..ec8de9f 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -55,7 +55,6 @@
msm-etm-objs := etm.o
obj-$(CONFIG_MSM_ETM) += msm-etm.o
-obj-$(CONFIG_MSM_QDSS) += qdss.o qdss-etb.o qdss-tpiu.o qdss-funnel.o qdss-stm.o qdss-etm.o
quiet_cmd_mkrpcsym = MKCAP $@
cmd_mkrpcsym = $(PERL) $(srctree)/$(src)/mkrpcsym.pl $< $@
diff --git a/arch/arm/mach-msm/acpuclock-8960.c b/arch/arm/mach-msm/acpuclock-8960.c
index d29fee6..6c14efa 100644
--- a/arch/arm/mach-msm/acpuclock-8960.c
+++ b/arch/arm/mach-msm/acpuclock-8960.c
@@ -735,10 +735,6 @@
{ 1, { 1080000, HFPLL, 1, 0, 0x28 }, L2(16), 1175000 },
{ 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(16), 1175000 },
{ 1, { 1188000, HFPLL, 1, 0, 0x2C }, L2(16), 1200000 },
- { 1, { 1242000, HFPLL, 1, 0, 0x2E }, L2(16), 1200000 },
- { 1, { 1296000, HFPLL, 1, 0, 0x30 }, L2(16), 1225000 },
- { 1, { 1350000, HFPLL, 1, 0, 0x32 }, L2(16), 1225000 },
- { 1, { 1404000, HFPLL, 1, 0, 0x34 }, L2(16), 1237500 },
{ 0, { 0 } }
};
@@ -760,10 +756,6 @@
{ 1, { 1080000, HFPLL, 1, 0, 0x28 }, L2(16), 1150000 },
{ 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(16), 1150000 },
{ 1, { 1188000, HFPLL, 1, 0, 0x2C }, L2(16), 1175000 },
- { 1, { 1242000, HFPLL, 1, 0, 0x2E }, L2(16), 1175000 },
- { 1, { 1296000, HFPLL, 1, 0, 0x30 }, L2(16), 1200000 },
- { 1, { 1350000, HFPLL, 1, 0, 0x32 }, L2(16), 1200000 },
- { 1, { 1404000, HFPLL, 1, 0, 0x34 }, L2(16), 1212500 },
{ 0, { 0 } }
};
@@ -785,10 +777,6 @@
{ 1, { 1080000, HFPLL, 1, 0, 0x28 }, L2(16), 1100000 },
{ 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(16), 1100000 },
{ 1, { 1188000, HFPLL, 1, 0, 0x2C }, L2(16), 1125000 },
- { 1, { 1242000, HFPLL, 1, 0, 0x2E }, L2(16), 1125000 },
- { 1, { 1296000, HFPLL, 1, 0, 0x30 }, L2(16), 1150000 },
- { 1, { 1350000, HFPLL, 1, 0, 0x32 }, L2(16), 1150000 },
- { 1, { 1404000, HFPLL, 1, 0, 0x34 }, L2(16), 1162500 },
{ 0, { 0 } }
};
/* TODO: Update vdd_dig, vdd_mem and bw when data is available. */
diff --git a/arch/arm/mach-msm/acpuclock-copper.c b/arch/arm/mach-msm/acpuclock-copper.c
index f0da74c..7ba2e7d 100644
--- a/arch/arm/mach-msm/acpuclock-copper.c
+++ b/arch/arm/mach-msm/acpuclock-copper.c
@@ -62,68 +62,42 @@
.hfpll_data = &hfpll_data_cpu,
.l2cpmr_iaddr = 0x4501,
.vreg[VREG_CORE] = { "krait0", 1050000, 3200000 },
- .vreg[VREG_MEM] = { "krait0_mem", 1050000, 0,
- RPM_VREG_VOTER1,
- RPM_VREG_ID_PM8941_S1 },
- .vreg[VREG_DIG] = { "krait0_dig", 1050000, 0,
- RPM_VREG_VOTER1,
- RPM_VREG_ID_PM8941_S2 },
- .vreg[VREG_HFPLL_A] = { "hfpll", 1800000, 0,
- RPM_VREG_VOTER1,
- RPM_VREG_ID_PM8941_L12 },
+ .vreg[VREG_MEM] = { "krait0_mem", 1050000 },
+ .vreg[VREG_DIG] = { "krait0_dig", 1050000 },
+ .vreg[VREG_HFPLL_A] = { "krait0_hfpll", 1800000 },
},
[CPU1] = {
.hfpll_phys_base = 0xF909A000,
.hfpll_data = &hfpll_data_cpu,
.l2cpmr_iaddr = 0x5501,
.vreg[VREG_CORE] = { "krait1", 1050000, 3200000 },
- .vreg[VREG_MEM] = { "krait1_mem", 1050000, 0,
- RPM_VREG_VOTER2,
- RPM_VREG_ID_PM8941_S1 },
- .vreg[VREG_DIG] = { "krait1_dig", 1050000, 0,
- RPM_VREG_VOTER2,
- RPM_VREG_ID_PM8941_S2 },
- .vreg[VREG_HFPLL_A] = { "hfpll", 1800000, 0,
- RPM_VREG_VOTER2,
- RPM_VREG_ID_PM8941_L12 },
+ .vreg[VREG_MEM] = { "krait1_mem", 1050000 },
+ .vreg[VREG_DIG] = { "krait1_dig", 1050000 },
+ .vreg[VREG_HFPLL_A] = { "krait1_hfpll", 1800000 },
},
[CPU2] = {
.hfpll_phys_base = 0xF90AA000,
.hfpll_data = &hfpll_data_cpu,
.l2cpmr_iaddr = 0x6501,
.vreg[VREG_CORE] = { "krait2", 1050000, 3200000 },
- .vreg[VREG_MEM] = { "krait2_mem", 1050000, 0,
- RPM_VREG_VOTER4,
- RPM_VREG_ID_PM8921_S1 },
- .vreg[VREG_DIG] = { "krait2_dig", 1050000, 0,
- RPM_VREG_VOTER4,
- RPM_VREG_ID_PM8921_S2 },
- .vreg[VREG_HFPLL_A] = { "hfpll", 1800000, 0,
- RPM_VREG_VOTER4,
- RPM_VREG_ID_PM8941_L12 },
+ .vreg[VREG_MEM] = { "krait2_mem", 1050000 },
+ .vreg[VREG_DIG] = { "krait2_dig", 1050000 },
+ .vreg[VREG_HFPLL_A] = { "krait2_hfpll", 1800000 },
},
[CPU3] = {
.hfpll_phys_base = 0xF90BA000,
.hfpll_data = &hfpll_data_cpu,
.l2cpmr_iaddr = 0x7501,
.vreg[VREG_CORE] = { "krait3", 1050000, 3200000 },
- .vreg[VREG_MEM] = { "krait3_mem", 1050000, 0,
- RPM_VREG_VOTER5,
- RPM_VREG_ID_PM8941_S1 },
- .vreg[VREG_DIG] = { "krait3_dig", 1050000, 0,
- RPM_VREG_VOTER5,
- RPM_VREG_ID_PM8941_S2 },
- .vreg[VREG_HFPLL_A] = { "hfpll", 1800000, 0,
- RPM_VREG_VOTER5,
- RPM_VREG_ID_PM8941_L12 },
+ .vreg[VREG_MEM] = { "krait3_mem", 1050000 },
+ .vreg[VREG_DIG] = { "krait3_dig", 1050000 },
+ .vreg[VREG_HFPLL_A] = { "krait3_hfpll", 1800000 },
},
[L2] = {
.hfpll_phys_base = 0xF9016000,
.hfpll_data = &hfpll_data_l2,
.l2cpmr_iaddr = 0x0500,
- .vreg[VREG_HFPLL_A] = { "hfpll", 1800000, 0,
- RPM_VREG_VOTER6,
- RPM_VREG_ID_PM8941_L12 },
+ .vreg[VREG_HFPLL_A] = { "l2_hfpll", 1800000 },
},
};
diff --git a/arch/arm/mach-msm/acpuclock-krait.c b/arch/arm/mach-msm/acpuclock-krait.c
index 5682ac3..4dc47d2 100644
--- a/arch/arm/mach-msm/acpuclock-krait.c
+++ b/arch/arm/mach-msm/acpuclock-krait.c
@@ -32,6 +32,7 @@
#include <mach/socinfo.h>
#include <mach/msm-krait-l2-accessors.h>
#include <mach/rpm-regulator.h>
+#include <mach/rpm-regulator-smd.h>
#include <mach/msm_bus.h>
#include "acpuclock.h"
@@ -52,7 +53,7 @@
static DEFINE_SPINLOCK(l2_lock);
static struct drv_data {
- const struct acpu_level *acpu_freq_tbl;
+ struct acpu_level *acpu_freq_tbl;
const struct l2_level *l2_freq_tbl;
struct scalable *scalable;
u32 bus_perf_client;
@@ -92,35 +93,39 @@
udelay(1);
}
-/* Enable an already-configured HFPLL. */
-static void hfpll_enable(struct scalable *sc, bool skip_regulators)
+static void enable_rpm_vreg(struct vreg *vreg)
{
int rc;
+ if (vreg->rpm_reg) {
+ rc = rpm_regulator_enable(vreg->rpm_reg);
+ if (rc) {
+ dev_err(drv.dev, "%s regulator enable failed (%d)\n",
+ vreg->name, rc);
+ BUG();
+ }
+ }
+}
+
+static void disable_rpm_vreg(struct vreg *vreg)
+{
+ int rc;
+
+ if (vreg->rpm_reg) {
+ rc = rpm_regulator_disable(vreg->rpm_reg);
+ if (rc)
+ dev_err(drv.dev, "%s regulator disable failed (%d)\n",
+ vreg->name, rc);
+ }
+}
+
+/* Enable an already-configured HFPLL. */
+static void hfpll_enable(struct scalable *sc, bool skip_regulators)
+{
if (!skip_regulators) {
/* Enable regulators required by the HFPLL. */
- if (sc->vreg[VREG_HFPLL_A].rpm_vreg_id) {
- rc = rpm_vreg_set_voltage(
- sc->vreg[VREG_HFPLL_A].rpm_vreg_id,
- sc->vreg[VREG_HFPLL_A].rpm_vreg_voter,
- sc->vreg[VREG_HFPLL_A].cur_vdd,
- sc->vreg[VREG_HFPLL_A].max_vdd, 0);
- if (rc)
- dev_err(drv.dev,
- "%s regulator enable failed (%d)\n",
- sc->vreg[VREG_HFPLL_A].name, rc);
- }
- if (sc->vreg[VREG_HFPLL_B].rpm_vreg_id) {
- rc = rpm_vreg_set_voltage(
- sc->vreg[VREG_HFPLL_B].rpm_vreg_id,
- sc->vreg[VREG_HFPLL_B].rpm_vreg_voter,
- sc->vreg[VREG_HFPLL_B].cur_vdd,
- sc->vreg[VREG_HFPLL_B].max_vdd, 0);
- if (rc)
- dev_err(drv.dev,
- "%s regulator enable failed (%d)\n",
- sc->vreg[VREG_HFPLL_B].name, rc);
- }
+ enable_rpm_vreg(&sc->vreg[VREG_HFPLL_A]);
+ enable_rpm_vreg(&sc->vreg[VREG_HFPLL_B]);
}
/* Disable PLL bypass mode. */
@@ -147,8 +152,6 @@
/* Disable a HFPLL for power-savings or while it's being reprogrammed. */
static void hfpll_disable(struct scalable *sc, bool skip_regulators)
{
- int rc;
-
/*
* Disable the PLL output, disable test mode, enable the bypass mode,
* and assert the reset.
@@ -157,26 +160,8 @@
if (!skip_regulators) {
/* Remove voltage votes required by the HFPLL. */
- if (sc->vreg[VREG_HFPLL_B].rpm_vreg_id) {
- rc = rpm_vreg_set_voltage(
- sc->vreg[VREG_HFPLL_B].rpm_vreg_id,
- sc->vreg[VREG_HFPLL_B].rpm_vreg_voter,
- 0, 0, 0);
- if (rc)
- dev_err(drv.dev,
- "%s regulator enable failed (%d)\n",
- sc->vreg[VREG_HFPLL_B].name, rc);
- }
- if (sc->vreg[VREG_HFPLL_A].rpm_vreg_id) {
- rc = rpm_vreg_set_voltage(
- sc->vreg[VREG_HFPLL_A].rpm_vreg_id,
- sc->vreg[VREG_HFPLL_A].rpm_vreg_voter,
- 0, 0, 0);
- if (rc)
- dev_err(drv.dev,
- "%s regulator enable failed (%d)\n",
- sc->vreg[VREG_HFPLL_A].name, rc);
- }
+ disable_rpm_vreg(&sc->vreg[VREG_HFPLL_B]);
+ disable_rpm_vreg(&sc->vreg[VREG_HFPLL_A]);
}
}
@@ -228,19 +213,19 @@
set_pri_clk_src(sc, PRI_SRC_SEL_SEC_SRC);
/* Re-program HFPLL. */
- hfpll_disable(sc, 1);
+ hfpll_disable(sc, true);
hfpll_set_rate(sc, tgt_s);
- hfpll_enable(sc, 1);
+ hfpll_enable(sc, true);
/* Move to HFPLL. */
set_pri_clk_src(sc, tgt_s->pri_src_sel);
} else if (strt_s->src == HFPLL && tgt_s->src != HFPLL) {
set_sec_clk_src(sc, tgt_s->sec_src_sel);
set_pri_clk_src(sc, tgt_s->pri_src_sel);
- hfpll_disable(sc, 0);
+ hfpll_disable(sc, false);
} else if (strt_s->src != HFPLL && tgt_s->src == HFPLL) {
hfpll_set_rate(sc, tgt_s);
- hfpll_enable(sc, 0);
+ hfpll_enable(sc, false);
set_pri_clk_src(sc, tgt_s->pri_src_sel);
} else {
set_sec_clk_src(sc, tgt_s->sec_src_sel);
@@ -261,9 +246,8 @@
* vdd_mem should be >= vdd_dig.
*/
if (vdd_mem > sc->vreg[VREG_MEM].cur_vdd) {
- rc = rpm_vreg_set_voltage(sc->vreg[VREG_MEM].rpm_vreg_id,
- sc->vreg[VREG_MEM].rpm_vreg_voter, vdd_mem,
- sc->vreg[VREG_MEM].max_vdd, 0);
+ rc = rpm_regulator_set_voltage(sc->vreg[VREG_MEM].rpm_reg,
+ vdd_mem, sc->vreg[VREG_MEM].max_vdd);
if (rc) {
dev_err(drv.dev,
"vdd_mem (cpu%d) increase failed (%d)\n",
@@ -275,9 +259,8 @@
/* Increase vdd_dig active-set vote. */
if (vdd_dig > sc->vreg[VREG_DIG].cur_vdd) {
- rc = rpm_vreg_set_voltage(sc->vreg[VREG_DIG].rpm_vreg_id,
- sc->vreg[VREG_DIG].rpm_vreg_voter, vdd_dig,
- sc->vreg[VREG_DIG].max_vdd, 0);
+ rc = rpm_regulator_set_voltage(sc->vreg[VREG_DIG].rpm_reg,
+ vdd_dig, sc->vreg[VREG_DIG].max_vdd);
if (rc) {
dev_err(drv.dev,
"vdd_dig (cpu%d) increase failed (%d)\n",
@@ -336,9 +319,8 @@
/* Decrease vdd_dig active-set vote. */
if (vdd_dig < sc->vreg[VREG_DIG].cur_vdd) {
- ret = rpm_vreg_set_voltage(sc->vreg[VREG_DIG].rpm_vreg_id,
- sc->vreg[VREG_DIG].rpm_vreg_voter, vdd_dig,
- sc->vreg[VREG_DIG].max_vdd, 0);
+ ret = rpm_regulator_set_voltage(sc->vreg[VREG_DIG].rpm_reg,
+ vdd_dig, sc->vreg[VREG_DIG].max_vdd);
if (ret) {
dev_err(drv.dev,
"vdd_dig (cpu%d) decrease failed (%d)\n",
@@ -353,9 +335,8 @@
* vdd_mem should be >= vdd_dig.
*/
if (vdd_mem < sc->vreg[VREG_MEM].cur_vdd) {
- ret = rpm_vreg_set_voltage(sc->vreg[VREG_MEM].rpm_vreg_id,
- sc->vreg[VREG_MEM].rpm_vreg_voter, vdd_mem,
- sc->vreg[VREG_MEM].max_vdd, 0);
+ ret = rpm_regulator_set_voltage(sc->vreg[VREG_MEM].rpm_reg,
+ vdd_mem, sc->vreg[VREG_MEM].max_vdd);
if (ret) {
dev_err(drv.dev,
"vdd_mem (cpu%d) decrease failed (%d)\n",
@@ -484,7 +465,7 @@
pr_debug("Initializing HFPLL%d\n", sc - drv.scalable);
/* Disable the PLL for re-programming. */
- hfpll_disable(sc, 1);
+ hfpll_disable(sc, true);
/* Configure PLL parameters for integer mode. */
writel_relaxed(sc->hfpll_data->config_val,
@@ -492,13 +473,49 @@
writel_relaxed(0, sc->hfpll_base + sc->hfpll_data->m_offset);
writel_relaxed(1, sc->hfpll_base + sc->hfpll_data->n_offset);
+ /* Program droop controller, if supported */
+ if (sc->hfpll_data->has_droop_ctl)
+ writel_relaxed(sc->hfpll_data->droop_val,
+ sc->hfpll_base + sc->hfpll_data->droop_offset);
+
/* Set an initial rate and enable the PLL. */
hfpll_set_rate(sc, tgt_s);
- hfpll_enable(sc, 0);
+ hfpll_enable(sc, false);
+}
+
+static void __init rpm_regulator_init(struct scalable *sc, enum vregs vreg,
+ int vdd, bool enable)
+{
+ int ret;
+
+ if (!sc->vreg[vreg].name)
+ return;
+
+ sc->vreg[vreg].rpm_reg = rpm_regulator_get(drv.dev,
+ sc->vreg[vreg].name);
+ if (IS_ERR(sc->vreg[vreg].rpm_reg)) {
+ dev_err(drv.dev, "rpm_regulator_get(%s) failed (%ld)\n",
+ sc->vreg[vreg].name,
+ PTR_ERR(sc->vreg[vreg].rpm_reg));
+ BUG();
+ }
+
+ ret = rpm_regulator_set_voltage(sc->vreg[vreg].rpm_reg, vdd,
+ sc->vreg[vreg].max_vdd);
+ if (ret) {
+ dev_err(drv.dev, "%s initialization failed (%d)\n",
+ sc->vreg[vreg].name, ret);
+ BUG();
+ }
+ sc->vreg[vreg].cur_vdd = vdd;
+
+ if (enable)
+ enable_rpm_vreg(&sc->vreg[vreg]);
}
/* Voltage regulator initialization. */
-static void __init regulator_init(const struct acpu_level *lvl)
+static void __init regulator_init(struct device *dev,
+ const struct acpu_level *lvl)
{
int cpu, ret;
struct scalable *sc;
@@ -507,33 +524,23 @@
vdd_mem = calculate_vdd_mem(lvl);
vdd_dig = calculate_vdd_dig(lvl);
+ rpm_regulator_init(&drv.scalable[L2], VREG_HFPLL_A,
+ drv.scalable[L2].vreg[VREG_HFPLL_A].max_vdd, false);
+ rpm_regulator_init(&drv.scalable[L2], VREG_HFPLL_B,
+ drv.scalable[L2].vreg[VREG_HFPLL_B].max_vdd, false);
+
for_each_possible_cpu(cpu) {
sc = &drv.scalable[cpu];
- /* Set initial vdd_mem vote. */
- ret = rpm_vreg_set_voltage(sc->vreg[VREG_MEM].rpm_vreg_id,
- sc->vreg[VREG_MEM].rpm_vreg_voter, vdd_mem,
- sc->vreg[VREG_MEM].max_vdd, 0);
- if (ret) {
- dev_err(drv.dev, "%s initialization failed (%d)\n",
- sc->vreg[VREG_MEM].name, ret);
- BUG();
- }
- sc->vreg[VREG_MEM].cur_vdd = vdd_mem;
-
- /* Set initial vdd_dig vote. */
- ret = rpm_vreg_set_voltage(sc->vreg[VREG_DIG].rpm_vreg_id,
- sc->vreg[VREG_DIG].rpm_vreg_voter, vdd_dig,
- sc->vreg[VREG_DIG].max_vdd, 0);
- if (ret) {
- dev_err(drv.dev, "%s initialization failed (%d)\n",
- sc->vreg[VREG_DIG].name, ret);
- BUG();
- }
- sc->vreg[VREG_DIG].cur_vdd = vdd_dig;
+ rpm_regulator_init(sc, VREG_MEM, vdd_mem, true);
+ rpm_regulator_init(sc, VREG_DIG, vdd_dig, true);
+ rpm_regulator_init(sc, VREG_HFPLL_A,
+ sc->vreg[VREG_HFPLL_A].max_vdd, false);
+ rpm_regulator_init(sc, VREG_HFPLL_B,
+ sc->vreg[VREG_HFPLL_B].max_vdd, false);
/* Setup Krait CPU regulators and initial core voltage. */
- sc->vreg[VREG_CORE].reg = regulator_get(NULL,
+ sc->vreg[VREG_CORE].reg = regulator_get(dev,
sc->vreg[VREG_CORE].name);
if (IS_ERR(sc->vreg[VREG_CORE].reg)) {
dev_err(drv.dev, "regulator_get(%s) failed (%ld)\n",
@@ -571,10 +578,15 @@
const struct core_speed *tgt_s)
{
u32 regval;
+ void __iomem *aux_reg;
/* Program AUX source input to the secondary MUX. */
- if (sc->aux_clk_sel_addr)
- writel_relaxed(sc->aux_clk_sel, sc->aux_clk_sel_addr);
+ if (sc->aux_clk_sel_phys) {
+ aux_reg = ioremap(sc->aux_clk_sel_phys, 4);
+ BUG_ON(!aux_reg);
+ writel_relaxed(sc->aux_clk_sel, aux_reg);
+ iounmap(aux_reg);
+ }
/* Switch away from the HFPLL while it's re-initialized. */
set_sec_clk_src(sc, SEC_SRC_SEL_AUX);
@@ -691,8 +703,27 @@
.notifier_call = acpuclk_cpu_callback,
};
+static int krait_needs_vmin(void)
+{
+ switch (read_cpuid_id()) {
+ case 0x511F04D0: /* KR28M2A20 */
+ case 0x511F04D1: /* KR28M2A21 */
+ case 0x510F06F0: /* KR28M4A10 */
+ return 1;
+ default:
+ return 0;
+ };
+}
+
+static void krait_apply_vmin(struct acpu_level *tbl)
+{
+ for (; tbl->speed.khz != 0; tbl++)
+ if (tbl->vdd_core < 1150000)
+ tbl->vdd_core = 1150000;
+}
+
static const struct acpu_level __init *select_freq_plan(
- const struct acpu_level *const *pvs_tbl, u32 qfprom_phys)
+ struct acpu_level *const *pvs_tbl, u32 qfprom_phys)
{
const struct acpu_level *l, *max_acpu_level = NULL;
void __iomem *qfprom_base;
@@ -735,6 +766,9 @@
}
drv.acpu_freq_tbl = pvs_tbl[tbl_idx];
+ if (krait_needs_vmin())
+ krait_apply_vmin(drv.acpu_freq_tbl);
+
/* Find the max supported scaling frequency. */
for (l = drv.acpu_freq_tbl; l->speed.khz != 0; l++)
if (l->use_for_scaling)
@@ -769,7 +803,7 @@
max_acpu_level = select_freq_plan(params->pvs_acpu_freq_tbl,
params->qfprom_phys_base);
- regulator_init(max_acpu_level);
+ regulator_init(dev, max_acpu_level);
bus_init(params->bus_scale_data, max_acpu_level->l2_level->bw_level);
init_clock_sources(&drv.scalable[L2], &max_acpu_level->l2_level->speed);
for_each_online_cpu(cpu)
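
Two patterns recur in the acpuclock-krait.c hunks above: the duplicated
per-regulator vote sequences collapse into enable_rpm_vreg()/disable_rpm_vreg()
helpers that silently skip entries with no handle, and the bare 0/1 arguments
at the hfpll_enable()/hfpll_disable() call sites become explicit booleans. A
minimal, self-contained sketch of that shape (the names and the fake "vote"
operation are illustrative, not the driver's API):

    #include <stdbool.h>
    #include <stdio.h>

    struct vreg { const char *name; int handle; };

    /* The helper no-ops when the slot has no backing regulator, so callers
     * can pass every slot unconditionally. */
    static void enable_vreg(struct vreg *v)
    {
            if (!v->handle)
                    return;
            printf("enable vote for %s\n", v->name);
    }

    static void pll_enable(struct vreg *vregs, bool skip_regulators)
    {
            if (!skip_regulators) {
                    enable_vreg(&vregs[0]);
                    enable_vreg(&vregs[1]);
            }
            /* ... then program and un-bypass the PLL ... */
    }

    int main(void)
    {
            struct vreg v[2] = { { "hfpll_a", 1 }, { "hfpll_b", 0 } };

            pll_enable(v, false);   /* reads better than pll_enable(v, 0) */
            return 0;
    }
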
diff --git a/arch/arm/mach-msm/acpuclock-krait.h b/arch/arm/mach-msm/acpuclock-krait.h
index fbf1f5f..7c1d2b6 100644
--- a/arch/arm/mach-msm/acpuclock-krait.h
+++ b/arch/arm/mach-msm/acpuclock-krait.h
@@ -40,6 +40,7 @@
PLL_0 = 0,
HFPLL,
QSB,
+ PLL_8,
};
/**
@@ -91,18 +92,17 @@
* struct vreg - Voltage regulator data.
* @name: Name of regulator.
* @max_vdd: Limit the maximum-settable voltage.
- * @rpm_vreg_id: ID to use with rpm_vreg_*() APIs.
* @reg: Regulator handle.
+ * @rpm_reg: RPM Regulator handle.
* @cur_vdd: Last-set voltage in uV.
* @peak_ua: Maximum current draw expected in uA.
*/
struct vreg {
- const char name[15];
+ const char *name;
const int max_vdd;
const int peak_ua;
- const int rpm_vreg_voter;
- const int rpm_vreg_id;
struct regulator *reg;
+ struct rpm_regulator *rpm_reg;
int cur_vdd;
};
@@ -147,7 +147,7 @@
const int use_for_scaling;
const struct core_speed speed;
const struct l2_level *l2_level;
- const int vdd_core;
+ int vdd_core;
};
/**
@@ -158,6 +158,10 @@
* @n_offset: "N" value register offset from base address.
* @config_offset: Configuration register offset from base address.
* @config_val: Value to initialize the @config_offset register to.
+ * @has_droop_ctl: Indicates the presence of a voltage droop controller.
+ * @droop_offset: Droop controller register offset from base address.
+ * @droop_val: Value to initialize the @droop_offset register to.
+ * @low_vdd_l_max: Maximum "L" value supported at HFPLL_VDD_LOW.
* @vdd: voltage requirements for each VDD level.
*/
struct hfpll_data {
@@ -167,6 +171,9 @@
const u32 n_offset;
const u32 config_offset;
const u32 config_val;
+ const bool has_droop_ctl;
+ const u32 droop_offset;
+ const u32 droop_val;
const u32 low_vdd_l_max;
const int vdd[NUM_HFPLL_VDD];
};
@@ -175,7 +182,7 @@
* struct scalable - Register locations and state associated with a scalable HW.
* @hfpll_phys_base: Physical base address of HFPLL register.
* @hfpll_base: Virtual base address of HFPLL registers.
- * @aux_clk_sel_addr: Virtual address of auxiliary MUX.
+ * @aux_clk_sel_phys: Physical address of auxiliary MUX.
* @aux_clk_sel: Auxiliary mux input to select at boot.
* @l2cpmr_iaddr: Indirect address of the CPMR MUX/divider CP15 register.
* @hfpll_data: Descriptive data of HFPLL hardware.
@@ -184,9 +191,9 @@
* @vreg: Array of voltage regulators needed by the scalable.
*/
struct scalable {
- const u32 hfpll_phys_base;
+ const phys_addr_t hfpll_phys_base;
void __iomem *hfpll_base;
- void __iomem *aux_clk_sel_addr;
+ const phys_addr_t aux_clk_sel_phys;
const u32 aux_clk_sel;
const u32 l2cpmr_iaddr;
const struct hfpll_data *hfpll_data;
@@ -206,10 +213,10 @@
*/
struct acpuclk_krait_params {
struct scalable *scalable;
- const struct acpu_level *pvs_acpu_freq_tbl[NUM_PVS];
+ struct acpu_level *pvs_acpu_freq_tbl[NUM_PVS];
const struct l2_level *l2_freq_tbl;
const size_t l2_freq_tbl_size;
- const u32 qfprom_phys_base;
+ const phys_addr_t qfprom_phys_base;
struct msm_bus_scale_pdata *bus_scale_data;
};
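
The header changes above make two representation switches: the regulator name
becomes a const char * rather than a fixed 15-byte array, so names are shared
string literals instead of truncated copies, and physical base addresses move
from u32 to phys_addr_t so the structures stay valid where physical addresses
are wider than 32 bits (the auxiliary MUX is now recorded as a physical address
and ioremapped only around the single write). A tiny illustration of the first
point, with a deliberately small array size:

    #include <stdio.h>
    #include <string.h>

    struct vreg_array   { char name[8]; };     /* copies and may truncate */
    struct vreg_pointer { const char *name; }; /* references the literal  */

    int main(void)
    {
            struct vreg_array   a;
            struct vreg_pointer p = { "krait2_hfpll" };

            strncpy(a.name, "krait2_hfpll", sizeof(a.name) - 1);
            a.name[sizeof(a.name) - 1] = '\0';

            printf("array: '%s'  pointer: '%s'\n", a.name, p.name);
            return 0;
    }
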
diff --git a/arch/arm/mach-msm/bam_dmux.c b/arch/arm/mach-msm/bam_dmux.c
index d53e471..3df566c 100644
--- a/arch/arm/mach-msm/bam_dmux.c
+++ b/arch/arm/mach-msm/bam_dmux.c
@@ -238,11 +238,12 @@
static DEFINE_SPINLOCK(wakelock_reference_lock);
static int wakelock_reference_count;
static int a2_pc_disabled_wakelock_skipped;
-static int disconnect_ack;
+static int disconnect_ack = 1;
static LIST_HEAD(bam_other_notify_funcs);
static DEFINE_MUTEX(smsm_cb_lock);
static DEFINE_MUTEX(delayed_ul_vote_lock);
static int need_delayed_ul_vote;
+static int power_management_only_mode;
struct outside_notify_func {
void (*notify)(void *, int, unsigned long);
@@ -561,9 +562,9 @@
bam_dmux_log("%s: opening cid %d PC enabled\n", __func__,
rx_hdr->ch_id);
handle_bam_mux_cmd_open(rx_hdr);
- if (rx_hdr->reserved & ENABLE_DISCONNECT_ACK) {
- bam_dmux_log("%s: activating disconnect ack\n");
- disconnect_ack = 1;
+ if (!(rx_hdr->reserved & ENABLE_DISCONNECT_ACK)) {
+ bam_dmux_log("%s: deactivating disconnect ack\n", __func__);
+ disconnect_ack = 0;
}
dev_kfree_skb_any(rx_skb);
break;
@@ -1688,21 +1689,28 @@
in_global_reset = 0;
vote_dfab();
- i = sps_device_reset(a2_device_handle);
- if (i)
- pr_err("%s: device reset failed rc = %d\n", __func__, i);
- i = sps_connect(bam_tx_pipe, &tx_connection);
- if (i)
- pr_err("%s: tx connection failed rc = %d\n", __func__, i);
- i = sps_connect(bam_rx_pipe, &rx_connection);
- if (i)
- pr_err("%s: rx connection failed rc = %d\n", __func__, i);
- i = sps_register_event(bam_tx_pipe, &tx_register_event);
- if (i)
- pr_err("%s: tx event reg failed rc = %d\n", __func__, i);
- i = sps_register_event(bam_rx_pipe, &rx_register_event);
- if (i)
- pr_err("%s: rx event reg failed rc = %d\n", __func__, i);
+ if (!power_management_only_mode) {
+ i = sps_device_reset(a2_device_handle);
+ if (i)
+ pr_err("%s: device reset failed rc = %d\n", __func__,
+ i);
+ i = sps_connect(bam_tx_pipe, &tx_connection);
+ if (i)
+ pr_err("%s: tx connection failed rc = %d\n", __func__,
+ i);
+ i = sps_connect(bam_rx_pipe, &rx_connection);
+ if (i)
+ pr_err("%s: rx connection failed rc = %d\n", __func__,
+ i);
+ i = sps_register_event(bam_tx_pipe, &tx_register_event);
+ if (i)
+ pr_err("%s: tx event reg failed rc = %d\n", __func__,
+ i);
+ i = sps_register_event(bam_rx_pipe, &rx_register_event);
+ if (i)
+ pr_err("%s: rx event reg failed rc = %d\n", __func__,
+ i);
+ }
bam_connection_is_active = 1;
@@ -1711,7 +1719,8 @@
toggle_apps_ack();
complete_all(&bam_connection_completion);
- queue_rx();
+ if (!power_management_only_mode)
+ queue_rx();
}
static void disconnect_to_bam(void)
@@ -1733,11 +1742,13 @@
/* tear down BAM connection */
INIT_COMPLETION(bam_connection_completion);
- sps_disconnect(bam_tx_pipe);
- sps_disconnect(bam_rx_pipe);
+ if (!power_management_only_mode) {
+ sps_disconnect(bam_tx_pipe);
+ sps_disconnect(bam_rx_pipe);
+ __memzero(rx_desc_mem_buf.base, rx_desc_mem_buf.size);
+ __memzero(tx_desc_mem_buf.base, tx_desc_mem_buf.size);
+ }
unvote_dfab();
- __memzero(rx_desc_mem_buf.base, rx_desc_mem_buf.size);
- __memzero(tx_desc_mem_buf.base, tx_desc_mem_buf.size);
mutex_lock(&bam_rx_pool_mutexlock);
while (!list_empty(&bam_rx_pool)) {
@@ -2081,7 +2092,6 @@
int ret;
void *a2_virt_addr;
- unvote_dfab();
/* init BAM */
a2_virt_addr = ioremap_nocache(A2_PHYS_BASE, A2_PHYS_SIZE);
if (!a2_virt_addr) {
@@ -2114,6 +2124,10 @@
mutex_unlock(&delayed_ul_vote_lock);
toggle_apps_ack();
+ power_management_only_mode = 1;
+ bam_connection_is_active = 1;
+ complete_all(&bam_connection_completion);
+
return 0;
register_bam_failed:
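
The bam_dmux changes above add a power_management_only_mode flag: when the
driver is brought up only to vote the A2 power state, the SPS pipe
setup/teardown and RX queueing in the connect and disconnect paths are skipped,
while the bam_connection_is_active bookkeeping and the completion still run so
waiters are released. The disconnect-ack default also flips to enabled and is
cleared only when the remote open does not advertise it. The skeleton below
shows only the gating pattern; the bodies are stand-ins for the real pipe
management.

    #include <stdbool.h>
    #include <stdio.h>

    static bool power_management_only_mode;

    static void setup_data_pipes(void)    { puts("connect tx/rx pipes"); }
    static void teardown_data_pipes(void) { puts("disconnect tx/rx pipes"); }

    static void connect_to_bam(void)
    {
            if (!power_management_only_mode)
                    setup_data_pipes();
            /* Bookkeeping that must happen in both modes. */
            puts("connection active, waiters released");
    }

    static void disconnect_from_bam(void)
    {
            if (!power_management_only_mode)
                    teardown_data_pipes();
            puts("votes dropped");
    }

    int main(void)
    {
            power_management_only_mode = true;  /* set on the reduced init path */
            connect_to_bam();
            disconnect_from_bam();
            return 0;
    }
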
diff --git a/arch/arm/mach-msm/board-8064-display.c b/arch/arm/mach-msm/board-8064-display.c
index 101a26d..5edddb5 100644
--- a/arch/arm/mach-msm/board-8064-display.c
+++ b/arch/arm/mach-msm/board-8064-display.c
@@ -473,11 +473,18 @@
}
}
+ rc = regulator_disable(reg_l11);
+ if (rc) {
+ pr_err("disable reg_l11 failed, rc=%d\n", rc);
+ return -ENODEV;
+ }
+
rc = regulator_disable(reg_lvs7);
if (rc) {
pr_err("disable reg_lvs7 failed, rc=%d\n", rc);
return -ENODEV;
}
+
rc = regulator_disable(reg_l2);
if (rc) {
pr_err("disable reg_l2 failed, rc=%d\n", rc);
diff --git a/arch/arm/mach-msm/board-8064-gpiomux.c b/arch/arm/mach-msm/board-8064-gpiomux.c
index b941bd4..ecd4e54 100644
--- a/arch/arm/mach-msm/board-8064-gpiomux.c
+++ b/arch/arm/mach-msm/board-8064-gpiomux.c
@@ -797,6 +797,13 @@
.pull = GPIOMUX_PULL_DOWN,
};
+static struct gpiomux_setting mdm2ap_pblrdy = {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_16MA,
+ .pull = GPIOMUX_PULL_DOWN,
+};
+
+
static struct gpiomux_setting ap2mdm_soft_reset_cfg = {
.func = GPIOMUX_FUNC_GPIO,
.drv = GPIOMUX_DRV_8MA,
@@ -852,6 +859,13 @@
[GPIOMUX_SUSPENDED] = &ap2mdm_wakeup,
}
},
+ /* MDM2AP_PBL_READY*/
+ {
+ .gpio = 46,
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &mdm2ap_pblrdy,
+ }
+ },
};
static struct gpiomux_setting mi2s_act_cfg = {
diff --git a/arch/arm/mach-msm/board-8064-gpu.c b/arch/arm/mach-msm/board-8064-gpu.c
index e24cac6..d877fd8 100644
--- a/arch/arm/mach-msm/board-8064-gpu.c
+++ b/arch/arm/mach-msm/board-8064-gpu.c
@@ -224,6 +224,7 @@
.set_grp_async = NULL,
.idle_timeout = HZ/10,
.nap_allowed = true,
+ .strtstp_sleepwake = true,
.clk_map = KGSL_CLK_CORE | KGSL_CLK_IFACE | KGSL_CLK_MEM_IFACE,
#ifdef CONFIG_MSM_BUS_SCALING
.bus_scale_table = &grp3d_bus_scale_pdata,
diff --git a/arch/arm/mach-msm/board-8064-regulator.c b/arch/arm/mach-msm/board-8064-regulator.c
index 622b213..7175123f 100644
--- a/arch/arm/mach-msm/board-8064-regulator.c
+++ b/arch/arm/mach-msm/board-8064-regulator.c
@@ -221,6 +221,7 @@
REGULATOR_SUPPLY("8921_lvs7", NULL),
REGULATOR_SUPPLY("pll_vdd", "pil_riva"),
REGULATOR_SUPPLY("lvds_vdda", "lvds.0"),
+ REGULATOR_SUPPLY("hdmi_pll_fs", "mdp.0"),
REGULATOR_SUPPLY("dsi1_vddio", "mipi_dsi.1"),
REGULATOR_SUPPLY("hdmi_vdda", "hdmi_msm.0"),
};
diff --git a/arch/arm/mach-msm/board-8064.c b/arch/arm/mach-msm/board-8064.c
index e4958f5..833a6d1 100644
--- a/arch/arm/mach-msm/board-8064.c
+++ b/arch/arm/mach-msm/board-8064.c
@@ -1220,6 +1220,7 @@
.name = "vibrator",
.dev_setup = isa1200_dev_setup,
.clk_enable = isa1200_clk_enable,
+ .need_pwm_clk = true,
.hap_en_gpio = ISA1200_HAP_EN_GPIO,
.hap_len_gpio = ISA1200_HAP_LEN_GPIO,
.max_timeout = 15000,
@@ -1696,12 +1697,22 @@
};
#endif
+static struct mdm_vddmin_resource mdm_vddmin_rscs = {
+ .rpm_id = MSM_RPM_ID_VDDMIN_GPIO,
+ .ap2mdm_vddmin_gpio = 30,
+ .modes = 0x03,
+ .drive_strength = 8,
+ .mdm2ap_vddmin_gpio = 80,
+};
+
static struct mdm_platform_data mdm_platform_data = {
.mdm_version = "3.0",
.ramdump_delay_ms = 2000,
.early_power_on = 1,
.sfr_query = 1,
+ .vddmin_resource = &mdm_vddmin_rscs,
.peripheral_platform_device = &apq8064_device_hsic_host,
+ .ramdump_timeout_ms = 120000,
};
static struct tsens_platform_data apq_tsens_pdata = {
diff --git a/arch/arm/mach-msm/board-8930-gpu.c b/arch/arm/mach-msm/board-8930-gpu.c
index 3c3843a..c9021f3 100644
--- a/arch/arm/mach-msm/board-8930-gpu.c
+++ b/arch/arm/mach-msm/board-8930-gpu.c
@@ -139,6 +139,7 @@
.set_grp_async = NULL,
.idle_timeout = HZ/12,
.nap_allowed = true,
+ .strtstp_sleepwake = true,
.clk_map = KGSL_CLK_CORE | KGSL_CLK_IFACE | KGSL_CLK_MEM_IFACE,
#ifdef CONFIG_MSM_BUS_SCALING
.bus_scale_table = &grp3d_bus_scale_pdata,
diff --git a/arch/arm/mach-msm/board-8930-regulator.c b/arch/arm/mach-msm/board-8930-regulator.c
index 5bee8a2..bc370ba 100644
--- a/arch/arm/mach-msm/board-8930-regulator.c
+++ b/arch/arm/mach-msm/board-8930-regulator.c
@@ -70,6 +70,8 @@
REGULATOR_SUPPLY("cam_vaf", "4-0048"),
REGULATOR_SUPPLY("cam_vana", "4-0020"),
REGULATOR_SUPPLY("cam_vaf", "4-0020"),
+ REGULATOR_SUPPLY("vdd", "12-0018"),
+ REGULATOR_SUPPLY("vdd", "12-0068"),
};
VREG_CONSUMERS(L10) = {
REGULATOR_SUPPLY("8038_l10", NULL),
@@ -186,6 +188,8 @@
REGULATOR_SUPPLY("vcc_i2c", "3-004a"),
REGULATOR_SUPPLY("vcc_i2c", "3-0024"),
REGULATOR_SUPPLY("vcc_i2c", "0-0048"),
+ REGULATOR_SUPPLY("vddio", "12-0018"),
+ REGULATOR_SUPPLY("vlogic", "12-0068"),
};
VREG_CONSUMERS(EXT_5V) = {
REGULATOR_SUPPLY("ext_5v", NULL),
diff --git a/arch/arm/mach-msm/board-8930-storage.c b/arch/arm/mach-msm/board-8930-storage.c
index 739b1c7..5c0a84c 100644
--- a/arch/arm/mach-msm/board-8930-storage.c
+++ b/arch/arm/mach-msm/board-8930-storage.c
@@ -66,7 +66,8 @@
.lpm_sup = 1,
.hpm_uA = 800000, /* 800mA */
.lpm_uA = 9000,
- }
+ .reset_at_init = true,
+ },
};
/* All SDCC controllers may require voting for VDD PAD voltage */
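
The sdcc change above is largely syntactic: the last initializer previously
ended in a bare "}", so adding .reset_at_init also required turning it into
"},". Keeping a trailing comma on every element means later additions only
touch the new line. A trivial stand-alone illustration:

    #include <stdio.h>

    static const char *names[] = {
            "alpha",
            "beta",   /* trailing comma: adding an entry never edits this line */
            "gamma",
    };

    int main(void)
    {
            unsigned i;

            for (i = 0; i < sizeof(names) / sizeof(names[0]); i++)
                    printf("%s\n", names[i]);
            return 0;
    }
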
diff --git a/arch/arm/mach-msm/board-8930.c b/arch/arm/mach-msm/board-8930.c
index ab9fe5e..1a61dbb 100644
--- a/arch/arm/mach-msm/board-8930.c
+++ b/arch/arm/mach-msm/board-8930.c
@@ -78,6 +78,11 @@
#include <mach/mdm2.h>
#include <mach/msm_rtb.h>
#include <linux/fmem.h>
+#include <mach/msm_cache_dump.h>
+
+#ifdef CONFIG_INPUT_MPU3050
+#include <linux/input/mpu3050.h>
+#endif
#include "timer.h"
#include "devices.h"
@@ -625,6 +630,19 @@
msm8930_mdp_writeback(msm8930_reserve_table);
}
+#ifdef CONFIG_MSM_CACHE_DUMP
+static void __init reserve_cache_dump_memory(void)
+{
+ unsigned int total;
+
+ total = msm8930_cache_dump_pdata.l1_size +
+ msm8930_cache_dump_pdata.l2_size;
+ msm8930_reserve_table[MEMTYPE_EBI1].size += total;
+}
+#else
+static void __init reserve_cache_dump_memory(void) { }
+#endif
+
static void __init msm8930_calculate_reserve_sizes(void)
{
size_pmem_devices();
@@ -632,6 +650,7 @@
reserve_ion_memory();
reserve_mdp_memory();
reserve_rtb_memory();
+ reserve_cache_dump_memory();
}
static struct reserve_info msm8930_reserve_info __initdata = {
@@ -2145,6 +2164,7 @@
&msm8960_device_cache_erp,
&msm8930_iommu_domain_device,
&msm_tsens_device,
+ &msm8930_cache_dump_device,
};
static struct platform_device *cdp_devices[] __initdata = {
@@ -2311,6 +2331,21 @@
int len;
};
+#ifdef CONFIG_INPUT_MPU3050
+#define MPU3050_INT_GPIO 69
+
+static struct mpu3050_gyro_platform_data mpu3050_gyro = {
+ .gpio_int = MPU3050_INT_GPIO,
+};
+
+static struct i2c_board_info __initdata mpu3050_i2c_boardinfo[] = {
+ {
+ I2C_BOARD_INFO("mpu3050", 0x68),
+ .platform_data = &mpu3050_gyro,
+ },
+};
+#endif
+
#ifdef CONFIG_ISL9519_CHARGER
static struct isl_platform_data isl_data __initdata = {
.valid_n_gpio = 0, /* Not required when notify-by-pmic */
@@ -2340,6 +2375,14 @@
ARRAY_SIZE(isl_charger_i2c_info),
},
#endif /* CONFIG_ISL9519_CHARGER */
+#ifdef CONFIG_INPUT_MPU3050
+ {
+ I2C_FFA | I2C_FLUID,
+ MSM_8930_GSBI12_QUP_I2C_BUS_ID,
+ mpu3050_i2c_boardinfo,
+ ARRAY_SIZE(mpu3050_i2c_boardinfo),
+ },
+#endif
{
I2C_SURF | I2C_FFA | I2C_FLUID,
MSM_8930_GSBI9_QUP_I2C_BUS_ID,
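
Two independent additions sit in the board-8930.c diff above: the cache-dump
reservation simply adds the L1 and L2 dump buffer sizes from the platform data
to the EBI1 slot of the reserve table, and the MPU3050 gyro is described as an
ordinary I2C board-info entry on GSBI12 for the FFA/Fluid variants. The
reservation arithmetic, modelled stand-alone with invented sizes:

    #include <stdio.h>

    enum { MEMTYPE_EBI1, NUM_MEMTYPES };

    struct memtype_reserve { unsigned long size; };
    struct cache_dump_pdata { unsigned long l1_size, l2_size; };

    static struct memtype_reserve reserve_table[NUM_MEMTYPES];

    static void reserve_cache_dump_memory(const struct cache_dump_pdata *p)
    {
            reserve_table[MEMTYPE_EBI1].size += p->l1_size + p->l2_size;
    }

    int main(void)
    {
            struct cache_dump_pdata pdata = { 0x100000, 0x200000 };

            reserve_table[MEMTYPE_EBI1].size = 0x800000; /* earlier reservations */
            reserve_cache_dump_memory(&pdata);
            printf("EBI1 reservation: 0x%lx bytes\n",
                   reserve_table[MEMTYPE_EBI1].size);
            return 0;
    }
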
diff --git a/arch/arm/mach-msm/board-8930.h b/arch/arm/mach-msm/board-8930.h
index e564aff..9f6276c 100644
--- a/arch/arm/mach-msm/board-8930.h
+++ b/arch/arm/mach-msm/board-8930.h
@@ -138,5 +138,7 @@
#define MSM_8930_GSBI4_QUP_I2C_BUS_ID 4
#define MSM_8930_GSBI9_QUP_I2C_BUS_ID 0
#define MSM_8930_GSBI10_QUP_I2C_BUS_ID 10
+#define MSM_8930_GSBI12_QUP_I2C_BUS_ID 12
extern struct msm_rtb_platform_data msm8930_rtb_pdata;
+extern struct msm_cache_dump_platform_data msm8930_cache_dump_pdata;
diff --git a/arch/arm/mach-msm/board-8960-regulator.c b/arch/arm/mach-msm/board-8960-regulator.c
index bc5a892..6bd1b7d 100644
--- a/arch/arm/mach-msm/board-8960-regulator.c
+++ b/arch/arm/mach-msm/board-8960-regulator.c
@@ -116,6 +116,7 @@
REGULATOR_SUPPLY("8921_l23", NULL),
REGULATOR_SUPPLY("dsi_vddio", "mipi_dsi.1"),
REGULATOR_SUPPLY("hdmi_avdd", "hdmi_msm.0"),
+ REGULATOR_SUPPLY("hdmi_pll_fs", "mdp.0"),
REGULATOR_SUPPLY("pll_vdd", "pil_riva"),
REGULATOR_SUPPLY("pll_vdd", "pil_qdsp6v4.1"),
REGULATOR_SUPPLY("pll_vdd", "pil_qdsp6v4.2"),
diff --git a/arch/arm/mach-msm/board-8960.c b/arch/arm/mach-msm/board-8960.c
index 628a324..6fd2b4d 100644
--- a/arch/arm/mach-msm/board-8960.c
+++ b/arch/arm/mach-msm/board-8960.c
@@ -1291,6 +1291,7 @@
.ramdump_delay_ms = 1000,
.soft_reset_inverted = 1,
.peripheral_platform_device = NULL,
+ .ramdump_timeout_ms = 600000,
};
#define MSM_TSIF0_PHYS (0x18200000)
@@ -1416,6 +1417,7 @@
static struct msm_spi_platform_data msm8960_qup_spi_gsbi1_pdata = {
.max_clock_speed = 15060000,
+ .infinite_mode = 1,
};
#ifdef CONFIG_USB_MSM_OTG_72K
diff --git a/arch/arm/mach-msm/board-9615.c b/arch/arm/mach-msm/board-9615.c
index 568de46..7181990 100644
--- a/arch/arm/mach-msm/board-9615.c
+++ b/arch/arm/mach-msm/board-9615.c
@@ -194,7 +194,8 @@
/* Initial PM8018 GPIO configurations */
static struct pm8xxx_gpio_init pm8018_gpios[] __initdata = {
- PM8018_GPIO_OUTPUT(2, 0, HIGH) /* EXT_LDO_EN_WLAN */
+ PM8018_GPIO_OUTPUT(2, 0, HIGH), /* EXT_LDO_EN_WLAN */
+ PM8018_GPIO_OUTPUT(6, 0, LOW), /* WLAN_CLK_PWR_REQ */
};
/* Initial PM8018 MPP configurations */
diff --git a/arch/arm/mach-msm/board-copper-regulator.c b/arch/arm/mach-msm/board-copper-regulator.c
index 7543872..10d5d0b 100644
--- a/arch/arm/mach-msm/board-copper-regulator.c
+++ b/arch/arm/mach-msm/board-copper-regulator.c
@@ -22,16 +22,16 @@
* regulator name consumer dev_name
*/
VREG_CONSUMERS(K0) = {
- REGULATOR_SUPPLY("krait0", NULL),
+ REGULATOR_SUPPLY("krait0", "f9000000.qcom,acpuclk"),
};
VREG_CONSUMERS(K1) = {
- REGULATOR_SUPPLY("krait1", NULL),
+ REGULATOR_SUPPLY("krait1", "f9000000.qcom,acpuclk"),
};
VREG_CONSUMERS(K2) = {
- REGULATOR_SUPPLY("krait2", NULL),
+ REGULATOR_SUPPLY("krait2", "f9000000.qcom,acpuclk"),
};
VREG_CONSUMERS(K3) = {
- REGULATOR_SUPPLY("krait3", NULL),
+ REGULATOR_SUPPLY("krait3", "f9000000.qcom,acpuclk"),
};
#define PM8X41_VREG_INIT(_id, _name, _min_uV, _max_uV, _modes, _ops, \
diff --git a/arch/arm/mach-msm/board-copper.c b/arch/arm/mach-msm/board-copper.c
index 85241a4..2c5512a 100644
--- a/arch/arm/mach-msm/board-copper.c
+++ b/arch/arm/mach-msm/board-copper.c
@@ -43,6 +43,7 @@
#include <mach/rpm-regulator-smd.h>
#include <mach/qpnp-int.h>
#include <mach/socinfo.h>
+#include <mach/msm_bus_board.h>
#include "clock.h"
#include "devices.h"
#include "spm.h"
@@ -268,7 +269,7 @@
},
{
.irq_config_id = SMD_Q6,
- .subsys_name = "q6",
+ .subsys_name = "adsp",
.edge = SMD_APPS_QDSP,
.smd_int.irq_name = "adsp_smd_in",
@@ -410,6 +411,141 @@
.resource = copper_tzlog_resources,
};
+#define BIMC_BASE 0xfc380000
+#define BIMC_SIZE 0x0006A000
+#define SYS_NOC_BASE 0xfc460000
+#define PERIPH_NOC_BASE 0xFC468000
+#define OCMEM_NOC_BASE 0xfc470000
+#define MMSS_NOC_BASE 0xfc478000
+#define CONFIG_NOC_BASE 0xfc480000
+#define NOC_SIZE 0x00004000
+
+static struct resource bimc_res[] = {
+ {
+ .start = BIMC_BASE,
+ .end = BIMC_BASE + BIMC_SIZE,
+ .flags = IORESOURCE_MEM,
+ .name = "bimc_mem",
+ },
+};
+
+static struct resource ocmem_noc_res[] = {
+ {
+ .start = OCMEM_NOC_BASE,
+ .end = OCMEM_NOC_BASE + NOC_SIZE,
+ .flags = IORESOURCE_MEM,
+ .name = "ocmem_noc_mem",
+ },
+};
+
+static struct resource mmss_noc_res[] = {
+ {
+ .start = MMSS_NOC_BASE,
+ .end = MMSS_NOC_BASE + NOC_SIZE,
+ .flags = IORESOURCE_MEM,
+ .name = "mmss_noc_mem",
+ },
+};
+
+static struct resource sys_noc_res[] = {
+ {
+ .start = SYS_NOC_BASE,
+ .end = SYS_NOC_BASE + NOC_SIZE,
+ .flags = IORESOURCE_MEM,
+ .name = "sys_noc_mem",
+ },
+};
+
+static struct resource config_noc_res[] = {
+ {
+ .start = CONFIG_NOC_BASE,
+ .end = CONFIG_NOC_BASE + NOC_SIZE,
+ .flags = IORESOURCE_MEM,
+ .name = "config_noc_mem",
+ },
+};
+
+static struct resource periph_noc_res[] = {
+ {
+ .start = PERIPH_NOC_BASE,
+ .end = PERIPH_NOC_BASE + NOC_SIZE,
+ .flags = IORESOURCE_MEM,
+ .name = "periph_noc_mem",
+ },
+};
+
+static struct platform_device msm_bus_sys_noc = {
+ .name = "msm_bus_fabric",
+ .id = MSM_BUS_FAB_SYS_NOC,
+ .num_resources = ARRAY_SIZE(sys_noc_res),
+ .resource = sys_noc_res,
+};
+
+static struct platform_device msm_bus_bimc = {
+ .name = "msm_bus_fabric",
+ .id = MSM_BUS_FAB_BIMC,
+ .num_resources = ARRAY_SIZE(bimc_res),
+ .resource = bimc_res,
+};
+
+static struct platform_device msm_bus_mmss_noc = {
+ .name = "msm_bus_fabric",
+ .id = MSM_BUS_FAB_MMSS_NOC,
+ .num_resources = ARRAY_SIZE(mmss_noc_res),
+ .resource = mmss_noc_res,
+};
+
+static struct platform_device msm_bus_ocmem_noc = {
+ .name = "msm_bus_fabric",
+ .id = MSM_BUS_FAB_OCMEM_NOC,
+ .num_resources = ARRAY_SIZE(ocmem_noc_res),
+ .resource = ocmem_noc_res,
+};
+
+static struct platform_device msm_bus_periph_noc = {
+ .name = "msm_bus_fabric",
+ .id = MSM_BUS_FAB_PERIPH_NOC,
+ .num_resources = ARRAY_SIZE(periph_noc_res),
+ .resource = periph_noc_res,
+};
+
+static struct platform_device msm_bus_config_noc = {
+ .name = "msm_bus_fabric",
+ .id = MSM_BUS_FAB_CONFIG_NOC,
+ .num_resources = ARRAY_SIZE(config_noc_res),
+ .resource = config_noc_res,
+};
+
+static struct platform_device msm_bus_ocmem_vnoc = {
+ .name = "msm_bus_fabric",
+ .id = MSM_BUS_FAB_OCMEM_VNOC,
+};
+
+static struct platform_device *msm_bus_copper_devices[] = {
+ &msm_bus_sys_noc,
+ &msm_bus_bimc,
+ &msm_bus_mmss_noc,
+ &msm_bus_ocmem_noc,
+ &msm_bus_periph_noc,
+ &msm_bus_config_noc,
+ &msm_bus_ocmem_vnoc,
+};
+
+static void __init msmcopper_init_buses(void)
+{
+#ifdef CONFIG_MSM_BUS_SCALING
+ msm_bus_sys_noc.dev.platform_data =
+ &msm_bus_copper_sys_noc_pdata;
+ msm_bus_bimc.dev.platform_data = &msm_bus_copper_bimc_pdata;
+ msm_bus_mmss_noc.dev.platform_data = &msm_bus_copper_mmss_noc_pdata;
+ msm_bus_ocmem_noc.dev.platform_data = &msm_bus_copper_ocmem_noc_pdata;
+ msm_bus_periph_noc.dev.platform_data = &msm_bus_copper_periph_noc_pdata;
+ msm_bus_config_noc.dev.platform_data = &msm_bus_copper_config_noc_pdata;
+ msm_bus_ocmem_vnoc.dev.platform_data = &msm_bus_copper_ocmem_vnoc_pdata;
+#endif
+ platform_add_devices(msm_bus_copper_devices,
+ ARRAY_SIZE(msm_bus_copper_devices));
+}
void __init msm_copper_add_devices(void)
{
@@ -476,6 +612,7 @@
msm_clock_init(&msm_dummy_clock_init_data);
else
msm_clock_init(&msmcopper_clock_init_data);
+ msmcopper_init_buses();
}
static struct of_device_id irq_match[] __initdata = {
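
The bus fabric block added to board-copper.c above registers several platform
devices that all share the name "msm_bus_fabric" but carry distinct ids and
register windows, so one driver can bind to each NoC and to BIMC; platform data
is attached only when bus scaling is configured. The sketch below models the
one-name/many-ids idea with plain structures rather than the kernel's platform
device API; the ids stand in for the MSM_BUS_FAB_* enum values.

    #include <stdio.h>

    struct fake_device {
            const char *name;    /* one driver name shared by every instance */
            int id;              /* distinguishes BIMC, system NoC, config NoC, ... */
            unsigned long base;  /* register window for this instance */
    };

    static const struct fake_device buses[] = {
            { "msm_bus_fabric", 0, 0xfc380000 },  /* BIMC       */
            { "msm_bus_fabric", 1, 0xfc460000 },  /* system NoC */
            { "msm_bus_fabric", 2, 0xfc480000 },  /* config NoC */
    };

    static void probe(const struct fake_device *d)
    {
            printf("bind %s.%d at 0x%lx\n", d->name, d->id, d->base);
    }

    int main(void)
    {
            unsigned i;

            for (i = 0; i < sizeof(buses) / sizeof(buses[0]); i++)
                    probe(&buses[i]);
            return 0;
    }
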
diff --git a/arch/arm/mach-msm/board-dt.c b/arch/arm/mach-msm/board-dt.c
index 674df09..8a801c2 100644
--- a/arch/arm/mach-msm/board-dt.c
+++ b/arch/arm/mach-msm/board-dt.c
@@ -85,7 +85,6 @@
.handle_irq = gic_handle_irq,
.timer = &msm_dt_timer,
.dt_compat = msm_dt_match,
- .nr_irqs = -1,
.reserve = msm_dt_reserve,
.init_very_early = msm_dt_init_very_early,
MACHINE_END
diff --git a/arch/arm/mach-msm/board-fsm9xxx.c b/arch/arm/mach-msm/board-fsm9xxx.c
index b071353..0e2aa3b 100644
--- a/arch/arm/mach-msm/board-fsm9xxx.c
+++ b/arch/arm/mach-msm/board-fsm9xxx.c
@@ -89,6 +89,15 @@
#define GPIO_USER_FIRST 58
#define GPIO_USER_LAST 63
+#define GPIO_UIM_RESET 75
+#define GPIO_UIM_DATA_IO 76
+#define GPIO_UIM_CLOCK 77
+
+#define GPIO_PM_UIM_M_RST 26 /* UIM_RST input */
+#define GPIO_PM_UIM_RST 27 /* UIM_RST output */
+#define GPIO_PM_UIM_M_CLK 28 /* UIM_CLK input */
+#define GPIO_PM_UIM_CLK 29 /* UIM_CLK output */
+
#define FPGA_SDCC_STATUS 0x8E0001A8
/* Macros assume PMIC GPIOs start at 0 */
@@ -100,6 +109,8 @@
#define PMIC_GPIO_5V_PA_PWR 21 /* PMIC GPIO Number 22 */
#define PMIC_GPIO_4_2V_PA_PWR 22 /* PMIC GPIO Number 23 */
+#define PMIC_MPP_UIM_M_DATA 0 /* UIM_DATA input */
+#define PMIC_MPP_UIM_DATA 1 /* UIM_DATA output */
#define PMIC_MPP_3 2 /* PMIC MPP Number 3 */
#define PMIC_MPP_6 5 /* PMIC MPP Number 6 */
#define PMIC_MPP_7 6 /* PMIC MPP Number 7 */
@@ -181,6 +192,10 @@
PM8XXX_MPP_AOUT_LVL_1V25_2, AOUT_CTRL_ENABLE),
PM8XXX_MPP_INIT(PMIC_MPP_6, A_OUTPUT,
PM8XXX_MPP_AOUT_LVL_1V25_2, AOUT_CTRL_ENABLE),
+ PM8XXX_MPP_INIT(PMIC_MPP_UIM_M_DATA, D_BI_DIR,
+ PM8058_MPP_DIG_LEVEL_L3, BI_PULLUP_30KOHM),
+ PM8XXX_MPP_INIT(PMIC_MPP_UIM_DATA, D_BI_DIR,
+ PM8058_MPP_DIG_LEVEL_L3, BI_PULLUP_30KOHM),
};
for (i = 0; i < ARRAY_SIZE(pm8058_mpps); i++) {
@@ -597,6 +612,52 @@
}
#endif
+static struct msm_gpio uart3_uim_config_data[] = {
+ { GPIO_CFG(GPIO_UIM_RESET, 0, GPIO_CFG_OUTPUT,
+ GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), "UIM_Reset" },
+ { GPIO_CFG(GPIO_UIM_DATA_IO, 2, GPIO_CFG_OUTPUT,
+ GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), "UIM_Data" },
+ { GPIO_CFG(GPIO_UIM_CLOCK, 2, GPIO_CFG_OUTPUT,
+ GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), "UIM_Clock" },
+};
+
+static void fsm9xxx_init_uart3_uim(void)
+{
+ struct pm_gpio pmic_uim_gpio_in = {
+ .direction = PM_GPIO_DIR_IN,
+ .pull = PM_GPIO_PULL_NO,
+ .out_strength = PM_GPIO_STRENGTH_HIGH,
+ .function = PM_GPIO_FUNC_PAIRED,
+ .vin_sel = PM8058_GPIO_VIN_L3,
+ };
+ struct pm_gpio pmic_uim_gpio_out = {
+ .direction = PM_GPIO_DIR_OUT,
+ .pull = PM_GPIO_PULL_NO,
+ .out_strength = PM_GPIO_STRENGTH_HIGH,
+ .function = PM_GPIO_FUNC_PAIRED,
+ .vin_sel = PM8058_GPIO_VIN_L3,
+ };
+
+ /* TLMM */
+ msm_gpios_request_enable(uart3_uim_config_data,
+ ARRAY_SIZE(uart3_uim_config_data));
+
+ /* Put UIM to reset state */
+ gpio_direction_output(GPIO_UIM_RESET, 0);
+ gpio_set_value(GPIO_UIM_RESET, 0);
+ gpio_export(GPIO_UIM_RESET, false);
+
+ /* PMIC */
+ pm8xxx_gpio_config(PM8058_GPIO_PM_TO_SYS(GPIO_PM_UIM_M_RST),
+ &pmic_uim_gpio_in);
+ pm8xxx_gpio_config(PM8058_GPIO_PM_TO_SYS(GPIO_PM_UIM_RST),
+ &pmic_uim_gpio_out);
+ pm8xxx_gpio_config(PM8058_GPIO_PM_TO_SYS(GPIO_PM_UIM_M_CLK),
+ &pmic_uim_gpio_in);
+ pm8xxx_gpio_config(PM8058_GPIO_PM_TO_SYS(GPIO_PM_UIM_CLK),
+ &pmic_uim_gpio_out);
+}
+
/*
* SSBI
*/
@@ -833,6 +894,7 @@
#if defined(CONFIG_SERIAL_MSM) || defined(CONFIG_MSM_SERIAL_DEBUGGER)
&msm_device_uart1,
#endif
+ &msm_device_uart3,
#if defined(CONFIG_QFP_FUSE)
&fsm_qfp_fuse_device,
#endif
@@ -903,6 +965,7 @@
#ifdef CONFIG_SERIAL_MSM_CONSOLE
fsm9xxx_init_uart1();
#endif
+ fsm9xxx_init_uart3_uim();
#ifdef CONFIG_I2C_SSBI
msm_device_ssbi2.dev.platform_data = &msm_i2c_ssbi2_pdata;
msm_device_ssbi3.dev.platform_data = &msm_i2c_ssbi3_pdata;
diff --git a/arch/arm/mach-msm/board-msm7627a-io.c b/arch/arm/mach-msm/board-msm7627a-io.c
index ec168f9..22095cd 100644
--- a/arch/arm/mach-msm/board-msm7627a-io.c
+++ b/arch/arm/mach-msm/board-msm7627a-io.c
@@ -229,7 +229,7 @@
static int mxt_vkey_setup(void)
{
- int retval;
+ int retval = 0;
mxt_virtual_key_properties_kobj =
kobject_create_and_add("board_properties", NULL);
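
The one-character change above (int retval; becoming int retval = 0;) matters
because mxt_vkey_setup() only assigned retval on its error branches, so the
fully successful path returned an indeterminate value. A reduced example of the
bug shape and the fix:

    #include <stdio.h>

    /* "ret" is assigned only on failure; without the initializer, the success
     * path would return whatever happened to be in the variable. */
    static int setup(int fail)
    {
            int ret = 0;        /* was: int ret; */

            if (fail)
                    ret = -1;
            return ret;
    }

    int main(void)
    {
            printf("success: %d, failure: %d\n", setup(0), setup(1));
            return 0;
    }
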
diff --git a/arch/arm/mach-msm/board-msm7x27a.c b/arch/arm/mach-msm/board-msm7x27a.c
index dc473e6..7db4bda 100644
--- a/arch/arm/mach-msm/board-msm7x27a.c
+++ b/arch/arm/mach-msm/board-msm7x27a.c
@@ -50,6 +50,7 @@
#include <linux/atmel_maxtouch.h>
#include <linux/fmem.h>
#include <linux/msm_adc.h>
+#include <linux/ion.h>
#include "devices.h"
#include "timer.h"
#include "board-msm7x27a-regulator.h"
@@ -167,6 +168,15 @@
#endif
+#ifdef CONFIG_ION_MSM
+#define MSM_ION_HEAP_NUM 4
+static struct platform_device ion_dev;
+static int msm_ion_camera_size;
+static int msm_ion_audio_size;
+static int msm_ion_sf_size;
+#endif
+
+
static struct android_usb_platform_data android_usb_pdata = {
.update_pid_and_serial_num = usb_diag_update_pid_and_serial_num,
};
@@ -822,6 +832,9 @@
&asoc_msm_dai1,
&msm_batt_device,
&msm_adc_device,
+#ifdef CONFIG_ION_MSM
+ &ion_dev,
+#endif
};
static struct platform_device *msm8625_surf_devices[] __initdata = {
@@ -853,6 +866,81 @@
}
early_param("pmem_audio_size", pmem_audio_size_setup);
+static void fix_sizes(void)
+{
+ if (machine_is_msm7625a_surf() || machine_is_msm7625a_ffa()) {
+ pmem_mdp_size = MSM7x25A_MSM_PMEM_MDP_SIZE;
+ pmem_adsp_size = MSM7x25A_MSM_PMEM_ADSP_SIZE;
+ } else {
+ pmem_mdp_size = MSM_PMEM_MDP_SIZE;
+ pmem_adsp_size = MSM_PMEM_ADSP_SIZE;
+ }
+#ifdef CONFIG_ION_MSM
+ msm_ion_camera_size = pmem_adsp_size;
+ msm_ion_audio_size = (MSM_PMEM_AUDIO_SIZE + PMEM_KERNEL_EBI1_SIZE);
+ msm_ion_sf_size = pmem_mdp_size;
+#endif
+}
+
+#ifdef CONFIG_ION_MSM
+#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
+static struct ion_co_heap_pdata co_ion_pdata = {
+ .adjacent_mem_id = INVALID_HEAP_ID,
+ .align = PAGE_SIZE,
+};
+#endif
+
+/**
+ * These heaps are listed in the order they will be allocated.
+ * Don't swap the order unless you know what you are doing!
+ */
+static struct ion_platform_data ion_pdata = {
+ .nr = MSM_ION_HEAP_NUM,
+ .heaps = {
+ {
+ .id = ION_SYSTEM_HEAP_ID,
+ .type = ION_HEAP_TYPE_SYSTEM,
+ .name = ION_VMALLOC_HEAP_NAME,
+ },
+#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
+ /* PMEM_ADSP = CAMERA */
+ {
+ .id = ION_CAMERA_HEAP_ID,
+ .type = ION_HEAP_TYPE_CARVEOUT,
+ .name = ION_CAMERA_HEAP_NAME,
+ .memory_type = ION_EBI_TYPE,
+ .has_outer_cache = 1,
+ .extra_data = (void *)&co_ion_pdata,
+ },
+ /* PMEM_AUDIO */
+ {
+ .id = ION_AUDIO_HEAP_ID,
+ .type = ION_HEAP_TYPE_CARVEOUT,
+ .name = ION_AUDIO_HEAP_NAME,
+ .memory_type = ION_EBI_TYPE,
+ .has_outer_cache = 1,
+ .extra_data = (void *)&co_ion_pdata,
+ },
+ /* PMEM_MDP = SF */
+ {
+ .id = ION_SF_HEAP_ID,
+ .type = ION_HEAP_TYPE_CARVEOUT,
+ .name = ION_SF_HEAP_NAME,
+ .memory_type = ION_EBI_TYPE,
+ .has_outer_cache = 1,
+ .extra_data = (void *)&co_ion_pdata,
+ },
+#endif
+ }
+};
+
+static struct platform_device ion_dev = {
+ .name = "ion-msm",
+ .id = 1,
+ .dev = { .platform_data = &ion_pdata },
+};
+#endif
+
static struct memtype_reserve msm7x27a_reserve_table[] __initdata = {
[MEMTYPE_SMI] = {
},
@@ -865,27 +953,22 @@
};
#ifdef CONFIG_ANDROID_PMEM
+#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
static struct android_pmem_platform_data *pmem_pdata_array[] __initdata = {
&android_pmem_adsp_pdata,
&android_pmem_audio_pdata,
&android_pmem_pdata,
};
#endif
+#endif
static void __init size_pmem_devices(void)
{
#ifdef CONFIG_ANDROID_PMEM
+#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
unsigned int i;
unsigned int reusable_count = 0;
- if (machine_is_msm7625a_surf() || machine_is_msm7625a_ffa()) {
- pmem_mdp_size = MSM7x25A_MSM_PMEM_MDP_SIZE;
- pmem_adsp_size = MSM7x25A_MSM_PMEM_ADSP_SIZE;
- } else {
- pmem_mdp_size = MSM_PMEM_MDP_SIZE;
- pmem_adsp_size = MSM_PMEM_ADSP_SIZE;
- }
-
android_pmem_adsp_pdata.size = pmem_adsp_size;
android_pmem_pdata.size = pmem_mdp_size;
android_pmem_audio_pdata.size = pmem_audio_size;
@@ -910,29 +993,56 @@
}
}
#endif
-
+#endif
}
+#ifdef CONFIG_ANDROID_PMEM
+#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
static void __init reserve_memory_for(struct android_pmem_platform_data *p)
{
msm7x27a_reserve_table[p->memory_type].size += p->size;
}
+#endif
+#endif
static void __init reserve_pmem_memory(void)
{
#ifdef CONFIG_ANDROID_PMEM
+#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
unsigned int i;
for (i = 0; i < ARRAY_SIZE(pmem_pdata_array); ++i)
reserve_memory_for(pmem_pdata_array[i]);
msm7x27a_reserve_table[MEMTYPE_EBI1].size += pmem_kernel_ebi1_size;
#endif
+#endif
+}
+
+static void __init size_ion_devices(void)
+{
+#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
+ ion_pdata.heaps[1].size = msm_ion_camera_size;
+ ion_pdata.heaps[2].size = msm_ion_audio_size;
+ ion_pdata.heaps[3].size = msm_ion_sf_size;
+#endif
+}
+
+static void __init reserve_ion_memory(void)
+{
+#if defined(CONFIG_ION_MSM) && defined(CONFIG_MSM_MULTIMEDIA_USE_ION)
+ msm7x27a_reserve_table[MEMTYPE_EBI1].size += msm_ion_camera_size;
+ msm7x27a_reserve_table[MEMTYPE_EBI1].size += msm_ion_audio_size;
+ msm7x27a_reserve_table[MEMTYPE_EBI1].size += msm_ion_sf_size;
+#endif
}
static void __init msm7x27a_calculate_reserve_sizes(void)
{
+ fix_sizes();
size_pmem_devices();
reserve_pmem_memory();
+ size_ion_devices();
+ reserve_ion_memory();
}
static int msm7x27a_paddr_to_memtype(unsigned int paddr)
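
The ION plumbing added to board-msm7x27a.c above follows a three-step flow:
fix_sizes() picks the pmem sizes for the target (smaller on MSM7x25A) and
mirrors them into the camera/audio/SF carveout sizes, size_ion_devices() writes
those sizes into heaps 1-3 of the ION platform data, and reserve_ion_memory()
adds the same totals to the EBI1 reserve table. A stand-alone model of the flow
with invented sizes (only the ordering and the accumulation match the diff):

    #include <stdio.h>

    static unsigned long pmem_mdp_size, pmem_adsp_size;
    static unsigned long ion_camera_size, ion_audio_size, ion_sf_size;
    static unsigned long heap_size[4];   /* heap 0 is the vmalloc/system heap */
    static unsigned long ebi1_reserved;

    static void fix_sizes(int small_target)
    {
            pmem_mdp_size   = small_target ? 0x1000000 : 0x2300000;
            pmem_adsp_size  = small_target ? 0x0800000 : 0x1200000;
            ion_camera_size = pmem_adsp_size;
            ion_audio_size  = 0x0200000;
            ion_sf_size     = pmem_mdp_size;
    }

    static void size_ion_devices(void)
    {
            heap_size[1] = ion_camera_size;
            heap_size[2] = ion_audio_size;
            heap_size[3] = ion_sf_size;
    }

    static void reserve_ion_memory(void)
    {
            ebi1_reserved += ion_camera_size + ion_audio_size + ion_sf_size;
    }

    int main(void)
    {
            fix_sizes(1);
            size_ion_devices();
            reserve_ion_memory();
            printf("camera=0x%lx audio=0x%lx sf=0x%lx reserved=0x%lx\n",
                   heap_size[1], heap_size[2], heap_size[3], ebi1_reserved);
            return 0;
    }
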
diff --git a/arch/arm/mach-msm/board-msm8x60.c b/arch/arm/mach-msm/board-msm8x60.c
index 098ad6e..1bb69b5 100644
--- a/arch/arm/mach-msm/board-msm8x60.c
+++ b/arch/arm/mach-msm/board-msm8x60.c
@@ -5320,7 +5320,7 @@
.align = PAGE_SIZE,
};
-static struct ion_co_heap_pdata hole_co_ion_pdata = {
+static struct ion_co_heap_pdata mm_fw_co_ion_pdata = {
.adjacent_mem_id = ION_CP_MM_HEAP_ID,
};
@@ -5363,10 +5363,10 @@
.id = ION_MM_FIRMWARE_HEAP_ID,
.type = ION_HEAP_TYPE_CARVEOUT,
.name = ION_MM_FIRMWARE_HEAP_NAME,
- .base = MSM_ION_HOLE_BASE,
- .size = MSM_ION_HOLE_SIZE,
+ .base = MSM_MM_FW_BASE,
+ .size = MSM_MM_FW_SIZE,
.memory_type = ION_SMI_TYPE,
- .extra_data = (void *) &hole_co_ion_pdata,
+ .extra_data = (void *) &mm_fw_co_ion_pdata,
},
{
.id = ION_CP_MFC_HEAP_ID,
diff --git a/arch/arm/mach-msm/board-qrd7627a.c b/arch/arm/mach-msm/board-qrd7627a.c
index 9c80c8b..8eb961c 100644
--- a/arch/arm/mach-msm/board-qrd7627a.c
+++ b/arch/arm/mach-msm/board-qrd7627a.c
@@ -33,6 +33,7 @@
#include <linux/msm_adc.h>
#include <linux/fmem.h>
#include <linux/regulator/msm-gpio-regulator.h>
+#include <linux/ion.h>
#include <asm/mach/mmc.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -131,6 +132,14 @@
#ifdef CONFIG_ARCH_MSM7X27A
#define MSM_PMEM_MDP_SIZE 0x2300000
#define MSM_PMEM_ADSP_SIZE 0x1200000
+
+#define MSM_ION_AUDIO_SIZE (MSM_PMEM_AUDIO_SIZE + PMEM_KERNEL_EBI1_SIZE)
+#define MSM_ION_CAMERA_SIZE MSM_PMEM_ADSP_SIZE
+#define MSM_ION_SF_SIZE MSM_PMEM_MDP_SIZE
+#define MSM_ION_HEAP_NUM 4
+#ifdef CONFIG_ION_MSM
+static struct platform_device ion_dev;
+#endif
#endif
static struct android_usb_platform_data android_usb_pdata = {
@@ -708,6 +717,9 @@
&asoc_msm_dai1,
&msm_adc_device,
&fmem_device,
+#ifdef CONFIG_ION_MSM
+ &ion_dev,
+#endif
};
static struct platform_device *qrd7627a_devices[] __initdata = {
@@ -756,6 +768,68 @@
}
early_param("pmem_audio_size", pmem_audio_size_setup);
+#ifdef CONFIG_ION_MSM
+#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
+static struct ion_co_heap_pdata co_ion_pdata = {
+ .adjacent_mem_id = INVALID_HEAP_ID,
+ .align = PAGE_SIZE,
+};
+#endif
+
+/**
+ * These heaps are listed in the order they will be allocated.
+ * Don't swap the order unless you know what you are doing!
+ */
+static struct ion_platform_data ion_pdata = {
+ .nr = MSM_ION_HEAP_NUM,
+ .heaps = {
+ {
+ .id = ION_SYSTEM_HEAP_ID,
+ .type = ION_HEAP_TYPE_SYSTEM,
+ .name = ION_VMALLOC_HEAP_NAME,
+ },
+#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
+ /* PMEM_ADSP = CAMERA */
+ {
+ .id = ION_CAMERA_HEAP_ID,
+ .type = ION_HEAP_TYPE_CARVEOUT,
+ .name = ION_CAMERA_HEAP_NAME,
+ .size = MSM_ION_CAMERA_SIZE,
+ .memory_type = ION_EBI_TYPE,
+ .has_outer_cache = 1,
+ .extra_data = (void *)&co_ion_pdata,
+ },
+ /* PMEM_AUDIO */
+ {
+ .id = ION_AUDIO_HEAP_ID,
+ .type = ION_HEAP_TYPE_CARVEOUT,
+ .name = ION_AUDIO_HEAP_NAME,
+ .size = MSM_ION_AUDIO_SIZE,
+ .memory_type = ION_EBI_TYPE,
+ .has_outer_cache = 1,
+ .extra_data = (void *)&co_ion_pdata,
+ },
+ /* PMEM_MDP = SF */
+ {
+ .id = ION_SF_HEAP_ID,
+ .type = ION_HEAP_TYPE_CARVEOUT,
+ .name = ION_SF_HEAP_NAME,
+ .size = MSM_ION_SF_SIZE,
+ .memory_type = ION_EBI_TYPE,
+ .has_outer_cache = 1,
+ .extra_data = (void *)&co_ion_pdata,
+ },
+#endif
+ }
+};
+
+static struct platform_device ion_dev = {
+ .name = "ion-msm",
+ .id = 1,
+ .dev = { .platform_data = &ion_pdata },
+};
+#endif
+
static struct memtype_reserve msm7627a_reserve_table[] __initdata = {
[MEMTYPE_SMI] = {
},
@@ -768,16 +842,19 @@
};
#ifdef CONFIG_ANDROID_PMEM
+#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
static struct android_pmem_platform_data *pmem_pdata_array[] __initdata = {
&android_pmem_adsp_pdata,
&android_pmem_audio_pdata,
&android_pmem_pdata,
};
#endif
+#endif
static void __init size_pmem_devices(void)
{
#ifdef CONFIG_ANDROID_PMEM
+#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
unsigned int i;
unsigned int reusable_count = 0;
@@ -804,30 +881,46 @@
pdata->reusable = 0;
}
}
-
+#endif
#endif
}
+#ifdef CONFIG_ANDROID_PMEM
+#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
static void __init reserve_memory_for(struct android_pmem_platform_data *p)
{
msm7627a_reserve_table[p->memory_type].size += p->size;
}
+#endif
+#endif
static void __init reserve_pmem_memory(void)
{
#ifdef CONFIG_ANDROID_PMEM
+#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
unsigned int i;
for (i = 0; i < ARRAY_SIZE(pmem_pdata_array); ++i)
reserve_memory_for(pmem_pdata_array[i]);
msm7627a_reserve_table[MEMTYPE_EBI1].size += pmem_kernel_ebi1_size;
#endif
+#endif
+}
+
+static void __init reserve_ion_memory(void)
+{
+#if defined(CONFIG_ION_MSM) && defined(CONFIG_MSM_MULTIMEDIA_USE_ION)
+ msm7627a_reserve_table[MEMTYPE_EBI1].size += MSM_ION_CAMERA_SIZE;
+ msm7627a_reserve_table[MEMTYPE_EBI1].size += MSM_ION_AUDIO_SIZE;
+ msm7627a_reserve_table[MEMTYPE_EBI1].size += MSM_ION_SF_SIZE;
+#endif
}
static void __init msm7627a_calculate_reserve_sizes(void)
{
size_pmem_devices();
reserve_pmem_memory();
+ reserve_ion_memory();
}
static int msm7627a_paddr_to_memtype(unsigned int paddr)
diff --git a/arch/arm/mach-msm/cache_erp.c b/arch/arm/mach-msm/cache_erp.c
index 4d7ce12..c3302ec 100644
--- a/arch/arm/mach-msm/cache_erp.c
+++ b/arch/arm/mach-msm/cache_erp.c
@@ -90,7 +90,7 @@
#define MODULE_NAME "msm_cache_erp"
-#define ERP_LOG_MAGIC_ADDR 0x748
+#define ERP_LOG_MAGIC_ADDR 0x6A4
#define ERP_LOG_MAGIC 0x11C39893
struct msm_l1_err_stats {
diff --git a/arch/arm/mach-msm/clock-7x30.c b/arch/arm/mach-msm/clock-7x30.c
index aa94be6..225ea2b 100644
--- a/arch/arm/mach-msm/clock-7x30.c
+++ b/arch/arm/mach-msm/clock-7x30.c
@@ -200,7 +200,7 @@
#define PCOM_XO_TCXO 0
#define PCOM_XO_LPXO 1
-static bool pcom_is_local(struct clk *clk)
+static bool pcom_is_local(struct clk *c)
{
return false;
}
@@ -2441,7 +2441,7 @@
struct measure_sel {
u32 test_vector;
- struct clk *clk;
+ struct clk *c;
};
static struct measure_sel measure_mux[] = {
@@ -2538,17 +2538,17 @@
{ CLK_TEST_LS(0x3F), &usb_hs_clk.c },
};
-static struct measure_sel *find_measure_sel(struct clk *clk)
+static struct measure_sel *find_measure_sel(struct clk *c)
{
int i;
for (i = 0; i < ARRAY_SIZE(measure_mux); i++)
- if (measure_mux[i].clk == clk)
+ if (measure_mux[i].c == c)
return &measure_mux[i];
return NULL;
}
-static int measure_clk_set_parent(struct clk *clk, struct clk *parent)
+static int measure_clk_set_parent(struct clk *c, struct clk *parent)
{
struct measure_sel *p;
unsigned long flags;
@@ -2599,7 +2599,7 @@
/* Perform a hardware rate measurement for a given clock.
FOR DEBUG USE ONLY: Measurements take ~15 ms! */
-static unsigned long measure_clk_get_rate(struct clk *clk)
+static unsigned long measure_clk_get_rate(struct clk *c)
{
unsigned long flags;
u32 regval, prph_web_reg_old;
@@ -2647,12 +2647,12 @@
return ret;
}
#else /* !CONFIG_DEBUG_FS */
-static int measure_clk_set_parent(struct clk *clk, struct clk *parent)
+static int measure_clk_set_parent(struct clk *c, struct clk *parent)
{
return -EINVAL;
}
-static unsigned long measure_clk_get_rate(struct clk *clk)
+static unsigned long measure_clk_get_rate(struct clk *c)
{
return 0;
}
@@ -2670,14 +2670,14 @@
};
/* Implementation for clk_set_flags(). */
-int soc_clk_set_flags(struct clk *clk, unsigned clk_flags)
+int soc_clk_set_flags(struct clk *c, unsigned clk_flags)
{
uint32_t regval, ret = 0;
unsigned long flags;
spin_lock_irqsave(&local_clock_reg_lock, flags);
- if (clk == &vfe_clk.c) {
+ if (c == &vfe_clk.c) {
regval = readl_relaxed(CAM_VFE_NS_REG);
/* Flag values chosen for backward compatibility
* with proc_comm remote clock control. */
@@ -2701,17 +2701,15 @@
return ret;
}
-static int msm7x30_clk_reset(struct clk *clk, enum clk_reset_action action)
+static int msm7x30_clk_reset(struct clk *c, enum clk_reset_action action)
{
/* reset_mask is actually a proc_comm id */
- unsigned id = to_rcg_clk(clk)->b.reset_mask;
- return pc_clk_reset(id, action);
+ return pc_clk_reset(to_rcg_clk(c)->b.reset_mask, action);
}
-static int soc_branch_clk_reset(struct clk *clk, enum clk_reset_action action)
+static int soc_branch_clk_reset(struct clk *c, enum clk_reset_action action)
{
- unsigned id = to_branch_clk(clk)->b.reset_mask;
- return pc_clk_reset(id, action);
+ return pc_clk_reset(to_branch_clk(c)->b.reset_mask, action);
}
/*
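
The clock-7x30.c hunks above are mostly a mechanical rename of the local
struct clk pointers from "clk" to "c" (and of the matching measure_mux member),
which frees the "clk" identifier and makes the to_rcg_clk()/to_branch_clk()
conversions easier to read. Those converters are the usual container_of idiom:
given a pointer to an embedded member, recover the enclosing wrapper. A
stand-alone version of the idiom, with an invented branch-clock wrapper:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct clk { const char *name; };

    struct branch_clk {
            unsigned reset_mask;
            struct clk c;            /* embedded generic clock */
    };

    static struct branch_clk usb_clk = {
            .reset_mask = 0x20,
            .c = { "usb_hs_clk" },
    };

    static int clk_reset(struct clk *c)
    {
            /* Recover the wrapper from the embedded member, like to_branch_clk(). */
            struct branch_clk *b = container_of(c, struct branch_clk, c);

            printf("reset %s with mask 0x%x\n", c->name, b->reset_mask);
            return 0;
    }

    int main(void)
    {
            return clk_reset(&usb_clk.c);
    }
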
diff --git a/arch/arm/mach-msm/clock-8960.c b/arch/arm/mach-msm/clock-8960.c
index 5867eef..fa91249 100644
--- a/arch/arm/mach-msm/clock-8960.c
+++ b/arch/arm/mach-msm/clock-8960.c
@@ -2858,9 +2858,9 @@
struct clk c;
};
-static inline struct pix_rdi_clk *to_pix_rdi_clk(struct clk *clk)
+static inline struct pix_rdi_clk *to_pix_rdi_clk(struct clk *c)
{
- return container_of(clk, struct pix_rdi_clk, c);
+ return container_of(c, struct pix_rdi_clk, c);
}
static int pix_rdi_clk_set_rate(struct clk *c, unsigned long rate)
@@ -2868,7 +2868,7 @@
int ret, i;
u32 reg;
unsigned long flags;
- struct pix_rdi_clk *clk = to_pix_rdi_clk(c);
+ struct pix_rdi_clk *rdi = to_pix_rdi_clk(c);
struct clk **mux_map = pix_rdi_mux_map;
/*
@@ -2889,32 +2889,32 @@
goto err;
}
/* Keep the new source on when switching inputs of an enabled clock */
- if (clk->enabled) {
- clk_disable(mux_map[clk->cur_rate]);
+ if (rdi->enabled) {
+ clk_disable(mux_map[rdi->cur_rate]);
clk_enable(mux_map[rate]);
}
spin_lock_irqsave(&local_clock_reg_lock, flags);
- reg = readl_relaxed(clk->s2_reg);
- reg &= ~clk->s2_mask;
- reg |= rate == 2 ? clk->s2_mask : 0;
- writel_relaxed(reg, clk->s2_reg);
+ reg = readl_relaxed(rdi->s2_reg);
+ reg &= ~rdi->s2_mask;
+ reg |= rate == 2 ? rdi->s2_mask : 0;
+ writel_relaxed(reg, rdi->s2_reg);
/*
* Wait at least 6 cycles of slowest clock
* for the glitch-free MUX to fully switch sources.
*/
mb();
udelay(1);
- reg = readl_relaxed(clk->s_reg);
- reg &= ~clk->s_mask;
- reg |= rate == 1 ? clk->s_mask : 0;
- writel_relaxed(reg, clk->s_reg);
+ reg = readl_relaxed(rdi->s_reg);
+ reg &= ~rdi->s_mask;
+ reg |= rate == 1 ? rdi->s_mask : 0;
+ writel_relaxed(reg, rdi->s_reg);
/*
* Wait at least 6 cycles of slowest clock
* for the glitch-free MUX to fully switch sources.
*/
mb();
udelay(1);
- clk->cur_rate = rate;
+ rdi->cur_rate = rate;
spin_unlock_irqrestore(&local_clock_reg_lock, flags);
err:
for (i--; i >= 0; i--)
@@ -2931,12 +2931,12 @@
static int pix_rdi_clk_enable(struct clk *c)
{
unsigned long flags;
- struct pix_rdi_clk *clk = to_pix_rdi_clk(c);
+ struct pix_rdi_clk *rdi = to_pix_rdi_clk(c);
spin_lock_irqsave(&local_clock_reg_lock, flags);
- __branch_clk_enable_reg(&clk->b, clk->c.dbg_name);
+ __branch_enable_reg(&rdi->b, rdi->c.dbg_name);
spin_unlock_irqrestore(&local_clock_reg_lock, flags);
- clk->enabled = true;
+ rdi->enabled = true;
return 0;
}
@@ -2944,24 +2944,22 @@
static void pix_rdi_clk_disable(struct clk *c)
{
unsigned long flags;
- struct pix_rdi_clk *clk = to_pix_rdi_clk(c);
+ struct pix_rdi_clk *rdi = to_pix_rdi_clk(c);
spin_lock_irqsave(&local_clock_reg_lock, flags);
- __branch_clk_disable_reg(&clk->b, clk->c.dbg_name);
+ __branch_disable_reg(&rdi->b, rdi->c.dbg_name);
spin_unlock_irqrestore(&local_clock_reg_lock, flags);
- clk->enabled = false;
+ rdi->enabled = false;
}
-static int pix_rdi_clk_reset(struct clk *clk, enum clk_reset_action action)
+static int pix_rdi_clk_reset(struct clk *c, enum clk_reset_action action)
{
- return branch_reset(&to_pix_rdi_clk(clk)->b, action);
+ return branch_reset(&to_pix_rdi_clk(c)->b, action);
}
static struct clk *pix_rdi_clk_get_parent(struct clk *c)
{
- struct pix_rdi_clk *clk = to_pix_rdi_clk(c);
-
- return pix_rdi_mux_map[clk->cur_rate];
+ return pix_rdi_mux_map[to_pix_rdi_clk(c)->cur_rate];
}
static int pix_rdi_clk_list_rate(struct clk *c, unsigned n)
@@ -2974,17 +2972,17 @@
static enum handoff pix_rdi_clk_handoff(struct clk *c)
{
u32 reg;
- struct pix_rdi_clk *clk = to_pix_rdi_clk(c);
+ struct pix_rdi_clk *rdi = to_pix_rdi_clk(c);
enum handoff ret;
- ret = branch_handoff(&clk->b, &clk->c);
+ ret = branch_handoff(&rdi->b, &rdi->c);
if (ret == HANDOFF_DISABLED_CLK)
return ret;
- reg = readl_relaxed(clk->s_reg);
- clk->cur_rate = reg & clk->s_mask ? 1 : 0;
- reg = readl_relaxed(clk->s2_reg);
- clk->cur_rate = reg & clk->s2_mask ? 2 : clk->cur_rate;
+ reg = readl_relaxed(rdi->s_reg);
+ rdi->cur_rate = reg & rdi->s_mask ? 1 : 0;
+ reg = readl_relaxed(rdi->s2_reg);
+ rdi->cur_rate = reg & rdi->s2_mask ? 2 : rdi->cur_rate;
return HANDOFF_ENABLED_CLK;
}
@@ -3897,7 +3895,7 @@
},
};
-static int hdmi_pll_clk_enable(struct clk *clk)
+static int hdmi_pll_clk_enable(struct clk *c)
{
int ret;
unsigned long flags;
@@ -3907,7 +3905,7 @@
return ret;
}
-static void hdmi_pll_clk_disable(struct clk *clk)
+static void hdmi_pll_clk_disable(struct clk *c)
{
unsigned long flags;
spin_lock_irqsave(&local_clock_reg_lock, flags);
@@ -3915,12 +3913,12 @@
spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}
-static unsigned long hdmi_pll_clk_get_rate(struct clk *clk)
+static unsigned long hdmi_pll_clk_get_rate(struct clk *c)
{
return hdmi_pll_get_rate();
}
-static struct clk *hdmi_pll_clk_get_parent(struct clk *clk)
+static struct clk *hdmi_pll_clk_get_parent(struct clk *c)
{
return &pxo_clk.c;
}
@@ -3975,12 +3973,12 @@
* Unlike other clocks, the TV rate is adjusted through PLL
* re-programming. It is also routed through an MND divider.
*/
-void set_rate_tv(struct rcg_clk *clk, struct clk_freq_tbl *nf)
+void set_rate_tv(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
unsigned long pll_rate = (unsigned long)nf->extra_freq_data;
if (pll_rate)
hdmi_pll_set_rate(pll_rate);
- set_rate_mnd(clk, nf);
+ set_rate_mnd(rcg, nf);
}
static struct rcg_clk tv_src_clk = {
@@ -4585,6 +4583,7 @@
DEFINE_CLK_RPM(mmfpb_clk, mmfpb_a_clk, MMFPB, NULL);
DEFINE_CLK_RPM(sfab_clk, sfab_a_clk, SYSTEM_FABRIC, NULL);
DEFINE_CLK_RPM(sfpb_clk, sfpb_a_clk, SFPB, NULL);
+DEFINE_CLK_RPM_QDSS(qdss_clk, qdss_a_clk);
static DEFINE_CLK_VOTER(sfab_msmbus_a_clk, &sfab_a_clk.c, 0);
static DEFINE_CLK_VOTER(sfab_tmr_a_clk, &sfab_a_clk.c, 0);
@@ -4616,7 +4615,7 @@
#ifdef CONFIG_DEBUG_FS
struct measure_sel {
u32 test_vector;
- struct clk *clk;
+ struct clk *c;
};
static DEFINE_CLK_MEASURE(l2_m_clk);
@@ -4840,12 +4839,12 @@
{ TEST_CPUL2(0x5), &krait3_m_clk },
};
-static struct measure_sel *find_measure_sel(struct clk *clk)
+static struct measure_sel *find_measure_sel(struct clk *c)
{
int i;
for (i = 0; i < ARRAY_SIZE(measure_mux); i++)
- if (measure_mux[i].clk == clk)
+ if (measure_mux[i].c == c)
return &measure_mux[i];
return NULL;
}
@@ -4855,7 +4854,7 @@
int ret = 0;
u32 clk_sel;
struct measure_sel *p;
- struct measure_clk *clk = to_measure_clk(c);
+ struct measure_clk *measure = to_measure_clk(c);
unsigned long flags;
if (!parent)
@@ -4871,9 +4870,9 @@
* Program the test vector, measurement period (sample_ticks)
* and scaling multiplier.
*/
- clk->sample_ticks = 0x10000;
+ measure->sample_ticks = 0x10000;
clk_sel = p->test_vector & TEST_CLK_SEL_MASK;
- clk->multiplier = 1;
+ measure->multiplier = 1;
switch (p->test_vector >> TEST_TYPE_SHIFT) {
case TEST_TYPE_PER_LS:
writel_relaxed(0x4030D00|BVAL(7, 0, clk_sel), CLK_TEST_REG);
@@ -4902,8 +4901,8 @@
case TEST_TYPE_CPUL2:
writel_relaxed(0x4030400, CLK_TEST_REG);
writel_relaxed(0x80|BVAL(5, 3, clk_sel), GCC_APCS_CLK_DIAG);
- clk->sample_ticks = 0x4000;
- clk->multiplier = 2;
+ measure->sample_ticks = 0x4000;
+ measure->multiplier = 2;
break;
default:
ret = -EPERM;
@@ -4946,7 +4945,7 @@
unsigned long flags;
u32 pdm_reg_backup, ringosc_reg_backup;
u64 raw_count_short, raw_count_full;
- struct measure_clk *clk = to_measure_clk(c);
+ struct measure_clk *measure = to_measure_clk(c);
unsigned ret;
ret = clk_prepare_enable(&cxo_clk.c);
@@ -4973,7 +4972,7 @@
/* Run a short measurement. (~1 ms) */
raw_count_short = run_measurement(0x1000);
/* Run a full measurement. (~14 ms) */
- raw_count_full = run_measurement(clk->sample_ticks);
+ raw_count_full = run_measurement(measure->sample_ticks);
writel_relaxed(ringosc_reg_backup, RINGOSC_NS_REG);
writel_relaxed(pdm_reg_backup, PDM_CLK_NS_REG);
@@ -4984,8 +4983,8 @@
else {
/* Compute rate in Hz. */
raw_count_full = ((raw_count_full * 10) + 15) * 4800000;
- do_div(raw_count_full, ((clk->sample_ticks * 10) + 35));
- ret = (raw_count_full * clk->multiplier);
+ do_div(raw_count_full, ((measure->sample_ticks * 10) + 35));
+ ret = (raw_count_full * measure->multiplier);
}
/* Route dbg_hs_clk to PLLTEST. 300mV single-ended amplitude. */
@@ -4997,12 +4996,12 @@
return ret;
}
#else /* !CONFIG_DEBUG_FS */
-static int measure_clk_set_parent(struct clk *clk, struct clk *parent)
+static int measure_clk_set_parent(struct clk *c, struct clk *parent)
{
return -EINVAL;
}
-static unsigned long measure_clk_get_rate(struct clk *clk)
+static unsigned long measure_clk_get_rate(struct clk *c)
{
return 0;
}
@@ -5025,6 +5024,7 @@
static struct clk_lookup msm_clocks_8064[] = {
CLK_LOOKUP("xo", cxo_a_clk.c, ""),
CLK_LOOKUP("xo", pxo_a_clk.c, ""),
+ CLK_LOOKUP("pwm_clk", cxo_clk.c, "0-0048"),
CLK_LOOKUP("cxo", cxo_clk.c, "wcnss_wlan.0"),
CLK_LOOKUP("cxo", cxo_clk.c, "pil_riva"),
CLK_LOOKUP("xo", pxo_clk.c, "pil_qdsp6v4.0"),
@@ -5069,6 +5069,12 @@
CLK_LOOKUP("mem_a_clk", ebi1_msmbus_a_clk.c, "msm_bus"),
CLK_LOOKUP("dfab_clk", dfab_msmbus_clk.c, "msm_bus"),
CLK_LOOKUP("dfab_a_clk", dfab_msmbus_a_clk.c, "msm_bus"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, ""),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "msm_etb.0"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "msm_tpiu.0"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "msm_funnel.0"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "msm_stm.0"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "msm_etm.0"),
CLK_LOOKUP("ebi1_clk", ebi1_clk.c, ""),
CLK_LOOKUP("mmfpb_clk", mmfpb_clk.c, ""),
@@ -5393,6 +5399,12 @@
CLK_LOOKUP("mem_a_clk", ebi1_msmbus_a_clk.c, "msm_bus"),
CLK_LOOKUP("dfab_clk", dfab_msmbus_clk.c, "msm_bus"),
CLK_LOOKUP("dfab_a_clk", dfab_msmbus_a_clk.c, "msm_bus"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, ""),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "msm_etb.0"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "msm_tpiu.0"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "msm_funnel.0"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "msm_stm.0"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "msm_etm.0"),
CLK_LOOKUP("ebi1_clk", ebi1_clk.c, NULL),
CLK_LOOKUP("mmfpb_clk", mmfpb_clk.c, NULL),
@@ -5709,6 +5721,12 @@
CLK_LOOKUP("mem_a_clk", ebi1_msmbus_a_clk.c, "msm_bus"),
CLK_LOOKUP("dfab_clk", dfab_msmbus_clk.c, "msm_bus"),
CLK_LOOKUP("dfab_a_clk", dfab_msmbus_a_clk.c, "msm_bus"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, ""),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "msm_etb.0"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "msm_tpiu.0"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "msm_funnel.0"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "msm_stm.0"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "msm_etm.0"),
CLK_LOOKUP("ebi1_clk", ebi1_clk.c, NULL),
CLK_LOOKUP("mmfpb_clk", mmfpb_clk.c, NULL),
diff --git a/arch/arm/mach-msm/clock-8x60.c b/arch/arm/mach-msm/clock-8x60.c
index 74d71a2..da7dca7 100644
--- a/arch/arm/mach-msm/clock-8x60.c
+++ b/arch/arm/mach-msm/clock-8x60.c
@@ -341,24 +341,24 @@
},
};
-static int pll4_clk_enable(struct clk *clk)
+static int pll4_clk_enable(struct clk *c)
{
struct msm_rpm_iv_pair iv = { MSM_RPM_ID_PLL_4, 1 };
return msm_rpm_set_noirq(MSM_RPM_CTX_SET_0, &iv, 1);
}
-static void pll4_clk_disable(struct clk *clk)
+static void pll4_clk_disable(struct clk *c)
{
struct msm_rpm_iv_pair iv = { MSM_RPM_ID_PLL_4, 0 };
msm_rpm_set_noirq(MSM_RPM_CTX_SET_0, &iv, 1);
}
-static struct clk *pll4_clk_get_parent(struct clk *clk)
+static struct clk *pll4_clk_get_parent(struct clk *c)
{
return &pxo_clk.c;
}
-static bool pll4_clk_is_local(struct clk *clk)
+static bool pll4_clk_is_local(struct clk *c)
{
return false;
}
@@ -397,7 +397,7 @@
/* Unlike other clocks, the TV rate is adjusted through PLL
* re-programming. It is also routed through an MND divider. */
-static void set_rate_tv(struct rcg_clk *clk, struct clk_freq_tbl *nf)
+static void set_rate_tv(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
struct pll_rate *rate = nf->extra_freq_data;
uint32_t pll_mode, pll_config, misc_cc2;
@@ -426,7 +426,7 @@
writel_relaxed(pll_config, MM_PLL2_CONFIG_REG);
/* Configure MND. */
- set_rate_mnd(clk, nf);
+ set_rate_mnd(rcg, nf);
/* Configure hdmi_ref_clk to be equal to the TV clock rate. */
misc_cc2 = readl_relaxed(MISC_CC2_REG);
@@ -3133,7 +3133,7 @@
#ifdef CONFIG_DEBUG_FS
struct measure_sel {
u32 test_vector;
- struct clk *clk;
+ struct clk *c;
};
static struct measure_sel measure_mux[] = {
@@ -3308,12 +3308,12 @@
{ TEST_SC(0x42), &l2_m_clk },
};
-static struct measure_sel *find_measure_sel(struct clk *clk)
+static struct measure_sel *find_measure_sel(struct clk *c)
{
int i;
for (i = 0; i < ARRAY_SIZE(measure_mux); i++)
- if (measure_mux[i].clk == clk)
+ if (measure_mux[i].c == c)
return &measure_mux[i];
return NULL;
}
@@ -3323,7 +3323,7 @@
int ret = 0;
u32 clk_sel;
struct measure_sel *p;
- struct measure_clk *clk = to_measure_clk(c);
+ struct measure_clk *measure = to_measure_clk(c);
unsigned long flags;
if (!parent)
@@ -3340,9 +3340,9 @@
* and scaling factors (multiplier, divider).
*/
clk_sel = p->test_vector & TEST_CLK_SEL_MASK;
- clk->sample_ticks = 0x10000;
- clk->multiplier = 1;
- clk->divider = 1;
+ measure->sample_ticks = 0x10000;
+ measure->multiplier = 1;
+ measure->divider = 1;
switch (p->test_vector >> TEST_TYPE_SHIFT) {
case TEST_TYPE_PER_LS:
writel_relaxed(0x4030D00|BVAL(7, 0, clk_sel), CLK_TEST_REG);
@@ -3355,7 +3355,7 @@
writel_relaxed(BVAL(6, 1, clk_sel)|BIT(0), DBG_CFG_REG_LS_REG);
break;
case TEST_TYPE_MM_HS2X:
- clk->divider = 2;
+ measure->divider = 2;
case TEST_TYPE_MM_HS:
writel_relaxed(0x402B800, CLK_TEST_REG);
writel_relaxed(BVAL(6, 1, clk_sel)|BIT(0), DBG_CFG_REG_HS_REG);
@@ -3367,8 +3367,8 @@
break;
case TEST_TYPE_SC:
writel_relaxed(0x5020000|BVAL(16, 10, clk_sel), CLK_TEST_REG);
- clk->sample_ticks = 0x4000;
- clk->multiplier = 2;
+ measure->sample_ticks = 0x4000;
+ measure->multiplier = 2;
break;
default:
ret = -EPERM;
@@ -3410,7 +3410,7 @@
unsigned long flags;
u32 pdm_reg_backup, ringosc_reg_backup;
u64 raw_count_short, raw_count_full;
- struct measure_clk *clk = to_measure_clk(c);
+ struct measure_clk *measure = to_measure_clk(c);
unsigned ret;
spin_lock_irqsave(&local_clock_reg_lock, flags);
@@ -3431,7 +3431,7 @@
/* Run a short measurement. (~1 ms) */
raw_count_short = run_measurement(0x1000);
/* Run a full measurement. (~14 ms) */
- raw_count_full = run_measurement(clk->sample_ticks);
+ raw_count_full = run_measurement(measure->sample_ticks);
writel_relaxed(ringosc_reg_backup, RINGOSC_NS_REG);
writel_relaxed(pdm_reg_backup, PDM_CLK_NS_REG);
@@ -3442,9 +3442,9 @@
else {
/* Compute rate in Hz. */
raw_count_full = ((raw_count_full * 10) + 15) * 4800000;
- do_div(raw_count_full,
- (((clk->sample_ticks * 10) + 35) * clk->divider));
- ret = (raw_count_full * clk->multiplier);
+ do_div(raw_count_full, (((measure->sample_ticks * 10) + 35)
+ * measure->divider));
+ ret = (raw_count_full * measure->multiplier);
}
/* Route dbg_hs_clk to PLLTEST. 300mV single-ended amplitude. */
@@ -3454,12 +3454,12 @@
return ret;
}
#else /* !CONFIG_DEBUG_FS */
-static int measure_clk_set_parent(struct clk *clk, struct clk *parent)
+static int measure_clk_set_parent(struct clk *c, struct clk *parent)
{
return -EINVAL;
}
-static unsigned long measure_clk_get_rate(struct clk *clk)
+static unsigned long measure_clk_get_rate(struct clk *c)
{
return 0;
}
diff --git a/arch/arm/mach-msm/clock-9615.c b/arch/arm/mach-msm/clock-9615.c
index a2e0bc9..f7ccb35 100644
--- a/arch/arm/mach-msm/clock-9615.c
+++ b/arch/arm/mach-msm/clock-9615.c
@@ -216,33 +216,33 @@
static DEFINE_SPINLOCK(soft_vote_lock);
-static int pll_acpu_vote_clk_enable(struct clk *clk)
+static int pll_acpu_vote_clk_enable(struct clk *c)
{
int ret = 0;
unsigned long flags;
- struct pll_vote_clk *pll = to_pll_vote_clk(clk);
+ struct pll_vote_clk *pllv = to_pll_vote_clk(c);
spin_lock_irqsave(&soft_vote_lock, flags);
- if (!*pll->soft_vote)
- ret = pll_vote_clk_enable(clk);
+ if (!*pllv->soft_vote)
+ ret = pll_vote_clk_enable(c);
if (ret == 0)
- *pll->soft_vote |= (pll->soft_vote_mask);
+ *pllv->soft_vote |= (pllv->soft_vote_mask);
spin_unlock_irqrestore(&soft_vote_lock, flags);
return ret;
}
-static void pll_acpu_vote_clk_disable(struct clk *clk)
+static void pll_acpu_vote_clk_disable(struct clk *c)
{
unsigned long flags;
- struct pll_vote_clk *pll = to_pll_vote_clk(clk);
+ struct pll_vote_clk *pllv = to_pll_vote_clk(c);
spin_lock_irqsave(&soft_vote_lock, flags);
- *pll->soft_vote &= ~(pll->soft_vote_mask);
- if (!*pll->soft_vote)
- pll_vote_clk_disable(clk);
+ *pllv->soft_vote &= ~(pllv->soft_vote_mask);
+ if (!*pllv->soft_vote)
+ pll_vote_clk_disable(c);
spin_unlock_irqrestore(&soft_vote_lock, flags);
}
@@ -1376,7 +1376,7 @@
#ifdef CONFIG_DEBUG_FS
struct measure_sel {
u32 test_vector;
- struct clk *clk;
+ struct clk *c;
};
static DEFINE_CLK_MEASURE(q6sw_clk);
@@ -1447,12 +1447,12 @@
{ TEST_LPA_HS(0x00), &q6_func_clk },
};
-static struct measure_sel *find_measure_sel(struct clk *clk)
+static struct measure_sel *find_measure_sel(struct clk *c)
{
int i;
for (i = 0; i < ARRAY_SIZE(measure_mux); i++)
- if (measure_mux[i].clk == clk)
+ if (measure_mux[i].c == c)
return &measure_mux[i];
return NULL;
}
@@ -1462,7 +1462,7 @@
int ret = 0;
u32 clk_sel;
struct measure_sel *p;
- struct measure_clk *clk = to_measure_clk(c);
+ struct measure_clk *measure = to_measure_clk(c);
unsigned long flags;
if (!parent)
@@ -1478,9 +1478,9 @@
* Program the test vector, measurement period (sample_ticks)
* and scaling multiplier.
*/
- clk->sample_ticks = 0x10000;
+ measure->sample_ticks = 0x10000;
clk_sel = p->test_vector & TEST_CLK_SEL_MASK;
- clk->multiplier = 1;
+ measure->multiplier = 1;
switch (p->test_vector >> TEST_TYPE_SHIFT) {
case TEST_TYPE_PER_LS:
writel_relaxed(0x4030D00|BVAL(7, 0, clk_sel), CLK_TEST_REG);
@@ -1539,7 +1539,7 @@
unsigned long flags;
u32 pdm_reg_backup, ringosc_reg_backup;
u64 raw_count_short, raw_count_full;
- struct measure_clk *clk = to_measure_clk(c);
+ struct measure_clk *measure = to_measure_clk(c);
unsigned ret;
spin_lock_irqsave(&local_clock_reg_lock, flags);
@@ -1560,7 +1560,7 @@
/* Run a short measurement. (~1 ms) */
raw_count_short = run_measurement(0x1000);
/* Run a full measurement. (~14 ms) */
- raw_count_full = run_measurement(clk->sample_ticks);
+ raw_count_full = run_measurement(measure->sample_ticks);
writel_relaxed(ringosc_reg_backup, RINGOSC_NS_REG);
writel_relaxed(pdm_reg_backup, PDM_CLK_NS_REG);
@@ -1571,8 +1571,8 @@
else {
/* Compute rate in Hz. */
raw_count_full = ((raw_count_full * 10) + 15) * 4800000;
- do_div(raw_count_full, ((clk->sample_ticks * 10) + 35));
- ret = (raw_count_full * clk->multiplier);
+ do_div(raw_count_full, ((measure->sample_ticks * 10) + 35));
+ ret = (raw_count_full * measure->multiplier);
}
/* Route dbg_hs_clk to PLLTEST. 300mV single-ended amplitude. */
@@ -1582,12 +1582,12 @@
return ret;
}
#else /* !CONFIG_DEBUG_FS */
-static int measure_clk_set_parent(struct clk *clk, struct clk *parent)
+static int measure_clk_set_parent(struct clk *c, struct clk *parent)
{
return -EINVAL;
}
-static unsigned long measure_clk_get_rate(struct clk *clk)
+static unsigned long measure_clk_get_rate(struct clk *c)
{
return 0;
}
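The pll_acpu_vote_clk_enable()/_disable() pair above layers a software vote word on top of the hardware PLL vote, so the PLL is only switched on by the first software voter and off by the last one. A minimal sketch of that pattern follows; the vote word and the hardware callbacks are stand-ins, and the caller is assumed to provide the locking that soft_vote_lock gives the driver:

/* Shared between all voters; protected by the caller's lock. */
static unsigned int soft_vote;

static int soft_vote_enable(unsigned int my_mask, int (*hw_enable)(void))
{
        int ret = 0;

        if (!soft_vote)                 /* first voter enables the PLL */
                ret = hw_enable();
        if (!ret)
                soft_vote |= my_mask;
        return ret;
}

static void soft_vote_disable(unsigned int my_mask, void (*hw_disable)(void))
{
        soft_vote &= ~my_mask;
        if (!soft_vote)                 /* last voter disables the PLL */
                hw_disable();
}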
diff --git a/arch/arm/mach-msm/clock-copper.c b/arch/arm/mach-msm/clock-copper.c
index 87a8998..fcb69f0 100644
--- a/arch/arm/mach-msm/clock-copper.c
+++ b/arch/arm/mach-msm/clock-copper.c
@@ -588,39 +588,29 @@
static DEFINE_VDD_CLASS(vdd_dig, set_vdd_dig);
-static int cxo_clk_enable(struct clk *clk)
-{
- /* TODO: Remove from here once the rpm xo clock is ready. */
- return 0;
-}
+#define RPM_MISC_CLK_TYPE 0x306b6c63
+#define RPM_BUS_CLK_TYPE 0x316b6c63
+#define RPM_MEM_CLK_TYPE 0x326b6c63
-static void cxo_clk_disable(struct clk *clk)
-{
- /* TODO: Remove from here once the rpm xo clock is ready. */
- return;
-}
+#define CXO_ID 0x0
-static enum handoff cxo_clk_handoff(struct clk *clk)
-{
- /* TODO: Remove from here once the rpm xo clock is ready. */
- return HANDOFF_ENABLED_CLK;
-}
+#define PNOC_ID 0x0
+#define SNOC_ID 0x1
+#define CNOC_ID 0x2
-static struct clk_ops clk_ops_cxo = {
- .enable = cxo_clk_enable,
- .disable = cxo_clk_disable,
- .handoff = cxo_clk_handoff,
-};
+#define BIMC_ID 0x0
+#define OCMEM_ID 0x1
-static struct fixed_clk cxo_clk_src = {
- .c = {
- .rate = 19200000,
- .dbg_name = "cxo_clk_src",
- .ops = &clk_ops_cxo,
- .warned = true,
- CLK_INIT(cxo_clk_src.c),
- },
-};
+DEFINE_CLK_RPM_SMD(pnoc_clk, pnoc_a_clk, RPM_BUS_CLK_TYPE, PNOC_ID, NULL);
+DEFINE_CLK_RPM_SMD(snoc_clk, snoc_a_clk, RPM_BUS_CLK_TYPE, SNOC_ID, NULL);
+DEFINE_CLK_RPM_SMD(cnoc_clk, cnoc_a_clk, RPM_BUS_CLK_TYPE, CNOC_ID, NULL);
+
+DEFINE_CLK_RPM_SMD(bimc_clk, bimc_a_clk, RPM_MEM_CLK_TYPE, BIMC_ID, NULL);
+DEFINE_CLK_RPM_SMD(ocmemgx_clk, ocmemgx_a_clk, RPM_MEM_CLK_TYPE, OCMEM_ID,
+ NULL);
+
+DEFINE_CLK_RPM_SMD_BRANCH(cxo_clk_src, cxo_a_clk_src,
+ RPM_MISC_CLK_TYPE, CXO_ID, 19200000);
static struct pll_vote_clk gpll0_clk_src = {
.en_reg = (void __iomem *)APCS_GPLL_ENA_VOTE_REG,
@@ -715,24 +705,6 @@
},
};
-#define RPM_BUS_CLK_TYPE 0x316b6c63
-#define RPM_MEM_CLK_TYPE 0x326b6c63
-
-#define PNOC_ID 0x0
-#define SNOC_ID 0x1
-#define CNOC_ID 0x2
-
-#define BIMC_ID 0x0
-#define OCMEM_ID 0x1
-
-DEFINE_CLK_RPM_SMD(pnoc_clk, pnoc_a_clk, RPM_BUS_CLK_TYPE, PNOC_ID, NULL);
-DEFINE_CLK_RPM_SMD(snoc_clk, snoc_a_clk, RPM_BUS_CLK_TYPE, SNOC_ID, NULL);
-DEFINE_CLK_RPM_SMD(cnoc_clk, cnoc_a_clk, RPM_BUS_CLK_TYPE, CNOC_ID, NULL);
-
-DEFINE_CLK_RPM_SMD(bimc_clk, bimc_a_clk, RPM_MEM_CLK_TYPE, BIMC_ID, NULL);
-DEFINE_CLK_RPM_SMD(ocmemgx_clk, ocmemgx_a_clk, RPM_MEM_CLK_TYPE, OCMEM_ID,
- NULL);
-
static DEFINE_CLK_VOTER(pnoc_msmbus_clk, &pnoc_clk.c, LONG_MAX);
static DEFINE_CLK_VOTER(snoc_msmbus_clk, &snoc_clk.c, LONG_MAX);
static DEFINE_CLK_VOTER(cnoc_msmbus_clk, &cnoc_clk.c, LONG_MAX);
@@ -5045,6 +5017,7 @@
static void __init msmcopper_clock_post_init(void)
{
clk_set_rate(&axi_clk_src.c, 333330000);
+ clk_set_rate(&ocmemnoc_clk_src.c, 333330000);
/* Set rates for single-rate clocks. */
clk_set_rate(&usb30_master_clk_src.c,
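The RPM_MISC_CLK_TYPE/RPM_BUS_CLK_TYPE/RPM_MEM_CLK_TYPE values introduced above appear to be little-endian ASCII four-character codes ("clk0", "clk1", "clk2"). The hypothetical helper below is included only to show how the numbers decode; it is not something the driver defines:

/* Little-endian four-character code, byte 0 first. */
#define RPM_FOURCC(a, b, c, d) \
        (((unsigned int)(a))       | ((unsigned int)(b) << 8) | \
         ((unsigned int)(c) << 16) | ((unsigned int)(d) << 24))

/* RPM_FOURCC('c', 'l', 'k', '0') == 0x306b6c63  (RPM_MISC_CLK_TYPE) */
/* RPM_FOURCC('c', 'l', 'k', '1') == 0x316b6c63  (RPM_BUS_CLK_TYPE)  */
/* RPM_FOURCC('c', 'l', 'k', '2') == 0x326b6c63  (RPM_MEM_CLK_TYPE)  */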
diff --git a/arch/arm/mach-msm/clock-dss-8960.c b/arch/arm/mach-msm/clock-dss-8960.c
index 7f3646f..d9ad103 100644
--- a/arch/arm/mach-msm/clock-dss-8960.c
+++ b/arch/arm/mach-msm/clock-dss-8960.c
@@ -98,6 +98,7 @@
unsigned int val;
u32 ahb_en_reg, ahb_enabled;
unsigned int timeout_count;
+ int pll_lock_retry = 10;
ahb_en_reg = readl_relaxed(AHB_EN_REG);
ahb_enabled = ahb_en_reg & BIT(4);
@@ -149,7 +150,7 @@
timeout_count = 1000;
while (!(readl_relaxed(HDMI_PHY_PLL_STATUS0) & BIT(0)) &&
- timeout_count) {
+ timeout_count && pll_lock_retry) {
if (--timeout_count == 0) {
/*
* PLL has still not locked.
@@ -166,16 +167,18 @@
udelay(10);
writel_relaxed(0x0D, HDMI_PHY_PLL_LOCKDET_CFG2);
timeout_count = 1000;
-
- pr_err("%s: PLL not locked after %d iterations\n",
- __func__, timeout_count);
- pr_err("%s: Asserting PLL S/W reset & trying again\n",
- __func__);
+ pll_lock_retry--;
}
}
if (!ahb_enabled)
writel_relaxed(ahb_en_reg & ~BIT(4), AHB_EN_REG);
+
+ if (!pll_lock_retry) {
+ pr_err("%s: HDMI PLL not locked\n", __func__);
+ return -EAGAIN;
+ }
+
hdmi_pll_on = 1;
return 0;
}
@@ -233,24 +236,19 @@
switch (rate) {
case 27030000:
/* 480p60/480i60 case */
- writel_relaxed(0x32, HDMI_PHY_PLL_REFCLK_CFG);
+ writel_relaxed(0xA, HDMI_PHY_PLL_PWRDN_B);
+ writel_relaxed(0x38, HDMI_PHY_PLL_REFCLK_CFG);
writel_relaxed(0x2, HDMI_PHY_PLL_CHRG_PUMP_CFG);
- writel_relaxed(0x08, HDMI_PHY_PLL_LOOP_FLT_CFG0);
- writel_relaxed(0x77, HDMI_PHY_PLL_LOOP_FLT_CFG1);
- writel_relaxed(0x2C, HDMI_PHY_PLL_IDAC_ADJ_CFG);
- writel_relaxed(0x6, HDMI_PHY_PLL_I_VI_KVCO_CFG);
- writel_relaxed(0x7b, HDMI_PHY_PLL_SDM_CFG0);
- writel_relaxed(0x01, HDMI_PHY_PLL_SDM_CFG1);
- writel_relaxed(0x4C, HDMI_PHY_PLL_SDM_CFG2);
- writel_relaxed(0xC0, HDMI_PHY_PLL_SDM_CFG3);
+ writel_relaxed(0x20, HDMI_PHY_PLL_LOOP_FLT_CFG0);
+ writel_relaxed(0xFF, HDMI_PHY_PLL_LOOP_FLT_CFG1);
+ writel_relaxed(0x00, HDMI_PHY_PLL_SDM_CFG0);
+ writel_relaxed(0x4E, HDMI_PHY_PLL_SDM_CFG1);
+ writel_relaxed(0xD7, HDMI_PHY_PLL_SDM_CFG2);
+ writel_relaxed(0x03, HDMI_PHY_PLL_SDM_CFG3);
writel_relaxed(0x00, HDMI_PHY_PLL_SDM_CFG4);
- writel_relaxed(0x9A, HDMI_PHY_PLL_SSC_CFG0);
- writel_relaxed(0x00, HDMI_PHY_PLL_SSC_CFG1);
- writel_relaxed(0x00, HDMI_PHY_PLL_SSC_CFG2);
- writel_relaxed(0x00, HDMI_PHY_PLL_SSC_CFG3);
writel_relaxed(0x2A, HDMI_PHY_PLL_VCOCAL_CFG0);
writel_relaxed(0x03, HDMI_PHY_PLL_VCOCAL_CFG1);
- writel_relaxed(0x2B, HDMI_PHY_PLL_VCOCAL_CFG2);
+ writel_relaxed(0x3B, HDMI_PHY_PLL_VCOCAL_CFG2);
writel_relaxed(0x00, HDMI_PHY_PLL_VCOCAL_CFG3);
writel_relaxed(0x86, HDMI_PHY_PLL_VCOCAL_CFG4);
writel_relaxed(0x00, HDMI_PHY_PLL_VCOCAL_CFG5);
@@ -266,6 +264,7 @@
writel_relaxed(0x33, HDMI_PHY_PLL_LOOP_FLT_CFG1);
writel_relaxed(0x2C, HDMI_PHY_PLL_IDAC_ADJ_CFG);
writel_relaxed(0x6, HDMI_PHY_PLL_I_VI_KVCO_CFG);
+ writel_relaxed(0xA, HDMI_PHY_PLL_PWRDN_B);
writel_relaxed(0x77, HDMI_PHY_PLL_SDM_CFG0);
writel_relaxed(0x4C, HDMI_PHY_PLL_SDM_CFG1);
writel_relaxed(0x00, HDMI_PHY_PLL_SDM_CFG2);
@@ -275,9 +274,12 @@
writel_relaxed(0x00, HDMI_PHY_PLL_SSC_CFG1);
writel_relaxed(0x00, HDMI_PHY_PLL_SSC_CFG2);
writel_relaxed(0x20, HDMI_PHY_PLL_SSC_CFG3);
+ writel_relaxed(0x10, HDMI_PHY_PLL_LOCKDET_CFG0);
+ writel_relaxed(0x1A, HDMI_PHY_PLL_LOCKDET_CFG1);
+ writel_relaxed(0x0D, HDMI_PHY_PLL_LOCKDET_CFG2);
writel_relaxed(0xF4, HDMI_PHY_PLL_VCOCAL_CFG0);
writel_relaxed(0x02, HDMI_PHY_PLL_VCOCAL_CFG1);
- writel_relaxed(0x2B, HDMI_PHY_PLL_VCOCAL_CFG2);
+ writel_relaxed(0x3B, HDMI_PHY_PLL_VCOCAL_CFG2);
writel_relaxed(0x00, HDMI_PHY_PLL_VCOCAL_CFG3);
writel_relaxed(0x86, HDMI_PHY_PLL_VCOCAL_CFG4);
writel_relaxed(0x00, HDMI_PHY_PLL_VCOCAL_CFG5);
@@ -293,6 +295,7 @@
writel_relaxed(0x33, HDMI_PHY_PLL_LOOP_FLT_CFG1);
writel_relaxed(0x2C, HDMI_PHY_PLL_IDAC_ADJ_CFG);
writel_relaxed(0x6, HDMI_PHY_PLL_I_VI_KVCO_CFG);
+ writel_relaxed(0xA, HDMI_PHY_PLL_PWRDN_B);
writel_relaxed(0x7B, HDMI_PHY_PLL_SDM_CFG0);
writel_relaxed(0x01, HDMI_PHY_PLL_SDM_CFG1);
writel_relaxed(0x4C, HDMI_PHY_PLL_SDM_CFG2);
@@ -302,9 +305,12 @@
writel_relaxed(0x00, HDMI_PHY_PLL_SSC_CFG1);
writel_relaxed(0x00, HDMI_PHY_PLL_SSC_CFG2);
writel_relaxed(0x00, HDMI_PHY_PLL_SSC_CFG3);
+ writel_relaxed(0x10, HDMI_PHY_PLL_LOCKDET_CFG0);
+ writel_relaxed(0x1A, HDMI_PHY_PLL_LOCKDET_CFG1);
+ writel_relaxed(0x0D, HDMI_PHY_PLL_LOCKDET_CFG2);
writel_relaxed(0x2a, HDMI_PHY_PLL_VCOCAL_CFG0);
writel_relaxed(0x03, HDMI_PHY_PLL_VCOCAL_CFG1);
- writel_relaxed(0x2B, HDMI_PHY_PLL_VCOCAL_CFG2);
+ writel_relaxed(0x3B, HDMI_PHY_PLL_VCOCAL_CFG2);
writel_relaxed(0x00, HDMI_PHY_PLL_VCOCAL_CFG3);
writel_relaxed(0x86, HDMI_PHY_PLL_VCOCAL_CFG4);
writel_relaxed(0x00, HDMI_PHY_PLL_VCOCAL_CFG5);
@@ -316,12 +322,14 @@
/* 720p60/720p50/1080i60/1080i50
* 1080p24/1080p30/1080p25 case
*/
+ writel_relaxed(0xA, HDMI_PHY_PLL_PWRDN_B);
writel_relaxed(0x12, HDMI_PHY_PLL_REFCLK_CFG);
writel_relaxed(0x01, HDMI_PHY_PLL_LOOP_FLT_CFG0);
writel_relaxed(0x33, HDMI_PHY_PLL_LOOP_FLT_CFG1);
writel_relaxed(0x76, HDMI_PHY_PLL_SDM_CFG0);
writel_relaxed(0xE6, HDMI_PHY_PLL_VCOCAL_CFG0);
writel_relaxed(0x02, HDMI_PHY_PLL_VCOCAL_CFG1);
+ writel_relaxed(0x3B, HDMI_PHY_PLL_VCOCAL_CFG2);
break;
case 148500000:
@@ -332,6 +340,7 @@
writel_relaxed(0x33, HDMI_PHY_PLL_LOOP_FLT_CFG1);
writel_relaxed(0x2C, HDMI_PHY_PLL_IDAC_ADJ_CFG);
writel_relaxed(0x6, HDMI_PHY_PLL_I_VI_KVCO_CFG);
+ writel_relaxed(0xA, HDMI_PHY_PLL_PWRDN_B);
writel_relaxed(0x76, HDMI_PHY_PLL_SDM_CFG0);
writel_relaxed(0x01, HDMI_PHY_PLL_SDM_CFG1);
writel_relaxed(0x4C, HDMI_PHY_PLL_SDM_CFG2);
@@ -341,9 +350,12 @@
writel_relaxed(0x00, HDMI_PHY_PLL_SSC_CFG1);
writel_relaxed(0x00, HDMI_PHY_PLL_SSC_CFG2);
writel_relaxed(0x00, HDMI_PHY_PLL_SSC_CFG3);
+ writel_relaxed(0x10, HDMI_PHY_PLL_LOCKDET_CFG0);
+ writel_relaxed(0x1A, HDMI_PHY_PLL_LOCKDET_CFG1);
+ writel_relaxed(0x0D, HDMI_PHY_PLL_LOCKDET_CFG2);
writel_relaxed(0xe6, HDMI_PHY_PLL_VCOCAL_CFG0);
writel_relaxed(0x02, HDMI_PHY_PLL_VCOCAL_CFG1);
- writel_relaxed(0x2B, HDMI_PHY_PLL_VCOCAL_CFG2);
+ writel_relaxed(0x3B, HDMI_PHY_PLL_VCOCAL_CFG2);
writel_relaxed(0x00, HDMI_PHY_PLL_VCOCAL_CFG3);
writel_relaxed(0x86, HDMI_PHY_PLL_VCOCAL_CFG4);
writel_relaxed(0x00, HDMI_PHY_PLL_VCOCAL_CFG5);
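The HDMI PLL enable change above bounds the previously open-ended lock wait: each 1000-iteration poll of HDMI_PHY_PLL_STATUS0 may be retried at most ten times, with a PLL software reset between attempts, and the function now fails with -EAGAIN instead of looping forever. The sketch below shows the same shape with stand-in callbacks for the register accesses; it is an illustration of the structure, not the driver code itself:

#include <errno.h>

static int wait_for_pll_lock(int (*locked)(void), void (*sw_reset_pll)(void))
{
        int retries = 10;

        while (retries--) {
                int timeout = 1000;

                while (timeout-- && !locked())
                        ;               /* udelay(10) between polls in the driver */
                if (locked())
                        return 0;

                sw_reset_pll();         /* assert S/W reset and try again */
        }
        return -EAGAIN;
}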
diff --git a/arch/arm/mach-msm/clock-fsm9xxx.c b/arch/arm/mach-msm/clock-fsm9xxx.c
index 13a5b65..c188ba6 100644
--- a/arch/arm/mach-msm/clock-fsm9xxx.c
+++ b/arch/arm/mach-msm/clock-fsm9xxx.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -24,6 +24,7 @@
static struct clk_lookup msm_clocks_fsm9xxx[] = {
CLK_DUMMY("core_clk", ADM0_CLK, "msm_dmov", OFF),
CLK_DUMMY("core_clk", UART1_CLK, "msm_serial.0", OFF),
+ CLK_DUMMY("core_clk", UART3_CLK, "msm_uim.2", OFF),
CLK_DUMMY("core_clk", CE_CLK, "qce.0", OFF),
CLK_DUMMY("core_clk", CE_CLK, "qcota.0", OFF),
CLK_DUMMY("core_clk", CE_CLK, "qcrypto.0", OFF),
diff --git a/arch/arm/mach-msm/clock-local.c b/arch/arm/mach-msm/clock-local.c
index b5ae4ab..0f9404b 100644
--- a/arch/arm/mach-msm/clock-local.c
+++ b/arch/arm/mach-msm/clock-local.c
@@ -53,32 +53,32 @@
*/
/* For clocks with MND dividers. */
-void set_rate_mnd(struct rcg_clk *clk, struct clk_freq_tbl *nf)
+void set_rate_mnd(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
uint32_t ns_reg_val, ctl_reg_val;
/* Assert MND reset. */
- ns_reg_val = readl_relaxed(clk->ns_reg);
+ ns_reg_val = readl_relaxed(rcg->ns_reg);
ns_reg_val |= BIT(7);
- writel_relaxed(ns_reg_val, clk->ns_reg);
+ writel_relaxed(ns_reg_val, rcg->ns_reg);
/* Program M and D values. */
- writel_relaxed(nf->md_val, clk->md_reg);
+ writel_relaxed(nf->md_val, rcg->md_reg);
/* If the clock has a separate CC register, program it. */
- if (clk->ns_reg != clk->b.ctl_reg) {
- ctl_reg_val = readl_relaxed(clk->b.ctl_reg);
- ctl_reg_val &= ~(clk->ctl_mask);
+ if (rcg->ns_reg != rcg->b.ctl_reg) {
+ ctl_reg_val = readl_relaxed(rcg->b.ctl_reg);
+ ctl_reg_val &= ~(rcg->ctl_mask);
ctl_reg_val |= nf->ctl_val;
- writel_relaxed(ctl_reg_val, clk->b.ctl_reg);
+ writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);
}
/* Deassert MND reset. */
ns_reg_val &= ~BIT(7);
- writel_relaxed(ns_reg_val, clk->ns_reg);
+ writel_relaxed(ns_reg_val, rcg->ns_reg);
}
-void set_rate_nop(struct rcg_clk *clk, struct clk_freq_tbl *nf)
+void set_rate_nop(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
/*
* Nothing to do for fixed-rate or integer-divider clocks. Any settings
@@ -88,31 +88,31 @@
*/
}
-void set_rate_mnd_8(struct rcg_clk *clk, struct clk_freq_tbl *nf)
+void set_rate_mnd_8(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
uint32_t ctl_reg_val;
/* Assert MND reset. */
- ctl_reg_val = readl_relaxed(clk->b.ctl_reg);
+ ctl_reg_val = readl_relaxed(rcg->b.ctl_reg);
ctl_reg_val |= BIT(8);
- writel_relaxed(ctl_reg_val, clk->b.ctl_reg);
+ writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);
/* Program M and D values. */
- writel_relaxed(nf->md_val, clk->md_reg);
+ writel_relaxed(nf->md_val, rcg->md_reg);
/* Program MN counter Enable and Mode. */
- ctl_reg_val &= ~(clk->ctl_mask);
+ ctl_reg_val &= ~(rcg->ctl_mask);
ctl_reg_val |= nf->ctl_val;
- writel_relaxed(ctl_reg_val, clk->b.ctl_reg);
+ writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);
/* Deassert MND reset. */
ctl_reg_val &= ~BIT(8);
- writel_relaxed(ctl_reg_val, clk->b.ctl_reg);
+ writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);
}
-void set_rate_mnd_banked(struct rcg_clk *clk, struct clk_freq_tbl *nf)
+void set_rate_mnd_banked(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
- struct bank_masks *banks = clk->bank_info;
+ struct bank_masks *banks = rcg->bank_info;
const struct bank_mask_info *new_bank_masks;
const struct bank_mask_info *old_bank_masks;
uint32_t ns_reg_val, ctl_reg_val;
@@ -123,10 +123,10 @@
* off, program the active bank since bank switching won't work if
* both banks aren't running.
*/
- ctl_reg_val = readl_relaxed(clk->b.ctl_reg);
+ ctl_reg_val = readl_relaxed(rcg->b.ctl_reg);
bank_sel = !!(ctl_reg_val & banks->bank_sel_mask);
/* If clock isn't running, don't switch banks. */
- bank_sel ^= (!clk->enabled || clk->current_freq->freq_hz == 0);
+ bank_sel ^= (!rcg->enabled || rcg->current_freq->freq_hz == 0);
if (bank_sel == 0) {
new_bank_masks = &banks->bank1_mask;
old_bank_masks = &banks->bank0_mask;
@@ -135,46 +135,46 @@
old_bank_masks = &banks->bank1_mask;
}
- ns_reg_val = readl_relaxed(clk->ns_reg);
+ ns_reg_val = readl_relaxed(rcg->ns_reg);
/* Assert bank MND reset. */
ns_reg_val |= new_bank_masks->rst_mask;
- writel_relaxed(ns_reg_val, clk->ns_reg);
+ writel_relaxed(ns_reg_val, rcg->ns_reg);
/*
* Program NS only if the clock is enabled, since the NS will be set
* as part of the enable procedure and should remain with a low-power
* MUX input selected until then.
*/
- if (clk->enabled) {
+ if (rcg->enabled) {
ns_reg_val &= ~(new_bank_masks->ns_mask);
ns_reg_val |= (nf->ns_val & new_bank_masks->ns_mask);
- writel_relaxed(ns_reg_val, clk->ns_reg);
+ writel_relaxed(ns_reg_val, rcg->ns_reg);
}
writel_relaxed(nf->md_val, new_bank_masks->md_reg);
/* Enable counter only if clock is enabled. */
- if (clk->enabled)
+ if (rcg->enabled)
ctl_reg_val |= new_bank_masks->mnd_en_mask;
else
ctl_reg_val &= ~(new_bank_masks->mnd_en_mask);
ctl_reg_val &= ~(new_bank_masks->mode_mask);
ctl_reg_val |= (nf->ctl_val & new_bank_masks->mode_mask);
- writel_relaxed(ctl_reg_val, clk->b.ctl_reg);
+ writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);
/* Deassert bank MND reset. */
ns_reg_val &= ~(new_bank_masks->rst_mask);
- writel_relaxed(ns_reg_val, clk->ns_reg);
+ writel_relaxed(ns_reg_val, rcg->ns_reg);
/*
* Switch to the new bank if clock is running. If it isn't, then
* no switch is necessary since we programmed the active bank.
*/
- if (clk->enabled && clk->current_freq->freq_hz) {
+ if (rcg->enabled && rcg->current_freq->freq_hz) {
ctl_reg_val ^= banks->bank_sel_mask;
- writel_relaxed(ctl_reg_val, clk->b.ctl_reg);
+ writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);
/*
* Wait at least 6 cycles of slowest bank's clock
* for the glitch-free MUX to fully switch sources.
@@ -184,22 +184,22 @@
/* Disable old bank's MN counter. */
ctl_reg_val &= ~(old_bank_masks->mnd_en_mask);
- writel_relaxed(ctl_reg_val, clk->b.ctl_reg);
+ writel_relaxed(ctl_reg_val, rcg->b.ctl_reg);
/* Program old bank to a low-power source and divider. */
ns_reg_val &= ~(old_bank_masks->ns_mask);
- ns_reg_val |= (clk->freq_tbl->ns_val & old_bank_masks->ns_mask);
- writel_relaxed(ns_reg_val, clk->ns_reg);
+ ns_reg_val |= (rcg->freq_tbl->ns_val & old_bank_masks->ns_mask);
+ writel_relaxed(ns_reg_val, rcg->ns_reg);
}
/* Update the MND_EN and NS masks to match the current bank. */
- clk->mnd_en_mask = new_bank_masks->mnd_en_mask;
- clk->ns_mask = new_bank_masks->ns_mask;
+ rcg->mnd_en_mask = new_bank_masks->mnd_en_mask;
+ rcg->ns_mask = new_bank_masks->ns_mask;
}
-void set_rate_div_banked(struct rcg_clk *clk, struct clk_freq_tbl *nf)
+void set_rate_div_banked(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
- struct bank_masks *banks = clk->bank_info;
+ struct bank_masks *banks = rcg->bank_info;
const struct bank_mask_info *new_bank_masks;
const struct bank_mask_info *old_bank_masks;
uint32_t ns_reg_val, bank_sel;
@@ -209,10 +209,10 @@
* off, program the active bank since bank switching won't work if
* both banks aren't running.
*/
- ns_reg_val = readl_relaxed(clk->ns_reg);
+ ns_reg_val = readl_relaxed(rcg->ns_reg);
bank_sel = !!(ns_reg_val & banks->bank_sel_mask);
/* If clock isn't running, don't switch banks. */
- bank_sel ^= (!clk->enabled || clk->current_freq->freq_hz == 0);
+ bank_sel ^= (!rcg->enabled || rcg->current_freq->freq_hz == 0);
if (bank_sel == 0) {
new_bank_masks = &banks->bank1_mask;
old_bank_masks = &banks->bank0_mask;
@@ -226,19 +226,19 @@
* as part of the enable procedure and should remain with a low-power
* MUX input selected until then.
*/
- if (clk->enabled) {
+ if (rcg->enabled) {
ns_reg_val &= ~(new_bank_masks->ns_mask);
ns_reg_val |= (nf->ns_val & new_bank_masks->ns_mask);
- writel_relaxed(ns_reg_val, clk->ns_reg);
+ writel_relaxed(ns_reg_val, rcg->ns_reg);
}
/*
* Switch to the new bank if clock is running. If it isn't, then
* no switch is necessary since we programmed the active bank.
*/
- if (clk->enabled && clk->current_freq->freq_hz) {
+ if (rcg->enabled && rcg->current_freq->freq_hz) {
ns_reg_val ^= banks->bank_sel_mask;
- writel_relaxed(ns_reg_val, clk->ns_reg);
+ writel_relaxed(ns_reg_val, rcg->ns_reg);
/*
* Wait at least 6 cycles of slowest bank's clock
* for the glitch-free MUX to fully switch sources.
@@ -248,12 +248,12 @@
/* Program old bank to a low-power source and divider. */
ns_reg_val &= ~(old_bank_masks->ns_mask);
- ns_reg_val |= (clk->freq_tbl->ns_val & old_bank_masks->ns_mask);
- writel_relaxed(ns_reg_val, clk->ns_reg);
+ ns_reg_val |= (rcg->freq_tbl->ns_val & old_bank_masks->ns_mask);
+ writel_relaxed(ns_reg_val, rcg->ns_reg);
}
/* Update the NS mask to match the current bank. */
- clk->ns_mask = new_bank_masks->ns_mask;
+ rcg->ns_mask = new_bank_masks->ns_mask;
}
/*
@@ -261,10 +261,10 @@
*/
/* Return non-zero if a clock status registers shows the clock is halted. */
(comment as intended: return non-zero if a clock's status register shows the clock is halted)
-static int branch_clk_is_halted(const struct branch *clk)
+static int branch_clk_is_halted(const struct branch *b)
{
- int invert = (clk->halt_check == ENABLE);
- int status_bit = readl_relaxed(clk->halt_reg) & BIT(clk->halt_bit);
+ int invert = (b->halt_check == ENABLE);
+ int status_bit = readl_relaxed(b->halt_reg) & BIT(b->halt_bit);
return invert ? !status_bit : status_bit;
}
@@ -276,14 +276,14 @@
return !!(readl_relaxed(b->hwcg_reg) & b->hwcg_mask);
}
-void __branch_clk_enable_reg(const struct branch *clk, const char *name)
+void __branch_enable_reg(const struct branch *b, const char *name)
{
u32 reg_val;
- if (clk->en_mask) {
- reg_val = readl_relaxed(clk->ctl_reg);
- reg_val |= clk->en_mask;
- writel_relaxed(reg_val, clk->ctl_reg);
+ if (b->en_mask) {
+ reg_val = readl_relaxed(b->ctl_reg);
+ reg_val |= b->en_mask;
+ writel_relaxed(reg_val, b->ctl_reg);
}
/*
@@ -295,19 +295,19 @@
mb();
/* Skip checking halt bit if the clock is in hardware gated mode */
- if (branch_in_hwcg_mode(clk))
+ if (branch_in_hwcg_mode(b))
return;
/* Wait for clock to enable before returning. */
- if (clk->halt_check == DELAY)
+ if (b->halt_check == DELAY) {
udelay(HALT_CHECK_DELAY_US);
- else if (clk->halt_check == ENABLE || clk->halt_check == HALT
- || clk->halt_check == ENABLE_VOTED
- || clk->halt_check == HALT_VOTED) {
+ } else if (b->halt_check == ENABLE || b->halt_check == HALT
+ || b->halt_check == ENABLE_VOTED
+ || b->halt_check == HALT_VOTED) {
int count;
/* Wait up to HALT_CHECK_MAX_LOOPS for clock to enable. */
- for (count = HALT_CHECK_MAX_LOOPS; branch_clk_is_halted(clk)
+ for (count = HALT_CHECK_MAX_LOOPS; branch_clk_is_halted(b)
&& count > 0; count--)
udelay(1);
WARN(count == 0, "%s status stuck at 'off'", name);
@@ -315,50 +315,50 @@
}
/* Perform any register operations required to enable the clock. */
-static void __rcg_clk_enable_reg(struct rcg_clk *clk)
+static void __rcg_clk_enable_reg(struct rcg_clk *rcg)
{
u32 reg_val;
- void __iomem *const reg = clk->b.ctl_reg;
+ void __iomem *const reg = rcg->b.ctl_reg;
- WARN(clk->current_freq == &rcg_dummy_freq,
+ WARN(rcg->current_freq == &rcg_dummy_freq,
"Attempting to enable %s before setting its rate. "
- "Set the rate first!\n", clk->c.dbg_name);
+ "Set the rate first!\n", rcg->c.dbg_name);
/*
* Program the NS register, if applicable. NS registers are not
* set in the set_rate path because power can be saved by deferring
* the selection of a clocked source until the clock is enabled.
*/
- if (clk->ns_mask) {
- reg_val = readl_relaxed(clk->ns_reg);
- reg_val &= ~(clk->ns_mask);
- reg_val |= (clk->current_freq->ns_val & clk->ns_mask);
- writel_relaxed(reg_val, clk->ns_reg);
+ if (rcg->ns_mask) {
+ reg_val = readl_relaxed(rcg->ns_reg);
+ reg_val &= ~(rcg->ns_mask);
+ reg_val |= (rcg->current_freq->ns_val & rcg->ns_mask);
+ writel_relaxed(reg_val, rcg->ns_reg);
}
/* Enable MN counter, if applicable. */
reg_val = readl_relaxed(reg);
- if (clk->current_freq->md_val) {
- reg_val |= clk->mnd_en_mask;
+ if (rcg->current_freq->md_val) {
+ reg_val |= rcg->mnd_en_mask;
writel_relaxed(reg_val, reg);
}
/* Enable root. */
- if (clk->root_en_mask) {
- reg_val |= clk->root_en_mask;
+ if (rcg->root_en_mask) {
+ reg_val |= rcg->root_en_mask;
writel_relaxed(reg_val, reg);
}
- __branch_clk_enable_reg(&clk->b, clk->c.dbg_name);
+ __branch_enable_reg(&rcg->b, rcg->c.dbg_name);
}
/* Perform any register operations required to disable the branch. */
-u32 __branch_clk_disable_reg(const struct branch *clk, const char *name)
+u32 __branch_disable_reg(const struct branch *b, const char *name)
{
u32 reg_val;
- reg_val = readl_relaxed(clk->ctl_reg);
- if (clk->en_mask) {
- reg_val &= ~(clk->en_mask);
- writel_relaxed(reg_val, clk->ctl_reg);
+ reg_val = readl_relaxed(b->ctl_reg);
+ if (b->en_mask) {
+ reg_val &= ~(b->en_mask);
+ writel_relaxed(reg_val, b->ctl_reg);
}
/*
@@ -370,18 +370,18 @@
mb();
/* Skip checking halt bit if the clock is in hardware gated mode */
- if (branch_in_hwcg_mode(clk))
+ if (branch_in_hwcg_mode(b))
return reg_val;
/* Wait for clock to disable before continuing. */
- if (clk->halt_check == DELAY || clk->halt_check == ENABLE_VOTED
- || clk->halt_check == HALT_VOTED)
+ if (b->halt_check == DELAY || b->halt_check == ENABLE_VOTED
+ || b->halt_check == HALT_VOTED) {
udelay(HALT_CHECK_DELAY_US);
- else if (clk->halt_check == ENABLE || clk->halt_check == HALT) {
+ } else if (b->halt_check == ENABLE || b->halt_check == HALT) {
int count;
/* Wait up to HALT_CHECK_MAX_LOOPS for clock to disable. */
- for (count = HALT_CHECK_MAX_LOOPS; !branch_clk_is_halted(clk)
+ for (count = HALT_CHECK_MAX_LOOPS; !branch_clk_is_halted(b)
&& count > 0; count--)
udelay(1);
WARN(count == 0, "%s status stuck at 'on'", name);
@@ -391,31 +391,31 @@
}
/* Perform any register operations required to disable the generator. */
-static void __rcg_clk_disable_reg(struct rcg_clk *clk)
+static void __rcg_clk_disable_reg(struct rcg_clk *rcg)
{
- void __iomem *const reg = clk->b.ctl_reg;
+ void __iomem *const reg = rcg->b.ctl_reg;
uint32_t reg_val;
- reg_val = __branch_clk_disable_reg(&clk->b, clk->c.dbg_name);
+ reg_val = __branch_disable_reg(&rcg->b, rcg->c.dbg_name);
/* Disable root. */
- if (clk->root_en_mask) {
- reg_val &= ~(clk->root_en_mask);
+ if (rcg->root_en_mask) {
+ reg_val &= ~(rcg->root_en_mask);
writel_relaxed(reg_val, reg);
}
/* Disable MN counter, if applicable. */
- if (clk->current_freq->md_val) {
- reg_val &= ~(clk->mnd_en_mask);
+ if (rcg->current_freq->md_val) {
+ reg_val &= ~(rcg->mnd_en_mask);
writel_relaxed(reg_val, reg);
}
/*
* Program NS register to low-power value with an un-clocked or
* slowly-clocked source selected.
*/
- if (clk->ns_mask) {
- reg_val = readl_relaxed(clk->ns_reg);
- reg_val &= ~(clk->ns_mask);
- reg_val |= (clk->freq_tbl->ns_val & clk->ns_mask);
- writel_relaxed(reg_val, clk->ns_reg);
+ if (rcg->ns_mask) {
+ reg_val = readl_relaxed(rcg->ns_reg);
+ reg_val &= ~(rcg->ns_mask);
+ reg_val |= (rcg->freq_tbl->ns_val & rcg->ns_mask);
+ writel_relaxed(reg_val, rcg->ns_reg);
}
}
@@ -423,11 +423,11 @@
static int rcg_clk_enable(struct clk *c)
{
unsigned long flags;
- struct rcg_clk *clk = to_rcg_clk(c);
+ struct rcg_clk *rcg = to_rcg_clk(c);
spin_lock_irqsave(&local_clock_reg_lock, flags);
- __rcg_clk_enable_reg(clk);
- clk->enabled = true;
+ __rcg_clk_enable_reg(rcg);
+ rcg->enabled = true;
spin_unlock_irqrestore(&local_clock_reg_lock, flags);
return 0;
@@ -437,11 +437,11 @@
static void rcg_clk_disable(struct clk *c)
{
unsigned long flags;
- struct rcg_clk *clk = to_rcg_clk(c);
+ struct rcg_clk *rcg = to_rcg_clk(c);
spin_lock_irqsave(&local_clock_reg_lock, flags);
- __rcg_clk_disable_reg(clk);
- clk->enabled = false;
+ __rcg_clk_disable_reg(rcg);
+ rcg->enabled = false;
spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}
@@ -452,21 +452,21 @@
/* Set a clock to an exact rate. */
static int rcg_clk_set_rate(struct clk *c, unsigned long rate)
{
- struct rcg_clk *clk = to_rcg_clk(c);
+ struct rcg_clk *rcg = to_rcg_clk(c);
struct clk_freq_tbl *nf, *cf;
struct clk *chld;
int rc = 0;
- for (nf = clk->freq_tbl; nf->freq_hz != FREQ_END
+ for (nf = rcg->freq_tbl; nf->freq_hz != FREQ_END
&& nf->freq_hz != rate; nf++)
;
if (nf->freq_hz == FREQ_END)
return -EINVAL;
- cf = clk->current_freq;
+ cf = rcg->current_freq;
- if (clk->enabled) {
+ if (rcg->enabled) {
/* Enable source clock dependency for the new freq. */
rc = clk_enable(nf->src_clk);
if (rc)
@@ -476,9 +476,9 @@
spin_lock(&local_clock_reg_lock);
/* Disable branch if clock isn't dual-banked with a glitch-free MUX. */
- if (!clk->bank_info) {
+ if (!rcg->bank_info) {
/* Disable all branches to prevent glitches. */
- list_for_each_entry(chld, &clk->c.children, siblings) {
+ list_for_each_entry(chld, &rcg->c.children, siblings) {
struct branch_clk *x = to_branch_clk(chld);
/*
* We don't need to grab the child's lock because
@@ -486,56 +486,56 @@
* only modified within lock.
*/
if (x->enabled)
- __branch_clk_disable_reg(&x->b, x->c.dbg_name);
+ __branch_disable_reg(&x->b, x->c.dbg_name);
}
- if (clk->enabled)
- __rcg_clk_disable_reg(clk);
+ if (rcg->enabled)
+ __rcg_clk_disable_reg(rcg);
}
/* Perform clock-specific frequency switch operations. */
- BUG_ON(!clk->set_rate);
- clk->set_rate(clk, nf);
+ BUG_ON(!rcg->set_rate);
+ rcg->set_rate(rcg, nf);
/*
* Current freq must be updated before __rcg_clk_enable_reg()
* is called to make sure the MNCNTR_EN bit is set correctly.
*/
- clk->current_freq = nf;
+ rcg->current_freq = nf;
/* Enable any clocks that were disabled. */
- if (!clk->bank_info) {
- if (clk->enabled)
- __rcg_clk_enable_reg(clk);
+ if (!rcg->bank_info) {
+ if (rcg->enabled)
+ __rcg_clk_enable_reg(rcg);
/* Enable only branches that were ON before. */
- list_for_each_entry(chld, &clk->c.children, siblings) {
+ list_for_each_entry(chld, &rcg->c.children, siblings) {
struct branch_clk *x = to_branch_clk(chld);
if (x->enabled)
- __branch_clk_enable_reg(&x->b, x->c.dbg_name);
+ __branch_enable_reg(&x->b, x->c.dbg_name);
}
}
spin_unlock(&local_clock_reg_lock);
/* Release source requirements of the old freq. */
- if (clk->enabled)
+ if (rcg->enabled)
clk_disable(cf->src_clk);
return rc;
}
/* Check if a clock is currently enabled. */
-static int rcg_clk_is_enabled(struct clk *clk)
+static int rcg_clk_is_enabled(struct clk *c)
{
- return to_rcg_clk(clk)->enabled;
+ return to_rcg_clk(c)->enabled;
}
/* Return a supported rate that's at least the specified rate. */
static long rcg_clk_round_rate(struct clk *c, unsigned long rate)
{
- struct rcg_clk *clk = to_rcg_clk(c);
+ struct rcg_clk *rcg = to_rcg_clk(c);
struct clk_freq_tbl *f;
- for (f = clk->freq_tbl; f->freq_hz != FREQ_END; f++)
+ for (f = rcg->freq_tbl; f->freq_hz != FREQ_END; f++)
if (f->freq_hz >= rate)
return f->freq_hz;
@@ -545,26 +545,26 @@
/* Return the nth supported frequency for a given clock. */
static int rcg_clk_list_rate(struct clk *c, unsigned n)
{
- struct rcg_clk *clk = to_rcg_clk(c);
+ struct rcg_clk *rcg = to_rcg_clk(c);
- if (!clk->freq_tbl || clk->freq_tbl->freq_hz == FREQ_END)
+ if (!rcg->freq_tbl || rcg->freq_tbl->freq_hz == FREQ_END)
return -ENXIO;
- return (clk->freq_tbl + n)->freq_hz;
+ return (rcg->freq_tbl + n)->freq_hz;
}
-static struct clk *rcg_clk_get_parent(struct clk *clk)
+static struct clk *rcg_clk_get_parent(struct clk *c)
{
- return to_rcg_clk(clk)->current_freq->src_clk;
+ return to_rcg_clk(c)->current_freq->src_clk;
}
/* Disable hw clock gating if not set at boot */
-enum handoff branch_handoff(struct branch *clk, struct clk *c)
+enum handoff branch_handoff(struct branch *b, struct clk *c)
{
- if (!branch_in_hwcg_mode(clk)) {
- clk->hwcg_mask = 0;
+ if (!branch_in_hwcg_mode(b)) {
+ b->hwcg_mask = 0;
c->flags &= ~CLKFLAG_HWCG;
- if (readl_relaxed(clk->ctl_reg) & clk->en_mask)
+ if (readl_relaxed(b->ctl_reg) & b->en_mask)
return HANDOFF_ENABLED_CLK;
} else {
c->flags |= CLKFLAG_HWCG;
@@ -574,24 +574,24 @@
static enum handoff branch_clk_handoff(struct clk *c)
{
- struct branch_clk *clk = to_branch_clk(c);
- return branch_handoff(&clk->b, &clk->c);
+ struct branch_clk *br = to_branch_clk(c);
+ return branch_handoff(&br->b, &br->c);
}
static enum handoff rcg_clk_handoff(struct clk *c)
{
- struct rcg_clk *clk = to_rcg_clk(c);
+ struct rcg_clk *rcg = to_rcg_clk(c);
uint32_t ctl_val, ns_val, md_val, ns_mask;
struct clk_freq_tbl *freq;
enum handoff ret;
- ctl_val = readl_relaxed(clk->b.ctl_reg);
- ret = branch_handoff(&clk->b, &clk->c);
+ ctl_val = readl_relaxed(rcg->b.ctl_reg);
+ ret = branch_handoff(&rcg->b, &rcg->c);
if (ret == HANDOFF_DISABLED_CLK)
return HANDOFF_DISABLED_CLK;
- if (clk->bank_info) {
- const struct bank_masks *bank_masks = clk->bank_info;
+ if (rcg->bank_info) {
+ const struct bank_masks *bank_masks = rcg->bank_info;
const struct bank_mask_info *bank_info;
if (!(ctl_val & bank_masks->bank_sel_mask))
bank_info = &bank_masks->bank0_mask;
@@ -602,13 +602,13 @@
md_val = bank_info->md_reg ?
readl_relaxed(bank_info->md_reg) : 0;
} else {
- ns_mask = clk->ns_mask;
- md_val = clk->md_reg ? readl_relaxed(clk->md_reg) : 0;
+ ns_mask = rcg->ns_mask;
+ md_val = rcg->md_reg ? readl_relaxed(rcg->md_reg) : 0;
}
if (!ns_mask)
return HANDOFF_UNKNOWN_RATE;
- ns_val = readl_relaxed(clk->ns_reg) & ns_mask;
- for (freq = clk->freq_tbl; freq->freq_hz != FREQ_END; freq++) {
+ ns_val = readl_relaxed(rcg->ns_reg) & ns_mask;
+ for (freq = rcg->freq_tbl; freq->freq_hz != FREQ_END; freq++) {
if ((freq->ns_val & ns_mask) == ns_val &&
(!freq->md_val || freq->md_val == md_val))
break;
@@ -616,7 +616,7 @@
if (freq->freq_hz == FREQ_END)
return HANDOFF_UNKNOWN_RATE;
- clk->current_freq = freq;
+ rcg->current_freq = freq;
c->rate = freq->freq_hz;
return HANDOFF_ENABLED_CLK;
@@ -632,40 +632,38 @@
},
};
-static int branch_clk_enable(struct clk *clk)
+static int branch_clk_enable(struct clk *c)
{
unsigned long flags;
- struct branch_clk *branch = to_branch_clk(clk);
+ struct branch_clk *br = to_branch_clk(c);
spin_lock_irqsave(&local_clock_reg_lock, flags);
- __branch_clk_enable_reg(&branch->b, branch->c.dbg_name);
- branch->enabled = true;
+ __branch_enable_reg(&br->b, br->c.dbg_name);
+ br->enabled = true;
spin_unlock_irqrestore(&local_clock_reg_lock, flags);
return 0;
}
-static void branch_clk_disable(struct clk *clk)
+static void branch_clk_disable(struct clk *c)
{
unsigned long flags;
- struct branch_clk *branch = to_branch_clk(clk);
+ struct branch_clk *br = to_branch_clk(c);
spin_lock_irqsave(&local_clock_reg_lock, flags);
- __branch_clk_disable_reg(&branch->b, branch->c.dbg_name);
- branch->enabled = false;
+ __branch_disable_reg(&br->b, br->c.dbg_name);
+ br->enabled = false;
spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}
-static struct clk *branch_clk_get_parent(struct clk *clk)
+static struct clk *branch_clk_get_parent(struct clk *c)
{
- struct branch_clk *branch = to_branch_clk(clk);
- return branch->parent;
+ return to_branch_clk(c)->parent;
}
-static int branch_clk_is_enabled(struct clk *clk)
+static int branch_clk_is_enabled(struct clk *c)
{
- struct branch_clk *branch = to_branch_clk(clk);
- return branch->enabled;
+ return to_branch_clk(c)->enabled;
}
static void branch_enable_hwcg(struct branch *b)
@@ -692,16 +690,14 @@
spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}
-static void branch_clk_enable_hwcg(struct clk *clk)
+static void branch_clk_enable_hwcg(struct clk *c)
{
- struct branch_clk *branch = to_branch_clk(clk);
- branch_enable_hwcg(&branch->b);
+ branch_enable_hwcg(&to_branch_clk(c)->b);
}
-static void branch_clk_disable_hwcg(struct clk *clk)
+static void branch_clk_disable_hwcg(struct clk *c)
{
- struct branch_clk *branch = to_branch_clk(clk);
- branch_disable_hwcg(&branch->b);
+ branch_disable_hwcg(&to_branch_clk(c)->b);
}
static int branch_set_flags(struct branch *b, unsigned flags)
@@ -738,26 +734,22 @@
static int branch_clk_in_hwcg_mode(struct clk *c)
{
- struct branch_clk *clk = to_branch_clk(c);
- return branch_in_hwcg_mode(&clk->b);
+ return branch_in_hwcg_mode(&to_branch_clk(c)->b);
}
-static void rcg_clk_enable_hwcg(struct clk *clk)
+static void rcg_clk_enable_hwcg(struct clk *c)
{
- struct rcg_clk *rcg = to_rcg_clk(clk);
- branch_enable_hwcg(&rcg->b);
+ branch_enable_hwcg(&to_rcg_clk(c)->b);
}
-static void rcg_clk_disable_hwcg(struct clk *clk)
+static void rcg_clk_disable_hwcg(struct clk *c)
{
- struct rcg_clk *rcg = to_rcg_clk(clk);
- branch_disable_hwcg(&rcg->b);
+ branch_disable_hwcg(&to_rcg_clk(c)->b);
}
static int rcg_clk_in_hwcg_mode(struct clk *c)
{
- struct rcg_clk *clk = to_rcg_clk(c);
- return branch_in_hwcg_mode(&clk->b);
+ return branch_in_hwcg_mode(&to_rcg_clk(c)->b);
}
static int rcg_clk_set_flags(struct clk *clk, unsigned flags)
@@ -802,9 +794,9 @@
return ret;
}
-static int branch_clk_reset(struct clk *clk, enum clk_reset_action action)
+static int branch_clk_reset(struct clk *c, enum clk_reset_action action)
{
- return branch_reset(&to_branch_clk(clk)->b, action);
+ return branch_reset(&to_branch_clk(c)->b, action);
}
struct clk_ops clk_ops_branch = {
@@ -825,9 +817,9 @@
.reset = branch_clk_reset,
};
-static int rcg_clk_reset(struct clk *clk, enum clk_reset_action action)
+static int rcg_clk_reset(struct clk *c, enum clk_reset_action action)
{
- return branch_reset(&to_rcg_clk(clk)->b, action);
+ return branch_reset(&to_rcg_clk(c)->b, action);
}
struct clk_ops clk_ops_rcg = {
@@ -850,10 +842,10 @@
static int cdiv_clk_enable(struct clk *c)
{
unsigned long flags;
- struct cdiv_clk *clk = to_cdiv_clk(c);
+ struct cdiv_clk *cdiv = to_cdiv_clk(c);
spin_lock_irqsave(&local_clock_reg_lock, flags);
- __branch_clk_enable_reg(&clk->b, clk->c.dbg_name);
+ __branch_enable_reg(&cdiv->b, cdiv->c.dbg_name);
spin_unlock_irqrestore(&local_clock_reg_lock, flags);
return 0;
@@ -862,70 +854,67 @@
static void cdiv_clk_disable(struct clk *c)
{
unsigned long flags;
- struct cdiv_clk *clk = to_cdiv_clk(c);
+ struct cdiv_clk *cdiv = to_cdiv_clk(c);
spin_lock_irqsave(&local_clock_reg_lock, flags);
- __branch_clk_disable_reg(&clk->b, clk->c.dbg_name);
+ __branch_disable_reg(&cdiv->b, cdiv->c.dbg_name);
spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}
static int cdiv_clk_set_rate(struct clk *c, unsigned long rate)
{
- struct cdiv_clk *clk = to_cdiv_clk(c);
+ struct cdiv_clk *cdiv = to_cdiv_clk(c);
u32 reg_val;
- if (rate > clk->max_div)
+ if (rate > cdiv->max_div)
return -EINVAL;
spin_lock(&local_clock_reg_lock);
- reg_val = readl_relaxed(clk->ns_reg);
- reg_val &= ~(clk->ext_mask | (clk->max_div - 1) << clk->div_offset);
+ reg_val = readl_relaxed(cdiv->ns_reg);
+ reg_val &= ~(cdiv->ext_mask | (cdiv->max_div - 1) << cdiv->div_offset);
/* Non-zero rates mean set a divider, zero means use external input */
if (rate)
- reg_val |= (rate - 1) << clk->div_offset;
+ reg_val |= (rate - 1) << cdiv->div_offset;
else
- reg_val |= clk->ext_mask;
- writel_relaxed(reg_val, clk->ns_reg);
+ reg_val |= cdiv->ext_mask;
+ writel_relaxed(reg_val, cdiv->ns_reg);
spin_unlock(&local_clock_reg_lock);
- clk->cur_div = rate;
+ cdiv->cur_div = rate;
return 0;
}
static unsigned long cdiv_clk_get_rate(struct clk *c)
{
- struct cdiv_clk *clk = to_cdiv_clk(c);
- return clk->cur_div;
+ return to_cdiv_clk(c)->cur_div;
}
static long cdiv_clk_round_rate(struct clk *c, unsigned long rate)
{
- struct cdiv_clk *clk = to_cdiv_clk(c);
- return rate > clk->max_div ? -EPERM : rate;
+ return rate > to_cdiv_clk(c)->max_div ? -EPERM : rate;
}
static int cdiv_clk_list_rate(struct clk *c, unsigned n)
{
- struct cdiv_clk *clk = to_cdiv_clk(c);
- return n > clk->max_div ? -ENXIO : n;
+ return n > to_cdiv_clk(c)->max_div ? -ENXIO : n;
}
static enum handoff cdiv_clk_handoff(struct clk *c)
{
- struct cdiv_clk *clk = to_cdiv_clk(c);
+ struct cdiv_clk *cdiv = to_cdiv_clk(c);
enum handoff ret;
u32 reg_val;
- ret = branch_handoff(&clk->b, &clk->c);
+ ret = branch_handoff(&cdiv->b, &cdiv->c);
if (ret == HANDOFF_DISABLED_CLK)
return ret;
- reg_val = readl_relaxed(clk->ns_reg);
- if (reg_val & clk->ext_mask) {
- clk->cur_div = 0;
+ reg_val = readl_relaxed(cdiv->ns_reg);
+ if (reg_val & cdiv->ext_mask) {
+ cdiv->cur_div = 0;
} else {
- reg_val >>= clk->div_offset;
- clk->cur_div = (reg_val & (clk->max_div - 1)) + 1;
+ reg_val >>= cdiv->div_offset;
+ cdiv->cur_div = (reg_val & (cdiv->max_div - 1)) + 1;
}
return HANDOFF_ENABLED_CLK;
@@ -933,20 +922,17 @@
static void cdiv_clk_enable_hwcg(struct clk *c)
{
- struct cdiv_clk *clk = to_cdiv_clk(c);
- branch_enable_hwcg(&clk->b);
+ branch_enable_hwcg(&to_cdiv_clk(c)->b);
}
static void cdiv_clk_disable_hwcg(struct clk *c)
{
- struct cdiv_clk *clk = to_cdiv_clk(c);
- branch_disable_hwcg(&clk->b);
+ branch_disable_hwcg(&to_cdiv_clk(c)->b);
}
static int cdiv_clk_in_hwcg_mode(struct clk *c)
{
- struct cdiv_clk *clk = to_cdiv_clk(c);
- return branch_in_hwcg_mode(&clk->b);
+ return branch_in_hwcg_mode(&to_cdiv_clk(c)->b);
}
struct clk_ops clk_ops_cdiv = {
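The helpers renamed above, __branch_enable_reg() and __branch_disable_reg(), share the same halt-bit handshake: toggle the enable bit, then poll the branch status once per microsecond for up to HALT_CHECK_MAX_LOOPS iterations and warn if it never reaches the expected state. A reduced sketch of that loop follows, with an assumed loop count and printf() standing in for WARN():

#include <stdio.h>

#define HALT_CHECK_MAX_LOOPS    200     /* value assumed for illustration */

/* is_halted() must return 1 while the branch is halted, 0 otherwise. */
static void wait_branch_state(int want_running, int (*is_halted)(void),
                              const char *name)
{
        int count;

        for (count = HALT_CHECK_MAX_LOOPS; count > 0; count--) {
                if (is_halted() != want_running)
                        return;         /* reached the expected state */
                /* udelay(1) in the driver */
        }
        printf("%s status stuck at '%s'\n", name, want_running ? "off" : "on");
}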
diff --git a/arch/arm/mach-msm/clock-local.h b/arch/arm/mach-msm/clock-local.h
index ffc7057..034e09c 100644
--- a/arch/arm/mach-msm/clock-local.h
+++ b/arch/arm/mach-msm/clock-local.h
@@ -156,9 +156,9 @@
extern struct clk_ops clk_ops_reset;
int branch_reset(struct branch *b, enum clk_reset_action action);
-void __branch_clk_enable_reg(const struct branch *clk, const char *name);
-u32 __branch_clk_disable_reg(const struct branch *clk, const char *name);
-enum handoff branch_handoff(struct branch *clk, struct clk *c);
+void __branch_enable_reg(const struct branch *b, const char *name);
+u32 __branch_disable_reg(const struct branch *b, const char *name);
+enum handoff branch_handoff(struct branch *b, struct clk *c);
/*
* Generic clock-definition struct and macros
@@ -183,9 +183,9 @@
struct clk c;
};
-static inline struct rcg_clk *to_rcg_clk(struct clk *clk)
+static inline struct rcg_clk *to_rcg_clk(struct clk *c)
{
- return container_of(clk, struct rcg_clk, c);
+ return container_of(c, struct rcg_clk, c);
}
extern struct clk_ops clk_ops_rcg;
@@ -214,9 +214,9 @@
struct clk c;
};
-static inline struct cdiv_clk *to_cdiv_clk(struct clk *clk)
+static inline struct cdiv_clk *to_cdiv_clk(struct clk *c)
{
- return container_of(clk, struct cdiv_clk, c);
+ return container_of(c, struct cdiv_clk, c);
}
extern struct clk_ops clk_ops_cdiv;
@@ -234,7 +234,7 @@
* @enabled: true if clock is on, false otherwise
* @b: branch
* @parent: clock source
- * @c: clk
+ * @c: clock
*
* An on/off switch with a rate derived from the parent.
*/
@@ -245,9 +245,9 @@
struct clk c;
};
-static inline struct branch_clk *to_branch_clk(struct clk *clk)
+static inline struct branch_clk *to_branch_clk(struct clk *c)
{
- return container_of(clk, struct branch_clk, c);
+ return container_of(c, struct branch_clk, c);
}
/**
@@ -255,7 +255,7 @@
* @sample_ticks: sample period in reference clock ticks
* @multiplier: measurement scale-up factor
* @divider: measurement scale-down factor
- * @c: clk
+ * @c: clock
*/
struct measure_clk {
u64 sample_ticks;
@@ -266,9 +266,9 @@
extern struct clk_ops clk_ops_empty;
-static inline struct measure_clk *to_measure_clk(struct clk *clk)
+static inline struct measure_clk *to_measure_clk(struct clk *c)
{
- return container_of(clk, struct measure_clk, c);
+ return container_of(c, struct measure_clk, c);
}
/*
@@ -280,11 +280,11 @@
/*
* Generic set-rate implementations
*/
-void set_rate_mnd(struct rcg_clk *clk, struct clk_freq_tbl *nf);
-void set_rate_nop(struct rcg_clk *clk, struct clk_freq_tbl *nf);
-void set_rate_mnd_8(struct rcg_clk *clk, struct clk_freq_tbl *nf);
-void set_rate_mnd_banked(struct rcg_clk *clk, struct clk_freq_tbl *nf);
-void set_rate_div_banked(struct rcg_clk *clk, struct clk_freq_tbl *nf);
+void set_rate_mnd(struct rcg_clk *rcg, struct clk_freq_tbl *nf);
+void set_rate_nop(struct rcg_clk *rcg, struct clk_freq_tbl *nf);
+void set_rate_mnd_8(struct rcg_clk *rcg, struct clk_freq_tbl *nf);
+void set_rate_mnd_banked(struct rcg_clk *rcg, struct clk_freq_tbl *nf);
+void set_rate_div_banked(struct rcg_clk *rcg, struct clk_freq_tbl *nf);
#endif /* __ARCH_ARM_MACH_MSM_CLOCK_LOCAL_H */
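The to_rcg_clk()/to_cdiv_clk()/to_branch_clk()/to_measure_clk() helpers touched above all follow the same container_of() downcast: each specific clock type embeds a generic struct clk and recovers its outer type from the pointer handed in by the framework. A pared-down illustration with made-up type names (and a simplified container_of(); the kernel's macro does the same offset arithmetic):

#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct clk {                    /* stand-in for the generic clock */
        unsigned long rate;
};

struct my_clk {                 /* hardware-specific wrapper */
        void *base;
        struct clk c;           /* embedded generic part */
};

static inline struct my_clk *to_my_clk(struct clk *c)
{
        return container_of(c, struct my_clk, c);
}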
diff --git a/arch/arm/mach-msm/clock-pll.c b/arch/arm/mach-msm/clock-pll.c
index d839911..49bb063 100644
--- a/arch/arm/mach-msm/clock-pll.c
+++ b/arch/arm/mach-msm/clock-pll.c
@@ -55,16 +55,16 @@
#define ENABLE_WAIT_MAX_LOOPS 200
-int pll_vote_clk_enable(struct clk *clk)
+int pll_vote_clk_enable(struct clk *c)
{
u32 ena, count;
unsigned long flags;
- struct pll_vote_clk *pll = to_pll_vote_clk(clk);
+ struct pll_vote_clk *pllv = to_pll_vote_clk(c);
spin_lock_irqsave(&pll_reg_lock, flags);
- ena = readl_relaxed(PLL_EN_REG(pll));
- ena |= pll->en_mask;
- writel_relaxed(ena, PLL_EN_REG(pll));
+ ena = readl_relaxed(PLL_EN_REG(pllv));
+ ena |= pllv->en_mask;
+ writel_relaxed(ena, PLL_EN_REG(pllv));
spin_unlock_irqrestore(&pll_reg_lock, flags);
/*
@@ -75,45 +75,44 @@
/* Wait for pll to enable. */
for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
- if (readl_relaxed(PLL_STATUS_REG(pll)) & pll->status_mask)
+ if (readl_relaxed(PLL_STATUS_REG(pllv)) & pllv->status_mask)
return 0;
udelay(1);
}
- WARN("PLL %s didn't enable after voting for it!\n", clk->dbg_name);
+ WARN("PLL %s didn't enable after voting for it!\n", c->dbg_name);
return -ETIMEDOUT;
}
-void pll_vote_clk_disable(struct clk *clk)
+void pll_vote_clk_disable(struct clk *c)
{
u32 ena;
unsigned long flags;
- struct pll_vote_clk *pll = to_pll_vote_clk(clk);
+ struct pll_vote_clk *pllv = to_pll_vote_clk(c);
spin_lock_irqsave(&pll_reg_lock, flags);
- ena = readl_relaxed(PLL_EN_REG(pll));
- ena &= ~(pll->en_mask);
- writel_relaxed(ena, PLL_EN_REG(pll));
+ ena = readl_relaxed(PLL_EN_REG(pllv));
+ ena &= ~(pllv->en_mask);
+ writel_relaxed(ena, PLL_EN_REG(pllv));
spin_unlock_irqrestore(&pll_reg_lock, flags);
}
-struct clk *pll_vote_clk_get_parent(struct clk *clk)
+struct clk *pll_vote_clk_get_parent(struct clk *c)
{
- struct pll_vote_clk *pll = to_pll_vote_clk(clk);
- return pll->parent;
+ return to_pll_vote_clk(c)->parent;
}
-int pll_vote_clk_is_enabled(struct clk *clk)
+int pll_vote_clk_is_enabled(struct clk *c)
{
- struct pll_vote_clk *pll = to_pll_vote_clk(clk);
- return !!(readl_relaxed(PLL_STATUS_REG(pll)) & pll->status_mask);
+ struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+ return !!(readl_relaxed(PLL_STATUS_REG(pllv)) & pllv->status_mask);
}
-static enum handoff pll_vote_clk_handoff(struct clk *clk)
+static enum handoff pll_vote_clk_handoff(struct clk *c)
{
- struct pll_vote_clk *pll = to_pll_vote_clk(clk);
- if (readl_relaxed(PLL_EN_REG(pll)) & pll->en_mask)
+ struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+ if (readl_relaxed(PLL_EN_REG(pllv)) & pllv->en_mask)
return HANDOFF_ENABLED_CLK;
return HANDOFF_DISABLED_CLK;
@@ -158,10 +157,10 @@
mb();
}
-static int local_pll_clk_enable(struct clk *clk)
+static int local_pll_clk_enable(struct clk *c)
{
unsigned long flags;
- struct pll_clk *pll = to_pll_clk(clk);
+ struct pll_clk *pll = to_pll_clk(c);
spin_lock_irqsave(&pll_reg_lock, flags);
__pll_clk_enable_reg(PLL_MODE_REG(pll));
@@ -177,10 +176,10 @@
writel_relaxed(mode, mode_reg);
}
-static void local_pll_clk_disable(struct clk *clk)
+static void local_pll_clk_disable(struct clk *c)
{
unsigned long flags;
- struct pll_clk *pll = to_pll_clk(clk);
+ struct pll_clk *pll = to_pll_clk(c);
/*
* Disable the PLL output, disable test mode, enable
@@ -191,9 +190,9 @@
spin_unlock_irqrestore(&pll_reg_lock, flags);
}
-static enum handoff local_pll_clk_handoff(struct clk *clk)
+static enum handoff local_pll_clk_handoff(struct clk *c)
{
- struct pll_clk *pll = to_pll_clk(clk);
+ struct pll_clk *pll = to_pll_clk(c);
u32 mode = readl_relaxed(PLL_MODE_REG(pll));
u32 mask = PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL;
@@ -203,17 +202,16 @@
return HANDOFF_DISABLED_CLK;
}
-static struct clk *local_pll_clk_get_parent(struct clk *clk)
+static struct clk *local_pll_clk_get_parent(struct clk *c)
{
- struct pll_clk *pll = to_pll_clk(clk);
- return pll->parent;
+ return to_pll_clk(c)->parent;
}
-int sr_pll_clk_enable(struct clk *clk)
+int sr_pll_clk_enable(struct clk *c)
{
u32 mode;
unsigned long flags;
- struct pll_clk *pll = to_pll_clk(clk);
+ struct pll_clk *pll = to_pll_clk(c);
spin_lock_irqsave(&pll_reg_lock, flags);
mode = readl_relaxed(PLL_MODE_REG(pll));
@@ -250,10 +248,10 @@
#define PLL_LOCKED_BIT BIT(16)
-int copper_pll_clk_enable(struct clk *clk)
+int copper_pll_clk_enable(struct clk *c)
{
unsigned long flags;
- struct pll_clk *pll = to_pll_clk(clk);
+ struct pll_clk *pll = to_pll_clk(c);
u32 count, mode;
int ret = 0;
@@ -282,7 +280,7 @@
}
if (!(readl_relaxed(PLL_STATUS_REG(pll)) & PLL_LOCKED_BIT)) {
- WARN("PLL %s didn't lock after enabling it!\n", clk->dbg_name);
+ WARN("PLL %s didn't lock after enabling it!\n", c->dbg_name);
ret = -ETIMEDOUT;
goto out;
}
@@ -379,9 +377,9 @@
}
-static int pll_clk_enable(struct clk *clk)
+static int pll_clk_enable(struct clk *c)
{
- struct pll_shared_clk *pll = to_pll_shared_clk(clk);
+ struct pll_shared_clk *pll = to_pll_shared_clk(c);
unsigned int pll_id = pll->id;
remote_spin_lock(&pll_lock);
@@ -396,9 +394,9 @@
return 0;
}
-static void pll_clk_disable(struct clk *clk)
+static void pll_clk_disable(struct clk *c)
{
- struct pll_shared_clk *pll = to_pll_shared_clk(clk);
+ struct pll_shared_clk *pll = to_pll_shared_clk(c);
unsigned int pll_id = pll->id;
remote_spin_lock(&pll_lock);
@@ -413,16 +411,14 @@
remote_spin_unlock(&pll_lock);
}
-static int pll_clk_is_enabled(struct clk *clk)
+static int pll_clk_is_enabled(struct clk *c)
{
- struct pll_shared_clk *pll = to_pll_shared_clk(clk);
-
- return readl_relaxed(PLL_MODE_REG(pll)) & BIT(0);
+ return readl_relaxed(PLL_MODE_REG(to_pll_shared_clk(c))) & BIT(0);
}
-static enum handoff pll_clk_handoff(struct clk *clk)
+static enum handoff pll_clk_handoff(struct clk *c)
{
- struct pll_shared_clk *pll = to_pll_shared_clk(clk);
+ struct pll_shared_clk *pll = to_pll_shared_clk(c);
unsigned int pll_lval;
struct pll_rate *l;
@@ -438,12 +434,12 @@
/* Convert PLL L values to PLL Output rate */
for (l = pll_l_rate; l->rate != 0; l++) {
if (l->lvalue == pll_lval) {
- clk->rate = l->rate;
+ c->rate = l->rate;
break;
}
}
- if (!clk->rate) {
+ if (!c->rate) {
pr_crit("Unknown PLL's L value!\n");
BUG();
}
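pll_clk_handoff() above recovers the running rate at boot by reading the PLL's programmed L value and matching it against a { lvalue, rate } table, and BUG()s on an unknown value. The sketch below shows that lookup on its own; the table contents are invented for the example (the driver's real table is pll_l_rate[], defined elsewhere):

struct l_rate_entry {
        unsigned int lvalue;
        unsigned long rate;
};

/* Example entries only; a zero rate terminates the table. */
static const struct l_rate_entry l_rate_tbl[] = {
        { 10, 192000000 },      /* L = 10 with a 19.2 MHz reference */
        { 50, 960000000 },
        {  0, 0 },
};

static unsigned long rate_from_l_value(unsigned int lval)
{
        const struct l_rate_entry *l;

        for (l = l_rate_tbl; l->rate != 0; l++)
                if (l->lvalue == lval)
                        return l->rate;
        return 0;               /* unknown L value; the driver BUG()s here */
}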
diff --git a/arch/arm/mach-msm/clock-pll.h b/arch/arm/mach-msm/clock-pll.h
index a8c642f..f24b066 100644
--- a/arch/arm/mach-msm/clock-pll.h
+++ b/arch/arm/mach-msm/clock-pll.h
@@ -34,7 +34,7 @@
* @id: PLL ID
* @mode_reg: enable register
* @parent: clock source
- * @c: clk
+ * @c: clock
*/
struct pll_shared_clk {
unsigned int id;
@@ -45,9 +45,9 @@
extern struct clk_ops clk_ops_pll;
-static inline struct pll_shared_clk *to_pll_shared_clk(struct clk *clk)
+static inline struct pll_shared_clk *to_pll_shared_clk(struct clk *c)
{
- return container_of(clk, struct pll_shared_clk, c);
+ return container_of(c, struct pll_shared_clk, c);
}
/**
@@ -64,7 +64,7 @@
* @status_mask: ANDed with @status_reg to determine if PLL is active.
* @status_reg: status register
* @parent: clock source
- * @c: clk
+ * @c: clock
*/
struct pll_vote_clk {
u32 *soft_vote;
@@ -81,9 +81,9 @@
extern struct clk_ops clk_ops_pll_vote;
-static inline struct pll_vote_clk *to_pll_vote_clk(struct clk *clk)
+static inline struct pll_vote_clk *to_pll_vote_clk(struct clk *c)
{
- return container_of(clk, struct pll_vote_clk, c);
+ return container_of(c, struct pll_vote_clk, c);
}
/**
@@ -105,21 +105,21 @@
extern struct clk_ops clk_ops_local_pll;
-static inline struct pll_clk *to_pll_clk(struct clk *clk)
+static inline struct pll_clk *to_pll_clk(struct clk *c)
{
- return container_of(clk, struct pll_clk, c);
+ return container_of(c, struct pll_clk, c);
}
-int sr_pll_clk_enable(struct clk *clk);
-int copper_pll_clk_enable(struct clk *clk);
+int sr_pll_clk_enable(struct clk *c);
+int copper_pll_clk_enable(struct clk *c);
/*
* PLL vote clock APIs
*/
-int pll_vote_clk_enable(struct clk *clk);
-void pll_vote_clk_disable(struct clk *clk);
-struct clk *pll_vote_clk_get_parent(struct clk *clk);
-int pll_vote_clk_is_enabled(struct clk *clk);
+int pll_vote_clk_enable(struct clk *c);
+void pll_vote_clk_disable(struct clk *c);
+struct clk *pll_vote_clk_get_parent(struct clk *c);
+int pll_vote_clk_is_enabled(struct clk *c);
struct pll_config {
u32 l;
diff --git a/arch/arm/mach-msm/clock-rpm.c b/arch/arm/mach-msm/clock-rpm.c
index 2ec40ce..8096c10 100644
--- a/arch/arm/mach-msm/clock-rpm.c
+++ b/arch/arm/mach-msm/clock-rpm.c
@@ -51,16 +51,18 @@
int rc;
struct msm_rpm_iv_pair iv = { .id = r->rpm_status_id, };
rc = msm_rpm_get_status(&iv, 1);
- return (rc < 0) ? rc : iv.value * 1000;
+ return (rc < 0) ? rc : iv.value * r->factor;
}
-#define RPM_SMD_KEY_CLOCK_SET_RATE 0x007A484B
+#define RPM_SMD_KEY_RATE 0x007A484B
+#define RPM_SMD_KEY_ENABLE 0x62616E45
static int clk_rpmrs_set_rate_smd(struct rpm_clk *r, uint32_t value,
uint32_t context, int noirq)
{
+ u32 rpm_key = r->branch ? RPM_SMD_KEY_ENABLE : RPM_SMD_KEY_RATE;
struct msm_rpm_kvp kvp = {
- .key = RPM_SMD_KEY_CLOCK_SET_RATE,
+ .key = rpm_key,
.data = (void *)&value,
.length = sizeof(value),
};
@@ -190,7 +192,7 @@
unsigned long this_khz, this_sleep_khz;
int rc = 0;
- this_khz = DIV_ROUND_UP(rate, 1000);
+ this_khz = DIV_ROUND_UP(rate, r->factor);
spin_lock_irqsave(&rpm_clock_lock, flags);
@@ -275,7 +277,7 @@
if (!r->branch) {
r->last_set_khz = iv.value;
r->last_set_sleep_khz = iv.value;
- clk->rate = iv.value * 1000;
+ clk->rate = iv.value * r->factor;
}
return HANDOFF_ENABLED_CLK;
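
The new r->factor field generalizes the old hard-coded 1000: most RPM clocks exchange rates with the RPM in kHz (factor = 1000), while the QDSS clock added below passes values through untouched (factor = 1). A standalone sketch of the two conversions used above, DIV_ROUND_UP() on the way in and a multiply on the way out (the 19.2 MHz request is a made-up value):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned long rate = 19200000;  /* hypothetical 19.2 MHz request */
            unsigned long factor = 1000;    /* kHz-based RPM clock */
            unsigned long sent, back;

            sent = DIV_ROUND_UP(rate, factor);      /* value sent to the RPM */
            back = sent * factor;                   /* rate reported back */
            printf("factor=1000: send %lu, read back %lu Hz\n", sent, back);

            factor = 1;                             /* QDSS-style clock */
            printf("factor=1:    value passes through as %lu\n",
                   DIV_ROUND_UP(rate, factor));
            return 0;
    }
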
diff --git a/arch/arm/mach-msm/clock-rpm.h b/arch/arm/mach-msm/clock-rpm.h
index b2358bc..107fb02 100644
--- a/arch/arm/mach-msm/clock-rpm.h
+++ b/arch/arm/mach-msm/clock-rpm.h
@@ -32,6 +32,7 @@
unsigned last_set_sleep_khz;
bool enabled;
bool branch; /* true: RPM only accepts 1 for ON and 0 for OFF */
+ unsigned factor;
struct clk_rpmrs_data *rpmrs_data;
struct rpm_clk *peer;
@@ -53,6 +54,7 @@
.rpm_clk_id = (r_id), \
.rpm_status_id = (stat_id), \
.peer = &active, \
+ .factor = 1000, \
.rpmrs_data = (rpmrsdata),\
.c = { \
.ops = &clk_ops_rpm, \
@@ -68,6 +70,7 @@
.rpm_status_id = (stat_id), \
.peer = &name, \
.active_only = true, \
+ .factor = 1000, \
.rpmrs_data = (rpmrsdata),\
.c = { \
.ops = &clk_ops_rpm, \
@@ -88,6 +91,7 @@
.peer = &active, \
.last_set_khz = ((r) / 1000), \
.last_set_sleep_khz = ((r) / 1000), \
+ .factor = 1000, \
.branch = true, \
.rpmrs_data = (rpmrsdata),\
.c = { \
@@ -106,6 +110,7 @@
.peer = &name, \
.last_set_khz = ((r) / 1000), \
.active_only = true, \
+ .factor = 1000, \
.branch = true, \
.rpmrs_data = (rpmrsdata),\
.c = { \
@@ -118,10 +123,48 @@
}, \
};
+#define __DEFINE_CLK_RPM_QDSS(name, active, type, r_id, stat_id, rpmrsdata) \
+ static struct rpm_clk active; \
+ static struct rpm_clk name = { \
+ .rpm_res_type = (type), \
+ .rpm_clk_id = (r_id), \
+ .rpm_status_id = (stat_id), \
+ .peer = &active, \
+ .factor = 1, \
+ .rpmrs_data = (rpmrsdata),\
+ .c = { \
+ .ops = &clk_ops_rpm, \
+ .flags = CLKFLAG_SKIP_AUTO_OFF, \
+ .dbg_name = #name, \
+ CLK_INIT(name.c), \
+ .warned = true, \
+ }, \
+ }; \
+ static struct rpm_clk active = { \
+ .rpm_res_type = (type), \
+ .rpm_clk_id = (r_id), \
+ .rpm_status_id = (stat_id), \
+ .peer = &name, \
+ .active_only = true, \
+ .factor = 1, \
+ .rpmrs_data = (rpmrsdata),\
+ .c = { \
+ .ops = &clk_ops_rpm, \
+ .flags = CLKFLAG_SKIP_AUTO_OFF, \
+ .dbg_name = #active, \
+ CLK_INIT(active.c), \
+ .warned = true, \
+ }, \
+ };
+
#define DEFINE_CLK_RPM(name, active, r_id, dep) \
__DEFINE_CLK_RPM(name, active, 0, MSM_RPM_ID_##r_id##_CLK, \
MSM_RPM_STATUS_ID_##r_id##_CLK, dep, &clk_rpmrs_data)
+#define DEFINE_CLK_RPM_QDSS(name, active) \
+ __DEFINE_CLK_RPM_QDSS(name, active, 0, MSM_RPM_ID_QDSS_CLK, \
+ MSM_RPM_STATUS_ID_QDSS_CLK, &clk_rpmrs_data)
+
#define DEFINE_CLK_RPM_BRANCH(name, active, r_id, r) \
__DEFINE_CLK_RPM_BRANCH(name, active, 0, MSM_RPM_ID_##r_id##_CLK, \
MSM_RPM_STATUS_ID_##r_id##_CLK, r, &clk_rpmrs_data)
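
Hypothetical use of the new helper (the clock names below are assumed for illustration and do not come from this patch): a single invocation emits both the normal and the active-only rpm_clk instances, each with factor = 1 so QDSS values are not scaled to kHz.

    /* In a per-SoC clock table (names assumed, not part of this patch): */
    DEFINE_CLK_RPM_QDSS(qdss_clk, qdss_a_clk);
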
diff --git a/arch/arm/mach-msm/devices-8064.c b/arch/arm/mach-msm/devices-8064.c
index f6ce848..472a87e 100644
--- a/arch/arm/mach-msm/devices-8064.c
+++ b/arch/arm/mach-msm/devices-8064.c
@@ -17,6 +17,7 @@
#include <linux/msm_rotator.h>
#include <linux/clkdev.h>
#include <linux/dma-mapping.h>
+#include <linux/coresight.h>
#include <mach/irqs-8064.h>
#include <mach/board.h>
#include <mach/msm_iomap.h>
@@ -32,7 +33,6 @@
#include <mach/msm_smd.h>
#include <mach/msm_dcvs.h>
#include <mach/msm_rtb.h>
-#include <mach/qdss.h>
#include <linux/ion.h>
#include "clock.h"
#include "devices.h"
@@ -1917,6 +1917,7 @@
MSM_RPM_MAP(8064, HDMI_SWITCH, HDMI_SWITCH, 1),
MSM_RPM_MAP(8064, DDR_DMM_0, DDR_DMM, 2),
MSM_RPM_MAP(8064, QDSS_CLK, QDSS_CLK, 1),
+ MSM_RPM_MAP(8064, VDDMIN_GPIO, VDDMIN_GPIO, 1),
},
.target_status = {
MSM_RPM_STATUS_ID_MAP(8064, VERSION_MAJOR),
@@ -2050,6 +2051,7 @@
MSM_RPM_STATUS_ID_MAP(8064, PM8821_S2_1),
MSM_RPM_STATUS_ID_MAP(8064, PM8821_L1_0),
MSM_RPM_STATUS_ID_MAP(8064, PM8821_L1_1),
+ MSM_RPM_STATUS_ID_MAP(8064, VDDMIN_GPIO),
},
.target_ctrl_id = {
MSM_RPM_CTRL_MAP(8064, VERSION_MAJOR),
@@ -2271,6 +2273,7 @@
#define AP2MDM_STATUS 48
#define AP2MDM_SOFT_RESET 27
#define AP2MDM_WAKEUP 35
+#define MDM2AP_PBLRDY 46
static struct resource mdm_resources[] = {
{
@@ -2309,6 +2312,12 @@
.name = "AP2MDM_WAKEUP",
.flags = IORESOURCE_IO,
},
+ {
+ .start = MDM2AP_PBLRDY,
+ .end = MDM2AP_PBLRDY,
+ .name = "MDM2AP_PBLRDY",
+ .flags = IORESOURCE_IO,
+ },
};
struct platform_device mdm_8064_device = {
@@ -2586,15 +2595,15 @@
.name = "jpegd_dst",
.domain = CAMERA_DOMAIN,
},
- /* Rotator src*/
+ /* Rotator */
{
.name = "rot_src",
- .domain = ROTATOR_SRC_DOMAIN,
+ .domain = ROTATOR_DOMAIN,
},
- /* Rotator dst */
+ /* Rotator */
{
.name = "rot_dst",
- .domain = ROTATOR_DST_DOMAIN,
+ .domain = ROTATOR_DOMAIN,
},
/* Video */
{
@@ -2650,36 +2659,18 @@
},
};
-static struct mem_pool apq8064_display_read_pools[] = {
+static struct mem_pool apq8064_display_pools[] = {
[GEN_POOL] =
- /* One address space for display reads */
+ /* One address space for display */
{
.paddr = SZ_128K,
.size = SZ_2G - SZ_128K,
},
};
-static struct mem_pool apq8064_display_write_pools[] = {
+static struct mem_pool apq8064_rotator_pools[] = {
[GEN_POOL] =
- /* One address space for display writes */
- {
- .paddr = SZ_128K,
- .size = SZ_2G - SZ_128K,
- },
-};
-
-static struct mem_pool apq8064_rotator_src_pools[] = {
- [GEN_POOL] =
- /* One address space for rotator src */
- {
- .paddr = SZ_128K,
- .size = SZ_2G - SZ_128K,
- },
-};
-
-static struct mem_pool apq8064_rotator_dst_pools[] = {
- [GEN_POOL] =
- /* One address space for rotator dst */
+ /* One address space for rotator */
{
.paddr = SZ_128K,
.size = SZ_2G - SZ_128K,
@@ -2695,21 +2686,13 @@
.iova_pools = apq8064_camera_pools,
.npools = ARRAY_SIZE(apq8064_camera_pools),
},
- [DISPLAY_READ_DOMAIN] = {
- .iova_pools = apq8064_display_read_pools,
- .npools = ARRAY_SIZE(apq8064_display_read_pools),
+ [DISPLAY_DOMAIN] = {
+ .iova_pools = apq8064_display_pools,
+ .npools = ARRAY_SIZE(apq8064_display_pools),
},
- [DISPLAY_WRITE_DOMAIN] = {
- .iova_pools = apq8064_display_write_pools,
- .npools = ARRAY_SIZE(apq8064_display_write_pools),
- },
- [ROTATOR_SRC_DOMAIN] = {
- .iova_pools = apq8064_rotator_src_pools,
- .npools = ARRAY_SIZE(apq8064_rotator_src_pools),
- },
- [ROTATOR_DST_DOMAIN] = {
- .iova_pools = apq8064_rotator_dst_pools,
- .npools = ARRAY_SIZE(apq8064_rotator_dst_pools),
+ [ROTATOR_DOMAIN] = {
+ .iova_pools = apq8064_rotator_pools,
+ .npools = ARRAY_SIZE(apq8064_rotator_pools),
},
};
diff --git a/arch/arm/mach-msm/devices-8930.c b/arch/arm/mach-msm/devices-8930.c
index 03685da..6ea8d7b 100644
--- a/arch/arm/mach-msm/devices-8930.c
+++ b/arch/arm/mach-msm/devices-8930.c
@@ -25,6 +25,7 @@
#include <mach/socinfo.h>
#include <mach/iommu_domains.h>
#include <mach/msm_rtb.h>
+#include <mach/msm_cache_dump.h>
#include "devices.h"
#include "rpm_log.h"
@@ -765,12 +766,12 @@
/* Rotator */
{
.name = "rot_src",
- .domain = ROTATOR_SRC_DOMAIN,
+ .domain = ROTATOR_DOMAIN,
},
/* Rotator */
{
.name = "rot_dst",
- .domain = ROTATOR_DST_DOMAIN,
+ .domain = ROTATOR_DOMAIN,
},
/* Video */
{
@@ -826,36 +827,18 @@
},
};
-static struct mem_pool msm8930_display_read_pools[] = {
+static struct mem_pool msm8930_display_pools[] = {
[GEN_POOL] =
- /* One address space for display reads */
+ /* One address space for display */
{
.paddr = SZ_128K,
.size = SZ_2G - SZ_128K,
},
};
-static struct mem_pool msm8930_display_write_pools[] = {
+static struct mem_pool msm8930_rotator_pools[] = {
[GEN_POOL] =
- /* One address space for display writes */
- {
- .paddr = SZ_128K,
- .size = SZ_2G - SZ_128K,
- },
-};
-
-static struct mem_pool msm8930_rotator_src_pools[] = {
- [GEN_POOL] =
- /* One address space for rotator src */
- {
- .paddr = SZ_128K,
- .size = SZ_2G - SZ_128K,
- },
-};
-
-static struct mem_pool msm8930_rotator_dst_pools[] = {
- [GEN_POOL] =
- /* One address space for rotator dst */
+ /* One address space for rotator */
{
.paddr = SZ_128K,
.size = SZ_2G - SZ_128K,
@@ -871,21 +854,13 @@
.iova_pools = msm8930_camera_pools,
.npools = ARRAY_SIZE(msm8930_camera_pools),
},
- [DISPLAY_READ_DOMAIN] = {
- .iova_pools = msm8930_display_read_pools,
- .npools = ARRAY_SIZE(msm8930_display_read_pools),
+ [DISPLAY_DOMAIN] = {
+ .iova_pools = msm8930_display_pools,
+ .npools = ARRAY_SIZE(msm8930_display_pools),
},
- [DISPLAY_WRITE_DOMAIN] = {
- .iova_pools = msm8930_display_write_pools,
- .npools = ARRAY_SIZE(msm8930_display_write_pools),
- },
- [ROTATOR_SRC_DOMAIN] = {
- .iova_pools = msm8930_rotator_src_pools,
- .npools = ARRAY_SIZE(msm8930_rotator_src_pools),
- },
- [ROTATOR_DST_DOMAIN] = {
- .iova_pools = msm8930_rotator_dst_pools,
- .npools = ARRAY_SIZE(msm8930_rotator_dst_pools),
+ [ROTATOR_DOMAIN] = {
+ .iova_pools = msm8930_rotator_pools,
+ .npools = ARRAY_SIZE(msm8930_rotator_pools),
},
};
@@ -927,3 +902,23 @@
.platform_data = &msm8930_rtb_pdata,
},
};
+
+#define MSM8930_L1_SIZE SZ_1M
+/*
+ * The actual L2 size is smaller but we need a larger buffer
+ * size to store other dump information
+ */
+#define MSM8930_L2_SIZE SZ_4M
+
+struct msm_cache_dump_platform_data msm8930_cache_dump_pdata = {
+ .l2_size = MSM8930_L2_SIZE,
+ .l1_size = MSM8930_L1_SIZE,
+};
+
+struct platform_device msm8930_cache_dump_device = {
+ .name = "msm_cache_dump",
+ .id = -1,
+ .dev = {
+ .platform_data = &msm8930_cache_dump_pdata,
+ },
+};
diff --git a/arch/arm/mach-msm/devices-8960.c b/arch/arm/mach-msm/devices-8960.c
index 0d417bd..3522e80 100644
--- a/arch/arm/mach-msm/devices-8960.c
+++ b/arch/arm/mach-msm/devices-8960.c
@@ -17,6 +17,7 @@
#include <linux/msm_rotator.h>
#include <linux/ion.h>
#include <linux/gpio.h>
+#include <linux/coresight.h>
#include <asm/clkdev.h>
#include <linux/msm_kgsl.h>
#include <linux/android_pmem.h>
@@ -37,7 +38,6 @@
#include <sound/msm-dai-q6.h>
#include <sound/apr_audio.h>
#include <mach/msm_tsif.h>
-#include <mach/qdss.h>
#include <mach/msm_serial_hs_lite.h>
#include "clock.h"
#include "devices.h"
@@ -3639,15 +3639,15 @@
.name = "jpegd_dst",
.domain = CAMERA_DOMAIN,
},
- /* Rotator src*/
+ /* Rotator */
{
.name = "rot_src",
- .domain = ROTATOR_SRC_DOMAIN,
+ .domain = ROTATOR_DOMAIN,
},
- /* Rotator dst */
+ /* Rotator */
{
.name = "rot_dst",
- .domain = ROTATOR_DST_DOMAIN,
+ .domain = ROTATOR_DOMAIN,
},
/* Video */
{
@@ -3703,36 +3703,18 @@
},
};
-static struct mem_pool msm8960_display_read_pools[] = {
+static struct mem_pool msm8960_display_pools[] = {
[GEN_POOL] =
- /* One address space for display reads */
+ /* One address space for display */
{
.paddr = SZ_128K,
.size = SZ_2G - SZ_128K,
},
};
-static struct mem_pool msm8960_display_write_pools[] = {
+static struct mem_pool msm8960_rotator_pools[] = {
[GEN_POOL] =
- /* One address space for display writes */
- {
- .paddr = SZ_128K,
- .size = SZ_2G - SZ_128K,
- },
-};
-
-static struct mem_pool msm8960_rotator_src_pools[] = {
- [GEN_POOL] =
- /* One address space for rotator src */
- {
- .paddr = SZ_128K,
- .size = SZ_2G - SZ_128K,
- },
-};
-
-static struct mem_pool msm8960_rotator_dst_pools[] = {
- [GEN_POOL] =
- /* One address space for rotator dst */
+ /* One address space for rotator */
{
.paddr = SZ_128K,
.size = SZ_2G - SZ_128K,
@@ -3748,21 +3730,13 @@
.iova_pools = msm8960_camera_pools,
.npools = ARRAY_SIZE(msm8960_camera_pools),
},
- [DISPLAY_READ_DOMAIN] = {
- .iova_pools = msm8960_display_read_pools,
- .npools = ARRAY_SIZE(msm8960_display_read_pools),
+ [DISPLAY_DOMAIN] = {
+ .iova_pools = msm8960_display_pools,
+ .npools = ARRAY_SIZE(msm8960_display_pools),
},
- [DISPLAY_WRITE_DOMAIN] = {
- .iova_pools = msm8960_display_write_pools,
- .npools = ARRAY_SIZE(msm8960_display_write_pools),
- },
- [ROTATOR_SRC_DOMAIN] = {
- .iova_pools = msm8960_rotator_src_pools,
- .npools = ARRAY_SIZE(msm8960_rotator_src_pools),
- },
- [ROTATOR_DST_DOMAIN] = {
- .iova_pools = msm8960_rotator_dst_pools,
- .npools = ARRAY_SIZE(msm8960_rotator_dst_pools),
+ [ROTATOR_DOMAIN] = {
+ .iova_pools = msm8960_rotator_pools,
+ .npools = ARRAY_SIZE(msm8960_rotator_pools),
},
};
diff --git a/arch/arm/mach-msm/devices-fsm9xxx.c b/arch/arm/mach-msm/devices-fsm9xxx.c
index 777b6d6..5f4d940 100644
--- a/arch/arm/mach-msm/devices-fsm9xxx.c
+++ b/arch/arm/mach-msm/devices-fsm9xxx.c
@@ -71,6 +71,26 @@
.resource = resources_uart2,
};
+static struct resource resources_uart3[] = {
+ {
+ .start = INT_UART3,
+ .end = INT_UART3,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = MSM_UART3_PHYS,
+ .end = MSM_UART3_PHYS + MSM_UART3_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+struct platform_device msm_device_uart3 = {
+ .name = "msm_uim",
+ .id = 2,
+ .num_resources = ARRAY_SIZE(resources_uart3),
+ .resource = resources_uart3,
+};
+
/*
* SSBIs
*/
diff --git a/arch/arm/mach-msm/devices-msm7x27a.c b/arch/arm/mach-msm/devices-msm7x27a.c
index b3454cd..1c58490 100644
--- a/arch/arm/mach-msm/devices-msm7x27a.c
+++ b/arch/arm/mach-msm/devices-msm7x27a.c
@@ -558,8 +558,8 @@
},
{
.name = "sdcc_dma_chnl",
- .start = DMOV_SDC3_CHAN,
- .end = DMOV_SDC3_CHAN,
+ .start = DMOV_NAND_CHAN,
+ .end = DMOV_NAND_CHAN,
.flags = IORESOURCE_DMA,
},
{
@@ -1715,7 +1715,7 @@
void __init msm8625_init_irq(void)
{
- msm_gic_irq_extn_init(MSM_QGIC_DIST_BASE, MSM_QGIC_CPU_BASE);
+ msm_gic_irq_extn_init();
gic_init(0, GIC_PPI_START, MSM_QGIC_DIST_BASE,
(void *)MSM_QGIC_CPU_BASE);
}
diff --git a/arch/arm/mach-msm/devices-msm7x30.c b/arch/arm/mach-msm/devices-msm7x30.c
index ff747e2..722575d 100644
--- a/arch/arm/mach-msm/devices-msm7x30.c
+++ b/arch/arm/mach-msm/devices-msm7x30.c
@@ -820,8 +820,8 @@
},
{
.name = "sdcc_dma_chnl",
- .start = DMOV_SDC2_CHAN,
- .end = DMOV_SDC2_CHAN,
+ .start = DMOV_NAND_CHAN,
+ .end = DMOV_NAND_CHAN,
.flags = IORESOURCE_DMA,
},
{
diff --git a/arch/arm/mach-msm/devices.h b/arch/arm/mach-msm/devices.h
index ea47727..152ca5b 100644
--- a/arch/arm/mach-msm/devices.h
+++ b/arch/arm/mach-msm/devices.h
@@ -406,6 +406,7 @@
extern struct platform_device msm8960_cache_dump_device;
extern struct platform_device apq8064_cache_dump_device;
+extern struct platform_device msm8930_cache_dump_device;
extern struct platform_device copper_device_tz_log;
diff --git a/arch/arm/mach-msm/footswitch-pcom.c b/arch/arm/mach-msm/footswitch-pcom.c
index 07d7118..8903859 100644
--- a/arch/arm/mach-msm/footswitch-pcom.c
+++ b/arch/arm/mach-msm/footswitch-pcom.c
@@ -42,7 +42,6 @@
* @init_data: Regulator platform data
* @pcom_id: Proc-comm ID of the footswitch
* @is_enabled: Flag set when footswitch is enabled
- * @is_manual: Flag set when footswitch is in manual proc-comm mode
* @has_ahb_clk: Flag set if footswitched core has an ahb_clk
* @has_src_clk: Flag set if footswitched core has a src_clk
* @src_clk: Controls the core clock's rate
@@ -57,7 +56,6 @@
struct regulator_init_data init_data;
unsigned pcom_id;
bool is_enabled;
- bool is_manual;
struct clk *src_clk;
struct clk *core_clk;
struct clk *ahb_clk;
@@ -256,12 +254,19 @@
if (pdev->id >= MAX_FS)
return -ENODEV;
- fs = &footswitches[pdev->id];
- if (!fs->is_manual) {
- pr_err("%s is not in manual mode\n", fs->desc.name);
- return -EINVAL;
- }
init_data = pdev->dev.platform_data;
+ fs = &footswitches[pdev->id];
+
+ /*
+ * Enable footswitch in manual mode (ie. not controlled along
+ * Enable footswitch in manual mode (i.e. not controlled along
+ */
+ rc = set_rail_state(fs->pcom_id, PCOM_CLKCTL_RPC_RAIL_ENABLE);
+ if (rc)
+ return rc;
+ rc = set_rail_mode(fs->pcom_id, PCOM_RAIL_MODE_MANUAL);
+ if (rc)
+ return rc;
rc = get_clocks(&pdev->dev, fs);
if (rc)
@@ -305,21 +310,6 @@
static int __init footswitch_init(void)
{
- struct footswitch *fs;
- int ret;
-
- /*
- * Enable all footswitches in manual mode (ie. not controlled along
- * with pcom clocks).
- */
- for (fs = footswitches; fs < footswitches + ARRAY_SIZE(footswitches);
- fs++) {
- set_rail_state(fs->pcom_id, PCOM_CLKCTL_RPC_RAIL_ENABLE);
- ret = set_rail_mode(fs->pcom_id, PCOM_RAIL_MODE_MANUAL);
- if (!ret)
- fs->is_manual = 1;
- }
-
return platform_driver_register(&footswitch_driver);
}
subsys_initcall(footswitch_init);
diff --git a/arch/arm/mach-msm/gdsc.c b/arch/arm/mach-msm/gdsc.c
index df3a92d..f91e3dc 100644
--- a/arch/arm/mach-msm/gdsc.c
+++ b/arch/arm/mach-msm/gdsc.c
@@ -63,10 +63,18 @@
ret = readl_tight_poll_timeout(sc->gdscr, regval, regval & PWR_ON_MASK,
TIMEOUT_US);
- if (ret)
+ if (ret) {
dev_err(&rdev->dev, "%s enable timed out\n", sc->rdesc.name);
+ return ret;
+ }
- return ret;
+ /*
+ * If clocks to this power domain were already on, they will take an
+ * additional 4 clock cycles to re-enable after the rail is enabled.
+ */
+ udelay(1);
+
+ return 0;
}
static int gdsc_disable(struct regulator_dev *rdev)
diff --git a/arch/arm/mach-msm/gss-8064.c b/arch/arm/mach-msm/gss-8064.c
index 126f8e0..e65f2d2 100644
--- a/arch/arm/mach-msm/gss-8064.c
+++ b/arch/arm/mach-msm/gss-8064.c
@@ -69,40 +69,12 @@
wmb();
}
-static void gss_fatal_fn(struct work_struct *work)
+static void restart_gss(void)
{
- uint32_t panic_smsm_states = SMSM_RESET | SMSM_SYSTEM_DOWNLOAD;
- uint32_t reset_smsm_states = SMSM_SYSTEM_REBOOT_USR |
- SMSM_SYSTEM_PWRDWN_USR;
- uint32_t gss_state;
-
- pr_err("Watchdog bite received from GSS!\n");
-
- gss_state = smsm_get_state(SMSM_MODEM_STATE);
-
- if (gss_state & panic_smsm_states) {
-
- pr_err("GSS SMSM state changed to SMSM_RESET.\n"
- "Probable err_fatal on the GSS. "
- "Calling subsystem restart...\n");
- log_gss_sfr();
- subsystem_restart("gss");
-
- } else if (gss_state & reset_smsm_states) {
-
- pr_err("%s: User-invoked system reset/powerdown. "
- "Resetting the SoC now.\n",
- __func__);
- kernel_restart(NULL);
- } else {
- /* TODO: Bus unlock code/sequence goes _here_ */
- log_gss_sfr();
- subsystem_restart("gss");
- }
+ log_gss_sfr();
+ subsystem_restart("gss");
}
-static DECLARE_WORK(gss_fatal_work, gss_fatal_fn);
-
static void smsm_state_cb(void *data, uint32_t old_state, uint32_t new_state)
{
/* Ignore if we're the one that set SMSM_RESET */
@@ -113,8 +85,7 @@
pr_err("GSS SMSM state changed to SMSM_RESET.\n"
"Probable err_fatal on the GSS. "
"Calling subsystem restart...\n");
- log_gss_sfr();
- subsystem_restart("gss");
+ restart_gss();
}
}
@@ -180,8 +151,8 @@
static irqreturn_t gss_wdog_bite_irq(int irq, void *dev_id)
{
- schedule_work(&gss_fatal_work);
- disable_irq_nosync(GSS_A5_WDOG_EXPIRED);
+ pr_err("Watchdog bite received from GSS!\n");
+ restart_gss();
return IRQ_HANDLED;
}
diff --git a/arch/arm/mach-msm/hotplug.c b/arch/arm/mach-msm/hotplug.c
index 46e835f..f8324ce 100644
--- a/arch/arm/mach-msm/hotplug.c
+++ b/arch/arm/mach-msm/hotplug.c
@@ -16,7 +16,7 @@
#include <asm/smp_plat.h>
#include <asm/vfp.h>
-#include <mach/qdss.h>
+#include <mach/jtag.h>
#include <mach/msm_rtb.h>
#include "pm.h"
diff --git a/arch/arm/mach-msm/include/mach/iommu_domains.h b/arch/arm/mach-msm/include/mach/iommu_domains.h
index 1d538f2..1a3a022 100644
--- a/arch/arm/mach-msm/include/mach/iommu_domains.h
+++ b/arch/arm/mach-msm/include/mach/iommu_domains.h
@@ -18,10 +18,8 @@
enum {
VIDEO_DOMAIN,
CAMERA_DOMAIN,
- DISPLAY_READ_DOMAIN,
- DISPLAY_WRITE_DOMAIN,
- ROTATOR_SRC_DOMAIN,
- ROTATOR_DST_DOMAIN,
+ DISPLAY_DOMAIN,
+ ROTATOR_DOMAIN,
MAX_DOMAINS
};
diff --git a/arch/arm/mach-msm/include/mach/jtag.h b/arch/arm/mach-msm/include/mach/jtag.h
new file mode 100644
index 0000000..3850eff
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/jtag.h
@@ -0,0 +1,24 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MACH_JTAG_H
+#define __MACH_JTAG_H
+
+#ifdef CONFIG_MSM_JTAG
+extern void msm_jtag_save_state(void);
+extern void msm_jtag_restore_state(void);
+#else
+static inline void msm_jtag_save_state(void) {}
+static inline void msm_jtag_restore_state(void) {}
+#endif
+
+#endif
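
A sketch of the intended call pattern for this header (not the actual pm/hotplug code, which is modified elsewhere in this patch): the per-CPU debug/ETM registers are lost across a power collapse, so they are saved before entry and restored on exit, and the CONFIG_MSM_JTAG=n stubs make both calls free.

    #include <mach/jtag.h>

    /* Illustrative low-power entry/exit path; the callback is hypothetical. */
    static void example_power_collapse(void (*enter_lowest_power_state)(void))
    {
            msm_jtag_save_state();          /* save per-CPU debug state */
            enter_lowest_power_state();     /* registers lost here */
            msm_jtag_restore_state();       /* reprogram on the way back up */
    }
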
diff --git a/arch/arm/mach-msm/include/mach/mdm2.h b/arch/arm/mach-msm/include/mach/mdm2.h
index 997b3be..637a3cc 100644
--- a/arch/arm/mach-msm/include/mach/mdm2.h
+++ b/arch/arm/mach-msm/include/mach/mdm2.h
@@ -13,13 +13,24 @@
#ifndef _ARCH_ARM_MACH_MSM_MDM2_H
#define _ARCH_ARM_MACH_MSM_MDM2_H
+struct mdm_vddmin_resource {
+ int rpm_id;
+ int ap2mdm_vddmin_gpio;
+ unsigned int modes;
+ unsigned int drive_strength;
+ int mdm2ap_vddmin_gpio;
+};
+
struct mdm_platform_data {
char *mdm_version;
int ramdump_delay_ms;
int soft_reset_inverted;
int early_power_on;
int sfr_query;
+ int no_powerdown_after_ramdumps;
+ struct mdm_vddmin_resource *vddmin_resource;
struct platform_device *peripheral_platform_device;
+ const unsigned int ramdump_timeout_ms;
};
#endif
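
A sketch of how board code might fill in the new fields (every numeric value below is a placeholder chosen for illustration, not taken from a real board file; MSM_RPM_ID_VDDMIN_GPIO is the RPM resource added to mach/rpm.h in this patch):

    #include <mach/mdm2.h>
    #include <mach/rpm.h>

    static struct mdm_vddmin_resource example_vddmin_rsrc = {
            .rpm_id             = MSM_RPM_ID_VDDMIN_GPIO,
            .ap2mdm_vddmin_gpio = 30,       /* placeholder GPIO numbers */
            .modes              = 0x03,
            .drive_strength     = 8,
            .mdm2ap_vddmin_gpio = 80,
    };

    static struct mdm_platform_data example_mdm_pdata = {
            .mdm_version                 = "3.0",
            .ramdump_delay_ms            = 2000,
            .soft_reset_inverted         = 1,
            .vddmin_resource             = &example_vddmin_rsrc,
            .no_powerdown_after_ramdumps = 1,
            .ramdump_timeout_ms          = 120000,  /* overrides the default */
    };
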
diff --git a/arch/arm/mach-msm/include/mach/msm_bus_board.h b/arch/arm/mach-msm/include/mach/msm_bus_board.h
index 0c556b5..574491a 100644
--- a/arch/arm/mach-msm/include/mach/msm_bus_board.h
+++ b/arch/arm/mach-msm/include/mach/msm_bus_board.h
@@ -83,6 +83,14 @@
extern struct msm_bus_fabric_registration msm_bus_8930_sys_fpb_pdata;
extern struct msm_bus_fabric_registration msm_bus_8930_cpss_fpb_pdata;
+extern struct msm_bus_fabric_registration msm_bus_copper_sys_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_copper_mmss_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_copper_bimc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_copper_ocmem_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_copper_periph_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_copper_config_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_copper_ocmem_vnoc_pdata;
+
void msm_bus_rpm_set_mt_mask(void);
int msm_bus_board_rpm_get_il_ids(uint16_t *id);
int msm_bus_board_get_iid(int id);
@@ -148,6 +156,20 @@
MSM_BUS_CLK_UNHALT<<MSM_BUS_MASTER_SHIFT((master),\
MSM_BUS_CLK_HALT_FIELDSIZE))\
+#define RPM_BUS_SLAVE_REQ 0x766c7362
+#define RPM_BUS_MASTER_REQ 0x73616d62
+
+enum msm_bus_rpm_slave_field_type {
+ RPM_SLAVE_FIELD_BW = 0x00007762,
+};
+
+enum msm_bus_rpm_mas_field_type {
+ RPM_MASTER_FIELD_BW = 0x00007762,
+ RPM_MASTER_FIELD_BW_T0 = 0x30747762,
+ RPM_MASTER_FIELD_BW_T1 = 0x31747762,
+ RPM_MASTER_FIELD_BW_T2 = 0x32747762,
+};
+
/* Topology related enums */
enum msm_bus_fabric_type {
MSM_BUS_FAB_DEFAULT = 0,
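
The new request and field IDs decode to little-endian packed ASCII tags, following the same convention as the SMD clock keys added earlier in this patch (0x007A484B is "KHz", 0x62616E45 is "Enab"). A standalone decoder, included only to make the constants readable; it prints "bslv", "bmas", "bw" and "bwt0":

    #include <stdio.h>
    #include <stdint.h>

    /* Print a u32 ID as its four little-endian ASCII bytes (NULs end it). */
    static void decode(const char *name, uint32_t id)
    {
            char s[5] = { id & 0xff, (id >> 8) & 0xff,
                          (id >> 16) & 0xff, (id >> 24) & 0xff, 0 };
            printf("%-22s 0x%08x -> \"%s\"\n", name, (unsigned)id, s);
    }

    int main(void)
    {
            decode("RPM_BUS_SLAVE_REQ",      0x766c7362);   /* "bslv" */
            decode("RPM_BUS_MASTER_REQ",     0x73616d62);   /* "bmas" */
            decode("RPM_SLAVE_FIELD_BW",     0x00007762);   /* "bw"   */
            decode("RPM_MASTER_FIELD_BW_T0", 0x30747762);   /* "bwt0" */
            return 0;
    }
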
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-fsm9xxx.h b/arch/arm/mach-msm/include/mach/msm_iomap-fsm9xxx.h
index c30c9e4..a99f1f7 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap-fsm9xxx.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-fsm9xxx.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -78,7 +78,10 @@
#define MSM_UART1_PHYS 0x94000000
#define MSM_UART1_SIZE SZ_4K
-#define MSM_UART2_PHYS 0x94100000
+#define MSM_UART2_PHYS 0x94010000
#define MSM_UART2_SIZE SZ_4K
+#define MSM_UART3_PHYS 0x94100000
+#define MSM_UART3_SIZE SZ_4K
+
#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_spi.h b/arch/arm/mach-msm/include/mach/msm_spi.h
index 51081b6..11d3014 100644
--- a/arch/arm/mach-msm/include/mach/msm_spi.h
+++ b/arch/arm/mach-msm/include/mach/msm_spi.h
@@ -21,4 +21,5 @@
int (*dma_config)(void);
const char *rsl_id;
uint32_t pm_lat;
+ uint32_t infinite_mode;
};
diff --git a/arch/arm/mach-msm/include/mach/qdss.h b/arch/arm/mach-msm/include/mach/qdss.h
deleted file mode 100644
index 05d8577..0000000
--- a/arch/arm/mach-msm/include/mach/qdss.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __MACH_QDSS_H
-#define __MACH_QDSS_H
-
-struct qdss_source {
- struct list_head link;
- const char *name;
- uint32_t fport_mask;
-};
-
-struct msm_qdss_platform_data {
- struct qdss_source *src_table;
- size_t size;
- uint8_t afamily;
-};
-
-#ifdef CONFIG_MSM_QDSS
-extern struct qdss_source *qdss_get(const char *name);
-extern void qdss_put(struct qdss_source *src);
-extern int qdss_enable(struct qdss_source *src);
-extern void qdss_disable(struct qdss_source *src);
-extern void qdss_disable_sink(void);
-extern int qdss_clk_enable(void);
-extern void qdss_clk_disable(void);
-#else
-static inline struct qdss_source *qdss_get(const char *name) { return NULL; }
-static inline void qdss_put(struct qdss_source *src) {}
-static inline int qdss_enable(struct qdss_source *src) { return -ENOSYS; }
-static inline void qdss_disable(struct qdss_source *src) {}
-static inline void qdss_disable_sink(void) {}
-static inline int qdss_clk_enable(void) { return -ENOSYS; }
-static inline void qdss_clk_disable(void) {}
-#endif
-
-#ifdef CONFIG_MSM_JTAG
-extern void msm_jtag_save_state(void);
-extern void msm_jtag_restore_state(void);
-#else
-static inline void msm_jtag_save_state(void) {}
-static inline void msm_jtag_restore_state(void) {}
-#endif
-
-#endif
diff --git a/arch/arm/mach-msm/include/mach/qpnp-int.h b/arch/arm/mach-msm/include/mach/qpnp-int.h
index a79d2fc..21d95e6 100644
--- a/arch/arm/mach-msm/include/mach/qpnp-int.h
+++ b/arch/arm/mach-msm/include/mach/qpnp-int.h
@@ -52,7 +52,8 @@
* Used by the PMIC Arbiter driver or equivalent to register
* callbacks for interrupt events.
*/
-int qpnpint_register_controller(unsigned int busno,
+int qpnpint_register_controller(struct device_node *node,
+ struct spmi_controller *ctrl,
struct qpnp_local_int *li_cb);
/**
@@ -68,8 +69,11 @@
{
return -ENXIO;
}
-static inline int qpnpint_register_controller(unsigned int busno,
- struct qpnp_local_int *li_cb)
+
+static inline int qpnpint_register_controller(struct device_node *node,
+ struct spmi_controller *ctrl,
+ struct qpnp_local_int *li_cb)
+
{
return -ENXIO;
}
diff --git a/arch/arm/mach-msm/include/mach/qpnp.h b/arch/arm/mach-msm/include/mach/qpnp.h
deleted file mode 100644
index 1d2e440..0000000
--- a/arch/arm/mach-msm/include/mach/qpnp.h
+++ /dev/null
@@ -1,19 +0,0 @@
- /* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/spmi.h>
-
-struct resource *qpnp_get_resource(struct spmi_device *dev,
- unsigned int node_idx, unsigned int type,
- unsigned int res_num);
-int qpnp_get_irq(struct spmi_device *dev, unsigned int node_idx,
- unsigned int res_num);
diff --git a/arch/arm/mach-msm/include/mach/rpm-8064.h b/arch/arm/mach-msm/include/mach/rpm-8064.h
index c4c6b0a..39ec7ff 100644
--- a/arch/arm/mach-msm/include/mach/rpm-8064.h
+++ b/arch/arm/mach-msm/include/mach/rpm-8064.h
@@ -120,7 +120,9 @@
MSM_RPM_8064_SEL_HDMI_SWITCH = 83,
MSM_RPM_8064_SEL_DDR_DMM = 84,
- MSM_RPM_8064_SEL_LAST = MSM_RPM_8064_SEL_DDR_DMM,
+ MSM_RPM_8064_SEL_VDDMIN_GPIO = 89,
+
+ MSM_RPM_8064_SEL_LAST = MSM_RPM_8064_SEL_VDDMIN_GPIO,
};
/* RPM resource (4 byte) word ID enum */
@@ -287,8 +289,9 @@
MSM_RPM_8064_ID_DDR_DMM_0 = 212,
MSM_RPM_8064_ID_DDR_DMM_1 = 213,
MSM_RPM_8064_ID_QDSS_CLK = 214,
+ MSM_RPM_8064_ID_VDDMIN_GPIO = 215,
- MSM_RPM_8064_ID_LAST = MSM_RPM_8064_ID_QDSS_CLK,
+ MSM_RPM_8064_ID_LAST = MSM_RPM_8064_ID_VDDMIN_GPIO,
};
@@ -425,8 +428,9 @@
MSM_RPM_8064_STATUS_ID_DDR_DMM_1 = 128,
MSM_RPM_8064_STATUS_ID_EBI1_CH0_RANGE = 129,
MSM_RPM_8064_STATUS_ID_EBI1_CH1_RANGE = 130,
+ MSM_RPM_8064_STATUS_ID_VDDMIN_GPIO = 131,
- MSM_RPM_8064_STATUS_ID_LAST = MSM_RPM_8064_STATUS_ID_EBI1_CH1_RANGE,
+ MSM_RPM_8064_STATUS_ID_LAST = MSM_RPM_8064_STATUS_ID_VDDMIN_GPIO,
};
#endif /* __ARCH_ARM_MACH_MSM_RPM_8064_H */
diff --git a/arch/arm/mach-msm/include/mach/rpm-regulator-smd.h b/arch/arm/mach-msm/include/mach/rpm-regulator-smd.h
index 2eb59f5..319c2d8 100644
--- a/arch/arm/mach-msm/include/mach/rpm-regulator-smd.h
+++ b/arch/arm/mach-msm/include/mach/rpm-regulator-smd.h
@@ -17,6 +17,26 @@
struct rpm_regulator;
+/**
+ * enum rpm_regulator_voltage_corner - possible voltage corner values
+ *
+ * These should be used in regulator_set_voltage() and
+ * rpm_regulator_set_voltage() calls for corner type regulators as if they had
+ * units of uV.
+ *
+ * Note, the meaning of corner values is set by the RPM. It is possible that
+ * future platforms will utilize different corner values. The values specified
+ * in this enum correspond to MSM8974 for PMIC PM8841 SMPS 2 (VDD_Dig).
+ */
+enum rpm_regulator_voltage_corner {
+ RPM_REGULATOR_CORNER_RETENTION = 1,
+ RPM_REGULATOR_CORNER_SVS_KRAIT,
+ RPM_REGULATOR_CORNER_SVS_SOC,
+ RPM_REGULATOR_CORNER_NORMAL,
+ RPM_REGULATOR_CORNER_TURBO,
+ RPM_REGULATOR_CORNER_SUPER_TURBO,
+};
+
#if defined(CONFIG_MSM_RPM_REGULATOR_SMD) || defined(CONFIG_MSM_RPM_REGULATOR)
struct rpm_regulator *rpm_regulator_get(struct device *dev, const char *supply);
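
A sketch of a consumer using the corner values, per the kerneldoc above: the corner enumerators are passed where microvolts would normally go. The supply name is a placeholder, and the rpm_regulator_set_voltage()/rpm_regulator_enable() signatures are assumed to follow the usual pattern of this API:

    #include <linux/err.h>
    #include <linux/device.h>
    #include <mach/rpm-regulator-smd.h>

    static int example_request_nominal_corner(struct device *dev)
    {
            struct rpm_regulator *vdd_dig;
            int rc;

            vdd_dig = rpm_regulator_get(dev, "vdd_dig"); /* placeholder name */
            if (IS_ERR(vdd_dig))
                    return PTR_ERR(vdd_dig);

            /* Corner values go in the min/max "uV" slots. */
            rc = rpm_regulator_set_voltage(vdd_dig, RPM_REGULATOR_CORNER_NORMAL,
                                           RPM_REGULATOR_CORNER_SUPER_TURBO);
            if (!rc)
                    rc = rpm_regulator_enable(vdd_dig);
            return rc;
    }
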
diff --git a/arch/arm/mach-msm/include/mach/rpm.h b/arch/arm/mach-msm/include/mach/rpm.h
index de4c9d9..f6b9a6e 100644
--- a/arch/arm/mach-msm/include/mach/rpm.h
+++ b/arch/arm/mach-msm/include/mach/rpm.h
@@ -460,6 +460,7 @@
MSM_RPM_ID_PM8821_S2_1,
MSM_RPM_ID_PM8821_L1_0,
MSM_RPM_ID_PM8821_L1_1,
+ MSM_RPM_ID_VDDMIN_GPIO,
MSM_RPM_ID_LAST,
};
@@ -825,6 +826,7 @@
MSM_RPM_STATUS_ID_PM8821_S2_1,
MSM_RPM_STATUS_ID_PM8821_L1_0,
MSM_RPM_STATUS_ID_PM8821_L1_1,
+ MSM_RPM_STATUS_ID_VDDMIN_GPIO,
MSM_RPM_STATUS_ID_LAST,
};
diff --git a/arch/arm/mach-msm/iommu_domains.c b/arch/arm/mach-msm/iommu_domains.c
index a7e06ba..e92b5c5 100644
--- a/arch/arm/mach-msm/iommu_domains.c
+++ b/arch/arm/mach-msm/iommu_domains.c
@@ -47,31 +47,57 @@
unsigned long page_size,
int cached)
{
- int i, ret_value = 0;
- unsigned long order = get_order(page_size);
- unsigned long aligned_size = ALIGN(size, page_size);
- unsigned long nrpages = aligned_size >> (PAGE_SHIFT + order);
+ int ret = 0;
+ int i = 0;
unsigned long phy_addr = ALIGN(virt_to_phys(iommu_dummy), page_size);
unsigned long temp_iova = start_iova;
+ if (page_size == SZ_4K) {
+ struct scatterlist *sglist;
+ unsigned int nrpages = PFN_ALIGN(size) >> PAGE_SHIFT;
+ struct page *dummy_page = phys_to_page(phy_addr);
- for (i = 0; i < nrpages; i++) {
- int ret = iommu_map(domain, temp_iova, phy_addr, page_size,
- cached);
- if (ret) {
- pr_err("%s: could not map %lx in domain %p, error: %d\n",
- __func__, start_iova, domain, ret);
- ret_value = -EAGAIN;
+ sglist = vmalloc(sizeof(*sglist) * nrpages);
+ if (!sglist) {
+ ret = -ENOMEM;
goto out;
}
- temp_iova += page_size;
+
+ sg_init_table(sglist, nrpages);
+
+ for (i = 0; i < nrpages; i++)
+ sg_set_page(&sglist[i], dummy_page, PAGE_SIZE, 0);
+
+ ret = iommu_map_range(domain, temp_iova, sglist, size, cached);
+ if (ret) {
+ pr_err("%s: could not map extra %lx in domain %p\n",
+ __func__, start_iova, domain);
+ }
+
+ vfree(sglist);
+ } else {
+ unsigned long order = get_order(page_size);
+ unsigned long aligned_size = ALIGN(size, page_size);
+ unsigned long nrpages = aligned_size >> (PAGE_SHIFT + order);
+
+ for (i = 0; i < nrpages; i++) {
+ ret = iommu_map(domain, temp_iova, phy_addr, page_size,
+ cached);
+ if (ret) {
+ pr_err("%s: could not map %lx in domain %p, error: %d\n",
+ __func__, start_iova, domain, ret);
+ ret = -EAGAIN;
+ goto out;
+ }
+ temp_iova += page_size;
+ }
}
- return ret_value;
+ return ret;
out:
for (; i > 0; --i) {
temp_iova -= page_size;
iommu_unmap(domain, start_iova, page_size);
}
- return ret_value;
+ return ret;
}
void msm_iommu_unmap_extra(struct iommu_domain *domain,
diff --git a/arch/arm/mach-msm/jtag.c b/arch/arm/mach-msm/jtag.c
index 8dae9c6..bf5857c 100644
--- a/arch/arm/mach-msm/jtag.c
+++ b/arch/arm/mach-msm/jtag.c
@@ -19,11 +19,16 @@
#include <linux/export.h>
#include <linux/printk.h>
#include <linux/ratelimit.h>
+#include <linux/coresight.h>
#include <mach/scm.h>
+#include <mach/jtag.h>
-#include "qdss-priv.h"
#include "cp14.h"
+#define BM(lsb, msb) ((BIT(msb) - BIT(lsb)) + BIT(msb))
+#define BMVAL(val, lsb, msb) ((val & BM(lsb, msb)) >> lsb)
+#define BVAL(val, n) ((val & BIT(n)) >> n)
+
/* no of dbg regs + 1 (for storing the reg count) */
#define MAX_DBG_REGS (90)
#define MAX_DBG_STATE_SIZE (MAX_DBG_REGS * num_possible_cpus())
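
A standalone check of the new bit-field helpers: BM(lsb, msb) builds a contiguous mask covering bits lsb..msb inclusive, BMVAL() extracts that field right-justified, and BVAL() pulls out a single bit. For example BM(4, 7) is 0xf0 and BMVAL(0xABCD, 4, 7) is 0xc (the BIT() macro below stands in for the kernel's):

    #include <stdio.h>

    #define BIT(n)                  (1UL << (n))
    #define BM(lsb, msb)            ((BIT(msb) - BIT(lsb)) + BIT(msb))
    #define BMVAL(val, lsb, msb)    (((val) & BM(lsb, msb)) >> (lsb))
    #define BVAL(val, n)            (((val) & BIT(n)) >> (n))

    int main(void)
    {
            printf("BM(4, 7)            = 0x%lx\n", BM(4, 7));              /* 0xf0 */
            printf("BMVAL(0xABCD, 4, 7) = 0x%lx\n", BMVAL(0xABCDUL, 4, 7)); /* 0xc  */
            printf("BVAL(0xABCD, 3)     = %lu\n", BVAL(0xABCDUL, 3));       /* 1    */
            return 0;
    }
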
diff --git a/arch/arm/mach-msm/mdm2.c b/arch/arm/mach-msm/mdm2.c
index bd7bd9e..f851545 100644
--- a/arch/arm/mach-msm/mdm2.c
+++ b/arch/arm/mach-msm/mdm2.c
@@ -42,9 +42,7 @@
#include "clock.h"
#include "mdm_private.h"
-#define MDM_MODEM_TIMEOUT 6000
-#define MDM_HOLD_TIME 4000
-#define MDM_MODEM_DELTA 100
+#define MDM_PBLRDY_CNT 20
static int mdm_debug_on;
static int power_on_count;
@@ -79,20 +77,54 @@
mutex_unlock(&hsic_status_lock);
}
+static void mdm_toggle_soft_reset(struct mdm_modem_drv *mdm_drv)
+{
+ int soft_reset_direction_assert = 0,
+ soft_reset_direction_de_assert = 1;
+
+ if (mdm_drv->pdata->soft_reset_inverted) {
+ soft_reset_direction_assert = 1;
+ soft_reset_direction_de_assert = 0;
+ }
+ gpio_direction_output(mdm_drv->ap2mdm_soft_reset_gpio,
+ soft_reset_direction_assert);
+ usleep_range(5000, 10000);
+ gpio_direction_output(mdm_drv->ap2mdm_soft_reset_gpio,
+ soft_reset_direction_de_assert);
+}
+
static void mdm_power_down_common(struct mdm_modem_drv *mdm_drv)
{
+ int i;
int soft_reset_direction =
mdm_drv->pdata->soft_reset_inverted ? 1 : 0;
- gpio_direction_output(mdm_drv->ap2mdm_soft_reset_gpio,
- soft_reset_direction);
+ /* Wait for the modem to complete its power down actions. */
+ for (i = 20; i > 0; i--) {
+ if (gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 0)
+ break;
+ msleep(100);
+ }
+ if (i == 0) {
+ pr_err("%s: MDM2AP_STATUS never went low. Doing a hard reset\n",
+ __func__);
+ gpio_direction_output(mdm_drv->ap2mdm_soft_reset_gpio,
+ soft_reset_direction);
+ /*
+ * Currently, there is a debounce timer on the charm PMIC. It is
+ * necessary to hold the PMIC RESET low for ~3.5 seconds
+ * for the reset to fully take place. Sleep here to ensure the
+ * reset has occured before the function exits.
+ * reset has occurred before the function exits.
+ msleep(4000);
+ }
mdm_peripheral_disconnect(mdm_drv);
}
static void mdm_do_first_power_on(struct mdm_modem_drv *mdm_drv)
{
- int soft_reset_direction =
- mdm_drv->pdata->soft_reset_inverted ? 0 : 1;
+ int i;
+ int pblrdy;
if (power_on_count != 1) {
pr_err("%s: Calling fn when power_on_count != 1\n",
@@ -103,6 +135,13 @@
pr_err("%s: Powering on modem for the first time\n", __func__);
mdm_peripheral_disconnect(mdm_drv);
+ /* If this is the first power-up after a panic, the modem may still
+ * be in a power-on state, in which case we need to toggle the gpio
+ * instead of just de-asserting it. No harm done if the modem was
+ * powered down.
+ */
+ mdm_toggle_soft_reset(mdm_drv);
+
/* If the device has a kpd pwr gpio then toggle it. */
if (mdm_drv->ap2mdm_kpdpwr_n_gpio > 0) {
/* Pull AP2MDM_KPDPWR gpio high and wait for PS_HOLD to settle,
@@ -114,31 +153,45 @@
gpio_direction_output(mdm_drv->ap2mdm_kpdpwr_n_gpio, 0);
}
- /* De-assert the soft reset line. */
- pr_debug("%s: De-asserting soft reset gpio\n", __func__);
- gpio_direction_output(mdm_drv->ap2mdm_soft_reset_gpio,
- soft_reset_direction);
+ if (!mdm_drv->mdm2ap_pblrdy)
+ goto start_mdm_peripheral;
+ for (i = 0; i < MDM_PBLRDY_CNT; i++) {
+ pblrdy = gpio_get_value(mdm_drv->mdm2ap_pblrdy);
+ if (pblrdy)
+ break;
+ usleep_range(5000, 5000);
+ }
+
+ pr_debug("%s: i:%d\n", __func__, i);
+
+start_mdm_peripheral:
mdm_peripheral_connect(mdm_drv);
msleep(200);
}
static void mdm_do_soft_power_on(struct mdm_modem_drv *mdm_drv)
{
- int soft_reset_direction =
- mdm_drv->pdata->soft_reset_inverted ? 0 : 1;
+ int i;
+ int pblrdy;
- /* De-assert the soft reset line. */
pr_err("%s: soft resetting mdm modem\n", __func__);
-
mdm_peripheral_disconnect(mdm_drv);
+ mdm_toggle_soft_reset(mdm_drv);
- gpio_direction_output(mdm_drv->ap2mdm_soft_reset_gpio,
- soft_reset_direction == 1 ? 0 : 1);
- usleep_range(5000, 10000);
- gpio_direction_output(mdm_drv->ap2mdm_soft_reset_gpio,
- soft_reset_direction == 1 ? 1 : 0);
+ if (!mdm_drv->mdm2ap_pblrdy)
+ goto start_mdm_peripheral;
+ for (i = 0; i < MDM_PBLRDY_CNT; i++) {
+ pblrdy = gpio_get_value(mdm_drv->mdm2ap_pblrdy);
+ if (pblrdy)
+ break;
+ usleep_range(5000, 5000);
+ }
+
+ pr_debug("%s: i:%d\n", __func__, i);
+
+start_mdm_peripheral:
mdm_peripheral_connect(mdm_drv);
msleep(200);
}
diff --git a/arch/arm/mach-msm/mdm_common.c b/arch/arm/mach-msm/mdm_common.c
index 74bf25d..1b09c34 100644
--- a/arch/arm/mach-msm/mdm_common.c
+++ b/arch/arm/mach-msm/mdm_common.c
@@ -36,6 +36,7 @@
#include <mach/restart.h>
#include <mach/subsystem_notif.h>
#include <mach/subsystem_restart.h>
+#include <mach/rpm.h>
#include <linux/msm_charm.h>
#include "msm_watchdog.h"
#include "mdm_private.h"
@@ -44,12 +45,13 @@
#define MDM_MODEM_TIMEOUT 6000
#define MDM_MODEM_DELTA 100
#define MDM_BOOT_TIMEOUT 60000L
-#define MDM_RDUMP_TIMEOUT 60000L
+#define MDM_RDUMP_TIMEOUT 120000L
#define MDM2AP_STATUS_TIMEOUT_MS 60000L
static int mdm_debug_on;
static struct workqueue_struct *mdm_queue;
static struct workqueue_struct *mdm_sfr_queue;
+static unsigned int dump_timeout_ms;
#define EXTERNAL_MODEM "external_modem"
@@ -65,6 +67,57 @@
#define SFR_MAX_RETRIES 10
#define SFR_RETRY_INTERVAL 1000
+static irqreturn_t mdm_vddmin_change(int irq, void *dev_id)
+{
+ int value = gpio_get_value(
+ mdm_drv->pdata->vddmin_resource->mdm2ap_vddmin_gpio);
+
+ if (value == 0)
+ pr_info("External Modem entered Vddmin\n");
+ else
+ pr_info("External Modem exited Vddmin\n");
+
+ return IRQ_HANDLED;
+}
+
+static void mdm_setup_vddmin_gpios(void)
+{
+ struct msm_rpm_iv_pair req;
+ struct mdm_vddmin_resource *vddmin_res;
+ int irq, ret;
+
+ /* This resource may not be supported by some platforms. */
+ vddmin_res = mdm_drv->pdata->vddmin_resource;
+ if (!vddmin_res)
+ return;
+
+ req.id = vddmin_res->rpm_id;
+ req.value = ((uint32_t)vddmin_res->ap2mdm_vddmin_gpio & 0x0000FFFF)
+ << 16;
+ req.value |= ((uint32_t)vddmin_res->modes & 0x000000FF) << 8;
+ req.value |= (uint32_t)vddmin_res->drive_strength & 0x000000FF;
+
+ msm_rpm_set(MSM_RPM_CTX_SET_0, &req, 1);
+
+ /* Monitor low power gpio from mdm */
+ irq = MSM_GPIO_TO_INT(vddmin_res->mdm2ap_vddmin_gpio);
+ if (irq < 0) {
+ pr_err("%s: could not get LPM POWER IRQ resource.\n",
+ __func__);
+ goto error_end;
+ }
+
+ ret = request_threaded_irq(irq, NULL, mdm_vddmin_change,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "mdm lpm", NULL);
+
+ if (ret < 0)
+ pr_err("%s: MDM LPM IRQ#%d request failed with error=%d",
+ __func__, irq, ret);
+error_end:
+ return;
+}
+
static void mdm_restart_reason_fn(struct work_struct *work)
{
int ret, ntries = 0;
@@ -288,6 +341,14 @@
return IRQ_HANDLED;
}
+static irqreturn_t mdm_pblrdy_change(int irq, void *dev_id)
+{
+ pr_info("%s: pbl ready:%d\n", __func__,
+ gpio_get_value(mdm_drv->mdm2ap_pblrdy));
+
+ return IRQ_HANDLED;
+}
+
static int mdm_subsys_shutdown(const struct subsys_data *crashed_subsys)
{
gpio_direction_output(mdm_drv->ap2mdm_errfatal_gpio, 1);
@@ -335,7 +396,7 @@
mdm_drv->boot_type = CHARM_RAM_DUMPS;
complete(&mdm_needs_reload);
if (!wait_for_completion_timeout(&mdm_ram_dumps,
- msecs_to_jiffies(MDM_RDUMP_TIMEOUT))) {
+ msecs_to_jiffies(dump_timeout_ms))) {
mdm_drv->mdm_ram_dump_status = -ETIMEDOUT;
pr_info("%s: mdm modem ramdumps timed out.\n",
__func__);
@@ -343,7 +404,8 @@
pr_info("%s: mdm modem ramdumps completed.\n",
__func__);
INIT_COMPLETION(mdm_ram_dumps);
- mdm_drv->ops->power_down_mdm_cb(mdm_drv);
+ if (!mdm_drv->pdata->no_powerdown_after_ramdumps)
+ mdm_drv->ops->power_down_mdm_cb(mdm_drv);
}
return mdm_drv->mdm_ram_dump_status;
}
@@ -445,10 +507,18 @@
if (pres)
mdm_drv->ap2mdm_pmic_pwr_en_gpio = pres->start;
+ /* MDM2AP_PBLRDY */
+ pres = platform_get_resource_byname(pdev, IORESOURCE_IO,
+ "MDM2AP_PBLRDY");
+ if (pres)
+ mdm_drv->mdm2ap_pblrdy = pres->start;
+
mdm_drv->boot_type = CHARM_NORMAL_BOOT;
mdm_drv->ops = mdm_ops;
mdm_drv->pdata = pdev->dev.platform_data;
+ dump_timeout_ms = mdm_drv->pdata->ramdump_timeout_ms > 0 ?
+ mdm_drv->pdata->ramdump_timeout_ms : MDM_RDUMP_TIMEOUT;
}
int mdm_common_create(struct platform_device *pdev,
@@ -472,6 +542,8 @@
gpio_request(mdm_drv->ap2mdm_kpdpwr_n_gpio, "AP2MDM_KPDPWR_N");
gpio_request(mdm_drv->mdm2ap_status_gpio, "MDM2AP_STATUS");
gpio_request(mdm_drv->mdm2ap_errfatal_gpio, "MDM2AP_ERRFATAL");
+ if (mdm_drv->mdm2ap_pblrdy > 0)
+ gpio_request(mdm_drv->mdm2ap_pblrdy, "MDM2AP_PBLRDY");
if (mdm_drv->ap2mdm_pmic_pwr_en_gpio > 0)
gpio_request(mdm_drv->ap2mdm_pmic_pwr_en_gpio,
@@ -560,12 +632,35 @@
mdm_drv->mdm_status_irq = irq;
status_err:
+ if (mdm_drv->mdm2ap_pblrdy > 0) {
+ irq = MSM_GPIO_TO_INT(mdm_drv->mdm2ap_pblrdy);
+ if (irq < 0) {
+ pr_err("%s: could not get MDM2AP_PBLRDY IRQ resource",
+ __func__);
+ goto pblrdy_err;
+ }
+
+ ret = request_threaded_irq(irq, NULL, mdm_pblrdy_change,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+ IRQF_SHARED,
+ "mdm pbl ready", mdm_drv);
+
+ if (ret < 0) {
+ pr_err("%s: MDM2AP_PBL IRQ#%d request failed error=%d",
+ __func__, irq, ret);
+ goto pblrdy_err;
+ }
+ }
+
+pblrdy_err:
/*
* If AP2MDM_PMIC_PWR_EN gpio is used, pull it high. It remains
* high until the whole phone is shut down.
*/
if (mdm_drv->ap2mdm_pmic_pwr_en_gpio > 0)
gpio_direction_output(mdm_drv->ap2mdm_pmic_pwr_en_gpio, 1);
+ /* Register VDDmin gpios with RPM */
+ mdm_setup_vddmin_gpios();
/* Perform early powerup of the external modem in order to
* allow tabla devices to be found.
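
The req.value packing in mdm_setup_vddmin_gpios() above places the AP-side GPIO number in bits 31:16, the mode bits in 15:8 and the drive strength in 7:0. A standalone worked example with placeholder inputs (gpio 30, modes 0x03, drive strength 8), which yields 0x001e0308:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t ap2mdm_vddmin_gpio = 30;       /* placeholder values */
            uint32_t modes = 0x03;
            uint32_t drive_strength = 8;
            uint32_t value;

            value  = (ap2mdm_vddmin_gpio & 0x0000FFFF) << 16;
            value |= (modes & 0x000000FF) << 8;
            value |= drive_strength & 0x000000FF;

            printf("req.value = 0x%08x\n", (unsigned)value);  /* 0x001e0308 */
            return 0;
    }
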
diff --git a/arch/arm/mach-msm/mdm_private.h b/arch/arm/mach-msm/mdm_private.h
index f157d88..53bfaf0 100644
--- a/arch/arm/mach-msm/mdm_private.h
+++ b/arch/arm/mach-msm/mdm_private.h
@@ -35,6 +35,7 @@
unsigned ap2mdm_kpdpwr_n_gpio;
unsigned ap2mdm_soft_reset_gpio;
unsigned ap2mdm_pmic_pwr_en_gpio;
+ unsigned mdm2ap_pblrdy;
int mdm_errfatal_irq;
int mdm_status_irq;
diff --git a/arch/arm/mach-msm/memory.c b/arch/arm/mach-msm/memory.c
index 40845d7..a1b21c5 100644
--- a/arch/arm/mach-msm/memory.c
+++ b/arch/arm/mach-msm/memory.c
@@ -363,64 +363,6 @@
}
EXPORT_SYMBOL(allocate_contiguous_ebi_nomap);
-/* emulation of the deprecated pmem_kalloc and pmem_kfree */
-int32_t pmem_kalloc(const size_t size, const uint32_t flags)
-{
- int pmem_memtype;
- int memtype = MEMTYPE_NONE;
- int ebi1_memtype = MEMTYPE_EBI1;
- unsigned int align;
- int32_t paddr;
-
- switch (flags & PMEM_ALIGNMENT_MASK) {
- case PMEM_ALIGNMENT_4K:
- align = SZ_4K;
- break;
- case PMEM_ALIGNMENT_1M:
- align = SZ_1M;
- break;
- default:
- pr_alert("Invalid alignment %x\n",
- (flags & PMEM_ALIGNMENT_MASK));
- return -EINVAL;
- }
-
- /* on 7x30 and 8x55 "EBI1 kernel PMEM" is really on EBI0 */
- if (cpu_is_msm7x30() || cpu_is_msm8x55())
- ebi1_memtype = MEMTYPE_EBI0;
-
- pmem_memtype = flags & PMEM_MEMTYPE_MASK;
- if (pmem_memtype == PMEM_MEMTYPE_EBI1)
- memtype = ebi1_memtype;
- else if (pmem_memtype == PMEM_MEMTYPE_SMI)
- memtype = MEMTYPE_SMI_KERNEL;
- else {
- pr_alert("Invalid memory type %x\n",
- flags & PMEM_MEMTYPE_MASK);
- return -EINVAL;
- }
-
- paddr = _allocate_contiguous_memory_nomap(size, memtype, align,
- __builtin_return_address(0));
-
- if (!paddr && pmem_memtype == PMEM_MEMTYPE_SMI)
- paddr = _allocate_contiguous_memory_nomap(size,
- ebi1_memtype, align, __builtin_return_address(0));
-
- if (!paddr)
- return -ENOMEM;
- return paddr;
-}
-EXPORT_SYMBOL(pmem_kalloc);
-
-int pmem_kfree(const int32_t physaddr)
-{
- free_contiguous_memory_by_paddr(physaddr);
-
- return 0;
-}
-EXPORT_SYMBOL(pmem_kfree);
-
unsigned int msm_ttbr0;
void store_ttbr0(void)
diff --git a/arch/arm/mach-msm/mpm-8625.c b/arch/arm/mach-msm/mpm-8625.c
index fa966d2..954e5cc 100644
--- a/arch/arm/mach-msm/mpm-8625.c
+++ b/arch/arm/mach-msm/mpm-8625.c
@@ -152,7 +152,7 @@
return 0;
}
-void __init msm_gic_irq_extn_init(void __iomem *db, void __iomem *cb)
+void __init msm_gic_irq_extn_init(void)
{
gic_arch_extn.irq_mask = msm_gic_mask_irq;
gic_arch_extn.irq_unmask = msm_gic_unmask_irq;
diff --git a/arch/arm/mach-msm/mpm-8625.h b/arch/arm/mach-msm/mpm-8625.h
index 4ada9e2..1c28390 100644
--- a/arch/arm/mach-msm/mpm-8625.h
+++ b/arch/arm/mach-msm/mpm-8625.h
@@ -14,7 +14,7 @@
#ifndef _ARCH_ARM_MACH_MSM_MPM_8625_H_
#define _ARCH_ARM_MACH_MSM_MPM_8625_H_
-void msm_gic_irq_extn_init(void __iomem *, void __iomem *);
+void msm_gic_irq_extn_init(void);
unsigned int msm_gic_spi_ppi_pending(void);
int msm_gic_irq_idle_sleep_allowed(void);
diff --git a/arch/arm/mach-msm/msm_bus/Makefile b/arch/arm/mach-msm/msm_bus/Makefile
index 766856c..98e1250 100644
--- a/arch/arm/mach-msm/msm_bus/Makefile
+++ b/arch/arm/mach-msm/msm_bus/Makefile
@@ -8,4 +8,5 @@
obj-$(CONFIG_ARCH_MSM9615) += msm_bus_board_9615.o
obj-$(CONFIG_ARCH_APQ8064) += msm_bus_board_8064.o
obj-$(CONFIG_ARCH_MSM8930) += msm_bus_board_8930.o
+obj-$(CONFIG_ARCH_MSMCOPPER) += msm_bus_board_copper.o
obj-$(CONFIG_DEBUG_FS) += msm_bus_dbg.o
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_board_copper.c b/arch/arm/mach-msm/msm_bus/msm_bus_board_copper.c
new file mode 100644
index 0000000..9858a73
--- /dev/null
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_board_copper.c
@@ -0,0 +1,2002 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <mach/msm_bus.h>
+#include <mach/msm_bus_board.h>
+#include <mach/board.h>
+#include <mach/rpm.h>
+#include "msm_bus_core.h"
+#include "msm_bus_noc.h"
+#include "msm_bus_bimc.h"
+
+#define NMASTERS 120
+#define NSLAVES 150
+#define NFAB_COPPER 7
+
+enum msm_bus_copper_master_ports_type {
+ /* System NOC Masters */
+ MASTER_PORT_LPASS_AHB = 0,
+ MASTER_PORT_QDSS_BAM,
+ MASTER_PORT_SNOC_CFG,
+ MASTER_PORT_GW_BIMC_SNOC,
+ MASTER_PORT_GW_CNOC_SNOC,
+ MASTER_PORT_CRYPTO_CORE0,
+ MASTER_PORT_CRYPTO_CORE1,
+ MASTER_PORT_LPASS_PROC,
+ MASTER_PORT_MSS,
+ MASTER_PORT_MSS_NAV,
+ MASTER_PORT_OCMEM_DMA,
+ MASTER_PORT_GW_PNOC_SNOC,
+ MASTER_PORT_WCSS,
+ MASTER_PORT_QDSS_ETR,
+ MASTER_PORT_USB3,
+
+ /* MMSS NOC Masters */
+ MASTER_PORT_GW_CNOC_MNOC_MMSS_CFG = 0,
+ MASTER_PORT_GW_CNOC_MNOC_CFG,
+ MASTER_PORT_GFX3D_PORT0,
+ MASTER_PORT_GFX3D_PORT1,
+ MASTER_PORT_JPEG,
+ MASTER_PORT_MDP,
+ /* Venus video core */
+ MASTER_PORT_VIDEO_PORT0,
+ MASTER_PORT_VIDEO_PORT1,
+ MASTER_PORT_VFE = 16,
+
+ /* BIMC Masters */
+ MASTER_PORT_KMPSS_M0 = 0,
+ MASTER_PORT_KMPSS_M1,
+ MASTER_PORT_MSS_PROC,
+ MASTER_PORT_GW_MNOC_BIMC_0,
+ MASTER_PORT_GW_MNOC_BIMC_1,
+ MASTER_PORT_GW_SNOC_BIMC_0,
+ MASTER_PORT_GW_SNOC_BIMC_1,
+
+ /* OCMEM NOC Masters */
+ MASTER_PORT_CNOC_ONOC_CFG = 0,
+ MASTER_PORT_JPEG_OCMEM,
+ MASTER_PORT_MDP_OCMEM,
+ MASTER_PORT_VIDEO_P0_OCMEM,
+ MASTER_PORT_VIDEO_P1_OCMEM,
+ MASTER_PORT_VFE_OCMEM,
+
+ /* Peripheral NOC Masters */
+ MASTER_PORT_SDCC_1 = 0,
+ MASTER_PORT_SDCC_3,
+ MASTER_PORT_SDCC_2,
+ MASTER_PORT_SDCC_4,
+ MASTER_PORT_TSIF,
+ MASTER_PORT_BAM_DMA,
+ MASTER_PORT_BLSP_2,
+ MASTER_PORT_USB_HSIC,
+ MASTER_PORT_BLSP_1,
+ MASTER_PORT_USB_HS,
+ MASTER_PORT_PNOC_CFG,
+ MASTER_PORT_GW_SNOC_PNOC,
+
+ /* Config NOC Masters */
+ MASTER_PORT_RPM_INST = 0,
+ MASTER_PORT_RPM_DATA,
+ MASTER_PORT_RPM_SYS,
+ MASTER_PORT_DEHR,
+ MASTER_PORT_QDSS_DAP,
+ MASTER_PORT_SPDM,
+ MASTER_PORT_TIC,
+ MASTER_PORT_GW_SNOC_CNOC,
+};
+
+enum msm_bus_copper_slave_ports_type {
+ /* System NOC Slaves */
+ SLAVE_PORT_KMPSS = 1,
+ SLAVE_PORT_LPASS,
+ SLAVE_PORT_USB3 = 4,
+ SLAVE_PORT_WCSS = 6,
+ SLAVE_PORT_GW_SNOC_BIMC_P0,
+ SLAVE_PORT_GW_SNOC_BIMC_P1,
+ SLAVE_PORT_GW_SNOC_CNOC,
+ SLAVE_PORT_OCIMEM,
+ SLAVE_PORT_SNOC_OCMEM,
+ SLAVE_PORT_GW_SNOC_PNOC,
+ SLAVE_PORT_SERVICE_SNOC,
+ SLAVE_PORT_QDSS_STM,
+
+ /* MMSS NOC Slaves */
+ SLAVE_PORT_CAMERA_CFG = 0,
+ SLAVE_PORT_DISPLAY_CFG,
+ SLAVE_PORT_OCMEM_CFG,
+ SLAVE_PORT_CPR_CFG,
+ SLAVE_PORT_CPR_XPU_CFG,
+ SLAVE_PORT_MISC_CFG = 6,
+ SLAVE_PORT_MISC_XPU_CFG,
+ SLAVE_PORT_VENUS_CFG,
+ SLAVE_PORT_GFX3D_CFG,
+ SLAVE_PORT_MMSS_CLK_CFG = 11,
+ SLAVE_PORT_MMSS_CLK_XPU_CFG,
+ SLAVE_PORT_MNOC_MPU_CFG,
+ SLAVE_PORT_ONOC_MPU_CFG,
+ SLAVE_PORT_GW_MMSS_BIMC_P0 = 16,
+ SLAVE_PORT_GW_MMSS_BIMC_P1,
+ SLAVE_PORT_SERVICE_MNOC,
+
+ /* BIMC Slaves */
+ SLAVE_PORT_EBI1_CH0 = 0,
+ SLAVE_PORT_EBI1_CH1,
+ SLAVE_PORT_KMPSS_L2,
+ SLAVE_PORT_GW_BIMC_SNOC,
+
+ /* OCMEM NOC Slaves */
+ SLAVE_PORT_OCMEM_P0 = 0,
+ SLAVE_PORT_OCMEM_P1,
+ SLAVE_PORT_SERVICE_ONOC,
+
+ /*Peripheral NOC Slaves */
+ SLAVE_PORT_SDCC_1 = 0,
+ SLAVE_PORT_SDCC_3,
+ SLAVE_PORT_SDCC_2,
+ SLAVE_PORT_SDCC_4,
+ SLAVE_PORT_TSIF,
+ SLAVE_PORT_BAM_DMA,
+ SLAVE_PORT_BLSP_2,
+ SLAVE_PORT_USB_HSIC,
+ SLAVE_PORT_BLSP_1,
+ SLAVE_PORT_USB_HS,
+ SLAVE_PORT_PDM,
+ SLAVE_PORT_PERIPH_APU_CFG,
+ SLAVE_PORT_PNOC_MPU_CFG,
+ SLAVE_PORT_PRNG,
+ SLAVE_PORT_GW_PNOC_SNOC,
+ SLAVE_PORT_SERVICE_PNOC,
+
+ /* Config NOC slaves */
+ SLAVE_PORT_CLK_CTL = 1,
+ SLAVE_PORT_CNOC_MSS,
+ SLAVE_PORT_SECURITY,
+ SLAVE_PORT_TCSR,
+ SLAVE_PORT_TLMM,
+ SLAVE_PORT_CRYPTO_0_CFG,
+ SLAVE_PORT_CRYPTO_1_CFG,
+ SLAVE_PORT_IMEM_CFG,
+ SLAVE_PORT_MESSAGE_RAM,
+ SLAVE_PORT_BIMC_CFG,
+ SLAVE_PORT_BOOT_ROM,
+ SLAVE_PORT_CNOC_MNOC_MMSS_CFG,
+ SLAVE_PORT_PMIC_ARB,
+ SLAVE_PORT_SPDM_WRAPPER,
+ SLAVE_PORT_DEHR_CFG,
+ SLAVE_PORT_MPM,
+ SLAVE_PORT_QDSS_CFG,
+ SLAVE_PORT_RBCPR_CFG,
+ SLAVE_PORT_RBCPR_QDSS_APU_CFG,
+ SLAVE_PORT_CNOC_MNOC_CFG,
+ SLAVE_PORT_SNOC_MPU_CFG,
+ SLAVE_PORT_CNOC_ONOC_CFG,
+ SLAVE_PORT_PNOC_CFG,
+ SLAVE_PORT_SNOC_CFG,
+ SLAVE_PORT_EBI1_DLL_CFG,
+ SLAVE_PORT_PHY_APU_CFG,
+ SLAVE_PORT_EBI1_PHY_CFG,
+ SLAVE_PORT_RPM,
+ SLAVE_PORT_GW_CNOC_SNOC,
+ SLAVE_PORT_SERVICE_CNOC,
+};
+
+/* Hardware IDs for RPM */
+enum msm_bus_copper_mas_hw_id {
+ MAS_APPSS_PROC = 0,
+ MAS_AMSS_PROC,
+ MAS_MNOC_BIMC,
+ MAS_SNOC_BIMC,
+ MAS_CNOC_MNOC_MMSS_CFG,
+ MAS_CNOC_MNOC_CFG,
+ MAS_GFX3D,
+ MAS_JPEG,
+ MAS_MDP,
+ MAS_VIDEO_P0,
+ MAS_VIDEO_P1,
+ MAS_VFE,
+ MAS_CNOC_ONOC_CFG,
+ MAS_JPEG_OCMEM,
+ MAS_MDP_OCMEM,
+ MAS_VIDEO_P0_OCMEM,
+ MAS_VIDEO_P1_OCMEM,
+ MAS_VFE_OCMEM,
+ MAS_LPASS_AHB,
+ MAS_QDSS_BAM,
+ MAS_SNOC_CFG,
+ MAS_BIMC_SNOC,
+ MAS_CNOC_SNOC,
+ MAS_CRYPTO_CORE0,
+ MAS_CRYPTO_CORE1,
+ MAS_LPASS_PROC,
+ MAS_MSS,
+ MAS_MSS_NAV,
+ MAS_OCMEM_DMA,
+ MAS_PNOC_SNOC,
+ MAS_WCSS,
+ MAS_QDSS_ETR,
+ MAS_USB3,
+ MAS_SDCC_1,
+ MAS_SDCC_3,
+ MAS_SDCC_2,
+ MAS_SDCC_4,
+ MAS_TSIF,
+ MAS_BAM_DMA,
+ MAS_BLSP_2,
+ MAS_USB_HSIC,
+ MAS_BLSP_1,
+ MAS_USB_HS,
+ MAS_PNOC_CFG,
+ MAS_SNOC_PNOC,
+ MAS_RPM_INST,
+ MAS_RPM_DATA,
+ MAS_RPM_SYS,
+ MAS_DEHR,
+ MAS_QDSS_DAP,
+ MAS_SPDM,
+ MAS_TIC,
+ MAS_SNOC_CNOC,
+ MAS_OVNOC_SNOC,
+ MAS_OVNOC_ONOC,
+ MAS_V_OCMEM_GFX3D,
+ MAS_ONOC_OVNOC,
+ MAS_SNOC_OVNOC,
+};
+
+enum msm_bus_copper_slv_hw_id {
+ SLV_EBI = 0,
+ SLV_APSS_L2,
+ SLV_BIMC_SNOC,
+ SLV_CAMERA_CFG,
+ SLV_DISPLAY_CFG,
+ SLV_OCMEM_CFG,
+ SLV_CPR_CFG,
+ SLV_CPR_XPU_CFG,
+ SLV_MISC_CFG,
+ SLV_MISC_XPU_CFG,
+ SLV_VENUS_CFG,
+ SLV_GFX3D_CFG,
+ SLV_MMSS_CLK_CFG,
+ SLV_MMSS_CLK_XPU_CFG,
+ SLV_MNOC_MPU_CFG,
+ SLV_ONOC_MPU_CFG,
+ SLV_MMSS_BIMC,
+ SLV_SERVICE_MNOC,
+ SLV_OCMEM,
+ SLV_SERVICE_ONOC,
+ SLV_APPSS,
+ SLV_LPASS,
+ SLV_USB3,
+ SLV_WCSS,
+ SLV_SNOC_BIMC,
+ SLV_SNOC_CNOC,
+ SLV_OCIMEM,
+ SLV_SNOC_OCMEM,
+ SLV_SNOC_PNOC,
+ SLV_SERVICE_SNOC,
+ SLV_QDSS_STM,
+ SLV_SDCC_1,
+ SLV_SDCC_3,
+ SLV_SDCC_2,
+ SLV_SDCC_4,
+ SLV_TSIF,
+ SLV_BAM_DMA,
+ SLV_BLSP_2,
+ SLV_USB_HSIC,
+ SLV_BLSP_1,
+ SLV_USB_HS,
+ SLV_PDM,
+ SLV_PERIPH_APU_CFG,
+ SLV_MPU_CFG,
+ SLV_PRNG,
+ SLV_PNOC_SNOC,
+ SLV_SERVICE_PNOC,
+ SLV_CLK_CTL,
+ SLV_CNOC_MSS,
+ SLV_SECURITY,
+ SLV_TCSR,
+ SLV_TLMM,
+ SLV_CRYPTO_0_CFG,
+ SLV_CRYPTO_1_CFG,
+ SLV_IMEM_CFG,
+ SLV_MESSAGE_RAM,
+ SLV_BIMC_CFG,
+ SLV_BOOT_ROM,
+ SLV_CNOC_MNOC_MMSS_CFG,
+ SLV_PMIC_ARB,
+ SLV_SPDM_WRAPPER,
+ SLV_DEHR_CFG,
+ SLV_MPM,
+ SLV_QDSS_CFG,
+ SLV_RBCPR_CFG,
+ SLV_RBCPR_QDSS_APU_CFG,
+ SLV_CNOC_MNOC_CFG,
+ SLV_SNOC_MPU_CFG,
+ SLV_CNOC_ONOC_CFG,
+ SLV_PNOC_CFG,
+ SLV_SNOC_CFG,
+ SLV_EBI1_DLL_CFG,
+ SLV_PHY_APU_CFG,
+ SLV_EBI1_PHY_CFG,
+ SLV_RPM,
+ SLV_CNOC_SNOC,
+ SLV_SERVICE_CNOC,
+ SLV_SNOC_OVNOC,
+ SLV_ONOC_OVNOC,
+ SLV_OVNOC_ONOC,
+ SLV_OVNOC_SNOC,
+};
+
+static uint32_t master_iids[NMASTERS];
+static uint32_t slave_iids[NSLAVES];
+
+/* System NOC nodes */
+static int mport_lpass_ahb[] = {MASTER_PORT_LPASS_AHB,};
+static int mport_qdss_bam[] = {MASTER_PORT_QDSS_BAM,};
+static int mport_snoc_cfg[] = {MASTER_PORT_SNOC_CFG,};
+static int mport_gw_bimc_snoc[] = {MASTER_PORT_GW_BIMC_SNOC,};
+static int mport_gw_cnoc_snoc[] = {MASTER_PORT_GW_CNOC_SNOC,};
+static int mport_crypto_core0[] = {MASTER_PORT_CRYPTO_CORE0,};
+static int mport_crypto_core1[] = {MASTER_PORT_CRYPTO_CORE1};
+static int mport_lpass_proc[] = {MASTER_PORT_LPASS_PROC};
+static int mport_mss[] = {MASTER_PORT_MSS};
+static int mport_mss_nav[] = {MASTER_PORT_MSS_NAV};
+static int mport_ocmem_dma[] = {MASTER_PORT_OCMEM_DMA};
+static int mport_gw_pnoc_snoc[] = {MASTER_PORT_GW_PNOC_SNOC};
+static int mport_wcss[] = {MASTER_PORT_WCSS};
+static int mport_qdss_etr[] = {MASTER_PORT_QDSS_ETR};
+static int mport_usb3[] = {MASTER_PORT_USB3};
+
+static int sport_kmpss[] = {SLAVE_PORT_KMPSS};
+static int sport_lpass[] = {SLAVE_PORT_LPASS};
+static int sport_usb3[] = {SLAVE_PORT_USB3};
+static int sport_wcss[] = {SLAVE_PORT_WCSS};
+static int sport_gw_snoc_bimc[] = {
+ SLAVE_PORT_GW_SNOC_BIMC_P0,
+ SLAVE_PORT_GW_SNOC_BIMC_P1,
+ };
+static int sport_gw_snoc_cnoc[] = {SLAVE_PORT_GW_SNOC_CNOC};
+static int sport_ocimem[] = {SLAVE_PORT_OCIMEM};
+static int sport_snoc_ocmem[] = {SLAVE_PORT_SNOC_OCMEM};
+static int sport_gw_snoc_pnoc[] = {SLAVE_PORT_GW_SNOC_PNOC};
+static int sport_service_snoc[] = {SLAVE_PORT_SERVICE_SNOC};
+static int sport_qdss_stm[] = {SLAVE_PORT_QDSS_STM};
+
+
+/* MMSS NOC nodes */
+static int mport_gw_cnoc_mnoc_cfg[] = {
+ MASTER_PORT_GW_CNOC_MNOC_MMSS_CFG,
+ MASTER_PORT_GW_CNOC_MNOC_CFG,
+};
+static int mport_gfx3d[] = {
+ MASTER_PORT_GFX3D_PORT0,
+ MASTER_PORT_GFX3D_PORT1,
+};
+static int mport_jpeg[] = {MASTER_PORT_JPEG};
+static int mport_mdp[] = {MASTER_PORT_MDP};
+static int mport_video_port0[] = {MASTER_PORT_VIDEO_PORT0};
+static int mport_video_port1[] = {MASTER_PORT_VIDEO_PORT1};
+static int mport_vfe[] = {MASTER_PORT_VFE};
+
+static int sport_camera_cfg[] = {SLAVE_PORT_CAMERA_CFG};
+static int sport_display_cfg[] = {SLAVE_PORT_DISPLAY_CFG};
+static int sport_ocmem_cfg[] = {SLAVE_PORT_OCMEM_CFG};
+static int sport_cpr_cfg[] = {SLAVE_PORT_CPR_CFG};
+static int sport_cpr_xpu_cfg[] = {SLAVE_PORT_CPR_XPU_CFG,};
+static int sport_misc_cfg[] = {SLAVE_PORT_MISC_CFG};
+static int sport_misc_xpu_cfg[] = {SLAVE_PORT_MISC_XPU_CFG};
+static int sport_venus_cfg[] = {SLAVE_PORT_VENUS_CFG};
+static int sport_gfx3d_cfg[] = {SLAVE_PORT_GFX3D_CFG};
+static int sport_mmss_clk_cfg[] = {SLAVE_PORT_MMSS_CLK_CFG};
+static int sport_mmss_clk_xpu_cfg[] = {
+ SLAVE_PORT_MMSS_CLK_XPU_CFG
+};
+static int sport_mnoc_mpu_cfg[] = {SLAVE_PORT_MNOC_MPU_CFG};
+static int sport_onoc_mpu_cfg[] = {SLAVE_PORT_ONOC_MPU_CFG};
+static int sport_gw_mmss_bimc[] = {
+ SLAVE_PORT_GW_MMSS_BIMC_P0,
+ SLAVE_PORT_GW_MMSS_BIMC_P1,
+};
+static int sport_service_mnoc[] = {SLAVE_PORT_SERVICE_MNOC};
+
+/* BIMC Nodes */
+
+static int mport_kmpss_m0[] = {MASTER_PORT_KMPSS_M0,};
+static int mport_kmpss_m1[] = {MASTER_PORT_KMPSS_M1};
+static int mport_mss_proc[] = {MASTER_PORT_MSS_PROC};
+static int mport_gw_mnoc_bimc[] = {
+ MASTER_PORT_GW_MNOC_BIMC_0,
+ MASTER_PORT_GW_MNOC_BIMC_1,
+};
+static int mport_gw_snoc_bimc[] = {
+ MASTER_PORT_GW_SNOC_BIMC_0,
+ MASTER_PORT_GW_SNOC_BIMC_1,
+};
+
+static int sport_ebi1[] = {
+ SLAVE_PORT_EBI1_CH0,
+ SLAVE_PORT_EBI1_CH1,
+};
+static int sport_kmpss_l2[] = {SLAVE_PORT_KMPSS_L2,};
+static int sport_gw_bimc_snoc[] = {SLAVE_PORT_GW_BIMC_SNOC,};
+
+/* OCMEM NOC Nodes */
+static int mport_cnoc_onoc_cfg[] = {
+ MASTER_PORT_CNOC_ONOC_CFG,
+};
+static int mport_jpeg_ocmem[] = {MASTER_PORT_JPEG_OCMEM,};
+static int mport_mdp_ocmem[] = {MASTER_PORT_MDP_OCMEM,};
+static int mport_video_p0_ocmem[] = {
+ MASTER_PORT_VIDEO_P0_OCMEM,
+};
+static int mport_video_p1_ocmem[] = {
+ MASTER_PORT_VIDEO_P1_OCMEM,
+};
+static int mport_vfe_ocmem[] = {MASTER_PORT_VFE_OCMEM,};
+static int sport_ocmem[] = {
+ SLAVE_PORT_OCMEM_P0,
+ SLAVE_PORT_OCMEM_P1,
+};
+
+static int sport_service_onoc[] = {SLAVE_PORT_SERVICE_ONOC,};
+
+/* Peripheral NOC Nodes */
+static int mport_sdcc_1[] = {MASTER_PORT_SDCC_1,};
+static int mport_sdcc_3[] = {MASTER_PORT_SDCC_3,};
+static int mport_sdcc_2[] = {MASTER_PORT_SDCC_2,};
+static int mport_sdcc_4[] = {MASTER_PORT_SDCC_4,};
+static int mport_tsif[] = {MASTER_PORT_TSIF,};
+static int mport_bam_dma[] = {MASTER_PORT_BAM_DMA,};
+static int mport_blsp_2[] = {MASTER_PORT_BLSP_2,};
+static int mport_usb_hsic[] = {MASTER_PORT_USB_HSIC,};
+static int mport_blsp_1[] = {MASTER_PORT_BLSP_1,};
+static int mport_usb_hs[] = {MASTER_PORT_USB_HS,};
+static int mport_pnoc_cfg[] = {MASTER_PORT_PNOC_CFG,};
+static int mport_gw_snoc_pnoc[] = {MASTER_PORT_GW_SNOC_PNOC,};
+
+static int sport_sdcc_1[] = {SLAVE_PORT_SDCC_1,};
+static int sport_sdcc_3[] = {SLAVE_PORT_SDCC_3,};
+static int sport_sdcc_2[] = {SLAVE_PORT_SDCC_2,};
+static int sport_sdcc_4[] = {SLAVE_PORT_SDCC_4,};
+static int sport_tsif[] = {SLAVE_PORT_TSIF,};
+static int sport_bam_dma[] = {SLAVE_PORT_BAM_DMA,};
+static int sport_blsp_2[] = {SLAVE_PORT_BLSP_2,};
+static int sport_usb_hsic[] = {SLAVE_PORT_USB_HSIC,};
+static int sport_blsp_1[] = {SLAVE_PORT_BLSP_1,};
+static int sport_usb_hs[] = {SLAVE_PORT_USB_HS,};
+static int sport_pdm[] = {SLAVE_PORT_PDM,};
+static int sport_periph_apu_cfg[] = {
+ SLAVE_PORT_PERIPH_APU_CFG,
+};
+static int sport_pnoc_mpu_cfg[] = {SLAVE_PORT_PNOC_MPU_CFG,};
+static int sport_prng[] = {SLAVE_PORT_PRNG,};
+static int sport_gw_pnoc_snoc[] = {SLAVE_PORT_GW_PNOC_SNOC,};
+static int sport_service_pnoc[] = {SLAVE_PORT_SERVICE_PNOC,};
+
+/* Config NOC Nodes */
+static int mport_rpm_inst[] = {MASTER_PORT_RPM_INST,};
+static int mport_rpm_data[] = {MASTER_PORT_RPM_DATA,};
+static int mport_rpm_sys[] = {MASTER_PORT_RPM_SYS,};
+static int mport_dehr[] = {MASTER_PORT_DEHR,};
+static int mport_qdss_dap[] = {MASTER_PORT_QDSS_DAP,};
+static int mport_spdm[] = {MASTER_PORT_SPDM,};
+static int mport_tic[] = {MASTER_PORT_TIC,};
+static int mport_gw_snoc_cnoc[] = {MASTER_PORT_GW_SNOC_CNOC,};
+
+static int sport_clk_ctl[] = {SLAVE_PORT_CLK_CTL,};
+static int sport_cnoc_mss[] = {SLAVE_PORT_CNOC_MSS,};
+static int sport_security[] = {SLAVE_PORT_SECURITY,};
+static int sport_tcsr[] = {SLAVE_PORT_TCSR,};
+static int sport_tlmm[] = {SLAVE_PORT_TLMM,};
+static int sport_crypto_0_cfg[] = {SLAVE_PORT_CRYPTO_0_CFG,};
+static int sport_crypto_1_cfg[] = {SLAVE_PORT_CRYPTO_1_CFG,};
+static int sport_imem_cfg[] = {SLAVE_PORT_IMEM_CFG,};
+static int sport_message_ram[] = {SLAVE_PORT_MESSAGE_RAM,};
+static int sport_bimc_cfg[] = {SLAVE_PORT_BIMC_CFG,};
+static int sport_boot_rom[] = {SLAVE_PORT_BOOT_ROM,};
+static int sport_cnoc_mnoc_mmss_cfg[] = {SLAVE_PORT_CNOC_MNOC_MMSS_CFG,};
+static int sport_cnoc_mnoc_cfg[] = {SLAVE_PORT_CNOC_MNOC_CFG,};
+static int sport_pmic_arb[] = {SLAVE_PORT_PMIC_ARB,};
+static int sport_spdm_wrapper[] = {SLAVE_PORT_SPDM_WRAPPER,};
+static int sport_dehr_cfg[] = {SLAVE_PORT_DEHR_CFG,};
+static int sport_mpm[] = {SLAVE_PORT_MPM,};
+static int sport_qdss_cfg[] = {SLAVE_PORT_QDSS_CFG,};
+static int sport_rbcpr_cfg[] = {SLAVE_PORT_RBCPR_CFG,};
+static int sport_rbcpr_qdss_apu_cfg[] = {SLAVE_PORT_RBCPR_QDSS_APU_CFG,};
+static int sport_snoc_mpu_cfg[] = {SLAVE_PORT_SNOC_MPU_CFG,};
+static int sport_cnoc_onoc_cfg[] = {SLAVE_PORT_CNOC_ONOC_CFG,};
+static int sport_pnoc_cfg[] = {SLAVE_PORT_PNOC_CFG,};
+static int sport_snoc_cfg[] = {SLAVE_PORT_SNOC_CFG,};
+static int sport_ebi1_dll_cfg[] = {SLAVE_PORT_EBI1_DLL_CFG,};
+static int sport_phy_apu_cfg[] = {SLAVE_PORT_PHY_APU_CFG,};
+static int sport_ebi1_phy_cfg[] = {SLAVE_PORT_EBI1_PHY_CFG,};
+static int sport_rpm[] = {SLAVE_PORT_RPM,};
+static int sport_gw_cnoc_snoc[] = {SLAVE_PORT_GW_CNOC_SNOC,};
+static int sport_service_cnoc[] = {SLAVE_PORT_SERVICE_CNOC,};
+
+static int tier2[] = {MSM_BUS_BW_TIER2,};
+
+/*
+ * QoS ports are defined only when they differ from the
+ * master ports.
+ */
+static int qports_gemini[] = {0};
+static int qports_mdp[] = {1};
+static int qports_venus_p0[] = {4};
+static int qports_venus_p1[] = {5};
+static int qports_vfe[] = {6};
+static int qports_gemini_ocmem[] = {0};
+static int qports_mdp_ocmem[] = {1};
+static int qports_venus_p0_ocmem[] = {2};
+static int qports_venus_p1_ocmem[] = {3};
+static int qports_vfe_ocmem[] = {4};
+static int qports_crypto_c0[] = {2};
+static int qports_crypto_c1[] = {3};
+static int qports_lpass_proc[] = {4};
+static int qports_ocmem_dma[] = {7};
+static int qports_gw_snoc_bimc[] = {5, 6};
+static int qports_kmpss[] = {0, 1};
+static int qports_lpass_ahb[] = {0};
+static int qports_qdss_bam[] = {1};
+static int qports_gw_pnoc_snoc[] = {8};
+static int qports_qdss_etr[] = {10};
+static int qports_usb3[] = {11};
+static int qports_oxili[] = {2, 3};
+static int qports_gw_mnoc_bimc[] = {3, 4};
+
+static struct msm_bus_node_info sys_noc_info[] = {
+ {
+ .id = MSM_BUS_MASTER_LPASS_AHB,
+ .masterp = mport_lpass_ahb,
+ .num_mports = ARRAY_SIZE(mport_lpass_ahb),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .qport = qports_lpass_ahb,
+ .mas_hw_id = MAS_LPASS_AHB,
+ .mode = NOC_QOS_MODE_FIXED,
+ .prio_rd = 2,
+ .prio_wr = 2,
+ },
+ {
+ .id = MSM_BUS_MASTER_QDSS_BAM,
+ .masterp = mport_qdss_bam,
+ .num_mports = ARRAY_SIZE(mport_qdss_bam),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .mode = NOC_QOS_MODE_FIXED,
+ .qport = qports_qdss_bam,
+ .mas_hw_id = MAS_QDSS_BAM,
+ },
+ {
+ .id = MSM_BUS_MASTER_SNOC_CFG,
+ .masterp = mport_snoc_cfg,
+ .num_mports = ARRAY_SIZE(mport_snoc_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .mas_hw_id = MAS_SNOC_CFG,
+ },
+ {
+ .id = MSM_BUS_FAB_BIMC,
+ .gateway = 1,
+ .slavep = sport_gw_snoc_bimc,
+ .num_sports = ARRAY_SIZE(sport_gw_snoc_bimc),
+ .masterp = mport_gw_bimc_snoc,
+ .num_mports = ARRAY_SIZE(mport_gw_bimc_snoc),
+ .buswidth = 8,
+ .mas_hw_id = MAS_BIMC_SNOC,
+ .slv_hw_id = SLV_SNOC_BIMC,
+ },
+ {
+ .id = MSM_BUS_FAB_CONFIG_NOC,
+ .gateway = 1,
+ .slavep = sport_gw_snoc_cnoc,
+ .num_sports = ARRAY_SIZE(sport_gw_snoc_cnoc),
+ .masterp = mport_gw_cnoc_snoc,
+ .num_mports = ARRAY_SIZE(mport_gw_cnoc_snoc),
+ .buswidth = 8,
+ .mas_hw_id = MAS_CNOC_SNOC,
+ .slv_hw_id = SLV_SNOC_CNOC,
+ },
+ {
+ .id = MSM_BUS_FAB_PERIPH_NOC,
+ .gateway = 1,
+ .slavep = sport_gw_snoc_pnoc,
+ .num_sports = ARRAY_SIZE(sport_gw_snoc_pnoc),
+ .masterp = mport_gw_pnoc_snoc,
+ .num_mports = ARRAY_SIZE(mport_gw_pnoc_snoc),
+ .buswidth = 8,
+ .qport = qports_gw_pnoc_snoc,
+ .mas_hw_id = MAS_PNOC_SNOC,
+ .slv_hw_id = SLV_SNOC_PNOC,
+ .mode = NOC_QOS_MODE_FIXED,
+ .prio_rd = 2,
+ .prio_wr = 2,
+ },
+ {
+ .id = MSM_BUS_FAB_OCMEM_VNOC,
+ .gateway = 1,
+ .buswidth = 8,
+ .mas_hw_id = MAS_OVNOC_SNOC,
+ .slv_hw_id = SLV_SNOC_OVNOC,
+ },
+ {
+ .id = MSM_BUS_MASTER_CRYPTO_CORE0,
+ .masterp = mport_crypto_core0,
+ .num_mports = ARRAY_SIZE(mport_crypto_core0),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .mode = NOC_QOS_MODE_FIXED,
+ .qport = qports_crypto_c0,
+ .mas_hw_id = MAS_CRYPTO_CORE0,
+ },
+ {
+ .id = MSM_BUS_MASTER_CRYPTO_CORE1,
+ .masterp = mport_crypto_core1,
+ .num_mports = ARRAY_SIZE(mport_crypto_core1),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .mode = NOC_QOS_MODE_FIXED,
+ .qport = qports_crypto_c1,
+ .mas_hw_id = MAS_CRYPTO_CORE1,
+ },
+ {
+ .id = MSM_BUS_MASTER_LPASS_PROC,
+ .masterp = mport_lpass_proc,
+ .num_mports = ARRAY_SIZE(mport_lpass_proc),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .qport = qports_lpass_proc,
+ .mas_hw_id = MAS_LPASS_PROC,
+ .mode = NOC_QOS_MODE_FIXED,
+ .prio_rd = 2,
+ .prio_wr = 2,
+ },
+ {
+ .id = MSM_BUS_MASTER_MSS,
+ .masterp = mport_mss,
+ .num_mports = ARRAY_SIZE(mport_mss),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .mas_hw_id = MAS_MSS,
+ },
+ {
+ .id = MSM_BUS_MASTER_MSS_NAV,
+ .masterp = mport_mss_nav,
+ .num_mports = ARRAY_SIZE(mport_mss_nav),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .mas_hw_id = MAS_MSS_NAV,
+ },
+ {
+ .id = MSM_BUS_MASTER_OCMEM_DMA,
+ .masterp = mport_ocmem_dma,
+ .num_mports = ARRAY_SIZE(mport_ocmem_dma),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .mode = NOC_QOS_MODE_FIXED,
+ .qport = qports_ocmem_dma,
+ .mas_hw_id = MAS_OCMEM_DMA,
+ },
+ {
+ .id = MSM_BUS_MASTER_WCSS,
+ .masterp = mport_wcss,
+ .num_mports = ARRAY_SIZE(mport_wcss),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .mas_hw_id = MAS_WCSS,
+ },
+ {
+ .id = MSM_BUS_MASTER_QDSS_ETR,
+ .masterp = mport_qdss_etr,
+ .num_mports = ARRAY_SIZE(mport_qdss_etr),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .qport = qports_qdss_etr,
+ .mode = NOC_QOS_MODE_FIXED,
+ .mas_hw_id = MAS_QDSS_ETR,
+ },
+ {
+ .id = MSM_BUS_MASTER_USB3,
+ .masterp = mport_usb3,
+ .num_mports = ARRAY_SIZE(mport_usb3),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .mode = NOC_QOS_MODE_FIXED,
+ .qport = qports_usb3,
+ .mas_hw_id = MAS_USB3,
+ .prio_rd = 2,
+ .prio_wr = 2,
+ },
+ {
+ .id = MSM_BUS_SLAVE_AMPSS,
+ .slavep = sport_kmpss,
+ .num_sports = ARRAY_SIZE(sport_kmpss),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_APPSS,
+ },
+ {
+ .id = MSM_BUS_SLAVE_LPASS,
+ .slavep = sport_lpass,
+ .num_sports = ARRAY_SIZE(sport_lpass),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_LPASS,
+ },
+ {
+ .id = MSM_BUS_SLAVE_USB3,
+ .slavep = sport_usb3,
+ .num_sports = ARRAY_SIZE(sport_usb3),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_USB3,
+ },
+ {
+ .id = MSM_BUS_SLAVE_WCSS,
+ .slavep = sport_wcss,
+ .num_sports = ARRAY_SIZE(sport_wcss),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_WCSS,
+ },
+ {
+ .id = MSM_BUS_SLAVE_OCIMEM,
+ .slavep = sport_ocimem,
+ .num_sports = ARRAY_SIZE(sport_ocimem),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_OCIMEM,
+ },
+ {
+ .id = MSM_BUS_SLAVE_SNOC_OCMEM,
+ .slavep = sport_snoc_ocmem,
+ .num_sports = ARRAY_SIZE(sport_snoc_ocmem),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_SNOC_OCMEM,
+ },
+ {
+ .id = MSM_BUS_SLAVE_SERVICE_SNOC,
+ .slavep = sport_service_snoc,
+ .num_sports = ARRAY_SIZE(sport_service_snoc),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_SERVICE_SNOC,
+ },
+ {
+ .id = MSM_BUS_SLAVE_QDSS_STM,
+ .slavep = sport_qdss_stm,
+ .num_sports = ARRAY_SIZE(sport_qdss_stm),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_QDSS_STM,
+ },
+};
+
+
+static struct msm_bus_node_info mmss_noc_info[] = {
+ {
+ .id = MSM_BUS_MASTER_GRAPHICS_3D,
+ .masterp = mport_gfx3d,
+ .num_mports = ARRAY_SIZE(mport_gfx3d),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .hw_sel = MSM_BUS_NOC,
+ .perm_mode = NOC_QOS_MODES_ALL_PERM,
+ .mode = NOC_QOS_MODE_FIXED,
+ .ws = 10000,
+ .qport = qports_oxili,
+ .mas_hw_id = MAS_GFX3D,
+ },
+ {
+ .id = MSM_BUS_MASTER_JPEG,
+ .masterp = mport_jpeg,
+ .num_mports = ARRAY_SIZE(mport_jpeg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .hw_sel = MSM_BUS_NOC,
+ .perm_mode = NOC_QOS_MODES_ALL_PERM,
+ .mode = NOC_QOS_MODE_BYPASS,
+ .qport = qports_gemini,
+ .ws = 10000,
+ .mas_hw_id = MAS_JPEG,
+ },
+ {
+ .id = MSM_BUS_MASTER_MDP_PORT0,
+ .masterp = mport_mdp,
+ .num_mports = ARRAY_SIZE(mport_mdp),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .hw_sel = MSM_BUS_NOC,
+ .perm_mode = NOC_QOS_MODES_ALL_PERM,
+ .mode = NOC_QOS_MODE_BYPASS,
+ .qport = qports_mdp,
+ .ws = 10000,
+ .mas_hw_id = MAS_MDP,
+ },
+ {
+ .id = MSM_BUS_MASTER_VIDEO_P0,
+ .masterp = mport_video_port0,
+ .num_mports = ARRAY_SIZE(mport_video_port0),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .hw_sel = MSM_BUS_NOC,
+ .perm_mode = NOC_QOS_MODES_ALL_PERM,
+ .mode = NOC_QOS_MODE_BYPASS,
+ .ws = 10000,
+ .qport = qports_venus_p0,
+ .mas_hw_id = MAS_VIDEO_P0,
+ },
+ {
+ .id = MSM_BUS_MASTER_VIDEO_P1,
+ .masterp = mport_video_port1,
+ .num_mports = ARRAY_SIZE(mport_video_port1),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .hw_sel = MSM_BUS_NOC,
+ .perm_mode = NOC_QOS_MODES_ALL_PERM,
+ .mode = NOC_QOS_MODE_BYPASS,
+ .ws = 10000,
+ .qport = qports_venus_p1,
+ .mas_hw_id = MAS_VIDEO_P1,
+ },
+ {
+ .id = MSM_BUS_MASTER_VFE,
+ .masterp = mport_vfe,
+ .num_mports = ARRAY_SIZE(mport_vfe),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .hw_sel = MSM_BUS_NOC,
+ .perm_mode = NOC_QOS_MODES_ALL_PERM,
+ .mode = NOC_QOS_MODE_BYPASS,
+ .ws = 10000,
+ .qport = qports_vfe,
+ .mas_hw_id = MAS_VFE,
+ },
+ {
+ .id = MSM_BUS_FAB_CONFIG_NOC,
+ .gateway = 1,
+ .masterp = mport_gw_cnoc_mnoc_cfg,
+ .num_mports = ARRAY_SIZE(mport_gw_cnoc_mnoc_cfg),
+ .buswidth = 16,
+ .hw_sel = MSM_BUS_RPM,
+ .mas_hw_id = MAS_CNOC_MNOC_MMSS_CFG,
+ },
+ {
+ .id = MSM_BUS_FAB_BIMC,
+ .gateway = 1,
+ .slavep = sport_gw_mmss_bimc,
+ .num_sports = ARRAY_SIZE(sport_gw_mmss_bimc),
+ .buswidth = 16,
+ .hw_sel = MSM_BUS_NOC,
+ .slv_hw_id = SLV_MMSS_BIMC,
+ },
+ {
+ .id = MSM_BUS_SLAVE_CAMERA_CFG,
+ .slavep = sport_camera_cfg,
+ .num_sports = ARRAY_SIZE(sport_camera_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 16,
+ .hw_sel = MSM_BUS_NOC,
+ .slv_hw_id = SLV_CAMERA_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_DISPLAY_CFG,
+ .slavep = sport_display_cfg,
+ .num_sports = ARRAY_SIZE(sport_display_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 16,
+ .hw_sel = MSM_BUS_NOC,
+ .slv_hw_id = SLV_DISPLAY_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_OCMEM_CFG,
+ .slavep = sport_ocmem_cfg,
+ .num_sports = ARRAY_SIZE(sport_ocmem_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 16,
+ .hw_sel = MSM_BUS_NOC,
+ .slv_hw_id = SLV_OCMEM_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_CPR_CFG,
+ .slavep = sport_cpr_cfg,
+ .num_sports = ARRAY_SIZE(sport_cpr_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 16,
+ .hw_sel = MSM_BUS_NOC,
+ .slv_hw_id = SLV_CPR_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_CPR_XPU_CFG,
+ .slavep = sport_cpr_xpu_cfg,
+ .num_sports = ARRAY_SIZE(sport_cpr_xpu_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 16,
+ .hw_sel = MSM_BUS_NOC,
+ .slv_hw_id = SLV_CPR_XPU_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_MISC_CFG,
+ .slavep = sport_misc_cfg,
+ .num_sports = ARRAY_SIZE(sport_misc_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 16,
+ .hw_sel = MSM_BUS_NOC,
+ .slv_hw_id = SLV_MISC_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_MISC_XPU_CFG,
+ .slavep = sport_misc_xpu_cfg,
+ .num_sports = ARRAY_SIZE(sport_misc_xpu_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 16,
+ .hw_sel = MSM_BUS_NOC,
+ .slv_hw_id = SLV_MISC_XPU_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_VENUS_CFG,
+ .slavep = sport_venus_cfg,
+ .num_sports = ARRAY_SIZE(sport_venus_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 16,
+ .hw_sel = MSM_BUS_NOC,
+ .slv_hw_id = SLV_VENUS_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_GRAPHICS_3D_CFG,
+ .slavep = sport_gfx3d_cfg,
+ .num_sports = ARRAY_SIZE(sport_gfx3d_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 16,
+ .hw_sel = MSM_BUS_NOC,
+ .slv_hw_id = SLV_GFX3D_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_MMSS_CLK_CFG,
+ .slavep = sport_mmss_clk_cfg,
+ .num_sports = ARRAY_SIZE(sport_mmss_clk_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 16,
+ .hw_sel = MSM_BUS_NOC,
+ .slv_hw_id = SLV_MMSS_CLK_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_MMSS_CLK_XPU_CFG,
+ .slavep = sport_mmss_clk_xpu_cfg,
+ .num_sports = ARRAY_SIZE(sport_mmss_clk_xpu_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 16,
+ .hw_sel = MSM_BUS_NOC,
+ .slv_hw_id = SLV_MMSS_CLK_XPU_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_MNOC_MPU_CFG,
+ .slavep = sport_mnoc_mpu_cfg,
+ .num_sports = ARRAY_SIZE(sport_mnoc_mpu_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 16,
+ .hw_sel = MSM_BUS_NOC,
+ .slv_hw_id = SLV_MNOC_MPU_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_ONOC_MPU_CFG,
+ .slavep = sport_onoc_mpu_cfg,
+ .num_sports = ARRAY_SIZE(sport_onoc_mpu_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 16,
+ .hw_sel = MSM_BUS_NOC,
+ .slv_hw_id = SLV_ONOC_MPU_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_SERVICE_MNOC,
+ .slavep = sport_service_mnoc,
+ .num_sports = ARRAY_SIZE(sport_service_mnoc),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 16,
+ .hw_sel = MSM_BUS_NOC,
+ .slv_hw_id = SLV_SERVICE_MNOC,
+ },
+};
+
+static struct msm_bus_node_info bimc_info[] = {
+ {
+ .id = MSM_BUS_MASTER_AMPSS_M0,
+ .masterp = mport_kmpss_m0,
+ .num_mports = ARRAY_SIZE(mport_kmpss_m0),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .hw_sel = MSM_BUS_BIMC,
+ .mode = NOC_QOS_MODE_FIXED,
+ .qport = qports_kmpss,
+ .ws = 10000,
+ .mas_hw_id = MAS_APPSS_PROC,
+ .prio_lvl = 0,
+ .prio_rd = 2,
+ .prio_wr = 2,
+ },
+ {
+ .id = MSM_BUS_MASTER_AMPSS_M1,
+ .masterp = mport_kmpss_m1,
+ .num_mports = ARRAY_SIZE(mport_kmpss_m1),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .hw_sel = MSM_BUS_BIMC,
+ .mode = NOC_QOS_MODE_FIXED,
+ .qport = qports_kmpss,
+ .ws = 10000,
+ .mas_hw_id = MAS_APPSS_PROC,
+ },
+ {
+ .id = MSM_BUS_MASTER_MSS_PROC,
+ .masterp = mport_mss_proc,
+ .num_mports = ARRAY_SIZE(mport_mss_proc),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .hw_sel = MSM_BUS_RPM,
+ .mas_hw_id = MAS_AMSS_PROC,
+ },
+ {
+ .id = MSM_BUS_FAB_MMSS_NOC,
+ .gateway = 1,
+ .masterp = mport_gw_mnoc_bimc,
+ .num_mports = ARRAY_SIZE(mport_gw_mnoc_bimc),
+ .qport = qports_gw_mnoc_bimc,
+ .buswidth = 8,
+ .ws = 10000,
+ .mas_hw_id = MAS_MNOC_BIMC,
+ .hw_sel = MSM_BUS_BIMC,
+ .mode = NOC_QOS_MODE_BYPASS,
+ },
+ {
+ .id = MSM_BUS_FAB_SYS_NOC,
+ .gateway = 1,
+ .slavep = sport_gw_bimc_snoc,
+ .num_sports = ARRAY_SIZE(sport_gw_bimc_snoc),
+ .masterp = mport_gw_snoc_bimc,
+ .num_mports = ARRAY_SIZE(mport_gw_snoc_bimc),
+ .qport = qports_gw_snoc_bimc,
+ .buswidth = 8,
+ .ws = 10000,
+ .mas_hw_id = MAS_SNOC_BIMC,
+ .slv_hw_id = SLV_BIMC_SNOC,
+ },
+ {
+ .id = MSM_BUS_SLAVE_EBI_CH0,
+ .slavep = sport_ebi1,
+ .num_sports = ARRAY_SIZE(sport_ebi1),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_EBI,
+ .mode = NOC_QOS_MODE_BYPASS,
+ },
+ {
+ .id = MSM_BUS_SLAVE_AMPSS_L2,
+ .slavep = sport_kmpss_l2,
+ .num_sports = ARRAY_SIZE(sport_kmpss_l2),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_APSS_L2,
+ },
+};
+
+static struct msm_bus_node_info ocmem_noc_info[] = {
+ {
+ .id = MSM_BUS_FAB_OCMEM_VNOC,
+ .gateway = 1,
+ .buswidth = 16,
+ .mas_hw_id = MAS_OVNOC_ONOC,
+ .slv_hw_id = SLV_ONOC_OVNOC,
+ },
+ {
+ .id = MSM_BUS_MASTER_JPEG_OCMEM,
+ .masterp = mport_jpeg_ocmem,
+ .num_mports = ARRAY_SIZE(mport_jpeg_ocmem),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .perm_mode = NOC_QOS_PERM_MODE_FIXED,
+ .mode = NOC_QOS_MODE_FIXED,
+ .qport = qports_gemini_ocmem,
+ .mas_hw_id = MAS_JPEG_OCMEM,
+ .hw_sel = MSM_BUS_NOC,
+ },
+ {
+ .id = MSM_BUS_MASTER_MDP_OCMEM,
+ .masterp = mport_mdp_ocmem,
+ .num_mports = ARRAY_SIZE(mport_mdp_ocmem),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .perm_mode = NOC_QOS_PERM_MODE_FIXED,
+ .mode = NOC_QOS_MODE_FIXED,
+ .qport = qports_mdp_ocmem,
+ .mas_hw_id = MAS_MDP_OCMEM,
+ .hw_sel = MSM_BUS_NOC,
+ },
+ {
+ .id = MSM_BUS_MASTER_VIDEO_P0_OCMEM,
+ .masterp = mport_video_p0_ocmem,
+ .num_mports = ARRAY_SIZE(mport_video_p0_ocmem),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .perm_mode = NOC_QOS_PERM_MODE_FIXED,
+ .mode = NOC_QOS_MODE_FIXED,
+ .qport = qports_venus_p0_ocmem,
+ .mas_hw_id = MAS_VIDEO_P0_OCMEM,
+ .hw_sel = MSM_BUS_NOC,
+ },
+ {
+ .id = MSM_BUS_MASTER_VIDEO_P1_OCMEM,
+ .masterp = mport_video_p1_ocmem,
+ .num_mports = ARRAY_SIZE(mport_video_p1_ocmem),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .perm_mode = NOC_QOS_PERM_MODE_FIXED,
+ .mode = NOC_QOS_MODE_FIXED,
+ .qport = qports_venus_p1_ocmem,
+ .mas_hw_id = MAS_VIDEO_P1_OCMEM,
+ .hw_sel = MSM_BUS_NOC,
+ },
+ {
+ .id = MSM_BUS_MASTER_VFE_OCMEM,
+ .masterp = mport_vfe_ocmem,
+ .num_mports = ARRAY_SIZE(mport_vfe_ocmem),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .perm_mode = NOC_QOS_PERM_MODE_FIXED,
+ .mode = NOC_QOS_MODE_FIXED,
+ .qport = qports_vfe_ocmem,
+ .mas_hw_id = MAS_VFE_OCMEM,
+ .hw_sel = MSM_BUS_NOC,
+ .prio_rd = 1,
+ .prio_wr = 1,
+ },
+ {
+ .id = MSM_BUS_MASTER_CNOC_ONOC_CFG,
+ .masterp = mport_cnoc_onoc_cfg,
+ .num_mports = ARRAY_SIZE(mport_cnoc_onoc_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 16,
+ .mas_hw_id = MAS_CNOC_ONOC_CFG,
+ .hw_sel = MSM_BUS_NOC,
+ },
+ {
+ .id = MSM_BUS_SLAVE_SERVICE_ONOC,
+ .slavep = sport_service_onoc,
+ .num_sports = ARRAY_SIZE(sport_service_onoc),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 16,
+ .slv_hw_id = SLV_SERVICE_ONOC,
+ },
+};
+
+static struct msm_bus_node_info periph_noc_info[] = {
+ {
+ .id = MSM_BUS_MASTER_PNOC_CFG,
+ .masterp = mport_pnoc_cfg,
+ .num_mports = ARRAY_SIZE(mport_pnoc_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .mas_hw_id = MAS_PNOC_CFG,
+ },
+ {
+ .id = MSM_BUS_MASTER_SDCC_1,
+ .masterp = mport_sdcc_1,
+ .num_mports = ARRAY_SIZE(mport_sdcc_1),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .mas_hw_id = MAS_SDCC_1,
+ },
+ {
+ .id = MSM_BUS_MASTER_SDCC_3,
+ .masterp = mport_sdcc_3,
+ .num_mports = ARRAY_SIZE(mport_sdcc_3),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .mas_hw_id = MAS_SDCC_3,
+ },
+ {
+ .id = MSM_BUS_MASTER_SDCC_4,
+ .masterp = mport_sdcc_4,
+ .num_mports = ARRAY_SIZE(mport_sdcc_4),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .mas_hw_id = MAS_SDCC_4,
+ },
+ {
+ .id = MSM_BUS_MASTER_SDCC_2,
+ .masterp = mport_sdcc_2,
+ .num_mports = ARRAY_SIZE(mport_sdcc_2),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .mas_hw_id = MAS_SDCC_2,
+ },
+ {
+ .id = MSM_BUS_MASTER_TSIF,
+ .masterp = mport_tsif,
+ .num_mports = ARRAY_SIZE(mport_tsif),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .mas_hw_id = MAS_TSIF,
+ },
+ {
+ .id = MSM_BUS_MASTER_BAM_DMA,
+ .masterp = mport_bam_dma,
+ .num_mports = ARRAY_SIZE(mport_bam_dma),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .mas_hw_id = MAS_BAM_DMA,
+ },
+ {
+ .id = MSM_BUS_MASTER_BLSP_2,
+ .masterp = mport_blsp_2,
+ .num_mports = ARRAY_SIZE(mport_blsp_2),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .mas_hw_id = MAS_BLSP_2,
+ },
+ {
+ .id = MSM_BUS_MASTER_USB_HSIC,
+ .masterp = mport_usb_hsic,
+ .num_mports = ARRAY_SIZE(mport_usb_hsic),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .mas_hw_id = MAS_USB_HSIC,
+ },
+ {
+ .id = MSM_BUS_MASTER_BLSP_1,
+ .masterp = mport_blsp_1,
+ .num_mports = ARRAY_SIZE(mport_blsp_1),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .mas_hw_id = MAS_BLSP_1,
+ },
+ {
+ .id = MSM_BUS_MASTER_USB_HS,
+ .masterp = mport_usb_hs,
+ .num_mports = ARRAY_SIZE(mport_usb_hs),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .mas_hw_id = MAS_USB_HS,
+ },
+ {
+ .id = MSM_BUS_FAB_SYS_NOC,
+ .gateway = 1,
+ .slavep = sport_gw_pnoc_snoc,
+ .num_sports = ARRAY_SIZE(sport_gw_pnoc_snoc),
+ .masterp = mport_gw_snoc_pnoc,
+ .num_mports = ARRAY_SIZE(mport_gw_snoc_pnoc),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_PNOC_SNOC,
+ .mas_hw_id = MAS_SNOC_PNOC,
+ },
+ {
+ .id = MSM_BUS_SLAVE_SDCC_1,
+ .slavep = sport_sdcc_1,
+ .num_sports = ARRAY_SIZE(sport_sdcc_1),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_SDCC_1,
+ },
+ {
+ .id = MSM_BUS_SLAVE_SDCC_3,
+ .slavep = sport_sdcc_3,
+ .num_sports = ARRAY_SIZE(sport_sdcc_3),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_SDCC_3,
+ },
+ {
+ .id = MSM_BUS_SLAVE_SDCC_2,
+ .slavep = sport_sdcc_2,
+ .num_sports = ARRAY_SIZE(sport_sdcc_2),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_SDCC_2,
+ },
+ {
+ .id = MSM_BUS_SLAVE_SDCC_4,
+ .slavep = sport_sdcc_4,
+ .num_sports = ARRAY_SIZE(sport_sdcc_4),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_SDCC_4,
+ },
+ {
+ .id = MSM_BUS_SLAVE_TSIF,
+ .slavep = sport_tsif,
+ .num_sports = ARRAY_SIZE(sport_tsif),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_TSIF,
+ },
+ {
+ .id = MSM_BUS_SLAVE_BAM_DMA,
+ .slavep = sport_bam_dma,
+ .num_sports = ARRAY_SIZE(sport_bam_dma),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_BAM_DMA,
+ },
+ {
+ .id = MSM_BUS_SLAVE_BLSP_2,
+ .slavep = sport_blsp_2,
+ .num_sports = ARRAY_SIZE(sport_blsp_2),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_BLSP_2,
+ },
+ {
+ .id = MSM_BUS_SLAVE_USB_HSIC,
+ .slavep = sport_usb_hsic,
+ .num_sports = ARRAY_SIZE(sport_usb_hsic),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_USB_HSIC,
+ },
+ {
+ .id = MSM_BUS_SLAVE_BLSP_1,
+ .slavep = sport_blsp_1,
+ .num_sports = ARRAY_SIZE(sport_blsp_1),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_BLSP_1,
+ },
+ {
+ .id = MSM_BUS_SLAVE_USB_HS,
+ .slavep = sport_usb_hs,
+ .num_sports = ARRAY_SIZE(sport_usb_hs),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_USB_HS,
+ },
+ {
+ .id = MSM_BUS_SLAVE_PDM,
+ .slavep = sport_pdm,
+ .num_sports = ARRAY_SIZE(sport_pdm),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_PDM,
+ },
+ {
+ .id = MSM_BUS_SLAVE_PERIPH_APU_CFG,
+ .slavep = sport_periph_apu_cfg,
+ .num_sports = ARRAY_SIZE(sport_periph_apu_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_PERIPH_APU_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_PNOC_MPU_CFG,
+ .slavep = sport_pnoc_mpu_cfg,
+ .num_sports = ARRAY_SIZE(sport_pnoc_mpu_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_MPU_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_PRNG,
+ .slavep = sport_prng,
+ .num_sports = ARRAY_SIZE(sport_prng),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_PRNG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_SERVICE_PNOC,
+ .slavep = sport_service_pnoc,
+ .num_sports = ARRAY_SIZE(sport_service_pnoc),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_SERVICE_PNOC,
+ },
+};
+
+static struct msm_bus_node_info config_noc_info[] = {
+ {
+ .id = MSM_BUS_MASTER_RPM_INST,
+ .masterp = mport_rpm_inst,
+ .num_mports = ARRAY_SIZE(mport_rpm_inst),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .mas_hw_id = MAS_RPM_INST,
+ },
+ {
+ .id = MSM_BUS_MASTER_RPM_DATA,
+ .masterp = mport_rpm_data,
+ .num_mports = ARRAY_SIZE(mport_rpm_data),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .mas_hw_id = MAS_RPM_DATA,
+ },
+ {
+ .id = MSM_BUS_MASTER_RPM_SYS,
+ .masterp = mport_rpm_sys,
+ .num_mports = ARRAY_SIZE(mport_rpm_sys),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .mas_hw_id = MAS_RPM_SYS,
+ },
+ {
+ .id = MSM_BUS_MASTER_DEHR,
+ .masterp = mport_dehr,
+ .num_mports = ARRAY_SIZE(mport_dehr),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .mas_hw_id = MAS_DEHR,
+ },
+ {
+ .id = MSM_BUS_MASTER_QDSS_DAP,
+ .masterp = mport_qdss_dap,
+ .num_mports = ARRAY_SIZE(mport_qdss_dap),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .mas_hw_id = MAS_QDSS_DAP,
+ },
+ {
+ .id = MSM_BUS_MASTER_SPDM,
+ .masterp = mport_spdm,
+ .num_mports = ARRAY_SIZE(mport_spdm),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .mas_hw_id = MAS_SPDM,
+ },
+ {
+ .id = MSM_BUS_MASTER_TIC,
+ .masterp = mport_tic,
+ .num_mports = ARRAY_SIZE(mport_tic),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .mas_hw_id = MAS_TIC,
+ },
+ {
+ .id = MSM_BUS_SLAVE_CLK_CTL,
+ .slavep = sport_clk_ctl,
+ .num_sports = ARRAY_SIZE(sport_clk_ctl),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_CLK_CTL,
+ },
+ {
+ .id = MSM_BUS_SLAVE_CNOC_MSS,
+ .slavep = sport_cnoc_mss,
+ .num_sports = ARRAY_SIZE(sport_cnoc_mss),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_CNOC_MSS,
+ },
+ {
+ .id = MSM_BUS_SLAVE_SECURITY,
+ .slavep = sport_security,
+ .num_sports = ARRAY_SIZE(sport_security),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_SECURITY,
+ },
+ {
+ .id = MSM_BUS_SLAVE_TCSR,
+ .slavep = sport_tcsr,
+ .num_sports = ARRAY_SIZE(sport_tcsr),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_TCSR,
+ },
+ {
+ .id = MSM_BUS_SLAVE_TLMM,
+ .slavep = sport_tlmm,
+ .num_sports = ARRAY_SIZE(sport_tlmm),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_TLMM,
+ },
+ {
+ .id = MSM_BUS_SLAVE_CRYPTO_0_CFG,
+ .slavep = sport_crypto_0_cfg,
+ .num_sports = ARRAY_SIZE(sport_crypto_0_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_CRYPTO_0_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_CRYPTO_1_CFG,
+ .slavep = sport_crypto_1_cfg,
+ .num_sports = ARRAY_SIZE(sport_crypto_1_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_CRYPTO_1_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_IMEM_CFG,
+ .slavep = sport_imem_cfg,
+ .num_sports = ARRAY_SIZE(sport_imem_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_IMEM_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_MESSAGE_RAM,
+ .slavep = sport_message_ram,
+ .num_sports = ARRAY_SIZE(sport_message_ram),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_MESSAGE_RAM,
+ },
+ {
+ .id = MSM_BUS_SLAVE_BIMC_CFG,
+ .slavep = sport_bimc_cfg,
+ .num_sports = ARRAY_SIZE(sport_bimc_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_BIMC_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_BOOT_ROM,
+ .slavep = sport_boot_rom,
+ .num_sports = ARRAY_SIZE(sport_boot_rom),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_BOOT_ROM,
+ },
+ {
+ .id = MSM_BUS_SLAVE_PMIC_ARB,
+ .slavep = sport_pmic_arb,
+ .num_sports = ARRAY_SIZE(sport_pmic_arb),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_PMIC_ARB,
+ },
+ {
+ .id = MSM_BUS_SLAVE_SPDM_WRAPPER,
+ .slavep = sport_spdm_wrapper,
+ .num_sports = ARRAY_SIZE(sport_spdm_wrapper),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_SPDM_WRAPPER,
+ },
+ {
+ .id = MSM_BUS_SLAVE_DEHR_CFG,
+ .slavep = sport_dehr_cfg,
+ .num_sports = ARRAY_SIZE(sport_dehr_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_DEHR_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_MPM,
+ .slavep = sport_mpm,
+ .num_sports = ARRAY_SIZE(sport_mpm),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_MPM,
+ },
+ {
+ .id = MSM_BUS_SLAVE_QDSS_CFG,
+ .slavep = sport_qdss_cfg,
+ .num_sports = ARRAY_SIZE(sport_qdss_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_QDSS_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_RBCPR_CFG,
+ .slavep = sport_rbcpr_cfg,
+ .num_sports = ARRAY_SIZE(sport_rbcpr_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_RBCPR_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_RBCPR_QDSS_APU_CFG,
+ .slavep = sport_rbcpr_qdss_apu_cfg,
+ .num_sports = ARRAY_SIZE(sport_rbcpr_qdss_apu_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_RBCPR_QDSS_APU_CFG,
+ },
+ {
+ .id = MSM_BUS_FAB_SYS_NOC,
+ .gateway = 1,
+ .slavep = sport_gw_cnoc_snoc,
+ .num_sports = ARRAY_SIZE(sport_gw_cnoc_snoc),
+ .masterp = mport_gw_snoc_cnoc,
+ .num_mports = ARRAY_SIZE(mport_gw_snoc_cnoc),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .mas_hw_id = MAS_SNOC_CNOC,
+ .slv_hw_id = SLV_CNOC_SNOC,
+ },
+ {
+ .id = MSM_BUS_SLAVE_CNOC_ONOC_CFG,
+ .slavep = sport_cnoc_onoc_cfg,
+ .num_sports = ARRAY_SIZE(sport_cnoc_onoc_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_CNOC_ONOC_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_CNOC_MNOC_MMSS_CFG,
+ .slavep = sport_cnoc_mnoc_mmss_cfg,
+ .num_sports = ARRAY_SIZE(sport_cnoc_mnoc_mmss_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_CNOC_MNOC_MMSS_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_CNOC_MNOC_CFG,
+ .slavep = sport_cnoc_mnoc_cfg,
+ .num_sports = ARRAY_SIZE(sport_cnoc_mnoc_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_CNOC_MNOC_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_PNOC_CFG,
+ .slavep = sport_pnoc_cfg,
+ .num_sports = ARRAY_SIZE(sport_pnoc_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_PNOC_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_SNOC_MPU_CFG,
+ .slavep = sport_snoc_mpu_cfg,
+ .num_sports = ARRAY_SIZE(sport_snoc_mpu_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_SNOC_MPU_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_SNOC_CFG,
+ .slavep = sport_snoc_cfg,
+ .num_sports = ARRAY_SIZE(sport_snoc_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_SNOC_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_EBI1_DLL_CFG,
+ .slavep = sport_ebi1_dll_cfg,
+ .num_sports = ARRAY_SIZE(sport_ebi1_dll_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_EBI1_DLL_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_PHY_APU_CFG,
+ .slavep = sport_phy_apu_cfg,
+ .num_sports = ARRAY_SIZE(sport_phy_apu_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_PHY_APU_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_EBI1_PHY_CFG,
+ .slavep = sport_ebi1_phy_cfg,
+ .num_sports = ARRAY_SIZE(sport_ebi1_phy_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_EBI1_PHY_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_RPM,
+ .slavep = sport_rpm,
+ .num_sports = ARRAY_SIZE(sport_rpm),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_RPM,
+ },
+ {
+ .id = MSM_BUS_SLAVE_SERVICE_CNOC,
+ .slavep = sport_service_cnoc,
+ .num_sports = ARRAY_SIZE(sport_service_cnoc),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_SERVICE_CNOC,
+ },
+};
+
+/* A virtual NoC is needed for connection to OCMEM */
+static struct msm_bus_node_info ocmem_vnoc_info[] = {
+ {
+ .id = MSM_BUS_MASTER_V_OCMEM_GFX3D,
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .mas_hw_id = MAS_V_OCMEM_GFX3D,
+ },
+ {
+ .id = MSM_BUS_SLAVE_OCMEM,
+ .slavep = sport_ocmem,
+ .num_sports = ARRAY_SIZE(sport_ocmem),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 16,
+ .slv_hw_id = SLV_OCMEM,
+ .slaveclk[DUAL_CTX] = "ocmem_clk",
+ .slaveclk[ACTIVE_CTX] = "ocmem_a_clk",
+ },
+ {
+ .id = MSM_BUS_FAB_SYS_NOC,
+ .gateway = 1,
+ .buswidth = 8,
+ .ws = 10000,
+ .mas_hw_id = MAS_SNOC_OVNOC,
+ .slv_hw_id = SLV_OVNOC_SNOC,
+ },
+ {
+ .id = MSM_BUS_FAB_OCMEM_NOC,
+ .gateway = 1,
+ .buswidth = 16,
+ .ws = 10000,
+ .mas_hw_id = MAS_ONOC_OVNOC,
+ .slv_hw_id = SLV_OVNOC_ONOC,
+ },
+};
+
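+/*
+ * Assign a fabric-unique priv_id to every non-gateway node and record it in
+ * the master_iids/slave_iids lookup tables (slave ids are offset by
+ * SLAVE_ID_KEY); gateway nodes keep their public id.
+ */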
+static void msm_bus_board_assign_iids(struct msm_bus_fabric_registration
+ *fabreg, int fabid)
+{
+ int i;
+ for (i = 0; i < fabreg->len; i++) {
+ if (!fabreg->info[i].gateway) {
+ fabreg->info[i].priv_id = fabid + fabreg->info[i].id;
+ if (fabreg->info[i].id < SLAVE_ID_KEY) {
+ WARN(fabreg->info[i].id >= NMASTERS,
+ "id %d exceeds array size!\n",
+ fabreg->info[i].id);
+ master_iids[fabreg->info[i].id] =
+ fabreg->info[i].priv_id;
+ } else {
+ WARN((fabreg->info[i].id - SLAVE_ID_KEY) >=
+ NSLAVES, "id %d exceeds array size!\n",
+ fabreg->info[i].id);
+ slave_iids[fabreg->info[i].id - (SLAVE_ID_KEY)]
+ = fabreg->info[i].priv_id;
+ }
+ } else {
+ fabreg->info[i].priv_id = fabreg->info[i].id;
+ }
+ }
+}
+
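+/* Translate a public master/slave id into the internal iid assigned above. */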
+static int msm_bus_board_copper_get_iid(int id)
+{
+ if ((id < SLAVE_ID_KEY && id >= NMASTERS) ||
+ id >= (SLAVE_ID_KEY + NSLAVES)) {
+ MSM_BUS_ERR("Cannot get iid. Invalid id %d passed\n", id);
+ return -EINVAL;
+ }
+
+ return CHECK_ID(((id < SLAVE_ID_KEY) ? master_iids[id] :
+ slave_iids[id - SLAVE_ID_KEY]), id);
+}
+
+int msm_bus_board_rpm_get_il_ids(uint16_t *id)
+{
+ return -ENXIO;
+}
+
+static struct msm_bus_board_algorithm msm_bus_board_algo = {
+ .board_nfab = NFAB_COPPER,
+ .get_iid = msm_bus_board_copper_get_iid,
+ .assign_iids = msm_bus_board_assign_iids,
+};
+
+struct msm_bus_fabric_registration msm_bus_copper_sys_noc_pdata = {
+ .id = MSM_BUS_FAB_SYS_NOC,
+ .name = "msm_sys_noc",
+ .info = sys_noc_info,
+ .len = ARRAY_SIZE(sys_noc_info),
+ .ahb = 0,
+ .fabclk[DUAL_CTX] = "bus_clk",
+ .fabclk[ACTIVE_CTX] = "bus_a_clk",
+ .nmasters = 15,
+ .nslaves = 12,
+ .ntieredslaves = 0,
+ .board_algo = &msm_bus_board_algo,
+ .qos_freq = 4800,
+ .hw_sel = MSM_BUS_NOC,
+ .rpm_enabled = 1,
+};
+
+struct msm_bus_fabric_registration msm_bus_copper_mmss_noc_pdata = {
+ .id = MSM_BUS_FAB_MMSS_NOC,
+ .name = "msm_mmss_noc",
+ .info = mmss_noc_info,
+ .len = ARRAY_SIZE(mmss_noc_info),
+ .ahb = 0,
+ .fabclk[DUAL_CTX] = "bus_clk",
+ .fabclk[ACTIVE_CTX] = "bus_a_clk",
+ .nmasters = 9,
+ .nslaves = 16,
+ .ntieredslaves = 0,
+ .board_algo = &msm_bus_board_algo,
+ .qos_freq = 4800,
+ .hw_sel = MSM_BUS_NOC,
+ .rpm_enabled = 1,
+};
+
+struct msm_bus_fabric_registration msm_bus_copper_bimc_pdata = {
+ .id = MSM_BUS_FAB_BIMC,
+ .name = "msm_bimc",
+ .info = bimc_info,
+ .len = ARRAY_SIZE(bimc_info),
+ .ahb = 0,
+ .fabclk[DUAL_CTX] = "mem_clk",
+ .fabclk[ACTIVE_CTX] = "mem_a_clk",
+ .nmasters = 7,
+ .nslaves = 4,
+ .ntieredslaves = 0,
+ .board_algo = &msm_bus_board_algo,
+ .qos_freq = 4800,
+ .hw_sel = MSM_BUS_BIMC,
+ .rpm_enabled = 1,
+};
+
+struct msm_bus_fabric_registration msm_bus_copper_ocmem_noc_pdata = {
+ .id = MSM_BUS_FAB_OCMEM_NOC,
+ .name = "msm_ocmem_noc",
+ .info = ocmem_noc_info,
+ .len = ARRAY_SIZE(ocmem_noc_info),
+ .ahb = 0,
+ .fabclk[DUAL_CTX] = "bus_clk",
+ .fabclk[ACTIVE_CTX] = "bus_a_clk",
+ .nmasters = 6,
+ .nslaves = 3,
+ .ntieredslaves = 0,
+ .board_algo = &msm_bus_board_algo,
+ .qos_freq = 4800,
+ .hw_sel = MSM_BUS_NOC,
+ .rpm_enabled = 1,
+};
+
+struct msm_bus_fabric_registration msm_bus_copper_periph_noc_pdata = {
+ .id = MSM_BUS_FAB_PERIPH_NOC,
+ .name = "msm_periph_noc",
+ .info = periph_noc_info,
+ .len = ARRAY_SIZE(periph_noc_info),
+ .ahb = 0,
+ .fabclk[DUAL_CTX] = "bus_clk",
+ .fabclk[ACTIVE_CTX] = "bus_a_clk",
+ .nmasters = 12,
+ .nslaves = 16,
+ .ntieredslaves = 0,
+ .board_algo = &msm_bus_board_algo,
+ .hw_sel = MSM_BUS_NOC,
+ .rpm_enabled = 1,
+};
+
+struct msm_bus_fabric_registration msm_bus_copper_config_noc_pdata = {
+ .id = MSM_BUS_FAB_CONFIG_NOC,
+ .name = "msm_config_noc",
+ .info = config_noc_info,
+ .len = ARRAY_SIZE(config_noc_info),
+ .ahb = 0,
+ .fabclk[DUAL_CTX] = "bus_clk",
+ .fabclk[ACTIVE_CTX] = "bus_a_clk",
+ .nmasters = 8,
+ .nslaves = 30,
+ .ntieredslaves = 0,
+ .board_algo = &msm_bus_board_algo,
+ .hw_sel = MSM_BUS_NOC,
+ .rpm_enabled = 1,
+};
+
+struct msm_bus_fabric_registration msm_bus_copper_ocmem_vnoc_pdata = {
+ .id = MSM_BUS_FAB_OCMEM_VNOC,
+ .name = "msm_ocmem_vnoc",
+ .info = ocmem_vnoc_info,
+ .len = ARRAY_SIZE(ocmem_vnoc_info),
+ .ahb = 0,
+ .nmasters = 5,
+ .nslaves = 4,
+ .ntieredslaves = 0,
+ .board_algo = &msm_bus_board_algo,
+ .hw_sel = MSM_BUS_NOC,
+ .virt = 1,
+ .rpm_enabled = 1,
+};
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_fabric.c b/arch/arm/mach-msm/msm_bus/msm_bus_fabric.c
index 3671916..a4b9b51 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_fabric.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_fabric.c
@@ -131,7 +131,7 @@
fabric->pdata->id, fabric->pdata->len);
fabric->hw_data = fabric->fabdev.hw_algo.allocate_hw_data(pdev,
fabric->pdata);
- if (ZERO_OR_NULL_PTR(fabric->hw_data)) {
+ if (ZERO_OR_NULL_PTR(fabric->hw_data) && fabric->pdata->ahb == 0) {
MSM_BUS_ERR("Couldn't allocate hw_data for fab: %d\n",
fabric->fabdev.id);
goto error;
diff --git a/arch/arm/mach-msm/msm_watchdog_v2.c b/arch/arm/mach-msm/msm_watchdog_v2.c
new file mode 100644
index 0000000..a5f8bcc
--- /dev/null
+++ b/arch/arm/mach-msm/msm_watchdog_v2.c
@@ -0,0 +1,400 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/cpu.h>
+#include <linux/platform_device.h>
+
+#define MODULE_NAME "msm_watchdog"
+#define WDT0_ACCSCSSNBARK_INT 0
+#define TCSR_WDT_CFG 0x30
+#define WDT0_RST 0x04
+#define WDT0_EN 0x08
+#define WDT0_STS 0x0C
+#define WDT0_BARK_TIME 0x10
+#define WDT0_BITE_TIME 0x14
+
+#define MASK_SIZE 32
+
+struct msm_watchdog_data {
+ unsigned int __iomem phys_base;
+ size_t size;
+ void __iomem *base;
+ struct device *dev;
+ unsigned int pet_time;
+ unsigned int bark_time;
+ unsigned int bark_irq;
+ unsigned int bite_irq;
+ unsigned int do_ipi_ping;
+ unsigned long long last_pet;
+ unsigned min_slack_ticks;
+ unsigned long long min_slack_ns;
+ cpumask_t alive_mask;
+ struct work_struct init_dogwork_struct;
+ struct delayed_work dogwork_struct;
+ struct notifier_block panic_blk;
+};
+
+/*
+ * On the kernel command line specify
+ * msm_watchdog.enable=1 to enable the watchdog
+ * By default the watchdog is turned on.
+ */
+static int enable = 1;
+module_param(enable, int, 0);
+
+/*
+ * On the kernel command line specify
+ * msm_watchdog.WDT_HZ=<clock val in HZ> to set the watchdog
+ * clock rate in ticks per second. By default it is set to 32765.
+ */
+static long WDT_HZ = 32765;
+module_param(WDT_HZ, long, 0);
+
+/*
+ * If the watchdog is enabled at bootup (enable=1),
+ * the runtime_disable sysfs node at
+ * /sys/module/msm_watchdog/parameters/runtime_disable
+ * can be used to deactivate the watchdog.
+ * This is a one-time setting. The watchdog
+ * cannot be re-enabled once it is disabled.
+ */
+static int runtime_disable;
+static int wdog_enable_set(const char *val, struct kernel_param *kp);
+module_param_call(runtime_disable, wdog_enable_set, param_get_int,
+ &runtime_disable, 0644);
+
+static void pet_watchdog_work(struct work_struct *work);
+static void init_watchdog_work(struct work_struct *work);
+
+static void dump_cpu_alive_mask(struct msm_watchdog_data *wdog_dd)
+{
+ static char alive_mask_buf[MASK_SIZE];
+ size_t count = cpulist_scnprintf(alive_mask_buf, MASK_SIZE,
+ &wdog_dd->alive_mask);
+ alive_mask_buf[count++] = '\n';
+ alive_mask_buf[count] = '\0';
+ printk(KERN_INFO "cpu alive mask from last pet\n%s", alive_mask_buf);
+}
+
+static int msm_watchdog_suspend(struct device *dev)
+{
+ struct msm_watchdog_data *wdog_dd =
+ (struct msm_watchdog_data *)dev_get_drvdata(dev);
+ if (!enable)
+ return 0;
+ __raw_writel(1, wdog_dd->base + WDT0_RST);
+ __raw_writel(0, wdog_dd->base + WDT0_EN);
+ mb();
+ return 0;
+}
+
+static int msm_watchdog_resume(struct device *dev)
+{
+ struct msm_watchdog_data *wdog_dd =
+ (struct msm_watchdog_data *)dev_get_drvdata(dev);
+ if (!enable)
+ return 0;
+ __raw_writel(1, wdog_dd->base + WDT0_EN);
+ __raw_writel(1, wdog_dd->base + WDT0_RST);
+ mb();
+ return 0;
+}
+
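+/*
+ * Panic notifier: with no panic timeout the watchdog is simply disabled;
+ * otherwise the bark and bite times are pushed out past the timeout so the
+ * watchdog does not fire while the panic path is still running.
+ */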
+static int panic_wdog_handler(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct msm_watchdog_data *wdog_dd = container_of(this,
+ struct msm_watchdog_data, panic_blk);
+ if (panic_timeout == 0) {
+ __raw_writel(0, wdog_dd->base + WDT0_EN);
+ mb();
+ } else {
+ __raw_writel(WDT_HZ * (panic_timeout + 4),
+ wdog_dd->base + WDT0_BARK_TIME);
+ __raw_writel(WDT_HZ * (panic_timeout + 4),
+ wdog_dd->base + WDT0_BITE_TIME);
+ __raw_writel(1, wdog_dd->base + WDT0_RST);
+ }
+ return NOTIFY_DONE;
+}
+/*
+ * TODO: implement enable/disable.
+ */
+static int wdog_enable_set(const char *val, struct kernel_param *kp)
+{
+ return 0;
+}
+
+
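+/*
+ * Pet the hardware (WDT0_RST) and track the minimum slack observed so far,
+ * both in watchdog ticks read back from WDT0_STS and in sched_clock()
+ * nanoseconds relative to the programmed bark deadline.
+ */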
+static void pet_watchdog(struct msm_watchdog_data *wdog_dd)
+{
+ int slack;
+ unsigned long long time_ns;
+ unsigned long long slack_ns;
+ unsigned long long bark_time_ns = wdog_dd->bark_time * 1000000ULL;
+
+ slack = __raw_readl(wdog_dd->base + WDT0_STS) >> 3;
+ slack = ((wdog_dd->bark_time*WDT_HZ)/1000) - slack;
+ if (slack < wdog_dd->min_slack_ticks)
+ wdog_dd->min_slack_ticks = slack;
+ __raw_writel(1, wdog_dd->base + WDT0_RST);
+ time_ns = sched_clock();
+ slack_ns = (wdog_dd->last_pet + bark_time_ns) - time_ns;
+ if (slack_ns < wdog_dd->min_slack_ns)
+ wdog_dd->min_slack_ns = slack_ns;
+ wdog_dd->last_pet = time_ns;
+}
+
+static void keep_alive_response(void *info)
+{
+ int cpu = smp_processor_id();
+ struct msm_watchdog_data *wdog_dd = (struct msm_watchdog_data *)info;
+ cpumask_set_cpu(cpu, &wdog_dd->alive_mask);
+ smp_mb();
+}
+
+/*
+ * If this function does not return, it implies that one of the
+ * other CPUs is not responsive.
+ */
+static void ping_other_cpus(struct msm_watchdog_data *wdog_dd)
+{
+ int cpu;
+ cpumask_clear(&wdog_dd->alive_mask);
+ smp_mb();
+ for_each_cpu(cpu, cpu_online_mask)
+ smp_call_function_single(cpu, keep_alive_response, wdog_dd, 1);
+}
+
+static void pet_watchdog_work(struct work_struct *work)
+{
+ unsigned long delay_time;
+ struct delayed_work *delayed_work = to_delayed_work(work);
+ struct msm_watchdog_data *wdog_dd = container_of(delayed_work,
+ struct msm_watchdog_data,
+ dogwork_struct);
+ delay_time = msecs_to_jiffies(wdog_dd->pet_time);
+ if (wdog_dd->do_ipi_ping)
+ ping_other_cpus(wdog_dd);
+ pet_watchdog(wdog_dd);
+ if (wdog_dd->do_ipi_ping)
+ dump_cpu_alive_mask(wdog_dd);
+ if (enable)
+ schedule_delayed_work(&wdog_dd->dogwork_struct,
+ delay_time);
+}
+
+static int msm_watchdog_remove(struct platform_device *pdev)
+{
+ struct msm_watchdog_data *wdog_dd =
+ (struct msm_watchdog_data *)platform_get_drvdata(pdev);
+ if (enable) {
+ __raw_writel(0, wdog_dd->base + WDT0_EN);
+ mb();
+ enable = 0;
+ /*
+ * TODO: Not sure if we need to call into TZ to disable
+ * secure wdog.
+ */
+ /* In case we got suspended mid-exit */
+ __raw_writel(0, wdog_dd->base + WDT0_EN);
+ }
+ printk(KERN_INFO "MSM Watchdog Exit - Deactivated\n");
+ kzfree(wdog_dd);
+ return 0;
+}
+
+static irqreturn_t wdog_bark_handler(int irq, void *dev_id)
+{
+ struct msm_watchdog_data *wdog_dd = (struct msm_watchdog_data *)dev_id;
+ unsigned long nanosec_rem;
+ unsigned long long t = sched_clock();
+
+ nanosec_rem = do_div(t, 1000000000);
+ printk(KERN_INFO "Watchdog bark! Now = %lu.%06lu\n", (unsigned long) t,
+ nanosec_rem / 1000);
+
+ nanosec_rem = do_div(wdog_dd->last_pet, 1000000000);
+ printk(KERN_INFO "Watchdog last pet at %lu.%06lu\n", (unsigned long)
+ wdog_dd->last_pet, nanosec_rem / 1000);
+ if (wdog_dd->do_ipi_ping)
+ dump_cpu_alive_mask(wdog_dd);
+ panic("Apps watchdog bark received!");
+ return IRQ_HANDLED;
+}
+
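+/*
+ * One-time setup run from the init work: program the bark time and a bite
+ * time 3*WDT_HZ ticks after it, register the panic notifier, queue the
+ * periodic pet work and finally enable the watchdog.
+ */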
+static void init_watchdog_work(struct work_struct *work)
+{
+ struct msm_watchdog_data *wdog_dd = container_of(work,
+ struct msm_watchdog_data,
+ init_dogwork_struct);
+ unsigned long delay_time;
+ u64 timeout;
+ delay_time = msecs_to_jiffies(wdog_dd->pet_time);
+ wdog_dd->min_slack_ticks = UINT_MAX;
+ wdog_dd->min_slack_ns = ULLONG_MAX;
+ timeout = (wdog_dd->bark_time * WDT_HZ)/1000;
+ __raw_writel(timeout, wdog_dd->base + WDT0_BARK_TIME);
+ __raw_writel(timeout + 3*WDT_HZ, wdog_dd->base + WDT0_BITE_TIME);
+
+ wdog_dd->panic_blk.notifier_call = panic_wdog_handler;
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &wdog_dd->panic_blk);
+ schedule_delayed_work(&wdog_dd->dogwork_struct, delay_time);
+
+ __raw_writel(1, wdog_dd->base + WDT0_EN);
+ __raw_writel(1, wdog_dd->base + WDT0_RST);
+ wdog_dd->last_pet = sched_clock();
+ printk(KERN_INFO "MSM Watchdog Initialized\n");
+ return;
+}
+
+static struct of_device_id msm_wdog_match_table[] = {
+ { .compatible = "qcom,msm-watchdog" },
+ {}
+};
+
+static void __devinit dump_pdata(struct msm_watchdog_data *pdata)
+{
+ dev_dbg(pdata->dev, "wdog bark_time %d", pdata->bark_time);
+ dev_dbg(pdata->dev, "wdog pet_time %d", pdata->pet_time);
+ dev_dbg(pdata->dev, "wdog perform ipi ping %d", pdata->do_ipi_ping);
+ dev_dbg(pdata->dev, "wdog base address is 0x%x\n", (unsigned int)
+ pdata->base);
+}
+
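+/*
+ * Read the register window, bark/bite IRQs and the qcom,bark-time,
+ * qcom,pet-time and qcom,ipi-ping properties from the device tree node,
+ * then sanity-check the values.
+ */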
+static int __devinit msm_wdog_dt_to_pdata(struct platform_device *pdev,
+ struct msm_watchdog_data *pdata)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct resource *wdog_resource;
+ int ret;
+
+ wdog_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pdata->size = resource_size(wdog_resource);
+ pdata->phys_base = wdog_resource->start;
+ if (unlikely(!(devm_request_region(&pdev->dev, pdata->phys_base,
+ pdata->size, "msm-watchdog")))) {
+ dev_err(&pdev->dev, "%s cannot reserve watchdog region\n",
+ __func__);
+ return -ENXIO;
+ }
+ pdata->base = devm_ioremap(&pdev->dev, pdata->phys_base,
+ pdata->size);
+ if (!pdata->base) {
+ dev_err(&pdev->dev, "%s cannot map wdog register space\n",
+ __func__);
+ return -ENXIO;
+ }
+
+ pdata->bark_irq = platform_get_irq(pdev, 0);
+ pdata->bite_irq = platform_get_irq(pdev, 1);
+ ret = of_property_read_u32(node, "qcom,bark-time", &pdata->bark_time);
+ if (ret) {
+ dev_err(&pdev->dev, "reading bark time failed\n");
+ return -ENXIO;
+ }
+ ret = of_property_read_u32(node, "qcom,pet-time", &pdata->pet_time);
+ if (ret) {
+ dev_err(&pdev->dev, "reading pet time failed\n");
+ return -ENXIO;
+ }
+ ret = of_property_read_u32(node, "qcom,ipi-ping", &pdata->do_ipi_ping);
+ if (ret) {
+ dev_err(&pdev->dev, "reading do ipi failed\n");
+ return -ENXIO;
+ }
+ if (!pdata->bark_time) {
+ dev_err(&pdev->dev, "%s watchdog bark time not setup\n",
+ __func__);
+ return -ENXIO;
+ }
+ if (!pdata->pet_time) {
+ dev_err(&pdev->dev, "%s watchdog pet time not setup\n",
+ __func__);
+ return -ENXIO;
+ }
+ if (pdata->do_ipi_ping > 1) {
+ dev_err(&pdev->dev, "%s invalid watchdog ipi value\n",
+ __func__);
+ return -ENXIO;
+ }
+ dump_pdata(pdata);
+ return 0;
+}
+
+static int __devinit msm_watchdog_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct msm_watchdog_data *wdog_dd;
+
+ if (!pdev->dev.of_node || !enable)
+ return -ENODEV;
+ wdog_dd = kzalloc(sizeof(struct msm_watchdog_data), GFP_KERNEL);
+ if (!wdog_dd)
+ return -EIO;
+ ret = msm_wdog_dt_to_pdata(pdev, wdog_dd);
+ if (ret)
+ goto err;
+ wdog_dd->dev = &pdev->dev;
+ platform_set_drvdata(pdev, wdog_dd);
+ ret = devm_request_irq(&pdev->dev, wdog_dd->bark_irq, wdog_bark_handler,
+ IRQF_TRIGGER_RISING, "apps_wdog_bark", wdog_dd);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request bark irq\n");
+ ret = -ENXIO;
+ goto err;
+ }
+ cpumask_clear(&wdog_dd->alive_mask);
+ INIT_WORK(&wdog_dd->init_dogwork_struct, init_watchdog_work);
+ INIT_DELAYED_WORK(&wdog_dd->dogwork_struct, pet_watchdog_work);
+ schedule_work_on(0, &wdog_dd->init_dogwork_struct);
+ return 0;
+err:
+ kzfree(wdog_dd);
+ return ret;
+}
+
+static const struct dev_pm_ops msm_watchdog_dev_pm_ops = {
+ .suspend_noirq = msm_watchdog_suspend,
+ .resume_noirq = msm_watchdog_resume,
+};
+
+static struct platform_driver msm_watchdog_driver = {
+ .probe = msm_watchdog_probe,
+ .remove = msm_watchdog_remove,
+ .driver = {
+ .name = MODULE_NAME,
+ .owner = THIS_MODULE,
+ .pm = &msm_watchdog_dev_pm_ops,
+ .of_match_table = msm_wdog_match_table,
+ },
+};
+
+static int __devinit init_watchdog(void)
+{
+ return platform_driver_register(&msm_watchdog_driver);
+}
+
+late_initcall(init_watchdog);
+MODULE_DESCRIPTION("MSM Watchdog Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/mach-msm/perf_event_msm_krait_l2.c b/arch/arm/mach-msm/perf_event_msm_krait_l2.c
index d82f4dd..5f76a92 100644
--- a/arch/arm/mach-msm/perf_event_msm_krait_l2.c
+++ b/arch/arm/mach-msm/perf_event_msm_krait_l2.c
@@ -14,12 +14,17 @@
#include <linux/irq.h>
#include <asm/pmu.h>
#include <linux/platform_device.h>
+#include <linux/spinlock.h>
#include <mach/msm-krait-l2-accessors.h>
#define MAX_L2_PERIOD ((1ULL << 32) - 1)
#define MAX_KRAIT_L2_CTRS 5
+#define L2_EVT_MASK 0xfffff
+
+#define L2_SLAVE_EV_PREFIX 4
+
#define L2PMCCNTR 0x409
#define L2PMCCNTCR 0x408
#define L2PMCCNTSR 0x40A
@@ -48,16 +53,62 @@
/* event format is -e rsRCCG See get_event_desc() */
-#define EVENT_REG_MASK 0xf000
-#define EVENT_GROUPSEL_MASK 0x000f
-#define EVENT_GROUPCODE_MASK 0x0ff0
+#define EVENT_PREFIX_MASK 0xf0000
+#define EVENT_REG_MASK 0x0f000
+#define EVENT_GROUPSEL_MASK 0x0000f
+#define EVENT_GROUPCODE_MASK 0x00ff0
+
+#define EVENT_PREFIX_SHIFT 16
#define EVENT_REG_SHIFT 12
#define EVENT_GROUPCODE_SHIFT 4
#define RESRX_VALUE_EN 0x80000000
+/*
+ * The L2 PMU is shared between all CPUs, so protect
+ * its bitmap access.
+ */
+struct pmu_constraints {
+ u64 pmu_bitmap;
+ raw_spinlock_t lock;
+} l2_pmu_constraints = {
+ .pmu_bitmap = 0,
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(l2_pmu_constraints.lock),
+};
+
+/* NRCCG format for perf RAW codes. */
+PMU_FORMAT_ATTR(l2_prefix, "config:16-19");
+PMU_FORMAT_ATTR(l2_reg, "config:12-15");
+PMU_FORMAT_ATTR(l2_code, "config:4-11");
+PMU_FORMAT_ATTR(l2_grp, "config:0-3");
+
+static struct attribute *msm_l2_ev_formats[] = {
+ &format_attr_l2_prefix.attr,
+ &format_attr_l2_reg.attr,
+ &format_attr_l2_code.attr,
+ &format_attr_l2_grp.attr,
+ NULL,
+};
+
+/*
+ * The format group is essential for accessing PMUs from userspace
+ * via their .name field.
+ */
+static struct attribute_group msm_l2_pmu_format_group = {
+ .name = "format",
+ .attrs = msm_l2_ev_formats,
+};
+
+static const struct attribute_group *msm_l2_pmu_attr_grps[] = {
+ &msm_l2_pmu_format_group,
+ NULL,
+};
+
static u32 l2_orig_filter_prefix = 0x000f0030;
+/* L2 slave port traffic filtering */
+static u32 l2_slv_filter_prefix = 0x000f0010;
+
static u32 pmu_type;
static struct arm_pmu krait_l2_pmu;
@@ -128,19 +179,25 @@
set_l2_indirect_reg(group_reg, resr_val);
}
-static void set_evfilter_task_mode(int ctr)
+static void set_evfilter_task_mode(int ctr, unsigned int is_slv)
{
u32 filter_reg = (ctr * 16) + IA_L2PMXEVFILTER_BASE;
u32 filter_val = l2_orig_filter_prefix | 1 << smp_processor_id();
+ if (is_slv)
+ filter_val = l2_slv_filter_prefix;
+
set_l2_indirect_reg(filter_reg, filter_val);
}
-static void set_evfilter_sys_mode(int ctr)
+static void set_evfilter_sys_mode(int ctr, unsigned int is_slv)
{
u32 filter_reg = (ctr * 16) + IA_L2PMXEVFILTER_BASE;
u32 filter_val = l2_orig_filter_prefix | 0xf;
+ if (is_slv)
+ filter_val = l2_slv_filter_prefix;
+
set_l2_indirect_reg(filter_reg, filter_val);
}
@@ -212,12 +269,21 @@
{
struct event_desc evdesc;
unsigned long iflags;
+ unsigned int is_slv = 0;
+ unsigned int evt_prefix;
raw_spin_lock_irqsave(&krait_l2_pmu_hw_events.pmu_lock, iflags);
if (hwc->config_base == L2CYCLE_CTR_RAW_CODE)
goto out;
+ /* Check if user requested any special origin filtering. */
+ evt_prefix = (hwc->config_base &
+ EVENT_PREFIX_MASK) >> EVENT_PREFIX_SHIFT;
+
+ if (evt_prefix == L2_SLAVE_EV_PREFIX)
+ is_slv = 1;
+
set_evcntcr(idx);
memset(&evdesc, 0, sizeof(evdesc));
@@ -230,9 +296,9 @@
evdesc.event_group_code);
if (cpu < 0)
- set_evfilter_task_mode(idx);
+ set_evfilter_task_mode(idx, is_slv);
else
- set_evfilter_sys_mode(idx);
+ set_evfilter_sys_mode(idx, is_slv);
out:
enable_intenset(idx);
@@ -358,7 +424,7 @@
static int krait_l2_map_event(struct perf_event *event)
{
if (pmu_type > 0 && pmu_type == event->attr.type)
- return event->attr.config & 0xfffff;
+ return event->attr.config & L2_EVT_MASK;
else
return -ENOENT;
}
@@ -378,6 +444,50 @@
free_irq(irq, NULL);
}
+static int msm_l2_test_set_ev_constraint(struct perf_event *event)
+{
+ u32 evt_type = event->attr.config & L2_EVT_MASK;
+ u8 reg = (evt_type & 0x0F000) >> 12;
+ u8 group = evt_type & 0x0000F;
+ unsigned long flags;
+ u32 err = 0;
+ u64 bitmap_t;
+
+ raw_spin_lock_irqsave(&l2_pmu_constraints.lock, flags);
+
+ bitmap_t = 1 << ((reg * 4) + group);
+
+ if (!(l2_pmu_constraints.pmu_bitmap & bitmap_t)) {
+ l2_pmu_constraints.pmu_bitmap |= bitmap_t;
+ goto out;
+ }
+
+ /* Bit is already set. Constraint failed. */
+ err = -EPERM;
+out:
+ raw_spin_unlock_irqrestore(&l2_pmu_constraints.lock, flags);
+ return err;
+}
+
+static int msm_l2_clear_ev_constraint(struct perf_event *event)
+{
+ u32 evt_type = event->attr.config & L2_EVT_MASK;
+ u8 reg = (evt_type & 0x0F000) >> 12;
+ u8 group = evt_type & 0x0000F;
+ unsigned long flags;
+ u64 bitmap_t;
+
+ raw_spin_lock_irqsave(&l2_pmu_constraints.lock, flags);
+
+ bitmap_t = 1 << ((reg * 4) + group);
+
+ /* Clear constraint bit. */
+ l2_pmu_constraints.pmu_bitmap &= ~bitmap_t;
+
+ raw_spin_unlock_irqrestore(&l2_pmu_constraints.lock, flags);
+ return 1;
+}
+
static struct arm_pmu krait_l2_pmu = {
.id = ARM_PERF_PMU_ID_KRAIT_L2,
.type = ARM_PMU_DEVICE_L2CC,
@@ -393,16 +503,19 @@
.read_counter = krait_l2_read_counter,
.write_counter = krait_l2_write_counter,
.map_event = krait_l2_map_event,
- .max_period = (1LLU << 32) - 1,
+ .max_period = MAX_L2_PERIOD,
.get_hw_events = krait_l2_get_hw_events,
.num_events = MAX_KRAIT_L2_CTRS,
+ .test_set_event_constraints = msm_l2_test_set_ev_constraint,
+ .clear_event_constraints = msm_l2_clear_ev_constraint,
+ .pmu.attr_groups = msm_l2_pmu_attr_grps,
};
static int __devinit krait_l2_pmu_device_probe(struct platform_device *pdev)
{
krait_l2_pmu.plat_device = pdev;
- if (!armpmu_register(&krait_l2_pmu, "krait-l2", -1))
+ if (!armpmu_register(&krait_l2_pmu, "kraitl2", -1))
pmu_type = krait_l2_pmu.pmu.type;
return 0;
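
[Editor's note - not part of the patch] The msm_l2_test_set_ev_constraint()/msm_l2_clear_ev_constraint() pair added above serialises use of the shared L2 PMU by reserving one bit per (reg, group) column in a 64-bit bitmap, so two perf events cannot program the same column at the same time. A minimal standalone sketch of that idea follows; the names are illustrative, the driver's raw spinlock is omitted for brevity, and 1ULL is used for the shift so the bit index stays well-defined when (reg * 4) + group exceeds 31.

#include <stdbool.h>
#include <stdint.h>

static uint64_t l2_pmu_bitmap;	/* one bit per (reg, group) column */

/* Reserve the column; returns false if another event already owns it. */
static bool l2_try_reserve_column(unsigned int reg, unsigned int group)
{
	uint64_t bit = 1ULL << ((reg * 4) + group);

	if (l2_pmu_bitmap & bit)
		return false;
	l2_pmu_bitmap |= bit;
	return true;
}

/* Release the column when the event is torn down. */
static void l2_release_column(unsigned int reg, unsigned int group)
{
	l2_pmu_bitmap &= ~(1ULL << ((reg * 4) + group));
}
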
diff --git a/arch/arm/mach-msm/perf_event_msm_l2.c b/arch/arm/mach-msm/perf_event_msm_l2.c
index 3310d92..5a5bf57 100644
--- a/arch/arm/mach-msm/perf_event_msm_l2.c
+++ b/arch/arm/mach-msm/perf_event_msm_l2.c
@@ -13,6 +13,7 @@
#include <linux/irq.h>
#include <asm/pmu.h>
#include <linux/platform_device.h>
+#include <linux/spinlock.h>
#define MAX_SCORPION_L2_CTRS 5
@@ -23,10 +24,61 @@
#define SCORPION_L2_EVT_PREFIX 3
#define SCORPION_MAX_L2_REG 4
+#define L2_EVT_MASK 0xfffff
+#define L2_EVT_PREFIX_MASK 0xf0000
+#define L2_EVT_PREFIX_SHIFT 16
+#define L2_SLAVE_EVT_PREFIX 4
+
+
+/*
+ * The L2 PMU is shared between all CPUs, so protect
+ * its bitmap access.
+ */
+struct pmu_constraints {
+ u64 pmu_bitmap;
+ raw_spinlock_t lock;
+} l2_pmu_constraints = {
+ .pmu_bitmap = 0,
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(l2_pmu_constraints.lock),
+};
+
+/* NRCCG format for perf RAW codes. */
+PMU_FORMAT_ATTR(l2_prefix, "config:16-19");
+PMU_FORMAT_ATTR(l2_reg, "config:12-15");
+PMU_FORMAT_ATTR(l2_code, "config:4-11");
+PMU_FORMAT_ATTR(l2_grp, "config:0-3");
+
+static struct attribute *msm_l2_ev_formats[] = {
+ &format_attr_l2_prefix.attr,
+ &format_attr_l2_reg.attr,
+ &format_attr_l2_code.attr,
+ &format_attr_l2_grp.attr,
+ NULL,
+};
+
+/*
+ * Format group is essential for accessing PMUs from userspace
+ * via their .name field.
+ */
+static struct attribute_group msm_l2_pmu_format_group = {
+ .name = "format",
+ .attrs = msm_l2_ev_formats,
+};
+
+static const struct attribute_group *msm_l2_pmu_attr_grps[] = {
+ &msm_l2_pmu_format_group,
+ NULL,
+};
+
static u32 pmu_type;
static struct arm_pmu scorpion_l2_pmu;
+static u32 l2_orig_filter_prefix = 0x000f0030;
+
+/* L2 slave port traffic filtering */
+static u32 l2_slv_filter_prefix = 0x000f0010;
+
static struct perf_event *l2_events[MAX_SCORPION_L2_CTRS];
static unsigned long l2_used_mask[BITS_TO_LONGS(MAX_SCORPION_L2_CTRS)];
@@ -376,7 +428,8 @@
u8 group;
prefix = (evt_type & 0xF0000) >> 16;
- if (prefix == SCORPION_L2_EVT_PREFIX) {
+ if (prefix == SCORPION_L2_EVT_PREFIX ||
+ prefix == L2_SLAVE_EVT_PREFIX) {
reg = (evt_type & 0x0F000) >> 12;
code = (evt_type & 0x00FF0) >> 4;
group = evt_type & 0x0000F;
@@ -433,16 +486,22 @@
asm volatile ("mcr p15, 3, %0, c15, c6, 7" : : "r" (val));
}
-static void scorpion_l2_set_evfilter_task_mode(void)
+static void scorpion_l2_set_evfilter_task_mode(unsigned int is_slv)
{
- u32 filter_val = 0x000f0030 | 1 << smp_processor_id();
+ u32 filter_val = l2_orig_filter_prefix | 1 << smp_processor_id();
+
+ if (is_slv)
+ filter_val = l2_slv_filter_prefix;
asm volatile ("mcr p15, 3, %0, c15, c6, 3" : : "r" (filter_val));
}
-static void scorpion_l2_set_evfilter_sys_mode(void)
+static void scorpion_l2_set_evfilter_sys_mode(unsigned int is_slv)
{
- u32 filter_val = 0x000f003f;
+ u32 filter_val = l2_orig_filter_prefix | 0xf;
+
+ if (is_slv)
+ filter_val = l2_slv_filter_prefix;
asm volatile ("mcr p15, 3, %0, c15, c6, 3" : : "r" (filter_val));
}
@@ -542,12 +601,21 @@
int evtype = hwc->config_base;
int ev_typer;
unsigned long iflags;
+ unsigned int is_slv = 0;
+ unsigned int evt_prefix;
raw_spin_lock_irqsave(&scorpion_l2_pmu_hw_events.pmu_lock, iflags);
if (hwc->config_base == SCORPION_L2CYCLE_CTR_RAW_CODE)
goto out;
+ /* Check if user requested any special origin filtering. */
+ evt_prefix = (hwc->config_base &
+ L2_EVT_PREFIX_MASK) >> L2_EVT_PREFIX_SHIFT;
+
+ if (evt_prefix == L2_SLAVE_EVT_PREFIX)
+ is_slv = 1;
+
memset(&evtinfo, 0, sizeof(evtinfo));
ev_typer = get_scorpion_l2_evtinfo(evtype, &evtinfo);
@@ -557,9 +625,9 @@
scorpion_l2_set_evcntcr();
if (cpu < 0)
- scorpion_l2_set_evfilter_task_mode();
+ scorpion_l2_set_evfilter_task_mode(is_slv);
else
- scorpion_l2_set_evfilter_sys_mode();
+ scorpion_l2_set_evfilter_sys_mode(is_slv);
scorpion_l2_evt_setup(evtinfo.grp, evtinfo.val);
@@ -693,7 +761,7 @@
static int scorpion_l2_map_event(struct perf_event *event)
{
if (pmu_type > 0 && pmu_type == event->attr.type)
- return event->attr.config & 0xfffff;
+ return event->attr.config & L2_EVT_MASK;
else
return -ENOENT;
}
@@ -713,6 +781,59 @@
free_irq(irq, NULL);
}
+static int msm_l2_test_set_ev_constraint(struct perf_event *event)
+{
+ u32 evt_type = event->attr.config & L2_EVT_MASK;
+ u8 prefix = (evt_type & 0xF0000) >> 16;
+ u8 reg = (evt_type & 0x0F000) >> 12;
+ u8 group = evt_type & 0x0000F;
+ unsigned long flags;
+ u32 err = 0;
+ u64 bitmap_t;
+
+ if (!prefix)
+ return 0;
+
+ raw_spin_lock_irqsave(&l2_pmu_constraints.lock, flags);
+
+ bitmap_t = 1 << ((reg * 4) + group);
+
+ if (!(l2_pmu_constraints.pmu_bitmap & bitmap_t)) {
+ l2_pmu_constraints.pmu_bitmap |= bitmap_t;
+ goto out;
+ }
+
+ /* Bit is already set. Constraint failed. */
+ err = -EPERM;
+
+out:
+ raw_spin_unlock_irqrestore(&l2_pmu_constraints.lock, flags);
+ return err;
+}
+
+static int msm_l2_clear_ev_constraint(struct perf_event *event)
+{
+ u32 evt_type = event->attr.config & L2_EVT_MASK;
+ u8 prefix = (evt_type & 0xF0000) >> 16;
+ u8 reg = (evt_type & 0x0F000) >> 12;
+ u8 group = evt_type & 0x0000F;
+ unsigned long flags;
+ u64 bitmap_t;
+
+ if (!prefix)
+ return 0;
+
+ raw_spin_lock_irqsave(&l2_pmu_constraints.lock, flags);
+
+ bitmap_t = 1 << ((reg * 4) + group);
+
+ /* Clear constraint bit. */
+ l2_pmu_constraints.pmu_bitmap &= ~bitmap_t;
+
+ raw_spin_unlock_irqrestore(&l2_pmu_constraints.lock, flags);
+ return 1;
+}
+
static struct arm_pmu scorpion_l2_pmu = {
.id = ARM_PERF_PMU_ID_SCORPIONMP_L2,
.type = ARM_PMU_DEVICE_L2CC,
@@ -731,13 +852,16 @@
.max_period = (1LLU << 32) - 1,
.get_hw_events = scorpion_l2_get_hw_events,
.num_events = MAX_SCORPION_L2_CTRS,
+ .test_set_event_constraints = msm_l2_test_set_ev_constraint,
+ .clear_event_constraints = msm_l2_clear_ev_constraint,
+ .pmu.attr_groups = msm_l2_pmu_attr_grps,
};
static int __devinit scorpion_l2_pmu_device_probe(struct platform_device *pdev)
{
scorpion_l2_pmu.plat_device = pdev;
- if (!armpmu_register(&scorpion_l2_pmu, "scorpion-l2", -1))
+ if (!armpmu_register(&scorpion_l2_pmu, "scorpionl2", -1))
pmu_type = scorpion_l2_pmu.pmu.type;
return 0;
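
[Editor's note - not part of the patch] Both L2 PMUs now publish a "format" attribute group describing the raw perf config layout: prefix in config bits 16-19, reg in 12-15, code in 4-11 and group in 0-3, with prefix 4 requesting slave-port origin filtering. A hypothetical helper packing such a config word could look like this (names are illustrative only):

#include <stdint.h>

#define L2_SLAVE_EV_PREFIX	4U	/* request slave-port origin filtering */

static uint32_t l2_pack_raw_event(uint32_t prefix, uint32_t reg,
				  uint32_t code, uint32_t group)
{
	return ((prefix & 0xfU) << 16) |
	       ((reg & 0xfU) << 12) |
	       ((code & 0xffU) << 4) |
	       (group & 0xfU);
}

With the attribute group registered under the renamed PMUs ("kraitl2"/"scorpionl2"), a perf userspace that parses the sysfs format directory can name the same fields symbolically, e.g. -e kraitl2/l2_prefix=0x4,l2_reg=...,l2_code=...,l2_grp=.../, instead of computing the raw value by hand.
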
diff --git a/arch/arm/mach-msm/platsmp-8625.c b/arch/arm/mach-msm/platsmp-8625.c
index b31d94b..700f966 100644
--- a/arch/arm/mach-msm/platsmp-8625.c
+++ b/arch/arm/mach-msm/platsmp-8625.c
@@ -18,6 +18,7 @@
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <asm/cacheflush.h>
#include <asm/hardware/gic.h>
diff --git a/arch/arm/mach-msm/qdsp5v2/audio_lpa.c b/arch/arm/mach-msm/qdsp5v2/audio_lpa.c
index d5fb2e9..60f43b9 100644
--- a/arch/arm/mach-msm/qdsp5v2/audio_lpa.c
+++ b/arch/arm/mach-msm/qdsp5v2/audio_lpa.c
@@ -34,6 +34,7 @@
#include <linux/slab.h>
#include <linux/msm_audio.h>
#include <mach/qdsp5v2/audio_dev_ctl.h>
+#include <linux/memory_alloc.h>
#include <mach/qdsp5v2/qdsp5audppmsg.h>
#include <mach/qdsp5v2/qdsp5audplaycmdi.h>
@@ -1410,7 +1411,7 @@
wake_up(&audio->event_wait);
audlpa_reset_event_queue(audio);
iounmap(audio->data);
- pmem_kfree(audio->phys);
+ free_contiguous_memory_by_paddr(audio->phys);
mutex_unlock(&audio->lock);
#ifdef CONFIG_DEBUG_FS
if (audio->dentry)
@@ -1655,7 +1656,7 @@
msm_adsp_put(audio->audplay);
err:
iounmap(audio->data);
- pmem_kfree(audio->phys);
+ free_contiguous_memory_by_paddr(audio->phys);
audpp_adec_free(audio->dec_id);
MM_INFO("audio instance 0x%08x freeing\n", (int)audio);
kfree(audio);
diff --git a/arch/arm/mach-msm/qdsp5v2/audio_mvs.c b/arch/arm/mach-msm/qdsp5v2/audio_mvs.c
index 99da836..1884b3c 100644
--- a/arch/arm/mach-msm/qdsp5v2/audio_mvs.c
+++ b/arch/arm/mach-msm/qdsp5v2/audio_mvs.c
@@ -304,6 +304,7 @@
uint32_t buf_free_cnt;
uint32_t rate_type;
uint32_t dtx_mode;
+ struct min_max_rate min_max_rate;
struct msm_rpc_endpoint *rpc_endpt;
uint32_t rpc_prog;
@@ -416,8 +417,10 @@
/* Set EVRC mode. */
memset(&set_voc_mode_msg, 0, sizeof(set_voc_mode_msg));
- set_voc_mode_msg.min_rate = cpu_to_be32(audio->rate_type);
- set_voc_mode_msg.max_rate = cpu_to_be32(audio->rate_type);
+ set_voc_mode_msg.min_rate =
+ cpu_to_be32(audio->min_max_rate.min_rate);
+ set_voc_mode_msg.max_rate =
+ cpu_to_be32(audio->min_max_rate.max_rate);
msm_rpc_setup_req(&set_voc_mode_msg.rpc_hdr,
audio->rpc_prog,
@@ -1555,6 +1558,8 @@
mutex_lock(&audio->lock);
config.mvs_mode = audio->mvs_mode;
config.rate_type = audio->rate_type;
+ config.min_max_rate.min_rate = audio->min_max_rate.min_rate;
+ config.min_max_rate.max_rate = audio->min_max_rate.max_rate;
mutex_unlock(&audio->lock);
rc = copy_to_user((void *)arg, &config, sizeof(config));
@@ -1579,6 +1584,10 @@
audio->mvs_mode = config.mvs_mode;
audio->rate_type = config.rate_type;
audio->dtx_mode = config.dtx_mode;
+ audio->min_max_rate.min_rate =
+ config.min_max_rate.min_rate;
+ audio->min_max_rate.max_rate =
+ config.min_max_rate.max_rate;
} else {
pr_err("%s: Set confg called in state %d\n",
__func__, audio->state);
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_acdb.c b/arch/arm/mach-msm/qdsp6v2/audio_acdb.c
index e7a81d3..22779b4 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_acdb.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_acdb.c
@@ -617,11 +617,14 @@
{
if (atomic64_read(&acdb_data.mem_len)) {
mutex_lock(&acdb_data.acdb_mutex);
+ atomic_set(&acdb_data.vocstrm_total_cal_size, 0);
+ atomic_set(&acdb_data.vocproc_total_cal_size, 0);
+ atomic_set(&acdb_data.vocvol_total_cal_size, 0);
+ atomic64_set(&acdb_data.mem_len, 0);
ion_unmap_kernel(acdb_data.ion_client, acdb_data.ion_handle);
ion_free(acdb_data.ion_client, acdb_data.ion_handle);
ion_client_destroy(acdb_data.ion_client);
mutex_unlock(&acdb_data.acdb_mutex);
- atomic64_set(&acdb_data.mem_len, 0);
}
return 0;
}
@@ -666,11 +669,11 @@
goto err_ion_handle;
}
kvaddr = (unsigned long)kvptr;
- mutex_unlock(&acdb_data.acdb_mutex);
-
atomic64_set(&acdb_data.paddr, paddr);
atomic64_set(&acdb_data.kvaddr, kvaddr);
atomic64_set(&acdb_data.mem_len, mem_len);
+ mutex_unlock(&acdb_data.acdb_mutex);
+
pr_debug("%s done! paddr = 0x%lx, "
"kvaddr = 0x%lx, len = x%lx\n",
__func__,
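
[Editor's note - not part of the patch] The audio_acdb.c hunks above tighten the ordering around acdb_mutex: on teardown the per-block calibration sizes and mem_len are cleared before the ION mapping is unmapped and freed, and on setup paddr/kvaddr/mem_len are published before the mutex is dropped. The general publish/retract shape, sketched standalone with a pthread mutex and illustrative names:

#include <pthread.h>
#include <stddef.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *buf;
static size_t len;	/* readers check len before touching buf */

static void publish(void *p, size_t n)
{
	pthread_mutex_lock(&lock);
	buf = p;
	len = n;		/* made visible only after buf is valid */
	pthread_mutex_unlock(&lock);
}

static void retract(void)
{
	pthread_mutex_lock(&lock);
	len = 0;		/* stop consumers before the buffer goes away */
	free(buf);
	buf = NULL;
	pthread_mutex_unlock(&lock);
}
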
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_multi_aac.c b/arch/arm/mach-msm/qdsp6v2/audio_multi_aac.c
index 9253056..fbd94c5 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_multi_aac.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_multi_aac.c
@@ -42,13 +42,16 @@
struct asm_aac_cfg aac_cfg;
struct msm_audio_aac_config *aac_config;
uint32_t sbr_ps = 0x00;
+ aac_config = (struct msm_audio_aac_config *)audio->codec_cfg;
+ aac_cfg.ch_cfg = aac_config->channel_configuration;
+ aac_cfg.sample_rate = audio->pcm_cfg.sample_rate;
pr_debug("%s: AUDIO_START session_id[%d]\n", __func__,
audio->ac->session);
if (audio->feedback == NON_TUNNEL_MODE) {
/* Configure PCM output block */
- rc = q6asm_enc_cfg_blk_pcm(audio->ac,
- 0, /*native sampling rate*/
- 0 /*native channel count*/);
+ rc = q6asm_enc_cfg_blk_pcm_native(audio->ac,
+ aac_cfg.sample_rate,
+ aac_cfg.ch_cfg);
if (rc < 0) {
pr_err("pcm output block config failed\n");
break;
@@ -58,7 +61,6 @@
rc = q6asm_enable_sbrps(audio->ac, sbr_ps);
if (rc < 0)
pr_err("sbr-ps enable failed\n");
- aac_config = (struct msm_audio_aac_config *)audio->codec_cfg;
if (aac_config->sbr_ps_on_flag)
aac_cfg.aot = AAC_ENC_MODE_EAAC_P;
else if (aac_config->sbr_on_flag)
@@ -87,8 +89,6 @@
aac_config->aac_scalefactor_data_resilience_flag;
aac_cfg.spectral_data_resilience =
aac_config->aac_spectral_data_resilience_flag;
- aac_cfg.ch_cfg = aac_config->channel_configuration;
- aac_cfg.sample_rate = audio->pcm_cfg.sample_rate;
pr_debug("%s:format=%x aot=%d ch=%d sr=%d\n",
__func__, aac_cfg.format,
@@ -146,16 +146,14 @@
AUDIO_AAC_DUAL_MONO_PL_PR) ||
(aac_config->dual_mono_mode >
AUDIO_AAC_DUAL_MONO_PL_SR)) {
- pr_err("%s:AUDIO_SET_AAC_CONFIG: Invalid"
- "dual_mono mode =%d\n", __func__,
- aac_config->dual_mono_mode);
+ pr_err("%s:AUDIO_SET_AAC_CONFIG: Invalid dual_mono mode =%d\n",
+ __func__, aac_config->dual_mono_mode);
} else {
/* convert the data from user into sce_left
* and sce_right based on the definitions
*/
- pr_debug("%s: AUDIO_SET_AAC_CONFIG: modify"
- "dual_mono mode =%d\n", __func__,
- aac_config->dual_mono_mode);
+ pr_debug("%s: AUDIO_SET_AAC_CONFIG: modify dual_mono mode =%d\n",
+ __func__, aac_config->dual_mono_mode);
switch (aac_config->dual_mono_mode) {
case AUDIO_AAC_DUAL_MONO_PL_PR:
sce_left = 1;
@@ -178,8 +176,8 @@
rc = q6asm_cfg_dual_mono_aac(audio->ac,
sce_left, sce_right);
if (rc < 0)
- pr_err("%s: asm cmd dualmono failed"
- " rc=%d\n", __func__, rc);
+ pr_err("%s: asm cmd dualmono failed rc=%d\n",
+ __func__, rc);
} break;
}
break;
@@ -212,8 +210,8 @@
audio->codec_cfg = kzalloc(sizeof(struct msm_audio_aac_config),
GFP_KERNEL);
if (audio->codec_cfg == NULL) {
- pr_err("%s: Could not allocate memory for aac"
- "config\n", __func__);
+ pr_err("%s: Could not allocate memory for aac config\n",
+ __func__);
kfree(audio);
return -ENOMEM;
}
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c b/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c
index 6a99be2..fdc596d 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c
@@ -33,7 +33,7 @@
return 0;
}
-ssize_t audio_aio_debug_read(struct file *file, char __user * buf,
+ssize_t audio_aio_debug_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
const int debug_bufmax = 4096;
@@ -67,7 +67,7 @@
}
#endif
-static int insert_eos_buf(struct q6audio_aio *audio,
+int insert_eos_buf(struct q6audio_aio *audio,
struct audio_aio_buffer_node *buf_node)
{
struct dec_meta_out *eos_buf = buf_node->kvaddr;
@@ -93,7 +93,7 @@
sizeof(meta_data->meta_out_dsp[0]);
}
-static void extract_meta_out_info(struct q6audio_aio *audio,
+void extract_meta_out_info(struct q6audio_aio *audio,
struct audio_aio_buffer_node *buf_node, int dir)
{
struct dec_meta_out *meta_data = buf_node->kvaddr;
@@ -114,8 +114,7 @@
&buf_node->meta_info.meta_out,
sizeof(struct dec_meta_out));
meta_data->meta_out_dsp[0].nflags = 0x00000000;
- pr_debug("%s[%p]:o/p: msw_ts 0x%8x lsw_ts 0x%8x nflags 0x%8x,"
- "num_frames = %d\n",
+ pr_debug("%s[%p]:o/p: msw_ts 0x%8x lsw_ts 0x%8x nflags 0x%8x, num_frames = %d\n",
__func__, audio,
((struct dec_meta_out *)buf_node->kvaddr)->\
meta_out_dsp[0].msw_ts,
@@ -293,8 +292,8 @@
kfree(used_buf);
if (list_empty(&audio->out_queue) &&
(audio->drv_status & ADRV_STATUS_FSYNC)) {
- pr_debug("%s[%p]: list is empty, reached EOS in"
- "Tunnel\n", __func__, audio);
+ pr_debug("%s[%p]: list is empty, reached EOS in Tunnel\n",
+ __func__, audio);
wake_up(&audio->write_wait);
}
} else {
@@ -304,60 +303,6 @@
}
}
-/* Read buffer from DSP / Handle Ack from DSP */
-void audio_aio_async_read_ack(struct q6audio_aio *audio, uint32_t token,
- uint32_t *payload)
-{
- unsigned long flags;
- union msm_audio_event_payload event_payload;
- struct audio_aio_buffer_node *filled_buf;
-
- /* No active flush in progress */
- if (audio->rflush)
- return;
-
- /* Statistics of read */
- atomic_add(payload[2], &audio->in_bytes);
- atomic_add(payload[7], &audio->in_samples);
-
- spin_lock_irqsave(&audio->dsp_lock, flags);
- BUG_ON(list_empty(&audio->in_queue));
- filled_buf = list_first_entry(&audio->in_queue,
- struct audio_aio_buffer_node, list);
- if (token == (filled_buf->token)) {
- list_del(&filled_buf->list);
- spin_unlock_irqrestore(&audio->dsp_lock, flags);
- event_payload.aio_buf = filled_buf->buf;
- /* Read done Buffer due to flush/normal condition
- after EOS event, so append EOS buffer */
- if (audio->eos_rsp == 0x1) {
- event_payload.aio_buf.data_len =
- insert_eos_buf(audio, filled_buf);
- /* Reset flag back to indicate eos intimated */
- audio->eos_rsp = 0;
- } else {
- filled_buf->meta_info.meta_out.num_of_frames =
- payload[7];
- event_payload.aio_buf.data_len = payload[2] + \
- payload[3] + \
- sizeof(struct dec_meta_out);
- pr_debug("%s[%p]:nr of frames 0x%8x len=%d\n",
- __func__, audio,
- filled_buf->meta_info.meta_out.num_of_frames,
- event_payload.aio_buf.data_len);
- extract_meta_out_info(audio, filled_buf, 0);
- audio->eos_rsp = 0;
- }
- audio_aio_post_event(audio, AUDIO_EVENT_READ_DONE,
- event_payload);
- kfree(filled_buf);
- } else {
- pr_err("%s[%p]:expected=%lx ret=%x\n",
- __func__, audio, filled_buf->token, token);
- spin_unlock_irqrestore(&audio->dsp_lock, flags);
- }
-}
-
/* ------------------- device --------------------- */
void audio_aio_async_out_flush(struct q6audio_aio *audio)
{
@@ -404,8 +349,8 @@
/* Forcefull send o/p eos buffer after flush, if no eos response
* received by dsp even after sending eos command */
if ((audio->eos_rsp != 1) && audio->eos_flag) {
- pr_debug("%s[%p]: send eos on o/p buffer during"
- "flush\n", __func__, audio);
+ pr_debug("%s[%p]: send eos on o/p buffer during flush\n",
+ __func__, audio);
payload.aio_buf = buf_node->buf;
payload.aio_buf.data_len =
insert_eos_buf(audio, buf_node);
@@ -716,9 +661,7 @@
list_for_each_entry(region_elt, &audio->ion_region_queue, list) {
if (CONTAINS(region_elt, &t) || CONTAINS(&t, region_elt) ||
OVERLAPS(region_elt, &t)) {
- pr_err("%s[%p]:region (vaddr %p len %ld)"
- " clashes with registered region"
- " (vaddr %p paddr %p len %ld)\n",
+ pr_err("%s[%p]:region (vaddr %p len %ld) clashes with registered region (vaddr %p paddr %p len %ld)\n",
__func__, audio, vaddr, len,
region_elt->vaddr,
(void *)region_elt->paddr, region_elt->len);
@@ -870,8 +813,7 @@
struct audio_client *ac;
struct audio_aio_write_param param;
- pr_debug("%s[%p]: Send write buff %p phy %lx len %d"
- "meta_enable = %d\n",
+ pr_debug("%s[%p]: Send write buff %p phy %lx len %d meta_enable = %d\n",
__func__, audio, buf_node, buf_node->paddr,
buf_node->buf.data_len,
audio->buf_cfg.meta_info_enable);
@@ -973,8 +915,8 @@
return -EFAULT;
}
- pr_debug("%s[%p]:node %p dir %x buf_addr %p buf_len %d data_len"
- "%d\n", __func__, audio, buf_node, dir, buf_node->buf.buf_addr,
+ pr_debug("%s[%p]:node %p dir %x buf_addr %p buf_len %d data_len %d\n",
+ __func__, audio, buf_node, dir, buf_node->buf.buf_addr,
buf_node->buf.buf_len, buf_node->buf.data_len);
buf_node->paddr = audio_aio_ion_fixup(audio, buf_node->buf.buf_addr,
buf_node->buf.buf_len, 1,
@@ -1335,8 +1277,8 @@
break;
}
if (audio->feedback != NON_TUNNEL_MODE) {
- pr_err("%s[%p]:Not sufficient permission to"
- "change the playback mode\n", __func__, audio);
+ pr_err("%s[%p]:Not sufficient permission to change the playback mode\n",
+ __func__, audio);
rc = -EACCES;
mutex_unlock(&audio->lock);
break;
@@ -1379,8 +1321,8 @@
break;
}
case AUDIO_GET_BUF_CFG: {
- pr_debug("%s[%p]:session id %d: Get-buf-cfg: meta[%d]"
- "framesperbuf[%d]\n", __func__, audio,
+ pr_debug("%s[%p]:session id %d: Get-buf-cfg: meta[%d] framesperbuf[%d]\n",
+ __func__, audio,
audio->ac->session, audio->buf_cfg.meta_info_enable,
audio->buf_cfg.frames_per_buf);
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.h b/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.h
index 77288da..4a65304 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.h
+++ b/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.h
@@ -195,6 +195,12 @@
void audio_aio_async_read_ack(struct q6audio_aio *audio, uint32_t token,
uint32_t *payload);
+int insert_eos_buf(struct q6audio_aio *audio,
+ struct audio_aio_buffer_node *buf_node);
+
+void extract_meta_out_info(struct q6audio_aio *audio,
+ struct audio_aio_buffer_node *buf_node, int dir);
+
int audio_aio_open(struct q6audio_aio *audio, struct file *file);
int audio_aio_enable(struct q6audio_aio *audio);
void audio_aio_post_event(struct q6audio_aio *audio, int type,
@@ -206,6 +212,6 @@
void audio_aio_async_in_flush(struct q6audio_aio *audio);
#ifdef CONFIG_DEBUG_FS
ssize_t audio_aio_debug_open(struct inode *inode, struct file *file);
-ssize_t audio_aio_debug_read(struct file *file, char __user * buf,
+ssize_t audio_aio_debug_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos);
#endif
diff --git a/arch/arm/mach-msm/qdsp6v2/q6audio_v1_aio.c b/arch/arm/mach-msm/qdsp6v2/q6audio_v1_aio.c
index 112de62..078eea8 100644
--- a/arch/arm/mach-msm/qdsp6v2/q6audio_v1_aio.c
+++ b/arch/arm/mach-msm/qdsp6v2/q6audio_v1_aio.c
@@ -97,9 +97,8 @@
"payload[2] = %d, payload[3] = %d\n", __func__,
audio, payload[0], payload[1], payload[2],
payload[3]);
- pr_debug("%s[%p]: ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY, "
- "sr(prev) = %d, chl(prev) = %d,",
- __func__, audio, audio->pcm_cfg.sample_rate,
+ pr_debug("%s[%p]: ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY, sr(prev) = %d, chl(prev) = %d,",
+ __func__, audio, audio->pcm_cfg.sample_rate,
audio->pcm_cfg.channel_count);
audio->pcm_cfg.sample_rate = payload[0];
audio->pcm_cfg.channel_count = payload[1] & 0xFFFF;
@@ -111,3 +110,57 @@
break;
}
}
+
+/* Read buffer from DSP / Handle Ack from DSP */
+void audio_aio_async_read_ack(struct q6audio_aio *audio, uint32_t token,
+ uint32_t *payload)
+{
+ unsigned long flags;
+ union msm_audio_event_payload event_payload;
+ struct audio_aio_buffer_node *filled_buf;
+
+ /* No active flush in progress */
+ if (audio->rflush)
+ return;
+
+ /* Statistics of read */
+ atomic_add(payload[2], &audio->in_bytes);
+ atomic_add(payload[7], &audio->in_samples);
+
+ spin_lock_irqsave(&audio->dsp_lock, flags);
+ BUG_ON(list_empty(&audio->in_queue));
+ filled_buf = list_first_entry(&audio->in_queue,
+ struct audio_aio_buffer_node, list);
+ if (token == (filled_buf->token)) {
+ list_del(&filled_buf->list);
+ spin_unlock_irqrestore(&audio->dsp_lock, flags);
+ event_payload.aio_buf = filled_buf->buf;
+ /* Read done Buffer due to flush/normal condition
+ after EOS event, so append EOS buffer */
+ if (audio->eos_rsp == 0x1) {
+ event_payload.aio_buf.data_len =
+ insert_eos_buf(audio, filled_buf);
+ /* Reset flag back to indicate eos intimated */
+ audio->eos_rsp = 0;
+ } else {
+ filled_buf->meta_info.meta_out.num_of_frames =
+ payload[7];
+ event_payload.aio_buf.data_len = payload[2] + \
+ payload[3] + \
+ sizeof(struct dec_meta_out);
+ pr_debug("%s[%p]:nr of frames 0x%8x len=%d\n",
+ __func__, audio,
+ filled_buf->meta_info.meta_out.num_of_frames,
+ event_payload.aio_buf.data_len);
+ extract_meta_out_info(audio, filled_buf, 0);
+ audio->eos_rsp = 0;
+ }
+ audio_aio_post_event(audio, AUDIO_EVENT_READ_DONE,
+ event_payload);
+ kfree(filled_buf);
+ } else {
+ pr_err("%s[%p]:expected=%lx ret=%x\n",
+ __func__, audio, filled_buf->token, token);
+ spin_unlock_irqrestore(&audio->dsp_lock, flags);
+ }
+}
diff --git a/arch/arm/mach-msm/qdsp6v2/q6audio_v2_aio.c b/arch/arm/mach-msm/qdsp6v2/q6audio_v2_aio.c
index aab7b19..ad4fc6f 100644
--- a/arch/arm/mach-msm/qdsp6v2/q6audio_v2_aio.c
+++ b/arch/arm/mach-msm/qdsp6v2/q6audio_v2_aio.c
@@ -91,14 +91,13 @@
break;
case ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY:
case ASM_DATA_EVENT_ENC_SR_CM_CHANGE_NOTIFY:
-
pr_debug("%s[%p]: ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY, payload[0]-sr = %d, payload[1]-chl = %d, payload[2] = %d, payload[3] = %d\n",
__func__, audio, payload[0],
payload[1], payload[2], payload[3]);
pr_debug("%s[%p]: ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY, sr(prev) = %d, chl(prev) = %d,",
- __func__, audio, audio->pcm_cfg.sample_rate,
- audio->pcm_cfg.channel_count);
+ __func__, audio, audio->pcm_cfg.sample_rate,
+ audio->pcm_cfg.channel_count);
audio->pcm_cfg.sample_rate = payload[0];
audio->pcm_cfg.channel_count = payload[1] & 0xFFFF;
@@ -110,3 +109,61 @@
break;
}
}
+
+/* Read buffer from DSP / Handle Ack from DSP */
+void audio_aio_async_read_ack(struct q6audio_aio *audio, uint32_t token,
+ uint32_t *payload)
+{
+ unsigned long flags;
+ union msm_audio_event_payload event_payload;
+ struct audio_aio_buffer_node *filled_buf;
+ pr_debug("%s\n", __func__);
+
+ /* No active flush in progress */
+ if (audio->rflush)
+ return;
+
+ /* Statistics of read */
+ atomic_add(payload[4], &audio->in_bytes);
+ atomic_add(payload[9], &audio->in_samples);
+
+ spin_lock_irqsave(&audio->dsp_lock, flags);
+ BUG_ON(list_empty(&audio->in_queue));
+ filled_buf = list_first_entry(&audio->in_queue,
+ struct audio_aio_buffer_node, list);
+
+ pr_debug("%s token: 0x[%d], filled_buf->token: 0x[%lu]",
+ __func__, token, filled_buf->token);
+ if (token == (filled_buf->token)) {
+ list_del(&filled_buf->list);
+ spin_unlock_irqrestore(&audio->dsp_lock, flags);
+ event_payload.aio_buf = filled_buf->buf;
+ /* Read done Buffer due to flush/normal condition
+ after EOS event, so append EOS buffer */
+ if (audio->eos_rsp == 0x1) {
+ event_payload.aio_buf.data_len =
+ insert_eos_buf(audio, filled_buf);
+ /* Reset flag back to indicate eos intimated */
+ audio->eos_rsp = 0;
+ } else {
+ filled_buf->meta_info.meta_out.num_of_frames\
+ = payload[9];
+ event_payload.aio_buf.data_len = payload[4]\
+ + payload[5] + sizeof(struct dec_meta_out);
+ pr_debug("%s[%p]:nr of frames 0x%8x len=%d\n",
+ __func__, audio,
+ filled_buf->meta_info.meta_out.num_of_frames,
+ event_payload.aio_buf.data_len);
+ extract_meta_out_info(audio, filled_buf, 0);
+ audio->eos_rsp = 0;
+ }
+ pr_debug("%s, posting read done to the app here\n", __func__);
+ audio_aio_post_event(audio, AUDIO_EVENT_READ_DONE,
+ event_payload);
+ kfree(filled_buf);
+ } else {
+ pr_err("%s[%p]:expected=%lx ret=%x\n",
+ __func__, audio, filled_buf->token, token);
+ spin_unlock_irqrestore(&audio->dsp_lock, flags);
+ }
+}
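
[Editor's note - not part of the patch] audio_aio_async_read_ack() moves out of audio_utils_aio.c and into the per-version files because the DSP read-ack payload is laid out differently: the v1 copy takes the byte counts from payload[2]/[3] and the frame count from payload[7], while the v2 copy uses payload[4]/[5] and payload[9]. A hypothetical pair of helpers mirroring the two data_len computations (struct dec_meta_out is left opaque; in the driver meta_len would be sizeof(struct dec_meta_out)):

#include <stdint.h>

/* v1 ack layout: bytes in payload[2]/[3], frames in payload[7]. */
static inline uint32_t v1_read_data_len(const uint32_t *payload, uint32_t meta_len)
{
	return payload[2] + payload[3] + meta_len;
}

/* v2 ack layout: bytes in payload[4]/[5], frames in payload[9]. */
static inline uint32_t v2_read_data_len(const uint32_t *payload, uint32_t meta_len)
{
	return payload[4] + payload[5] + meta_len;
}
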
diff --git a/arch/arm/mach-msm/qdss-etb.c b/arch/arm/mach-msm/qdss-etb.c
deleted file mode 100644
index 7837af0..0000000
--- a/arch/arm/mach-msm/qdss-etb.c
+++ /dev/null
@@ -1,412 +0,0 @@
-/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/err.h>
-#include <linux/fs.h>
-#include <linux/miscdevice.h>
-#include <linux/uaccess.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
-
-#include "qdss-priv.h"
-
-#define etb_writel(etb, val, off) __raw_writel((val), etb.base + off)
-#define etb_readl(etb, off) __raw_readl(etb.base + off)
-
-#define ETB_RAM_DEPTH_REG (0x004)
-#define ETB_STATUS_REG (0x00C)
-#define ETB_RAM_READ_DATA_REG (0x010)
-#define ETB_RAM_READ_POINTER (0x014)
-#define ETB_RAM_WRITE_POINTER (0x018)
-#define ETB_TRG (0x01C)
-#define ETB_CTL_REG (0x020)
-#define ETB_RWD_REG (0x024)
-#define ETB_FFSR (0x300)
-#define ETB_FFCR (0x304)
-#define ETB_ITMISCOP0 (0xEE0)
-#define ETB_ITTRFLINACK (0xEE4)
-#define ETB_ITTRFLIN (0xEE8)
-#define ETB_ITATBDATA0 (0xEEC)
-#define ETB_ITATBCTR2 (0xEF0)
-#define ETB_ITATBCTR1 (0xEF4)
-#define ETB_ITATBCTR0 (0xEF8)
-
-
-#define BYTES_PER_WORD 4
-#define ETB_SIZE_WORDS 4096
-#define FRAME_SIZE_WORDS 4
-
-#define ETB_LOCK() \
-do { \
- mb(); \
- etb_writel(etb, 0x0, CS_LAR); \
-} while (0)
-#define ETB_UNLOCK() \
-do { \
- etb_writel(etb, CS_UNLOCK_MAGIC, CS_LAR); \
- mb(); \
-} while (0)
-
-struct etb_ctx {
- uint8_t *buf;
- void __iomem *base;
- bool enabled;
- bool reading;
- spinlock_t spinlock;
- atomic_t in_use;
- struct device *dev;
- struct kobject *kobj;
- uint32_t trigger_cntr;
-};
-
-static struct etb_ctx etb;
-
-static void __etb_enable(void)
-{
- int i;
-
- ETB_UNLOCK();
-
- etb_writel(etb, 0x0, ETB_RAM_WRITE_POINTER);
- for (i = 0; i < ETB_SIZE_WORDS; i++)
- etb_writel(etb, 0x0, ETB_RWD_REG);
-
- etb_writel(etb, 0x0, ETB_RAM_WRITE_POINTER);
- etb_writel(etb, 0x0, ETB_RAM_READ_POINTER);
-
- etb_writel(etb, etb.trigger_cntr, ETB_TRG);
- etb_writel(etb, BIT(13) | BIT(0), ETB_FFCR);
- etb_writel(etb, BIT(0), ETB_CTL_REG);
-
- ETB_LOCK();
-}
-
-void etb_enable(void)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&etb.spinlock, flags);
- __etb_enable();
- etb.enabled = true;
- dev_info(etb.dev, "ETB enabled\n");
- spin_unlock_irqrestore(&etb.spinlock, flags);
-}
-
-static void __etb_disable(void)
-{
- int count;
- uint32_t ffcr;
-
- ETB_UNLOCK();
-
- ffcr = etb_readl(etb, ETB_FFCR);
- ffcr |= (BIT(12) | BIT(6));
- etb_writel(etb, ffcr, ETB_FFCR);
-
- for (count = TIMEOUT_US; BVAL(etb_readl(etb, ETB_FFCR), 6) != 0
- && count > 0; count--)
- udelay(1);
- WARN(count == 0, "timeout while flushing ETB, ETB_FFCR: %#x\n",
- etb_readl(etb, ETB_FFCR));
-
- etb_writel(etb, 0x0, ETB_CTL_REG);
-
- for (count = TIMEOUT_US; BVAL(etb_readl(etb, ETB_FFSR), 1) != 1
- && count > 0; count--)
- udelay(1);
- WARN(count == 0, "timeout while disabling ETB, ETB_FFSR: %#x\n",
- etb_readl(etb, ETB_FFSR));
-
- ETB_LOCK();
-}
-
-void etb_disable(void)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&etb.spinlock, flags);
- __etb_disable();
- etb.enabled = false;
- dev_info(etb.dev, "ETB disabled\n");
- spin_unlock_irqrestore(&etb.spinlock, flags);
-}
-
-static void __etb_dump(void)
-{
- int i;
- uint8_t *buf_ptr;
- uint32_t read_data;
- uint32_t read_ptr;
- uint32_t write_ptr;
- uint32_t frame_off;
- uint32_t frame_endoff;
-
- ETB_UNLOCK();
-
- read_ptr = etb_readl(etb, ETB_RAM_READ_POINTER);
- write_ptr = etb_readl(etb, ETB_RAM_WRITE_POINTER);
-
- frame_off = write_ptr % FRAME_SIZE_WORDS;
- frame_endoff = FRAME_SIZE_WORDS - frame_off;
- if (frame_off) {
- dev_err(etb.dev, "write_ptr: %lu not aligned to formatter "
- "frame size\n", (unsigned long)write_ptr);
- dev_err(etb.dev, "frameoff: %lu, frame_endoff: %lu\n",
- (unsigned long)frame_off, (unsigned long)frame_endoff);
- write_ptr += frame_endoff;
- }
-
- if ((etb_readl(etb, ETB_STATUS_REG) & BIT(0)) == 0)
- etb_writel(etb, 0x0, ETB_RAM_READ_POINTER);
- else
- etb_writel(etb, write_ptr, ETB_RAM_READ_POINTER);
-
- buf_ptr = etb.buf;
- for (i = 0; i < ETB_SIZE_WORDS; i++) {
- read_data = etb_readl(etb, ETB_RAM_READ_DATA_REG);
- *buf_ptr++ = read_data >> 0;
- *buf_ptr++ = read_data >> 8;
- *buf_ptr++ = read_data >> 16;
- *buf_ptr++ = read_data >> 24;
- }
-
- if (frame_off) {
- buf_ptr -= (frame_endoff * BYTES_PER_WORD);
- for (i = 0; i < frame_endoff; i++) {
- *buf_ptr++ = 0x0;
- *buf_ptr++ = 0x0;
- *buf_ptr++ = 0x0;
- *buf_ptr++ = 0x0;
- }
- }
-
- etb_writel(etb, read_ptr, ETB_RAM_READ_POINTER);
-
- ETB_LOCK();
-}
-
-void etb_dump(void)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&etb.spinlock, flags);
- if (etb.enabled) {
- __etb_disable();
- __etb_dump();
- __etb_enable();
-
- dev_info(etb.dev, "ETB dumped\n");
- }
- spin_unlock_irqrestore(&etb.spinlock, flags);
-}
-
-static int etb_open(struct inode *inode, struct file *file)
-{
- if (atomic_cmpxchg(&etb.in_use, 0, 1))
- return -EBUSY;
-
- dev_dbg(etb.dev, "%s: successfully opened\n", __func__);
- return 0;
-}
-
-static ssize_t etb_read(struct file *file, char __user *data,
- size_t len, loff_t *ppos)
-{
- if (etb.reading == false) {
- etb_dump();
- etb.reading = true;
- }
-
- if (*ppos + len > ETB_SIZE_WORDS * BYTES_PER_WORD)
- len = ETB_SIZE_WORDS * BYTES_PER_WORD - *ppos;
-
- if (copy_to_user(data, etb.buf + *ppos, len)) {
- dev_dbg(etb.dev, "%s: copy_to_user failed\n", __func__);
- return -EFAULT;
- }
-
- *ppos += len;
-
- dev_dbg(etb.dev, "%s: %d bytes copied, %d bytes left\n",
- __func__, len, (int) (ETB_SIZE_WORDS * BYTES_PER_WORD - *ppos));
-
- return len;
-}
-
-static int etb_release(struct inode *inode, struct file *file)
-{
- etb.reading = false;
-
- atomic_set(&etb.in_use, 0);
-
- dev_dbg(etb.dev, "%s: released\n", __func__);
-
- return 0;
-}
-
-static const struct file_operations etb_fops = {
- .owner = THIS_MODULE,
- .open = etb_open,
- .read = etb_read,
- .release = etb_release,
-};
-
-static struct miscdevice etb_misc = {
- .name = "msm_etb",
- .minor = MISC_DYNAMIC_MINOR,
- .fops = &etb_fops,
-};
-
-#define ETB_ATTR(__name) \
-static struct kobj_attribute __name##_attr = \
- __ATTR(__name, S_IRUGO | S_IWUSR, __name##_show, __name##_store)
-
-static ssize_t trigger_cntr_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t n)
-{
- unsigned long val;
-
- if (sscanf(buf, "%lx", &val) != 1)
- return -EINVAL;
-
- etb.trigger_cntr = val;
- return n;
-}
-static ssize_t trigger_cntr_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
-{
- unsigned long val = etb.trigger_cntr;
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-ETB_ATTR(trigger_cntr);
-
-static int __devinit etb_sysfs_init(void)
-{
- int ret;
-
- etb.kobj = kobject_create_and_add("etb", qdss_get_modulekobj());
- if (!etb.kobj) {
- dev_err(etb.dev, "failed to create ETB sysfs kobject\n");
- ret = -ENOMEM;
- goto err_create;
- }
-
- ret = sysfs_create_file(etb.kobj, &trigger_cntr_attr.attr);
- if (ret) {
- dev_err(etb.dev, "failed to create ETB sysfs trigger_cntr"
- " attribute\n");
- goto err_file;
- }
-
- return 0;
-err_file:
- kobject_put(etb.kobj);
-err_create:
- return ret;
-}
-
-static void __devexit etb_sysfs_exit(void)
-{
- sysfs_remove_file(etb.kobj, &trigger_cntr_attr.attr);
- kobject_put(etb.kobj);
-}
-
-static int __devinit etb_probe(struct platform_device *pdev)
-{
- int ret;
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ret = -EINVAL;
- goto err_res;
- }
-
- etb.base = ioremap_nocache(res->start, resource_size(res));
- if (!etb.base) {
- ret = -EINVAL;
- goto err_ioremap;
- }
-
- etb.dev = &pdev->dev;
-
- spin_lock_init(&etb.spinlock);
-
- ret = misc_register(&etb_misc);
- if (ret)
- goto err_misc;
-
- etb.buf = kzalloc(ETB_SIZE_WORDS * BYTES_PER_WORD, GFP_KERNEL);
- if (!etb.buf) {
- ret = -ENOMEM;
- goto err_alloc;
- }
-
- etb_sysfs_init();
-
- dev_info(etb.dev, "ETB initialized\n");
- return 0;
-
-err_alloc:
- misc_deregister(&etb_misc);
-err_misc:
- iounmap(etb.base);
-err_ioremap:
-err_res:
- dev_err(etb.dev, "ETB init failed\n");
- return ret;
-}
-
-static int __devexit etb_remove(struct platform_device *pdev)
-{
- if (etb.enabled)
- etb_disable();
- etb_sysfs_exit();
- kfree(etb.buf);
- misc_deregister(&etb_misc);
- iounmap(etb.base);
-
- return 0;
-}
-
-static struct platform_driver etb_driver = {
- .probe = etb_probe,
- .remove = __devexit_p(etb_remove),
- .driver = {
- .name = "msm_etb",
- },
-};
-
-static int __init etb_init(void)
-{
- return platform_driver_register(&etb_driver);
-}
-module_init(etb_init);
-
-static void __exit etb_exit(void)
-{
- platform_driver_unregister(&etb_driver);
-}
-module_exit(etb_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CoreSight Embedded Trace Buffer driver");
diff --git a/arch/arm/mach-msm/qdss-etm.c b/arch/arm/mach-msm/qdss-etm.c
deleted file mode 100644
index ca6e0c6..0000000
--- a/arch/arm/mach-msm/qdss-etm.c
+++ /dev/null
@@ -1,1329 +0,0 @@
-/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/err.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/smp.h>
-#include <linux/wakelock.h>
-#include <linux/pm_qos.h>
-#include <linux/sysfs.h>
-#include <linux/stat.h>
-#include <asm/sections.h>
-#include <mach/socinfo.h>
-
-#include "qdss-priv.h"
-
-#define etm_writel(etm, cpu, val, off) \
- __raw_writel((val), etm.base + (SZ_4K * cpu) + off)
-#define etm_readl(etm, cpu, off) \
- __raw_readl(etm.base + (SZ_4K * cpu) + off)
-
-/*
- * Device registers:
- * 0x000 - 0x2FC: Trace registers
- * 0x300 - 0x314: Management registers
- * 0x318 - 0xEFC: Trace registers
- *
- * Coresight registers
- * 0xF00 - 0xF9C: Management registers
- * 0xFA0 - 0xFA4: Management registers in PFTv1.0
- * Trace registers in PFTv1.1
- * 0xFA8 - 0xFFC: Management registers
- */
-
-/* Trace registers (0x000-0x2FC) */
-#define ETMCR (0x000)
-#define ETMCCR (0x004)
-#define ETMTRIGGER (0x008)
-#define ETMSR (0x010)
-#define ETMSCR (0x014)
-#define ETMTSSCR (0x018)
-#define ETMTEEVR (0x020)
-#define ETMTECR1 (0x024)
-#define ETMFFLR (0x02C)
-#define ETMACVRn(n) (0x040 + (n * 4))
-#define ETMACTRn(n) (0x080 + (n * 4))
-#define ETMCNTRLDVRn(n) (0x140 + (n * 4))
-#define ETMCNTENRn(n) (0x150 + (n * 4))
-#define ETMCNTRLDEVRn(n) (0x160 + (n * 4))
-#define ETMCNTVRn(n) (0x170 + (n * 4))
-#define ETMSQ12EVR (0x180)
-#define ETMSQ21EVR (0x184)
-#define ETMSQ23EVR (0x188)
-#define ETMSQ31EVR (0x18C)
-#define ETMSQ32EVR (0x190)
-#define ETMSQ13EVR (0x194)
-#define ETMSQR (0x19C)
-#define ETMEXTOUTEVRn(n) (0x1A0 + (n * 4))
-#define ETMCIDCVRn(n) (0x1B0 + (n * 4))
-#define ETMCIDCMR (0x1BC)
-#define ETMIMPSPEC0 (0x1C0)
-#define ETMIMPSPEC1 (0x1C4)
-#define ETMIMPSPEC2 (0x1C8)
-#define ETMIMPSPEC3 (0x1CC)
-#define ETMIMPSPEC4 (0x1D0)
-#define ETMIMPSPEC5 (0x1D4)
-#define ETMIMPSPEC6 (0x1D8)
-#define ETMIMPSPEC7 (0x1DC)
-#define ETMSYNCFR (0x1E0)
-#define ETMIDR (0x1E4)
-#define ETMCCER (0x1E8)
-#define ETMEXTINSELR (0x1EC)
-#define ETMTESSEICR (0x1F0)
-#define ETMEIBCR (0x1F4)
-#define ETMTSEVR (0x1F8)
-#define ETMAUXCR (0x1FC)
-#define ETMTRACEIDR (0x200)
-#define ETMVMIDCVR (0x240)
-/* Management registers (0x300-0x314) */
-#define ETMOSLAR (0x300)
-#define ETMOSLSR (0x304)
-#define ETMOSSRR (0x308)
-#define ETMPDCR (0x310)
-#define ETMPDSR (0x314)
-
-#define ETM_MAX_ADDR_CMP (16)
-#define ETM_MAX_CNTR (4)
-#define ETM_MAX_CTXID_CMP (3)
-
-#define ETM_MODE_EXCLUDE BIT(0)
-#define ETM_MODE_CYCACC BIT(1)
-#define ETM_MODE_STALL BIT(2)
-#define ETM_MODE_TIMESTAMP BIT(3)
-#define ETM_MODE_CTXID BIT(4)
-#define ETM_MODE_ALL (0x1F)
-
-#define ETM_EVENT_MASK (0x1FFFF)
-#define ETM_SYNC_MASK (0xFFF)
-#define ETM_ALL_MASK (0xFFFFFFFF)
-
-#define ETM_SEQ_STATE_MAX_VAL (0x2)
-
-enum {
- ETM_ADDR_TYPE_NONE,
- ETM_ADDR_TYPE_SINGLE,
- ETM_ADDR_TYPE_RANGE,
- ETM_ADDR_TYPE_START,
- ETM_ADDR_TYPE_STOP,
-};
-
-#define ETM_LOCK(cpu) \
-do { \
- mb(); \
- etm_writel(etm, cpu, 0x0, CS_LAR); \
-} while (0)
-#define ETM_UNLOCK(cpu) \
-do { \
- etm_writel(etm, cpu, CS_UNLOCK_MAGIC, CS_LAR); \
- mb(); \
-} while (0)
-
-
-#ifdef MODULE_PARAM_PREFIX
-#undef MODULE_PARAM_PREFIX
-#endif
-#define MODULE_PARAM_PREFIX "qdss."
-
-#ifdef CONFIG_MSM_QDSS_ETM_DEFAULT_ENABLE
-static int etm_boot_enable = 1;
-#else
-static int etm_boot_enable;
-#endif
-module_param_named(
- etm_boot_enable, etm_boot_enable, int, S_IRUGO
-);
-
-struct etm_ctx {
- void __iomem *base;
- bool enabled;
- struct wake_lock wake_lock;
- struct pm_qos_request qos_req;
- struct qdss_source *src;
- struct mutex mutex;
- struct device *dev;
- struct kobject *kobj;
- uint8_t arch;
- uint8_t nr_addr_cmp;
- uint8_t nr_cntr;
- uint8_t nr_ext_inp;
- uint8_t nr_ext_out;
- uint8_t nr_ctxid_cmp;
- uint8_t reset;
- uint32_t mode;
- uint32_t ctrl;
- uint32_t trigger_event;
- uint32_t startstop_ctrl;
- uint32_t enable_event;
- uint32_t enable_ctrl1;
- uint32_t fifofull_level;
- uint8_t addr_idx;
- uint32_t addr_val[ETM_MAX_ADDR_CMP];
- uint32_t addr_acctype[ETM_MAX_ADDR_CMP];
- uint32_t addr_type[ETM_MAX_ADDR_CMP];
- uint8_t cntr_idx;
- uint32_t cntr_rld_val[ETM_MAX_CNTR];
- uint32_t cntr_event[ETM_MAX_CNTR];
- uint32_t cntr_rld_event[ETM_MAX_CNTR];
- uint32_t cntr_val[ETM_MAX_CNTR];
- uint32_t seq_12_event;
- uint32_t seq_21_event;
- uint32_t seq_23_event;
- uint32_t seq_31_event;
- uint32_t seq_32_event;
- uint32_t seq_13_event;
- uint32_t seq_curr_state;
- uint8_t ctxid_idx;
- uint32_t ctxid_val[ETM_MAX_CTXID_CMP];
- uint32_t ctxid_mask;
- uint32_t sync_freq;
- uint32_t timestamp_event;
-};
-
-static struct etm_ctx etm = {
- .trigger_event = 0x406F,
- .enable_event = 0x6F,
- .enable_ctrl1 = 0x1,
- .fifofull_level = 0x28,
- .addr_val = {(uint32_t) _stext, (uint32_t) _etext},
- .addr_type = {ETM_ADDR_TYPE_RANGE, ETM_ADDR_TYPE_RANGE},
- .cntr_event = {[0 ... (ETM_MAX_CNTR - 1)] = 0x406F},
- .cntr_rld_event = {[0 ... (ETM_MAX_CNTR - 1)] = 0x406F},
- .seq_12_event = 0x406F,
- .seq_21_event = 0x406F,
- .seq_23_event = 0x406F,
- .seq_31_event = 0x406F,
- .seq_32_event = 0x406F,
- .seq_13_event = 0x406F,
- .sync_freq = 0x80,
- .timestamp_event = 0x406F,
-};
-
-
-/* ETM clock is derived from the processor clock and gets enabled on a
- * logical OR of below items on Krait (pass2 onwards):
- * 1.CPMR[ETMCLKEN] is 1
- * 2.ETMCR[PD] is 0
- * 3.ETMPDCR[PU] is 1
- * 4.Reset is asserted (core or debug)
- * 5.APB memory mapped requests (eg. EDAP access)
- *
- * 1., 2. and 3. above are permanent enables whereas 4. and 5. are temporary
- * enables
- *
- * We rely on 5. to be able to access ETMCR and then use 2. above for ETM
- * clock vote in the driver and the save-restore code uses 1. above
- * for its vote
- */
-static void etm_set_pwrdwn(int cpu)
-{
- uint32_t etmcr;
-
- etmcr = etm_readl(etm, cpu, ETMCR);
- etmcr |= BIT(0);
- etm_writel(etm, cpu, etmcr, ETMCR);
-}
-
-static void etm_clr_pwrdwn(int cpu)
-{
- uint32_t etmcr;
-
- etmcr = etm_readl(etm, cpu, ETMCR);
- etmcr &= ~BIT(0);
- etm_writel(etm, cpu, etmcr, ETMCR);
-}
-
-static void etm_set_prog(int cpu)
-{
- uint32_t etmcr;
- int count;
-
- etmcr = etm_readl(etm, cpu, ETMCR);
- etmcr |= BIT(10);
- etm_writel(etm, cpu, etmcr, ETMCR);
-
- for (count = TIMEOUT_US; BVAL(etm_readl(etm, cpu, ETMSR), 1) != 1
- && count > 0; count--)
- udelay(1);
- WARN(count == 0, "timeout while setting prog bit, ETMSR: %#x\n",
- etm_readl(etm, cpu, ETMSR));
-}
-
-static void etm_clr_prog(int cpu)
-{
- uint32_t etmcr;
- int count;
-
- etmcr = etm_readl(etm, cpu, ETMCR);
- etmcr &= ~BIT(10);
- etm_writel(etm, cpu, etmcr, ETMCR);
-
- for (count = TIMEOUT_US; BVAL(etm_readl(etm, cpu, ETMSR), 1) != 0
- && count > 0; count--)
- udelay(1);
- WARN(count == 0, "timeout while clearing prog bit, ETMSR: %#x\n",
- etm_readl(etm, cpu, ETMSR));
-}
-
-static void __etm_enable(int cpu)
-{
- int i;
-
- ETM_UNLOCK(cpu);
- /* Vote for ETM power/clock enable */
- etm_clr_pwrdwn(cpu);
- etm_set_prog(cpu);
-
- etm_writel(etm, cpu, etm.ctrl | BIT(10), ETMCR);
- etm_writel(etm, cpu, etm.trigger_event, ETMTRIGGER);
- etm_writel(etm, cpu, etm.startstop_ctrl, ETMTSSCR);
- etm_writel(etm, cpu, etm.enable_event, ETMTEEVR);
- etm_writel(etm, cpu, etm.enable_ctrl1, ETMTECR1);
- etm_writel(etm, cpu, etm.fifofull_level, ETMFFLR);
- for (i = 0; i < etm.nr_addr_cmp; i++) {
- etm_writel(etm, cpu, etm.addr_val[i], ETMACVRn(i));
- etm_writel(etm, cpu, etm.addr_acctype[i], ETMACTRn(i));
- }
- for (i = 0; i < etm.nr_cntr; i++) {
- etm_writel(etm, cpu, etm.cntr_rld_val[i], ETMCNTRLDVRn(i));
- etm_writel(etm, cpu, etm.cntr_event[i], ETMCNTENRn(i));
- etm_writel(etm, cpu, etm.cntr_rld_event[i], ETMCNTRLDEVRn(i));
- etm_writel(etm, cpu, etm.cntr_val[i], ETMCNTVRn(i));
- }
- etm_writel(etm, cpu, etm.seq_12_event, ETMSQ12EVR);
- etm_writel(etm, cpu, etm.seq_21_event, ETMSQ21EVR);
- etm_writel(etm, cpu, etm.seq_23_event, ETMSQ23EVR);
- etm_writel(etm, cpu, etm.seq_31_event, ETMSQ31EVR);
- etm_writel(etm, cpu, etm.seq_32_event, ETMSQ32EVR);
- etm_writel(etm, cpu, etm.seq_13_event, ETMSQ13EVR);
- etm_writel(etm, cpu, etm.seq_curr_state, ETMSQR);
- for (i = 0; i < etm.nr_ext_out; i++)
- etm_writel(etm, cpu, 0x0000406F, ETMEXTOUTEVRn(i));
- for (i = 0; i < etm.nr_ctxid_cmp; i++)
- etm_writel(etm, cpu, etm.ctxid_val[i], ETMCIDCVRn(i));
- etm_writel(etm, cpu, etm.ctxid_mask, ETMCIDCMR);
- etm_writel(etm, cpu, etm.sync_freq, ETMSYNCFR);
- etm_writel(etm, cpu, 0x00000000, ETMEXTINSELR);
- etm_writel(etm, cpu, etm.timestamp_event, ETMTSEVR);
- etm_writel(etm, cpu, 0x00000000, ETMAUXCR);
- etm_writel(etm, cpu, cpu+1, ETMTRACEIDR);
- etm_writel(etm, cpu, 0x00000000, ETMVMIDCVR);
-
- etm_clr_prog(cpu);
- ETM_LOCK(cpu);
-}
-
-static int etm_enable(void)
-{
- int ret, cpu;
-
- if (etm.enabled) {
- dev_err(etm.dev, "ETM tracing already enabled\n");
- ret = -EPERM;
- goto err;
- }
-
- wake_lock(&etm.wake_lock);
- /* 1. causes all online cpus to come out of idle PC
- * 2. prevents idle PC until save restore flag is enabled atomically
- *
- * we rely on the user to prevent hotplug on/off racing with this
- * operation and to ensure cores where trace is expected to be turned
- * on are already hotplugged on
- */
- pm_qos_update_request(&etm.qos_req, 0);
-
- ret = qdss_enable(etm.src);
- if (ret)
- goto err_qdss;
-
- for_each_online_cpu(cpu)
- __etm_enable(cpu);
-
- etm.enabled = true;
-
- pm_qos_update_request(&etm.qos_req, PM_QOS_DEFAULT_VALUE);
- wake_unlock(&etm.wake_lock);
-
- dev_info(etm.dev, "ETM tracing enabled\n");
- return 0;
-
-err_qdss:
- pm_qos_update_request(&etm.qos_req, PM_QOS_DEFAULT_VALUE);
- wake_unlock(&etm.wake_lock);
-err:
- return ret;
-}
-
-static void __etm_disable(int cpu)
-{
- ETM_UNLOCK(cpu);
- etm_set_prog(cpu);
-
- /* program trace enable to low by using always false event */
- etm_writel(etm, cpu, 0x6F | BIT(14), ETMTEEVR);
-
- /* Vote for ETM power/clock disable */
- etm_set_pwrdwn(cpu);
- ETM_LOCK(cpu);
-}
-
-static int etm_disable(void)
-{
- int ret, cpu;
-
- if (!etm.enabled) {
- dev_err(etm.dev, "ETM tracing already disabled\n");
- ret = -EPERM;
- goto err;
- }
-
- wake_lock(&etm.wake_lock);
- /* 1. causes all online cpus to come out of idle PC
- * 2. prevents idle PC until save restore flag is disabled atomically
- *
- * we rely on the user to prevent hotplug on/off racing with this
- * operation and to ensure cores where trace is expected to be turned
- * off are already hotplugged on
- */
- pm_qos_update_request(&etm.qos_req, 0);
-
- for_each_online_cpu(cpu)
- __etm_disable(cpu);
-
- qdss_disable(etm.src);
-
- etm.enabled = false;
-
- pm_qos_update_request(&etm.qos_req, PM_QOS_DEFAULT_VALUE);
- wake_unlock(&etm.wake_lock);
-
- dev_info(etm.dev, "ETM tracing disabled\n");
- return 0;
-err:
- return ret;
-}
-
-/* Memory mapped writes to clear os lock not supported */
-static void etm_os_unlock(void *unused)
-{
- unsigned long value = 0x0;
-
- asm("mcr p14, 1, %0, c1, c0, 4\n\t" : : "r" (value));
- asm("isb\n\t");
-}
-
-#define ETM_STORE(__name, mask) \
-static ssize_t __name##_store(struct kobject *kobj, \
- struct kobj_attribute *attr, \
- const char *buf, size_t n) \
-{ \
- unsigned long val; \
- \
- if (sscanf(buf, "%lx", &val) != 1) \
- return -EINVAL; \
- \
- etm.__name = val & mask; \
- return n; \
-}
-
-#define ETM_SHOW(__name) \
-static ssize_t __name##_show(struct kobject *kobj, \
- struct kobj_attribute *attr, \
- char *buf) \
-{ \
- unsigned long val = etm.__name; \
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); \
-}
-
-#define ETM_ATTR(__name) \
-static struct kobj_attribute __name##_attr = \
- __ATTR(__name, S_IRUGO | S_IWUSR, __name##_show, __name##_store)
-#define ETM_ATTR_RO(__name) \
-static struct kobj_attribute __name##_attr = \
- __ATTR(__name, S_IRUGO, __name##_show, NULL)
-
-static ssize_t enabled_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t n)
-{
- int ret = 0;
- unsigned long val;
-
- if (sscanf(buf, "%lx", &val) != 1)
- return -EINVAL;
-
- mutex_lock(&etm.mutex);
- if (val)
- ret = etm_enable();
- else
- ret = etm_disable();
- mutex_unlock(&etm.mutex);
-
- if (ret)
- return ret;
- return n;
-}
-ETM_SHOW(enabled);
-ETM_ATTR(enabled);
-
-ETM_SHOW(nr_addr_cmp);
-ETM_ATTR_RO(nr_addr_cmp);
-ETM_SHOW(nr_cntr);
-ETM_ATTR_RO(nr_cntr);
-ETM_SHOW(nr_ctxid_cmp);
-ETM_ATTR_RO(nr_ctxid_cmp);
-
-/* Reset to trace everything i.e. exclude nothing. */
-static ssize_t reset_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t n)
-{
- int i;
- unsigned long val;
-
- if (sscanf(buf, "%lx", &val) != 1)
- return -EINVAL;
-
- mutex_lock(&etm.mutex);
- if (val) {
- etm.mode = ETM_MODE_EXCLUDE;
- etm.ctrl = 0x0;
- if (cpu_is_krait_v1()) {
- etm.mode |= ETM_MODE_CYCACC;
- etm.ctrl |= BIT(12);
- }
- etm.trigger_event = 0x406F;
- etm.startstop_ctrl = 0x0;
- etm.enable_event = 0x6F;
- etm.enable_ctrl1 = 0x1000000;
- etm.fifofull_level = 0x28;
- etm.addr_idx = 0x0;
- for (i = 0; i < etm.nr_addr_cmp; i++) {
- etm.addr_val[i] = 0x0;
- etm.addr_acctype[i] = 0x0;
- etm.addr_type[i] = ETM_ADDR_TYPE_NONE;
- }
- etm.cntr_idx = 0x0;
- for (i = 0; i < etm.nr_cntr; i++) {
- etm.cntr_rld_val[i] = 0x0;
- etm.cntr_event[i] = 0x406F;
- etm.cntr_rld_event[i] = 0x406F;
- etm.cntr_val[i] = 0x0;
- }
- etm.seq_12_event = 0x406F;
- etm.seq_21_event = 0x406F;
- etm.seq_23_event = 0x406F;
- etm.seq_31_event = 0x406F;
- etm.seq_32_event = 0x406F;
- etm.seq_13_event = 0x406F;
- etm.seq_curr_state = 0x0;
- etm.ctxid_idx = 0x0;
- for (i = 0; i < etm.nr_ctxid_cmp; i++)
- etm.ctxid_val[i] = 0x0;
- etm.ctxid_mask = 0x0;
- etm.sync_freq = 0x80;
- etm.timestamp_event = 0x406F;
- }
- mutex_unlock(&etm.mutex);
- return n;
-}
-ETM_SHOW(reset);
-ETM_ATTR(reset);
-
-static ssize_t mode_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t n)
-{
- unsigned long val;
-
- if (sscanf(buf, "%lx", &val) != 1)
- return -EINVAL;
-
- mutex_lock(&etm.mutex);
- etm.mode = val & ETM_MODE_ALL;
-
- if (etm.mode & ETM_MODE_EXCLUDE)
- etm.enable_ctrl1 |= BIT(24);
- else
- etm.enable_ctrl1 &= ~BIT(24);
-
- if (etm.mode & ETM_MODE_CYCACC)
- etm.ctrl |= BIT(12);
- else
- etm.ctrl &= ~BIT(12);
-
- if (etm.mode & ETM_MODE_STALL)
- etm.ctrl |= BIT(7);
- else
- etm.ctrl &= ~BIT(7);
-
- if (etm.mode & ETM_MODE_TIMESTAMP)
- etm.ctrl |= BIT(28);
- else
- etm.ctrl &= ~BIT(28);
- if (etm.mode & ETM_MODE_CTXID)
- etm.ctrl |= (BIT(14) | BIT(15));
- else
- etm.ctrl &= ~(BIT(14) | BIT(15));
- mutex_unlock(&etm.mutex);
-
- return n;
-}
-ETM_SHOW(mode);
-ETM_ATTR(mode);
-
-ETM_STORE(trigger_event, ETM_EVENT_MASK);
-ETM_SHOW(trigger_event);
-ETM_ATTR(trigger_event);
-
-ETM_STORE(enable_event, ETM_EVENT_MASK);
-ETM_SHOW(enable_event);
-ETM_ATTR(enable_event);
-
-ETM_STORE(fifofull_level, ETM_ALL_MASK);
-ETM_SHOW(fifofull_level);
-ETM_ATTR(fifofull_level);
-
-static ssize_t addr_idx_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t n)
-{
- unsigned long val;
-
- if (sscanf(buf, "%lx", &val) != 1)
- return -EINVAL;
- if (val >= etm.nr_addr_cmp)
- return -EINVAL;
-
- /* Use mutex to ensure index doesn't change while it gets dereferenced
- * multiple times within a mutex block elsewhere.
- */
- mutex_lock(&etm.mutex);
- etm.addr_idx = val;
- mutex_unlock(&etm.mutex);
- return n;
-}
-ETM_SHOW(addr_idx);
-ETM_ATTR(addr_idx);
-
-static ssize_t addr_single_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t n)
-{
- unsigned long val;
- uint8_t idx;
-
- if (sscanf(buf, "%lx", &val) != 1)
- return -EINVAL;
-
- mutex_lock(&etm.mutex);
- idx = etm.addr_idx;
- if (!(etm.addr_type[idx] == ETM_ADDR_TYPE_NONE ||
- etm.addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
- mutex_unlock(&etm.mutex);
- return -EPERM;
- }
-
- etm.addr_val[idx] = val;
- etm.addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
- mutex_unlock(&etm.mutex);
- return n;
-}
-static ssize_t addr_single_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
-{
- unsigned long val;
- uint8_t idx;
-
- mutex_lock(&etm.mutex);
- idx = etm.addr_idx;
- if (!(etm.addr_type[idx] == ETM_ADDR_TYPE_NONE ||
- etm.addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
- mutex_unlock(&etm.mutex);
- return -EPERM;
- }
-
- val = etm.addr_val[idx];
- mutex_unlock(&etm.mutex);
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-ETM_ATTR(addr_single);
-
-static ssize_t addr_range_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t n)
-{
- unsigned long val1, val2;
- uint8_t idx;
-
- if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
- return -EINVAL;
- /* lower address comparator cannot have a higher address value */
- if (val1 > val2)
- return -EINVAL;
-
- mutex_lock(&etm.mutex);
- idx = etm.addr_idx;
- if (idx % 2 != 0) {
- mutex_unlock(&etm.mutex);
- return -EPERM;
- }
- if (!((etm.addr_type[idx] == ETM_ADDR_TYPE_NONE &&
- etm.addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
- (etm.addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
- etm.addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
- mutex_unlock(&etm.mutex);
- return -EPERM;
- }
-
- etm.addr_val[idx] = val1;
- etm.addr_type[idx] = ETM_ADDR_TYPE_RANGE;
- etm.addr_val[idx + 1] = val2;
- etm.addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
- etm.enable_ctrl1 |= (1 << (idx/2));
- mutex_unlock(&etm.mutex);
- return n;
-}
-static ssize_t addr_range_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
-{
- unsigned long val1, val2;
- uint8_t idx;
-
- mutex_lock(&etm.mutex);
- idx = etm.addr_idx;
- if (idx % 2 != 0) {
- mutex_unlock(&etm.mutex);
- return -EPERM;
- }
- if (!((etm.addr_type[idx] == ETM_ADDR_TYPE_NONE &&
- etm.addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
- (etm.addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
- etm.addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
- mutex_unlock(&etm.mutex);
- return -EPERM;
- }
-
- val1 = etm.addr_val[idx];
- val2 = etm.addr_val[idx + 1];
- mutex_unlock(&etm.mutex);
- return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
-}
-ETM_ATTR(addr_range);
-
-static ssize_t addr_start_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t n)
-{
- unsigned long val;
- uint8_t idx;
-
- if (sscanf(buf, "%lx", &val) != 1)
- return -EINVAL;
-
- mutex_lock(&etm.mutex);
- idx = etm.addr_idx;
- if (!(etm.addr_type[idx] == ETM_ADDR_TYPE_NONE ||
- etm.addr_type[idx] == ETM_ADDR_TYPE_START)) {
- mutex_unlock(&etm.mutex);
- return -EPERM;
- }
-
- etm.addr_val[idx] = val;
- etm.addr_type[idx] = ETM_ADDR_TYPE_START;
- etm.startstop_ctrl |= (1 << idx);
- etm.enable_ctrl1 |= BIT(25);
- mutex_unlock(&etm.mutex);
- return n;
-}
-static ssize_t addr_start_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
-{
- unsigned long val;
- uint8_t idx;
-
- mutex_lock(&etm.mutex);
- idx = etm.addr_idx;
- if (!(etm.addr_type[idx] == ETM_ADDR_TYPE_NONE ||
- etm.addr_type[idx] == ETM_ADDR_TYPE_START)) {
- mutex_unlock(&etm.mutex);
- return -EPERM;
- }
-
- val = etm.addr_val[idx];
- mutex_unlock(&etm.mutex);
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-ETM_ATTR(addr_start);
-
-static ssize_t addr_stop_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t n)
-{
- unsigned long val;
- uint8_t idx;
-
- if (sscanf(buf, "%lx", &val) != 1)
- return -EINVAL;
-
- mutex_lock(&etm.mutex);
- idx = etm.addr_idx;
- if (!(etm.addr_type[idx] == ETM_ADDR_TYPE_NONE ||
- etm.addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
- mutex_unlock(&etm.mutex);
- return -EPERM;
- }
-
- etm.addr_val[idx] = val;
- etm.addr_type[idx] = ETM_ADDR_TYPE_STOP;
- etm.startstop_ctrl |= (1 << (idx + 16));
- etm.enable_ctrl1 |= BIT(25);
- mutex_unlock(&etm.mutex);
- return n;
-}
-static ssize_t addr_stop_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
-{
- unsigned long val;
- uint8_t idx;
-
- mutex_lock(&etm.mutex);
- idx = etm.addr_idx;
- if (!(etm.addr_type[idx] == ETM_ADDR_TYPE_NONE ||
- etm.addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
- mutex_unlock(&etm.mutex);
- return -EPERM;
- }
-
- val = etm.addr_val[idx];
- mutex_unlock(&etm.mutex);
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-ETM_ATTR(addr_stop);
-
-static ssize_t addr_acctype_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t n)
-{
- unsigned long val;
-
- if (sscanf(buf, "%lx", &val) != 1)
- return -EINVAL;
-
- mutex_lock(&etm.mutex);
- etm.addr_acctype[etm.addr_idx] = val;
- mutex_unlock(&etm.mutex);
- return n;
-}
-static ssize_t addr_acctype_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
-{
- unsigned long val;
-
- mutex_lock(&etm.mutex);
- val = etm.addr_acctype[etm.addr_idx];
- mutex_unlock(&etm.mutex);
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-ETM_ATTR(addr_acctype);
-
-static ssize_t cntr_idx_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t n)
-{
- unsigned long val;
-
- if (sscanf(buf, "%lx", &val) != 1)
- return -EINVAL;
- if (val >= etm.nr_cntr)
- return -EINVAL;
-
- /* Use mutex to ensure index doesn't change while it gets dereferenced
- * multiple times within a mutex block elsewhere.
- */
- mutex_lock(&etm.mutex);
- etm.cntr_idx = val;
- mutex_unlock(&etm.mutex);
- return n;
-}
-ETM_SHOW(cntr_idx);
-ETM_ATTR(cntr_idx);
-
-static ssize_t cntr_rld_val_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t n)
-{
- unsigned long val;
-
- if (sscanf(buf, "%lx", &val) != 1)
- return -EINVAL;
-
- mutex_lock(&etm.mutex);
- etm.cntr_rld_val[etm.cntr_idx] = val;
- mutex_unlock(&etm.mutex);
- return n;
-}
-static ssize_t cntr_rld_val_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
-{
- unsigned long val;
- mutex_lock(&etm.mutex);
- val = etm.cntr_rld_val[etm.cntr_idx];
- mutex_unlock(&etm.mutex);
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-ETM_ATTR(cntr_rld_val);
-
-static ssize_t cntr_event_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t n)
-{
- unsigned long val;
-
- if (sscanf(buf, "%lx", &val) != 1)
- return -EINVAL;
-
- mutex_lock(&etm.mutex);
- etm.cntr_event[etm.cntr_idx] = val & ETM_EVENT_MASK;
- mutex_unlock(&etm.mutex);
- return n;
-}
-static ssize_t cntr_event_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
-{
- unsigned long val;
-
- mutex_lock(&etm.mutex);
- val = etm.cntr_event[etm.cntr_idx];
- mutex_unlock(&etm.mutex);
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-ETM_ATTR(cntr_event);
-
-static ssize_t cntr_rld_event_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t n)
-{
- unsigned long val;
-
- if (sscanf(buf, "%lx", &val) != 1)
- return -EINVAL;
-
- mutex_lock(&etm.mutex);
- etm.cntr_rld_event[etm.cntr_idx] = val & ETM_EVENT_MASK;
- mutex_unlock(&etm.mutex);
- return n;
-}
-static ssize_t cntr_rld_event_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
-{
- unsigned long val;
-
- mutex_lock(&etm.mutex);
- val = etm.cntr_rld_event[etm.cntr_idx];
- mutex_unlock(&etm.mutex);
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-ETM_ATTR(cntr_rld_event);
-
-static ssize_t cntr_val_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t n)
-{
- unsigned long val;
-
- if (sscanf(buf, "%lx", &val) != 1)
- return -EINVAL;
-
- mutex_lock(&etm.mutex);
- etm.cntr_val[etm.cntr_idx] = val;
- mutex_unlock(&etm.mutex);
- return n;
-}
-static ssize_t cntr_val_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
-{
- unsigned long val;
-
- mutex_lock(&etm.mutex);
- val = etm.cntr_val[etm.cntr_idx];
- mutex_unlock(&etm.mutex);
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-ETM_ATTR(cntr_val);
-
-ETM_STORE(seq_12_event, ETM_EVENT_MASK);
-ETM_SHOW(seq_12_event);
-ETM_ATTR(seq_12_event);
-
-ETM_STORE(seq_21_event, ETM_EVENT_MASK);
-ETM_SHOW(seq_21_event);
-ETM_ATTR(seq_21_event);
-
-ETM_STORE(seq_23_event, ETM_EVENT_MASK);
-ETM_SHOW(seq_23_event);
-ETM_ATTR(seq_23_event);
-
-ETM_STORE(seq_31_event, ETM_EVENT_MASK);
-ETM_SHOW(seq_31_event);
-ETM_ATTR(seq_31_event);
-
-ETM_STORE(seq_32_event, ETM_EVENT_MASK);
-ETM_SHOW(seq_32_event);
-ETM_ATTR(seq_32_event);
-
-ETM_STORE(seq_13_event, ETM_EVENT_MASK);
-ETM_SHOW(seq_13_event);
-ETM_ATTR(seq_13_event);
-
-static ssize_t seq_curr_state_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t n)
-{
- unsigned long val;
-
- if (sscanf(buf, "%lx", &val) != 1)
- return -EINVAL;
- if (val > ETM_SEQ_STATE_MAX_VAL)
- return -EINVAL;
-
- etm.seq_curr_state = val;
- return n;
-}
-ETM_SHOW(seq_curr_state);
-ETM_ATTR(seq_curr_state);
-
-static ssize_t ctxid_idx_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t n)
-{
- unsigned long val;
-
- if (sscanf(buf, "%lx", &val) != 1)
- return -EINVAL;
- if (val >= etm.nr_ctxid_cmp)
- return -EINVAL;
-
- /* Use mutex to ensure index doesn't change while it gets dereferenced
- * multiple times within a mutex block elsewhere.
- */
- mutex_lock(&etm.mutex);
- etm.ctxid_idx = val;
- mutex_unlock(&etm.mutex);
- return n;
-}
-ETM_SHOW(ctxid_idx);
-ETM_ATTR(ctxid_idx);
-
-static ssize_t ctxid_val_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t n)
-{
- unsigned long val;
-
- if (sscanf(buf, "%lx", &val) != 1)
- return -EINVAL;
-
- mutex_lock(&etm.mutex);
- etm.ctxid_val[etm.ctxid_idx] = val;
- mutex_unlock(&etm.mutex);
- return n;
-}
-static ssize_t ctxid_val_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
-{
- unsigned long val;
-
- mutex_lock(&etm.mutex);
- val = etm.ctxid_val[etm.ctxid_idx];
- mutex_unlock(&etm.mutex);
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-ETM_ATTR(ctxid_val);
-
-ETM_STORE(ctxid_mask, ETM_ALL_MASK);
-ETM_SHOW(ctxid_mask);
-ETM_ATTR(ctxid_mask);
-
-ETM_STORE(sync_freq, ETM_SYNC_MASK);
-ETM_SHOW(sync_freq);
-ETM_ATTR(sync_freq);
-
-ETM_STORE(timestamp_event, ETM_EVENT_MASK);
-ETM_SHOW(timestamp_event);
-ETM_ATTR(timestamp_event);
-
-static struct attribute *etm_attrs[] = {
- &nr_addr_cmp_attr.attr,
- &nr_cntr_attr.attr,
- &nr_ctxid_cmp_attr.attr,
- &reset_attr.attr,
- &mode_attr.attr,
- &trigger_event_attr.attr,
- &enable_event_attr.attr,
- &fifofull_level_attr.attr,
- &addr_idx_attr.attr,
- &addr_single_attr.attr,
- &addr_range_attr.attr,
- &addr_start_attr.attr,
- &addr_stop_attr.attr,
- &addr_acctype_attr.attr,
- &cntr_idx_attr.attr,
- &cntr_rld_val_attr.attr,
- &cntr_event_attr.attr,
- &cntr_rld_event_attr.attr,
- &cntr_val_attr.attr,
- &seq_12_event_attr.attr,
- &seq_21_event_attr.attr,
- &seq_23_event_attr.attr,
- &seq_31_event_attr.attr,
- &seq_32_event_attr.attr,
- &seq_13_event_attr.attr,
- &seq_curr_state_attr.attr,
- &ctxid_idx_attr.attr,
- &ctxid_val_attr.attr,
- &ctxid_mask_attr.attr,
- &sync_freq_attr.attr,
-	&timestamp_event_attr.attr,
- NULL,
-};
-
-static struct attribute_group etm_attr_grp = {
- .attrs = etm_attrs,
-};
-
-static int __devinit etm_sysfs_init(void)
-{
- int ret;
-
- etm.kobj = kobject_create_and_add("etm", qdss_get_modulekobj());
- if (!etm.kobj) {
- dev_err(etm.dev, "failed to create ETM sysfs kobject\n");
- ret = -ENOMEM;
- goto err_create;
- }
-
- ret = sysfs_create_file(etm.kobj, &enabled_attr.attr);
- if (ret) {
- dev_err(etm.dev, "failed to create ETM sysfs enabled"
- " attribute\n");
- goto err_file;
- }
-
- if (sysfs_create_group(etm.kobj, &etm_attr_grp))
- dev_err(etm.dev, "failed to create ETM sysfs group\n");
-
- return 0;
-err_file:
- kobject_put(etm.kobj);
-err_create:
- return ret;
-}
-
-static void __devexit etm_sysfs_exit(void)
-{
- sysfs_remove_group(etm.kobj, &etm_attr_grp);
- sysfs_remove_file(etm.kobj, &enabled_attr.attr);
- kobject_put(etm.kobj);
-}
-
-static bool __devinit etm_arch_supported(uint8_t arch)
-{
- switch (arch) {
- case PFT_ARCH_V1_1:
- break;
- default:
- return false;
- }
- return true;
-}
-
-static int __devinit etm_arch_init(void)
-{
- int ret, i;
- /* use cpu 0 for setup */
- int cpu = 0;
- uint32_t etmidr;
- uint32_t etmccr;
-
- /* Unlock OS lock first to allow memory mapped reads and writes */
- etm_os_unlock(NULL);
- smp_call_function(etm_os_unlock, NULL, 1);
- ETM_UNLOCK(cpu);
- /* Vote for ETM power/clock enable */
- etm_clr_pwrdwn(cpu);
- /* Set prog bit. It will be set from reset but this is included to
- * ensure it is set
- */
- etm_set_prog(cpu);
-
- /* find all capabilities */
- etmidr = etm_readl(etm, cpu, ETMIDR);
- etm.arch = BMVAL(etmidr, 4, 11);
- if (etm_arch_supported(etm.arch) == false) {
- ret = -EINVAL;
- goto err;
- }
-
- etmccr = etm_readl(etm, cpu, ETMCCR);
- etm.nr_addr_cmp = BMVAL(etmccr, 0, 3) * 2;
- etm.nr_cntr = BMVAL(etmccr, 13, 15);
- etm.nr_ext_inp = BMVAL(etmccr, 17, 19);
- etm.nr_ext_out = BMVAL(etmccr, 20, 22);
- etm.nr_ctxid_cmp = BMVAL(etmccr, 24, 25);
-
- if (cpu_is_krait_v1()) {
- /* Krait pass1 doesn't support include filtering and non-cycle
- * accurate tracing
- */
- etm.mode = (ETM_MODE_EXCLUDE | ETM_MODE_CYCACC);
- etm.ctrl = 0x1000;
- etm.enable_ctrl1 = 0x1000000;
- for (i = 0; i < etm.nr_addr_cmp; i++) {
- etm.addr_val[i] = 0x0;
- etm.addr_acctype[i] = 0x0;
- etm.addr_type[i] = ETM_ADDR_TYPE_NONE;
- }
- }
-
- /* Vote for ETM power/clock disable */
- etm_set_pwrdwn(cpu);
- ETM_LOCK(cpu);
-
- return 0;
-err:
- return ret;
-}
-
-static int __devinit etm_probe(struct platform_device *pdev)
-{
- int ret;
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ret = -EINVAL;
- goto err_res;
- }
-
- etm.base = ioremap_nocache(res->start, resource_size(res));
- if (!etm.base) {
- ret = -EINVAL;
- goto err_ioremap;
- }
-
- etm.dev = &pdev->dev;
-
- mutex_init(&etm.mutex);
- wake_lock_init(&etm.wake_lock, WAKE_LOCK_SUSPEND, "msm_etm");
- pm_qos_add_request(&etm.qos_req, PM_QOS_CPU_DMA_LATENCY,
- PM_QOS_DEFAULT_VALUE);
- etm.src = qdss_get("msm_etm");
- if (IS_ERR(etm.src)) {
- ret = PTR_ERR(etm.src);
- goto err_qdssget;
- }
-
- ret = qdss_clk_enable();
- if (ret)
- goto err_clk;
-
- ret = etm_arch_init();
- if (ret)
- goto err_arch;
-
- ret = etm_sysfs_init();
- if (ret)
- goto err_sysfs;
-
- etm.enabled = false;
-
- qdss_clk_disable();
-
- dev_info(etm.dev, "ETM initialized\n");
-
- if (etm_boot_enable)
- etm_enable();
-
- return 0;
-
-err_sysfs:
-err_arch:
- qdss_clk_disable();
-err_clk:
- qdss_put(etm.src);
-err_qdssget:
- pm_qos_remove_request(&etm.qos_req);
- wake_lock_destroy(&etm.wake_lock);
- mutex_destroy(&etm.mutex);
- iounmap(etm.base);
-err_ioremap:
-err_res:
- dev_err(etm.dev, "ETM init failed\n");
- return ret;
-}
-
-static int __devexit etm_remove(struct platform_device *pdev)
-{
- if (etm.enabled)
- etm_disable();
- etm_sysfs_exit();
- qdss_put(etm.src);
- pm_qos_remove_request(&etm.qos_req);
- wake_lock_destroy(&etm.wake_lock);
- mutex_destroy(&etm.mutex);
- iounmap(etm.base);
-
- return 0;
-}
-
-static struct platform_driver etm_driver = {
- .probe = etm_probe,
- .remove = __devexit_p(etm_remove),
- .driver = {
- .name = "msm_etm",
- },
-};
-
-int __init etm_init(void)
-{
- return platform_driver_register(&etm_driver);
-}
-module_init(etm_init);
-
-void __exit etm_exit(void)
-{
- platform_driver_unregister(&etm_driver);
-}
-module_exit(etm_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CoreSight Program Flow Trace driver");
diff --git a/arch/arm/mach-msm/qdss-funnel.c b/arch/arm/mach-msm/qdss-funnel.c
deleted file mode 100644
index 52eb2b6..0000000
--- a/arch/arm/mach-msm/qdss-funnel.c
+++ /dev/null
@@ -1,232 +0,0 @@
-/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/err.h>
-
-#include "qdss-priv.h"
-
-#define funnel_writel(funnel, id, val, off) \
- __raw_writel((val), funnel.base + (SZ_4K * id) + off)
-#define funnel_readl(funnel, id, off) \
- __raw_readl(funnel.base + (SZ_4K * id) + off)
-
-#define FUNNEL_FUNCTL (0x000)
-#define FUNNEL_PRICTL (0x004)
-#define FUNNEL_ITATBDATA0 (0xEEC)
-#define FUNNEL_ITATBCTR2 (0xEF0)
-#define FUNNEL_ITATBCTR1 (0xEF4)
-#define FUNNEL_ITATBCTR0 (0xEF8)
-
-
-#define FUNNEL_LOCK(id) \
-do { \
- mb(); \
- funnel_writel(funnel, id, 0x0, CS_LAR); \
-} while (0)
-#define FUNNEL_UNLOCK(id) \
-do { \
- funnel_writel(funnel, id, CS_UNLOCK_MAGIC, CS_LAR); \
- mb(); \
-} while (0)
-
-#define FUNNEL_HOLDTIME_MASK (0xF00)
-#define FUNNEL_HOLDTIME_SHFT (0x8)
-#define FUNNEL_HOLDTIME (0x7 << FUNNEL_HOLDTIME_SHFT)
-
-struct funnel_ctx {
- void __iomem *base;
- bool enabled;
- struct mutex mutex;
- struct device *dev;
- struct kobject *kobj;
- uint32_t priority;
-};
-
-static struct funnel_ctx funnel;
-
-static void __funnel_enable(uint8_t id, uint32_t port_mask)
-{
- uint32_t functl;
-
- FUNNEL_UNLOCK(id);
-
- functl = funnel_readl(funnel, id, FUNNEL_FUNCTL);
- functl &= ~FUNNEL_HOLDTIME_MASK;
- functl |= FUNNEL_HOLDTIME;
- functl |= port_mask;
- funnel_writel(funnel, id, functl, FUNNEL_FUNCTL);
- funnel_writel(funnel, id, funnel.priority, FUNNEL_PRICTL);
-
- FUNNEL_LOCK(id);
-}
-
-void funnel_enable(uint8_t id, uint32_t port_mask)
-{
- mutex_lock(&funnel.mutex);
- __funnel_enable(id, port_mask);
- funnel.enabled = true;
- dev_info(funnel.dev, "FUNNEL port mask 0x%lx enabled\n",
- (unsigned long) port_mask);
- mutex_unlock(&funnel.mutex);
-}
-
-static void __funnel_disable(uint8_t id, uint32_t port_mask)
-{
- uint32_t functl;
-
- FUNNEL_UNLOCK(id);
-
- functl = funnel_readl(funnel, id, FUNNEL_FUNCTL);
- functl &= ~port_mask;
- funnel_writel(funnel, id, functl, FUNNEL_FUNCTL);
-
- FUNNEL_LOCK(id);
-}
-
-void funnel_disable(uint8_t id, uint32_t port_mask)
-{
- mutex_lock(&funnel.mutex);
- __funnel_disable(id, port_mask);
- funnel.enabled = false;
- dev_info(funnel.dev, "FUNNEL port mask 0x%lx disabled\n",
- (unsigned long) port_mask);
- mutex_unlock(&funnel.mutex);
-}
-
-#define FUNNEL_ATTR(__name) \
-static struct kobj_attribute __name##_attr = \
- __ATTR(__name, S_IRUGO | S_IWUSR, __name##_show, __name##_store)
-
-static ssize_t priority_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t n)
-{
- unsigned long val;
-
- if (sscanf(buf, "%lx", &val) != 1)
- return -EINVAL;
-
- funnel.priority = val;
- return n;
-}
-static ssize_t priority_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
-{
- unsigned long val = funnel.priority;
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-FUNNEL_ATTR(priority);
-
-static int __devinit funnel_sysfs_init(void)
-{
- int ret;
-
- funnel.kobj = kobject_create_and_add("funnel", qdss_get_modulekobj());
- if (!funnel.kobj) {
- dev_err(funnel.dev, "failed to create FUNNEL sysfs kobject\n");
- ret = -ENOMEM;
- goto err_create;
- }
-
- ret = sysfs_create_file(funnel.kobj, &priority_attr.attr);
- if (ret) {
- dev_err(funnel.dev, "failed to create FUNNEL sysfs priority"
- " attribute\n");
- goto err_file;
- }
-
- return 0;
-err_file:
- kobject_put(funnel.kobj);
-err_create:
- return ret;
-}
-
-static void __devexit funnel_sysfs_exit(void)
-{
- sysfs_remove_file(funnel.kobj, &priority_attr.attr);
- kobject_put(funnel.kobj);
-}
-
-static int __devinit funnel_probe(struct platform_device *pdev)
-{
- int ret;
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ret = -EINVAL;
- goto err_res;
- }
-
- funnel.base = ioremap_nocache(res->start, resource_size(res));
- if (!funnel.base) {
- ret = -EINVAL;
- goto err_ioremap;
- }
-
- funnel.dev = &pdev->dev;
-
- mutex_init(&funnel.mutex);
-
- funnel_sysfs_init();
-
- dev_info(funnel.dev, "FUNNEL initialized\n");
- return 0;
-
-err_ioremap:
-err_res:
- dev_err(funnel.dev, "FUNNEL init failed\n");
- return ret;
-}
-
-static int __devexit funnel_remove(struct platform_device *pdev)
-{
- if (funnel.enabled)
- funnel_disable(0x0, 0xFF);
- funnel_sysfs_exit();
- mutex_destroy(&funnel.mutex);
- iounmap(funnel.base);
-
- return 0;
-}
-
-static struct platform_driver funnel_driver = {
- .probe = funnel_probe,
- .remove = __devexit_p(funnel_remove),
- .driver = {
- .name = "msm_funnel",
- },
-};
-
-static int __init funnel_init(void)
-{
- return platform_driver_register(&funnel_driver);
-}
-module_init(funnel_init);
-
-static void __exit funnel_exit(void)
-{
- platform_driver_unregister(&funnel_driver);
-}
-module_exit(funnel_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CoreSight Funnel driver");
diff --git a/arch/arm/mach-msm/qdss-priv.h b/arch/arm/mach-msm/qdss-priv.h
deleted file mode 100644
index f39bc52..0000000
--- a/arch/arm/mach-msm/qdss-priv.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _ARCH_ARM_MACH_MSM_QDSS_H_
-#define _ARCH_ARM_MACH_MSM_QDSS_H_
-
-#include <linux/bitops.h>
-#include <mach/qdss.h>
-
-/* Coresight management registers (0xF00-0xFCC)
- * 0xFA0 - 0xFA4: Management registers in PFTv1.0
- * Trace registers in PFTv1.1
- */
-#define CS_ITCTRL (0xF00)
-#define CS_CLAIMSET (0xFA0)
-#define CS_CLAIMCLR (0xFA4)
-#define CS_LAR (0xFB0)
-#define CS_LSR (0xFB4)
-#define CS_AUTHSTATUS (0xFB8)
-#define CS_DEVID (0xFC8)
-#define CS_DEVTYPE (0xFCC)
-/* Peripheral id registers (0xFD0-0xFEC) */
-#define CS_PIDR4 (0xFD0)
-#define CS_PIDR5 (0xFD4)
-#define CS_PIDR6 (0xFD8)
-#define CS_PIDR7 (0xFDC)
-#define CS_PIDR0 (0xFE0)
-#define CS_PIDR1 (0xFE4)
-#define CS_PIDR2 (0xFE8)
-#define CS_PIDR3 (0xFEC)
-/* Component id registers (0xFF0-0xFFC) */
-#define CS_CIDR0 (0xFF0)
-#define CS_CIDR1 (0xFF4)
-#define CS_CIDR2 (0xFF8)
-#define CS_CIDR3 (0xFFC)
-
-/* DBGv7 with baseline CP14 registers implemented */
-#define ARM_DEBUG_ARCH_V7B (0x3)
-/* DBGv7 with all CP14 registers implemented */
-#define ARM_DEBUG_ARCH_V7 (0x4)
-#define ARM_DEBUG_ARCH_V7_1 (0x5)
-#define ETM_ARCH_V3_3 (0x23)
-#define PFT_ARCH_V1_1 (0x31)
-
-#define TIMEOUT_US (100)
-#define CS_UNLOCK_MAGIC (0xC5ACCE55)
-
-#define BM(lsb, msb) ((BIT(msb) - BIT(lsb)) + BIT(msb))
-#define BMVAL(val, lsb, msb) ((val & BM(lsb, msb)) >> lsb)
-#define BVAL(val, n) ((val & BIT(n)) >> n)
-
-void etb_enable(void);
-void etb_disable(void);
-void etb_dump(void);
-void tpiu_disable(void);
-void funnel_enable(uint8_t id, uint32_t port_mask);
-void funnel_disable(uint8_t id, uint32_t port_mask);
-
-struct kobject *qdss_get_modulekobj(void);
-
-#endif
diff --git a/arch/arm/mach-msm/qdss.c b/arch/arm/mach-msm/qdss.c
deleted file mode 100644
index fd1fc2b..0000000
--- a/arch/arm/mach-msm/qdss.c
+++ /dev/null
@@ -1,408 +0,0 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/err.h>
-#include <linux/export.h>
-#include <mach/rpm.h>
-
-#include "rpm_resources.h"
-#include "qdss-priv.h"
-
-#define MAX_STR_LEN (65535)
-
-enum {
- QDSS_CLK_OFF,
- QDSS_CLK_ON_DBG,
- QDSS_CLK_ON_HSDBG,
-};
-
-/*
- * Exclusion rules for structure fields.
- *
- * S: qdss.sources_mutex protected.
- * I: qdss.sink_mutex protected.
- * C: qdss.clk_mutex protected.
- */
-struct qdss_ctx {
- struct kobject *modulekobj;
- struct msm_qdss_platform_data *pdata;
- struct list_head sources; /* S: sources list */
- struct mutex sources_mutex;
- uint8_t sink_count; /* I: sink count */
- struct mutex sink_mutex;
- uint8_t max_clk;
- uint8_t clk_count; /* C: clk count */
- struct mutex clk_mutex;
-};
-
-static struct qdss_ctx qdss;
-
-/**
- * qdss_get - get the qdss source handle
- * @name: name of the qdss source
- *
- * Searches the sources list to get the qdss source handle for this source.
- *
- * CONTEXT:
- * Typically called from init or probe functions
- *
- * RETURNS:
- * pointer to struct qdss_source on success, %NULL on failure
- */
-struct qdss_source *qdss_get(const char *name)
-{
- struct qdss_source *src, *source = NULL;
-
- mutex_lock(&qdss.sources_mutex);
- list_for_each_entry(src, &qdss.sources, link) {
- if (src->name) {
- if (strncmp(src->name, name, MAX_STR_LEN))
- continue;
- source = src;
- break;
- }
- }
- mutex_unlock(&qdss.sources_mutex);
-
- return source ? source : ERR_PTR(-ENOENT);
-}
-EXPORT_SYMBOL(qdss_get);
-
-/**
- * qdss_put - release the qdss source handle
- * @name: name of the qdss source
- *
- * CONTEXT:
- * Typically called from driver remove or exit functions
- */
-void qdss_put(struct qdss_source *src)
-{
-}
-EXPORT_SYMBOL(qdss_put);
-
-/**
- * qdss_enable - enable qdss for the source
- * @src: handle for the source making the call
- *
- * Enables qdss block (relevant funnel ports and sink) if not already
- * enabled, otherwise increments the reference count
- *
- * CONTEXT:
- * Might sleep. Uses a mutex lock. Should be called from a non-atomic context.
- *
- * RETURNS:
- * 0 on success, non-zero on failure
- */
-int qdss_enable(struct qdss_source *src)
-{
- int ret;
-
- if (!src)
- return -EINVAL;
-
- ret = qdss_clk_enable();
- if (ret)
- goto err;
-
- if ((qdss.pdata)->afamily) {
- mutex_lock(&qdss.sink_mutex);
- if (qdss.sink_count == 0) {
- etb_disable();
- tpiu_disable();
- /* enable ETB first to avoid losing any trace data */
- etb_enable();
- }
- qdss.sink_count++;
- mutex_unlock(&qdss.sink_mutex);
- }
-
- funnel_enable(0x0, src->fport_mask);
- return 0;
-err:
- return ret;
-}
-EXPORT_SYMBOL(qdss_enable);
-
-/**
- * qdss_disable - disable qdss for the source
- * @src: handle for the source making the call
- *
- * Disables qdss block (relevant funnel ports and sink) if the reference count
- * is one, otherwise decrements the reference count
- *
- * CONTEXT:
- * Might sleep. Uses a mutex lock. Should be called from a non-atomic context.
- */
-void qdss_disable(struct qdss_source *src)
-{
- if (!src)
- return;
-
- if ((qdss.pdata)->afamily) {
- mutex_lock(&qdss.sink_mutex);
- if (WARN(qdss.sink_count == 0, "qdss is unbalanced\n"))
- goto out;
- if (qdss.sink_count == 1) {
- etb_dump();
- etb_disable();
- }
- qdss.sink_count--;
- mutex_unlock(&qdss.sink_mutex);
- }
-
- funnel_disable(0x0, src->fport_mask);
- qdss_clk_disable();
- return;
-out:
- mutex_unlock(&qdss.sink_mutex);
-}
-EXPORT_SYMBOL(qdss_disable);
-
-/**
- * qdss_disable_sink - force disable the current qdss sink(s)
- *
- * Force disable the current qdss sink(s) to stop the sink from accepting any
- * trace generated subsequent to this call. This function should only be used
- * as a way to stop the sink from getting polluted with trace data that is
- * uninteresting after an event of interest has occured.
- *
- * CONTEXT:
- * Can be called from atomic or non-atomic context.
- */
-void qdss_disable_sink(void)
-{
- if ((qdss.pdata)->afamily) {
- etb_dump();
- etb_disable();
- }
-}
-EXPORT_SYMBOL(qdss_disable_sink);
-
-/**
- * qdss_clk_enable - enable qdss clocks
- *
- * Enables qdss clocks via RPM if they aren't already enabled, otherwise
- * increments the reference count.
- *
- * CONTEXT:
- * Might sleep. Uses a mutex lock. Should be called from a non-atomic context.
- *
- * RETURNS:
- * 0 on success, non-zero on failure
- */
-int qdss_clk_enable(void)
-{
- int ret;
- struct msm_rpm_iv_pair iv;
-
- mutex_lock(&qdss.clk_mutex);
- if (qdss.clk_count == 0) {
- iv.id = MSM_RPM_ID_QDSS_CLK;
- if (qdss.max_clk)
- iv.value = QDSS_CLK_ON_HSDBG;
- else
- iv.value = QDSS_CLK_ON_DBG;
- ret = msm_rpmrs_set(MSM_RPM_CTX_SET_0, &iv, 1);
- if (WARN(ret, "qdss clks not enabled (%d)\n", ret))
- goto err_clk;
- }
- qdss.clk_count++;
- mutex_unlock(&qdss.clk_mutex);
- return 0;
-err_clk:
- mutex_unlock(&qdss.clk_mutex);
- return ret;
-}
-EXPORT_SYMBOL(qdss_clk_enable);
-
-/**
- * qdss_clk_disable - disable qdss clocks
- *
- * Disables qdss clocks via RPM if the reference count is one, otherwise
- * decrements the reference count.
- *
- * CONTEXT:
- * Might sleep. Uses a mutex lock. Should be called from a non-atomic context.
- */
-void qdss_clk_disable(void)
-{
- int ret;
- struct msm_rpm_iv_pair iv;
-
- mutex_lock(&qdss.clk_mutex);
- if (WARN(qdss.clk_count == 0, "qdss clks are unbalanced\n"))
- goto out;
- if (qdss.clk_count == 1) {
- iv.id = MSM_RPM_ID_QDSS_CLK;
- iv.value = QDSS_CLK_OFF;
- ret = msm_rpmrs_set(MSM_RPM_CTX_SET_0, &iv, 1);
- WARN(ret, "qdss clks not disabled (%d)\n", ret);
- }
- qdss.clk_count--;
-out:
- mutex_unlock(&qdss.clk_mutex);
-}
-EXPORT_SYMBOL(qdss_clk_disable);
-
-struct kobject *qdss_get_modulekobj(void)
-{
- return qdss.modulekobj;
-}
-
-#define QDSS_ATTR(name) \
-static struct kobj_attribute name##_attr = \
- __ATTR(name, S_IRUGO | S_IWUSR, name##_show, name##_store)
-
-static ssize_t max_clk_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t n)
-{
- unsigned long val;
-
- if (sscanf(buf, "%lx", &val) != 1)
- return -EINVAL;
-
- qdss.max_clk = val;
- return n;
-}
-static ssize_t max_clk_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
-{
- unsigned long val = qdss.max_clk;
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-QDSS_ATTR(max_clk);
-
-static void __devinit qdss_add_sources(struct qdss_source *srcs, size_t num)
-{
- mutex_lock(&qdss.sources_mutex);
- while (num--) {
- list_add_tail(&srcs->link, &qdss.sources);
- srcs++;
- }
- mutex_unlock(&qdss.sources_mutex);
-}
-
-static int __init qdss_sysfs_init(void)
-{
- int ret;
-
- qdss.modulekobj = kset_find_obj(module_kset, KBUILD_MODNAME);
- if (!qdss.modulekobj) {
- pr_err("failed to find QDSS sysfs module kobject\n");
- ret = -ENOENT;
- goto err;
- }
-
- ret = sysfs_create_file(qdss.modulekobj, &max_clk_attr.attr);
- if (ret) {
- pr_err("failed to create QDSS sysfs max_clk attribute\n");
- goto err;
- }
-
- return 0;
-err:
- return ret;
-}
-
-static void __devexit qdss_sysfs_exit(void)
-{
- sysfs_remove_file(qdss.modulekobj, &max_clk_attr.attr);
-}
-
-static int __devinit qdss_probe(struct platform_device *pdev)
-{
- int ret;
- struct qdss_source *src_table;
- size_t num_srcs;
-
- mutex_init(&qdss.sources_mutex);
- mutex_init(&qdss.clk_mutex);
- mutex_init(&qdss.sink_mutex);
-
- if (pdev->dev.platform_data == NULL) {
- pr_err("%s: platform data is NULL\n", __func__);
- ret = -ENODEV;
- goto err_pdata;
- }
- qdss.pdata = pdev->dev.platform_data;
-
- INIT_LIST_HEAD(&qdss.sources);
- src_table = (qdss.pdata)->src_table;
- num_srcs = (qdss.pdata)->size;
- qdss_add_sources(src_table, num_srcs);
-
- pr_info("QDSS arch initialized\n");
- return 0;
-err_pdata:
- mutex_destroy(&qdss.sink_mutex);
- mutex_destroy(&qdss.clk_mutex);
- mutex_destroy(&qdss.sources_mutex);
- pr_err("QDSS init failed\n");
- return ret;
-}
-
-static int __devexit qdss_remove(struct platform_device *pdev)
-{
- qdss_sysfs_exit();
- mutex_destroy(&qdss.sink_mutex);
- mutex_destroy(&qdss.clk_mutex);
- mutex_destroy(&qdss.sources_mutex);
-
- return 0;
-}
-
-static struct platform_driver qdss_driver = {
- .probe = qdss_probe,
- .remove = __devexit_p(qdss_remove),
- .driver = {
- .name = "msm_qdss",
- },
-};
-
-static int __init qdss_init(void)
-{
- return platform_driver_register(&qdss_driver);
-}
-arch_initcall(qdss_init);
-
-static int __init qdss_module_init(void)
-{
- int ret;
-
- ret = qdss_sysfs_init();
- if (ret)
- goto err_sysfs;
-
- pr_info("QDSS module initialized\n");
- return 0;
-err_sysfs:
- return ret;
-}
-module_init(qdss_module_init);
-
-static void __exit qdss_exit(void)
-{
- platform_driver_unregister(&qdss_driver);
-}
-module_exit(qdss_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Qualcomm Debug SubSystem Driver");
diff --git a/arch/arm/mach-msm/rpm-regulator-smd.c b/arch/arm/mach-msm/rpm-regulator-smd.c
index 152b6e5..fdff231 100644
--- a/arch/arm/mach-msm/rpm-regulator-smd.c
+++ b/arch/arm/mach-msm/rpm-regulator-smd.c
@@ -67,6 +67,7 @@
RPM_REGULATOR_PARAM_HEAD_ROOM,
RPM_REGULATOR_PARAM_QUIET_MODE,
RPM_REGULATOR_PARAM_FREQ_REASON,
+ RPM_REGULATOR_PARAM_CORNER,
RPM_REGULATOR_PARAM_MAX,
};
@@ -110,6 +111,7 @@
PARAM(HEAD_ROOM, 1, 0, 0, 1, "hr", 0, 0x7FFFFFFF, "qcom,init-head-room"),
PARAM(QUIET_MODE, 0, 1, 0, 0, "qm", 0, 2, "qcom,init-quiet-mode"),
PARAM(FREQ_REASON, 0, 1, 0, 1, "resn", 0, 8, "qcom,init-freq-reason"),
+ PARAM(CORNER, 0, 1, 0, 0, "corn", 0, 5, "qcom,init-voltage-corner"),
};
struct rpm_vreg_request {
@@ -437,6 +439,7 @@
RPM_VREG_AGGR_MAX(HEAD_ROOM, param_aggr, param_reg);
RPM_VREG_AGGR_MAX(QUIET_MODE, param_aggr, param_reg);
RPM_VREG_AGGR_MAX(FREQ_REASON, param_aggr, param_reg);
+ RPM_VREG_AGGR_MAX(CORNER, param_aggr, param_reg);
}
static int rpm_vreg_aggregate_requests(struct rpm_regulator *regulator)
@@ -666,6 +669,56 @@
return uV;
}
+static int rpm_vreg_set_voltage_corner(struct regulator_dev *rdev, int min_uV,
+ int max_uV, unsigned *selector)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+ int rc = 0;
+ int corner;
+ u32 prev_corner;
+
+ /*
+ * Translate from values which work as inputs in the
+ * regulator_set_voltage function to the actual corner values
+ * sent to the RPM.
+ */
+ corner = min_uV - RPM_REGULATOR_CORNER_RETENTION;
+
+ if (corner < params[RPM_REGULATOR_PARAM_CORNER].min
+ || corner > params[RPM_REGULATOR_PARAM_CORNER].max) {
+ vreg_err(reg, "corner=%d is not within allowed range: [%u, %u]\n",
+ corner, params[RPM_REGULATOR_PARAM_CORNER].min,
+ params[RPM_REGULATOR_PARAM_CORNER].max);
+ return -EINVAL;
+ }
+
+ rpm_vreg_lock(reg->rpm_vreg);
+
+ prev_corner = reg->req.param[RPM_REGULATOR_PARAM_CORNER];
+ RPM_VREG_SET_PARAM(reg, CORNER, corner);
+
+ /* Only send a new voltage if the regulator is currently enabled. */
+ if (rpm_vreg_active_or_sleep_enabled(reg->rpm_vreg))
+ rc = rpm_vreg_aggregate_requests(reg);
+
+ if (rc) {
+ vreg_err(reg, "set voltage corner failed, rc=%d", rc);
+ RPM_VREG_SET_PARAM(reg, CORNER, prev_corner);
+ }
+
+ rpm_vreg_unlock(reg->rpm_vreg);
+
+ return rc;
+}
+
+static int rpm_vreg_get_voltage_corner(struct regulator_dev *rdev)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+
+ return reg->req.param[RPM_REGULATOR_PARAM_CORNER]
+ + RPM_REGULATOR_CORNER_RETENTION;
+}
+
static int rpm_vreg_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
struct rpm_regulator *reg = rdev_get_drvdata(rdev);
@@ -802,6 +855,7 @@
priv_reg->rdev->reg_data = priv_reg;
priv_reg->rpm_vreg = rpm_vreg;
priv_reg->rdesc.name = framework_reg->rdesc.name;
+ priv_reg->rdesc.ops = framework_reg->rdesc.ops;
priv_reg->set_active = framework_reg->set_active;
priv_reg->set_sleep = framework_reg->set_sleep;
priv_reg->min_uV = framework_reg->min_uV;
@@ -963,7 +1017,7 @@
return -EINVAL;
}
- return rpm_vreg_set_voltage(regulator->rdev, uV, uV, NULL);
+ return regulator->rdesc.ops->set_voltage(regulator->rdev, uV, uV, NULL);
}
EXPORT_SYMBOL_GPL(rpm_regulator_set_voltage);
@@ -993,6 +1047,19 @@
.enable_time = rpm_vreg_enable_time,
};
+static struct regulator_ops smps_corner_ops = {
+ .enable = rpm_vreg_enable,
+ .disable = rpm_vreg_disable,
+ .is_enabled = rpm_vreg_is_enabled,
+ .set_voltage = rpm_vreg_set_voltage_corner,
+ .get_voltage = rpm_vreg_get_voltage_corner,
+ .list_voltage = rpm_vreg_list_voltage,
+ .set_mode = rpm_vreg_set_mode,
+ .get_mode = rpm_vreg_get_mode,
+ .get_optimum_mode = rpm_vreg_get_optimum_mode,
+ .enable_time = rpm_vreg_enable_time,
+};
+
static struct regulator_ops switch_ops = {
.enable = rpm_vreg_enable,
.disable = rpm_vreg_disable,
@@ -1122,6 +1189,14 @@
reg->rdesc.owner = THIS_MODULE;
reg->rdesc.type = REGULATOR_VOLTAGE;
+ /*
+ * Switch to voltage corner regulator ops if qcom,use-voltage-corner
+ * is specified in the device node (SMPS only).
+ */
+ if (of_find_property(node, "qcom,use-voltage-corner", NULL)
+ && regulator_type == RPM_REGULATOR_SMD_TYPE_SMPS)
+ reg->rdesc.ops = &smps_corner_ops;
+
if (regulator_type == RPM_REGULATOR_SMD_TYPE_VS)
reg->rdesc.n_voltages = 0;
else
diff --git a/arch/arm/mach-msm/scm.c b/arch/arm/mach-msm/scm.c
index ac48990..6052918 100644
--- a/arch/arm/mach-msm/scm.c
+++ b/arch/arm/mach-msm/scm.c
@@ -297,6 +297,9 @@
__asmeq("%1", "r0")
__asmeq("%2", "r1")
__asmeq("%3", "r2")
+#ifdef REQUIRES_SEC
+ ".arch_extension sec\n"
+#endif
"smc #0 @ switch to secure world\n"
: "=r" (r0)
: "r" (r0), "r" (r1), "r" (r2)
@@ -329,6 +332,9 @@
__asmeq("%2", "r1")
__asmeq("%3", "r2")
__asmeq("%4", "r3")
+#ifdef REQUIRES_SEC
+ ".arch_extension sec\n"
+#endif
"smc #0 @ switch to secure world\n"
: "=r" (r0)
: "r" (r0), "r" (r1), "r" (r2), "r" (r3));
@@ -356,6 +362,9 @@
__asmeq("%4", "r1")
__asmeq("%5", "r2")
__asmeq("%6", "r3")
+#ifdef REQUIRES_SEC
+ ".arch_extension sec\n"
+#endif
"smc #0 @ switch to secure world\n"
: "=r" (r0), "=r" (r1), "=r" (r2)
: "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5));
@@ -388,6 +397,9 @@
__asmeq("%1", "r1")
__asmeq("%2", "r0")
__asmeq("%3", "r1")
+#ifdef REQUIRES_SEC
+ ".arch_extension sec\n"
+#endif
"smc #0 @ switch to secure world\n"
: "=r" (r0), "=r" (r1)
: "r" (r0), "r" (r1)
diff --git a/arch/arm/mach-msm/sdio_ctl.c b/arch/arm/mach-msm/sdio_ctl.c
index 586e890..ac16e77 100644
--- a/arch/arm/mach-msm/sdio_ctl.c
+++ b/arch/arm/mach-msm/sdio_ctl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -25,6 +25,7 @@
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
+#include <linux/poll.h>
#include <asm/ioctls.h>
#include <linux/platform_device.h>
#include <mach/msm_smd.h>
@@ -206,6 +207,34 @@
return ret;
}
+static unsigned int sdio_ctl_poll(struct file *file, poll_table *wait)
+{
+ struct sdio_ctl_dev *sdio_ctl_devp;
+ unsigned int mask = 0;
+
+ sdio_ctl_devp = file->private_data;
+ if (!sdio_ctl_devp) {
+ pr_err("%s: on a NULL device\n", __func__);
+ return POLLERR;
+ }
+
+ poll_wait(file, &sdio_ctl_devp->read_wait_queue, wait);
+ mutex_lock(&sdio_ctl_devp->rx_lock);
+ if (sdio_cmux_is_channel_reset(sdio_ctl_devp->id)) {
+ mutex_unlock(&sdio_ctl_devp->rx_lock);
+ pr_err("%s notifying reset for sdio_ctl_dev id:%d\n",
+ __func__, sdio_ctl_devp->id);
+ return POLLERR;
+ }
+
+ if (sdio_ctl_devp->read_avail > 0)
+ mask |= POLLIN | POLLRDNORM;
+
+ mutex_unlock(&sdio_ctl_devp->rx_lock);
+
+ return mask;
+}
+
ssize_t sdio_ctl_read(struct file *file,
char __user *buf,
size_t count,
@@ -417,6 +446,7 @@
.release = sdio_ctl_release,
.read = sdio_ctl_read,
.write = sdio_ctl_write,
+ .poll = sdio_ctl_poll,
.unlocked_ioctl = sdio_ctl_ioctl,
};
diff --git a/arch/arm/mach-msm/smd_pkt.c b/arch/arm/mach-msm/smd_pkt.c
index 8d567f8..f5f76f7 100644
--- a/arch/arm/mach-msm/smd_pkt.c
+++ b/arch/arm/mach-msm/smd_pkt.c
@@ -43,6 +43,7 @@
#define NUM_SMD_PKT_PORTS 15
#endif
+#define PDRIVER_NAME_MAX_SIZE 32
#define LOOPBACK_INX (NUM_SMD_PKT_PORTS - 1)
#define DEVICE_NAME "smdpkt"
@@ -52,6 +53,7 @@
struct cdev cdev;
struct device *devicep;
void *pil;
+ char pdriver_name[PDRIVER_NAME_MAX_SIZE];
struct platform_driver driver;
struct smd_channel *ch;
@@ -729,7 +731,10 @@
int i;
for (i = 0; i < NUM_SMD_PKT_PORTS; i++) {
- if (!strncmp(pdev->name, smd_ch_name[i], SMD_MAX_CH_NAME_LEN)) {
+ if (smd_ch_edge[i] == pdev->id
+ && !strncmp(pdev->name, smd_ch_name[i],
+ SMD_MAX_CH_NAME_LEN)
+ && smd_pkt_devp[i]->driver.probe) {
complete_all(&smd_pkt_devp[i]->ch_allocated);
D_STATUS("%s allocated SMD ch for smd_pkt_dev id:%d\n",
__func__, i);
@@ -772,8 +777,9 @@
if (smd_pkt_devp->ch == 0) {
init_completion(&smd_pkt_devp->ch_allocated);
smd_pkt_devp->driver.probe = smd_pkt_dummy_probe;
- smd_pkt_devp->driver.driver.name =
- smd_ch_name[smd_pkt_devp->i];
+ scnprintf(smd_pkt_devp->pdriver_name, PDRIVER_NAME_MAX_SIZE,
+ "%s", smd_ch_name[smd_pkt_devp->i]);
+ smd_pkt_devp->driver.driver.name = smd_pkt_devp->pdriver_name;
smd_pkt_devp->driver.driver.owner = THIS_MODULE;
r = platform_driver_register(&smd_pkt_devp->driver);
if (r) {
@@ -870,8 +876,10 @@
pil_put(smd_pkt_devp->pil);
release_pd:
- if (r < 0)
+ if (r < 0) {
platform_driver_unregister(&smd_pkt_devp->driver);
+ smd_pkt_devp->driver.probe = NULL;
+ }
out:
mutex_unlock(&smd_pkt_devp->ch_lock);
@@ -904,6 +912,7 @@
smd_pkt_devp->blocking_write = 0;
smd_pkt_devp->poll_mode = 0;
platform_driver_unregister(&smd_pkt_devp->driver);
+ smd_pkt_devp->driver.probe = NULL;
if (smd_pkt_devp->pil)
pil_put(smd_pkt_devp->pil);
}
diff --git a/arch/arm/mach-msm/socinfo.c b/arch/arm/mach-msm/socinfo.c
index b047cf4..533e6cd 100644
--- a/arch/arm/mach-msm/socinfo.c
+++ b/arch/arm/mach-msm/socinfo.c
@@ -189,12 +189,14 @@
[88] = MSM_CPU_7X25A,
[89] = MSM_CPU_7X25A,
[96] = MSM_CPU_7X25A,
+ [135] = MSM_CPU_7X25A,
/* 7x27A IDs */
[90] = MSM_CPU_7X27A,
[91] = MSM_CPU_7X27A,
[92] = MSM_CPU_7X27A,
[97] = MSM_CPU_7X27A,
+ [136] = MSM_CPU_7X27A,
/* FSM9xxx ID */
[94] = FSM_CPU_9XXX,
diff --git a/arch/arm/mach-msm/subsystem_map.c b/arch/arm/mach-msm/subsystem_map.c
index 5f5a02b..fcb8517 100644
--- a/arch/arm/mach-msm/subsystem_map.c
+++ b/arch/arm/mach-msm/subsystem_map.c
@@ -38,10 +38,8 @@
VIDEO_DOMAIN,
VIDEO_DOMAIN,
CAMERA_DOMAIN,
- DISPLAY_READ_DOMAIN,
- DISPLAY_WRITE_DOMAIN,
- ROTATOR_SRC_DOMAIN,
- ROTATOR_DST_DOMAIN,
+ DISPLAY_DOMAIN,
+ ROTATOR_DOMAIN,
0xFFFFFFFF
};
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index 3199b76..4142d91 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -12,6 +12,17 @@
that do their own scheduling and require only minimal assistance from
the kernel.
+config IOSCHED_TEST
+ tristate "Test I/O scheduler"
+ depends on DEBUG_FS
+ default m
+ ---help---
+	  The test I/O scheduler is a duplicate of the noop scheduler with
+	  the addition of a test utility.
+	  It allows testing a block device by dispatching specific requests
+	  according to the test case and declaring PASS/FAIL based on the
+	  requests' completion error codes.
+
config IOSCHED_DEADLINE
tristate "Deadline I/O scheduler"
default y
diff --git a/block/Makefile b/block/Makefile
index 39b76ba..436b220 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -15,6 +15,7 @@
obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
+obj-$(CONFIG_IOSCHED_TEST) += test-iosched.o
obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
obj-$(CONFIG_BLK_DEV_INTEGRITY) += blk-integrity.o
diff --git a/block/blk-core.c b/block/blk-core.c
index 038d11f..68d7158 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -977,8 +977,6 @@
{
struct request *rq;
- BUG_ON(rw != READ && rw != WRITE);
-
spin_lock_irq(q->queue_lock);
if (gfp_mask & __GFP_WAIT)
rq = get_request_wait(q, rw, NULL);
@@ -1311,6 +1309,7 @@
req->ioprio = bio_prio(bio);
blk_rq_bio_prep(req->q, req, bio);
}
+EXPORT_SYMBOL(init_request_from_bio);
void blk_queue_bio(struct request_queue *q, struct bio *bio)
{
diff --git a/block/test-iosched.c b/block/test-iosched.c
new file mode 100644
index 0000000..3c38734
--- /dev/null
+++ b/block/test-iosched.c
@@ -0,0 +1,1019 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * The test scheduler allows testing the block device by dispatching
+ * specific requests according to the test case and declaring PASS/FAIL
+ * according to the requests' completion error codes.
+ * Each test is exposed via debugfs and can be triggered by writing to
+ * the debugfs file.
+ *
+ */
+
+/* elevator test iosched */
+#include <linux/blkdev.h>
+#include <linux/elevator.h>
+#include <linux/bio.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/debugfs.h>
+#include <linux/test-iosched.h>
+#include <linux/delay.h>
+#include "blk.h"
+
+#define MODULE_NAME "test-iosched"
+#define WR_RD_START_REQ_ID 1234
+#define UNIQUE_START_REQ_ID 5678
+#define TIMEOUT_TIMER_MS 40000
+#define TEST_MAX_TESTCASE_ROUNDS 15
+
+#define test_pr_debug(fmt, args...) pr_debug("%s: "fmt"\n", MODULE_NAME, args)
+#define test_pr_info(fmt, args...) pr_info("%s: "fmt"\n", MODULE_NAME, args)
+#define test_pr_err(fmt, args...) pr_err("%s: "fmt"\n", MODULE_NAME, args)
+
+static DEFINE_SPINLOCK(blk_dev_test_list_lock);
+static LIST_HEAD(blk_dev_test_list);
+static struct test_data *ptd;
+
+/* Get the request after `test_rq' in the test requests list */
+static struct test_request *
+latter_test_request(struct request_queue *q,
+ struct test_request *test_rq)
+{
+ struct test_data *td = q->elevator->elevator_data;
+
+ if (test_rq->queuelist.next == &td->test_queue)
+ return NULL;
+ return list_entry(test_rq->queuelist.next, struct test_request,
+ queuelist);
+}
+
+/**
+ * test_iosched_get_req_queue() - returns the request queue
+ * served by the scheduler
+ */
+struct request_queue *test_iosched_get_req_queue(void)
+{
+ if (!ptd)
+ return NULL;
+
+ return ptd->req_q;
+}
+EXPORT_SYMBOL(test_iosched_get_req_queue);
+
+/**
+ * test_iosched_mark_test_completion() - Wake up the debugfs
+ * thread waiting on the test completion
+ */
+void test_iosched_mark_test_completion(void)
+{
+ if (!ptd)
+ return;
+
+ ptd->test_state = TEST_COMPLETED;
+ wake_up(&ptd->wait_q);
+}
+EXPORT_SYMBOL(test_iosched_mark_test_completion);
+
+/* Check if all the queued test requests were completed */
+static void check_test_completion(void)
+{
+ struct test_request *test_rq;
+ struct request *rq;
+
+ list_for_each_entry(test_rq, &ptd->test_queue, queuelist) {
+ rq = test_rq->rq;
+ if (!test_rq->req_completed)
+ return;
+ }
+
+ test_pr_info("%s: Test is completed", __func__);
+
+ test_iosched_mark_test_completion();
+}
+
+/*
+ * A callback to be called per bio completion.
+ * Frees the bio memory.
+ */
+static void end_test_bio(struct bio *bio, int err)
+{
+ if (err)
+ clear_bit(BIO_UPTODATE, &bio->bi_flags);
+
+ bio_put(bio);
+}
+
+/*
+ * A callback to be called per request completion.
+ * The request memory is not freed here; it will be freed later, after the
+ * test results have been checked.
+ */
+static void end_test_req(struct request *rq, int err)
+{
+ struct test_request *test_rq;
+
+ test_rq = (struct test_request *)rq->elv.priv[0];
+ BUG_ON(!test_rq);
+
+ test_pr_info("%s: request %d completed, err=%d",
+ __func__, test_rq->req_id, err);
+
+ test_rq->req_completed = 1;
+ test_rq->req_result = err;
+
+ check_test_completion();
+}
+
+/**
+ * test_iosched_add_unique_test_req() - Create and queue a non
+ * read/write request (such as FLUSH/DISCARD/SANITIZE).
+ * @is_err_expcted: A flag to indicate if this request
+ * should succeed or not
+ * @req_unique: The type of request to add
+ * @start_sec: start address of the first bio
+ * @nr_sects: number of sectors in the request
+ * @end_req_io: specific completion callback. When not
+ *			set, the default callback will be used
+ */
+int test_iosched_add_unique_test_req(int is_err_expcted,
+ enum req_unique_type req_unique,
+ int start_sec, int nr_sects, rq_end_io_fn *end_req_io)
+{
+ struct bio *bio;
+ struct request *rq;
+ int rw_flags;
+ struct test_request *test_rq;
+
+ if (!ptd)
+ return -ENODEV;
+
+ bio = bio_alloc(GFP_KERNEL, 0);
+ if (!bio) {
+ test_pr_err("%s: Failed to allocate a bio", __func__);
+ return -ENODEV;
+ }
+ bio_get(bio);
+ bio->bi_end_io = end_test_bio;
+
+ switch (req_unique) {
+ case REQ_UNIQUE_FLUSH:
+ bio->bi_rw = WRITE_FLUSH;
+ break;
+ case REQ_UNIQUE_DISCARD:
+ bio->bi_rw = REQ_WRITE | REQ_DISCARD;
+ bio->bi_size = nr_sects << 9;
+ bio->bi_sector = start_sec;
+ break;
+ default:
+ test_pr_err("%s: Invalid request type %d", __func__,
+ req_unique);
+ bio_put(bio);
+ return -ENODEV;
+ }
+
+ rw_flags = bio_data_dir(bio);
+ if (bio->bi_rw & REQ_SYNC)
+ rw_flags |= REQ_SYNC;
+
+ rq = blk_get_request(ptd->req_q, rw_flags, GFP_KERNEL);
+ if (!rq) {
+ test_pr_err("%s: Failed to allocate a request", __func__);
+ bio_put(bio);
+ return -ENODEV;
+ }
+
+ init_request_from_bio(rq, bio);
+ if (end_req_io)
+ rq->end_io = end_req_io;
+ else
+ rq->end_io = end_test_req;
+
+ test_rq = kzalloc(sizeof(struct test_request), GFP_KERNEL);
+ if (!test_rq) {
+ test_pr_err("%s: Failed to allocate a test request", __func__);
+ bio_put(bio);
+ blk_put_request(rq);
+ return -ENODEV;
+ }
+ test_rq->req_completed = 0;
+ test_rq->req_result = -1;
+ test_rq->rq = rq;
+ test_rq->is_err_expected = is_err_expcted;
+ rq->elv.priv[0] = (void *)test_rq;
+ test_rq->req_id = ptd->unique_next_req_id++;
+
+ test_pr_debug(
+ "%s: added request %d to the test requests list, type = %d",
+ __func__, test_rq->req_id, req_unique);
+
+ list_add_tail(&test_rq->queuelist, &ptd->test_queue);
+
+ return 0;
+}
+EXPORT_SYMBOL(test_iosched_add_unique_test_req);
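As a rough sketch of how the API above might be used (the helper name and
sector count are illustrative and not part of this patch), a block device
test could queue a DISCARD request that is expected to succeed:

	/* Hypothetical helper: queue a DISCARD over 8 sectors, no error expected */
	static int example_add_discard(struct test_data *td)
	{
		return test_iosched_add_unique_test_req(0, REQ_UNIQUE_DISCARD,
							td->start_sector, 8, NULL);
	}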
+
+/*
+ * Fill the request data buffer with the given pattern.
+ * If the pattern is TEST_PATTERN_SEQUENTIAL the buffer is filled with
+ * sequential numbers; TEST_NO_PATTERN leaves the buffer untouched.
+ */
+static void fill_buf_with_pattern(int *buf, int num_bytes, int pattern)
+{
+ int i = 0;
+ int num_of_dwords = num_bytes/sizeof(int);
+
+ if (pattern == TEST_NO_PATTERN)
+ return;
+
+ /* num_bytes should be aligned to sizeof(int) */
+ BUG_ON((num_bytes % sizeof(int)) != 0);
+
+ if (pattern == TEST_PATTERN_SEQUENTIAL) {
+ for (i = 0; i < num_of_dwords; i++)
+ buf[i] = i;
+ } else {
+ for (i = 0; i < num_of_dwords; i++)
+ buf[i] = pattern;
+ }
+}
+
+/**
+ * test_iosched_add_wr_rd_test_req() - Create and queue a
+ * read/write request.
+ * @is_err_expcted: A flag to indicate if this request
+ * should succeed or not
+ * @direction: READ/WRITE
+ * @start_sec: start address of the first bio
+ * @num_bios: number of BIOs to be allocated for the
+ * request
+ * @pattern: A pattern, to be written into the write
+ * requests data buffer. In case of READ
+ * request, the given pattern is kept as
+ * the expected pattern. The expected
+ * pattern will be compared in the test
+ *			check result function. If no comparison
+ * is required, set pattern to
+ * TEST_NO_PATTERN.
+ * @end_req_io: specific completion callback. When not
+ *				set, the default callback will be used
+ *
+ * This function allocates the test request and the block
+ * request and calls blk_rq_map_kern which allocates the
+ * required BIO. The allocated test request and the block
+ * request memory is freed at the end of the test and the
+ * allocated BIO memory is freed by end_test_bio.
+ */
+int test_iosched_add_wr_rd_test_req(int is_err_expcted,
+ int direction, int start_sec,
+ int num_bios, int pattern, rq_end_io_fn *end_req_io)
+{
+ struct request *rq = NULL;
+ struct test_request *test_rq = NULL;
+ int rw_flags = 0;
+ int buf_size = 0;
+ int ret = 0, i = 0;
+ unsigned int *bio_ptr = NULL;
+ struct bio *bio = NULL;
+
+ if (!ptd)
+ return -ENODEV;
+
+ rw_flags = direction;
+
+ rq = blk_get_request(ptd->req_q, rw_flags, GFP_KERNEL);
+ if (!rq) {
+ test_pr_err("%s: Failed to allocate a request", __func__);
+ return -ENODEV;
+ }
+
+ test_rq = kzalloc(sizeof(struct test_request), GFP_KERNEL);
+ if (!test_rq) {
+ test_pr_err("%s: Failed to allocate test request", __func__);
+ blk_put_request(rq);
+ return -ENODEV;
+ }
+
+ buf_size = sizeof(unsigned int) * BIO_U32_SIZE * num_bios;
+ test_rq->bios_buffer = kzalloc(buf_size, GFP_KERNEL);
+ if (!test_rq->bios_buffer) {
+ test_pr_err("%s: Failed to allocate the data buf", __func__);
+ goto err;
+ }
+ test_rq->buf_size = buf_size;
+
+ if (direction == WRITE)
+ fill_buf_with_pattern(test_rq->bios_buffer,
+ buf_size, pattern);
+ test_rq->wr_rd_data_pattern = pattern;
+
+ bio_ptr = test_rq->bios_buffer;
+ for (i = 0; i < num_bios; ++i) {
+ ret = blk_rq_map_kern(ptd->req_q, rq,
+ (void *)bio_ptr,
+ sizeof(unsigned int)*BIO_U32_SIZE,
+ GFP_KERNEL);
+ if (ret) {
+ test_pr_err("%s: blk_rq_map_kern returned error %d",
+ __func__, ret);
+ goto err;
+ }
+ bio_ptr += BIO_U32_SIZE;
+ }
+
+ if (end_req_io)
+ rq->end_io = end_req_io;
+ else
+ rq->end_io = end_test_req;
+ rq->__sector = start_sec;
+ rq->cmd_type |= REQ_TYPE_FS;
+
+ if (rq->bio) {
+ rq->bio->bi_sector = start_sec;
+ rq->bio->bi_end_io = end_test_bio;
+ bio = rq->bio;
+ while ((bio = bio->bi_next) != NULL)
+ bio->bi_end_io = end_test_bio;
+ }
+
+ ptd->num_of_write_bios += num_bios;
+ test_rq->req_id = ptd->wr_rd_next_req_id++;
+
+ test_rq->req_completed = 0;
+ test_rq->req_result = -1;
+ test_rq->rq = rq;
+ test_rq->is_err_expected = is_err_expcted;
+ rq->elv.priv[0] = (void *)test_rq;
+
+ test_pr_debug(
+ "%s: added request %d to the test requests list, buf_size=%d",
+ __func__, test_rq->req_id, buf_size);
+
+ list_add_tail(&test_rq->queuelist, &ptd->test_queue);
+
+ return 0;
+err:
+ blk_put_request(rq);
+	kfree(test_rq->bios_buffer);
+	kfree(test_rq);
+	return -ENODEV;
+}
+EXPORT_SYMBOL(test_iosched_add_wr_rd_test_req);
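To illustrate the read/write API, a test's prepare callback might queue a
write followed by a read-back of the same region. This is a sketch; the
callback name and request sizes are assumptions, not part of this patch:

	/* Hypothetical prepare callback: write one BIO, then read it back */
	static int example_prepare_test(struct test_data *td)
	{
		int ret;

		ret = test_iosched_add_wr_rd_test_req(0, WRITE, td->start_sector,
						1, TEST_PATTERN_SEQUENTIAL, NULL);
		if (ret)
			return ret;

		/* The READ request keeps the pattern as the expected result */
		return test_iosched_add_wr_rd_test_req(0, READ, td->start_sector,
						1, TEST_PATTERN_SEQUENTIAL, NULL);
	}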
+
+/* Converts the testcase number into a string */
+static char *get_test_case_str(struct test_data *td)
+{
+ if (td->test_info.get_test_case_str_fn)
+ return td->test_info.get_test_case_str_fn(td);
+
+ return "Unknown testcase";
+}
+
+/*
+ * Verify that the test request data buffer includes the expected
+ * pattern
+ */
+static int compare_buffer_to_pattern(struct test_request *test_rq)
+{
+ int i = 0;
+ int num_of_dwords = test_rq->buf_size/sizeof(int);
+
+ /* num_bytes should be aligned to sizeof(int) */
+ BUG_ON((test_rq->buf_size % sizeof(int)) != 0);
+ BUG_ON(test_rq->bios_buffer == NULL);
+
+ if (test_rq->wr_rd_data_pattern == TEST_NO_PATTERN)
+ return 0;
+
+ if (test_rq->wr_rd_data_pattern == TEST_PATTERN_SEQUENTIAL) {
+ for (i = 0; i < num_of_dwords; i++) {
+ if (test_rq->bios_buffer[i] != i) {
+ test_pr_err(
+ "%s: wrong pattern 0x%x in index %d",
+ __func__, test_rq->bios_buffer[i], i);
+ return -EINVAL;
+ }
+ }
+ } else {
+ for (i = 0; i < num_of_dwords; i++) {
+ if (test_rq->bios_buffer[i] !=
+ test_rq->wr_rd_data_pattern) {
+ test_pr_err(
+ "%s: wrong pattern 0x%x in index %d",
+ __func__, test_rq->bios_buffer[i], i);
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Determine if the test passed or failed.
+ * The function checks each test request's completion value and calls the
+ * test-specific check_test_result_fn callback for result checks that are
+ * specific to a test case.
+ */
+static int check_test_result(struct test_data *td)
+{
+ struct test_request *test_rq;
+ struct request *rq;
+ int res = 0;
+ static int run;
+
+ list_for_each_entry(test_rq, &ptd->test_queue, queuelist) {
+ rq = test_rq->rq;
+ if (!test_rq->req_completed) {
+ test_pr_err("%s: rq %d not completed", __func__,
+ test_rq->req_id);
+ res = -EINVAL;
+ goto err;
+ }
+
+ if ((test_rq->req_result < 0) && !test_rq->is_err_expected) {
+ test_pr_err(
+ "%s: rq %d completed with err, not as expected",
+ __func__, test_rq->req_id);
+ res = -EINVAL;
+ goto err;
+ }
+ if ((test_rq->req_result == 0) && test_rq->is_err_expected) {
+ test_pr_err("%s: rq %d succeeded, not as expected",
+ __func__, test_rq->req_id);
+ res = -EINVAL;
+ goto err;
+ }
+ if (rq_data_dir(test_rq->rq) == READ) {
+ res = compare_buffer_to_pattern(test_rq);
+ if (res) {
+ test_pr_err("%s: read pattern not as expected",
+ __func__);
+ res = -EINVAL;
+ goto err;
+ }
+ }
+ }
+
+ if (td->test_info.check_test_result_fn) {
+ res = td->test_info.check_test_result_fn(td);
+ if (res)
+ goto err;
+ }
+
+ test_pr_info("%s: %s, run# %03d, PASSED",
+ __func__, get_test_case_str(td), ++run);
+ td->test_result = TEST_PASSED;
+
+ return 0;
+err:
+ test_pr_err("%s: %s, run# %03d, FAILED",
+ __func__, get_test_case_str(td), ++run);
+ td->test_result = TEST_FAILED;
+ return res;
+}
+
+/* Create and queue the required requests according to the test case */
+static int prepare_test(struct test_data *td)
+{
+ int ret = 0;
+
+ if (td->test_info.prepare_test_fn) {
+ ret = td->test_info.prepare_test_fn(td);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Run the test */
+static int run_test(struct test_data *td)
+{
+ int ret = 0;
+
+ if (td->test_info.run_test_fn) {
+ ret = td->test_info.run_test_fn(td);
+ return ret;
+ }
+
+ /*
+ * Set the next_req pointer to the first request in the test requests
+ * list
+ */
+ if (!list_empty(&td->test_queue))
+ td->next_req = list_entry(td->test_queue.next,
+ struct test_request, queuelist);
+ __blk_run_queue(td->req_q);
+
+ return 0;
+}
+
+/* Free the allocated test requests, their requests and BIOs buffer */
+static void free_test_requests(struct test_data *td)
+{
+ struct test_request *test_rq;
+ while (!list_empty(&td->test_queue)) {
+ test_rq = list_entry(td->test_queue.next, struct test_request,
+ queuelist);
+ list_del_init(&test_rq->queuelist);
+ blk_put_request(test_rq->rq);
+ kfree(test_rq->bios_buffer);
+ kfree(test_rq);
+ }
+}
+
+/*
+ * Do post test operations.
+ * Free the allocated test requests, their requests and BIOs buffer.
+ */
+static int post_test(struct test_data *td)
+{
+ int ret = 0;
+
+ if (td->test_info.post_test_fn)
+ ret = td->test_info.post_test_fn(td);
+
+ ptd->test_info.testcase = 0;
+ ptd->test_state = TEST_IDLE;
+
+ free_test_requests(td);
+
+ return ret;
+}
+
+/*
+ * The timeout timer ensures that the test completes even if we don't get
+ * the completion callback for all the requests.
+ */
+static void test_timeout_handler(unsigned long data)
+{
+ struct test_data *td = (struct test_data *)data;
+
+ test_pr_info("%s: TIMEOUT timer expired", __func__);
+ td->test_state = TEST_COMPLETED;
+ wake_up(&td->wait_q);
+ return;
+}
+
+static unsigned int get_timeout_msec(struct test_data *td)
+{
+ if (td->test_info.timeout_msec)
+ return td->test_info.timeout_msec;
+ else
+ return TIMEOUT_TIMER_MS;
+}
+
+/**
+ * test_iosched_start_test() - Prepares and runs the test.
+ * @t_info:	the testcase and callback functions for
+ *		the current test
+ *
+ * The function also checks the test result upon test completion
+ */
+int test_iosched_start_test(struct test_info *t_info)
+{
+ int ret = 0;
+ unsigned timeout_msec;
+ int counter = 0;
+ char *test_name = NULL;
+
+ if (!ptd)
+ return -ENODEV;
+
+ if (!t_info) {
+ ptd->test_result = TEST_FAILED;
+ return -EINVAL;
+ }
+
+ do {
+ if (ptd->ignore_round)
+ /*
+ * We ignored the last run due to FS write requests.
+ * Sleep to allow those requests to be issued
+ */
+ msleep(2000);
+
+ spin_lock(&ptd->lock);
+
+ if (ptd->test_state != TEST_IDLE) {
+ test_pr_info(
+ "%s: Another test is running, try again later",
+ __func__);
+			spin_unlock(&ptd->lock);
+			return -EINVAL;
+ }
+
+ if (ptd->start_sector == 0) {
+ test_pr_err("%s: Invalid start sector", __func__);
+ ptd->test_result = TEST_FAILED;
+ spin_unlock(&ptd->lock);
+ return -EINVAL;
+ }
+
+ memcpy(&ptd->test_info, t_info, sizeof(struct test_info));
+
+ ptd->next_req = NULL;
+ ptd->test_result = TEST_NO_RESULT;
+ ptd->num_of_write_bios = 0;
+
+ ptd->unique_next_req_id = UNIQUE_START_REQ_ID;
+ ptd->wr_rd_next_req_id = WR_RD_START_REQ_ID;
+
+ ptd->ignore_round = false;
+ ptd->fs_wr_reqs_during_test = false;
+
+ ptd->test_state = TEST_RUNNING;
+
+ spin_unlock(&ptd->lock);
+
+ timeout_msec = get_timeout_msec(ptd);
+ mod_timer(&ptd->timeout_timer, jiffies +
+ msecs_to_jiffies(timeout_msec));
+
+ if (ptd->test_info.get_test_case_str_fn)
+ test_name = ptd->test_info.get_test_case_str_fn(ptd);
+ else
+ test_name = "Unknown testcase";
+ test_pr_info("%s: Starting test %s\n", __func__, test_name);
+
+ ret = prepare_test(ptd);
+ if (ret) {
+ test_pr_err("%s: failed to prepare the test\n",
+ __func__);
+ goto error;
+ }
+
+ ret = run_test(ptd);
+ if (ret) {
+ test_pr_err("%s: failed to run the test\n", __func__);
+ goto error;
+ }
+
+ test_pr_info("%s: Waiting for the test completion", __func__);
+
+ wait_event(ptd->wait_q, ptd->test_state == TEST_COMPLETED);
+ del_timer_sync(&ptd->timeout_timer);
+
+ ret = check_test_result(ptd);
+ if (ret) {
+ test_pr_err("%s: check_test_result failed\n",
+ __func__);
+ goto error;
+ }
+
+ ret = post_test(ptd);
+ if (ret) {
+ test_pr_err("%s: post_test failed\n", __func__);
+ goto error;
+ }
+
+		/*
+		 * Wake up the queue thread to fetch FS requests that might
+		 * have been postponed due to the test
+		 */
+ __blk_run_queue(ptd->req_q);
+
+ if (ptd->ignore_round)
+ test_pr_info(
+ "%s: Round canceled (Got wr reqs in the middle)",
+ __func__);
+
+ if (++counter == TEST_MAX_TESTCASE_ROUNDS) {
+ test_pr_info("%s: Too many rounds, did not succeed...",
+ __func__);
+ ptd->test_result = TEST_FAILED;
+ }
+
+ } while ((ptd->ignore_round) && (counter < TEST_MAX_TESTCASE_ROUNDS));
+
+ if (ptd->test_result == TEST_PASSED)
+ return 0;
+ else
+ return -EINVAL;
+
+error:
+ ptd->test_result = TEST_FAILED;
+ ptd->test_info.testcase = 0;
+ post_test(ptd);
+ return ret;
+}
+EXPORT_SYMBOL(test_iosched_start_test);
+
+/**
+ * test_iosched_register() - register a block device test
+ * utility.
+ * @bdt: the block device test type to register
+ */
+void test_iosched_register(struct blk_dev_test_type *bdt)
+{
+ spin_lock(&blk_dev_test_list_lock);
+ list_add_tail(&bdt->list, &blk_dev_test_list);
+ spin_unlock(&blk_dev_test_list_lock);
+}
+EXPORT_SYMBOL_GPL(test_iosched_register);
+
+/**
+ * test_iosched_unregister() - unregister a block device test
+ * utility.
+ * @bdt: the block device test type to unregister
+ */
+void test_iosched_unregister(struct blk_dev_test_type *bdt)
+{
+ spin_lock(&blk_dev_test_list_lock);
+ list_del_init(&bdt->list);
+ spin_unlock(&blk_dev_test_list_lock);
+}
+EXPORT_SYMBOL_GPL(test_iosched_unregister);
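+
+/*
+ * Illustrative usage sketch (not part of this patch): a block device test
+ * utility would typically register itself with test-iosched and later kick
+ * off a test through test_iosched_start_test(). All names and values below
+ * are hypothetical.
+ *
+ *	static struct blk_dev_test_type my_bdt = {
+ *		.init_fn = my_test_init,
+ *		.exit_fn = my_test_exit,
+ *	};
+ *
+ *	test_iosched_register(&my_bdt);
+ *
+ *	Then, typically from a debugfs write handler exposed by my_test_init():
+ *
+ *	struct test_info t_info = {
+ *		.testcase		= 1,
+ *		.timeout_msec		= 10000,
+ *		.prepare_test_fn	= my_prepare,
+ *		.run_test_fn		= NULL,
+ *		.post_test_fn		= my_post,
+ *		.get_test_case_str_fn	= my_test_name,
+ *	};
+ *	ret = test_iosched_start_test(&t_info);
+ *
+ * Leaving run_test_fn NULL uses the default dispatch of the queued test
+ * requests, as implemented in run_test() above.
+ */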
+
+/**
+ * test_iosched_set_test_result() - Set the test
+ * result(PASS/FAIL)
+ * @test_result: the test result
+ */
+void test_iosched_set_test_result(int test_result)
+{
+ if (!ptd)
+ return;
+
+ ptd->test_result = test_result;
+}
+EXPORT_SYMBOL(test_iosched_set_test_result);
+
+/**
+ * test_iosched_set_ignore_round() - Set the ignore_round flag
+ * @ignore_round: A flag to indicate if this test round
+ * should be ignored and re-run
+ */
+void test_iosched_set_ignore_round(bool ignore_round)
+{
+ if (!ptd)
+ return;
+
+ ptd->ignore_round = ignore_round;
+}
+EXPORT_SYMBOL(test_iosched_set_ignore_round);
+
+/**
+ * test_iosched_get_debugfs_tests_root() - returns the root
+ * debugfs directory for the test_iosched tests
+ */
+struct dentry *test_iosched_get_debugfs_tests_root(void)
+{
+ if (!ptd)
+ return NULL;
+
+ return ptd->debug.debug_tests_root;
+}
+EXPORT_SYMBOL(test_iosched_get_debugfs_tests_root);
+
+/**
+ * test_iosched_get_debugfs_utils_root() - returns the root
+ * debugfs directory for the test_iosched utils
+ */
+struct dentry *test_iosched_get_debugfs_utils_root(void)
+{
+ if (!ptd)
+ return NULL;
+
+ return ptd->debug.debug_utils_root;
+}
+EXPORT_SYMBOL(test_iosched_get_debugfs_utils_root);
+
+static int test_debugfs_init(struct test_data *td)
+{
+ td->debug.debug_root = debugfs_create_dir("test-iosched", NULL);
+ if (!td->debug.debug_root)
+ return -ENOENT;
+
+ td->debug.debug_tests_root = debugfs_create_dir("tests",
+ td->debug.debug_root);
+ if (!td->debug.debug_tests_root)
+ goto err;
+
+ td->debug.debug_utils_root = debugfs_create_dir("utils",
+ td->debug.debug_root);
+ if (!td->debug.debug_utils_root)
+ goto err;
+
+ td->debug.debug_test_result = debugfs_create_u32(
+ "test_result",
+ S_IRUGO | S_IWUGO,
+ td->debug.debug_utils_root,
+ &td->test_result);
+ if (!td->debug.debug_test_result)
+ goto err;
+
+ td->debug.start_sector = debugfs_create_u32(
+ "start_sector",
+ S_IRUGO | S_IWUGO,
+ td->debug.debug_utils_root,
+ &td->start_sector);
+ if (!td->debug.start_sector)
+ goto err;
+
+ return 0;
+
+err:
+ debugfs_remove_recursive(td->debug.debug_root);
+ return -ENOENT;
+}
+
+static void test_debugfs_cleanup(struct test_data *td)
+{
+ debugfs_remove_recursive(td->debug.debug_root);
+}
+
+static void print_req(struct request *req)
+{
+ struct bio *bio;
+ struct test_request *test_rq;
+
+ if (!req)
+ return;
+
+ test_rq = (struct test_request *)req->elv.priv[0];
+
+ if (test_rq) {
+ test_pr_debug("%s: Dispatch request %d: __sector=0x%lx",
+ __func__, test_rq->req_id, (unsigned long)req->__sector);
+ test_pr_debug("%s: nr_phys_segments=%d, num_of_sectors=%d",
+ __func__, req->nr_phys_segments, blk_rq_sectors(req));
+ bio = req->bio;
+ test_pr_debug("%s: bio: bi_size=%d, bi_sector=0x%lx",
+ __func__, bio->bi_size,
+ (unsigned long)bio->bi_sector);
+ while ((bio = bio->bi_next) != NULL) {
+ test_pr_debug("%s: bio: bi_size=%d, bi_sector=0x%lx",
+ __func__, bio->bi_size,
+ (unsigned long)bio->bi_sector);
+ }
+ }
+}
+
+static void test_merged_requests(struct request_queue *q,
+ struct request *rq, struct request *next)
+{
+ list_del_init(&next->queuelist);
+}
+
+/*
+ * Dispatch a test request in case there is a running test. Otherwise, dispatch
+ * a request that was queued by the FS to keep the card functional.
+ */
+static int test_dispatch_requests(struct request_queue *q, int force)
+{
+ struct test_data *td = q->elevator->elevator_data;
+ struct request *rq = NULL;
+
+ switch (td->test_state) {
+ case TEST_IDLE:
+ if (!list_empty(&td->queue)) {
+ rq = list_entry(td->queue.next, struct request,
+ queuelist);
+ list_del_init(&rq->queuelist);
+ elv_dispatch_sort(q, rq);
+ return 1;
+ }
+ break;
+ case TEST_RUNNING:
+ if (td->next_req) {
+ rq = td->next_req->rq;
+ td->next_req =
+ latter_test_request(td->req_q, td->next_req);
+ if (!rq)
+ return 0;
+ print_req(rq);
+ elv_dispatch_sort(q, rq);
+ return 1;
+ }
+ break;
+ case TEST_COMPLETED:
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+static void test_add_request(struct request_queue *q, struct request *rq)
+{
+ struct test_data *td = q->elevator->elevator_data;
+
+ list_add_tail(&rq->queuelist, &td->queue);
+
+	/*
+	 * The write requests can be followed by a FLUSH request that might
+	 * cause unexpected test results.
+	 */
+ if ((rq_data_dir(rq) == WRITE) && (td->test_state == TEST_RUNNING)) {
+ test_pr_debug("%s: got WRITE req in the middle of the test",
+ __func__);
+ td->fs_wr_reqs_during_test = true;
+ }
+}
+
+static struct request *
+test_former_request(struct request_queue *q, struct request *rq)
+{
+ struct test_data *td = q->elevator->elevator_data;
+
+ if (rq->queuelist.prev == &td->queue)
+ return NULL;
+ return list_entry(rq->queuelist.prev, struct request, queuelist);
+}
+
+static struct request *
+test_latter_request(struct request_queue *q, struct request *rq)
+{
+ struct test_data *td = q->elevator->elevator_data;
+
+ if (rq->queuelist.next == &td->queue)
+ return NULL;
+ return list_entry(rq->queuelist.next, struct request, queuelist);
+}
+
+static void *test_init_queue(struct request_queue *q)
+{
+ struct blk_dev_test_type *__bdt;
+
+ ptd = kmalloc_node(sizeof(struct test_data), GFP_KERNEL,
+ q->node);
+ if (!ptd) {
+ test_pr_err("%s: failed to allocate test data", __func__);
+ return NULL;
+ }
+ memset((void *)ptd, 0, sizeof(struct test_data));
+ INIT_LIST_HEAD(&ptd->queue);
+ INIT_LIST_HEAD(&ptd->test_queue);
+ init_waitqueue_head(&ptd->wait_q);
+ ptd->req_q = q;
+
+ setup_timer(&ptd->timeout_timer, test_timeout_handler,
+ (unsigned long)ptd);
+
+ spin_lock_init(&ptd->lock);
+
+	if (test_debugfs_init(ptd)) {
+		test_pr_err("%s: Failed to create debugfs files", __func__);
+		kfree(ptd);
+		ptd = NULL;
+		return NULL;
+	}
+
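+	/*
+	 * Notify registered block device test utilities that test-iosched
+	 * is now the active scheduler for this queue.
+	 */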
+ list_for_each_entry(__bdt, &blk_dev_test_list, list)
+ __bdt->init_fn();
+
+ return ptd;
+}
+
+static void test_exit_queue(struct elevator_queue *e)
+{
+ struct test_data *td = e->elevator_data;
+ struct blk_dev_test_type *__bdt;
+
+ BUG_ON(!list_empty(&td->queue));
+
+ list_for_each_entry(__bdt, &blk_dev_test_list, list)
+ __bdt->exit_fn();
+
+ test_debugfs_cleanup(td);
+
+ kfree(td);
+}
+
+static struct elevator_type elevator_test_iosched = {
+ .ops = {
+ .elevator_merge_req_fn = test_merged_requests,
+ .elevator_dispatch_fn = test_dispatch_requests,
+ .elevator_add_req_fn = test_add_request,
+ .elevator_former_req_fn = test_former_request,
+ .elevator_latter_req_fn = test_latter_request,
+ .elevator_init_fn = test_init_queue,
+ .elevator_exit_fn = test_exit_queue,
+ },
+ .elevator_name = "test-iosched",
+ .elevator_owner = THIS_MODULE,
+};
+
+static int __init test_init(void)
+{
+ elv_register(&elevator_test_iosched);
+
+ return 0;
+}
+
+static void __exit test_exit(void)
+{
+ elv_unregister(&elevator_test_iosched);
+}
+
+module_init(test_init);
+module_exit(test_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Test IO scheduler");
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 286a4d4..a73d713 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -148,4 +148,6 @@
source "drivers/gud/Kconfig"
+source "drivers/coresight/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index bea505c..bd18a62 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -140,3 +140,5 @@
#MobiCore
obj-$(CONFIG_MOBICORE_SUPPORT) += gud/
+
+obj-$(CONFIG_MSM_QDSS) += coresight/
diff --git a/drivers/char/msm_rotator.c b/drivers/char/msm_rotator.c
index 9c8f7ee..6cd1806 100644
--- a/drivers/char/msm_rotator.c
+++ b/drivers/char/msm_rotator.c
@@ -170,9 +170,9 @@
CLK_SUSPEND,
};
-int msm_rotator_iommu_map_buf(int mem_id, int domain,
+int msm_rotator_iommu_map_buf(int mem_id, unsigned char src,
unsigned long *start, unsigned long *len,
- struct ion_handle **pihdl, unsigned int secure)
+ struct ion_handle **pihdl)
{
if (!msm_rotator_dev->client)
return -EINVAL;
@@ -185,20 +185,11 @@
pr_debug("%s(): ion_hdl %p, ion_buf %p\n", __func__, *pihdl,
ion_share(msm_rotator_dev->client, *pihdl));
- if (secure) {
- if (ion_phys(msm_rotator_dev->client,
- *pihdl, start, (unsigned *)len)) {
- pr_err("%s:%d: ion_phys map failed\n",
- __func__, __LINE__);
- return -ENOMEM;
- }
- } else {
- if (ion_map_iommu(msm_rotator_dev->client,
- *pihdl, domain, GEN_POOL,
- SZ_4K, 0, start, len, 0, ION_IOMMU_UNMAP_DELAYED)) {
- pr_err("ion_map_iommu() failed\n");
- return -EINVAL;
- }
+ if (ion_map_iommu(msm_rotator_dev->client,
+ *pihdl, ROTATOR_DOMAIN, GEN_POOL,
+ SZ_4K, 0, start, len, 0, ION_IOMMU_UNMAP_DELAYED)) {
+ pr_err("ion_map_iommu() failed\n");
+ return -EINVAL;
}
pr_debug("%s(): mem_id %d, start 0x%lx, len 0x%lx\n",
@@ -815,9 +806,9 @@
return 0;
}
-static int get_img(struct msmfb_data *fbd, int domain,
+static int get_img(struct msmfb_data *fbd, unsigned char src,
unsigned long *start, unsigned long *len, struct file **p_file,
- int *p_need, struct ion_handle **p_ihdl, unsigned int secure)
+ int *p_need, struct ion_handle **p_ihdl)
{
int ret = 0;
#ifdef CONFIG_FB
@@ -859,8 +850,8 @@
#endif
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
- return msm_rotator_iommu_map_buf(fbd->memory_id, domain, start,
- len, p_ihdl, secure);
+ return msm_rotator_iommu_map_buf(fbd->memory_id, src, start,
+ len, p_ihdl);
#endif
#ifdef CONFIG_ANDROID_PMEM
if (!get_pmem_file(fbd->memory_id, start, &vstart, len, p_file))
@@ -871,20 +862,17 @@
}
-static void put_img(struct file *p_file, struct ion_handle *p_ihdl,
- int domain, unsigned int secure)
+static void put_img(struct file *p_file, struct ion_handle *p_ihdl)
{
#ifdef CONFIG_ANDROID_PMEM
if (p_file != NULL)
put_pmem_file(p_file);
#endif
-
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
if (!IS_ERR_OR_NULL(p_ihdl)) {
pr_debug("%s(): p_ihdl %p\n", __func__, p_ihdl);
- if (!secure)
- ion_unmap_iommu(msm_rotator_dev->client,
- p_ihdl, domain, GEN_POOL);
+ ion_unmap_iommu(msm_rotator_dev->client,
+ p_ihdl, ROTATOR_DOMAIN, GEN_POOL);
ion_free(msm_rotator_dev->client, p_ihdl);
}
@@ -951,18 +939,18 @@
goto do_rotate_unlock_mutex;
}
- rc = get_img(&info.src, ROTATOR_SRC_DOMAIN, (unsigned long *)&in_paddr,
+ rc = get_img(&info.src, 1, (unsigned long *)&in_paddr,
(unsigned long *)&src_len, &srcp0_file, &ps0_need,
- &srcp0_ihdl, 0);
+ &srcp0_ihdl);
if (rc) {
pr_err("%s: in get_img() failed id=0x%08x\n",
DRIVER_NAME, info.src.memory_id);
goto do_rotate_unlock_mutex;
}
- rc = get_img(&info.dst, ROTATOR_DST_DOMAIN, (unsigned long *)&out_paddr,
+ rc = get_img(&info.dst, 0, (unsigned long *)&out_paddr,
(unsigned long *)&dst_len, &dstp0_file, &p_need,
- &dstp0_ihdl, img_info->secure);
+ &dstp0_ihdl);
if (rc) {
pr_err("%s: out get_img() failed id=0x%08x\n",
DRIVER_NAME, info.dst.memory_id);
@@ -990,20 +978,20 @@
goto do_rotate_unlock_mutex;
}
- rc = get_img(&info.src_chroma, ROTATOR_SRC_DOMAIN,
+ rc = get_img(&info.src_chroma, 1,
(unsigned long *)&in_chroma_paddr,
(unsigned long *)&src_len, &srcp1_file, &p_need,
- &srcp1_ihdl, 0);
+ &srcp1_ihdl);
if (rc) {
pr_err("%s: in chroma get_img() failed id=0x%08x\n",
DRIVER_NAME, info.src_chroma.memory_id);
goto do_rotate_unlock_mutex;
}
- rc = get_img(&info.dst_chroma, ROTATOR_DST_DOMAIN,
+ rc = get_img(&info.dst_chroma, 0,
(unsigned long *)&out_chroma_paddr,
(unsigned long *)&dst_len, &dstp1_file, &p_need,
- &dstp1_ihdl, img_info->secure);
+ &dstp1_ihdl);
if (rc) {
pr_err("%s: out chroma get_img() failed id=0x%08x\n",
DRIVER_NAME, info.dst_chroma.memory_id);
@@ -1174,17 +1162,15 @@
#endif
schedule_delayed_work(&msm_rotator_dev->rot_clk_work, HZ);
do_rotate_unlock_mutex:
- put_img(dstp1_file, dstp1_ihdl, ROTATOR_DST_DOMAIN,
- msm_rotator_dev->img_info[s]->secure);
- put_img(srcp1_file, srcp1_ihdl, ROTATOR_SRC_DOMAIN, 0);
- put_img(dstp0_file, dstp0_ihdl, ROTATOR_DST_DOMAIN,
- msm_rotator_dev->img_info[s]->secure);
+ put_img(dstp1_file, dstp1_ihdl);
+ put_img(srcp1_file, srcp1_ihdl);
+ put_img(dstp0_file, dstp0_ihdl);
/* only source may use frame buffer */
if (info.src.flags & MDP_MEMORY_ID_TYPE_FB)
fput_light(srcp0_file, ps0_need);
else
- put_img(srcp0_file, srcp0_ihdl, ROTATOR_SRC_DOMAIN, 0);
+ put_img(srcp0_file, srcp0_ihdl);
mutex_unlock(&msm_rotator_dev->rotator_lock);
dev_dbg(msm_rotator_dev->device, "%s() returning rc = %d\n",
__func__, rc);
diff --git a/drivers/coresight/Kconfig b/drivers/coresight/Kconfig
new file mode 100644
index 0000000..1219af1
--- /dev/null
+++ b/drivers/coresight/Kconfig
@@ -0,0 +1,32 @@
+config MSM_QDSS
+ bool "CoreSight tracing"
+ help
+	  Enables support for CoreSight tracing. This uses CoreSight trace
+	  components and buses to support both hardware tracing (e.g. processor
+	  ETM) and hardware-assisted software instrumentation based tracing
+	  (e.g. STM).
+
+ For production builds, you should probably say 'N' here to avoid
+ potential power, performance and memory penalty.
+
+config MSM_QDSS_STM_DEFAULT_ENABLE
+ bool "Turn on CoreSight STM tracing by default"
+ depends on MSM_QDSS
+ help
+	  Turns on CoreSight STM tracing (hardware-assisted software
+	  instrumentation based tracing) by default. Otherwise, tracing is
+	  disabled by default but can be enabled via sysfs.
+
+ For production builds, you should probably say 'N' here to avoid
+ potential power, performance and memory penalty.
+
+config MSM_QDSS_ETM_DEFAULT_ENABLE
+ bool "Turn on CoreSight ETM tracing by default"
+ depends on MSM_QDSS
+ help
+ Turns on CoreSight ETM tracing (processor tracing) by default.
+ Otherwise, tracing is disabled by default but can be enabled via
+ sysfs.
+
+ For production builds, you should probably say 'N' here to avoid
+ potential power, performance and memory penalty.
diff --git a/drivers/coresight/Makefile b/drivers/coresight/Makefile
new file mode 100644
index 0000000..2ee2093
--- /dev/null
+++ b/drivers/coresight/Makefile
@@ -0,0 +1,2 @@
+
+obj-$(CONFIG_MSM_QDSS) += coresight.o coresight-etb.o coresight-tpiu.o coresight-funnel.o coresight-stm.o coresight-etm.o
diff --git a/drivers/coresight/coresight-etb.c b/drivers/coresight/coresight-etb.c
new file mode 100644
index 0000000..2bffae5
--- /dev/null
+++ b/drivers/coresight/coresight-etb.c
@@ -0,0 +1,451 @@
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/coresight.h>
+
+#include "coresight-priv.h"
+
+#define etb_writel(drvdata, val, off) __raw_writel((val), drvdata->base + off)
+#define etb_readl(drvdata, off) __raw_readl(drvdata->base + off)
+
+#define ETB_RAM_DEPTH_REG (0x004)
+#define ETB_STATUS_REG (0x00C)
+#define ETB_RAM_READ_DATA_REG (0x010)
+#define ETB_RAM_READ_POINTER (0x014)
+#define ETB_RAM_WRITE_POINTER (0x018)
+#define ETB_TRG (0x01C)
+#define ETB_CTL_REG (0x020)
+#define ETB_RWD_REG (0x024)
+#define ETB_FFSR (0x300)
+#define ETB_FFCR (0x304)
+#define ETB_ITMISCOP0 (0xEE0)
+#define ETB_ITTRFLINACK (0xEE4)
+#define ETB_ITTRFLIN (0xEE8)
+#define ETB_ITATBDATA0 (0xEEC)
+#define ETB_ITATBCTR2 (0xEF0)
+#define ETB_ITATBCTR1 (0xEF4)
+#define ETB_ITATBCTR0 (0xEF8)
+
+
+#define BYTES_PER_WORD 4
+#define ETB_SIZE_WORDS 4096
+#define FRAME_SIZE_WORDS 4
+
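+/*
+ * CoreSight components must be unlocked through the Lock Access Register
+ * (LAR) before their registers can be written, and locked again once the
+ * programming sequence is done. The barriers order the lock/unlock writes
+ * against the register accesses they bracket.
+ */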
+#define ETB_LOCK() \
+do { \
+ mb(); \
+ etb_writel(drvdata, 0x0, CORESIGHT_LAR); \
+} while (0)
+#define ETB_UNLOCK() \
+do { \
+ etb_writel(drvdata, CORESIGHT_UNLOCK, CORESIGHT_LAR); \
+ mb(); \
+} while (0)
+
+struct etb_drvdata {
+ uint8_t *buf;
+ void __iomem *base;
+ bool enabled;
+ bool reading;
+ spinlock_t spinlock;
+ atomic_t in_use;
+ struct device *dev;
+ struct kobject *kobj;
+ struct clk *clk;
+ uint32_t trigger_cntr;
+};
+
+static struct etb_drvdata *drvdata;
+
+static void __etb_enable(void)
+{
+ int i;
+
+ ETB_UNLOCK();
+
+ etb_writel(drvdata, 0x0, ETB_RAM_WRITE_POINTER);
+ for (i = 0; i < ETB_SIZE_WORDS; i++)
+ etb_writel(drvdata, 0x0, ETB_RWD_REG);
+
+ etb_writel(drvdata, 0x0, ETB_RAM_WRITE_POINTER);
+ etb_writel(drvdata, 0x0, ETB_RAM_READ_POINTER);
+
+ etb_writel(drvdata, drvdata->trigger_cntr, ETB_TRG);
+ etb_writel(drvdata, BIT(13) | BIT(0), ETB_FFCR);
+ etb_writel(drvdata, BIT(0), ETB_CTL_REG);
+
+ ETB_LOCK();
+}
+
+int etb_enable(void)
+{
+ int ret;
+ unsigned long flags;
+
+ ret = clk_prepare_enable(drvdata->clk);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ __etb_enable();
+ drvdata->enabled = true;
+ dev_info(drvdata->dev, "ETB enabled\n");
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ return 0;
+}
+
+static void __etb_disable(void)
+{
+ int count;
+ uint32_t ffcr;
+
+ ETB_UNLOCK();
+
+ ffcr = etb_readl(drvdata, ETB_FFCR);
+ ffcr |= (BIT(12) | BIT(6));
+ etb_writel(drvdata, ffcr, ETB_FFCR);
+
+ for (count = TIMEOUT_US; BVAL(etb_readl(drvdata, ETB_FFCR), 6) != 0
+ && count > 0; count--)
+ udelay(1);
+	WARN(count == 0, "timeout while flushing ETB, ETB_FFCR: %#x\n",
+	     etb_readl(drvdata, ETB_FFCR));
+
+ etb_writel(drvdata, 0x0, ETB_CTL_REG);
+
+ for (count = TIMEOUT_US; BVAL(etb_readl(drvdata, ETB_FFSR), 1) != 1
+ && count > 0; count--)
+ udelay(1);
+	WARN(count == 0, "timeout while disabling ETB, ETB_FFSR: %#x\n",
+	     etb_readl(drvdata, ETB_FFSR));
+
+ ETB_LOCK();
+}
+
+void etb_disable(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ __etb_disable();
+ drvdata->enabled = false;
+ dev_info(drvdata->dev, "ETB disabled\n");
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ clk_disable_unprepare(drvdata->clk);
+}
+
+static void __etb_dump(void)
+{
+ int i;
+ uint8_t *buf_ptr;
+ uint32_t read_data;
+ uint32_t read_ptr;
+ uint32_t write_ptr;
+ uint32_t frame_off;
+ uint32_t frame_endoff;
+
+ ETB_UNLOCK();
+
+ read_ptr = etb_readl(drvdata, ETB_RAM_READ_POINTER);
+ write_ptr = etb_readl(drvdata, ETB_RAM_WRITE_POINTER);
+
+ frame_off = write_ptr % FRAME_SIZE_WORDS;
+ frame_endoff = FRAME_SIZE_WORDS - frame_off;
+ if (frame_off) {
+ dev_err(drvdata->dev, "write_ptr: %lu not aligned to formatter "
+ "frame size\n", (unsigned long)write_ptr);
+ dev_err(drvdata->dev, "frameoff: %lu, frame_endoff: %lu\n",
+ (unsigned long)frame_off, (unsigned long)frame_endoff);
+ write_ptr += frame_endoff;
+ }
+
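+	/*
+	 * If the ETB RAM has wrapped around (status bit 0 set), the oldest
+	 * data starts at the current write pointer; otherwise read from 0.
+	 */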
+ if ((etb_readl(drvdata, ETB_STATUS_REG) & BIT(0)) == 0)
+ etb_writel(drvdata, 0x0, ETB_RAM_READ_POINTER);
+ else
+ etb_writel(drvdata, write_ptr, ETB_RAM_READ_POINTER);
+
+ buf_ptr = drvdata->buf;
+ for (i = 0; i < ETB_SIZE_WORDS; i++) {
+ read_data = etb_readl(drvdata, ETB_RAM_READ_DATA_REG);
+ *buf_ptr++ = read_data >> 0;
+ *buf_ptr++ = read_data >> 8;
+ *buf_ptr++ = read_data >> 16;
+ *buf_ptr++ = read_data >> 24;
+ }
+
+ if (frame_off) {
+ buf_ptr -= (frame_endoff * BYTES_PER_WORD);
+ for (i = 0; i < frame_endoff; i++) {
+ *buf_ptr++ = 0x0;
+ *buf_ptr++ = 0x0;
+ *buf_ptr++ = 0x0;
+ *buf_ptr++ = 0x0;
+ }
+ }
+
+ etb_writel(drvdata, read_ptr, ETB_RAM_READ_POINTER);
+
+ ETB_LOCK();
+}
+
+void etb_dump(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (drvdata->enabled) {
+ __etb_disable();
+ __etb_dump();
+ __etb_enable();
+
+ dev_info(drvdata->dev, "ETB dumped\n");
+ }
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+}
+
+static int etb_open(struct inode *inode, struct file *file)
+{
+ if (atomic_cmpxchg(&drvdata->in_use, 0, 1))
+ return -EBUSY;
+
+ dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__);
+ return 0;
+}
+
+static ssize_t etb_read(struct file *file, char __user *data,
+ size_t len, loff_t *ppos)
+{
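+	/* Snapshot the ETB RAM into the local buffer on the first read */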
+ if (drvdata->reading == false) {
+ etb_dump();
+ drvdata->reading = true;
+ }
+
+ if (*ppos + len > ETB_SIZE_WORDS * BYTES_PER_WORD)
+ len = ETB_SIZE_WORDS * BYTES_PER_WORD - *ppos;
+
+ if (copy_to_user(data, drvdata->buf + *ppos, len)) {
+ dev_dbg(drvdata->dev, "%s: copy_to_user failed\n", __func__);
+ return -EFAULT;
+ }
+
+ *ppos += len;
+
+ dev_dbg(drvdata->dev, "%s: %d bytes copied, %d bytes left\n",
+ __func__, len, (int) (ETB_SIZE_WORDS * BYTES_PER_WORD - *ppos));
+
+ return len;
+}
+
+static int etb_release(struct inode *inode, struct file *file)
+{
+ drvdata->reading = false;
+
+ atomic_set(&drvdata->in_use, 0);
+
+ dev_dbg(drvdata->dev, "%s: released\n", __func__);
+
+ return 0;
+}
+
+static const struct file_operations etb_fops = {
+ .owner = THIS_MODULE,
+ .open = etb_open,
+ .read = etb_read,
+ .release = etb_release,
+};
+
+static struct miscdevice etb_misc = {
+ .name = "msm_etb",
+ .minor = MISC_DYNAMIC_MINOR,
+ .fops = &etb_fops,
+};
+
+static ssize_t etb_show_trigger_cntr(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val = drvdata->trigger_cntr;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etb_store_trigger_cntr(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ drvdata->trigger_cntr = val;
+ return size;
+}
+static DEVICE_ATTR(trigger_cntr, S_IRUGO | S_IWUSR, etb_show_trigger_cntr,
+ etb_store_trigger_cntr);
+
+static int __devinit etb_sysfs_init(void)
+{
+ int ret;
+
+ drvdata->kobj = kobject_create_and_add("etb", qdss_get_modulekobj());
+ if (!drvdata->kobj) {
+ dev_err(drvdata->dev, "failed to create ETB sysfs kobject\n");
+ ret = -ENOMEM;
+ goto err_create;
+ }
+
+ ret = sysfs_create_file(drvdata->kobj, &dev_attr_trigger_cntr.attr);
+ if (ret) {
+ dev_err(drvdata->dev, "failed to create ETB sysfs trigger_cntr"
+ " attribute\n");
+ goto err_file;
+ }
+
+ return 0;
+err_file:
+ kobject_put(drvdata->kobj);
+err_create:
+ return ret;
+}
+
+static void __devexit etb_sysfs_exit(void)
+{
+ sysfs_remove_file(drvdata->kobj, &dev_attr_trigger_cntr.attr);
+ kobject_put(drvdata->kobj);
+}
+
+static int __devinit etb_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct resource *res;
+
+ drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata) {
+ ret = -ENOMEM;
+ goto err_kzalloc_drvdata;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -EINVAL;
+ goto err_res;
+ }
+
+ drvdata->base = ioremap_nocache(res->start, resource_size(res));
+ if (!drvdata->base) {
+ ret = -EINVAL;
+ goto err_ioremap;
+ }
+
+ drvdata->dev = &pdev->dev;
+
+ spin_lock_init(&drvdata->spinlock);
+
+ drvdata->clk = clk_get(drvdata->dev, "core_clk");
+ if (IS_ERR(drvdata->clk)) {
+ ret = PTR_ERR(drvdata->clk);
+ goto err_clk_get;
+ }
+
+ ret = clk_set_rate(drvdata->clk, CORESIGHT_CLK_RATE_TRACE);
+ if (ret)
+ goto err_clk_rate;
+
+ ret = misc_register(&etb_misc);
+ if (ret)
+ goto err_misc;
+
+ drvdata->buf = kzalloc(ETB_SIZE_WORDS * BYTES_PER_WORD, GFP_KERNEL);
+ if (!drvdata->buf) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ etb_sysfs_init();
+
+ dev_info(drvdata->dev, "ETB initialized\n");
+ return 0;
+
+err_alloc:
+ misc_deregister(&etb_misc);
+err_misc:
+err_clk_rate:
+ clk_put(drvdata->clk);
+err_clk_get:
+ iounmap(drvdata->base);
+err_ioremap:
+err_res:
+ kfree(drvdata);
+err_kzalloc_drvdata:
+	dev_err(&pdev->dev, "ETB init failed\n");
+ return ret;
+}
+
+static int __devexit etb_remove(struct platform_device *pdev)
+{
+ if (drvdata->enabled)
+ etb_disable();
+ etb_sysfs_exit();
+ kfree(drvdata->buf);
+ misc_deregister(&etb_misc);
+ clk_put(drvdata->clk);
+ iounmap(drvdata->base);
+ kfree(drvdata);
+
+ return 0;
+}
+
+static struct of_device_id etb_match[] = {
+ {.compatible = "qcom,msm-etb"},
+ {}
+};
+
+static struct platform_driver etb_driver = {
+ .probe = etb_probe,
+ .remove = __devexit_p(etb_remove),
+ .driver = {
+ .name = "msm_etb",
+ .owner = THIS_MODULE,
+ .of_match_table = etb_match,
+ },
+};
+
+static int __init etb_init(void)
+{
+ return platform_driver_register(&etb_driver);
+}
+module_init(etb_init);
+
+static void __exit etb_exit(void)
+{
+ platform_driver_unregister(&etb_driver);
+}
+module_exit(etb_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CoreSight Embedded Trace Buffer driver");
diff --git a/drivers/coresight/coresight-etm.c b/drivers/coresight/coresight-etm.c
new file mode 100644
index 0000000..b3d2a16
--- /dev/null
+++ b/drivers/coresight/coresight-etm.c
@@ -0,0 +1,1646 @@
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/smp.h>
+#include <linux/wakelock.h>
+#include <linux/pm_qos.h>
+#include <linux/sysfs.h>
+#include <linux/stat.h>
+#include <linux/clk.h>
+#include <linux/coresight.h>
+#include <asm/sections.h>
+#include <mach/socinfo.h>
+
+#include "coresight-priv.h"
+
+#define etm_writel(drvdata, cpu, val, off) \
+ __raw_writel((val), drvdata->base + (SZ_4K * cpu) + off)
+#define etm_readl(drvdata, cpu, off) \
+ __raw_readl(drvdata->base + (SZ_4K * cpu) + off)
+
+/*
+ * Device registers:
+ * 0x000 - 0x2FC: Trace registers
+ * 0x300 - 0x314: Management registers
+ * 0x318 - 0xEFC: Trace registers
+ *
+ * Coresight registers
+ * 0xF00 - 0xF9C: Management registers
+ * 0xFA0 - 0xFA4: Management registers in PFTv1.0
+ * Trace registers in PFTv1.1
+ * 0xFA8 - 0xFFC: Management registers
+ */
+
+/* Trace registers (0x000-0x2FC) */
+#define ETMCR (0x000)
+#define ETMCCR (0x004)
+#define ETMTRIGGER (0x008)
+#define ETMSR (0x010)
+#define ETMSCR (0x014)
+#define ETMTSSCR (0x018)
+#define ETMTEEVR (0x020)
+#define ETMTECR1 (0x024)
+#define ETMFFLR (0x02C)
+#define ETMACVRn(n) (0x040 + (n * 4))
+#define ETMACTRn(n) (0x080 + (n * 4))
+#define ETMCNTRLDVRn(n) (0x140 + (n * 4))
+#define ETMCNTENRn(n) (0x150 + (n * 4))
+#define ETMCNTRLDEVRn(n) (0x160 + (n * 4))
+#define ETMCNTVRn(n) (0x170 + (n * 4))
+#define ETMSQ12EVR (0x180)
+#define ETMSQ21EVR (0x184)
+#define ETMSQ23EVR (0x188)
+#define ETMSQ31EVR (0x18C)
+#define ETMSQ32EVR (0x190)
+#define ETMSQ13EVR (0x194)
+#define ETMSQR (0x19C)
+#define ETMEXTOUTEVRn(n) (0x1A0 + (n * 4))
+#define ETMCIDCVRn(n) (0x1B0 + (n * 4))
+#define ETMCIDCMR (0x1BC)
+#define ETMIMPSPEC0 (0x1C0)
+#define ETMIMPSPEC1 (0x1C4)
+#define ETMIMPSPEC2 (0x1C8)
+#define ETMIMPSPEC3 (0x1CC)
+#define ETMIMPSPEC4 (0x1D0)
+#define ETMIMPSPEC5 (0x1D4)
+#define ETMIMPSPEC6 (0x1D8)
+#define ETMIMPSPEC7 (0x1DC)
+#define ETMSYNCFR (0x1E0)
+#define ETMIDR (0x1E4)
+#define ETMCCER (0x1E8)
+#define ETMEXTINSELR (0x1EC)
+#define ETMTESSEICR (0x1F0)
+#define ETMEIBCR (0x1F4)
+#define ETMTSEVR (0x1F8)
+#define ETMAUXCR (0x1FC)
+#define ETMTRACEIDR (0x200)
+#define ETMVMIDCVR (0x240)
+/* Management registers (0x300-0x314) */
+#define ETMOSLAR (0x300)
+#define ETMOSLSR (0x304)
+#define ETMOSSRR (0x308)
+#define ETMPDCR (0x310)
+#define ETMPDSR (0x314)
+
+#define ETM_MAX_ADDR_CMP (16)
+#define ETM_MAX_CNTR (4)
+#define ETM_MAX_CTXID_CMP (3)
+
+#define ETM_MODE_EXCLUDE BIT(0)
+#define ETM_MODE_CYCACC BIT(1)
+#define ETM_MODE_STALL BIT(2)
+#define ETM_MODE_TIMESTAMP BIT(3)
+#define ETM_MODE_CTXID BIT(4)
+#define ETM_MODE_ALL (0x1F)
+
+#define ETM_EVENT_MASK (0x1FFFF)
+#define ETM_SYNC_MASK (0xFFF)
+#define ETM_ALL_MASK (0xFFFFFFFF)
+
+#define ETM_SEQ_STATE_MAX_VAL (0x2)
+
+enum {
+ ETM_ADDR_TYPE_NONE,
+ ETM_ADDR_TYPE_SINGLE,
+ ETM_ADDR_TYPE_RANGE,
+ ETM_ADDR_TYPE_START,
+ ETM_ADDR_TYPE_STOP,
+};
+
+#define ETM_LOCK(cpu) \
+do { \
+ mb(); \
+ etm_writel(drvdata, cpu, 0x0, CORESIGHT_LAR); \
+} while (0)
+#define ETM_UNLOCK(cpu) \
+do { \
+ etm_writel(drvdata, cpu, CORESIGHT_UNLOCK, CORESIGHT_LAR); \
+ mb(); \
+} while (0)
+
+
+#ifdef MODULE_PARAM_PREFIX
+#undef MODULE_PARAM_PREFIX
+#endif
+#define MODULE_PARAM_PREFIX "coresight."
+
+#ifdef CONFIG_MSM_QDSS_ETM_DEFAULT_ENABLE
+static int etm_boot_enable = 1;
+#else
+static int etm_boot_enable;
+#endif
+module_param_named(
+ etm_boot_enable, etm_boot_enable, int, S_IRUGO
+);
+
+struct etm_drvdata {
+ void __iomem *base;
+ bool enabled;
+ struct wake_lock wake_lock;
+ struct pm_qos_request qos_req;
+ struct qdss_source *src;
+ struct mutex mutex;
+ struct device *dev;
+ struct kobject *kobj;
+ struct clk *clk;
+ uint8_t arch;
+ uint8_t nr_addr_cmp;
+ uint8_t nr_cntr;
+ uint8_t nr_ext_inp;
+ uint8_t nr_ext_out;
+ uint8_t nr_ctxid_cmp;
+ uint8_t reset;
+ uint32_t mode;
+ uint32_t ctrl;
+ uint32_t trigger_event;
+ uint32_t startstop_ctrl;
+ uint32_t enable_event;
+ uint32_t enable_ctrl1;
+ uint32_t fifofull_level;
+ uint8_t addr_idx;
+ uint32_t addr_val[ETM_MAX_ADDR_CMP];
+ uint32_t addr_acctype[ETM_MAX_ADDR_CMP];
+ uint32_t addr_type[ETM_MAX_ADDR_CMP];
+ uint8_t cntr_idx;
+ uint32_t cntr_rld_val[ETM_MAX_CNTR];
+ uint32_t cntr_event[ETM_MAX_CNTR];
+ uint32_t cntr_rld_event[ETM_MAX_CNTR];
+ uint32_t cntr_val[ETM_MAX_CNTR];
+ uint32_t seq_12_event;
+ uint32_t seq_21_event;
+ uint32_t seq_23_event;
+ uint32_t seq_31_event;
+ uint32_t seq_32_event;
+ uint32_t seq_13_event;
+ uint32_t seq_curr_state;
+ uint8_t ctxid_idx;
+ uint32_t ctxid_val[ETM_MAX_CTXID_CMP];
+ uint32_t ctxid_mask;
+ uint32_t sync_freq;
+ uint32_t timestamp_event;
+};
+
+static struct etm_drvdata *drvdata;
+
+
+/* The ETM clock is derived from the processor clock and gets enabled on a
+ * logical OR of the items below on Krait (pass2 onwards):
+ * 1. CPMR[ETMCLKEN] is 1
+ * 2. ETMCR[PD] is 0
+ * 3. ETMPDCR[PU] is 1
+ * 4. Reset is asserted (core or debug)
+ * 5. APB memory mapped requests (e.g. EDAP access)
+ *
+ * 1., 2. and 3. above are permanent enables whereas 4. and 5. are temporary
+ * enables.
+ *
+ * We rely on 5. to be able to access ETMCR, then use 2. above for the ETM
+ * clock vote in the driver, while the save-restore code uses 1. above
+ * for its vote.
+ */
+static void etm_set_pwrdwn(int cpu)
+{
+ uint32_t etmcr;
+
+ etmcr = etm_readl(drvdata, cpu, ETMCR);
+ etmcr |= BIT(0);
+ etm_writel(drvdata, cpu, etmcr, ETMCR);
+}
+
+static void etm_clr_pwrdwn(int cpu)
+{
+ uint32_t etmcr;
+
+ etmcr = etm_readl(drvdata, cpu, ETMCR);
+ etmcr &= ~BIT(0);
+ etm_writel(drvdata, cpu, etmcr, ETMCR);
+}
+
+static void etm_set_prog(int cpu)
+{
+ uint32_t etmcr;
+ int count;
+
+ etmcr = etm_readl(drvdata, cpu, ETMCR);
+ etmcr |= BIT(10);
+ etm_writel(drvdata, cpu, etmcr, ETMCR);
+
+ for (count = TIMEOUT_US; BVAL(etm_readl(drvdata, cpu, ETMSR), 1) != 1
+ && count > 0; count--)
+ udelay(1);
+ WARN(count == 0, "timeout while setting prog bit, ETMSR: %#x\n",
+ etm_readl(drvdata, cpu, ETMSR));
+}
+
+static void etm_clr_prog(int cpu)
+{
+ uint32_t etmcr;
+ int count;
+
+ etmcr = etm_readl(drvdata, cpu, ETMCR);
+ etmcr &= ~BIT(10);
+ etm_writel(drvdata, cpu, etmcr, ETMCR);
+
+ for (count = TIMEOUT_US; BVAL(etm_readl(drvdata, cpu, ETMSR), 1) != 0
+ && count > 0; count--)
+ udelay(1);
+ WARN(count == 0, "timeout while clearing prog bit, ETMSR: %#x\n",
+ etm_readl(drvdata, cpu, ETMSR));
+}
+
+static void __etm_enable(int cpu)
+{
+ int i;
+
+ ETM_UNLOCK(cpu);
+ /* Vote for ETM power/clock enable */
+ etm_clr_pwrdwn(cpu);
+ etm_set_prog(cpu);
+
+ etm_writel(drvdata, cpu, drvdata->ctrl | BIT(10), ETMCR);
+ etm_writel(drvdata, cpu, drvdata->trigger_event, ETMTRIGGER);
+ etm_writel(drvdata, cpu, drvdata->startstop_ctrl, ETMTSSCR);
+ etm_writel(drvdata, cpu, drvdata->enable_event, ETMTEEVR);
+ etm_writel(drvdata, cpu, drvdata->enable_ctrl1, ETMTECR1);
+ etm_writel(drvdata, cpu, drvdata->fifofull_level, ETMFFLR);
+ for (i = 0; i < drvdata->nr_addr_cmp; i++) {
+ etm_writel(drvdata, cpu, drvdata->addr_val[i], ETMACVRn(i));
+ etm_writel(drvdata, cpu, drvdata->addr_acctype[i], ETMACTRn(i));
+ }
+ for (i = 0; i < drvdata->nr_cntr; i++) {
+ etm_writel(drvdata, cpu, drvdata->cntr_rld_val[i],
+ ETMCNTRLDVRn(i));
+ etm_writel(drvdata, cpu, drvdata->cntr_event[i], ETMCNTENRn(i));
+ etm_writel(drvdata, cpu, drvdata->cntr_rld_event[i],
+ ETMCNTRLDEVRn(i));
+ etm_writel(drvdata, cpu, drvdata->cntr_val[i], ETMCNTVRn(i));
+ }
+ etm_writel(drvdata, cpu, drvdata->seq_12_event, ETMSQ12EVR);
+ etm_writel(drvdata, cpu, drvdata->seq_21_event, ETMSQ21EVR);
+ etm_writel(drvdata, cpu, drvdata->seq_23_event, ETMSQ23EVR);
+ etm_writel(drvdata, cpu, drvdata->seq_31_event, ETMSQ31EVR);
+ etm_writel(drvdata, cpu, drvdata->seq_32_event, ETMSQ32EVR);
+ etm_writel(drvdata, cpu, drvdata->seq_13_event, ETMSQ13EVR);
+ etm_writel(drvdata, cpu, drvdata->seq_curr_state, ETMSQR);
+ for (i = 0; i < drvdata->nr_ext_out; i++)
+ etm_writel(drvdata, cpu, 0x0000406F, ETMEXTOUTEVRn(i));
+ for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
+ etm_writel(drvdata, cpu, drvdata->ctxid_val[i], ETMCIDCVRn(i));
+ etm_writel(drvdata, cpu, drvdata->ctxid_mask, ETMCIDCMR);
+ etm_writel(drvdata, cpu, drvdata->sync_freq, ETMSYNCFR);
+ etm_writel(drvdata, cpu, 0x00000000, ETMEXTINSELR);
+ etm_writel(drvdata, cpu, drvdata->timestamp_event, ETMTSEVR);
+ etm_writel(drvdata, cpu, 0x00000000, ETMAUXCR);
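+	/* Assign each CPU's ETM a unique, non-zero trace source ID */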
+ etm_writel(drvdata, cpu, cpu+1, ETMTRACEIDR);
+ etm_writel(drvdata, cpu, 0x00000000, ETMVMIDCVR);
+
+ etm_clr_prog(cpu);
+ ETM_LOCK(cpu);
+}
+
+static int etm_enable(void)
+{
+ int ret, cpu;
+
+ if (drvdata->enabled) {
+ dev_err(drvdata->dev, "ETM tracing already enabled\n");
+ ret = -EPERM;
+ goto err;
+ }
+
+ wake_lock(&drvdata->wake_lock);
+ /* 1. causes all online cpus to come out of idle PC
+ * 2. prevents idle PC until save restore flag is enabled atomically
+ *
+ * we rely on the user to prevent hotplug on/off racing with this
+ * operation and to ensure cores where trace is expected to be turned
+ * on are already hotplugged on
+ */
+ pm_qos_update_request(&drvdata->qos_req, 0);
+
+ ret = clk_prepare_enable(drvdata->clk);
+ if (ret)
+ goto err_clk;
+
+ ret = qdss_enable(drvdata->src);
+ if (ret)
+ goto err_qdss;
+
+ for_each_online_cpu(cpu)
+ __etm_enable(cpu);
+
+ drvdata->enabled = true;
+
+ pm_qos_update_request(&drvdata->qos_req, PM_QOS_DEFAULT_VALUE);
+ wake_unlock(&drvdata->wake_lock);
+
+ dev_info(drvdata->dev, "ETM tracing enabled\n");
+ return 0;
+
+err_qdss:
+ clk_disable_unprepare(drvdata->clk);
+err_clk:
+ pm_qos_update_request(&drvdata->qos_req, PM_QOS_DEFAULT_VALUE);
+ wake_unlock(&drvdata->wake_lock);
+err:
+ return ret;
+}
+
+static void __etm_disable(int cpu)
+{
+ ETM_UNLOCK(cpu);
+ etm_set_prog(cpu);
+
+ /* program trace enable to low by using always false event */
+ etm_writel(drvdata, cpu, 0x6F | BIT(14), ETMTEEVR);
+
+ /* Vote for ETM power/clock disable */
+ etm_set_pwrdwn(cpu);
+ ETM_LOCK(cpu);
+}
+
+static int etm_disable(void)
+{
+ int ret, cpu;
+
+ if (!drvdata->enabled) {
+ dev_err(drvdata->dev, "ETM tracing already disabled\n");
+ ret = -EPERM;
+ goto err;
+ }
+
+ wake_lock(&drvdata->wake_lock);
+ /* 1. causes all online cpus to come out of idle PC
+ * 2. prevents idle PC until save restore flag is disabled atomically
+ *
+ * we rely on the user to prevent hotplug on/off racing with this
+ * operation and to ensure cores where trace is expected to be turned
+ * off are already hotplugged on
+ */
+ pm_qos_update_request(&drvdata->qos_req, 0);
+
+ for_each_online_cpu(cpu)
+ __etm_disable(cpu);
+
+ drvdata->enabled = false;
+
+ qdss_disable(drvdata->src);
+
+ clk_disable_unprepare(drvdata->clk);
+
+ pm_qos_update_request(&drvdata->qos_req, PM_QOS_DEFAULT_VALUE);
+ wake_unlock(&drvdata->wake_lock);
+
+ dev_info(drvdata->dev, "ETM tracing disabled\n");
+ return 0;
+err:
+ return ret;
+}
+
+/* Memory mapped writes to clear the OS lock are not supported */
+static void etm_os_unlock(void *unused)
+{
+ unsigned long value = 0x0;
+
+ asm("mcr p14, 1, %0, c1, c0, 4\n\t" : : "r" (value));
+ asm("isb\n\t");
+}
+
+static ssize_t etm_show_enabled(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val = drvdata->enabled;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_enabled(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret = 0;
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ mutex_lock(&drvdata->mutex);
+ if (val)
+ ret = etm_enable();
+ else
+ ret = etm_disable();
+ mutex_unlock(&drvdata->mutex);
+
+ if (ret)
+ return ret;
+ return size;
+}
+static DEVICE_ATTR(enabled, S_IRUGO | S_IWUSR, etm_show_enabled,
+ etm_store_enabled);
+
+static ssize_t etm_show_nr_addr_cmp(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val = drvdata->nr_addr_cmp;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR(nr_addr_cmp, S_IRUGO, etm_show_nr_addr_cmp, NULL);
+
+static ssize_t etm_show_nr_cntr(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val = drvdata->nr_cntr;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR(nr_cntr, S_IRUGO, etm_show_nr_cntr, NULL);
+
+static ssize_t etm_show_nr_ctxid_cmp(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val = drvdata->nr_ctxid_cmp;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR(nr_ctxid_cmp, S_IRUGO, etm_show_nr_ctxid_cmp, NULL);
+
+static ssize_t etm_show_reset(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val = drvdata->reset;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+/* Reset to trace everything i.e. exclude nothing. */
+static ssize_t etm_store_reset(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t size)
+{
+ int i;
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ mutex_lock(&drvdata->mutex);
+ if (val) {
+ drvdata->mode = ETM_MODE_EXCLUDE;
+ drvdata->ctrl = 0x0;
+ if (cpu_is_krait_v1()) {
+ drvdata->mode |= ETM_MODE_CYCACC;
+ drvdata->ctrl |= BIT(12);
+ }
+ drvdata->trigger_event = 0x406F;
+ drvdata->startstop_ctrl = 0x0;
+ drvdata->enable_event = 0x6F;
+ drvdata->enable_ctrl1 = 0x1000000;
+ drvdata->fifofull_level = 0x28;
+ drvdata->addr_idx = 0x0;
+ for (i = 0; i < drvdata->nr_addr_cmp; i++) {
+ drvdata->addr_val[i] = 0x0;
+ drvdata->addr_acctype[i] = 0x0;
+ drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE;
+ }
+ drvdata->cntr_idx = 0x0;
+ for (i = 0; i < drvdata->nr_cntr; i++) {
+ drvdata->cntr_rld_val[i] = 0x0;
+ drvdata->cntr_event[i] = 0x406F;
+ drvdata->cntr_rld_event[i] = 0x406F;
+ drvdata->cntr_val[i] = 0x0;
+ }
+ drvdata->seq_12_event = 0x406F;
+ drvdata->seq_21_event = 0x406F;
+ drvdata->seq_23_event = 0x406F;
+ drvdata->seq_31_event = 0x406F;
+ drvdata->seq_32_event = 0x406F;
+ drvdata->seq_13_event = 0x406F;
+ drvdata->seq_curr_state = 0x0;
+ drvdata->ctxid_idx = 0x0;
+ for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
+ drvdata->ctxid_val[i] = 0x0;
+ drvdata->ctxid_mask = 0x0;
+ drvdata->sync_freq = 0x80;
+ drvdata->timestamp_event = 0x406F;
+ }
+ mutex_unlock(&drvdata->mutex);
+ return size;
+}
+static DEVICE_ATTR(reset, S_IRUGO | S_IWUSR, etm_show_reset, etm_store_reset);
+
+static ssize_t etm_show_mode(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val = drvdata->mode;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_mode(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ mutex_lock(&drvdata->mutex);
+ drvdata->mode = val & ETM_MODE_ALL;
+
+ if (drvdata->mode & ETM_MODE_EXCLUDE)
+ drvdata->enable_ctrl1 |= BIT(24);
+ else
+ drvdata->enable_ctrl1 &= ~BIT(24);
+
+ if (drvdata->mode & ETM_MODE_CYCACC)
+ drvdata->ctrl |= BIT(12);
+ else
+ drvdata->ctrl &= ~BIT(12);
+
+ if (drvdata->mode & ETM_MODE_STALL)
+ drvdata->ctrl |= BIT(7);
+ else
+ drvdata->ctrl &= ~BIT(7);
+
+ if (drvdata->mode & ETM_MODE_TIMESTAMP)
+ drvdata->ctrl |= BIT(28);
+ else
+ drvdata->ctrl &= ~BIT(28);
+ if (drvdata->mode & ETM_MODE_CTXID)
+ drvdata->ctrl |= (BIT(14) | BIT(15));
+ else
+ drvdata->ctrl &= ~(BIT(14) | BIT(15));
+ mutex_unlock(&drvdata->mutex);
+
+ return size;
+}
+static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, etm_show_mode, etm_store_mode);
+
+static ssize_t etm_show_trigger_event(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val = drvdata->trigger_event;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_trigger_event(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ drvdata->trigger_event = val & ETM_EVENT_MASK;
+ return size;
+}
+static DEVICE_ATTR(trigger_event, S_IRUGO | S_IWUSR, etm_show_trigger_event,
+ etm_store_trigger_event);
+
+static ssize_t etm_show_enable_event(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val = drvdata->enable_event;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_enable_event(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ drvdata->enable_event = val & ETM_EVENT_MASK;
+ return size;
+}
+static DEVICE_ATTR(enable_event, S_IRUGO | S_IWUSR, etm_show_enable_event,
+ etm_store_enable_event);
+
+static ssize_t etm_show_fifofull_level(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val = drvdata->fifofull_level;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_fifofull_level(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ drvdata->fifofull_level = val;
+ return size;
+}
+static DEVICE_ATTR(fifofull_level, S_IRUGO | S_IWUSR, etm_show_fifofull_level,
+ etm_store_fifofull_level);
+
+static ssize_t etm_show_addr_idx(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val = drvdata->addr_idx;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_addr_idx(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+ if (val >= drvdata->nr_addr_cmp)
+ return -EINVAL;
+
+ /* Use mutex to ensure index doesn't change while it gets dereferenced
+ * multiple times within a mutex block elsewhere.
+ */
+ mutex_lock(&drvdata->mutex);
+ drvdata->addr_idx = val;
+ mutex_unlock(&drvdata->mutex);
+ return size;
+}
+static DEVICE_ATTR(addr_idx, S_IRUGO | S_IWUSR, etm_show_addr_idx,
+ etm_store_addr_idx);
+
+static ssize_t etm_show_addr_single(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ uint8_t idx;
+
+ mutex_lock(&drvdata->mutex);
+ idx = drvdata->addr_idx;
+ if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
+ mutex_unlock(&drvdata->mutex);
+ return -EPERM;
+ }
+
+ val = drvdata->addr_val[idx];
+ mutex_unlock(&drvdata->mutex);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_addr_single(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ uint8_t idx;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ mutex_lock(&drvdata->mutex);
+ idx = drvdata->addr_idx;
+ if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
+ mutex_unlock(&drvdata->mutex);
+ return -EPERM;
+ }
+
+ drvdata->addr_val[idx] = val;
+ drvdata->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
+ mutex_unlock(&drvdata->mutex);
+ return size;
+}
+static DEVICE_ATTR(addr_single, S_IRUGO | S_IWUSR, etm_show_addr_single,
+ etm_store_addr_single);
+
+static ssize_t etm_show_addr_range(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val1, val2;
+ uint8_t idx;
+
+ mutex_lock(&drvdata->mutex);
+ idx = drvdata->addr_idx;
+ if (idx % 2 != 0) {
+ mutex_unlock(&drvdata->mutex);
+ return -EPERM;
+ }
+ if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
+ drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
+ (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
+ drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
+ mutex_unlock(&drvdata->mutex);
+ return -EPERM;
+ }
+
+ val1 = drvdata->addr_val[idx];
+ val2 = drvdata->addr_val[idx + 1];
+ mutex_unlock(&drvdata->mutex);
+ return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
+}
+
+static ssize_t etm_store_addr_range(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val1, val2;
+ uint8_t idx;
+
+ if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+ return -EINVAL;
+ /* lower address comparator cannot have a higher address value */
+ if (val1 > val2)
+ return -EINVAL;
+
+ mutex_lock(&drvdata->mutex);
+ idx = drvdata->addr_idx;
+ if (idx % 2 != 0) {
+ mutex_unlock(&drvdata->mutex);
+ return -EPERM;
+ }
+ if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
+ drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
+ (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
+ drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
+ mutex_unlock(&drvdata->mutex);
+ return -EPERM;
+ }
+
+ drvdata->addr_val[idx] = val1;
+ drvdata->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
+ drvdata->addr_val[idx + 1] = val2;
+ drvdata->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
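+	/* Include this address range comparator pair in the trace enable control */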
+ drvdata->enable_ctrl1 |= (1 << (idx/2));
+ mutex_unlock(&drvdata->mutex);
+ return size;
+}
+static DEVICE_ATTR(addr_range, S_IRUGO | S_IWUSR, etm_show_addr_range,
+ etm_store_addr_range);
+
+static ssize_t etm_show_addr_start(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ uint8_t idx;
+
+ mutex_lock(&drvdata->mutex);
+ idx = drvdata->addr_idx;
+ if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
+ mutex_unlock(&drvdata->mutex);
+ return -EPERM;
+ }
+
+ val = drvdata->addr_val[idx];
+ mutex_unlock(&drvdata->mutex);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_addr_start(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ uint8_t idx;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ mutex_lock(&drvdata->mutex);
+ idx = drvdata->addr_idx;
+ if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
+ mutex_unlock(&drvdata->mutex);
+ return -EPERM;
+ }
+
+ drvdata->addr_val[idx] = val;
+ drvdata->addr_type[idx] = ETM_ADDR_TYPE_START;
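+	/*
+	 * Start addresses occupy the lower half of ETMTSSCR; BIT(25) in
+	 * ETMTECR1 hands trace enable over to the start/stop logic.
+	 */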
+ drvdata->startstop_ctrl |= (1 << idx);
+ drvdata->enable_ctrl1 |= BIT(25);
+ mutex_unlock(&drvdata->mutex);
+ return size;
+}
+static DEVICE_ATTR(addr_start, S_IRUGO | S_IWUSR, etm_show_addr_start,
+ etm_store_addr_start);
+
+static ssize_t etm_show_addr_stop(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ uint8_t idx;
+
+ mutex_lock(&drvdata->mutex);
+ idx = drvdata->addr_idx;
+ if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
+ mutex_unlock(&drvdata->mutex);
+ return -EPERM;
+ }
+
+ val = drvdata->addr_val[idx];
+ mutex_unlock(&drvdata->mutex);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_addr_stop(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ uint8_t idx;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ mutex_lock(&drvdata->mutex);
+ idx = drvdata->addr_idx;
+ if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
+ mutex_unlock(&drvdata->mutex);
+ return -EPERM;
+ }
+
+ drvdata->addr_val[idx] = val;
+ drvdata->addr_type[idx] = ETM_ADDR_TYPE_STOP;
+ drvdata->startstop_ctrl |= (1 << (idx + 16));
+ drvdata->enable_ctrl1 |= BIT(25);
+ mutex_unlock(&drvdata->mutex);
+ return size;
+}
+static DEVICE_ATTR(addr_stop, S_IRUGO | S_IWUSR, etm_show_addr_stop,
+ etm_store_addr_stop);
+
+static ssize_t etm_show_addr_acctype(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+
+ mutex_lock(&drvdata->mutex);
+ val = drvdata->addr_acctype[drvdata->addr_idx];
+ mutex_unlock(&drvdata->mutex);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_addr_acctype(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ mutex_lock(&drvdata->mutex);
+ drvdata->addr_acctype[drvdata->addr_idx] = val;
+ mutex_unlock(&drvdata->mutex);
+ return size;
+}
+static DEVICE_ATTR(addr_acctype, S_IRUGO | S_IWUSR, etm_show_addr_acctype,
+ etm_store_addr_acctype);
+
+static ssize_t etm_show_cntr_idx(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+	unsigned long val = drvdata->cntr_idx;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_cntr_idx(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+ if (val >= drvdata->nr_cntr)
+ return -EINVAL;
+
+ /* Use mutex to ensure index doesn't change while it gets dereferenced
+ * multiple times within a mutex block elsewhere.
+ */
+ mutex_lock(&drvdata->mutex);
+ drvdata->cntr_idx = val;
+ mutex_unlock(&drvdata->mutex);
+ return size;
+}
+static DEVICE_ATTR(cntr_idx, S_IRUGO | S_IWUSR, etm_show_cntr_idx,
+ etm_store_cntr_idx);
+
+static ssize_t etm_show_cntr_rld_val(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ mutex_lock(&drvdata->mutex);
+ val = drvdata->cntr_rld_val[drvdata->cntr_idx];
+ mutex_unlock(&drvdata->mutex);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_cntr_rld_val(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ mutex_lock(&drvdata->mutex);
+ drvdata->cntr_rld_val[drvdata->cntr_idx] = val;
+ mutex_unlock(&drvdata->mutex);
+ return size;
+}
+static DEVICE_ATTR(cntr_rld_val, S_IRUGO | S_IWUSR, etm_show_cntr_rld_val,
+ etm_store_cntr_rld_val);
+
+static ssize_t etm_show_cntr_event(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+
+ mutex_lock(&drvdata->mutex);
+ val = drvdata->cntr_event[drvdata->cntr_idx];
+ mutex_unlock(&drvdata->mutex);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_cntr_event(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ mutex_lock(&drvdata->mutex);
+ drvdata->cntr_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK;
+ mutex_unlock(&drvdata->mutex);
+ return size;
+}
+static DEVICE_ATTR(cntr_event, S_IRUGO | S_IWUSR, etm_show_cntr_event,
+ etm_store_cntr_event);
+
+static ssize_t etm_show_cntr_rld_event(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+
+ mutex_lock(&drvdata->mutex);
+ val = drvdata->cntr_rld_event[drvdata->cntr_idx];
+ mutex_unlock(&drvdata->mutex);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_cntr_rld_event(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ mutex_lock(&drvdata->mutex);
+ drvdata->cntr_rld_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK;
+ mutex_unlock(&drvdata->mutex);
+ return size;
+}
+static DEVICE_ATTR(cntr_rld_event, S_IRUGO | S_IWUSR, etm_show_cntr_rld_event,
+ etm_store_cntr_rld_event);
+
+static ssize_t etm_show_cntr_val(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+
+ mutex_lock(&drvdata->mutex);
+ val = drvdata->cntr_val[drvdata->cntr_idx];
+ mutex_unlock(&drvdata->mutex);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_cntr_val(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ mutex_lock(&drvdata->mutex);
+ drvdata->cntr_val[drvdata->cntr_idx] = val;
+ mutex_unlock(&drvdata->mutex);
+ return size;
+}
+static DEVICE_ATTR(cntr_val, S_IRUGO | S_IWUSR, etm_show_cntr_val,
+ etm_store_cntr_val);
+
+static ssize_t etm_show_seq_12_event(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val = drvdata->seq_12_event;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_seq_12_event(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ drvdata->seq_12_event = val & ETM_EVENT_MASK;
+ return size;
+}
+static DEVICE_ATTR(seq_12_event, S_IRUGO | S_IWUSR, etm_show_seq_12_event,
+ etm_store_seq_12_event);
+
+static ssize_t etm_show_seq_21_event(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val = drvdata->seq_21_event;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_seq_21_event(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ drvdata->seq_21_event = val & ETM_EVENT_MASK;
+ return size;
+}
+static DEVICE_ATTR(seq_21_event, S_IRUGO | S_IWUSR, etm_show_seq_21_event,
+ etm_store_seq_21_event);
+
+static ssize_t etm_show_seq_23_event(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val = drvdata->seq_23_event;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_seq_23_event(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ drvdata->seq_23_event = val & ETM_EVENT_MASK;
+ return size;
+}
+static DEVICE_ATTR(seq_23_event, S_IRUGO | S_IWUSR, etm_show_seq_23_event,
+ etm_store_seq_23_event);
+
+static ssize_t etm_show_seq_31_event(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val = drvdata->seq_31_event;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_seq_31_event(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ drvdata->seq_31_event = val & ETM_EVENT_MASK;
+ return size;
+}
+static DEVICE_ATTR(seq_31_event, S_IRUGO | S_IWUSR, etm_show_seq_31_event,
+ etm_store_seq_31_event);
+
+static ssize_t etm_show_seq_32_event(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val = drvdata->seq_32_event;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_seq_32_event(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ drvdata->seq_32_event = val & ETM_EVENT_MASK;
+ return size;
+}
+static DEVICE_ATTR(seq_32_event, S_IRUGO | S_IWUSR, etm_show_seq_32_event,
+ etm_store_seq_32_event);
+
+static ssize_t etm_show_seq_13_event(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val = drvdata->seq_13_event;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_seq_13_event(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ drvdata->seq_13_event = val & ETM_EVENT_MASK;
+ return size;
+}
+static DEVICE_ATTR(seq_13_event, S_IRUGO | S_IWUSR, etm_show_seq_13_event,
+ etm_store_seq_13_event);
+
+static ssize_t etm_show_seq_curr_state(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val = drvdata->seq_curr_state;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_seq_curr_state(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+ if (val > ETM_SEQ_STATE_MAX_VAL)
+ return -EINVAL;
+
+ drvdata->seq_curr_state = val;
+ return size;
+}
+static DEVICE_ATTR(seq_curr_state, S_IRUGO | S_IWUSR, etm_show_seq_curr_state,
+ etm_store_seq_curr_state);
+
+static ssize_t etm_show_ctxid_idx(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val = drvdata->ctxid_idx;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_ctxid_idx(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+ if (val >= drvdata->nr_ctxid_cmp)
+ return -EINVAL;
+
+ /* Use mutex to ensure index doesn't change while it gets dereferenced
+ * multiple times within a mutex block elsewhere.
+ */
+ mutex_lock(&drvdata->mutex);
+ drvdata->ctxid_idx = val;
+ mutex_unlock(&drvdata->mutex);
+ return size;
+}
+static DEVICE_ATTR(ctxid_idx, S_IRUGO | S_IWUSR, etm_show_ctxid_idx,
+ etm_store_ctxid_idx);
+
+static ssize_t etm_show_ctxid_val(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+
+ mutex_lock(&drvdata->mutex);
+ val = drvdata->ctxid_val[drvdata->ctxid_idx];
+ mutex_unlock(&drvdata->mutex);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_ctxid_val(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ mutex_lock(&drvdata->mutex);
+ drvdata->ctxid_val[drvdata->ctxid_idx] = val;
+ mutex_unlock(&drvdata->mutex);
+ return size;
+}
+static DEVICE_ATTR(ctxid_val, S_IRUGO | S_IWUSR, etm_show_ctxid_val,
+ etm_store_ctxid_val);
+
+static ssize_t etm_show_ctxid_mask(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val = drvdata->ctxid_mask;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_ctxid_mask(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ drvdata->ctxid_mask = val;
+ return size;
+}
+static DEVICE_ATTR(ctxid_mask, S_IRUGO | S_IWUSR, etm_show_ctxid_mask,
+ etm_store_ctxid_mask);
+
+static ssize_t etm_show_sync_freq(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val = drvdata->sync_freq;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_sync_freq(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ drvdata->sync_freq = val & ETM_SYNC_MASK;
+ return size;
+}
+static DEVICE_ATTR(sync_freq, S_IRUGO | S_IWUSR, etm_show_sync_freq,
+ etm_store_sync_freq);
+
+static ssize_t etm_show_timestamp_event(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val = drvdata->timestamp_event;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t etm_store_timestamp_event(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ drvdata->timestamp_event = val & ETM_EVENT_MASK;
+ return size;
+}
+static DEVICE_ATTR(timestamp_event, S_IRUGO | S_IWUSR, etm_show_timestamp_event,
+ etm_store_timestamp_event);
+
+static struct attribute *etm_attrs[] = {
+ &dev_attr_nr_addr_cmp.attr,
+ &dev_attr_nr_cntr.attr,
+ &dev_attr_nr_ctxid_cmp.attr,
+ &dev_attr_reset.attr,
+ &dev_attr_mode.attr,
+ &dev_attr_trigger_event.attr,
+ &dev_attr_enable_event.attr,
+ &dev_attr_fifofull_level.attr,
+ &dev_attr_addr_idx.attr,
+ &dev_attr_addr_single.attr,
+ &dev_attr_addr_range.attr,
+ &dev_attr_addr_start.attr,
+ &dev_attr_addr_stop.attr,
+ &dev_attr_addr_acctype.attr,
+ &dev_attr_cntr_idx.attr,
+ &dev_attr_cntr_rld_val.attr,
+ &dev_attr_cntr_event.attr,
+ &dev_attr_cntr_rld_event.attr,
+ &dev_attr_cntr_val.attr,
+ &dev_attr_seq_12_event.attr,
+ &dev_attr_seq_21_event.attr,
+ &dev_attr_seq_23_event.attr,
+ &dev_attr_seq_31_event.attr,
+ &dev_attr_seq_32_event.attr,
+ &dev_attr_seq_13_event.attr,
+ &dev_attr_seq_curr_state.attr,
+ &dev_attr_ctxid_idx.attr,
+ &dev_attr_ctxid_val.attr,
+ &dev_attr_ctxid_mask.attr,
+ &dev_attr_sync_freq.attr,
+ &dev_attr_timestamp_event.attr,
+ NULL,
+};
+
+static struct attribute_group etm_attr_grp = {
+ .attrs = etm_attrs,
+};
+
+static int __devinit etm_sysfs_init(void)
+{
+ int ret;
+
+ drvdata->kobj = kobject_create_and_add("etm", qdss_get_modulekobj());
+ if (!drvdata->kobj) {
+ dev_err(drvdata->dev, "failed to create ETM sysfs kobject\n");
+ ret = -ENOMEM;
+ goto err_create;
+ }
+
+ ret = sysfs_create_file(drvdata->kobj, &dev_attr_enabled.attr);
+ if (ret) {
+ dev_err(drvdata->dev, "failed to create ETM sysfs enabled"
+ " attribute\n");
+ goto err_file;
+ }
+
+ if (sysfs_create_group(drvdata->kobj, &etm_attr_grp))
+ dev_err(drvdata->dev, "failed to create ETM sysfs group\n");
+
+ return 0;
+err_file:
+ kobject_put(drvdata->kobj);
+err_create:
+ return ret;
+}
+
+static void __devexit etm_sysfs_exit(void)
+{
+ sysfs_remove_group(drvdata->kobj, &etm_attr_grp);
+ sysfs_remove_file(drvdata->kobj, &dev_attr_enabled.attr);
+ kobject_put(drvdata->kobj);
+}
+
+static bool __devinit etm_arch_supported(uint8_t arch)
+{
+ switch (arch) {
+ case PFT_ARCH_V1_1:
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+static int __devinit etm_init_arch_data(void)
+{
+ int ret;
+ /* use cpu 0 for setup */
+ int cpu = 0;
+ uint32_t etmidr;
+ uint32_t etmccr;
+
+ /* Unlock OS lock first to allow memory mapped reads and writes */
+ etm_os_unlock(NULL);
+ smp_call_function(etm_os_unlock, NULL, 1);
+ ETM_UNLOCK(cpu);
+ /* Vote for ETM power/clock enable */
+ etm_clr_pwrdwn(cpu);
+ /* Set prog bit. It will be set from reset but this is included to
+ * ensure it is set
+ */
+ etm_set_prog(cpu);
+
+ /* find all capabilities */
+ etmidr = etm_readl(drvdata, cpu, ETMIDR);
+ drvdata->arch = BMVAL(etmidr, 4, 11);
+ if (etm_arch_supported(drvdata->arch) == false) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ etmccr = etm_readl(drvdata, cpu, ETMCCR);
+ drvdata->nr_addr_cmp = BMVAL(etmccr, 0, 3) * 2;
+ drvdata->nr_cntr = BMVAL(etmccr, 13, 15);
+ drvdata->nr_ext_inp = BMVAL(etmccr, 17, 19);
+ drvdata->nr_ext_out = BMVAL(etmccr, 20, 22);
+ drvdata->nr_ctxid_cmp = BMVAL(etmccr, 24, 25);
+
+ /* Vote for ETM power/clock disable */
+ etm_set_pwrdwn(cpu);
+ ETM_LOCK(cpu);
+
+ return 0;
+err:
+ return ret;
+}
+
+static void __devinit etm_init_default_data(void)
+{
+ int i;
+
+ drvdata->trigger_event = 0x406F;
+ drvdata->enable_event = 0x6F;
+ drvdata->enable_ctrl1 = 0x1;
+ drvdata->fifofull_level = 0x28;
+ if (drvdata->nr_addr_cmp >= 2) {
+ drvdata->addr_val[0] = (uint32_t) _stext;
+ drvdata->addr_val[1] = (uint32_t) _etext;
+ drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE;
+ drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
+ }
+ for (i = 0; i < drvdata->nr_cntr; i++) {
+ drvdata->cntr_event[i] = 0x406F;
+ drvdata->cntr_rld_event[i] = 0x406F;
+ }
+ drvdata->seq_12_event = 0x406F;
+ drvdata->seq_21_event = 0x406F;
+ drvdata->seq_23_event = 0x406F;
+ drvdata->seq_31_event = 0x406F;
+ drvdata->seq_32_event = 0x406F;
+ drvdata->seq_13_event = 0x406F;
+ drvdata->sync_freq = 0x80;
+ drvdata->timestamp_event = 0x406F;
+
+ /* Overrides for Krait pass1 */
+ if (cpu_is_krait_v1()) {
+ /* Krait pass1 doesn't support include filtering and non-cycle
+ * accurate tracing
+ */
+ drvdata->mode = (ETM_MODE_EXCLUDE | ETM_MODE_CYCACC);
+ drvdata->ctrl = 0x1000;
+ drvdata->enable_ctrl1 = 0x1000000;
+ for (i = 0; i < drvdata->nr_addr_cmp; i++) {
+ drvdata->addr_val[i] = 0x0;
+ drvdata->addr_acctype[i] = 0x0;
+ drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE;
+ }
+ }
+}
+
+static int __devinit etm_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct resource *res;
+
+ drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata) {
+ ret = -ENOMEM;
+ goto err_kzalloc_drvdata;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -EINVAL;
+ goto err_res;
+ }
+
+ drvdata->base = ioremap_nocache(res->start, resource_size(res));
+ if (!drvdata->base) {
+ ret = -EINVAL;
+ goto err_ioremap;
+ }
+
+ drvdata->dev = &pdev->dev;
+
+ mutex_init(&drvdata->mutex);
+ wake_lock_init(&drvdata->wake_lock, WAKE_LOCK_SUSPEND, "msm_etm");
+ pm_qos_add_request(&drvdata->qos_req, PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
+ drvdata->src = qdss_get("msm_etm");
+ if (IS_ERR(drvdata->src)) {
+ ret = PTR_ERR(drvdata->src);
+ goto err_qdssget;
+ }
+
+ drvdata->clk = clk_get(drvdata->dev, "core_clk");
+ if (IS_ERR(drvdata->clk)) {
+ ret = PTR_ERR(drvdata->clk);
+ goto err_clk_get;
+ }
+
+ ret = clk_set_rate(drvdata->clk, CORESIGHT_CLK_RATE_TRACE);
+ if (ret)
+ goto err_clk_rate;
+
+ ret = clk_prepare_enable(drvdata->clk);
+ if (ret)
+ goto err_clk_enable;
+
+ ret = etm_init_arch_data();
+ if (ret)
+ goto err_arch;
+
+ etm_init_default_data();
+
+ ret = etm_sysfs_init();
+ if (ret)
+ goto err_sysfs;
+
+ drvdata->enabled = false;
+
+ clk_disable_unprepare(drvdata->clk);
+
+ dev_info(drvdata->dev, "ETM initialized\n");
+
+ if (etm_boot_enable)
+ etm_enable();
+
+ return 0;
+
+err_sysfs:
+err_arch:
+ clk_disable_unprepare(drvdata->clk);
+err_clk_enable:
+err_clk_rate:
+ clk_put(drvdata->clk);
+err_clk_get:
+ qdss_put(drvdata->src);
+err_qdssget:
+ pm_qos_remove_request(&drvdata->qos_req);
+ wake_lock_destroy(&drvdata->wake_lock);
+ mutex_destroy(&drvdata->mutex);
+ iounmap(drvdata->base);
+err_ioremap:
+err_res:
+ kfree(drvdata);
+err_kzalloc_drvdata:
+ dev_err(drvdata->dev, "ETM init failed\n");
+ return ret;
+}
+
+static int __devexit etm_remove(struct platform_device *pdev)
+{
+ if (drvdata->enabled)
+ etm_disable();
+ etm_sysfs_exit();
+ clk_put(drvdata->clk);
+ qdss_put(drvdata->src);
+ pm_qos_remove_request(&drvdata->qos_req);
+ wake_lock_destroy(&drvdata->wake_lock);
+ mutex_destroy(&drvdata->mutex);
+ iounmap(drvdata->base);
+ kfree(drvdata);
+
+ return 0;
+}
+
+static struct of_device_id etm_match[] = {
+ {.compatible = "qcom,msm-etm"},
+ {}
+};
+
+static struct platform_driver etm_driver = {
+ .probe = etm_probe,
+ .remove = __devexit_p(etm_remove),
+ .driver = {
+ .name = "msm_etm",
+ .owner = THIS_MODULE,
+ .of_match_table = etm_match,
+ },
+};
+
+int __init etm_init(void)
+{
+ return platform_driver_register(&etm_driver);
+}
+module_init(etm_init);
+
+void __exit etm_exit(void)
+{
+ platform_driver_unregister(&etm_driver);
+}
+module_exit(etm_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CoreSight Program Flow Trace driver");
diff --git a/drivers/coresight/coresight-funnel.c b/drivers/coresight/coresight-funnel.c
new file mode 100644
index 0000000..79a27f4
--- /dev/null
+++ b/drivers/coresight/coresight-funnel.c
@@ -0,0 +1,275 @@
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/coresight.h>
+
+#include "coresight-priv.h"
+
+#define funnel_writel(drvdata, id, val, off) \
+ __raw_writel((val), drvdata->base + (SZ_4K * id) + off)
+#define funnel_readl(drvdata, id, off) \
+ __raw_readl(drvdata->base + (SZ_4K * id) + off)
+
+#define FUNNEL_FUNCTL (0x000)
+#define FUNNEL_PRICTL (0x004)
+#define FUNNEL_ITATBDATA0 (0xEEC)
+#define FUNNEL_ITATBCTR2 (0xEF0)
+#define FUNNEL_ITATBCTR1 (0xEF4)
+#define FUNNEL_ITATBCTR0 (0xEF8)
+
+
+#define FUNNEL_LOCK(id) \
+do { \
+ mb(); \
+ funnel_writel(drvdata, id, 0x0, CORESIGHT_LAR); \
+} while (0)
+#define FUNNEL_UNLOCK(id) \
+do { \
+ funnel_writel(drvdata, id, CORESIGHT_UNLOCK, CORESIGHT_LAR); \
+ mb(); \
+} while (0)
+
+#define FUNNEL_HOLDTIME_MASK (0xF00)
+#define FUNNEL_HOLDTIME_SHFT (0x8)
+#define FUNNEL_HOLDTIME (0x7 << FUNNEL_HOLDTIME_SHFT)
+
+struct funnel_drvdata {
+ void __iomem *base;
+ bool enabled;
+ struct mutex mutex;
+ struct device *dev;
+ struct kobject *kobj;
+ struct clk *clk;
+ uint32_t priority;
+};
+
+static struct funnel_drvdata *drvdata;
+
+static void __funnel_enable(uint8_t id, uint32_t port_mask)
+{
+ uint32_t functl;
+
+ FUNNEL_UNLOCK(id);
+
+ functl = funnel_readl(drvdata, id, FUNNEL_FUNCTL);
+ functl &= ~FUNNEL_HOLDTIME_MASK;
+ functl |= FUNNEL_HOLDTIME;
+ functl |= port_mask;
+ funnel_writel(drvdata, id, functl, FUNNEL_FUNCTL);
+ funnel_writel(drvdata, id, drvdata->priority, FUNNEL_PRICTL);
+
+ FUNNEL_LOCK(id);
+}
+
+int funnel_enable(uint8_t id, uint32_t port_mask)
+{
+ int ret;
+
+ ret = clk_prepare_enable(drvdata->clk);
+ if (ret)
+ return ret;
+
+ mutex_lock(&drvdata->mutex);
+ __funnel_enable(id, port_mask);
+ drvdata->enabled = true;
+ dev_info(drvdata->dev, "FUNNEL port mask 0x%lx enabled\n",
+ (unsigned long) port_mask);
+ mutex_unlock(&drvdata->mutex);
+
+ return 0;
+}
+
+static void __funnel_disable(uint8_t id, uint32_t port_mask)
+{
+ uint32_t functl;
+
+ FUNNEL_UNLOCK(id);
+
+ functl = funnel_readl(drvdata, id, FUNNEL_FUNCTL);
+ functl &= ~port_mask;
+ funnel_writel(drvdata, id, functl, FUNNEL_FUNCTL);
+
+ FUNNEL_LOCK(id);
+}
+
+void funnel_disable(uint8_t id, uint32_t port_mask)
+{
+ mutex_lock(&drvdata->mutex);
+ __funnel_disable(id, port_mask);
+ drvdata->enabled = false;
+ dev_info(drvdata->dev, "FUNNEL port mask 0x%lx disabled\n",
+ (unsigned long) port_mask);
+ mutex_unlock(&drvdata->mutex);
+
+ clk_disable_unprepare(drvdata->clk);
+}
+
+static ssize_t funnel_show_priority(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val = drvdata->priority;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t funnel_store_priority(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ drvdata->priority = val;
+ return size;
+}
+static DEVICE_ATTR(priority, S_IRUGO | S_IWUSR, funnel_show_priority,
+ funnel_store_priority);
+
+static int __devinit funnel_sysfs_init(void)
+{
+ int ret;
+
+ drvdata->kobj = kobject_create_and_add("funnel", qdss_get_modulekobj());
+ if (!drvdata->kobj) {
+ dev_err(drvdata->dev, "failed to create FUNNEL sysfs kobject\n");
+ ret = -ENOMEM;
+ goto err_create;
+ }
+
+ ret = sysfs_create_file(drvdata->kobj, &dev_attr_priority.attr);
+ if (ret) {
+ dev_err(drvdata->dev, "failed to create FUNNEL sysfs priority"
+ " attribute\n");
+ goto err_file;
+ }
+
+ return 0;
+err_file:
+ kobject_put(drvdata->kobj);
+err_create:
+ return ret;
+}
+
+static void __devexit funnel_sysfs_exit(void)
+{
+ sysfs_remove_file(drvdata->kobj, &dev_attr_priority.attr);
+ kobject_put(drvdata->kobj);
+}
+
+static int __devinit funnel_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct resource *res;
+
+ drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata) {
+ ret = -ENOMEM;
+ goto err_kzalloc_drvdata;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -EINVAL;
+ goto err_res;
+ }
+
+ drvdata->base = ioremap_nocache(res->start, resource_size(res));
+ if (!drvdata->base) {
+ ret = -EINVAL;
+ goto err_ioremap;
+ }
+
+ drvdata->dev = &pdev->dev;
+
+ mutex_init(&drvdata->mutex);
+
+ drvdata->clk = clk_get(drvdata->dev, "core_clk");
+ if (IS_ERR(drvdata->clk)) {
+ ret = PTR_ERR(drvdata->clk);
+ goto err_clk_get;
+ }
+
+ ret = clk_set_rate(drvdata->clk, CORESIGHT_CLK_RATE_TRACE);
+ if (ret)
+ goto err_clk_rate;
+
+ funnel_sysfs_init();
+
+ dev_info(drvdata->dev, "FUNNEL initialized\n");
+ return 0;
+
+err_clk_rate:
+ clk_put(drvdata->clk);
+err_clk_get:
+ mutex_destroy(&drvdata->mutex);
+ iounmap(drvdata->base);
+err_ioremap:
+err_res:
+ kfree(drvdata);
+err_kzalloc_drvdata:
+ dev_err(drvdata->dev, "FUNNEL init failed\n");
+ return ret;
+}
+
+static int __devexit funnel_remove(struct platform_device *pdev)
+{
+ if (drvdata->enabled)
+ funnel_disable(0x0, 0xFF);
+ funnel_sysfs_exit();
+ clk_put(drvdata->clk);
+ mutex_destroy(&drvdata->mutex);
+ iounmap(drvdata->base);
+ kfree(drvdata);
+
+ return 0;
+}
+
+static struct of_device_id funnel_match[] = {
+ {.compatible = "qcom,msm-funnel"},
+ {}
+};
+
+static struct platform_driver funnel_driver = {
+ .probe = funnel_probe,
+ .remove = __devexit_p(funnel_remove),
+ .driver = {
+ .name = "msm_funnel",
+ .owner = THIS_MODULE,
+ .of_match_table = funnel_match,
+ },
+};
+
+static int __init funnel_init(void)
+{
+ return platform_driver_register(&funnel_driver);
+}
+module_init(funnel_init);
+
+static void __exit funnel_exit(void)
+{
+ platform_driver_unregister(&funnel_driver);
+}
+module_exit(funnel_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CoreSight Funnel driver");
diff --git a/drivers/coresight/coresight-priv.h b/drivers/coresight/coresight-priv.h
new file mode 100644
index 0000000..dab854c
--- /dev/null
+++ b/drivers/coresight/coresight-priv.h
@@ -0,0 +1,48 @@
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CORESIGHT_PRIV_H
+#define _CORESIGHT_PRIV_H
+
+#include <linux/bitops.h>
+
+/* Coresight management registers (0xF00-0xFCC)
+ * 0xFA0 - 0xFA4: Management registers in PFTv1.0
+ * Trace registers in PFTv1.1
+ */
+#define CORESIGHT_ITCTRL (0xF00)
+#define CORESIGHT_CLAIMSET (0xFA0)
+#define CORESIGHT_CLAIMCLR (0xFA4)
+#define CORESIGHT_LAR (0xFB0)
+#define CORESIGHT_LSR (0xFB4)
+#define CORESIGHT_AUTHSTATUS (0xFB8)
+#define CORESIGHT_DEVID (0xFC8)
+#define CORESIGHT_DEVTYPE (0xFCC)
+
+#define CORESIGHT_UNLOCK (0xC5ACCE55)
+
+#define TIMEOUT_US (100)
+
+#define BM(lsb, msb) ((BIT(msb) - BIT(lsb)) + BIT(msb))
+#define BMVAL(val, lsb, msb) ((val & BM(lsb, msb)) >> lsb)
+#define BVAL(val, n) ((val & BIT(n)) >> n)
+
+int etb_enable(void);
+void etb_disable(void);
+void etb_dump(void);
+void tpiu_disable(void);
+int funnel_enable(uint8_t id, uint32_t port_mask);
+void funnel_disable(uint8_t id, uint32_t port_mask);
+
+struct kobject *qdss_get_modulekobj(void);
+
+#endif
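
BM() builds an inclusive [lsb..msb] mask (BIT(msb) - BIT(lsb) sets bits lsb..msb-1 and
adding BIT(msb) completes the range) and BMVAL() shifts the selected field down to bit 0;
this is what etm_init_arch_data() uses to decode ETMIDR and ETMCCR. A worked example
with an invented register value:

    #include <linux/types.h>
    #include <linux/bitops.h>

    #define BM(lsb, msb)         ((BIT(msb) - BIT(lsb)) + BIT(msb))
    #define BMVAL(val, lsb, msb) ((val & BM(lsb, msb)) >> lsb)

    static unsigned int demo_nr_addr_cmp(void)
    {
            u32 etmccr = 0x01004004;  /* value invented for illustration */

            /* BMVAL(etmccr, 13, 15) == 0x2 -> 2 counters,
             * BMVAL(etmccr, 24, 25) == 0x1 -> 1 context ID comparator */
            return BMVAL(etmccr, 0, 3) * 2;  /* == 8 address comparators */
    }
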
diff --git a/arch/arm/mach-msm/qdss-stm.c b/drivers/coresight/coresight-stm.c
similarity index 66%
rename from arch/arm/mach-msm/qdss-stm.c
rename to drivers/coresight/coresight-stm.c
index 9ce6318..6387947 100644
--- a/arch/arm/mach-msm/qdss-stm.c
+++ b/drivers/coresight/coresight-stm.c
@@ -22,15 +22,17 @@
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/coresight.h>
+#include <linux/coresight-stm.h>
#include <asm/unaligned.h>
-#include <mach/stm.h>
-#include "qdss-priv.h"
+#include "coresight-priv.h"
-#define stm_writel(stm, val, off) \
- __raw_writel((val), stm.base + off)
-#define stm_readl(stm, val, off) \
- __raw_readl(stm.base + off)
+#define stm_writel(drvdata, val, off) \
+ __raw_writel((val), drvdata->base + off)
+#define stm_readl(drvdata, val, off) \
+ __raw_readl(drvdata->base + off)
#define NR_STM_CHANNEL (32)
#define BYTES_PER_CHANNEL (256)
@@ -51,17 +53,17 @@
#define OST_VERSION (0x1)
#define stm_channel_addr(ch) \
- (stm.chs.base + (ch * BYTES_PER_CHANNEL))
+ (drvdata->chs.base + (ch * BYTES_PER_CHANNEL))
#define stm_channel_off(type, opts) (type & ~opts)
#define STM_LOCK() \
do { \
mb(); \
- stm_writel(stm, 0x0, CS_LAR); \
+ stm_writel(drvdata, 0x0, CORESIGHT_LAR); \
} while (0)
#define STM_UNLOCK() \
do { \
- stm_writel(stm, CS_UNLOCK_MAGIC, CS_LAR); \
+ stm_writel(drvdata, CORESIGHT_UNLOCK, CORESIGHT_LAR); \
mb(); \
} while (0)
@@ -91,29 +93,27 @@
unsigned long *bitmap;
};
-struct stm_ctx {
+struct stm_drvdata {
void __iomem *base;
bool enabled;
struct qdss_source *src;
struct device *dev;
struct kobject *kobj;
+ struct clk *clk;
uint32_t entity;
struct channel_space chs;
};
-static struct stm_ctx stm = {
- .entity = OST_ENTITY_ALL,
-};
-
+static struct stm_drvdata *drvdata;
static void __stm_enable(void)
{
STM_UNLOCK();
- stm_writel(stm, 0x80, STMSYNCR);
- stm_writel(stm, 0xFFFFFFFF, STMSPTER);
- stm_writel(stm, 0xFFFFFFFF, STMSPER);
- stm_writel(stm, 0x30003, STMTCSR);
+ stm_writel(drvdata, 0x80, STMSYNCR);
+ stm_writel(drvdata, 0xFFFFFFFF, STMSPTER);
+ stm_writel(drvdata, 0xFFFFFFFF, STMSPER);
+ stm_writel(drvdata, 0x30003, STMTCSR);
STM_LOCK();
}
@@ -122,23 +122,30 @@
{
int ret;
- if (stm.enabled) {
- dev_err(stm.dev, "STM tracing already enabled\n");
+ if (drvdata->enabled) {
+ dev_err(drvdata->dev, "STM tracing already enabled\n");
ret = -EINVAL;
goto err;
}
- ret = qdss_enable(stm.src);
+ ret = clk_prepare_enable(drvdata->clk);
if (ret)
- goto err;
+ goto err_clk;
+
+ ret = qdss_enable(drvdata->src);
+ if (ret)
+ goto err_qdss;
__stm_enable();
- stm.enabled = true;
+ drvdata->enabled = true;
- dev_info(stm.dev, "STM tracing enabled\n");
+ dev_info(drvdata->dev, "STM tracing enabled\n");
return 0;
+err_qdss:
+ clk_disable_unprepare(drvdata->clk);
+err_clk:
err:
return ret;
}
@@ -147,9 +154,9 @@
{
STM_UNLOCK();
- stm_writel(stm, 0x30000, STMTCSR);
- stm_writel(stm, 0x0, STMSPER);
- stm_writel(stm, 0x0, STMSPTER);
+ stm_writel(drvdata, 0x30000, STMTCSR);
+ stm_writel(drvdata, 0x0, STMSPER);
+ stm_writel(drvdata, 0x0, STMSPTER);
STM_LOCK();
}
@@ -158,19 +165,21 @@
{
int ret;
- if (!stm.enabled) {
- dev_err(stm.dev, "STM tracing already disabled\n");
+ if (!drvdata->enabled) {
+ dev_err(drvdata->dev, "STM tracing already disabled\n");
ret = -EINVAL;
goto err;
}
__stm_disable();
- qdss_disable(stm.src);
+ drvdata->enabled = false;
- stm.enabled = false;
+ qdss_disable(drvdata->src);
- dev_info(stm.dev, "STM tracing disabled\n");
+ clk_disable_unprepare(drvdata->clk);
+
+ dev_info(drvdata->dev, "STM tracing disabled\n");
return 0;
err:
@@ -182,15 +191,17 @@
uint32_t ch;
do {
- ch = find_next_zero_bit(stm.chs.bitmap, NR_STM_CHANNEL, off);
- } while ((ch < NR_STM_CHANNEL) && test_and_set_bit(ch, stm.chs.bitmap));
+ ch = find_next_zero_bit(drvdata->chs.bitmap,
+ NR_STM_CHANNEL, off);
+ } while ((ch < NR_STM_CHANNEL) &&
+ test_and_set_bit(ch, drvdata->chs.bitmap));
return ch;
}
static void stm_channel_free(uint32_t ch)
{
- clear_bit(ch, stm.chs.bitmap);
+ clear_bit(ch, drvdata->chs.bitmap);
}
static int stm_send(void *addr, const void *data, uint32_t size)
@@ -331,10 +342,10 @@
* number of bytes transferred over STM
*/
int stm_trace(uint32_t options, uint8_t entity_id, uint8_t proto_id,
- const void *data, uint32_t size)
+ const void *data, uint32_t size)
{
/* we don't support sizes more than 24bits (0 to 23) */
- if (!(stm.enabled && (stm.entity & entity_id) &&
+ if (!(drvdata->enabled && (drvdata->entity & entity_id) &&
(size < 0x1000000)))
return 0;
@@ -347,10 +358,10 @@
{
char *buf;
- if (!stm.enabled)
+ if (!drvdata->enabled)
return -EINVAL;
- if (!(stm.entity & OST_ENTITY_DEV_NODE))
+ if (!(drvdata->entity & OST_ENTITY_DEV_NODE))
return size;
if (size > STM_TRACE_BUF_SIZE)
@@ -362,7 +373,7 @@
if (copy_from_user(buf, data, size)) {
kfree(buf);
- dev_dbg(stm.dev, "%s: copy_from_user failed\n", __func__);
+ dev_dbg(drvdata->dev, "%s: copy_from_user failed\n", __func__);
return -EFAULT;
}
@@ -385,13 +396,16 @@
.fops = &stm_fops,
};
-#define STM_ATTR(__name) \
-static struct kobj_attribute __name##_attr = \
- __ATTR(__name, S_IRUGO | S_IWUSR, __name##_show, __name##_store)
+static ssize_t stm_show_enabled(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val = drvdata->enabled;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
-static ssize_t enabled_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t n)
+static ssize_t stm_store_enabled(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
{
int ret = 0;
unsigned long val;
@@ -406,70 +420,65 @@
if (ret)
return ret;
- return n;
+ return size;
}
-static ssize_t enabled_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
+static DEVICE_ATTR(enabled, S_IRUGO | S_IWUSR, stm_show_enabled,
+ stm_store_enabled);
+
+static ssize_t stm_show_entity(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- unsigned long val = stm.enabled;
+ unsigned long val = drvdata->entity;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
-STM_ATTR(enabled);
-static ssize_t entity_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t n)
+static ssize_t stm_store_entity(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
{
unsigned long val;
if (sscanf(buf, "%lx", &val) != 1)
return -EINVAL;
- stm.entity = val;
- return n;
+ drvdata->entity = val;
+ return size;
}
-static ssize_t entity_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
-{
- unsigned long val = stm.entity;
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-STM_ATTR(entity);
+static DEVICE_ATTR(entity, S_IRUGO | S_IWUSR, stm_show_entity,
+ stm_store_entity);
static int __devinit stm_sysfs_init(void)
{
int ret;
- stm.kobj = kobject_create_and_add("stm", qdss_get_modulekobj());
- if (!stm.kobj) {
- dev_err(stm.dev, "failed to create STM sysfs kobject\n");
+ drvdata->kobj = kobject_create_and_add("stm", qdss_get_modulekobj());
+ if (!drvdata->kobj) {
+ dev_err(drvdata->dev, "failed to create STM sysfs kobject\n");
ret = -ENOMEM;
goto err_create;
}
- ret = sysfs_create_file(stm.kobj, &enabled_attr.attr);
+ ret = sysfs_create_file(drvdata->kobj, &dev_attr_enabled.attr);
if (ret) {
- dev_err(stm.dev, "failed to create STM sysfs enabled attr\n");
+ dev_err(drvdata->dev, "failed to create STM sysfs enabled attr\n");
goto err_file;
}
- if (sysfs_create_file(stm.kobj, &entity_attr.attr))
- dev_err(stm.dev, "failed to create STM sysfs entity attr\n");
+ if (sysfs_create_file(drvdata->kobj, &dev_attr_entity.attr))
+ dev_err(drvdata->dev, "failed to create STM sysfs entity attr\n");
return 0;
err_file:
- kobject_put(stm.kobj);
+ kobject_put(drvdata->kobj);
err_create:
return ret;
}
static void __devexit stm_sysfs_exit(void)
{
- sysfs_remove_file(stm.kobj, &entity_attr.attr);
- sysfs_remove_file(stm.kobj, &enabled_attr.attr);
- kobject_put(stm.kobj);
+ sysfs_remove_file(drvdata->kobj, &dev_attr_entity.attr);
+ sysfs_remove_file(drvdata->kobj, &dev_attr_enabled.attr);
+ kobject_put(drvdata->kobj);
}
static int __devinit stm_probe(struct platform_device *pdev)
@@ -478,14 +487,20 @@
struct resource *res;
size_t res_size, bitmap_size;
+ drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata) {
+ ret = -ENOMEM;
+ goto err_kzalloc_drvdata;
+ }
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
ret = -EINVAL;
goto err_res0;
}
- stm.base = ioremap_nocache(res->start, resource_size(res));
- if (!stm.base) {
+ drvdata->base = ioremap_nocache(res->start, resource_size(res));
+ if (!drvdata->base) {
ret = -EINVAL;
goto err_ioremap0;
}
@@ -506,30 +521,42 @@
bitmap_size = NR_STM_CHANNEL * sizeof(long);
}
- stm.chs.bitmap = kzalloc(bitmap_size, GFP_KERNEL);
- if (!stm.chs.bitmap) {
+ drvdata->chs.bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!drvdata->chs.bitmap) {
ret = -ENOMEM;
goto err_bitmap;
}
- stm.chs.base = ioremap_nocache(res->start, res_size);
- if (!stm.chs.base) {
+ drvdata->chs.base = ioremap_nocache(res->start, res_size);
+ if (!drvdata->chs.base) {
ret = -EINVAL;
goto err_ioremap1;
}
- stm.dev = &pdev->dev;
+ drvdata->dev = &pdev->dev;
ret = misc_register(&stm_misc);
if (ret)
goto err_misc;
- stm.src = qdss_get("msm_stm");
- if (IS_ERR(stm.src)) {
- ret = PTR_ERR(stm.src);
+ drvdata->src = qdss_get("msm_stm");
+ if (IS_ERR(drvdata->src)) {
+ ret = PTR_ERR(drvdata->src);
goto err_qdssget;
}
+ drvdata->clk = clk_get(drvdata->dev, "core_clk");
+ if (IS_ERR(drvdata->clk)) {
+ ret = PTR_ERR(drvdata->clk);
+ goto err_clk_get;
+ }
+
+ ret = clk_set_rate(drvdata->clk, CORESIGHT_CLK_RATE_TRACE);
+ if (ret)
+ goto err_clk_rate;
+
+ drvdata->entity = OST_ENTITY_ALL;
+
ret = stm_sysfs_init();
if (ret)
goto err_sysfs;
@@ -537,45 +564,60 @@
if (stm_boot_enable)
stm_enable();
- dev_info(stm.dev, "STM initialized\n");
+ dev_info(drvdata->dev, "STM initialized\n");
return 0;
err_sysfs:
- qdss_put(stm.src);
+err_clk_rate:
+ clk_put(drvdata->clk);
+err_clk_get:
+ qdss_put(drvdata->src);
err_qdssget:
misc_deregister(&stm_misc);
err_misc:
- iounmap(stm.chs.base);
+ iounmap(drvdata->chs.base);
err_ioremap1:
- kfree(stm.chs.bitmap);
+ kfree(drvdata->chs.bitmap);
err_bitmap:
err_res1:
- iounmap(stm.base);
+ iounmap(drvdata->base);
err_ioremap0:
err_res0:
- dev_err(stm.dev, "STM init failed\n");
+ kfree(drvdata);
+err_kzalloc_drvdata:
+
+ dev_err(drvdata->dev, "STM init failed\n");
return ret;
}
static int __devexit stm_remove(struct platform_device *pdev)
{
- if (stm.enabled)
+ if (drvdata->enabled)
stm_disable();
stm_sysfs_exit();
- qdss_put(stm.src);
+ clk_put(drvdata->clk);
+ qdss_put(drvdata->src);
misc_deregister(&stm_misc);
- iounmap(stm.chs.base);
- kfree(stm.chs.bitmap);
- iounmap(stm.base);
+ iounmap(drvdata->chs.base);
+ kfree(drvdata->chs.bitmap);
+ iounmap(drvdata->base);
+ kfree(drvdata);
return 0;
}
+static struct of_device_id stm_match[] = {
+ {.compatible = "qcom,msm-stm"},
+ {}
+};
+
static struct platform_driver stm_driver = {
.probe = stm_probe,
.remove = __devexit_p(stm_remove),
.driver = {
.name = "msm_stm",
+ .owner = THIS_MODULE,
+ .of_match_table = stm_match,
},
};
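
stm_trace() is the in-kernel trace entry point: it silently returns 0 when STM is
disabled, when the caller's entity_id is masked out by the "entity" sysfs filter, or
when size does not fit in 24 bits, so callers can leave the call compiled in
unconditionally. A hedged sketch of a client, assuming the prototype is exported
through <linux/coresight-stm.h> as the driver's own include list suggests; the entity
and protocol IDs below are placeholders, not values defined by this patch:

    #include <linux/types.h>
    #include <linux/coresight-stm.h>

    static void demo_stm_log(const void *payload, uint32_t len)
    {
            /* returns the number of bytes queued over STM, or 0 if filtered */
            stm_trace(0x0 /* options */, 0x1 /* entity_id */,
                      0x0 /* proto_id */, payload, len);
    }
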
diff --git a/arch/arm/mach-msm/qdss-tpiu.c b/drivers/coresight/coresight-tpiu.c
similarity index 63%
rename from arch/arm/mach-msm/qdss-tpiu.c
rename to drivers/coresight/coresight-tpiu.c
index fa15635..4b52c4d 100644
--- a/arch/arm/mach-msm/qdss-tpiu.c
+++ b/drivers/coresight/coresight-tpiu.c
@@ -17,11 +17,14 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/coresight.h>
-#include "qdss-priv.h"
+#include "coresight-priv.h"
-#define tpiu_writel(tpiu, val, off) __raw_writel((val), tpiu.base + off)
-#define tpiu_readl(tpiu, off) __raw_readl(tpiu.base + off)
+#define tpiu_writel(drvdata, val, off) __raw_writel((val), drvdata->base + off)
+#define tpiu_readl(drvdata, off) __raw_readl(drvdata->base + off)
#define TPIU_SUPP_PORTSZ (0x000)
#define TPIU_CURR_PORTSZ (0x004)
@@ -47,28 +50,29 @@
#define TPIU_LOCK() \
do { \
mb(); \
- tpiu_writel(tpiu, 0x0, CS_LAR); \
+ tpiu_writel(drvdata, 0x0, CORESIGHT_LAR); \
} while (0)
#define TPIU_UNLOCK() \
do { \
- tpiu_writel(tpiu, CS_UNLOCK_MAGIC, CS_LAR); \
+ tpiu_writel(drvdata, CORESIGHT_UNLOCK, CORESIGHT_LAR); \
mb(); \
} while (0)
-struct tpiu_ctx {
+struct tpiu_drvdata {
void __iomem *base;
bool enabled;
struct device *dev;
+ struct clk *clk;
};
-static struct tpiu_ctx tpiu;
+static struct tpiu_drvdata *drvdata;
static void __tpiu_disable(void)
{
TPIU_UNLOCK();
- tpiu_writel(tpiu, 0x3000, TPIU_FFCR);
- tpiu_writel(tpiu, 0x3040, TPIU_FFCR);
+ tpiu_writel(drvdata, 0x3000, TPIU_FFCR);
+ tpiu_writel(drvdata, 0x3040, TPIU_FFCR);
TPIU_LOCK();
}
@@ -76,8 +80,8 @@
void tpiu_disable(void)
{
__tpiu_disable();
- tpiu.enabled = false;
- dev_info(tpiu.dev, "TPIU disabled\n");
+ drvdata->enabled = false;
+ dev_info(drvdata->dev, "TPIU disabled\n");
}
static int __devinit tpiu_probe(struct platform_device *pdev)
@@ -85,43 +89,74 @@
int ret;
struct resource *res;
+ drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata) {
+ ret = -ENOMEM;
+ goto err_kzalloc_drvdata;
+ }
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
ret = -EINVAL;
goto err_res;
}
- tpiu.base = ioremap_nocache(res->start, resource_size(res));
- if (!tpiu.base) {
+ drvdata->base = ioremap_nocache(res->start, resource_size(res));
+ if (!drvdata->base) {
ret = -EINVAL;
goto err_ioremap;
}
- tpiu.dev = &pdev->dev;
+ drvdata->dev = &pdev->dev;
- dev_info(tpiu.dev, "TPIU initialized\n");
+ drvdata->clk = clk_get(drvdata->dev, "core_clk");
+ if (IS_ERR(drvdata->clk)) {
+ ret = PTR_ERR(drvdata->clk);
+ goto err_clk_get;
+ }
+
+ ret = clk_set_rate(drvdata->clk, CORESIGHT_CLK_RATE_TRACE);
+ if (ret)
+ goto err_clk_rate;
+
+ dev_info(drvdata->dev, "TPIU initialized\n");
return 0;
+err_clk_rate:
+ clk_put(drvdata->clk);
+err_clk_get:
+ iounmap(drvdata->base);
err_ioremap:
err_res:
- dev_err(tpiu.dev, "TPIU init failed\n");
+ kfree(drvdata);
+err_kzalloc_drvdata:
+ dev_err(drvdata->dev, "TPIU init failed\n");
return ret;
}
static int __devexit tpiu_remove(struct platform_device *pdev)
{
- if (tpiu.enabled)
+ if (drvdata->enabled)
tpiu_disable();
- iounmap(tpiu.base);
+ clk_put(drvdata->clk);
+ iounmap(drvdata->base);
+ kfree(drvdata);
return 0;
}
+static struct of_device_id tpiu_match[] = {
+ {.compatible = "qcom,msm-tpiu"},
+ {}
+};
+
static struct platform_driver tpiu_driver = {
.probe = tpiu_probe,
.remove = __devexit_p(tpiu_remove),
.driver = {
.name = "msm_tpiu",
+ .owner = THIS_MODULE,
+ .of_match_table = tpiu_match,
},
};
diff --git a/drivers/coresight/coresight.c b/drivers/coresight/coresight.c
new file mode 100644
index 0000000..055ef55
--- /dev/null
+++ b/drivers/coresight/coresight.c
@@ -0,0 +1,606 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/coresight.h>
+
+#include "coresight-priv.h"
+
+#define MAX_STR_LEN (65535)
+
+
+static LIST_HEAD(coresight_orph_conns);
+static DEFINE_MUTEX(coresight_conns_mutex);
+static LIST_HEAD(coresight_devs);
+static DEFINE_MUTEX(coresight_devs_mutex);
+
+
+int coresight_enable(struct coresight_device *csdev, int port)
+{
+ int i;
+ int ret = 0;
+ struct coresight_connection *conn;
+
+ mutex_lock(&csdev->mutex);
+ if (csdev->refcnt[port] == 0) {
+ for (i = 0; i < csdev->nr_conns; i++) {
+ conn = &csdev->conns[i];
+ ret = coresight_enable(conn->child_dev,
+ conn->child_port);
+ if (ret)
+ goto err_enable_child;
+ }
+ if (csdev->ops->enable)
+ ret = csdev->ops->enable(csdev, port);
+ if (ret)
+ goto err_enable;
+ }
+ csdev->refcnt[port]++;
+ mutex_unlock(&csdev->mutex);
+ return 0;
+err_enable_child:
+ while (i) {
+ conn = &csdev->conns[--i];
+ coresight_disable(conn->child_dev, conn->child_port);
+ }
+err_enable:
+ mutex_unlock(&csdev->mutex);
+ return ret;
+}
+EXPORT_SYMBOL(coresight_enable);
+
+void coresight_disable(struct coresight_device *csdev, int port)
+{
+ int i;
+ struct coresight_connection *conn;
+
+ mutex_lock(&csdev->mutex);
+ if (csdev->refcnt[port] == 1) {
+ if (csdev->ops->disable)
+ csdev->ops->disable(csdev, port);
+ for (i = 0; i < csdev->nr_conns; i++) {
+ conn = &csdev->conns[i];
+ coresight_disable(conn->child_dev, conn->child_port);
+ }
+ }
+ csdev->refcnt[port]--;
+ mutex_unlock(&csdev->mutex);
+}
+EXPORT_SYMBOL(coresight_disable);
+
+static ssize_t coresight_show_type(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", dev->type->name);
+}
+
+static struct device_attribute coresight_dev_attrs[] = {
+ __ATTR(type, S_IRUGO, coresight_show_type, NULL),
+ { },
+};
+
+struct bus_type coresight_bus_type = {
+ .name = "coresight",
+ .dev_attrs = coresight_dev_attrs,
+};
+
+static ssize_t coresight_show_enable(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct coresight_device *csdev = to_coresight_device(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", (unsigned)csdev->enable);
+}
+
+static ssize_t coresight_store_enable(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret = 0;
+ unsigned long val;
+ struct coresight_device *csdev = to_coresight_device(dev);
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ if (val)
+ ret = coresight_enable(csdev, 0);
+ else
+ coresight_disable(csdev, 0);
+
+ if (ret)
+ return ret;
+ return size;
+}
+static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, coresight_show_enable,
+ coresight_store_enable);
+
+static struct attribute *coresight_attrs[] = {
+ &dev_attr_enable.attr,
+ NULL,
+};
+
+static struct attribute_group coresight_attr_grp = {
+ .attrs = coresight_attrs,
+};
+
+static const struct attribute_group *coresight_attr_grps[] = {
+ &coresight_attr_grp,
+ NULL,
+};
+
+static struct device_type coresight_dev_type[CORESIGHT_DEV_TYPE_MAX] = {
+ {
+ .name = "source",
+ .groups = coresight_attr_grps,
+ },
+ {
+ .name = "link",
+ },
+ {
+ .name = "sink",
+ .groups = coresight_attr_grps,
+ },
+};
+
+static void coresight_device_release(struct device *dev)
+{
+ struct coresight_device *csdev = to_coresight_device(dev);
+ mutex_destroy(&csdev->mutex);
+ kfree(csdev);
+}
+
+static void coresight_fixup_orphan_conns(struct coresight_device *csdev)
+{
+ struct coresight_connection *conn, *temp;
+
+ mutex_lock(&coresight_conns_mutex);
+ list_for_each_entry_safe(conn, temp, &coresight_orph_conns, link) {
+ if (conn->child_id == csdev->id) {
+ conn->child_dev = csdev;
+ list_del(&conn->link);
+ }
+ }
+ mutex_unlock(&coresight_conns_mutex);
+}
+
+static void coresight_fixup_device_conns(struct coresight_device *csdev)
+{
+ int i;
+ struct coresight_device *cd;
+ bool found;
+
+ for (i = 0; i < csdev->nr_conns; i++) {
+ found = false;
+ mutex_lock(&coresight_devs_mutex);
+ list_for_each_entry(cd, &coresight_devs, link) {
+ if (csdev->conns[i].child_id == cd->id) {
+ csdev->conns[i].child_dev = cd;
+ found = true;
+ break;
+ }
+ }
+ mutex_unlock(&coresight_devs_mutex);
+ if (!found) {
+ mutex_lock(&coresight_conns_mutex);
+ list_add_tail(&csdev->conns[i].link,
+ &coresight_orph_conns);
+ mutex_unlock(&coresight_conns_mutex);
+ }
+ }
+}
+
+struct coresight_device *coresight_register(struct coresight_desc *desc)
+{
+ int i;
+ int ret;
+ int *refcnt;
+ struct coresight_device *csdev;
+ struct coresight_connection *conns;
+
+ csdev = kzalloc(sizeof(*csdev), GFP_KERNEL);
+ if (!csdev) {
+ ret = -ENOMEM;
+ goto err_kzalloc_csdev;
+ }
+
+ mutex_init(&csdev->mutex);
+ csdev->id = desc->pdata->id;
+
+ refcnt = kzalloc(sizeof(*refcnt) * desc->pdata->nr_ports, GFP_KERNEL);
+ if (!refcnt) {
+ ret = -ENOMEM;
+ goto err_kzalloc_refcnt;
+ }
+ csdev->refcnt = refcnt;
+
+ csdev->nr_conns = desc->pdata->nr_children;
+ conns = kzalloc(sizeof(*conns) * csdev->nr_conns, GFP_KERNEL);
+ if (!conns) {
+ ret = -ENOMEM;
+ goto err_kzalloc_conns;
+ }
+ for (i = 0; i < csdev->nr_conns; i++) {
+ conns[i].child_id = desc->pdata->child_ids[i];
+ conns[i].child_port = desc->pdata->child_ports[i];
+ }
+ csdev->conns = conns;
+
+ csdev->ops = desc->ops;
+ csdev->owner = desc->owner;
+
+ csdev->dev.type = &coresight_dev_type[desc->type];
+ csdev->dev.groups = desc->groups;
+ csdev->dev.parent = desc->dev;
+ csdev->dev.bus = &coresight_bus_type;
+ csdev->dev.release = coresight_device_release;
+ dev_set_name(&csdev->dev, "%s", desc->pdata->name);
+
+ coresight_fixup_device_conns(csdev);
+ ret = device_register(&csdev->dev);
+ if (ret)
+ goto err_dev_reg;
+ coresight_fixup_orphan_conns(csdev);
+
+ mutex_lock(&coresight_devs_mutex);
+ list_add_tail(&csdev->link, &coresight_devs);
+ mutex_unlock(&coresight_devs_mutex);
+
+ return csdev;
+err_dev_reg:
+ /* device_register() failed: put_device() drops the last reference and the
+ * release callback has already freed csdev, so only free what it does not own
+ */
+ put_device(&csdev->dev);
+ kfree(conns);
+ kfree(refcnt);
+ return ERR_PTR(ret);
+err_kzalloc_conns:
+ kfree(refcnt);
+err_kzalloc_refcnt:
+ mutex_destroy(&csdev->mutex);
+ kfree(csdev);
+err_kzalloc_csdev:
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(coresight_register);
+
+void coresight_unregister(struct coresight_device *csdev)
+{
+ if (IS_ERR_OR_NULL(csdev))
+ return;
+
+ if (get_device(&csdev->dev)) {
+ mutex_lock(&csdev->mutex);
+ device_unregister(&csdev->dev);
+ mutex_unlock(&csdev->mutex);
+ put_device(&csdev->dev);
+ }
+}
+EXPORT_SYMBOL(coresight_unregister);
+
+static int __init coresight_init(void)
+{
+ return bus_register(&coresight_bus_type);
+}
+subsys_initcall(coresight_init);
+
+static void __exit coresight_exit(void)
+{
+ bus_unregister(&coresight_bus_type);
+}
+module_exit(coresight_exit);
+
+MODULE_LICENSE("GPL v2");
+/*
+ * Exclusion rules for structure fields.
+ *
+ * S: qdss.sources_mutex protected.
+ * I: qdss.sink_mutex protected.
+ * C: qdss.clk_mutex protected.
+ */
+struct qdss_ctx {
+ struct kobject *modulekobj;
+ uint8_t afamily;
+ struct list_head sources; /* S: sources list */
+ struct mutex sources_mutex;
+ uint8_t sink_count; /* I: sink count */
+ struct mutex sink_mutex;
+ uint8_t max_clk;
+ struct clk *clk;
+};
+
+static struct qdss_ctx qdss;
+
+/**
+ * qdss_get - get the qdss source handle
+ * @name: name of the qdss source
+ *
+ * Searches the sources list to get the qdss source handle for this source.
+ *
+ * CONTEXT:
+ * Typically called from init or probe functions
+ *
+ * RETURNS:
+ * pointer to struct qdss_source on success, ERR_PTR(-ENOENT) on failure
+ */
+struct qdss_source *qdss_get(const char *name)
+{
+ struct qdss_source *src, *source = NULL;
+
+ mutex_lock(&qdss.sources_mutex);
+ list_for_each_entry(src, &qdss.sources, link) {
+ if (src->name) {
+ if (strncmp(src->name, name, MAX_STR_LEN))
+ continue;
+ source = src;
+ break;
+ }
+ }
+ mutex_unlock(&qdss.sources_mutex);
+
+ return source ? source : ERR_PTR(-ENOENT);
+}
+EXPORT_SYMBOL(qdss_get);
+
+/**
+ * qdss_put - release the qdss source handle
+ * @src: handle for the qdss source obtained from qdss_get()
+ *
+ * CONTEXT:
+ * Typically called from driver remove or exit functions
+ */
+void qdss_put(struct qdss_source *src)
+{
+}
+EXPORT_SYMBOL(qdss_put);
+
+/**
+ * qdss_enable - enable qdss for the source
+ * @src: handle for the source making the call
+ *
+ * Enables qdss block (relevant funnel ports and sink) if not already
+ * enabled, otherwise increments the reference count
+ *
+ * CONTEXT:
+ * Might sleep. Uses a mutex lock. Should be called from a non-atomic context.
+ *
+ * RETURNS:
+ * 0 on success, non-zero on failure
+ */
+int qdss_enable(struct qdss_source *src)
+{
+ if (!src)
+ return -EINVAL;
+
+ if (qdss.afamily) {
+ mutex_lock(&qdss.sink_mutex);
+ if (qdss.sink_count == 0) {
+ tpiu_disable();
+ /* enable ETB first to avoid losing any trace data */
+ etb_enable();
+ }
+ qdss.sink_count++;
+ mutex_unlock(&qdss.sink_mutex);
+ }
+
+ funnel_enable(0x0, src->fport_mask);
+ return 0;
+}
+EXPORT_SYMBOL(qdss_enable);
+
+/**
+ * qdss_disable - disable qdss for the source
+ * @src: handle for the source making the call
+ *
+ * Disables qdss block (relevant funnel ports and sink) if the reference count
+ * is one, otherwise decrements the reference count
+ *
+ * CONTEXT:
+ * Might sleep. Uses a mutex lock. Should be called from a non-atomic context.
+ */
+void qdss_disable(struct qdss_source *src)
+{
+ if (!src)
+ return;
+
+ if (qdss.afamily) {
+ mutex_lock(&qdss.sink_mutex);
+ if (WARN(qdss.sink_count == 0, "qdss is unbalanced\n"))
+ goto out;
+ if (qdss.sink_count == 1) {
+ etb_dump();
+ etb_disable();
+ }
+ qdss.sink_count--;
+ mutex_unlock(&qdss.sink_mutex);
+ }
+
+ funnel_disable(0x0, src->fport_mask);
+ return;
+out:
+ mutex_unlock(&qdss.sink_mutex);
+}
+EXPORT_SYMBOL(qdss_disable);
+
+/**
+ * qdss_disable_sink - force disable the current qdss sink(s)
+ *
+ * Force disable the current qdss sink(s) to stop the sink from accepting any
+ * trace generated subsequent to this call. This function should only be used
+ * as a way to stop the sink from getting polluted with trace data that is
+ * uninteresting after an event of interest has occurred.
+ *
+ * CONTEXT:
+ * Can be called from atomic or non-atomic context.
+ */
+void qdss_disable_sink(void)
+{
+ if (qdss.afamily) {
+ etb_dump();
+ etb_disable();
+ }
+}
+EXPORT_SYMBOL(qdss_disable_sink);
+
+struct kobject *qdss_get_modulekobj(void)
+{
+ return qdss.modulekobj;
+}
+
+#define QDSS_ATTR(name) \
+static struct kobj_attribute name##_attr = \
+ __ATTR(name, S_IRUGO | S_IWUSR, name##_show, name##_store)
+
+static ssize_t max_clk_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ qdss.max_clk = val;
+ return n;
+}
+static ssize_t max_clk_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ unsigned long val = qdss.max_clk;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+QDSS_ATTR(max_clk);
+
+static void __devinit qdss_add_sources(struct qdss_source *srcs, size_t num)
+{
+ mutex_lock(&qdss.sources_mutex);
+ while (num--) {
+ list_add_tail(&srcs->link, &qdss.sources);
+ srcs++;
+ }
+ mutex_unlock(&qdss.sources_mutex);
+}
+
+static int __init qdss_sysfs_init(void)
+{
+ int ret;
+
+ qdss.modulekobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+ if (!qdss.modulekobj) {
+ pr_err("failed to find QDSS sysfs module kobject\n");
+ ret = -ENOENT;
+ goto err;
+ }
+
+ ret = sysfs_create_file(qdss.modulekobj, &max_clk_attr.attr);
+ if (ret) {
+ pr_err("failed to create QDSS sysfs max_clk attribute\n");
+ goto err;
+ }
+
+ return 0;
+err:
+ return ret;
+}
+
+static void __devexit qdss_sysfs_exit(void)
+{
+ sysfs_remove_file(qdss.modulekobj, &max_clk_attr.attr);
+}
+
+static int __devinit qdss_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct msm_qdss_platform_data *pdata;
+
+ mutex_init(&qdss.sources_mutex);
+ mutex_init(&qdss.sink_mutex);
+
+ INIT_LIST_HEAD(&qdss.sources);
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ ret = -EINVAL;
+ goto err_pdata;
+ }
+
+ qdss.afamily = pdata->afamily;
+ qdss_add_sources(pdata->src_table, pdata->size);
+
+ pr_info("QDSS arch initialized\n");
+ return 0;
+err_pdata:
+ mutex_destroy(&qdss.sink_mutex);
+ mutex_destroy(&qdss.sources_mutex);
+ pr_err("QDSS init failed\n");
+ return ret;
+}
+
+static int __devexit qdss_remove(struct platform_device *pdev)
+{
+ qdss_sysfs_exit();
+ mutex_destroy(&qdss.sink_mutex);
+ mutex_destroy(&qdss.sources_mutex);
+
+ return 0;
+}
+
+static struct of_device_id qdss_match[] = {
+ {.compatible = "qcom,msm-qdss"},
+ {}
+};
+
+static struct platform_driver qdss_driver = {
+ .probe = qdss_probe,
+ .remove = __devexit_p(qdss_remove),
+ .driver = {
+ .name = "msm_qdss",
+ .owner = THIS_MODULE,
+ .of_match_table = qdss_match,
+ },
+};
+
+static int __init qdss_init(void)
+{
+ return platform_driver_register(&qdss_driver);
+}
+arch_initcall(qdss_init);
+
+static int __init qdss_module_init(void)
+{
+ int ret;
+
+ ret = qdss_sysfs_init();
+ if (ret)
+ goto err_sysfs;
+
+ pr_info("QDSS module initialized\n");
+ return 0;
+err_sysfs:
+ return ret;
+}
+module_init(qdss_module_init);
+
+static void __exit qdss_exit(void)
+{
+ platform_driver_unregister(&qdss_driver);
+}
+module_exit(qdss_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm Debug SubSystem Driver");
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 0dcf1a4..fdbc36f 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -555,20 +555,24 @@
This option enables support for on-chip GPIO found on Qualcomm PM8xxx
PMICs through RPC.
-config GPIO_QPNP
+config GPIO_QPNP_PIN
depends on ARCH_MSMCOPPER
+ depends on SPMI
depends on OF_SPMI
depends on MSM_QPNP_INT
- tristate "Qualcomm QPNP GPIO support"
+ tristate "Qualcomm QPNP gpio support"
help
Say 'y' here to include support for the Qualcomm QPNP gpio
- support. QPNP is a SPMI based PMIC implementation.
+ driver. This driver supports Device Tree and allows a
+ device_node to be registered as a gpio-controller. It
+ does not handle gpio interrupts directly. That work is handled
+ by CONFIG_MSM_QPNP_INT.
-config GPIO_QPNP_DEBUG
- depends on GPIO_QPNP
+config GPIO_QPNP_PIN_DEBUG
+ depends on GPIO_QPNP_PIN
depends on DEBUG_FS
bool "Qualcomm QPNP GPIO debug support"
help
Say 'y' here to include debug support for the Qualcomm
- QPNP gpio support
+ QPNP gpio driver.
endif
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index d15b628..405e498 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -48,7 +48,7 @@
obj-$(CONFIG_GPIO_PM8XXX_MPP) += pm8xxx-mpp.o
obj-$(CONFIG_GPIO_PM8XXX_RPC) += gpio-pm8xxx-rpc.o
obj-$(CONFIG_GPIO_PXA) += gpio-pxa.o
-obj-$(CONFIG_GPIO_QPNP) += qpnp-gpio.o
+obj-$(CONFIG_GPIO_QPNP_PIN) += qpnp-pin.o
obj-$(CONFIG_GPIO_RDC321X) += gpio-rdc321x.o
obj-$(CONFIG_PLAT_SAMSUNG) += gpio-samsung.o
obj-$(CONFIG_ARCH_SA1100) += gpio-sa1100.o
diff --git a/drivers/gpio/gpio-msm-common.c b/drivers/gpio/gpio-msm-common.c
index 9a9a783..5539950 100644
--- a/drivers/gpio/gpio-msm-common.c
+++ b/drivers/gpio/gpio-msm-common.c
@@ -100,7 +100,7 @@
DECLARE_BITMAP(enabled_irqs, NR_MSM_GPIOS);
DECLARE_BITMAP(wake_irqs, NR_MSM_GPIOS);
DECLARE_BITMAP(dual_edge_irqs, NR_MSM_GPIOS);
- struct irq_domain domain;
+ struct irq_domain *domain;
};
static DEFINE_SPINLOCK(tlmm_lock);
@@ -152,15 +152,14 @@
static int msm_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
struct msm_gpio_dev *g_dev = to_msm_gpio_dev(chip);
- struct irq_domain *domain = &g_dev->domain;
- return domain->irq_base + (offset - chip->base);
+ struct irq_domain *domain = g_dev->domain;
+ return irq_linear_revmap(domain, offset - chip->base);
}
static inline int msm_irq_to_gpio(struct gpio_chip *chip, unsigned irq)
{
- struct msm_gpio_dev *g_dev = to_msm_gpio_dev(chip);
- struct irq_domain *domain = &g_dev->domain;
- return irq - domain->irq_base;
+ struct irq_data *irq_data = irq_get_irq_data(irq);
+ return irq_data->hwirq;
}
#else
static int msm_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
@@ -391,6 +390,7 @@
*/
static struct lock_class_key msm_gpio_lock_class;
+/* TODO: This should be a real platform_driver */
static int __devinit msm_gpio_probe(void)
{
int i, irq, ret;
@@ -573,12 +573,12 @@
EXPORT_SYMBOL(msm_gpio_install_direct_irq);
#ifdef CONFIG_OF
-static int msm_gpio_domain_dt_translate(struct irq_domain *d,
- struct device_node *controller,
- const u32 *intspec,
- unsigned int intsize,
- unsigned long *out_hwirq,
- unsigned int *out_type)
+static int msm_gpio_irq_domain_xlate(struct irq_domain *d,
+ struct device_node *controller,
+ const u32 *intspec,
+ unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
{
if (d->of_node != controller)
return -EINVAL;
@@ -593,32 +593,32 @@
return 0;
}
+/*
+ * TODO: this really should be doing all the things that msm_gpio_probe() does,
+ * but since the msm_gpio_probe is called unconditionally for DT and non-DT
+ * configs, we can't duplicate it here. This should be fixed.
+ */
+int msm_gpio_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ return 0;
+}
+
static struct irq_domain_ops msm_gpio_irq_domain_ops = {
- .dt_translate = msm_gpio_domain_dt_translate,
+ .xlate = msm_gpio_irq_domain_xlate,
+ .map = msm_gpio_irq_domain_map,
};
int __init msm_gpio_of_init(struct device_node *node,
struct device_node *parent)
{
- struct irq_domain *domain = &msm_gpio.domain;
- int start;
-
- start = irq_domain_find_free_range(0, NR_MSM_GPIOS);
- domain->irq_base = irq_alloc_descs(start, 0, NR_MSM_GPIOS,
- numa_node_id());
- if (IS_ERR_VALUE(domain->irq_base)) {
- WARN(1, "Cannot allocate irq_descs @ IRQ%d\n", start);
- return domain->irq_base;
+ msm_gpio.domain = irq_domain_add_linear(node, NR_MSM_GPIOS,
+ &msm_gpio_irq_domain_ops, &msm_gpio);
+ if (!msm_gpio.domain) {
+ WARN(1, "Cannot allocate irq_domain\n");
+ return -ENOMEM;
}
- domain->nr_irq = NR_MSM_GPIOS;
- domain->of_node = of_node_get(node);
- domain->priv = &msm_gpio;
- domain->ops = &msm_gpio_irq_domain_ops;
- irq_domain_add(domain);
- msm_gpio.gpio_chip.of_node = of_node_get(node);
- pr_debug("%s: irq_base = %u\n", __func__, domain->irq_base);
-
return 0;
}
#endif
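
With the hunk above, MSM GPIO interrupts use a linear irq_domain: gpio_to_irq() resolves
through irq_linear_revmap() and the reverse direction simply reads hwirq out of the
irq_data instead of doing irq_base arithmetic. One caveat is that irq_linear_revmap()
returns 0 for a hwirq that was never mapped via irq_create_mapping(), which is what the
TODO about folding descriptor setup into the .map callback alludes to. A small
hypothetical helper to illustrate the lookup:

    #include <linux/irqdomain.h>

    /* Resolve an MSM GPIO offset to its Linux IRQ; returns 0 (no IRQ) if the
     * hwirq has not been mapped yet.  Helper name is illustrative only. */
    static unsigned int demo_msm_gpio_to_irq(struct irq_domain *domain,
                                             unsigned int gpio_offset)
    {
            return irq_linear_revmap(domain, gpio_offset);
    }
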
diff --git a/drivers/gpio/qpnp-gpio.c b/drivers/gpio/qpnp-gpio.c
deleted file mode 100644
index 97859e5..0000000
--- a/drivers/gpio/qpnp-gpio.c
+++ /dev/null
@@ -1,1091 +0,0 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) "%s: " fmt, __func__
-
-#include <linux/interrupt.h>
-#include <linux/types.h>
-#include <linux/spmi.h>
-#include <linux/platform_device.h>
-#include <linux/debugfs.h>
-#include <linux/gpio.h>
-#include <linux/slab.h>
-#include <linux/of.h>
-#include <linux/of_gpio.h>
-#include <linux/of_irq.h>
-#include <linux/export.h>
-#include <linux/module.h>
-#include <linux/qpnp/gpio.h>
-#include <linux/export.h>
-
-#include <mach/qpnp.h>
-
-#define Q_REG_ADDR(q_spec, reg_index) \
- ((q_spec)->offset + reg_index)
-
-#define Q_REG_STATUS1 0x8
-#define Q_NUM_CTL_REGS 7
-
-/* type registers base address offsets */
-#define Q_REG_TYPE 0x10
-#define Q_REG_SUBTYPE 0x11
-
-/* gpio peripheral type and subtype values */
-#define Q_GPIO_TYPE 0x10
-#define Q_GPIO_SUBTYPE_GPIO_4CH 0x1
-#define Q_GPIO_SUBTYPE_GPIOC_4CH 0x5
-#define Q_GPIO_SUBTYPE_GPIO_8CH 0x9
-#define Q_GPIO_SUBTYPE_GPIOC_8CH 0xD
-
-/* control register base address offsets */
-#define Q_REG_MODE_CTL 0x40
-#define Q_REG_DIG_PULL_CTL 0x42
-#define Q_REG_DIG_IN_CTL 0x43
-#define Q_REG_DIG_VIN_CTL 0x44
-#define Q_REG_DIG_OUT_CTL 0x45
-#define Q_REG_EN_CTL 0x46
-
-/* control register regs array indices */
-#define Q_REG_I_MODE_CTL 0
-#define Q_REG_I_DIG_PULL_CTL 2
-#define Q_REG_I_DIG_IN_CTL 3
-#define Q_REG_I_DIG_VIN_CTL 4
-#define Q_REG_I_DIG_OUT_CTL 5
-#define Q_REG_I_EN_CTL 6
-
-/* control reg: mode */
-#define Q_REG_OUT_INVERT_SHIFT 0
-#define Q_REG_OUT_INVERT_MASK 0x1
-#define Q_REG_SRC_SEL_SHIFT 1
-#define Q_REG_SRC_SEL_MASK 0xE
-#define Q_REG_MODE_SEL_SHIFT 4
-#define Q_REG_MODE_SEL_MASK 0x70
-
-/* control reg: dig_vin */
-#define Q_REG_VIN_SHIFT 0
-#define Q_REG_VIN_MASK 0x7
-
-/* control reg: dig_pull */
-#define Q_REG_PULL_SHIFT 0
-#define Q_REG_PULL_MASK 0x7
-
-/* control reg: dig_out */
-#define Q_REG_OUT_STRENGTH_SHIFT 0
-#define Q_REG_OUT_STRENGTH_MASK 0x3
-#define Q_REG_OUT_TYPE_SHIFT 4
-#define Q_REG_OUT_TYPE_MASK 0x30
-
-/* control reg: en */
-#define Q_REG_MASTER_EN_SHIFT 7
-#define Q_REG_MASTER_EN_MASK 0x80
-
-enum qpnp_gpio_param_type {
- Q_GPIO_CFG_DIRECTION,
- Q_GPIO_CFG_OUTPUT_TYPE,
- Q_GPIO_CFG_INVERT,
- Q_GPIO_CFG_PULL,
- Q_GPIO_CFG_VIN_SEL,
- Q_GPIO_CFG_OUT_STRENGTH,
- Q_GPIO_CFG_SRC_SELECT,
- Q_GPIO_CFG_MASTER_EN,
- Q_GPIO_CFG_INVALID,
-};
-
-#define Q_NUM_PARAMS Q_GPIO_CFG_INVALID
-
-/* param error checking */
-#define QPNP_GPIO_DIR_INVALID 3
-#define QPNP_GPIO_INVERT_INVALID 2
-#define QPNP_GPIO_OUT_BUF_INVALID 3
-#define QPNP_GPIO_VIN_INVALID 8
-#define QPNP_GPIO_PULL_INVALID 6
-#define QPNP_GPIO_OUT_STRENGTH_INVALID 4
-#define QPNP_GPIO_SRC_INVALID 8
-#define QPNP_GPIO_MASTER_INVALID 2
-
-struct qpnp_gpio_spec {
- uint8_t slave; /* 0-15 */
- uint16_t offset; /* 0-255 */
- uint32_t gpio_chip_idx; /* offset from gpio_chip base */
- uint32_t pmic_gpio; /* PMIC gpio number */
- int irq; /* logical IRQ number */
- u8 regs[Q_NUM_CTL_REGS]; /* Control regs */
- u8 type; /* peripheral type */
- u8 subtype; /* peripheral subtype */
- struct device_node *node;
- enum qpnp_gpio_param_type params[Q_NUM_PARAMS];
- struct qpnp_gpio_chip *q_chip;
-};
-
-struct qpnp_gpio_chip {
- struct gpio_chip gpio_chip;
- struct spmi_device *spmi;
- struct qpnp_gpio_spec **pmic_gpios;
- struct qpnp_gpio_spec **chip_gpios;
- uint32_t pmic_gpio_lowest;
- uint32_t pmic_gpio_highest;
- struct device_node *int_ctrl;
- struct list_head chip_list;
- struct dentry *dfs_dir;
-};
-
-static LIST_HEAD(qpnp_gpio_chips);
-static DEFINE_MUTEX(qpnp_gpio_chips_lock);
-
-static inline void qpnp_pmic_gpio_set_spec(struct qpnp_gpio_chip *q_chip,
- uint32_t pmic_gpio,
- struct qpnp_gpio_spec *spec)
-{
- q_chip->pmic_gpios[pmic_gpio - q_chip->pmic_gpio_lowest] = spec;
-}
-
-static inline struct qpnp_gpio_spec *qpnp_pmic_gpio_get_spec(
- struct qpnp_gpio_chip *q_chip,
- uint32_t pmic_gpio)
-{
- if (pmic_gpio < q_chip->pmic_gpio_lowest ||
- pmic_gpio > q_chip->pmic_gpio_highest)
- return NULL;
-
- return q_chip->pmic_gpios[pmic_gpio - q_chip->pmic_gpio_lowest];
-}
-
-static inline struct qpnp_gpio_spec *qpnp_chip_gpio_get_spec(
- struct qpnp_gpio_chip *q_chip,
- uint32_t chip_gpio)
-{
- if (chip_gpio > q_chip->gpio_chip.ngpio)
- return NULL;
-
- return q_chip->chip_gpios[chip_gpio];
-}
-
-static inline void qpnp_chip_gpio_set_spec(struct qpnp_gpio_chip *q_chip,
- uint32_t chip_gpio,
- struct qpnp_gpio_spec *spec)
-{
- q_chip->chip_gpios[chip_gpio] = spec;
-}
-
-static int qpnp_gpio_check_config(struct qpnp_gpio_spec *q_spec,
- struct qpnp_gpio_cfg *param)
-{
- int gpio = q_spec->pmic_gpio;
-
- if (param->direction >= QPNP_GPIO_DIR_INVALID)
- pr_err("invalid direction for gpio %d\n", gpio);
- else if (param->invert >= QPNP_GPIO_INVERT_INVALID)
- pr_err("invalid invert polarity for gpio %d\n", gpio);
- else if (param->src_select >= QPNP_GPIO_SRC_INVALID)
- pr_err("invalid source select for gpio %d\n", gpio);
- else if (param->out_strength >= QPNP_GPIO_OUT_STRENGTH_INVALID ||
- param->out_strength == 0)
- pr_err("invalid out strength for gpio %d\n", gpio);
- else if (param->output_type >= QPNP_GPIO_OUT_BUF_INVALID)
- pr_err("invalid out type for gpio %d\n", gpio);
- else if ((param->output_type == QPNP_GPIO_OUT_BUF_OPEN_DRAIN_NMOS ||
- param->output_type == QPNP_GPIO_OUT_BUF_OPEN_DRAIN_PMOS) &&
- (q_spec->subtype == Q_GPIO_SUBTYPE_GPIOC_4CH ||
- (q_spec->subtype == Q_GPIO_SUBTYPE_GPIOC_8CH)))
- pr_err("invalid out type for gpio %d\n"
- "gpioc does not support open-drain\n", gpio);
- else if (param->vin_sel >= QPNP_GPIO_VIN_INVALID)
- pr_err("invalid vin select value for gpio %d\n", gpio);
- else if (param->pull >= QPNP_GPIO_PULL_INVALID)
- pr_err("invalid pull value for gpio %d\n", gpio);
- else if (param->master_en >= QPNP_GPIO_MASTER_INVALID)
- pr_err("invalid master_en value for gpio %d\n", gpio);
- else
- return 0;
-
- return -EINVAL;
-}
-
-static inline u8 q_reg_get(u8 *reg, int shift, int mask)
-{
- return (*reg & mask) >> shift;
-}
-
-static inline void q_reg_set(u8 *reg, int shift, int mask, int value)
-{
- *reg |= (value << shift) & mask;
-}
-
-static inline void q_reg_clr_set(u8 *reg, int shift, int mask, int value)
-{
- *reg &= ~mask;
- *reg |= (value << shift) & mask;
-}
-
-static int qpnp_gpio_cache_regs(struct qpnp_gpio_chip *q_chip,
- struct qpnp_gpio_spec *q_spec)
-{
- int rc;
- struct device *dev = &q_chip->spmi->dev;
-
- rc = spmi_ext_register_readl(q_chip->spmi->ctrl, q_spec->slave,
- Q_REG_ADDR(q_spec, Q_REG_MODE_CTL),
- &q_spec->regs[Q_REG_I_MODE_CTL],
- Q_NUM_CTL_REGS);
- if (rc)
- dev_err(dev, "%s: unable to read control regs\n", __func__);
-
- return rc;
-}
-
-static int _qpnp_gpio_config(struct qpnp_gpio_chip *q_chip,
- struct qpnp_gpio_spec *q_spec,
- struct qpnp_gpio_cfg *param)
-{
- struct device *dev = &q_chip->spmi->dev;
- int rc;
-
- rc = qpnp_gpio_check_config(q_spec, param);
- if (rc)
- goto gpio_cfg;
-
- /* set direction */
- q_reg_clr_set(&q_spec->regs[Q_REG_I_MODE_CTL],
- Q_REG_MODE_SEL_SHIFT, Q_REG_MODE_SEL_MASK,
- param->direction);
-
- /* output specific configuration */
- q_reg_clr_set(&q_spec->regs[Q_REG_I_MODE_CTL],
- Q_REG_OUT_INVERT_SHIFT, Q_REG_OUT_INVERT_MASK,
- param->invert);
- q_reg_clr_set(&q_spec->regs[Q_REG_I_MODE_CTL],
- Q_REG_SRC_SEL_SHIFT, Q_REG_SRC_SEL_MASK,
- param->src_select);
- q_reg_clr_set(&q_spec->regs[Q_REG_I_DIG_OUT_CTL],
- Q_REG_OUT_STRENGTH_SHIFT, Q_REG_OUT_STRENGTH_MASK,
- param->out_strength);
- q_reg_clr_set(&q_spec->regs[Q_REG_I_DIG_OUT_CTL],
- Q_REG_OUT_TYPE_SHIFT, Q_REG_OUT_TYPE_MASK,
- param->output_type);
-
- /* config applicable for both input / output */
- q_reg_clr_set(&q_spec->regs[Q_REG_I_DIG_VIN_CTL],
- Q_REG_VIN_SHIFT, Q_REG_VIN_MASK,
- param->vin_sel);
- q_reg_clr_set(&q_spec->regs[Q_REG_I_DIG_PULL_CTL],
- Q_REG_PULL_SHIFT, Q_REG_PULL_MASK,
- param->pull);
- q_reg_clr_set(&q_spec->regs[Q_REG_I_EN_CTL],
- Q_REG_MASTER_EN_SHIFT, Q_REG_MASTER_EN_MASK,
- param->master_en);
-
- rc = spmi_ext_register_writel(q_chip->spmi->ctrl, q_spec->slave,
- Q_REG_ADDR(q_spec, Q_REG_MODE_CTL),
- &q_spec->regs[Q_REG_I_MODE_CTL], Q_NUM_CTL_REGS);
- if (rc) {
- dev_err(&q_chip->spmi->dev, "%s: unable to write master"
- " enable\n", __func__);
- goto gpio_cfg;
- }
-
- return 0;
-
-gpio_cfg:
- dev_err(dev, "%s: unable to set default config for"
- " pmic gpio %d\n", __func__, q_spec->pmic_gpio);
-
- return rc;
-}
-
-int qpnp_gpio_config(int gpio, struct qpnp_gpio_cfg *param)
-{
- int rc, chip_offset;
- struct qpnp_gpio_chip *q_chip;
- struct qpnp_gpio_spec *q_spec = NULL;
- struct gpio_chip *gpio_chip;
-
- if (param == NULL)
- return -EINVAL;
-
- mutex_lock(&qpnp_gpio_chips_lock);
- list_for_each_entry(q_chip, &qpnp_gpio_chips, chip_list) {
- gpio_chip = &q_chip->gpio_chip;
- if (gpio >= gpio_chip->base
- && gpio < gpio_chip->base + gpio_chip->ngpio) {
- chip_offset = gpio - gpio_chip->base;
- q_spec = qpnp_chip_gpio_get_spec(q_chip, chip_offset);
- if (WARN_ON(!q_spec)) {
- mutex_unlock(&qpnp_gpio_chips_lock);
- return -ENODEV;
- }
- break;
- }
- }
- mutex_unlock(&qpnp_gpio_chips_lock);
-
- rc = _qpnp_gpio_config(q_chip, q_spec, param);
-
- return rc;
-}
-EXPORT_SYMBOL(qpnp_gpio_config);
-
-int qpnp_gpio_map_gpio(uint16_t slave_id, uint32_t pmic_gpio)
-{
- struct qpnp_gpio_chip *q_chip;
- struct qpnp_gpio_spec *q_spec = NULL;
-
- mutex_lock(&qpnp_gpio_chips_lock);
- list_for_each_entry(q_chip, &qpnp_gpio_chips, chip_list) {
- if (q_chip->spmi->sid != slave_id)
- continue;
- if (q_chip->pmic_gpio_lowest <= pmic_gpio &&
- q_chip->pmic_gpio_highest >= pmic_gpio) {
- q_spec = qpnp_pmic_gpio_get_spec(q_chip, pmic_gpio);
- mutex_unlock(&qpnp_gpio_chips_lock);
- if (WARN_ON(!q_spec))
- return -ENODEV;
- return q_chip->gpio_chip.base + q_spec->gpio_chip_idx;
- }
- }
- mutex_unlock(&qpnp_gpio_chips_lock);
- return -EINVAL;
-}
-EXPORT_SYMBOL(qpnp_gpio_map_gpio);
-
-static int qpnp_gpio_to_irq(struct gpio_chip *gpio_chip, unsigned offset)
-{
- struct qpnp_gpio_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
- struct qpnp_gpio_spec *q_spec;
-
- q_spec = qpnp_chip_gpio_get_spec(q_chip, offset);
- if (!q_spec)
- return -EINVAL;
-
- return q_spec->irq;
-}
-
-static int qpnp_gpio_get(struct gpio_chip *gpio_chip, unsigned offset)
-{
- int rc, ret_val;
- struct qpnp_gpio_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
- struct qpnp_gpio_spec *q_spec = NULL;
- u8 buf[1];
-
- if (WARN_ON(!q_chip))
- return -ENODEV;
-
- q_spec = qpnp_chip_gpio_get_spec(q_chip, offset);
- if (WARN_ON(!q_spec))
- return -ENODEV;
-
- /* gpio val is from RT status iff input is enabled */
- if ((q_spec->regs[Q_REG_I_MODE_CTL] & Q_REG_MODE_SEL_MASK)
- == QPNP_GPIO_DIR_IN) {
- /* INT_RT_STS */
- rc = spmi_ext_register_readl(q_chip->spmi->ctrl, q_spec->slave,
- Q_REG_ADDR(q_spec, Q_REG_STATUS1),
- &buf[0], 1);
- return buf[0];
-
- } else {
- ret_val = (q_spec->regs[Q_REG_I_MODE_CTL] &
- Q_REG_OUT_INVERT_MASK) >> Q_REG_OUT_INVERT_SHIFT;
- return ret_val;
- }
-
- return 0;
-}
-
-static int __qpnp_gpio_set(struct qpnp_gpio_chip *q_chip,
- struct qpnp_gpio_spec *q_spec, int value)
-{
- int rc;
-
- if (!q_chip || !q_spec)
- return -EINVAL;
-
- if (value)
- q_reg_clr_set(&q_spec->regs[Q_REG_I_MODE_CTL],
- Q_REG_OUT_INVERT_SHIFT, Q_REG_OUT_INVERT_MASK, 1);
- else
- q_reg_clr_set(&q_spec->regs[Q_REG_I_MODE_CTL],
- Q_REG_OUT_INVERT_SHIFT, Q_REG_OUT_INVERT_MASK, 0);
-
- rc = spmi_ext_register_writel(q_chip->spmi->ctrl, q_spec->slave,
- Q_REG_ADDR(q_spec, Q_REG_I_MODE_CTL),
- &q_spec->regs[Q_REG_I_MODE_CTL], 1);
- if (rc)
- dev_err(&q_chip->spmi->dev, "%s: spmi write failed\n",
- __func__);
- return rc;
-}
-
-
-static void qpnp_gpio_set(struct gpio_chip *gpio_chip,
- unsigned offset, int value)
-{
- struct qpnp_gpio_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
- struct qpnp_gpio_spec *q_spec;
-
- if (WARN_ON(!q_chip))
- return;
-
- q_spec = qpnp_chip_gpio_get_spec(q_chip, offset);
- if (WARN_ON(!q_spec))
- return;
-
- __qpnp_gpio_set(q_chip, q_spec, value);
-}
-
-static int qpnp_gpio_set_direction(struct qpnp_gpio_chip *q_chip,
- struct qpnp_gpio_spec *q_spec, int direction)
-{
- int rc;
-
- if (!q_chip || !q_spec)
- return -EINVAL;
-
- if (direction >= QPNP_GPIO_DIR_INVALID) {
- pr_err("invalid direction specification %d\n", direction);
- return -EINVAL;
- }
-
- q_reg_clr_set(&q_spec->regs[Q_REG_I_MODE_CTL],
- Q_REG_MODE_SEL_SHIFT,
- Q_REG_MODE_SEL_MASK,
- direction);
-
- rc = spmi_ext_register_writel(q_chip->spmi->ctrl, q_spec->slave,
- Q_REG_ADDR(q_spec, Q_REG_I_MODE_CTL),
- &q_spec->regs[Q_REG_I_MODE_CTL], 1);
- return rc;
-}
-
-static int qpnp_gpio_direction_input(struct gpio_chip *gpio_chip,
- unsigned offset)
-{
- struct qpnp_gpio_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
- struct qpnp_gpio_spec *q_spec;
-
- if (WARN_ON(!q_chip))
- return -ENODEV;
-
- q_spec = qpnp_chip_gpio_get_spec(q_chip, offset);
- if (WARN_ON(!q_spec))
- return -ENODEV;
-
- return qpnp_gpio_set_direction(q_chip, q_spec, QPNP_GPIO_DIR_IN);
-}
-
-static int qpnp_gpio_direction_output(struct gpio_chip *gpio_chip,
- unsigned offset,
- int val)
-{
- int rc;
- struct qpnp_gpio_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
- struct qpnp_gpio_spec *q_spec;
-
- if (WARN_ON(!q_chip))
- return -ENODEV;
-
- q_spec = qpnp_chip_gpio_get_spec(q_chip, offset);
- if (WARN_ON(!q_spec))
- return -ENODEV;
-
- rc = __qpnp_gpio_set(q_chip, q_spec, val);
- if (rc)
- return rc;
-
- rc = qpnp_gpio_set_direction(q_chip, q_spec, QPNP_GPIO_DIR_OUT);
-
- return rc;
-}
-
-static int qpnp_gpio_of_gpio_xlate(struct gpio_chip *gpio_chip,
- const struct of_phandle_args *gpio_spec,
- u32 *flags)
-{
- struct qpnp_gpio_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
- struct qpnp_gpio_spec *q_spec;
-
- if (WARN_ON(gpio_chip->of_gpio_n_cells < 2)) {
- pr_err("of_gpio_n_cells < 2\n");
- return -EINVAL;
- }
-
- q_spec = qpnp_pmic_gpio_get_spec(q_chip, gpio_spec->args[0]);
- if (!q_spec) {
- pr_err("no such PMIC gpio %u in device topology\n",
- gpio_spec->args[0]);
- return -EINVAL;
- }
-
- if (flags)
- *flags = gpio_spec->args[1];
-
- return q_spec->gpio_chip_idx;
-}
-
-static int qpnp_gpio_apply_config(struct qpnp_gpio_chip *q_chip,
- struct qpnp_gpio_spec *q_spec)
-{
- struct qpnp_gpio_cfg param;
- struct device_node *node = q_spec->node;
- int rc;
-
- param.direction = q_reg_get(&q_spec->regs[Q_REG_I_MODE_CTL],
- Q_REG_MODE_SEL_SHIFT,
- Q_REG_MODE_SEL_MASK);
- param.output_type = q_reg_get(&q_spec->regs[Q_REG_I_DIG_OUT_CTL],
- Q_REG_OUT_TYPE_SHIFT,
- Q_REG_OUT_TYPE_MASK);
- param.invert = q_reg_get(&q_spec->regs[Q_REG_I_MODE_CTL],
- Q_REG_OUT_INVERT_MASK,
- Q_REG_OUT_INVERT_MASK);
- param.pull = q_reg_get(&q_spec->regs[Q_REG_I_MODE_CTL],
- Q_REG_PULL_SHIFT, Q_REG_PULL_MASK);
- param.vin_sel = q_reg_get(&q_spec->regs[Q_REG_I_DIG_VIN_CTL],
- Q_REG_VIN_SHIFT, Q_REG_VIN_MASK);
- param.out_strength = q_reg_get(&q_spec->regs[Q_REG_I_DIG_OUT_CTL],
- Q_REG_OUT_STRENGTH_SHIFT,
- Q_REG_OUT_STRENGTH_MASK);
- param.src_select = q_reg_get(&q_spec->regs[Q_REG_I_MODE_CTL],
- Q_REG_SRC_SEL_SHIFT, Q_REG_SRC_SEL_MASK);
- param.master_en = q_reg_get(&q_spec->regs[Q_REG_I_EN_CTL],
- Q_REG_MASTER_EN_SHIFT,
- Q_REG_MASTER_EN_MASK);
-
-	of_property_read_u32(node, "qcom,direction",
-				&param.direction);
-	of_property_read_u32(node, "qcom,output-type",
-				&param.output_type);
-	of_property_read_u32(node, "qcom,invert",
-				&param.invert);
-	of_property_read_u32(node, "qcom,pull",
-				&param.pull);
-	of_property_read_u32(node, "qcom,vin-sel",
-				&param.vin_sel);
-	of_property_read_u32(node, "qcom,out-strength",
-				&param.out_strength);
-	of_property_read_u32(node, "qcom,src-select",
-				&param.src_select);
-	rc = of_property_read_u32(node, "qcom,master-en",
-				&param.master_en);
-
-	rc = _qpnp_gpio_config(q_chip, q_spec, &param);
-
- return rc;
-}
-
-static int qpnp_gpio_free_chip(struct qpnp_gpio_chip *q_chip)
-{
- struct spmi_device *spmi = q_chip->spmi;
- int rc, i;
-
- if (q_chip->chip_gpios)
- for (i = 0; i < spmi->num_dev_node; i++)
- kfree(q_chip->chip_gpios[i]);
-
- mutex_lock(&qpnp_gpio_chips_lock);
- list_del(&q_chip->chip_list);
- mutex_unlock(&qpnp_gpio_chips_lock);
- rc = gpiochip_remove(&q_chip->gpio_chip);
- if (rc)
- dev_err(&q_chip->spmi->dev, "%s: unable to remove gpio\n",
- __func__);
- kfree(q_chip->chip_gpios);
- kfree(q_chip->pmic_gpios);
- kfree(q_chip);
- return rc;
-}
-
-#ifdef CONFIG_GPIO_QPNP_DEBUG
-struct qpnp_gpio_reg {
- uint32_t addr;
- uint32_t idx;
- uint32_t shift;
- uint32_t mask;
-};
-
-static struct dentry *driver_dfs_dir;
-
-static int qpnp_gpio_reg_attr(enum qpnp_gpio_param_type type,
- struct qpnp_gpio_reg *cfg)
-{
- switch (type) {
- case Q_GPIO_CFG_DIRECTION:
- cfg->addr = Q_REG_MODE_CTL;
- cfg->idx = Q_REG_I_MODE_CTL;
- cfg->shift = Q_REG_MODE_SEL_SHIFT;
- cfg->mask = Q_REG_MODE_SEL_MASK;
- break;
- case Q_GPIO_CFG_OUTPUT_TYPE:
- cfg->addr = Q_REG_DIG_OUT_CTL;
- cfg->idx = Q_REG_I_DIG_OUT_CTL;
- cfg->shift = Q_REG_OUT_TYPE_SHIFT;
- cfg->mask = Q_REG_OUT_TYPE_MASK;
- break;
- case Q_GPIO_CFG_INVERT:
- cfg->addr = Q_REG_MODE_CTL;
- cfg->idx = Q_REG_I_MODE_CTL;
- cfg->shift = Q_REG_OUT_INVERT_SHIFT;
- cfg->mask = Q_REG_OUT_INVERT_MASK;
- break;
- case Q_GPIO_CFG_PULL:
- cfg->addr = Q_REG_DIG_PULL_CTL;
- cfg->idx = Q_REG_I_DIG_PULL_CTL;
- cfg->shift = Q_REG_PULL_SHIFT;
- cfg->mask = Q_REG_PULL_MASK;
- break;
- case Q_GPIO_CFG_VIN_SEL:
- cfg->addr = Q_REG_DIG_VIN_CTL;
- cfg->idx = Q_REG_I_DIG_VIN_CTL;
- cfg->shift = Q_REG_VIN_SHIFT;
- cfg->mask = Q_REG_VIN_MASK;
- break;
- case Q_GPIO_CFG_OUT_STRENGTH:
- cfg->addr = Q_REG_DIG_OUT_CTL;
- cfg->idx = Q_REG_I_DIG_OUT_CTL;
- cfg->shift = Q_REG_OUT_STRENGTH_SHIFT;
- cfg->mask = Q_REG_OUT_STRENGTH_MASK;
- break;
- case Q_GPIO_CFG_SRC_SELECT:
- cfg->addr = Q_REG_MODE_CTL;
- cfg->idx = Q_REG_I_MODE_CTL;
- cfg->shift = Q_REG_SRC_SEL_SHIFT;
- cfg->mask = Q_REG_SRC_SEL_MASK;
- break;
- case Q_GPIO_CFG_MASTER_EN:
- cfg->addr = Q_REG_EN_CTL;
- cfg->idx = Q_REG_I_EN_CTL;
- cfg->shift = Q_REG_MASTER_EN_SHIFT;
- cfg->mask = Q_REG_MASTER_EN_MASK;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int qpnp_gpio_debugfs_get(void *data, u64 *val)
-{
- enum qpnp_gpio_param_type *idx = data;
- struct qpnp_gpio_spec *q_spec;
- struct qpnp_gpio_reg cfg = {};
- int rc;
-
- rc = qpnp_gpio_reg_attr(*idx, &cfg);
- if (rc)
- return rc;
- q_spec = container_of(idx, struct qpnp_gpio_spec, params[*idx]);
- *val = q_reg_get(&q_spec->regs[cfg.idx], cfg.shift, cfg.mask);
- return 0;
-}
-
-static int qpnp_gpio_check_reg_val(enum qpnp_gpio_param_type idx,
- struct qpnp_gpio_spec *q_spec,
- uint32_t val)
-{
- switch (idx) {
- case Q_GPIO_CFG_DIRECTION:
- if (val >= QPNP_GPIO_DIR_INVALID)
- return -EINVAL;
- break;
- case Q_GPIO_CFG_OUTPUT_TYPE:
- if ((val >= QPNP_GPIO_OUT_BUF_INVALID) ||
- ((val == QPNP_GPIO_OUT_BUF_OPEN_DRAIN_NMOS ||
- val == QPNP_GPIO_OUT_BUF_OPEN_DRAIN_PMOS) &&
- (q_spec->subtype == Q_GPIO_SUBTYPE_GPIOC_4CH ||
- (q_spec->subtype == Q_GPIO_SUBTYPE_GPIOC_8CH))))
- return -EINVAL;
- break;
- case Q_GPIO_CFG_INVERT:
- if (val >= QPNP_GPIO_INVERT_INVALID)
- return -EINVAL;
- break;
- case Q_GPIO_CFG_PULL:
- if (val >= QPNP_GPIO_PULL_INVALID)
- return -EINVAL;
- break;
- case Q_GPIO_CFG_VIN_SEL:
- if (val >= QPNP_GPIO_VIN_INVALID)
- return -EINVAL;
- break;
- case Q_GPIO_CFG_OUT_STRENGTH:
- if (val >= QPNP_GPIO_OUT_STRENGTH_INVALID ||
- val == 0)
- return -EINVAL;
- break;
- case Q_GPIO_CFG_SRC_SELECT:
- if (val >= QPNP_GPIO_SRC_INVALID)
- return -EINVAL;
- break;
- case Q_GPIO_CFG_MASTER_EN:
- if (val >= QPNP_GPIO_MASTER_INVALID)
- return -EINVAL;
- break;
- default:
- pr_err("invalid param type %u specified\n", idx);
- return -EINVAL;
- }
- return 0;
-}
-
-static int qpnp_gpio_debugfs_set(void *data, u64 val)
-{
- enum qpnp_gpio_param_type *idx = data;
- struct qpnp_gpio_spec *q_spec;
- struct qpnp_gpio_chip *q_chip;
- struct qpnp_gpio_reg cfg = {};
- int rc;
-
- q_spec = container_of(idx, struct qpnp_gpio_spec, params[*idx]);
- q_chip = q_spec->q_chip;
-
- rc = qpnp_gpio_check_reg_val(*idx, q_spec, val);
- if (rc)
- return rc;
-
- rc = qpnp_gpio_reg_attr(*idx, &cfg);
- if (rc)
- return rc;
- q_reg_clr_set(&q_spec->regs[cfg.idx], cfg.shift, cfg.mask, val);
- rc = spmi_ext_register_writel(q_chip->spmi->ctrl, q_spec->slave,
- Q_REG_ADDR(q_spec, cfg.addr),
- &q_spec->regs[cfg.idx], 1);
-
- return rc;
-}
-DEFINE_SIMPLE_ATTRIBUTE(qpnp_gpio_fops, qpnp_gpio_debugfs_get,
- qpnp_gpio_debugfs_set, "%llu\n");
-
-#define DEBUGFS_BUF_SIZE 11 /* supports 2^32 in decimal */
-
-struct qpnp_gpio_debugfs_args {
- enum qpnp_gpio_param_type type;
- const char *filename;
-};
-
-static struct qpnp_gpio_debugfs_args dfs_args[] = {
- { Q_GPIO_CFG_DIRECTION, "direction" },
- { Q_GPIO_CFG_OUTPUT_TYPE, "output_type" },
- { Q_GPIO_CFG_INVERT, "invert" },
- { Q_GPIO_CFG_PULL, "pull" },
- { Q_GPIO_CFG_VIN_SEL, "vin_sel" },
- { Q_GPIO_CFG_OUT_STRENGTH, "out_strength" },
- { Q_GPIO_CFG_SRC_SELECT, "src_select" },
- { Q_GPIO_CFG_MASTER_EN, "master_en" }
-};
-
-static int qpnp_gpio_debugfs_create(struct qpnp_gpio_chip *q_chip)
-{
- struct spmi_device *spmi = q_chip->spmi;
- struct device *dev = &spmi->dev;
- struct qpnp_gpio_spec *q_spec;
- enum qpnp_gpio_param_type *params;
- enum qpnp_gpio_param_type type;
- char pmic_gpio[DEBUGFS_BUF_SIZE];
- const char *filename;
- struct dentry *dfs, *dfs_io_dir;
- int i, j;
-
- BUG_ON(Q_NUM_PARAMS != ARRAY_SIZE(dfs_args));
-
- q_chip->dfs_dir = debugfs_create_dir(dev->of_node->name,
- driver_dfs_dir);
- if (q_chip->dfs_dir == NULL) {
- dev_err(dev, "%s: cannot register chip debugfs directory %s\n",
- __func__, dev->of_node->name);
- return -ENODEV;
- }
-
- for (i = 0; i < spmi->num_dev_node; i++) {
- q_spec = qpnp_chip_gpio_get_spec(q_chip, i);
- params = q_spec->params;
- snprintf(pmic_gpio, DEBUGFS_BUF_SIZE, "%u", q_spec->pmic_gpio);
- dfs_io_dir = debugfs_create_dir(pmic_gpio,
- q_chip->dfs_dir);
- if (dfs_io_dir == NULL)
- goto dfs_err;
-
- for (j = 0; j < Q_NUM_PARAMS; j++) {
- type = dfs_args[j].type;
- filename = dfs_args[j].filename;
-
- params[type] = type;
- dfs = debugfs_create_file(
- filename,
- S_IRUGO | S_IWUSR,
- dfs_io_dir,
- &q_spec->params[type],
- &qpnp_gpio_fops);
- if (dfs == NULL)
- goto dfs_err;
- }
- }
- return 0;
-dfs_err:
- dev_err(dev, "%s: cannot register debugfs for pmic gpio %u on"
- " chip %s\n", __func__,
- q_spec->pmic_gpio, dev->of_node->name);
- debugfs_remove_recursive(q_chip->dfs_dir);
- return -ENFILE;
-}
-#else
-static int qpnp_gpio_debugfs_create(struct qpnp_gpio_chip *q_chip)
-{
- return 0;
-}
-#endif
-
-static int qpnp_gpio_probe(struct spmi_device *spmi)
-{
- struct qpnp_gpio_chip *q_chip;
- struct resource *res;
- struct qpnp_gpio_spec *q_spec;
- int i, rc;
- int lowest_gpio = UINT_MAX, highest_gpio = 0;
- u32 intspec[3], gpio;
- char buf[2];
-
- q_chip = kzalloc(sizeof(*q_chip), GFP_KERNEL);
- if (!q_chip) {
- dev_err(&spmi->dev, "%s: Can't allocate gpio_chip\n",
- __func__);
- return -ENOMEM;
- }
- q_chip->spmi = spmi;
- dev_set_drvdata(&spmi->dev, q_chip);
-
- mutex_lock(&qpnp_gpio_chips_lock);
- list_add(&q_chip->chip_list, &qpnp_gpio_chips);
- mutex_unlock(&qpnp_gpio_chips_lock);
-
- /* first scan through nodes to find the range required for allocation */
- for (i = 0; i < spmi->num_dev_node; i++) {
- rc = of_property_read_u32(spmi->dev_node[i].of_node,
- "qcom,gpio-num", &gpio);
- if (rc) {
- dev_err(&spmi->dev, "%s: unable to get"
- " qcom,gpio-num property\n", __func__);
- goto err_probe;
- }
-
- if (gpio < lowest_gpio)
- lowest_gpio = gpio;
- if (gpio > highest_gpio)
- highest_gpio = gpio;
- }
-
- if (highest_gpio < lowest_gpio) {
- dev_err(&spmi->dev, "%s: no device nodes specified in"
- " topology\n", __func__);
- rc = -EINVAL;
- goto err_probe;
- } else if (lowest_gpio == 0) {
- dev_err(&spmi->dev, "%s: 0 is not a valid PMIC GPIO\n",
- __func__);
- rc = -EINVAL;
- goto err_probe;
- }
-
- q_chip->pmic_gpio_lowest = lowest_gpio;
- q_chip->pmic_gpio_highest = highest_gpio;
-
- /* allocate gpio lookup tables */
- q_chip->pmic_gpios = kzalloc(sizeof(struct qpnp_gpio_spec *) *
- highest_gpio - lowest_gpio + 1,
- GFP_KERNEL);
- q_chip->chip_gpios = kzalloc(sizeof(struct qpnp_gpio_spec *) *
- spmi->num_dev_node, GFP_KERNEL);
- if (!q_chip->pmic_gpios || !q_chip->chip_gpios) {
- dev_err(&spmi->dev, "%s: unable to allocate memory\n",
- __func__);
- rc = -ENOMEM;
- goto err_probe;
- }
-
- /* get interrupt controller device_node */
- q_chip->int_ctrl = of_irq_find_parent(spmi->dev.of_node);
- if (!q_chip->int_ctrl) {
- dev_err(&spmi->dev, "%s: Can't find interrupt parent\n",
- __func__);
- rc = -EINVAL;
- goto err_probe;
- }
-
- /* now scan through again and populate the lookup table */
- for (i = 0; i < spmi->num_dev_node; i++) {
- res = qpnp_get_resource(spmi, i, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&spmi->dev, "%s: node %s is missing has no"
- " base address definition\n",
- __func__, spmi->dev_node[i].of_node->full_name);
- }
-
- rc = of_property_read_u32(spmi->dev_node[i].of_node,
- "qcom,gpio-num", &gpio);
- if (rc) {
- dev_err(&spmi->dev, "%s: unable to get"
- " qcom,gpio-num property\n", __func__);
- goto err_probe;
- }
-
- q_spec = kzalloc(sizeof(struct qpnp_gpio_spec),
- GFP_KERNEL);
- if (!q_spec) {
- dev_err(&spmi->dev, "%s: unable to allocate"
- " memory\n",
- __func__);
- rc = -ENOMEM;
- goto err_probe;
- }
-
- q_spec->slave = spmi->sid;
- q_spec->offset = res->start;
- q_spec->gpio_chip_idx = i;
- q_spec->pmic_gpio = gpio;
- q_spec->node = spmi->dev_node[i].of_node;
- q_spec->q_chip = q_chip;
-
- rc = spmi_ext_register_readl(spmi->ctrl, q_spec->slave,
- Q_REG_ADDR(q_spec, Q_REG_TYPE), &buf[0], 2);
- if (rc) {
- dev_err(&spmi->dev, "%s: unable to read type regs\n",
- __func__);
- goto err_probe;
- }
- q_spec->type = buf[0];
- q_spec->subtype = buf[1];
-
- /* call into irq_domain to get irq mapping */
- intspec[0] = q_chip->spmi->sid;
- intspec[1] = (q_spec->offset >> 8) & 0xFF;
- intspec[2] = 0;
- q_spec->irq = irq_create_of_mapping(q_chip->int_ctrl,
- intspec, 3);
- if (!q_spec->irq) {
- dev_err(&spmi->dev, "%s: invalid irq for gpio"
- " %u\n", __func__, gpio);
- rc = -EINVAL;
- goto err_probe;
- }
- /* initialize lookup table params */
- qpnp_pmic_gpio_set_spec(q_chip, gpio, q_spec);
- qpnp_chip_gpio_set_spec(q_chip, i, q_spec);
- }
-
- q_chip->gpio_chip.base = -1;
- q_chip->gpio_chip.ngpio = spmi->num_dev_node;
- q_chip->gpio_chip.label = "qpnp-gpio";
- q_chip->gpio_chip.direction_input = qpnp_gpio_direction_input;
- q_chip->gpio_chip.direction_output = qpnp_gpio_direction_output;
- q_chip->gpio_chip.to_irq = qpnp_gpio_to_irq;
- q_chip->gpio_chip.get = qpnp_gpio_get;
- q_chip->gpio_chip.set = qpnp_gpio_set;
- q_chip->gpio_chip.dev = &spmi->dev;
- q_chip->gpio_chip.of_xlate = qpnp_gpio_of_gpio_xlate;
- q_chip->gpio_chip.of_gpio_n_cells = 2;
- q_chip->gpio_chip.can_sleep = 0;
-
- rc = gpiochip_add(&q_chip->gpio_chip);
- if (rc) {
- dev_err(&spmi->dev, "%s: Can't add gpio chip, rc = %d\n",
- __func__, rc);
- goto err_probe;
- }
-
- /* now configure gpio config defaults if they exist */
- for (i = 0; i < spmi->num_dev_node; i++) {
- q_spec = qpnp_chip_gpio_get_spec(q_chip, i);
- if (WARN_ON(!q_spec)) {
- rc = -ENODEV;
- goto err_probe;
- }
-
- rc = qpnp_gpio_cache_regs(q_chip, q_spec);
- if (rc)
- goto err_probe;
-
- rc = qpnp_gpio_apply_config(q_chip, q_spec);
- if (rc)
- goto err_probe;
- }
-
- dev_dbg(&spmi->dev, "%s: gpio_chip registered between %d-%u\n",
- __func__, q_chip->gpio_chip.base,
- (q_chip->gpio_chip.base + q_chip->gpio_chip.ngpio) - 1);
-
- rc = qpnp_gpio_debugfs_create(q_chip);
- if (rc) {
- dev_err(&spmi->dev, "%s: debugfs creation failed\n", __func__);
- goto err_probe;
- }
-
- return 0;
-
-err_probe:
- qpnp_gpio_free_chip(q_chip);
- return rc;
-}
-
-static int qpnp_gpio_remove(struct spmi_device *spmi)
-{
- struct qpnp_gpio_chip *q_chip = dev_get_drvdata(&spmi->dev);
-
- debugfs_remove_recursive(q_chip->dfs_dir);
-
- return qpnp_gpio_free_chip(q_chip);
-}
-
-static struct of_device_id spmi_match_table[] = {
- { .compatible = "qcom,qpnp-gpio",
- },
- {}
-};
-
-static const struct spmi_device_id qpnp_gpio_id[] = {
- { "qcom,qpnp-gpio", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(spmi, qpnp_gpio_id);
-
-static struct spmi_driver qpnp_gpio_driver = {
- .driver = {
- .name = "qcom,qpnp-gpio",
- .of_match_table = spmi_match_table,
- },
- .probe = qpnp_gpio_probe,
- .remove = qpnp_gpio_remove,
- .id_table = qpnp_gpio_id,
-};
-
-static int __init qpnp_gpio_init(void)
-{
-#ifdef CONFIG_GPIO_QPNP_DEBUG
- driver_dfs_dir = debugfs_create_dir("qpnp_gpio", NULL);
- if (driver_dfs_dir == NULL)
- pr_err("Cannot register top level debugfs directory\n");
-#endif
-
- return spmi_driver_register(&qpnp_gpio_driver);
-}
-
-static void __exit qpnp_gpio_exit(void)
-{
-#ifdef CONFIG_GPIO_QPNP_DEBUG
- debugfs_remove_recursive(driver_dfs_dir);
-#endif
- spmi_driver_unregister(&qpnp_gpio_driver);
-}
-
-MODULE_DESCRIPTION("QPNP PMIC gpio driver");
-MODULE_LICENSE("GPL v2");
-
-module_init(qpnp_gpio_init);
-module_exit(qpnp_gpio_exit);
diff --git a/drivers/gpio/qpnp-pin.c b/drivers/gpio/qpnp-pin.c
new file mode 100644
index 0000000..bbcba81
--- /dev/null
+++ b/drivers/gpio/qpnp-pin.c
@@ -0,0 +1,1335 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/qpnp/pin.h>
+
+#define Q_REG_ADDR(q_spec, reg_index) \
+ ((q_spec)->offset + reg_index)
+
+#define Q_REG_STATUS1 0x8
+#define Q_NUM_CTL_REGS 0xD
+
+/* type registers base address offsets */
+#define Q_REG_TYPE 0x4
+#define Q_REG_SUBTYPE 0x5
+
+/* gpio peripheral type and subtype values */
+#define Q_GPIO_TYPE 0x10
+#define Q_GPIO_SUBTYPE_GPIO_4CH 0x0
+#define Q_GPIO_SUBTYPE_GPIOC_4CH 0x2
+#define Q_GPIO_SUBTYPE_GPIO_8CH 0x4
+#define Q_GPIO_SUBTYPE_GPIOC_8CH 0x6
+
+/* mpp peripheral type and subtype values */
+#define Q_MPP_TYPE 0x11
+#define Q_MPP_SUBTYPE_4CH_NO_ANA_OUT 0x1
+#define Q_MPP_SUBTYPE_4CH_NO_SINK 0x2
+#define Q_MPP_SUBTYPE_4CH_FULL_FUNC 0x3
+#define Q_MPP_SUBTYPE_8CH_FULL_FUNC 0x7
+
+/* control register base address offsets */
+#define Q_REG_MODE_CTL 0x40
+#define Q_REG_DIG_VIN_CTL 0x41
+#define Q_REG_DIG_PULL_CTL 0x42
+#define Q_REG_DIG_IN_CTL 0x43
+#define Q_REG_DIG_OUT_CTL 0x45
+#define Q_REG_EN_CTL 0x46
+#define Q_REG_AOUT_CTL 0x48
+#define Q_REG_AIN_CTL 0x4A
+#define Q_REG_SINK_CTL 0x4C
+
+/* control register regs array indices */
+#define Q_REG_I_MODE_CTL 0
+#define Q_REG_I_DIG_VIN_CTL 1
+#define Q_REG_I_DIG_PULL_CTL 2
+#define Q_REG_I_DIG_IN_CTL 3
+#define Q_REG_I_DIG_OUT_CTL 5
+#define Q_REG_I_EN_CTL 6
+#define Q_REG_I_AOUT_CTL 8
+#define Q_REG_I_AIN_CTL 10
+#define Q_REG_I_SINK_CTL 12
+
+/* control reg: mode */
+#define Q_REG_OUT_INVERT_SHIFT 0
+#define Q_REG_OUT_INVERT_MASK 0x1
+#define Q_REG_SRC_SEL_SHIFT 1
+#define Q_REG_SRC_SEL_MASK 0xE
+#define Q_REG_MODE_SEL_SHIFT 4
+#define Q_REG_MODE_SEL_MASK 0x70
+
+/* control reg: dig_vin */
+#define Q_REG_VIN_SHIFT 0
+#define Q_REG_VIN_MASK 0x7
+
+/* control reg: dig_pull */
+#define Q_REG_PULL_SHIFT 0
+#define Q_REG_PULL_MASK 0x7
+
+/* control reg: dig_out */
+#define Q_REG_OUT_STRENGTH_SHIFT 0
+#define Q_REG_OUT_STRENGTH_MASK 0x3
+#define Q_REG_OUT_TYPE_SHIFT 4
+#define Q_REG_OUT_TYPE_MASK 0x30
+
+/* control reg: en */
+#define Q_REG_MASTER_EN_SHIFT 7
+#define Q_REG_MASTER_EN_MASK 0x80
+
+/* control reg: ana_out */
+#define Q_REG_AOUT_REF_SHIFT 0
+#define Q_REG_AOUT_REF_MASK 0x7
+
+/* control reg: ana_in */
+#define Q_REG_AIN_ROUTE_SHIFT 0
+#define Q_REG_AIN_ROUTE_MASK 0x7
+
+/* control reg: sink */
+#define Q_REG_CS_OUT_SHIFT 0
+#define Q_REG_CS_OUT_MASK 0x7
+
+enum qpnp_pin_param_type {
+ Q_PIN_CFG_MODE,
+ Q_PIN_CFG_OUTPUT_TYPE,
+ Q_PIN_CFG_INVERT,
+ Q_PIN_CFG_PULL,
+ Q_PIN_CFG_VIN_SEL,
+ Q_PIN_CFG_OUT_STRENGTH,
+ Q_PIN_CFG_SELECT,
+ Q_PIN_CFG_MASTER_EN,
+ Q_PIN_CFG_AOUT_REF,
+ Q_PIN_CFG_AIN_ROUTE,
+ Q_PIN_CFG_CS_OUT,
+ Q_PIN_CFG_INVALID,
+};
+
+#define Q_NUM_PARAMS Q_PIN_CFG_INVALID
+
+/* param error checking */
+#define QPNP_PIN_MODE_INVALID 3
+#define QPNP_PIN_INVERT_INVALID 2
+#define QPNP_PIN_OUT_BUF_INVALID 3
+#define QPNP_PIN_VIN_4CH_INVALID 5
+#define QPNP_PIN_VIN_8CH_INVALID 8
+#define QPNP_PIN_GPIO_PULL_INVALID 6
+#define QPNP_PIN_MPP_PULL_INVALID 4
+#define QPNP_PIN_OUT_STRENGTH_INVALID 4
+#define QPNP_PIN_SRC_INVALID 8
+#define QPNP_PIN_MASTER_INVALID 2
+#define QPNP_PIN_AOUT_REF_INVALID 8
+#define QPNP_PIN_AIN_ROUTE_INVALID 8
+#define QPNP_PIN_CS_OUT_INVALID 8
+
+struct qpnp_pin_spec {
+ uint8_t slave; /* 0-15 */
+ uint16_t offset; /* 0-255 */
+ uint32_t gpio_chip_idx; /* offset from gpio_chip base */
+ uint32_t pmic_pin; /* PMIC pin number */
+ int irq; /* logical IRQ number */
+ u8 regs[Q_NUM_CTL_REGS]; /* Control regs */
+ u8 num_ctl_regs; /* usable number on this pin */
+ u8 type; /* peripheral type */
+ u8 subtype; /* peripheral subtype */
+ struct device_node *node;
+ enum qpnp_pin_param_type params[Q_NUM_PARAMS];
+ struct qpnp_pin_chip *q_chip;
+};
+
+struct qpnp_pin_chip {
+ struct gpio_chip gpio_chip;
+ struct spmi_device *spmi;
+ struct qpnp_pin_spec **pmic_pins;
+ struct qpnp_pin_spec **chip_gpios;
+ uint32_t pmic_pin_lowest;
+ uint32_t pmic_pin_highest;
+ struct device_node *int_ctrl;
+ struct list_head chip_list;
+ struct dentry *dfs_dir;
+};
+
+static LIST_HEAD(qpnp_pin_chips);
+static DEFINE_MUTEX(qpnp_pin_chips_lock);
+
+static inline void qpnp_pmic_pin_set_spec(struct qpnp_pin_chip *q_chip,
+ uint32_t pmic_pin,
+ struct qpnp_pin_spec *spec)
+{
+ q_chip->pmic_pins[pmic_pin - q_chip->pmic_pin_lowest] = spec;
+}
+
+static inline struct qpnp_pin_spec *qpnp_pmic_pin_get_spec(
+ struct qpnp_pin_chip *q_chip,
+ uint32_t pmic_pin)
+{
+ if (pmic_pin < q_chip->pmic_pin_lowest ||
+ pmic_pin > q_chip->pmic_pin_highest)
+ return NULL;
+
+ return q_chip->pmic_pins[pmic_pin - q_chip->pmic_pin_lowest];
+}
+
+static inline struct qpnp_pin_spec *qpnp_chip_gpio_get_spec(
+ struct qpnp_pin_chip *q_chip,
+ uint32_t chip_gpio)
+{
+ if (chip_gpio > q_chip->gpio_chip.ngpio)
+ return NULL;
+
+ return q_chip->chip_gpios[chip_gpio];
+}
+
+static inline void qpnp_chip_gpio_set_spec(struct qpnp_pin_chip *q_chip,
+ uint32_t chip_gpio,
+ struct qpnp_pin_spec *spec)
+{
+ q_chip->chip_gpios[chip_gpio] = spec;
+}
+
+/*
+ * Determines whether a specified param's configuration is correct.
+ * The check is two-tiered. First, it determines whether the hardware
+ * supports the requested param and value at all. Second, it validates
+ * that the requested configuration is correct, given that the hardware
+ * does support it.
+ *
+ * Returns
+ * -ENXIO if the hardware does not support this param.
+ * -EINVAL if the hardware does support this param, but the
+ * requested value is outside the supported range.
+ */
+static int qpnp_pin_check_config(enum qpnp_pin_param_type idx,
+ struct qpnp_pin_spec *q_spec, uint32_t val)
+{
+ switch (idx) {
+ case Q_PIN_CFG_MODE:
+ if (val >= QPNP_PIN_MODE_INVALID)
+ return -EINVAL;
+ break;
+ case Q_PIN_CFG_OUTPUT_TYPE:
+ if (q_spec->type != Q_GPIO_TYPE)
+ return -ENXIO;
+ if ((val == QPNP_PIN_OUT_BUF_OPEN_DRAIN_NMOS ||
+ val == QPNP_PIN_OUT_BUF_OPEN_DRAIN_PMOS) &&
+ (q_spec->subtype == Q_GPIO_SUBTYPE_GPIOC_4CH ||
+ (q_spec->subtype == Q_GPIO_SUBTYPE_GPIOC_8CH)))
+ return -EINVAL;
+ else if (val >= QPNP_PIN_OUT_BUF_INVALID)
+ return -EINVAL;
+ break;
+ case Q_PIN_CFG_INVERT:
+ if (val >= QPNP_PIN_INVERT_INVALID)
+ return -EINVAL;
+ break;
+ case Q_PIN_CFG_PULL:
+ if (q_spec->type == Q_GPIO_TYPE &&
+ val >= QPNP_PIN_GPIO_PULL_INVALID)
+ return -EINVAL;
+ if (q_spec->type == Q_MPP_TYPE &&
+ val >= QPNP_PIN_MPP_PULL_INVALID)
+ return -EINVAL;
+ break;
+ case Q_PIN_CFG_VIN_SEL:
+ if (val >= QPNP_PIN_VIN_8CH_INVALID)
+ return -EINVAL;
+ else if (val >= QPNP_PIN_VIN_4CH_INVALID) {
+ if (q_spec->type == Q_GPIO_TYPE &&
+ (q_spec->subtype == Q_GPIO_SUBTYPE_GPIO_4CH ||
+ q_spec->subtype == Q_GPIO_SUBTYPE_GPIOC_4CH))
+ return -EINVAL;
+ if (q_spec->type == Q_MPP_TYPE &&
+ (q_spec->subtype == Q_MPP_SUBTYPE_4CH_NO_ANA_OUT ||
+ q_spec->subtype == Q_MPP_SUBTYPE_4CH_NO_SINK ||
+ q_spec->subtype == Q_MPP_SUBTYPE_4CH_FULL_FUNC))
+ return -EINVAL;
+ }
+ break;
+ case Q_PIN_CFG_OUT_STRENGTH:
+ if (q_spec->type != Q_GPIO_TYPE)
+ return -ENXIO;
+ if (val >= QPNP_PIN_OUT_STRENGTH_INVALID ||
+ val == 0)
+ return -EINVAL;
+ break;
+ case Q_PIN_CFG_SELECT:
+ if (q_spec->type == Q_MPP_TYPE &&
+ (val == QPNP_PIN_SEL_FUNC_1 ||
+ val == QPNP_PIN_SEL_FUNC_2))
+ return -EINVAL;
+ if (val >= QPNP_PIN_SRC_INVALID)
+ return -EINVAL;
+ break;
+ case Q_PIN_CFG_MASTER_EN:
+ if (val >= QPNP_PIN_MASTER_INVALID)
+ return -EINVAL;
+ break;
+ case Q_PIN_CFG_AOUT_REF:
+ if (q_spec->type != Q_MPP_TYPE)
+ return -ENXIO;
+ if (q_spec->subtype == Q_MPP_SUBTYPE_4CH_NO_ANA_OUT)
+ return -ENXIO;
+ if (val >= QPNP_PIN_AOUT_REF_INVALID)
+ return -EINVAL;
+ break;
+ case Q_PIN_CFG_AIN_ROUTE:
+ if (q_spec->type != Q_MPP_TYPE)
+ return -ENXIO;
+ if (val >= QPNP_PIN_AIN_ROUTE_INVALID)
+ return -EINVAL;
+ break;
+ case Q_PIN_CFG_CS_OUT:
+ if (q_spec->type != Q_MPP_TYPE)
+ return -ENXIO;
+ if (q_spec->subtype == Q_MPP_SUBTYPE_4CH_NO_SINK)
+ return -ENXIO;
+ if (val >= QPNP_PIN_CS_OUT_INVALID)
+ return -EINVAL;
+ break;
+
+ default:
+ pr_err("invalid param type %u specified\n", idx);
+ return -EINVAL;
+ }
+ return 0;
+}
+
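+/*
+ * Q_CHK_INVALID is true when the hardware supports this param but the
+ * requested value is out of range (qpnp_pin_check_config() == -EINVAL).
+ */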
+#define Q_CHK_INVALID(idx, q_spec, val) \
+ (qpnp_pin_check_config(idx, q_spec, val) == -EINVAL)
+
+static int qpnp_pin_check_constraints(struct qpnp_pin_spec *q_spec,
+ struct qpnp_pin_cfg *param)
+{
+ int pin = q_spec->pmic_pin;
+ const char *name;
+
+ name = (q_spec->type == Q_GPIO_TYPE) ? "gpio" : "mpp";
+
+ if (Q_CHK_INVALID(Q_PIN_CFG_MODE, q_spec, param->mode))
+ pr_err("invalid direction for %s %d\n", name, pin);
+ else if (Q_CHK_INVALID(Q_PIN_CFG_INVERT, q_spec, param->invert))
+ pr_err("invalid invert polarity for %s %d\n", name, pin);
+ else if (Q_CHK_INVALID(Q_PIN_CFG_SELECT, q_spec, param->select))
+ pr_err("invalid source select for %s %d\n", name, pin);
+ else if (Q_CHK_INVALID(Q_PIN_CFG_OUT_STRENGTH,
+ q_spec, param->out_strength))
+ pr_err("invalid out strength for %s %d\n", name, pin);
+ else if (Q_CHK_INVALID(Q_PIN_CFG_OUTPUT_TYPE,
+ q_spec, param->output_type))
+ pr_err("invalid out type for %s %d\n", name, pin);
+ else if (Q_CHK_INVALID(Q_PIN_CFG_VIN_SEL, q_spec, param->vin_sel))
+ pr_err("invalid vin select value for %s %d\n", name, pin);
+ else if (Q_CHK_INVALID(Q_PIN_CFG_PULL, q_spec, param->pull))
+ pr_err("invalid pull value for pin %s %d\n", name, pin);
+ else if (Q_CHK_INVALID(Q_PIN_CFG_MASTER_EN, q_spec, param->master_en))
+ pr_err("invalid master_en value for %s %d\n", name, pin);
+ else if (Q_CHK_INVALID(Q_PIN_CFG_AOUT_REF, q_spec, param->aout_ref))
+ pr_err("invalid aout_reg value for %s %d\n", name, pin);
+ else if (Q_CHK_INVALID(Q_PIN_CFG_AIN_ROUTE, q_spec, param->ain_route))
+ pr_err("invalid ain_route value for %s %d\n", name, pin);
+ else if (Q_CHK_INVALID(Q_PIN_CFG_CS_OUT, q_spec, param->cs_out))
+ pr_err("invalid cs_out value for %s %d\n", name, pin);
+ else
+ return 0;
+
+ return -EINVAL;
+}
+
+static inline u8 q_reg_get(u8 *reg, int shift, int mask)
+{
+ return (*reg & mask) >> shift;
+}
+
+static inline void q_reg_set(u8 *reg, int shift, int mask, int value)
+{
+ *reg |= (value << shift) & mask;
+}
+
+static inline void q_reg_clr_set(u8 *reg, int shift, int mask, int value)
+{
+ *reg &= ~mask;
+ *reg |= (value << shift) & mask;
+}
+
+/*
+ * Calculate the minimum number of registers that must be read / written
+ * in order to satisfy the full feature set of the given pin.
+ */
+static int qpnp_pin_ctl_regs_init(struct qpnp_pin_spec *q_spec)
+{
+ if (q_spec->type == Q_GPIO_TYPE)
+ q_spec->num_ctl_regs = 7;
+ else if (q_spec->type == Q_MPP_TYPE)
+ switch (q_spec->subtype) {
+ case Q_MPP_SUBTYPE_4CH_NO_SINK:
+ q_spec->num_ctl_regs = 12;
+ break;
+ case Q_MPP_SUBTYPE_4CH_NO_ANA_OUT:
+ case Q_MPP_SUBTYPE_4CH_FULL_FUNC:
+ case Q_MPP_SUBTYPE_8CH_FULL_FUNC:
+ q_spec->num_ctl_regs = 13;
+ break;
+ default:
+ pr_err("Invalid MPP subtype 0x%x\n", q_spec->subtype);
+ return -EINVAL;
+ }
+ else {
+ pr_err("Invalid type 0x%x\n", q_spec->type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
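+/*
+ * Read the pin's control register block from the PMIC into the local
+ * register cache, in chunks of at most 8 bytes per
+ * spmi_ext_register_readl() call.
+ */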
+static int qpnp_pin_read_regs(struct qpnp_pin_chip *q_chip,
+ struct qpnp_pin_spec *q_spec, u16 addr, u8 *buf)
+{
+ int bytes_left = q_spec->num_ctl_regs;
+ int rc;
+ char *reg_p = &q_spec->regs[0];
+
+ while (bytes_left > 0) {
+ rc = spmi_ext_register_readl(q_chip->spmi->ctrl, q_spec->slave,
+ Q_REG_ADDR(q_spec, Q_REG_MODE_CTL),
+ reg_p, bytes_left < 8 ? bytes_left : 8);
+ if (rc)
+ return rc;
+ bytes_left -= 8;
+ reg_p += 8;
+ }
+ return 0;
+}
+
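+/*
+ * Write the locally cached control registers back to the PMIC, again in
+ * chunks of at most 8 bytes per spmi_ext_register_writel() call.
+ */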
+static int qpnp_pin_write_regs(struct qpnp_pin_chip *q_chip,
+ struct qpnp_pin_spec *q_spec, u16 addr, u8 *buf)
+{
+ int bytes_left = q_spec->num_ctl_regs;
+ int rc;
+ char *reg_p = &q_spec->regs[0];
+
+ while (bytes_left > 0) {
+ rc = spmi_ext_register_writel(q_chip->spmi->ctrl, q_spec->slave,
+ Q_REG_ADDR(q_spec, Q_REG_MODE_CTL),
+ reg_p, bytes_left < 8 ? bytes_left : 8);
+ if (rc)
+ return rc;
+ bytes_left -= 8;
+ reg_p += 8;
+ }
+ return 0;
+}
+
+static int qpnp_pin_cache_regs(struct qpnp_pin_chip *q_chip,
+ struct qpnp_pin_spec *q_spec)
+{
+ int rc;
+ struct device *dev = &q_chip->spmi->dev;
+
+ rc = qpnp_pin_read_regs(q_chip, q_spec,
+ Q_REG_ADDR(q_spec, Q_REG_MODE_CTL),
+ &q_spec->regs[Q_REG_I_MODE_CTL]);
+ if (rc)
+ dev_err(dev, "%s: unable to read control regs\n", __func__);
+
+ return rc;
+}
+
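+/*
+ * Q_HAVE_HW_SP is true when the hardware supports this param and the
+ * requested value is in range (qpnp_pin_check_config() == 0), i.e. the
+ * corresponding register field may be programmed.
+ */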
+#define Q_HAVE_HW_SP(idx, q_spec, val) \
+ (qpnp_pin_check_config(idx, q_spec, val) == 0)
+
+static int _qpnp_pin_config(struct qpnp_pin_chip *q_chip,
+ struct qpnp_pin_spec *q_spec,
+ struct qpnp_pin_cfg *param)
+{
+ struct device *dev = &q_chip->spmi->dev;
+ int rc;
+
+ rc = qpnp_pin_check_constraints(q_spec, param);
+ if (rc)
+ goto gpio_cfg;
+
+ /* set mode */
+ if (Q_HAVE_HW_SP(Q_PIN_CFG_MODE, q_spec, param->mode))
+ q_reg_clr_set(&q_spec->regs[Q_REG_I_MODE_CTL],
+ Q_REG_MODE_SEL_SHIFT, Q_REG_MODE_SEL_MASK,
+ param->mode);
+
+ /* output specific configuration */
+ if (Q_HAVE_HW_SP(Q_PIN_CFG_INVERT, q_spec, param->invert))
+ q_reg_clr_set(&q_spec->regs[Q_REG_I_MODE_CTL],
+ Q_REG_OUT_INVERT_SHIFT, Q_REG_OUT_INVERT_MASK,
+ param->invert);
+ if (Q_HAVE_HW_SP(Q_PIN_CFG_SELECT, q_spec, param->select))
+ q_reg_clr_set(&q_spec->regs[Q_REG_I_MODE_CTL],
+ Q_REG_SRC_SEL_SHIFT, Q_REG_SRC_SEL_MASK,
+ param->select);
+ if (Q_HAVE_HW_SP(Q_PIN_CFG_OUT_STRENGTH, q_spec, param->out_strength))
+ q_reg_clr_set(&q_spec->regs[Q_REG_I_DIG_OUT_CTL],
+ Q_REG_OUT_STRENGTH_SHIFT, Q_REG_OUT_STRENGTH_MASK,
+ param->out_strength);
+ if (Q_HAVE_HW_SP(Q_PIN_CFG_OUTPUT_TYPE, q_spec, param->output_type))
+ q_reg_clr_set(&q_spec->regs[Q_REG_I_DIG_OUT_CTL],
+ Q_REG_OUT_TYPE_SHIFT, Q_REG_OUT_TYPE_MASK,
+ param->output_type);
+
+ /* config applicable for both input / output */
+ if (Q_HAVE_HW_SP(Q_PIN_CFG_VIN_SEL, q_spec, param->vin_sel))
+ q_reg_clr_set(&q_spec->regs[Q_REG_I_DIG_VIN_CTL],
+ Q_REG_VIN_SHIFT, Q_REG_VIN_MASK,
+ param->vin_sel);
+ if (Q_HAVE_HW_SP(Q_PIN_CFG_PULL, q_spec, param->pull))
+ q_reg_clr_set(&q_spec->regs[Q_REG_I_DIG_PULL_CTL],
+ Q_REG_PULL_SHIFT, Q_REG_PULL_MASK,
+ param->pull);
+ if (Q_HAVE_HW_SP(Q_PIN_CFG_MASTER_EN, q_spec, param->master_en))
+ q_reg_clr_set(&q_spec->regs[Q_REG_I_EN_CTL],
+ Q_REG_MASTER_EN_SHIFT, Q_REG_MASTER_EN_MASK,
+ param->master_en);
+
+ /* mpp specific config */
+ if (Q_HAVE_HW_SP(Q_PIN_CFG_AOUT_REF, q_spec, param->aout_ref))
+ q_reg_clr_set(&q_spec->regs[Q_REG_I_AOUT_CTL],
+ Q_REG_AOUT_REF_SHIFT, Q_REG_AOUT_REF_MASK,
+ param->aout_ref);
+ if (Q_HAVE_HW_SP(Q_PIN_CFG_AIN_ROUTE, q_spec, param->ain_route))
+ q_reg_clr_set(&q_spec->regs[Q_REG_I_AIN_CTL],
+ Q_REG_AIN_ROUTE_SHIFT, Q_REG_AIN_ROUTE_MASK,
+ param->ain_route);
+ if (Q_HAVE_HW_SP(Q_PIN_CFG_CS_OUT, q_spec, param->cs_out))
+ q_reg_clr_set(&q_spec->regs[Q_REG_I_SINK_CTL],
+ Q_REG_CS_OUT_SHIFT, Q_REG_CS_OUT_MASK,
+ param->cs_out);
+
+ rc = qpnp_pin_write_regs(q_chip, q_spec,
+ Q_REG_ADDR(q_spec, Q_REG_MODE_CTL),
+ &q_spec->regs[Q_REG_I_MODE_CTL]);
+ if (rc) {
+ dev_err(&q_chip->spmi->dev, "%s: unable to write master enable\n",
+ __func__);
+ goto gpio_cfg;
+ }
+
+ return 0;
+
+gpio_cfg:
+ dev_err(dev, "%s: unable to set default config for pmic gpio %d\n",
+ __func__, q_spec->pmic_pin);
+
+ return rc;
+}
+
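+/*
+ * qpnp_pin_config - configure a PMIC pin given its global Linux gpio number.
+ * The owning qpnp-pin chip is found by matching the number against each
+ * registered gpio_chip's range; the pin is then programmed with *param.
+ */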
+int qpnp_pin_config(int gpio, struct qpnp_pin_cfg *param)
+{
+ int rc, chip_offset;
+ struct qpnp_pin_chip *q_chip;
+ struct qpnp_pin_spec *q_spec = NULL;
+ struct gpio_chip *gpio_chip;
+
+ if (param == NULL)
+ return -EINVAL;
+
+ mutex_lock(&qpnp_pin_chips_lock);
+ list_for_each_entry(q_chip, &qpnp_pin_chips, chip_list) {
+ gpio_chip = &q_chip->gpio_chip;
+ if (gpio >= gpio_chip->base
+ && gpio < gpio_chip->base + gpio_chip->ngpio) {
+ chip_offset = gpio - gpio_chip->base;
+ q_spec = qpnp_chip_gpio_get_spec(q_chip, chip_offset);
+ if (WARN_ON(!q_spec)) {
+ mutex_unlock(&qpnp_pin_chips_lock);
+ return -ENODEV;
+ }
+ break;
+ }
+ }
+ mutex_unlock(&qpnp_pin_chips_lock);
+
+ rc = _qpnp_pin_config(q_chip, q_spec, param);
+
+ return rc;
+}
+EXPORT_SYMBOL(qpnp_pin_config);
+
+#define Q_MAX_CHIP_NAME 128
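+/*
+ * qpnp_pin_map - translate a (gpio_chip label, PMIC pin number) pair into
+ * the global Linux gpio number for that pin, or return a negative errno if
+ * no registered chip owns the pin.
+ */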
+int qpnp_pin_map(const char *name, uint32_t pmic_pin)
+{
+ struct qpnp_pin_chip *q_chip;
+ struct qpnp_pin_spec *q_spec = NULL;
+
+ mutex_lock(&qpnp_pin_chips_lock);
+ list_for_each_entry(q_chip, &qpnp_pin_chips, chip_list) {
+ if (strncmp(q_chip->gpio_chip.label, name,
+ Q_MAX_CHIP_NAME) != 0)
+ continue;
+ if (q_chip->pmic_pin_lowest <= pmic_pin &&
+ q_chip->pmic_pin_highest >= pmic_pin) {
+ q_spec = qpnp_pmic_pin_get_spec(q_chip, pmic_pin);
+ mutex_unlock(&qpnp_pin_chips_lock);
+ if (WARN_ON(!q_spec))
+ return -ENODEV;
+ return q_chip->gpio_chip.base + q_spec->gpio_chip_idx;
+ }
+ }
+ mutex_unlock(&qpnp_pin_chips_lock);
+ return -EINVAL;
+}
+EXPORT_SYMBOL(qpnp_pin_map);
+
+static int qpnp_pin_to_irq(struct gpio_chip *gpio_chip, unsigned offset)
+{
+ struct qpnp_pin_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
+ struct qpnp_pin_spec *q_spec;
+
+ q_spec = qpnp_chip_gpio_get_spec(q_chip, offset);
+ if (!q_spec)
+ return -EINVAL;
+
+ return q_spec->irq;
+}
+
+static int qpnp_pin_get(struct gpio_chip *gpio_chip, unsigned offset)
+{
+ int rc, ret_val;
+ struct qpnp_pin_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
+ struct qpnp_pin_spec *q_spec = NULL;
+ u8 buf[1];
+
+ if (WARN_ON(!q_chip))
+ return -ENODEV;
+
+ q_spec = qpnp_chip_gpio_get_spec(q_chip, offset);
+ if (WARN_ON(!q_spec))
+ return -ENODEV;
+
+ /* gpio val is from RT status iff input is enabled */
+ if ((q_spec->regs[Q_REG_I_MODE_CTL] & Q_REG_MODE_SEL_MASK)
+ == QPNP_PIN_MODE_DIG_IN) {
+ /* INT_RT_STS */
+ rc = spmi_ext_register_readl(q_chip->spmi->ctrl, q_spec->slave,
+ Q_REG_ADDR(q_spec, Q_REG_STATUS1),
+ &buf[0], 1);
+ return buf[0];
+
+ } else {
+ ret_val = (q_spec->regs[Q_REG_I_MODE_CTL] &
+ Q_REG_OUT_INVERT_MASK) >> Q_REG_OUT_INVERT_SHIFT;
+ return ret_val;
+ }
+
+ return 0;
+}
+
+static int __qpnp_pin_set(struct qpnp_pin_chip *q_chip,
+ struct qpnp_pin_spec *q_spec, int value)
+{
+ int rc;
+
+ if (!q_chip || !q_spec)
+ return -EINVAL;
+
+ if (value)
+ q_reg_clr_set(&q_spec->regs[Q_REG_I_MODE_CTL],
+ Q_REG_OUT_INVERT_SHIFT, Q_REG_OUT_INVERT_MASK, 1);
+ else
+ q_reg_clr_set(&q_spec->regs[Q_REG_I_MODE_CTL],
+ Q_REG_OUT_INVERT_SHIFT, Q_REG_OUT_INVERT_MASK, 0);
+
+ rc = spmi_ext_register_writel(q_chip->spmi->ctrl, q_spec->slave,
+ Q_REG_ADDR(q_spec, Q_REG_I_MODE_CTL),
+ &q_spec->regs[Q_REG_I_MODE_CTL], 1);
+ if (rc)
+ dev_err(&q_chip->spmi->dev, "%s: spmi write failed\n",
+ __func__);
+ return rc;
+}
+
+
+static void qpnp_pin_set(struct gpio_chip *gpio_chip,
+ unsigned offset, int value)
+{
+ struct qpnp_pin_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
+ struct qpnp_pin_spec *q_spec;
+
+ if (WARN_ON(!q_chip))
+ return;
+
+ q_spec = qpnp_chip_gpio_get_spec(q_chip, offset);
+ if (WARN_ON(!q_spec))
+ return;
+
+ __qpnp_pin_set(q_chip, q_spec, value);
+}
+
+static int qpnp_pin_set_mode(struct qpnp_pin_chip *q_chip,
+ struct qpnp_pin_spec *q_spec, int mode)
+{
+ int rc;
+
+ if (!q_chip || !q_spec)
+ return -EINVAL;
+
+ if (mode >= QPNP_PIN_MODE_INVALID) {
+ pr_err("invalid mode specification %d\n", mode);
+ return -EINVAL;
+ }
+
+ q_reg_clr_set(&q_spec->regs[Q_REG_I_MODE_CTL],
+ Q_REG_MODE_SEL_SHIFT,
+ Q_REG_MODE_SEL_MASK,
+ mode);
+
+ rc = spmi_ext_register_writel(q_chip->spmi->ctrl, q_spec->slave,
+ Q_REG_ADDR(q_spec, Q_REG_I_MODE_CTL),
+ &q_spec->regs[Q_REG_I_MODE_CTL], 1);
+ return rc;
+}
+
+static int qpnp_pin_direction_input(struct gpio_chip *gpio_chip,
+ unsigned offset)
+{
+ struct qpnp_pin_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
+ struct qpnp_pin_spec *q_spec;
+
+ if (WARN_ON(!q_chip))
+ return -ENODEV;
+
+ q_spec = qpnp_chip_gpio_get_spec(q_chip, offset);
+ if (WARN_ON(!q_spec))
+ return -ENODEV;
+
+ return qpnp_pin_set_mode(q_chip, q_spec, QPNP_PIN_MODE_DIG_IN);
+}
+
+static int qpnp_pin_direction_output(struct gpio_chip *gpio_chip,
+ unsigned offset,
+ int val)
+{
+ int rc;
+ struct qpnp_pin_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
+ struct qpnp_pin_spec *q_spec;
+
+ if (WARN_ON(!q_chip))
+ return -ENODEV;
+
+ q_spec = qpnp_chip_gpio_get_spec(q_chip, offset);
+ if (WARN_ON(!q_spec))
+ return -ENODEV;
+
+ rc = __qpnp_pin_set(q_chip, q_spec, val);
+ if (rc)
+ return rc;
+
+ rc = qpnp_pin_set_mode(q_chip, q_spec, QPNP_PIN_MODE_DIG_OUT);
+
+ return rc;
+}
+
+static int qpnp_pin_of_gpio_xlate(struct gpio_chip *gpio_chip,
+ const struct of_phandle_args *gpio_spec,
+ u32 *flags)
+{
+ struct qpnp_pin_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
+ struct qpnp_pin_spec *q_spec;
+
+ if (WARN_ON(gpio_chip->of_gpio_n_cells < 2)) {
+ pr_err("of_gpio_n_cells < 2\n");
+ return -EINVAL;
+ }
+
+ q_spec = qpnp_pmic_pin_get_spec(q_chip, gpio_spec->args[0]);
+ if (!q_spec) {
+ pr_err("no such PMIC gpio %u in device topology\n",
+ gpio_spec->args[0]);
+ return -EINVAL;
+ }
+
+ if (flags)
+ *flags = gpio_spec->args[1];
+
+ return q_spec->gpio_chip_idx;
+}
+
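+/*
+ * Build a qpnp_pin_cfg from the pin's current hardware state, override any
+ * field that has a matching devicetree property, and write the result back
+ * to the pin.
+ */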
+static int qpnp_pin_apply_config(struct qpnp_pin_chip *q_chip,
+ struct qpnp_pin_spec *q_spec)
+{
+ struct qpnp_pin_cfg param;
+ struct device_node *node = q_spec->node;
+ int rc;
+
+ param.mode = q_reg_get(&q_spec->regs[Q_REG_I_MODE_CTL],
+ Q_REG_MODE_SEL_SHIFT,
+ Q_REG_MODE_SEL_MASK);
+ param.output_type = q_reg_get(&q_spec->regs[Q_REG_I_DIG_OUT_CTL],
+ Q_REG_OUT_TYPE_SHIFT,
+ Q_REG_OUT_TYPE_MASK);
+	param.invert = q_reg_get(&q_spec->regs[Q_REG_I_MODE_CTL],
+					  Q_REG_OUT_INVERT_SHIFT,
+					  Q_REG_OUT_INVERT_MASK);
+	param.pull = q_reg_get(&q_spec->regs[Q_REG_I_DIG_PULL_CTL],
+				       Q_REG_PULL_SHIFT, Q_REG_PULL_MASK);
+ param.vin_sel = q_reg_get(&q_spec->regs[Q_REG_I_DIG_VIN_CTL],
+ Q_REG_VIN_SHIFT, Q_REG_VIN_MASK);
+ param.out_strength = q_reg_get(&q_spec->regs[Q_REG_I_DIG_OUT_CTL],
+ Q_REG_OUT_STRENGTH_SHIFT,
+ Q_REG_OUT_STRENGTH_MASK);
+ param.select = q_reg_get(&q_spec->regs[Q_REG_I_MODE_CTL],
+ Q_REG_SRC_SEL_SHIFT, Q_REG_SRC_SEL_MASK);
+ param.master_en = q_reg_get(&q_spec->regs[Q_REG_I_EN_CTL],
+ Q_REG_MASTER_EN_SHIFT,
+ Q_REG_MASTER_EN_MASK);
+ param.aout_ref = q_reg_get(&q_spec->regs[Q_REG_I_AOUT_CTL],
+ Q_REG_AOUT_REF_SHIFT,
+ Q_REG_AOUT_REF_MASK);
+ param.ain_route = q_reg_get(&q_spec->regs[Q_REG_I_AIN_CTL],
+ Q_REG_AIN_ROUTE_SHIFT,
+ Q_REG_AIN_ROUTE_MASK);
+ param.cs_out = q_reg_get(&q_spec->regs[Q_REG_I_SINK_CTL],
+ Q_REG_CS_OUT_SHIFT,
+ Q_REG_CS_OUT_MASK);
+
+	of_property_read_u32(node, "qcom,mode",
+		&param.mode);
+	of_property_read_u32(node, "qcom,output-type",
+		&param.output_type);
+	of_property_read_u32(node, "qcom,invert",
+		&param.invert);
+	of_property_read_u32(node, "qcom,pull",
+		&param.pull);
+	of_property_read_u32(node, "qcom,vin-sel",
+		&param.vin_sel);
+	of_property_read_u32(node, "qcom,out-strength",
+		&param.out_strength);
+	of_property_read_u32(node, "qcom,src-select",
+		&param.select);
+	of_property_read_u32(node, "qcom,master-en",
+		&param.master_en);
+	of_property_read_u32(node, "qcom,aout-ref",
+		&param.aout_ref);
+	of_property_read_u32(node, "qcom,ain-route",
+		&param.ain_route);
+	of_property_read_u32(node, "qcom,cs-out",
+		&param.cs_out);
+	rc = _qpnp_pin_config(q_chip, q_spec, &param);
+
+ return rc;
+}
+
+static int qpnp_pin_free_chip(struct qpnp_pin_chip *q_chip)
+{
+ struct spmi_device *spmi = q_chip->spmi;
+ int rc, i;
+
+ if (q_chip->chip_gpios)
+ for (i = 0; i < spmi->num_dev_node; i++)
+ kfree(q_chip->chip_gpios[i]);
+
+ mutex_lock(&qpnp_pin_chips_lock);
+ list_del(&q_chip->chip_list);
+ mutex_unlock(&qpnp_pin_chips_lock);
+ rc = gpiochip_remove(&q_chip->gpio_chip);
+ if (rc)
+ dev_err(&q_chip->spmi->dev, "%s: unable to remove gpio\n",
+ __func__);
+ kfree(q_chip->chip_gpios);
+ kfree(q_chip->pmic_pins);
+ kfree(q_chip);
+ return rc;
+}
+
+#ifdef CONFIG_GPIO_QPNP_PIN_DEBUG
+struct qpnp_pin_reg {
+ uint32_t addr;
+ uint32_t idx;
+ uint32_t shift;
+ uint32_t mask;
+};
+
+static struct dentry *driver_dfs_dir;
+
+static int qpnp_pin_reg_attr(enum qpnp_pin_param_type type,
+ struct qpnp_pin_reg *cfg)
+{
+ switch (type) {
+ case Q_PIN_CFG_MODE:
+ cfg->addr = Q_REG_MODE_CTL;
+ cfg->idx = Q_REG_I_MODE_CTL;
+ cfg->shift = Q_REG_MODE_SEL_SHIFT;
+ cfg->mask = Q_REG_MODE_SEL_MASK;
+ break;
+ case Q_PIN_CFG_OUTPUT_TYPE:
+ cfg->addr = Q_REG_DIG_OUT_CTL;
+ cfg->idx = Q_REG_I_DIG_OUT_CTL;
+ cfg->shift = Q_REG_OUT_TYPE_SHIFT;
+ cfg->mask = Q_REG_OUT_TYPE_MASK;
+ break;
+ case Q_PIN_CFG_INVERT:
+ cfg->addr = Q_REG_MODE_CTL;
+ cfg->idx = Q_REG_I_MODE_CTL;
+ cfg->shift = Q_REG_OUT_INVERT_SHIFT;
+ cfg->mask = Q_REG_OUT_INVERT_MASK;
+ break;
+ case Q_PIN_CFG_PULL:
+ cfg->addr = Q_REG_DIG_PULL_CTL;
+ cfg->idx = Q_REG_I_DIG_PULL_CTL;
+ cfg->shift = Q_REG_PULL_SHIFT;
+ cfg->mask = Q_REG_PULL_MASK;
+ break;
+ case Q_PIN_CFG_VIN_SEL:
+ cfg->addr = Q_REG_DIG_VIN_CTL;
+ cfg->idx = Q_REG_I_DIG_VIN_CTL;
+ cfg->shift = Q_REG_VIN_SHIFT;
+ cfg->mask = Q_REG_VIN_MASK;
+ break;
+ case Q_PIN_CFG_OUT_STRENGTH:
+ cfg->addr = Q_REG_DIG_OUT_CTL;
+ cfg->idx = Q_REG_I_DIG_OUT_CTL;
+ cfg->shift = Q_REG_OUT_STRENGTH_SHIFT;
+ cfg->mask = Q_REG_OUT_STRENGTH_MASK;
+ break;
+ case Q_PIN_CFG_SELECT:
+ cfg->addr = Q_REG_MODE_CTL;
+ cfg->idx = Q_REG_I_MODE_CTL;
+ cfg->shift = Q_REG_SRC_SEL_SHIFT;
+ cfg->mask = Q_REG_SRC_SEL_MASK;
+ break;
+ case Q_PIN_CFG_MASTER_EN:
+ cfg->addr = Q_REG_EN_CTL;
+ cfg->idx = Q_REG_I_EN_CTL;
+ cfg->shift = Q_REG_MASTER_EN_SHIFT;
+ cfg->mask = Q_REG_MASTER_EN_MASK;
+ break;
+ case Q_PIN_CFG_AOUT_REF:
+ cfg->addr = Q_REG_AOUT_CTL;
+ cfg->idx = Q_REG_I_AOUT_CTL;
+ cfg->shift = Q_REG_AOUT_REF_SHIFT;
+ cfg->mask = Q_REG_AOUT_REF_MASK;
+ break;
+ case Q_PIN_CFG_AIN_ROUTE:
+ cfg->addr = Q_REG_AIN_CTL;
+ cfg->idx = Q_REG_I_AIN_CTL;
+ cfg->shift = Q_REG_AIN_ROUTE_SHIFT;
+ cfg->mask = Q_REG_AIN_ROUTE_MASK;
+ break;
+ case Q_PIN_CFG_CS_OUT:
+ cfg->addr = Q_REG_SINK_CTL;
+ cfg->idx = Q_REG_I_SINK_CTL;
+ cfg->shift = Q_REG_CS_OUT_SHIFT;
+ cfg->mask = Q_REG_CS_OUT_MASK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
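+/*
+ * The debugfs file's private data points at the pin's params[] entry for the
+ * param being exposed; the entry's value is the param type itself, and
+ * container_of() recovers the owning qpnp_pin_spec from it.
+ */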
+static int qpnp_pin_debugfs_get(void *data, u64 *val)
+{
+ enum qpnp_pin_param_type *idx = data;
+ struct qpnp_pin_spec *q_spec;
+ struct qpnp_pin_reg cfg = {};
+ int rc;
+
+ rc = qpnp_pin_reg_attr(*idx, &cfg);
+ if (rc)
+ return rc;
+ q_spec = container_of(idx, struct qpnp_pin_spec, params[*idx]);
+ *val = q_reg_get(&q_spec->regs[cfg.idx], cfg.shift, cfg.mask);
+ return 0;
+}
+
+static int qpnp_pin_debugfs_set(void *data, u64 val)
+{
+ enum qpnp_pin_param_type *idx = data;
+ struct qpnp_pin_spec *q_spec;
+ struct qpnp_pin_chip *q_chip;
+ struct qpnp_pin_reg cfg = {};
+ int rc;
+
+ q_spec = container_of(idx, struct qpnp_pin_spec, params[*idx]);
+ q_chip = q_spec->q_chip;
+
+ rc = qpnp_pin_check_config(*idx, q_spec, val);
+ if (rc)
+ return rc;
+
+ rc = qpnp_pin_reg_attr(*idx, &cfg);
+ if (rc)
+ return rc;
+ q_reg_clr_set(&q_spec->regs[cfg.idx], cfg.shift, cfg.mask, val);
+ rc = spmi_ext_register_writel(q_chip->spmi->ctrl, q_spec->slave,
+ Q_REG_ADDR(q_spec, cfg.addr),
+ &q_spec->regs[cfg.idx], 1);
+
+ return rc;
+}
+DEFINE_SIMPLE_ATTRIBUTE(qpnp_pin_fops, qpnp_pin_debugfs_get,
+ qpnp_pin_debugfs_set, "%llu\n");
+
+#define DEBUGFS_BUF_SIZE 11 /* supports 2^32 in decimal */
+
+struct qpnp_pin_debugfs_args {
+ enum qpnp_pin_param_type type;
+ const char *filename;
+};
+
+static struct qpnp_pin_debugfs_args dfs_args[] = {
+ { Q_PIN_CFG_MODE, "mode" },
+ { Q_PIN_CFG_OUTPUT_TYPE, "output_type" },
+ { Q_PIN_CFG_INVERT, "invert" },
+ { Q_PIN_CFG_PULL, "pull" },
+ { Q_PIN_CFG_VIN_SEL, "vin_sel" },
+ { Q_PIN_CFG_OUT_STRENGTH, "out_strength" },
+ { Q_PIN_CFG_SELECT, "select" },
+ { Q_PIN_CFG_MASTER_EN, "master_en" },
+ { Q_PIN_CFG_AOUT_REF, "aout_ref" },
+ { Q_PIN_CFG_AIN_ROUTE, "ain_route" },
+ { Q_PIN_CFG_CS_OUT, "cs_out" },
+};
+
+static int qpnp_pin_debugfs_create(struct qpnp_pin_chip *q_chip)
+{
+ struct spmi_device *spmi = q_chip->spmi;
+ struct device *dev = &spmi->dev;
+ struct qpnp_pin_spec *q_spec;
+ enum qpnp_pin_param_type *params;
+ enum qpnp_pin_param_type type;
+ char pmic_pin[DEBUGFS_BUF_SIZE];
+ const char *filename;
+ struct dentry *dfs, *dfs_io_dir;
+ int i, j, rc;
+
+ BUG_ON(Q_NUM_PARAMS != ARRAY_SIZE(dfs_args));
+
+ q_chip->dfs_dir = debugfs_create_dir(q_chip->gpio_chip.label,
+ driver_dfs_dir);
+ if (q_chip->dfs_dir == NULL) {
+ dev_err(dev, "%s: cannot register chip debugfs directory %s\n",
+ __func__, dev->of_node->name);
+ return -ENODEV;
+ }
+
+ for (i = 0; i < spmi->num_dev_node; i++) {
+ q_spec = qpnp_chip_gpio_get_spec(q_chip, i);
+ params = q_spec->params;
+ snprintf(pmic_pin, DEBUGFS_BUF_SIZE, "%u", q_spec->pmic_pin);
+ dfs_io_dir = debugfs_create_dir(pmic_pin, q_chip->dfs_dir);
+ if (dfs_io_dir == NULL)
+ goto dfs_err;
+
+ for (j = 0; j < Q_NUM_PARAMS; j++) {
+ type = dfs_args[j].type;
+ filename = dfs_args[j].filename;
+
+ /*
+ * Use a value of '0' to see if the pin has even basic
+ * support for a function. Do not create a file if
+ * it doesn't.
+ */
+ rc = qpnp_pin_check_config(type, q_spec, 0);
+ if (rc == -ENXIO)
+ continue;
+
+ params[type] = type;
+ dfs = debugfs_create_file(
+ filename,
+ S_IRUGO | S_IWUSR,
+ dfs_io_dir,
+ &q_spec->params[type],
+ &qpnp_pin_fops);
+ if (dfs == NULL)
+ goto dfs_err;
+ }
+ }
+ return 0;
+dfs_err:
+ dev_err(dev, "%s: cannot register debugfs for pmic gpio %u on chip %s\n",
+ __func__, q_spec->pmic_pin, dev->of_node->name);
+ debugfs_remove_recursive(q_chip->dfs_dir);
+ return -ENFILE;
+}
+#else
+static int qpnp_pin_debugfs_create(struct qpnp_pin_chip *q_chip)
+{
+ return 0;
+}
+#endif
+
+static int qpnp_pin_probe(struct spmi_device *spmi)
+{
+ struct qpnp_pin_chip *q_chip;
+ struct qpnp_pin_spec *q_spec;
+ struct resource *res;
+ struct spmi_resource *d_node;
+ int i, rc;
+ int lowest_gpio = UINT_MAX, highest_gpio = 0;
+ u32 intspec[3], gpio;
+ char buf[2];
+ const char *dev_name;
+
+ dev_name = spmi_get_primary_dev_name(spmi);
+ if (!dev_name) {
+ dev_err(&spmi->dev, "%s: label binding undefined for node %s\n",
+ __func__, spmi->dev.of_node->full_name);
+ return -EINVAL;
+ }
+
+ q_chip = kzalloc(sizeof(*q_chip), GFP_KERNEL);
+ if (!q_chip) {
+ dev_err(&spmi->dev, "%s: Can't allocate gpio_chip\n",
+ __func__);
+ return -ENOMEM;
+ }
+ q_chip->spmi = spmi;
+ dev_set_drvdata(&spmi->dev, q_chip);
+
+ mutex_lock(&qpnp_pin_chips_lock);
+ list_add(&q_chip->chip_list, &qpnp_pin_chips);
+ mutex_unlock(&qpnp_pin_chips_lock);
+
+ /* first scan through nodes to find the range required for allocation */
+ for (i = 0; i < spmi->num_dev_node; i++) {
+ rc = of_property_read_u32(spmi->dev_node[i].of_node,
+ "qcom,pin-num", &gpio);
+ if (rc) {
+ dev_err(&spmi->dev, "%s: unable to get qcom,pin-num property\n",
+ __func__);
+ goto err_probe;
+ }
+
+ if (gpio < lowest_gpio)
+ lowest_gpio = gpio;
+ if (gpio > highest_gpio)
+ highest_gpio = gpio;
+ }
+
+ if (highest_gpio < lowest_gpio) {
+ dev_err(&spmi->dev, "%s: no device nodes specified in topology\n",
+ __func__);
+ rc = -EINVAL;
+ goto err_probe;
+ } else if (lowest_gpio == 0) {
+ dev_err(&spmi->dev, "%s: 0 is not a valid PMIC GPIO\n",
+ __func__);
+ rc = -EINVAL;
+ goto err_probe;
+ }
+
+ q_chip->pmic_pin_lowest = lowest_gpio;
+ q_chip->pmic_pin_highest = highest_gpio;
+
+ /* allocate gpio lookup tables */
+ q_chip->pmic_pins = kzalloc(sizeof(struct qpnp_pin_spec *) *
+ (highest_gpio - lowest_gpio + 1),
+ GFP_KERNEL);
+ q_chip->chip_gpios = kzalloc(sizeof(struct qpnp_pin_spec *) *
+ spmi->num_dev_node, GFP_KERNEL);
+ if (!q_chip->pmic_pins || !q_chip->chip_gpios) {
+ dev_err(&spmi->dev, "%s: unable to allocate memory\n",
+ __func__);
+ rc = -ENOMEM;
+ goto err_probe;
+ }
+
+ /* get interrupt controller device_node */
+ q_chip->int_ctrl = of_irq_find_parent(spmi->dev.of_node);
+ if (!q_chip->int_ctrl) {
+ dev_err(&spmi->dev, "%s: Can't find interrupt parent\n",
+ __func__);
+ rc = -EINVAL;
+ goto err_probe;
+ }
+
+ /* now scan through again and populate the lookup table */
+ for (i = 0; i < spmi->num_dev_node; i++) {
+ d_node = &spmi->dev_node[i];
+ res = spmi_get_resource(spmi, d_node, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&spmi->dev, "%s: node %s is missing has no base address definition\n",
+ __func__, d_node->of_node->full_name);
+ rc = -EINVAL;
+ goto err_probe;
+ }
+
+ rc = of_property_read_u32(d_node->of_node,
+ "qcom,pin-num", &gpio);
+ if (rc) {
+ dev_err(&spmi->dev, "%s: unable to get qcom,pin-num property\n",
+ __func__);
+ goto err_probe;
+ }
+
+ q_spec = kzalloc(sizeof(struct qpnp_pin_spec),
+ GFP_KERNEL);
+ if (!q_spec) {
+ dev_err(&spmi->dev, "%s: unable to allocate memory\n",
+ __func__);
+ rc = -ENOMEM;
+ goto err_probe;
+ }
+
+ q_spec->slave = spmi->sid;
+ q_spec->offset = res->start;
+ q_spec->gpio_chip_idx = i;
+ q_spec->pmic_pin = gpio;
+ q_spec->node = d_node->of_node;
+ q_spec->q_chip = q_chip;
+
+ rc = spmi_ext_register_readl(spmi->ctrl, q_spec->slave,
+ Q_REG_ADDR(q_spec, Q_REG_TYPE), &buf[0], 2);
+ if (rc) {
+ dev_err(&spmi->dev, "%s: unable to read type regs\n",
+ __func__);
+ goto err_probe;
+ }
+ q_spec->type = buf[0];
+ q_spec->subtype = buf[1];
+
+ rc = qpnp_pin_ctl_regs_init(q_spec);
+ if (rc)
+ goto err_probe;
+
+ /* call into irq_domain to get irq mapping */
+ intspec[0] = q_chip->spmi->sid;
+ intspec[1] = (q_spec->offset >> 8) & 0xFF;
+ intspec[2] = 0;
+ q_spec->irq = irq_create_of_mapping(q_chip->int_ctrl,
+ intspec, 3);
+ if (!q_spec->irq) {
+ dev_err(&spmi->dev, "%s: invalid irq for gpio %u\n",
+ __func__, gpio);
+ rc = -EINVAL;
+ goto err_probe;
+ }
+ /* initialize lookup table params */
+ qpnp_pmic_pin_set_spec(q_chip, gpio, q_spec);
+ qpnp_chip_gpio_set_spec(q_chip, i, q_spec);
+ }
+
+ q_chip->gpio_chip.base = -1;
+ q_chip->gpio_chip.ngpio = spmi->num_dev_node;
+ q_chip->gpio_chip.label = dev_name;
+ q_chip->gpio_chip.direction_input = qpnp_pin_direction_input;
+ q_chip->gpio_chip.direction_output = qpnp_pin_direction_output;
+ q_chip->gpio_chip.to_irq = qpnp_pin_to_irq;
+ q_chip->gpio_chip.get = qpnp_pin_get;
+ q_chip->gpio_chip.set = qpnp_pin_set;
+ q_chip->gpio_chip.dev = &spmi->dev;
+ q_chip->gpio_chip.of_xlate = qpnp_pin_of_gpio_xlate;
+ q_chip->gpio_chip.of_gpio_n_cells = 2;
+ q_chip->gpio_chip.can_sleep = 0;
+
+ rc = gpiochip_add(&q_chip->gpio_chip);
+ if (rc) {
+ dev_err(&spmi->dev, "%s: Can't add gpio chip, rc = %d\n",
+ __func__, rc);
+ goto err_probe;
+ }
+
+ /* now configure gpio config defaults if they exist */
+ for (i = 0; i < spmi->num_dev_node; i++) {
+ q_spec = qpnp_chip_gpio_get_spec(q_chip, i);
+ if (WARN_ON(!q_spec)) {
+ rc = -ENODEV;
+ goto err_probe;
+ }
+
+ rc = qpnp_pin_cache_regs(q_chip, q_spec);
+ if (rc)
+ goto err_probe;
+
+ rc = qpnp_pin_apply_config(q_chip, q_spec);
+ if (rc)
+ goto err_probe;
+ }
+
+ dev_dbg(&spmi->dev, "%s: gpio_chip registered between %d-%u\n",
+ __func__, q_chip->gpio_chip.base,
+ (q_chip->gpio_chip.base + q_chip->gpio_chip.ngpio) - 1);
+
+ rc = qpnp_pin_debugfs_create(q_chip);
+ if (rc) {
+ dev_err(&spmi->dev, "%s: debugfs creation failed\n", __func__);
+ goto err_probe;
+ }
+
+ return 0;
+
+err_probe:
+ qpnp_pin_free_chip(q_chip);
+ return rc;
+}
+
+static int qpnp_pin_remove(struct spmi_device *spmi)
+{
+ struct qpnp_pin_chip *q_chip = dev_get_drvdata(&spmi->dev);
+
+ debugfs_remove_recursive(q_chip->dfs_dir);
+
+ return qpnp_pin_free_chip(q_chip);
+}
+
+static struct of_device_id spmi_match_table[] = {
+ { .compatible = "qcom,qpnp-pin",
+ },
+ {}
+};
+
+static const struct spmi_device_id qpnp_pin_id[] = {
+ { "qcom,qpnp-pin", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(spmi, qpnp_pin_id);
+
+static struct spmi_driver qpnp_pin_driver = {
+ .driver = {
+ .name = "qcom,qpnp-pin",
+ .of_match_table = spmi_match_table,
+ },
+ .probe = qpnp_pin_probe,
+ .remove = qpnp_pin_remove,
+ .id_table = qpnp_pin_id,
+};
+
+static int __init qpnp_pin_init(void)
+{
+#ifdef CONFIG_GPIO_QPNP_PIN_DEBUG
+ driver_dfs_dir = debugfs_create_dir("qpnp_pin", NULL);
+ if (driver_dfs_dir == NULL)
+ pr_err("Cannot register top level debugfs directory\n");
+#endif
+
+ return spmi_driver_register(&qpnp_pin_driver);
+}
+
+static void __exit qpnp_pin_exit(void)
+{
+#ifdef CONFIG_GPIO_QPNP_PIN_DEBUG
+ debugfs_remove_recursive(driver_dfs_dir);
+#endif
+ spmi_driver_unregister(&qpnp_pin_driver);
+}
+
+MODULE_DESCRIPTION("QPNP PMIC gpio driver");
+MODULE_LICENSE("GPL v2");
+
+module_init(qpnp_pin_init);
+module_exit(qpnp_pin_exit);
diff --git a/drivers/gpu/msm/a3xx_reg.h b/drivers/gpu/msm/a3xx_reg.h
index bd58b4e..f5ee1d7 100644
--- a/drivers/gpu/msm/a3xx_reg.h
+++ b/drivers/gpu/msm/a3xx_reg.h
@@ -509,6 +509,6 @@
#define RBBM_BLOCK_ID_MARB_3 0x2b
/* RBBM_CLOCK_CTL default value */
-#define A3XX_RBBM_CLOCK_CTL_DEFAULT 0x00000000
+#define A3XX_RBBM_CLOCK_CTL_DEFAULT 0xBFFFFFFF
#endif
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index 56696c4..a6b4210 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -2607,6 +2607,10 @@
adreno_regwrite(device, A3XX_RBBM_INTERFACE_HANG_INT_CTL,
(1 << 16) | 0xFFF);
+ /* Enable Clock gating */
+ adreno_regwrite(device, A3XX_RBBM_CLOCK_CTL,
+ A3XX_RBBM_CLOCK_CTL_DEFAULT);
+
}
/* Defined in adreno_a3xx_snapshot.c */
diff --git a/drivers/gpu/msm/adreno_a3xx_snapshot.c b/drivers/gpu/msm/adreno_a3xx_snapshot.c
index 60aab64..a3bee4d 100644
--- a/drivers/gpu/msm/adreno_a3xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a3xx_snapshot.c
@@ -285,6 +285,9 @@
remain, REG_CP_ME_CNTL, REG_CP_ME_STATUS,
64, 44);
+ /* Disable Clock gating temporarily for the debug bus to work */
+ adreno_regwrite(device, A3XX_RBBM_CLOCK_CTL, 0x00);
+
/* VPC memory */
snapshot = kgsl_snapshot_add_section(device,
KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
@@ -321,5 +324,9 @@
snapshot = a3xx_snapshot_debugbus(device, snapshot, remain);
+ /* Enable Clock gating */
+ adreno_regwrite(device, A3XX_RBBM_CLOCK_CTL,
+ A3XX_RBBM_CLOCK_CTL_DEFAULT);
+
return snapshot;
}
diff --git a/drivers/gpu/msm/kgsl_drm.c b/drivers/gpu/msm/kgsl_drm.c
index 66ac08f..870a7d7 100644
--- a/drivers/gpu/msm/kgsl_drm.c
+++ b/drivers/gpu/msm/kgsl_drm.c
@@ -238,11 +238,8 @@
}
if (TYPE_IS_PMEM(priv->type)) {
- int type;
-
if (priv->type == DRM_KGSL_GEM_TYPE_EBI ||
priv->type & DRM_KGSL_GEM_PMEM_EBI) {
- type = PMEM_MEMTYPE_EBI1;
result = kgsl_sharedmem_ebimem_user(
&priv->memdesc,
priv->pagetable,
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index d55d476..409fe40 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -785,9 +785,6 @@
case KGSL_STATE_NAP:
case KGSL_STATE_SLEEP:
del_timer_sync(&device->idle_timer);
- if (!device->pwrctrl.strtstp_sleepwake)
- kgsl_pwrctrl_pwrlevel_change(device,
- KGSL_PWRLEVEL_NOMINAL);
device->pwrctrl.restore_slumber = true;
device->ftbl->suspend_context(device);
device->ftbl->stop(device);
diff --git a/drivers/gpu/msm/kgsl_pwrscale_trustzone.c b/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
index ad1e7ed..04896be 100644
--- a/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
+++ b/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
@@ -114,8 +114,7 @@
{
struct tz_priv *priv = pwrscale->priv;
if (device->state != KGSL_STATE_NAP &&
- priv->governor == TZ_GOVERNOR_ONDEMAND &&
- device->pwrctrl.restore_slumber == 0)
+ priv->governor == TZ_GOVERNOR_ONDEMAND)
kgsl_pwrctrl_pwrlevel_change(device,
device->pwrctrl.default_pwrlevel);
}
diff --git a/drivers/media/radio/radio-iris.c b/drivers/media/radio/radio-iris.c
index 53eb85c..23d11c3 100644
--- a/drivers/media/radio/radio-iris.c
+++ b/drivers/media/radio/radio-iris.c
@@ -88,6 +88,7 @@
unsigned char power_mode;
int search_on;
unsigned int tone_freq;
+ unsigned char spur_table_size;
unsigned char g_scan_time;
unsigned int g_antenna;
unsigned int g_rds_grp_proc_ps;
@@ -101,11 +102,13 @@
struct hci_fm_sig_threshold_rsp sig_th;
struct hci_fm_ch_det_threshold ch_det_threshold;
struct hci_fm_data_rd_rsp default_data;
+ struct hci_fm_spur_data spur_data;
};
static struct video_device *priv_videodev;
static int iris_do_calibration(struct iris_device *radio);
+static int update_spur_table(struct iris_device *radio);
static struct v4l2_queryctrl iris_v4l2_queryctrl[] = {
{
.id = V4L2_CID_AUDIO_VOLUME,
@@ -2896,6 +2899,7 @@
FMDERR("get frequency failed %d\n", retval);
break;
case FM_OFF:
+ radio->spur_table_size = 0;
switch (radio->mode) {
case FM_RECV:
retval = hci_cmd(HCI_FM_DISABLE_RECV_CMD,
@@ -3248,12 +3252,116 @@
*/
retval = 0;
break;
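+ /*
+ * Spur table programming: for each spur entry userspace sets
+ * SPUR_FREQ, then SPUR_FREQ_RMSSI, then SPUR_SELECTION (SELECTION
+ * also advances the table index), and finally issues
+ * UPDATE_SPUR_TABLE to push the accumulated table to the FM SoC.
+ */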
+ case V4L2_CID_PRIVATE_SPUR_FREQ:
+ if (radio->spur_table_size >= MAX_SPUR_FREQ_LIMIT) {
+ FMDERR("%s: Spur Table Full!\n", __func__);
+ retval = -1;
+ } else
+ radio->spur_data.freq[radio->spur_table_size] =
+ ctrl->value;
+ break;
+ case V4L2_CID_PRIVATE_SPUR_FREQ_RMSSI:
+ if (radio->spur_table_size >= MAX_SPUR_FREQ_LIMIT) {
+ FMDERR("%s: Spur Table Full!\n", __func__);
+ retval = -1;
+ } else
+ radio->spur_data.rmssi[radio->spur_table_size] =
+ ctrl->value;
+ break;
+ case V4L2_CID_PRIVATE_SPUR_SELECTION:
+ if (radio->spur_table_size >= MAX_SPUR_FREQ_LIMIT) {
+ FMDERR("%s: Spur Table Full!\n", __func__);
+ retval = -1;
+ } else {
+ radio->spur_data.enable[radio->spur_table_size] =
+ ctrl->value;
+ radio->spur_table_size++;
+ }
+ break;
+ case V4L2_CID_PRIVATE_UPDATE_SPUR_TABLE:
+ retval = update_spur_table(radio);
+ break;
default:
retval = -EINVAL;
}
return retval;
}
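+/*
+ * update_spur_table() pushes the spur entries accumulated through the
+ * V4L2_CID_PRIVATE_SPUR_* controls down to the FM SoC. Illustrative
+ * layout of one hci_def_data_write() request (assuming
+ * SPUR_ENTRIES_PER_ID == 5 and SPUR_DATA_SIZE == 4, i.e. the four bytes
+ * written per entry below):
+ *   data[0] = spur table ID (ENTRY_0 + batch number)
+ *   data[1] = freq MSB, data[2] = freq LSB,
+ *   data[3] = enable,   data[4] = RMSSI         <- entry 0
+ *   ... up to four more entries in the same order ...
+ *   length  = entries-in-batch * SPUR_DATA_SIZE + 1
+ */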
+static int update_spur_table(struct iris_device *radio)
+{
+ struct hci_fm_def_data_wr_req default_data;
+ int len = 0, index = 0, offset = 0, i = 0;
+ int retval = 0, temp = 0, cnt = 0;
+
+ memset(&default_data, 0, sizeof(default_data));
+
+ /* Pass the mode of SPUR_CLK */
+ default_data.mode = CKK_SPUR;
+
+ temp = radio->spur_table_size;
+ for (cnt = 0; cnt < (temp / SPUR_ENTRIES_PER_ID); cnt++) {
+ offset = 0;
+ /*
+ * Program the spur entries in spur table in following order:
+ * Spur index
+ * Length of the spur data
+ * Spur Data:
+ * MSB of the spur frequency
+ * LSB of the spur frequency
+ * Enable/Disable the spur frequency
+ * RMSSI value of the spur frequency
+ */
+ default_data.data[offset++] = ENTRY_0 + cnt;
+ for (i = 0; i < SPUR_ENTRIES_PER_ID; i++) {
+ default_data.data[offset++] = GET_FREQ(COMPUTE_SPUR(
+ radio->spur_data.freq[index]), 0);
+ default_data.data[offset++] = GET_FREQ(COMPUTE_SPUR(
+ radio->spur_data.freq[index]), 1);
+ default_data.data[offset++] =
+ radio->spur_data.enable[index];
+ default_data.data[offset++] =
+ radio->spur_data.rmssi[index];
+ index++;
+ }
+ len = (SPUR_ENTRIES_PER_ID * SPUR_DATA_SIZE);
+ default_data.length = (len + 1);
+ retval = hci_def_data_write(&default_data, radio->fm_hdev);
+ if (retval < 0) {
+ FMDBG("%s: Failed to configure entries for ID : %d\n",
+ __func__, default_data.data[0]);
+ return retval;
+ }
+ }
+
+ /* Compute balance SPUR frequencies to be programmed */
+ temp %= SPUR_ENTRIES_PER_ID;
+ if (temp > 0) {
+ offset = 0;
+ default_data.data[offset++] = ENTRY_0 +
+ (radio->spur_table_size / SPUR_ENTRIES_PER_ID);
+ for (i = 0; i < temp; i++) {
+ default_data.data[offset++] = GET_FREQ(COMPUTE_SPUR(
+ radio->spur_data.freq[index]), 0);
+ default_data.data[offset++] = GET_FREQ(COMPUTE_SPUR(
+ radio->spur_data.freq[index]), 1);
+ default_data.data[offset++] =
+ radio->spur_data.enable[index];
+ default_data.data[offset++] =
+ radio->spur_data.rmssi[index];
+ index++;
+ }
+ len = (temp * SPUR_DATA_SIZE);
+ default_data.length = (len + 1);
+ retval = hci_def_data_write(&default_data, radio->fm_hdev);
+ if (retval < 0) {
+ FMDERR("%s: Failed to configure entries for ID : %d\n",
+ __func__, default_data.data[0]);
+ return retval;
+ }
+ }
+
+ return retval;
+}
+
static int iris_vidioc_g_tuner(struct file *file, void *priv,
struct v4l2_tuner *tuner)
{
diff --git a/drivers/media/radio/radio-tavarua.c b/drivers/media/radio/radio-tavarua.c
index 971cf10..116b7f9 100644
--- a/drivers/media/radio/radio-tavarua.c
+++ b/drivers/media/radio/radio-tavarua.c
@@ -996,6 +996,10 @@
FMDBG("read PHY_TXGAIN is successful");
complete(&radio->sync_req_done);
break;
+ case (XFR_EXT | 0x80):
+ FMDBG("Set tone generator successful\n");
+ complete(&radio->sync_req_done);
+ break;
case (0x80 | RX_CONFIG):
case (0x80 | RADIO_CONFIG):
case (0x80 | RDS_CONFIG):
@@ -1214,7 +1218,7 @@
unsigned char adie_type_bahma;
int retval = 0;
unsigned int rdsMask = 0;
- unsigned char value;
+ unsigned char value = 0;
adie_type_bahma = is_bahama();
@@ -3449,9 +3453,17 @@
if (retval < 0)
FMDBG("write failed");
} break;
+ case V4L2_CID_PRIVATE_SOFT_MUTE:
+ radio->registers[IOCTRL] &= ~(IOC_SFT_MUTE);
+ if (ctrl->value)
+ radio->registers[IOCTRL] |= IOC_SFT_MUTE;
+ retval = tavarua_write_register(radio, IOCTRL,
+ radio->registers[IOCTRL]);
+ if (retval < 0)
+ FMDERR("Failed to enable/disable SMute\n");
+ break;
/*These IOCTL's are place holders to keep the
driver compatible with change in frame works for IRIS */
- case V4L2_CID_PRIVATE_SOFT_MUTE:
case V4L2_CID_PRIVATE_RIVA_ACCS_ADDR:
case V4L2_CID_PRIVATE_RIVA_ACCS_LEN:
case V4L2_CID_PRIVATE_RIVA_PEEK:
@@ -3459,7 +3471,6 @@
case V4L2_CID_PRIVATE_SSBI_ACCS_ADDR:
case V4L2_CID_PRIVATE_SSBI_PEEK:
case V4L2_CID_PRIVATE_SSBI_POKE:
- case V4L2_CID_PRIVATE_TX_TONE:
case V4L2_CID_PRIVATE_RDS_GRP_COUNTERS:
case V4L2_CID_PRIVATE_SET_NOTCH_FILTER:
case V4L2_CID_PRIVATE_TAVARUA_DO_CALIBRATION:
@@ -3479,6 +3490,54 @@
case V4L2_CID_PRIVATE_UPDATE_SPUR_TABLE:
retval = update_spur_table(radio);
break;
+ case V4L2_CID_PRIVATE_TX_TONE:
+ retval = 0;
+ memset(xfr_buf, 0, sizeof(xfr_buf));
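+ /*
+ * xfr_buf layout for the XFR_EXT tone request (byte indices are the
+ * TONE_* macros used below): the channel-enable/scaling byte selects
+ * left/right output and an optional attenuation, the left/right
+ * frequency bytes appear to be in kHz (0x01 = 1 kHz, 0x0F = 15 kHz),
+ * and TONE_GEN_CTRL_BYTE = 0x01 starts the generator.
+ */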
+ switch (ctrl->value) {
+ case ONE_KHZ_LR_EQUA_0DBFS:
+ xfr_buf[TONE_CHANNEL_EN_AND_SCALING_BYTE]
+ = TONE_LEFT_RIGHT_CH_ENABLED;
+ xfr_buf[TONE_LEFT_FREQ_BYTE] = 0x01;
+ xfr_buf[TONE_RIGHT_FREQ_BYTE] = 0x01;
+ break;
+ case ONE_KHZ_LEFTONLY_EQUA_0DBFS:
+ xfr_buf[TONE_CHANNEL_EN_AND_SCALING_BYTE]
+ = TONE_LEFT_CH_ENABLED;
+ xfr_buf[TONE_LEFT_FREQ_BYTE] = 0x01;
+ break;
+ case ONE_KHZ_RIGHTONLY_EQUA_0DBFS:
+ xfr_buf[TONE_CHANNEL_EN_AND_SCALING_BYTE]
+ = TONE_RIGHT_CH_ENABLED;
+ xfr_buf[TONE_RIGHT_FREQ_BYTE] = 0x01;
+ break;
+ case ONE_KHZ_LR_EQUA_l8DBFS:
+ xfr_buf[TONE_CHANNEL_EN_AND_SCALING_BYTE]
+ = (LSH_DATA(TONE_SCALE_IND_12,
+ TONE_SCALING_SHIFT)
+ | TONE_LEFT_RIGHT_CH_ENABLED);
+ xfr_buf[TONE_LEFT_FREQ_BYTE] = 0x01;
+ xfr_buf[TONE_RIGHT_FREQ_BYTE] = 0x01;
+ break;
+ case FIFTEEN_KHZ_LR_EQUA_l8DBFS:
+ xfr_buf[TONE_CHANNEL_EN_AND_SCALING_BYTE]
+ = (LSH_DATA(TONE_SCALE_IND_12,
+ TONE_SCALING_SHIFT)
+ | TONE_LEFT_RIGHT_CH_ENABLED);
+ xfr_buf[TONE_LEFT_FREQ_BYTE] = 0x0F;
+ xfr_buf[TONE_RIGHT_FREQ_BYTE] = 0x0F;
+ break;
+ default:
+ retval = -1;
+ FMDERR("tone generator value not valid\n");
+ break;
+ }
+ if (retval >= 0) {
+ xfr_buf[TONE_GEN_CTRL_BYTE] = 0x01;
+ retval = sync_write_xfr(radio, XFR_EXT, xfr_buf);
+ }
+ if (retval < 0)
+ FMDERR("Tone generator failed\n");
+ break;
default:
retval = -EINVAL;
}
diff --git a/drivers/media/video/msm/Kconfig b/drivers/media/video/msm/Kconfig
index 5ffc133..9c791e4 100644
--- a/drivers/media/video/msm/Kconfig
+++ b/drivers/media/video/msm/Kconfig
@@ -271,6 +271,15 @@
cores and composite them into a single
interrupt to the MSM.
+config MSM_CPP
+ bool "Qualcomm MSM Camera Post Processing Engine support"
+ depends on MSM_CAMERA && MSM_CAMERA_V4L2
+ ---help---
+ Enable support for the Camera Post-processing Engine (CPP).
+ The post-processing engine is capable of scaling and
+ cropping images. The driver supports the V4L2 subdev
+ APIs.
+
config QUP_EXCLUSIVE_TO_CAMERA
bool "QUP exclusive to camera"
depends on MSM_CAMERA
diff --git a/drivers/media/video/msm/Makefile b/drivers/media/video/msm/Makefile
index 431da2e..63120da 100644
--- a/drivers/media/video/msm/Makefile
+++ b/drivers/media/video/msm/Makefile
@@ -13,6 +13,7 @@
EXTRA_CFLAGS += -Idrivers/media/video/msm/server
obj-$(CONFIG_MSM_CAMERA) += msm_isp.o msm.o msm_mem.o msm_mctl.o msm_mctl_buf.o msm_mctl_pp.o
obj-$(CONFIG_MSM_CAMERA) += server/ eeprom/ sensors/ actuators/ csi/
+ obj-$(CONFIG_MSM_CPP) += cpp/
obj-$(CONFIG_MSM_CAMERA) += msm_gesture.o
obj-$(CONFIG_MSM_CAM_IRQ_ROUTER) += msm_camirq_router.o
else
diff --git a/drivers/media/video/msm/actuators/msm_actuator.c b/drivers/media/video/msm/actuators/msm_actuator.c
index 50399de..554cddc 100644
--- a/drivers/media/video/msm/actuators/msm_actuator.c
+++ b/drivers/media/video/msm/actuators/msm_actuator.c
@@ -14,55 +14,42 @@
#include "msm_actuator.h"
static struct msm_actuator_ctrl_t msm_actuator_t;
-
-static struct msm_actuator msm_vcm_actuator_table = {
- .act_type = ACTUATOR_VCM,
- .func_tbl = {
- .actuator_init_step_table = msm_actuator_init_step_table,
- .actuator_move_focus = msm_actuator_move_focus,
- .actuator_write_focus = msm_actuator_write_focus,
- .actuator_set_default_focus = msm_actuator_set_default_focus,
- .actuator_init_focus = msm_actuator_init_focus,
- .actuator_i2c_write = msm_actuator_i2c_write,
- },
-};
-
-static struct msm_actuator msm_piezo_actuator_table = {
- .act_type = ACTUATOR_PIEZO,
- .func_tbl = {
- .actuator_init_step_table = NULL,
- .actuator_move_focus = msm_actuator_piezo_move_focus,
- .actuator_write_focus = NULL,
- .actuator_set_default_focus =
- msm_actuator_piezo_set_default_focus,
- .actuator_init_focus = msm_actuator_init_focus,
- .actuator_i2c_write = msm_actuator_i2c_write,
- },
-};
+static struct msm_actuator msm_vcm_actuator_table;
+static struct msm_actuator msm_piezo_actuator_table;
static struct msm_actuator *actuators[] = {
&msm_vcm_actuator_table,
&msm_piezo_actuator_table,
};
-int32_t msm_actuator_piezo_set_default_focus(
+static int32_t msm_actuator_piezo_set_default_focus(
struct msm_actuator_ctrl_t *a_ctrl,
struct msm_actuator_move_params_t *move_params)
{
int32_t rc = 0;
if (a_ctrl->curr_step_pos != 0) {
- rc = a_ctrl->func_tbl->actuator_i2c_write(a_ctrl,
- a_ctrl->initial_code, 0);
- rc = a_ctrl->func_tbl->actuator_i2c_write(a_ctrl,
- a_ctrl->initial_code, 0);
+ a_ctrl->i2c_tbl_index = 0;
+ rc = a_ctrl->func_tbl->actuator_parse_i2c_params(a_ctrl,
+ a_ctrl->initial_code, 0, 0);
+ rc = a_ctrl->func_tbl->actuator_parse_i2c_params(a_ctrl,
+ a_ctrl->initial_code, 0, 0);
+ rc = msm_camera_i2c_write_table_w_microdelay(
+ &a_ctrl->i2c_client, a_ctrl->i2c_reg_tbl,
+ a_ctrl->i2c_tbl_index, a_ctrl->i2c_data_type);
+ if (rc < 0) {
+ pr_err("%s: i2c write error:%d\n",
+ __func__, rc);
+ return rc;
+ }
+ a_ctrl->i2c_tbl_index = 0;
a_ctrl->curr_step_pos = 0;
}
return rc;
}
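+/*
+ * With this change register writes are no longer issued one at a time:
+ * actuator_parse_i2c_params() only appends (addr, data, delay) entries
+ * to a_ctrl->i2c_reg_tbl, and the caller flushes the whole table with a
+ * single msm_camera_i2c_write_table_w_microdelay() call, which applies
+ * each entry's microsecond delay after the corresponding write.
+ */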
-int32_t msm_actuator_i2c_write(struct msm_actuator_ctrl_t *a_ctrl,
- int16_t next_lens_position, uint32_t hw_params)
+static int32_t msm_actuator_parse_i2c_params(struct msm_actuator_ctrl_t *a_ctrl,
+ int16_t next_lens_position, uint32_t hw_params, uint16_t delay)
{
struct msm_actuator_reg_params_t *write_arr = a_ctrl->reg_tbl;
uint32_t hw_dword = hw_params;
@@ -70,6 +57,7 @@
uint16_t value = 0;
uint32_t size = a_ctrl->reg_tbl_size, i = 0;
int32_t rc = 0;
+ struct msm_camera_i2c_reg_tbl *i2c_tbl = a_ctrl->i2c_reg_tbl;
CDBG("%s: IN\n", __func__);
for (i = 0; i < size; i++) {
if (write_arr[i].reg_write_type == MSM_ACTUATOR_WRITE_DAC) {
@@ -82,22 +70,19 @@
i2c_byte1 = write_arr[i].reg_addr;
i2c_byte2 = value;
if (size != (i+1)) {
- i2c_byte2 = (i2c_byte2 & 0xFF00) >> 8;
+ i2c_byte2 = value & 0xFF;
CDBG("%s: byte1:0x%x, byte2:0x%x\n",
__func__, i2c_byte1, i2c_byte2);
- rc = msm_camera_i2c_write(
- &a_ctrl->i2c_client,
- i2c_byte1, i2c_byte2,
- a_ctrl->i2c_data_type);
- if (rc < 0) {
- pr_err("%s: i2c write error:%d\n",
- __func__, rc);
- return rc;
- }
-
+ i2c_tbl[a_ctrl->i2c_tbl_index].
+ reg_addr = i2c_byte1;
+ i2c_tbl[a_ctrl->i2c_tbl_index].
+ reg_data = i2c_byte2;
+ i2c_tbl[a_ctrl->i2c_tbl_index].
+ delay = 0;
+ a_ctrl->i2c_tbl_index++;
i++;
i2c_byte1 = write_arr[i].reg_addr;
- i2c_byte2 = value & 0xFF;
+ i2c_byte2 = (value & 0xFF00) >> 8;
}
} else {
i2c_byte1 = (value & 0xFF00) >> 8;
@@ -110,14 +95,16 @@
}
CDBG("%s: i2c_byte1:0x%x, i2c_byte2:0x%x\n", __func__,
i2c_byte1, i2c_byte2);
- rc = msm_camera_i2c_write(&a_ctrl->i2c_client,
- i2c_byte1, i2c_byte2, a_ctrl->i2c_data_type);
+ i2c_tbl[a_ctrl->i2c_tbl_index].reg_addr = i2c_byte1;
+ i2c_tbl[a_ctrl->i2c_tbl_index].reg_data = i2c_byte2;
+ i2c_tbl[a_ctrl->i2c_tbl_index].delay = delay;
+ a_ctrl->i2c_tbl_index++;
}
CDBG("%s: OUT\n", __func__);
return rc;
}
-int32_t msm_actuator_init_focus(struct msm_actuator_ctrl_t *a_ctrl,
+static int32_t msm_actuator_init_focus(struct msm_actuator_ctrl_t *a_ctrl,
uint16_t size, enum msm_actuator_data_type type,
struct reg_settings_t *settings)
{
@@ -153,7 +140,7 @@
return rc;
}
-int32_t msm_actuator_write_focus(
+static int32_t msm_actuator_write_focus(
struct msm_actuator_ctrl_t *a_ctrl,
uint16_t curr_lens_pos,
struct damping_params_t *damping_params,
@@ -177,27 +164,25 @@
(next_lens_pos +
(sign_direction * damping_code_step))) {
rc = a_ctrl->func_tbl->
- actuator_i2c_write(a_ctrl, next_lens_pos,
- damping_params->hw_params);
+ actuator_parse_i2c_params(a_ctrl, next_lens_pos,
+ damping_params->hw_params, wait_time);
if (rc < 0) {
pr_err("%s: error:%d\n",
__func__, rc);
return rc;
}
curr_lens_pos = next_lens_pos;
- usleep(wait_time);
}
if (curr_lens_pos != code_boundary) {
rc = a_ctrl->func_tbl->
- actuator_i2c_write(a_ctrl, code_boundary,
- damping_params->hw_params);
- usleep(wait_time);
+ actuator_parse_i2c_params(a_ctrl, code_boundary,
+ damping_params->hw_params, wait_time);
}
return rc;
}
-int32_t msm_actuator_piezo_move_focus(
+static int32_t msm_actuator_piezo_move_focus(
struct msm_actuator_ctrl_t *a_ctrl,
struct msm_actuator_move_params_t *move_params)
{
@@ -208,17 +193,27 @@
if (num_steps == 0)
return rc;
+ a_ctrl->i2c_tbl_index = 0;
rc = a_ctrl->func_tbl->
- actuator_i2c_write(a_ctrl,
+ actuator_parse_i2c_params(a_ctrl,
(num_steps *
a_ctrl->region_params[0].code_per_step),
- move_params->ringing_params[0].hw_params);
+ move_params->ringing_params[0].hw_params, 0);
+ rc = msm_camera_i2c_write_table_w_microdelay(&a_ctrl->i2c_client,
+ a_ctrl->i2c_reg_tbl, a_ctrl->i2c_tbl_index,
+ a_ctrl->i2c_data_type);
+ if (rc < 0) {
+ pr_err("%s: i2c write error:%d\n",
+ __func__, rc);
+ return rc;
+ }
+ a_ctrl->i2c_tbl_index = 0;
a_ctrl->curr_step_pos = dest_step_position;
return rc;
}
-int32_t msm_actuator_move_focus(
+static int32_t msm_actuator_move_focus(
struct msm_actuator_ctrl_t *a_ctrl,
struct msm_actuator_move_params_t *move_params)
{
@@ -241,6 +236,7 @@
return rc;
curr_lens_pos = a_ctrl->step_position_table[a_ctrl->curr_step_pos];
+ a_ctrl->i2c_tbl_index = 0;
CDBG("curr_step_pos =%d dest_step_pos =%d curr_lens_pos=%d\n",
a_ctrl->curr_step_pos, dest_step_pos, curr_lens_pos);
@@ -299,10 +295,20 @@
a_ctrl->curr_step_pos = target_step_pos;
}
+ rc = msm_camera_i2c_write_table_w_microdelay(&a_ctrl->i2c_client,
+ a_ctrl->i2c_reg_tbl, a_ctrl->i2c_tbl_index,
+ a_ctrl->i2c_data_type);
+ if (rc < 0) {
+ pr_err("%s: i2c write error:%d\n",
+ __func__, rc);
+ return rc;
+ }
+ a_ctrl->i2c_tbl_index = 0;
+
return rc;
}
-int32_t msm_actuator_init_step_table(struct msm_actuator_ctrl_t *a_ctrl,
+static int32_t msm_actuator_init_step_table(struct msm_actuator_ctrl_t *a_ctrl,
struct msm_actuator_set_info_t *set_info)
{
int16_t code_per_step = 0;
@@ -361,7 +367,7 @@
return rc;
}
-int32_t msm_actuator_set_default_focus(
+static int32_t msm_actuator_set_default_focus(
struct msm_actuator_ctrl_t *a_ctrl,
struct msm_actuator_move_params_t *move_params)
{
@@ -373,7 +379,7 @@
return rc;
}
-int32_t msm_actuator_power_down(struct msm_actuator_ctrl_t *a_ctrl)
+static int32_t msm_actuator_power_down(struct msm_actuator_ctrl_t *a_ctrl)
{
int32_t rc = 0;
if (a_ctrl->vcm_enable) {
@@ -384,10 +390,13 @@
kfree(a_ctrl->step_position_table);
a_ctrl->step_position_table = NULL;
+ kfree(a_ctrl->i2c_reg_tbl);
+ a_ctrl->i2c_reg_tbl = NULL;
+ a_ctrl->i2c_tbl_index = 0;
return rc;
}
-int32_t msm_actuator_init(struct msm_actuator_ctrl_t *a_ctrl,
+static int32_t msm_actuator_init(struct msm_actuator_ctrl_t *a_ctrl,
struct msm_actuator_set_info_t *set_info) {
struct reg_settings_t *init_settings = NULL;
int32_t rc = -EFAULT;
@@ -412,7 +421,6 @@
pr_err("%s: MAX_ACTUATOR_REGION is exceeded.\n", __func__);
return -EFAULT;
}
- a_ctrl->total_steps = set_info->af_tuning_params.total_steps;
a_ctrl->pwd_step = set_info->af_tuning_params.pwd_step;
a_ctrl->total_steps = set_info->af_tuning_params.total_steps;
@@ -430,11 +438,22 @@
__func__);
return -EFAULT;
}
+
+ a_ctrl->i2c_reg_tbl =
+ kmalloc(sizeof(struct msm_camera_i2c_reg_tbl) *
+ (set_info->af_tuning_params.total_steps + 1), GFP_KERNEL);
+ if (!a_ctrl->i2c_reg_tbl) {
+ pr_err("%s kmalloc fail\n", __func__);
+ return -EFAULT;
+ }
+
if (copy_from_user(&a_ctrl->reg_tbl,
(void *)set_info->actuator_params.reg_tbl_params,
a_ctrl->reg_tbl_size *
- sizeof(struct msm_actuator_reg_params_t)))
+ sizeof(struct msm_actuator_reg_params_t))) {
+ kfree(a_ctrl->i2c_reg_tbl);
return -EFAULT;
+ }
if (set_info->actuator_params.init_setting_size) {
if (a_ctrl->func_tbl->actuator_init_focus) {
@@ -442,6 +461,7 @@
(set_info->actuator_params.init_setting_size),
GFP_KERNEL);
if (init_settings == NULL) {
+ kfree(a_ctrl->i2c_reg_tbl);
pr_err("%s Error allocating memory for init_settings\n",
__func__);
return -EFAULT;
@@ -451,6 +471,7 @@
set_info->actuator_params.init_setting_size *
sizeof(struct reg_settings_t))) {
kfree(init_settings);
+ kfree(a_ctrl->i2c_reg_tbl);
pr_err("%s Error copying init_settings\n",
__func__);
return -EFAULT;
@@ -461,6 +482,7 @@
init_settings);
kfree(init_settings);
if (rc < 0) {
+ kfree(a_ctrl->i2c_reg_tbl);
pr_err("%s Error actuator_init_focus\n",
__func__);
return -EFAULT;
@@ -480,7 +502,7 @@
}
-int32_t msm_actuator_config(struct msm_actuator_ctrl_t *a_ctrl,
+static int32_t msm_actuator_config(struct msm_actuator_ctrl_t *a_ctrl,
void __user *argp)
{
struct msm_actuator_cfg_data cdata;
@@ -519,7 +541,7 @@
return rc;
}
-int32_t msm_actuator_i2c_probe(
+static int32_t msm_actuator_i2c_probe(
struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -554,7 +576,7 @@
return rc;
}
-int32_t msm_actuator_power_up(struct msm_actuator_ctrl_t *a_ctrl)
+static int32_t msm_actuator_power_up(struct msm_actuator_ctrl_t *a_ctrl)
{
int rc = 0;
CDBG("%s called\n", __func__);
@@ -594,7 +616,7 @@
return i2c_add_driver(msm_actuator_t.i2c_driver);
}
-long msm_actuator_subdev_ioctl(struct v4l2_subdev *sd,
+static long msm_actuator_subdev_ioctl(struct v4l2_subdev *sd,
unsigned int cmd, void *arg)
{
struct msm_actuator_ctrl_t *a_ctrl = get_actrl(sd);
@@ -607,7 +629,7 @@
}
}
-int32_t msm_actuator_power(struct v4l2_subdev *sd, int on)
+static int32_t msm_actuator_power(struct v4l2_subdev *sd, int on)
{
int rc = 0;
struct msm_actuator_ctrl_t *a_ctrl = get_actrl(sd);
@@ -644,6 +666,31 @@
};
+static struct msm_actuator msm_vcm_actuator_table = {
+ .act_type = ACTUATOR_VCM,
+ .func_tbl = {
+ .actuator_init_step_table = msm_actuator_init_step_table,
+ .actuator_move_focus = msm_actuator_move_focus,
+ .actuator_write_focus = msm_actuator_write_focus,
+ .actuator_set_default_focus = msm_actuator_set_default_focus,
+ .actuator_init_focus = msm_actuator_init_focus,
+ .actuator_parse_i2c_params = msm_actuator_parse_i2c_params,
+ },
+};
+
+static struct msm_actuator msm_piezo_actuator_table = {
+ .act_type = ACTUATOR_PIEZO,
+ .func_tbl = {
+ .actuator_init_step_table = NULL,
+ .actuator_move_focus = msm_actuator_piezo_move_focus,
+ .actuator_write_focus = NULL,
+ .actuator_set_default_focus =
+ msm_actuator_piezo_set_default_focus,
+ .actuator_init_focus = msm_actuator_init_focus,
+ .actuator_parse_i2c_params = msm_actuator_parse_i2c_params,
+ },
+};
+
subsys_initcall(msm_actuator_i2c_add_driver);
MODULE_DESCRIPTION("MSM ACTUATOR");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/video/msm/actuators/msm_actuator.h b/drivers/media/video/msm/actuators/msm_actuator.h
index 4f936e7..82157e8 100644
--- a/drivers/media/video/msm/actuators/msm_actuator.h
+++ b/drivers/media/video/msm/actuators/msm_actuator.h
@@ -51,8 +51,8 @@
struct msm_actuator_move_params_t *);
int32_t (*actuator_move_focus) (struct msm_actuator_ctrl_t *,
struct msm_actuator_move_params_t *);
- int32_t (*actuator_i2c_write)(struct msm_actuator_ctrl_t *,
- int16_t, uint32_t);
+ int32_t (*actuator_parse_i2c_params)(struct msm_actuator_ctrl_t *,
+ int16_t, uint32_t, uint16_t);
int32_t (*actuator_write_focus)(struct msm_actuator_ctrl_t *,
uint16_t,
struct damping_params_t *,
@@ -87,40 +87,11 @@
uint32_t total_steps;
uint16_t pwd_step;
uint16_t initial_code;
+ struct msm_camera_i2c_reg_tbl *i2c_reg_tbl;
+ uint16_t i2c_tbl_index;
};
struct msm_actuator_ctrl_t *get_actrl(struct v4l2_subdev *sd);
-int32_t msm_actuator_i2c_write(struct msm_actuator_ctrl_t *a_ctrl,
- int16_t next_lens_position, uint32_t hw_params);
-int32_t msm_actuator_init_focus(struct msm_actuator_ctrl_t *a_ctrl,
- uint16_t size, enum msm_actuator_data_type type,
- struct reg_settings_t *settings);
-int32_t msm_actuator_i2c_write_b_af(struct msm_actuator_ctrl_t *a_ctrl,
- uint8_t msb,
- uint8_t lsb);
-int32_t msm_actuator_move_focus(struct msm_actuator_ctrl_t *a_ctrl,
- struct msm_actuator_move_params_t *move_params);
-int32_t msm_actuator_piezo_move_focus(
- struct msm_actuator_ctrl_t *a_ctrl,
- struct msm_actuator_move_params_t *move_params);
-int32_t msm_actuator_init_step_table(struct msm_actuator_ctrl_t *a_ctrl,
- struct msm_actuator_set_info_t *set_info);
-int32_t msm_actuator_set_default_focus(struct msm_actuator_ctrl_t *a_ctrl,
- struct msm_actuator_move_params_t *move_params);
-int32_t msm_actuator_piezo_set_default_focus(
- struct msm_actuator_ctrl_t *a_ctrl,
- struct msm_actuator_move_params_t *move_params);
-int32_t msm_actuator_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id);
-int32_t msm_actuator_write_focus(struct msm_actuator_ctrl_t *a_ctrl,
- uint16_t curr_lens_pos, struct damping_params_t *damping_params,
- int8_t sign_direction, int16_t code_boundary);
-int32_t msm_actuator_write_focus2(struct msm_actuator_ctrl_t *a_ctrl,
- uint16_t curr_lens_pos, struct damping_params_t *damping_params,
- int8_t sign_direction, int16_t code_boundary);
-long msm_actuator_subdev_ioctl(struct v4l2_subdev *sd,
- unsigned int cmd, void *arg);
-int32_t msm_actuator_power(struct v4l2_subdev *sd, int on);
#define VIDIOC_MSM_ACTUATOR_CFG \
_IOWR('V', BASE_VIDIOC_PRIVATE + 11, void __user *)
diff --git a/drivers/media/video/msm/cpp/Makefile b/drivers/media/video/msm/cpp/Makefile
new file mode 100644
index 0000000..b4f1fdf
--- /dev/null
+++ b/drivers/media/video/msm/cpp/Makefile
@@ -0,0 +1,5 @@
+GCC_VERSION := $(shell $(CONFIG_SHELL) $(PWD)/scripts/gcc-version.sh $(CROSS_COMPILE)gcc)
+ccflags-y += -Idrivers/media/video/msm
+ccflags-y += -Idrivers/media/video/msm/io
+obj-$(CONFIG_MSM_CPP) += msm_cpp.o
+
diff --git a/drivers/media/video/msm/cpp/msm_cpp.c b/drivers/media/video/msm/cpp/msm_cpp.c
new file mode 100644
index 0000000..e569388
--- /dev/null
+++ b/drivers/media/video/msm/cpp/msm_cpp.c
@@ -0,0 +1,412 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <mach/board.h>
+#include <mach/camera.h>
+#include <mach/vreg.h>
+#include <media/msm_isp.h>
+#include <linux/proc_fs.h>
+#include <linux/debugfs.h>
+
+#include "msm_cpp.h"
+#include "msm.h"
+
+#define CONFIG_MSM_CPP_DBG 0
+
+#if CONFIG_MSM_CPP_DBG
+#define CPP_DBG(fmt, args...) pr_info(fmt, ##args)
+#else
+#define CPP_DBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+static int cpp_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ uint32_t i;
+ struct cpp_device *cpp_dev = v4l2_get_subdevdata(sd);
+ CPP_DBG("%s\n", __func__);
+
+ mutex_lock(&cpp_dev->mutex);
+ if (cpp_dev->cpp_open_cnt == MAX_ACTIVE_CPP_INSTANCE) {
+ pr_err("No free CPP instance\n");
+ mutex_unlock(&cpp_dev->mutex);
+ return -ENODEV;
+ }
+
+ for (i = 0; i < MAX_ACTIVE_CPP_INSTANCE; i++) {
+ if (cpp_dev->cpp_subscribe_list[i].active == 0) {
+ cpp_dev->cpp_subscribe_list[i].active = 1;
+ cpp_dev->cpp_subscribe_list[i].vfh = &fh->vfh;
+ break;
+ }
+ }
+ if (i == MAX_ACTIVE_CPP_INSTANCE) {
+ pr_err("No free instance\n");
+ mutex_unlock(&cpp_dev->mutex);
+ return -ENODEV;
+ }
+
+ CPP_DBG("open %d %p\n", i, &fh->vfh);
+ cpp_dev->cpp_open_cnt++;
+ mutex_unlock(&cpp_dev->mutex);
+ return 0;
+}
+
+static int cpp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ uint32_t i;
+ struct cpp_device *cpp_dev = v4l2_get_subdevdata(sd);
+ mutex_lock(&cpp_dev->mutex);
+ for (i = 0; i < MAX_ACTIVE_CPP_INSTANCE; i++) {
+ if (cpp_dev->cpp_subscribe_list[i].vfh == &fh->vfh) {
+ cpp_dev->cpp_subscribe_list[i].active = 0;
+ cpp_dev->cpp_subscribe_list[i].vfh = NULL;
+ break;
+ }
+ }
+ if (i == MAX_ACTIVE_CPP_INSTANCE) {
+ pr_err("Invalid close\n");
+ mutex_unlock(&cpp_dev->mutex);
+ return -ENODEV;
+ }
+
+ CPP_DBG("close %d %p\n", i, &fh->vfh);
+ cpp_dev->cpp_open_cnt--;
+ mutex_unlock(&cpp_dev->mutex);
+ return 0;
+}
+
+static const struct v4l2_subdev_internal_ops msm_cpp_internal_ops = {
+ .open = cpp_open_node,
+ .close = cpp_close_node,
+};
+
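+/*
+ * When a frame completes, its info is moved from processing_q to
+ * eventData_q and a V4L2_EVENT_CPP_FRAME_DONE event is queued on the
+ * subdev node; userspace then fetches the payload with
+ * VIDIOC_MSM_CPP_GET_EVENTPAYLOAD.
+ */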
+static int msm_cpp_notify_frame_done(struct cpp_device *cpp_dev)
+{
+ struct v4l2_event v4l2_evt;
+ struct msm_queue_cmd *frame_qcmd;
+ struct msm_queue_cmd *event_qcmd;
+ struct msm_cpp_frame_info_t *processed_frame;
+ struct msm_device_queue *queue = &cpp_dev->processing_q;
+
+ if (queue->len > 0) {
+ frame_qcmd = msm_dequeue(queue, list_frame);
+ processed_frame = frame_qcmd->command;
+
+ event_qcmd = kzalloc(sizeof(struct msm_queue_cmd), GFP_KERNEL);
+ if (!event_qcmd) {
+ pr_err("%s Insufficient memory. return", __func__);
+ return -ENOMEM;
+ }
+ atomic_set(&event_qcmd->on_heap, 1);
+ event_qcmd->command = processed_frame;
+ CPP_DBG("fid %d\n", processed_frame->frame_id);
+ msm_enqueue(&cpp_dev->eventData_q, &event_qcmd->list_eventdata);
+
+ v4l2_evt.id = processed_frame->inst_id;
+ v4l2_evt.type = V4L2_EVENT_CPP_FRAME_DONE;
+ v4l2_event_queue(cpp_dev->subdev.devnode, &v4l2_evt);
+ }
+ return 0;
+}
+
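+/*
+ * Frame scheduling: up to MAX_CPP_PROCESSING_FRAME frames are kept in
+ * flight on processing_q, and realtime frames are always drained before
+ * offline frames. Actual submission to the CPP hardware is still a TBD
+ * in this version of the driver.
+ */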
+static int msm_cpp_send_frame_to_hardware(struct cpp_device *cpp_dev)
+{
+ struct msm_queue_cmd *frame_qcmd;
+ struct msm_cpp_frame_info_t *process_frame;
+ struct msm_device_queue *queue;
+
+ if (cpp_dev->processing_q.len < MAX_CPP_PROCESSING_FRAME) {
+ while (cpp_dev->processing_q.len < MAX_CPP_PROCESSING_FRAME) {
+ if (cpp_dev->realtime_q.len != 0) {
+ queue = &cpp_dev->realtime_q;
+ } else if (cpp_dev->offline_q.len != 0) {
+ queue = &cpp_dev->offline_q;
+ } else {
+ pr_debug("%s: All frames queued\n", __func__);
+ break;
+ }
+ frame_qcmd = msm_dequeue(queue, list_frame);
+ /* TBD: code to actually send the frame to hardware */
+ process_frame = frame_qcmd->command;
+
+ msm_enqueue(&cpp_dev->processing_q,
+ &frame_qcmd->list_frame);
+ }
+ }
+ return 0;
+}
+
+long msm_cpp_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ struct cpp_device *cpp_dev = v4l2_get_subdevdata(sd);
+ struct msm_camera_v4l2_ioctl_t *ioctl_ptr = arg;
+ int rc = 0;
+
+ CPP_DBG("%s: %d\n", __func__, __LINE__);
+ mutex_lock(&cpp_dev->mutex);
+ CPP_DBG("%s cmd: %d\n", __func__, cmd);
+ switch (cmd) {
+ case VIDIOC_MSM_CPP_CFG: {
+ struct msm_queue_cmd *frame_qcmd;
+ struct msm_cpp_frame_info_t *new_frame =
+ kzalloc(sizeof(struct msm_cpp_frame_info_t),
+ GFP_KERNEL);
+ if (!new_frame) {
+ pr_err("%s Insufficient memory. return", __func__);
+ mutex_unlock(&cpp_dev->mutex);
+ return -ENOMEM;
+ }
+
+ COPY_FROM_USER(rc, new_frame,
+ (void __user *)ioctl_ptr->ioctl_ptr,
+ sizeof(struct msm_cpp_frame_info_t));
+ if (rc) {
+ ERR_COPY_FROM_USER();
+ kfree(new_frame);
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+
+ frame_qcmd = kzalloc(sizeof(struct msm_queue_cmd), GFP_KERNEL);
+ if (!frame_qcmd) {
+ pr_err("%s Insufficient memory. return", __func__);
+ kfree(new_frame);
+ mutex_unlock(&cpp_dev->mutex);
+ return -ENOMEM;
+ }
+
+ atomic_set(&frame_qcmd->on_heap, 1);
+ frame_qcmd->command = new_frame;
+ if (new_frame->frame_type == MSM_CPP_REALTIME_FRAME) {
+ msm_enqueue(&cpp_dev->realtime_q,
+ &frame_qcmd->list_frame);
+ } else if (new_frame->frame_type == MSM_CPP_OFFLINE_FRAME) {
+ msm_enqueue(&cpp_dev->offline_q,
+ &frame_qcmd->list_frame);
+ } else {
+ pr_err("%s: Invalid frame type\n", __func__);
+ kfree(new_frame);
+ kfree(frame_qcmd);
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+ break;
+ }
+ case VIDIOC_MSM_CPP_GET_EVENTPAYLOAD: {
+ struct msm_device_queue *queue = &cpp_dev->eventData_q;
+ struct msm_queue_cmd *event_qcmd;
+ struct msm_cpp_frame_info_t *process_frame;
+ event_qcmd = msm_dequeue(queue, list_eventdata);
+ if (!event_qcmd) {
+ pr_err("%s: no event payload available\n", __func__);
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+ process_frame = event_qcmd->command;
+ CPP_DBG("fid %d\n", process_frame->frame_id);
+ if (copy_to_user((void __user *)ioctl_ptr->ioctl_ptr,
+ process_frame,
+ sizeof(struct msm_cpp_frame_info_t))) {
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+ kfree(process_frame);
+ kfree(event_qcmd);
+ break;
+ }
+ }
+ mutex_unlock(&cpp_dev->mutex);
+ CPP_DBG("%s: %d\n", __func__, __LINE__);
+ return 0;
+}
+
+int msm_cpp_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ CPP_DBG("%s\n", __func__);
+ return v4l2_event_subscribe(fh, sub, MAX_CPP_V4l2_EVENTS);
+}
+
+int msm_cpp_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ CPP_DBG("%s\n", __func__);
+ return v4l2_event_unsubscribe(fh, sub);
+}
+
+static struct v4l2_subdev_core_ops msm_cpp_subdev_core_ops = {
+ .ioctl = msm_cpp_subdev_ioctl,
+ .subscribe_event = msm_cpp_subscribe_event,
+ .unsubscribe_event = msm_cpp_unsubscribe_event,
+};
+
+static const struct v4l2_subdev_ops msm_cpp_subdev_ops = {
+ .core = &msm_cpp_subdev_core_ops,
+};
+
+static int msm_cpp_enable_debugfs(struct cpp_device *cpp_dev);
+
+static struct v4l2_file_operations msm_cpp_v4l2_subdev_fops;
+
+static long msm_cpp_subdev_do_ioctl(
+ struct file *file, unsigned int cmd, void *arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+ struct v4l2_fh *vfh = file->private_data;
+
+ switch (cmd) {
+ case VIDIOC_DQEVENT:
+ if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
+ return -ENOIOCTLCMD;
+
+ return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK);
+
+ case VIDIOC_SUBSCRIBE_EVENT:
+ return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg);
+
+ case VIDIOC_UNSUBSCRIBE_EVENT:
+ return v4l2_subdev_call(sd, core, unsubscribe_event, vfh, arg);
+
+ case VIDIOC_MSM_CPP_GET_INST_INFO: {
+ uint32_t i;
+ struct cpp_device *cpp_dev = v4l2_get_subdevdata(sd);
+ struct msm_camera_v4l2_ioctl_t *ioctl_ptr = arg;
+ struct msm_cpp_frame_info_t inst_info;
+ memset(&inst_info, 0, sizeof(inst_info));
+ for (i = 0; i < MAX_ACTIVE_CPP_INSTANCE; i++) {
+ if (cpp_dev->cpp_subscribe_list[i].vfh == vfh) {
+ inst_info.inst_id = i;
+ break;
+ }
+ }
+ if (copy_to_user(
+ (void __user *)ioctl_ptr->ioctl_ptr, &inst_info,
+ sizeof(struct msm_cpp_frame_info_t))) {
+ return -EINVAL;
+ }
+ }
+ break;
+ default:
+ return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
+ }
+
+ return 0;
+}
+
+static long msm_cpp_subdev_fops_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return video_usercopy(file, cmd, arg, msm_cpp_subdev_do_ioctl);
+}
+
+static int __devinit cpp_probe(struct platform_device *pdev)
+{
+ struct cpp_device *cpp_dev;
+ struct msm_cam_subdev_info sd_info;
+ int rc = 0;
+ CDBG("%s: device id = %d\n", __func__, pdev->id);
+ cpp_dev = kzalloc(sizeof(struct cpp_device), GFP_KERNEL);
+ if (!cpp_dev) {
+ pr_err("%s: no enough memory\n", __func__);
+ return -ENOMEM;
+ }
+ v4l2_subdev_init(&cpp_dev->subdev, &msm_cpp_subdev_ops);
+ cpp_dev->subdev.internal_ops = &msm_cpp_internal_ops;
+ snprintf(cpp_dev->subdev.name, ARRAY_SIZE(cpp_dev->subdev.name),
+ "cpp");
+ cpp_dev->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ cpp_dev->subdev.flags |= V4L2_SUBDEV_FL_HAS_EVENTS;
+ v4l2_set_subdevdata(&cpp_dev->subdev, cpp_dev);
+ platform_set_drvdata(pdev, &cpp_dev->subdev);
+ mutex_init(&cpp_dev->mutex);
+
+ cpp_dev->pdev = pdev;
+
+ media_entity_init(&cpp_dev->subdev.entity, 0, NULL, 0);
+ cpp_dev->subdev.entity.type = MEDIA_ENT_T_DEVNODE_V4L;
+ cpp_dev->subdev.entity.group_id = CPP_DEV;
+ cpp_dev->subdev.entity.name = pdev->name;
+ sd_info.sdev_type = CPP_DEV;
+ sd_info.sd_index = pdev->id;
+ msm_cam_register_subdev_node(&cpp_dev->subdev, &sd_info);
+ msm_cpp_v4l2_subdev_fops.owner = v4l2_subdev_fops.owner;
+ msm_cpp_v4l2_subdev_fops.open = v4l2_subdev_fops.open;
+ msm_cpp_v4l2_subdev_fops.unlocked_ioctl = msm_cpp_subdev_fops_ioctl;
+ msm_cpp_v4l2_subdev_fops.release = v4l2_subdev_fops.release;
+ msm_cpp_v4l2_subdev_fops.poll = v4l2_subdev_fops.poll;
+
+ cpp_dev->subdev.devnode->fops = &msm_cpp_v4l2_subdev_fops;
+ cpp_dev->subdev.entity.revision = cpp_dev->subdev.devnode->num;
+ msm_cpp_enable_debugfs(cpp_dev);
+ msm_queue_init(&cpp_dev->eventData_q, "eventdata");
+ msm_queue_init(&cpp_dev->offline_q, "frame");
+ msm_queue_init(&cpp_dev->realtime_q, "frame");
+ msm_queue_init(&cpp_dev->processing_q, "frame");
+ cpp_dev->cpp_open_cnt = 0;
+
+ return rc;
+}
+
+static struct platform_driver cpp_driver = {
+ .probe = cpp_probe,
+ .driver = {
+ .name = MSM_CPP_DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init msm_cpp_init_module(void)
+{
+ return platform_driver_register(&cpp_driver);
+}
+
+static void __exit msm_cpp_exit_module(void)
+{
+ platform_driver_unregister(&cpp_driver);
+}
+
+static int msm_cpp_debugfs_stream_s(void *data, u64 val)
+{
+ struct cpp_device *cpp_dev = data;
+ CPP_DBG("CPP processing frame E\n");
+ while (1) {
+ mutex_lock(&cpp_dev->mutex);
+ msm_cpp_notify_frame_done(cpp_dev);
+ msm_cpp_send_frame_to_hardware(cpp_dev);
+ mutex_unlock(&cpp_dev->mutex);
+ msleep(20);
+ }
+ CPP_DBG("CPP processing frame X\n");
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(cpp_debugfs_stream, NULL,
+ msm_cpp_debugfs_stream_s, "%llu\n");
+
+static int msm_cpp_enable_debugfs(struct cpp_device *cpp_dev)
+{
+ struct dentry *debugfs_base;
+ debugfs_base = debugfs_create_dir("msm_camera", NULL);
+ if (!debugfs_base)
+ return -ENOMEM;
+
+ if (!debugfs_create_file("test", S_IRUGO | S_IWUSR, debugfs_base,
+ (void *)cpp_dev, &cpp_debugfs_stream))
+ return -ENOMEM;
+
+ return 0;
+}
+
+module_init(msm_cpp_init_module);
+module_exit(msm_cpp_exit_module);
+MODULE_DESCRIPTION("MSM CPP driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/video/msm/cpp/msm_cpp.h b/drivers/media/video/msm/cpp/msm_cpp.h
new file mode 100644
index 0000000..8c10cac
--- /dev/null
+++ b/drivers/media/video/msm/cpp/msm_cpp.h
@@ -0,0 +1,65 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <media/v4l2-subdev.h>
+
+#define MAX_ACTIVE_CPP_INSTANCE 8
+#define MAX_CPP_PROCESSING_FRAME 2
+#define MAX_CPP_V4l2_EVENTS 30
+
+#define MSM_CPP_MICRO_BASE 0x4000
+#define MSM_CPP_MICRO_HW_VERSION 0x0000
+#define MSM_CPP_MICRO_IRQGEN_STAT 0x0004
+#define MSM_CPP_MICRO_IRQGEN_CLR 0x0008
+#define MSM_CPP_MICRO_IRQGEN_MASK 0x000C
+#define MSM_CPP_MICRO_FIFO_TX_DATA 0x0010
+#define MSM_CPP_MICRO_FIFO_TX_STAT 0x0014
+#define MSM_CPP_MICRO_FIFO_RX_DATA 0x0018
+#define MSM_CPP_MICRO_FIFO_RX_STAT 0x001C
+#define MSM_CPP_MICRO_BOOT_START 0x0020
+#define MSM_CPP_MICRO_BOOT_LDORG 0x0024
+#define MSM_CPP_MICRO_CLKEN_CTL 0x0030
+
+struct cpp_subscribe_info {
+ struct v4l2_fh *vfh;
+ uint32_t active;
+};
+
+struct cpp_device {
+ struct platform_device *pdev;
+ struct v4l2_subdev subdev;
+ struct resource *mem;
+ struct resource *irq;
+ struct resource *io;
+ void __iomem *base;
+ struct clk *cpp_clk[2];
+ struct mutex mutex;
+
+ struct cpp_subscribe_info cpp_subscribe_list[MAX_ACTIVE_CPP_INSTANCE];
+ uint32_t cpp_open_cnt;
+
+ struct msm_device_queue eventData_q; /*V4L2 Event Payload Queue*/
+
+ /*Offline Frame Queue
+ process when realtime queue is empty*/
+ struct msm_device_queue offline_q;
+ /*Realtime Frame Queue
+ process with highest priority*/
+ struct msm_device_queue realtime_q;
+ /*Processing Queue
+ store frame info for frames sent to microcontroller*/
+ struct msm_device_queue processing_q;
+};
+
diff --git a/drivers/media/video/msm/gemini/msm_gemini_sync.c b/drivers/media/video/msm/gemini/msm_gemini_sync.c
index b55ec18..ae3de13 100644
--- a/drivers/media/video/msm/gemini/msm_gemini_sync.c
+++ b/drivers/media/video/msm/gemini/msm_gemini_sync.c
@@ -280,6 +280,7 @@
GMN_DBG("%s:%d] no output return buffer\n", __func__,
__LINE__);
rc = -1;
+ return rc;
}
buf_out = msm_gemini_q_out(&pgmn_dev->output_buf_q);
diff --git a/drivers/media/video/msm/io/msm_camera_i2c.c b/drivers/media/video/msm/io/msm_camera_i2c.c
index cecf9b0..e946569 100644
--- a/drivers/media/video/msm/io/msm_camera_i2c.c
+++ b/drivers/media/video/msm/io/msm_camera_i2c.c
@@ -267,6 +267,35 @@
return rc;
}
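+/*
+ * Write a table of (addr, data, delay) register entries, sleeping for
+ * roughly 'delay' microseconds after any entry that requests one. Only
+ * byte/word address and data types are accepted.
+ */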
+int32_t msm_camera_i2c_write_table_w_microdelay(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_tbl *reg_tbl, uint16_t size,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int i;
+ int32_t rc = -EFAULT;
+
+ if (!client || !reg_tbl)
+ return rc;
+
+ if ((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
+ && client->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
+ || (data_type != MSM_CAMERA_I2C_BYTE_DATA
+ && data_type != MSM_CAMERA_I2C_WORD_DATA))
+ return rc;
+
+ for (i = 0; i < size; i++) {
+ rc = msm_camera_i2c_write(client, reg_tbl->reg_addr,
+ reg_tbl->reg_data, data_type);
+ if (rc < 0)
+ break;
+ if (reg_tbl->delay)
+ usleep_range(reg_tbl->delay, reg_tbl->delay + 1000);
+ reg_tbl++;
+ }
+ return rc;
+}
+
int32_t msm_camera_i2c_write_tbl(struct msm_camera_i2c_client *client,
struct msm_camera_i2c_reg_conf *reg_conf_tbl, uint16_t size,
enum msm_camera_i2c_data_type data_type)
diff --git a/drivers/media/video/msm/io/msm_camera_i2c.h b/drivers/media/video/msm/io/msm_camera_i2c.h
index 01c8259..a0cdd77 100644
--- a/drivers/media/video/msm/io/msm_camera_i2c.h
+++ b/drivers/media/video/msm/io/msm_camera_i2c.h
@@ -58,6 +58,12 @@
enum msm_camera_i2c_cmd_type cmd_type;
};
+struct msm_camera_i2c_reg_tbl {
+ uint16_t reg_addr;
+ uint16_t reg_data;
+ uint16_t delay;
+};
+
struct msm_camera_i2c_conf_array {
struct msm_camera_i2c_reg_conf *conf;
uint16_t size;
@@ -107,6 +113,11 @@
uint16_t addr, uint16_t data,
enum msm_camera_i2c_data_type data_type);
+int32_t msm_camera_i2c_write_table_w_microdelay(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_tbl *reg_tbl, uint16_t size,
+ enum msm_camera_i2c_data_type data_type);
+
int32_t msm_camera_i2c_write_tbl(struct msm_camera_i2c_client *client,
struct msm_camera_i2c_reg_conf *reg_conf_tbl, uint16_t size,
enum msm_camera_i2c_data_type data_type);
diff --git a/drivers/media/video/msm/mercury/msm_mercury_platform.c b/drivers/media/video/msm/mercury/msm_mercury_platform.c
index 9366ef3..67ce82d 100644
--- a/drivers/media/video/msm/mercury/msm_mercury_platform.c
+++ b/drivers/media/video/msm/mercury/msm_mercury_platform.c
@@ -11,7 +11,6 @@
*/
#include <linux/module.h>
-#include <linux/pm_qos_params.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/android_pmem.h>
diff --git a/drivers/media/video/msm/msm.c b/drivers/media/video/msm/msm.c
index d34b8e1..c6fadc0 100644
--- a/drivers/media/video/msm/msm.c
+++ b/drivers/media/video/msm/msm.c
@@ -166,17 +166,22 @@
struct msm_cam_v4l2_dev_inst, eventHandle);
D("%s\n", __func__);
WARN_ON(pctx != f->private_data);
+
+ mutex_lock(&pcam_inst->inst_lock);
rc = vb2_reqbufs(&pcam_inst->vid_bufq, pb);
if (rc < 0) {
pr_err("%s reqbufs failed %d ", __func__, rc);
+ mutex_unlock(&pcam_inst->inst_lock);
return rc;
}
if (!pb->count) {
/* Deallocation. free buf_offset array */
D("%s Inst %p freeing buffer offsets array",
__func__, pcam_inst);
- for (j = 0 ; j < pcam_inst->buf_count ; j++)
+ for (j = 0 ; j < pcam_inst->buf_count ; j++) {
kfree(pcam_inst->buf_offset[j]);
+ pcam_inst->buf_offset[j] = NULL;
+ }
kfree(pcam_inst->buf_offset);
pcam_inst->buf_offset = NULL;
/* If the userspace has deallocated all the
@@ -194,6 +199,7 @@
GFP_KERNEL);
if (!pcam_inst->buf_offset) {
pr_err("%s out of memory ", __func__);
+ mutex_unlock(&pcam_inst->inst_lock);
return -ENOMEM;
}
for (i = 0; i < pb->count; i++) {
@@ -202,15 +208,19 @@
pcam_inst->plane_info.num_planes, GFP_KERNEL);
if (!pcam_inst->buf_offset[i]) {
pr_err("%s out of memory ", __func__);
- for (j = i-1 ; j >= 0; j--)
+ for (j = i-1 ; j >= 0; j--) {
kfree(pcam_inst->buf_offset[j]);
+ pcam_inst->buf_offset[j] = NULL;
+ }
kfree(pcam_inst->buf_offset);
pcam_inst->buf_offset = NULL;
+ mutex_unlock(&pcam_inst->inst_lock);
return -ENOMEM;
}
}
}
pcam_inst->buf_count = pb->count;
+ mutex_unlock(&pcam_inst->inst_lock);
return rc;
}
@@ -218,13 +228,17 @@
struct v4l2_buffer *pb)
{
/* get the video device */
+ int rc = 0;
struct msm_cam_v4l2_dev_inst *pcam_inst;
pcam_inst = container_of(f->private_data,
struct msm_cam_v4l2_dev_inst, eventHandle);
D("%s\n", __func__);
WARN_ON(pctx != f->private_data);
- return vb2_querybuf(&pcam_inst->vid_bufq, pb);
+ mutex_lock(&pcam_inst->inst_lock);
+ rc = vb2_querybuf(&pcam_inst->vid_bufq, pb);
+ mutex_unlock(&pcam_inst->inst_lock);
+ return rc;
}
static int msm_camera_v4l2_qbuf(struct file *f, void *pctx,
@@ -240,8 +254,10 @@
pcam_inst->image_mode, pb->index);
WARN_ON(pctx != f->private_data);
+ mutex_lock(&pcam_inst->inst_lock);
if (!pcam_inst->buf_offset) {
pr_err("%s Buffer is already released. Returning.\n", __func__);
+ mutex_unlock(&pcam_inst->inst_lock);
return -EINVAL;
}
@@ -249,6 +265,7 @@
/* Reject the buffer if planes array was not allocated */
if (pb->m.planes == NULL) {
pr_err("%s Planes array is null\n", __func__);
+ mutex_unlock(&pcam_inst->inst_lock);
return -EINVAL;
}
for (i = 0; i < pcam_inst->plane_info.num_planes; i++) {
@@ -269,7 +286,7 @@
rc = vb2_qbuf(&pcam_inst->vid_bufq, pb);
D("%s, videobuf_qbuf mode %d and idx %d returns %d\n", __func__,
pcam_inst->image_mode, pb->index, rc);
-
+ mutex_unlock(&pcam_inst->inst_lock);
return rc;
}
@@ -285,6 +302,11 @@
D("%s\n", __func__);
WARN_ON(pctx != f->private_data);
+ mutex_lock(&pcam_inst->inst_lock);
+ if (0 == pcam_inst->streamon) {
+ mutex_unlock(&pcam_inst->inst_lock);
+ return -EACCES;
+ }
rc = vb2_dqbuf(&pcam_inst->vid_bufq, pb, f->f_flags & O_NONBLOCK);
D("%s, videobuf_dqbuf returns %d\n", __func__, rc);
@@ -292,6 +314,7 @@
/* Reject the buffer if planes array was not allocated */
if (pb->m.planes == NULL) {
pr_err("%s Planes array is null\n", __func__);
+ mutex_unlock(&pcam_inst->inst_lock);
return -EINVAL;
}
for (i = 0; i < pcam_inst->plane_info.num_planes; i++) {
@@ -309,6 +332,7 @@
pb->reserved = pcam_inst->buf_offset[pb->index][0].addr_offset;
}
+ mutex_unlock(&pcam_inst->inst_lock);
return rc;
}
@@ -325,9 +349,13 @@
D("%s Inst %p\n", __func__, pcam_inst);
WARN_ON(pctx != f->private_data);
+ mutex_lock(&pcam->vid_lock);
+ mutex_lock(&pcam_inst->inst_lock);
if ((buf_type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) &&
(buf_type != V4L2_BUF_TYPE_VIDEO_CAPTURE)) {
pr_err("%s Invalid buffer type ", __func__);
+ mutex_unlock(&pcam_inst->inst_lock);
+ mutex_unlock(&pcam->vid_lock);
return -EINVAL;
}
@@ -336,10 +364,10 @@
rc = vb2_streamon(&pcam_inst->vid_bufq, buf_type);
D("%s, videobuf_streamon returns %d\n", __func__, rc);
- mutex_lock(&pcam->vid_lock);
/* turn HW (VFE/sensor) streaming */
pcam_inst->streamon = 1;
rc = msm_server_streamon(pcam, pcam_inst->my_index);
+ mutex_unlock(&pcam_inst->inst_lock);
mutex_unlock(&pcam->vid_lock);
D("%s rc = %d\n", __func__, rc);
return rc;
@@ -367,16 +395,20 @@
/* first turn of HW (VFE/sensor) streaming so that buffers are
not in use when we free the buffers */
mutex_lock(&pcam->vid_lock);
+ mutex_lock(&pcam_inst->inst_lock);
pcam_inst->streamon = 0;
if (msm_server_get_usecount() > 0)
rc = msm_server_streamoff(pcam, pcam_inst->my_index);
- mutex_unlock(&pcam->vid_lock);
+
if (rc < 0)
pr_err("%s: hw failed to stop streaming\n", __func__);
/* stop buffer streaming */
rc = vb2_streamoff(&pcam_inst->vid_bufq, buf_type);
D("%s, videobuf_streamoff returns %d\n", __func__, rc);
+
+ mutex_unlock(&pcam_inst->inst_lock);
+ mutex_unlock(&pcam->vid_lock);
return rc;
}
@@ -466,11 +498,13 @@
D("%s\n", __func__);
WARN_ON(pctx != f->private_data);
+ mutex_lock(&pcam->vid_lock);
rc = msm_server_try_fmt(pcam, pfmt);
if (rc)
pr_err("Format %x not found, rc = %d\n",
pfmt->fmt.pix.pixelformat, rc);
+ mutex_unlock(&pcam->vid_lock);
return rc;
}
@@ -484,11 +518,13 @@
D("%s\n", __func__);
WARN_ON(pctx != f->private_data);
+ mutex_lock(&pcam->vid_lock);
rc = msm_server_try_fmt_mplane(pcam, pfmt);
if (rc)
pr_err("Format %x not found, rc = %d\n",
pfmt->fmt.pix_mp.pixelformat, rc);
+ mutex_unlock(&pcam->vid_lock);
return rc;
}
@@ -795,6 +831,7 @@
mutex_unlock(&pcam->vid_lock);
return rc;
}
+ mutex_init(&pcam_inst->inst_lock);
pcam_inst->sensor_pxlcode = pcam->usr_fmts[0].pxlcode;
pcam_inst->my_index = i;
pcam_inst->pcam = pcam;
@@ -887,7 +924,9 @@
pcam->dev_inst[i] = NULL;
pcam->use_count = 0;
}
+ pcam->dev_inst[i] = NULL;
mutex_unlock(&pcam->vid_lock);
+ mutex_destroy(&pcam_inst->inst_lock);
kfree(pcam_inst);
pr_err("%s: error end", __func__);
return rc;
@@ -986,6 +1025,7 @@
}
mutex_lock(&pcam->vid_lock);
+ mutex_lock(&pcam_inst->inst_lock);
if (pcam_inst->streamon) {
/*something went wrong since instance
@@ -1011,6 +1051,8 @@
v4l2_fh_del(&pcam_inst->eventHandle);
v4l2_fh_exit(&pcam_inst->eventHandle);
}
+ mutex_unlock(&pcam_inst->inst_lock);
+ mutex_destroy(&pcam_inst->inst_lock);
kfree(pcam_inst);
f->private_data = NULL;
diff --git a/drivers/media/video/msm/msm.h b/drivers/media/video/msm/msm.h
index d0322d1..7d07e7b 100644
--- a/drivers/media/video/msm/msm.h
+++ b/drivers/media/video/msm/msm.h
@@ -59,6 +59,7 @@
#define MSM_MERCURY_DRV_NAME "msm_mercury"
#define MSM_I2C_MUX_DRV_NAME "msm_cam_i2c_mux"
#define MSM_IRQ_ROUTER_DRV_NAME "msm_cam_irq_router"
+#define MSM_CPP_DRV_NAME "msm_cpp"
#define MAX_NUM_CSIPHY_DEV 3
#define MAX_NUM_CSID_DEV 4
@@ -68,6 +69,7 @@
#define MAX_NUM_AXI_DEV 2
#define MAX_NUM_VPE_DEV 1
#define MAX_NUM_JPEG_DEV 3
+#define MAX_NUM_CPP_DEV 1
enum msm_cam_subdev_type {
CSIPHY_DEV,
@@ -82,6 +84,7 @@
EEPROM_DEV,
GESTURE_DEV,
IRQ_ROUTER_DEV,
+ CPP_DEV,
};
/* msm queue management APIs*/
@@ -261,6 +264,7 @@
struct v4l2_subdev *vpe_sdev; /* vpe sub device */
struct v4l2_subdev *axi_sdev; /* axi sub device */
struct v4l2_subdev *eeprom_sdev; /* eeprom sub device */
+ struct v4l2_subdev *cpp_sdev;/*cpp sub device*/
struct msm_isp_ops *isp_sdev; /* isp sub device : camif/VFE */
struct msm_cam_config_dev *config_device;
@@ -343,6 +347,7 @@
int is_mem_map_inst;
struct img_plane_info plane_info;
int vbqueue_initialized;
+ struct mutex inst_lock;
};
struct msm_cam_mctl_node {
@@ -540,6 +545,7 @@
struct v4l2_subdev *axi_device[MAX_NUM_AXI_DEV];
struct v4l2_subdev *vpe_device[MAX_NUM_VPE_DEV];
struct v4l2_subdev *gesture_device;
+ struct v4l2_subdev *cpp_device[MAX_NUM_CPP_DEV];
struct v4l2_subdev *irqr_device;
spinlock_t intr_table_lock;
diff --git a/drivers/media/video/msm/msm_mctl.c b/drivers/media/video/msm/msm_mctl.c
index cdfad3b..1359792 100644
--- a/drivers/media/video/msm/msm_mctl.c
+++ b/drivers/media/video/msm/msm_mctl.c
@@ -1123,17 +1123,22 @@
struct msm_cam_v4l2_dev_inst, eventHandle);
D("%s\n", __func__);
WARN_ON(pctx != f->private_data);
+
+ mutex_lock(&pcam_inst->inst_lock);
rc = vb2_reqbufs(&pcam_inst->vid_bufq, pb);
if (rc < 0) {
pr_err("%s reqbufs failed %d ", __func__, rc);
+ mutex_unlock(&pcam_inst->inst_lock);
return rc;
}
if (!pb->count) {
/* Deallocation. free buf_offset array */
D("%s Inst %p freeing buffer offsets array",
__func__, pcam_inst);
- for (j = 0 ; j < pcam_inst->buf_count ; j++)
+ for (j = 0 ; j < pcam_inst->buf_count ; j++) {
kfree(pcam_inst->buf_offset[j]);
+ pcam_inst->buf_offset[j] = NULL;
+ }
kfree(pcam_inst->buf_offset);
pcam_inst->buf_offset = NULL;
/* If the userspace has deallocated all the
@@ -1151,6 +1156,7 @@
GFP_KERNEL);
if (!pcam_inst->buf_offset) {
pr_err("%s out of memory ", __func__);
+ mutex_unlock(&pcam_inst->inst_lock);
return -ENOMEM;
}
for (i = 0; i < pb->count; i++) {
@@ -1159,10 +1165,13 @@
pcam_inst->plane_info.num_planes, GFP_KERNEL);
if (!pcam_inst->buf_offset[i]) {
pr_err("%s out of memory ", __func__);
- for (j = i-1 ; j >= 0; j--)
+ for (j = i-1 ; j >= 0; j--) {
kfree(pcam_inst->buf_offset[j]);
+ pcam_inst->buf_offset[j] = NULL;
+ }
kfree(pcam_inst->buf_offset);
pcam_inst->buf_offset = NULL;
+ mutex_unlock(&pcam_inst->inst_lock);
return -ENOMEM;
}
}
@@ -1170,6 +1179,7 @@
pcam_inst->buf_count = pb->count;
D("%s inst %p, buf count %d ", __func__,
pcam_inst, pcam_inst->buf_count);
+ mutex_unlock(&pcam_inst->inst_lock);
return rc;
}
@@ -1177,13 +1187,17 @@
struct v4l2_buffer *pb)
{
/* get the video device */
+ int rc = 0;
struct msm_cam_v4l2_dev_inst *pcam_inst;
pcam_inst = container_of(f->private_data,
struct msm_cam_v4l2_dev_inst, eventHandle);
D("%s\n", __func__);
WARN_ON(pctx != f->private_data);
- return vb2_querybuf(&pcam_inst->vid_bufq, pb);
+ mutex_lock(&pcam_inst->inst_lock);
+ rc = vb2_querybuf(&pcam_inst->vid_bufq, pb);
+ mutex_unlock(&pcam_inst->inst_lock);
+ return rc;
}
static int msm_mctl_v4l2_qbuf(struct file *f, void *pctx,
@@ -1198,8 +1212,10 @@
D("%s Inst = %p\n", __func__, pcam_inst);
WARN_ON(pctx != f->private_data);
+ mutex_lock(&pcam_inst->inst_lock);
if (!pcam_inst->buf_offset) {
pr_err("%s Buffer is already released. Returning. ", __func__);
+ mutex_unlock(&pcam_inst->inst_lock);
return -EINVAL;
}
@@ -1207,6 +1223,7 @@
/* Reject the buffer if planes array was not allocated */
if (pb->m.planes == NULL) {
pr_err("%s Planes array is null ", __func__);
+ mutex_unlock(&pcam_inst->inst_lock);
return -EINVAL;
}
for (i = 0; i < pcam_inst->plane_info.num_planes; i++) {
@@ -1232,6 +1249,7 @@
rc = vb2_qbuf(&pcam_inst->vid_bufq, pb);
D("%s, videobuf_qbuf returns %d\n", __func__, rc);
+ mutex_unlock(&pcam_inst->inst_lock);
return rc;
}
@@ -1246,10 +1264,16 @@
D("%s\n", __func__);
WARN_ON(pctx != f->private_data);
+ mutex_lock(&pcam_inst->inst_lock);
+ if (0 == pcam_inst->streamon) {
+ mutex_unlock(&pcam_inst->inst_lock);
+ return -EACCES;
+ }
rc = vb2_dqbuf(&pcam_inst->vid_bufq, pb, f->f_flags & O_NONBLOCK);
D("%s, videobuf_dqbuf returns %d\n", __func__, rc);
+ mutex_unlock(&pcam_inst->inst_lock);
return rc;
}
@@ -1266,9 +1290,13 @@
D("%s Inst %p\n", __func__, pcam_inst);
WARN_ON(pctx != f->private_data);
+ mutex_lock(&pcam->mctl_node.dev_lock);
+ mutex_lock(&pcam_inst->inst_lock);
if ((buf_type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) &&
(buf_type != V4L2_BUF_TYPE_VIDEO_CAPTURE)) {
pr_err("%s Invalid buffer type ", __func__);
+ mutex_unlock(&pcam_inst->inst_lock);
+ mutex_unlock(&pcam->mctl_node.dev_lock);
return -EINVAL;
}
@@ -1277,9 +1305,9 @@
rc = vb2_streamon(&pcam_inst->vid_bufq, buf_type);
D("%s, videobuf_streamon returns %d\n", __func__, rc);
- mutex_lock(&pcam->mctl_node.dev_lock);
/* turn HW (VFE/sensor) streaming */
pcam_inst->streamon = 1;
+ mutex_unlock(&pcam_inst->inst_lock);
mutex_unlock(&pcam->mctl_node.dev_lock);
D("%s rc = %d\n", __func__, rc);
return rc;
@@ -1307,14 +1335,16 @@
/* first turn of HW (VFE/sensor) streaming so that buffers are
not in use when we free the buffers */
mutex_lock(&pcam->mctl_node.dev_lock);
+ mutex_lock(&pcam_inst->inst_lock);
pcam_inst->streamon = 0;
- mutex_unlock(&pcam->mctl_node.dev_lock);
if (rc < 0)
pr_err("%s: hw failed to stop streaming\n", __func__);
/* stop buffer streaming */
rc = vb2_streamoff(&pcam_inst->vid_bufq, buf_type);
D("%s, videobuf_streamoff returns %d\n", __func__, rc);
+ mutex_unlock(&pcam_inst->inst_lock);
+ mutex_unlock(&pcam->mctl_node.dev_lock);
return rc;
}
diff --git a/drivers/media/video/msm/msm_mctl_buf.c b/drivers/media/video/msm/msm_mctl_buf.c
index eade6f1..cd86a80 100644
--- a/drivers/media/video/msm/msm_mctl_buf.c
+++ b/drivers/media/video/msm/msm_mctl_buf.c
@@ -116,6 +116,10 @@
}
buf_idx = vb->v4l2_buf.index;
pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
+ if (pmctl == NULL) {
+ pr_err("%s No mctl found\n", __func__);
+ return -EINVAL;
+ }
for (i = 0; i < vb->num_planes; i++) {
mem = vb2_plane_cookie(vb, i);
if (buf_type == VIDEOBUF2_MULTIPLE_PLANES)
@@ -147,13 +151,14 @@
struct msm_cam_v4l2_dev_inst *pcam_inst;
struct msm_cam_v4l2_device *pcam;
struct msm_frame_buffer *buf;
- struct vb2_queue *vq = vb->vb2_queue;
+ struct vb2_queue *vq;
D("%s\n", __func__);
- if (!vb || !vq) {
+ if (!vb || !vb->vb2_queue) {
pr_err("%s error : input is NULL\n", __func__);
return -EINVAL;
}
+ vq = vb->vb2_queue;
pcam_inst = vb2_get_drv_priv(vq);
pcam = pcam_inst->pcam;
buf = container_of(vb, struct msm_frame_buffer, vidbuf);
@@ -207,6 +212,12 @@
pcam = pcam_inst->pcam;
buf = container_of(vb, struct msm_frame_buffer, vidbuf);
+ pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
+ if (pmctl == NULL) {
+ pr_err("%s No mctl found\n", __func__);
+ return;
+ }
+
if (pcam_inst->vid_fmt.type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
for (i = 0; i < vb->num_planes; i++) {
mem = vb2_plane_cookie(vb, i);
@@ -251,7 +262,6 @@
}
spin_unlock_irqrestore(&pcam_inst->vq_irqlock, flags);
}
- pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
for (i = 0; i < vb->num_planes; i++) {
mem = vb2_plane_cookie(vb, i);
videobuf2_pmem_contig_user_put(mem, pmctl->client);
@@ -274,13 +284,14 @@
struct msm_cam_v4l2_dev_inst *pcam_inst = NULL;
struct msm_cam_v4l2_device *pcam = NULL;
unsigned long flags = 0;
- struct vb2_queue *vq = vb->vb2_queue;
+ struct vb2_queue *vq;
struct msm_frame_buffer *buf;
D("%s\n", __func__);
- if (!vb || !vq) {
+ if (!vb || !vb->vb2_queue) {
pr_err("%s error : input is NULL\n", __func__);
return ;
}
+ vq = vb->vb2_queue;
pcam_inst = vb2_get_drv_priv(vq);
pcam = pcam_inst->pcam;
D("%s pcam_inst=%p,(vb=0x%p),idx=%d,len=%d\n",
@@ -473,6 +484,10 @@
{
struct msm_cam_media_controller *pmctl;
pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
+ if (pmctl == NULL) {
+ pr_err("%s No mctl found\n", __func__);
+ return -EINVAL;
+ }
pmctl->mctl_vbqueue_init = msm_vbqueue_init;
return 0;
}
diff --git a/drivers/media/video/msm/msm_vfe32.c b/drivers/media/video/msm/msm_vfe32.c
index acff492..75904cc 100644
--- a/drivers/media/video/msm/msm_vfe32.c
+++ b/drivers/media/video/msm/msm_vfe32.c
@@ -50,6 +50,9 @@
vfe32_put_ch_ping_addr((base), (chn), (addr)))
static uint32_t vfe_clk_rate;
+static void vfe32_send_isp_msg(struct v4l2_subdev *sd,
+ uint32_t vfeFrameId, uint32_t isp_msg_id);
+
struct vfe32_isr_queue_cmd {
struct list_head list;
@@ -796,6 +799,15 @@
share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
}
+static void vfe32_stop_liveshot(
+ struct msm_cam_media_controller *pmctl,
+ struct vfe32_ctrl_type *vfe32_ctrl)
+{
+ vfe32_ctrl->share_ctrl->liveshot_state = VFE_STATE_STOP_REQUESTED;
+ msm_camera_io_w_mb(1,
+ vfe32_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+}
+
static int vfe32_zsl(
struct msm_cam_media_controller *pmctl,
struct vfe32_ctrl_type *vfe32_ctrl)
@@ -2703,6 +2715,10 @@
*cmdp & VFE_FRAME_SKIP_PERIOD_MASK) + 1;
vfe32_ctrl->frame_skip_pattern = (uint32_t)(*(cmdp + 2));
break;
+ case VFE_CMD_STOP_LIVESHOT:
+ CDBG("%s Stopping liveshot ", __func__);
+ vfe32_stop_liveshot(pmctl, vfe32_ctrl);
+ break;
default:
if (cmd->length != vfe32_cmd[cmd->id].length)
return -EINVAL;
@@ -2923,48 +2939,62 @@
}
}
- if (vfe32_ctrl->share_ctrl->liveshot_state ==
- VFE_STATE_START_REQUESTED) {
- pr_info("%s enabling liveshot output\n", __func__);
+ switch (vfe32_ctrl->share_ctrl->liveshot_state) {
+ case VFE_STATE_START_REQUESTED:
+ CDBG("%s enabling liveshot output\n", __func__);
if (vfe32_ctrl->share_ctrl->outpath.output_mode &
- VFE32_OUTPUT_MODE_PRIMARY) {
+ VFE32_OUTPUT_MODE_PRIMARY) {
msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase +
vfe32_AXI_WM_CFG[vfe32_ctrl->
share_ctrl->outpath.out0.ch0]);
msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase +
- vfe32_AXI_WM_CFG[vfe32_ctrl->
+ vfe32_AXI_WM_CFG[vfe32_ctrl->
share_ctrl->outpath.out0.ch1]);
+
vfe32_ctrl->share_ctrl->liveshot_state =
VFE_STATE_STARTED;
}
- }
-
- if (vfe32_ctrl->share_ctrl->liveshot_state == VFE_STATE_STARTED) {
+ break;
+ case VFE_STATE_STARTED:
vfe32_ctrl->share_ctrl->vfe_capture_count--;
- if (!vfe32_ctrl->share_ctrl->vfe_capture_count)
- vfe32_ctrl->share_ctrl->liveshot_state =
- VFE_STATE_STOP_REQUESTED;
- msm_camera_io_w_mb(1, vfe32_ctrl->
- share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
- } else if (vfe32_ctrl->share_ctrl->liveshot_state ==
- VFE_STATE_STOP_REQUESTED) {
- CDBG("%s: disabling liveshot output\n", __func__);
- if (vfe32_ctrl->share_ctrl->outpath.output_mode &
- VFE32_OUTPUT_MODE_PRIMARY) {
+ if (!vfe32_ctrl->share_ctrl->vfe_capture_count &&
+ (vfe32_ctrl->share_ctrl->outpath.output_mode &
+ VFE32_OUTPUT_MODE_PRIMARY)) {
msm_camera_io_w(0, vfe32_ctrl->share_ctrl->vfebase +
vfe32_AXI_WM_CFG[vfe32_ctrl->
share_ctrl->outpath.out0.ch0]);
msm_camera_io_w(0, vfe32_ctrl->share_ctrl->vfebase +
vfe32_AXI_WM_CFG[vfe32_ctrl->
share_ctrl->outpath.out0.ch1]);
+ }
+ break;
+ case VFE_STATE_STOP_REQUESTED:
+ if (vfe32_ctrl->share_ctrl->outpath.output_mode &
+ VFE32_OUTPUT_MODE_PRIMARY) {
+ /* Stop requested, stop write masters, and
+ * trigger REG_UPDATE. Send STOP_LS_ACK in
+ * next reg update. */
+ msm_camera_io_w(0, vfe32_ctrl->share_ctrl->vfebase +
+ vfe32_AXI_WM_CFG[vfe32_ctrl->
+ share_ctrl->outpath.out0.ch0]);
+ msm_camera_io_w(0, vfe32_ctrl->share_ctrl->vfebase +
+ vfe32_AXI_WM_CFG[vfe32_ctrl->
+ share_ctrl->outpath.out0.ch1]);
+
vfe32_ctrl->share_ctrl->liveshot_state =
VFE_STATE_STOPPED;
msm_camera_io_w_mb(1, vfe32_ctrl->share_ctrl->vfebase +
VFE_REG_UPDATE_CMD);
}
- } else if (vfe32_ctrl->share_ctrl->liveshot_state ==
- VFE_STATE_STOPPED) {
+ break;
+ case VFE_STATE_STOPPED:
+ CDBG("%s Sending STOP_LS ACK\n", __func__);
+ vfe32_send_isp_msg(&vfe32_ctrl->subdev,
+ vfe32_ctrl->share_ctrl->vfeFrameId, MSG_ID_STOP_LS_ACK);
vfe32_ctrl->share_ctrl->liveshot_state = VFE_STATE_IDLE;
+ break;
+ default:
+ break;
}
if ((vfe32_ctrl->share_ctrl->operation_mode ==
@@ -3356,9 +3386,6 @@
ch1_paddr, ch2_paddr,
axi_ctrl->share_ctrl->outpath.out0.image_mode);
- if (axi_ctrl->share_ctrl->liveshot_state == VFE_STATE_STOPPED)
- axi_ctrl->share_ctrl->liveshot_state = VFE_STATE_IDLE;
-
} else {
axi_ctrl->share_ctrl->outpath.out0.frame_drop_cnt++;
CDBG("path_irq_0 - no free buffer!\n");
diff --git a/drivers/media/video/msm/sensors/imx074_v4l2.c b/drivers/media/video/msm/sensors/imx074_v4l2.c
index 3d23337..ddf0754 100644
--- a/drivers/media/video/msm/sensors/imx074_v4l2.c
+++ b/drivers/media/video/msm/sensors/imx074_v4l2.c
@@ -276,7 +276,7 @@
.sensor_config = msm_sensor_config,
.sensor_power_up = msm_sensor_power_up,
.sensor_power_down = msm_sensor_power_down,
- .sensor_adjust_frame_lines = msm_sensor_adjust_frame_lines,
+ .sensor_adjust_frame_lines = msm_sensor_adjust_frame_lines1,
.sensor_get_csi_params = msm_sensor_get_csi_params,
};
diff --git a/drivers/media/video/msm/sensors/imx091.c b/drivers/media/video/msm/sensors/imx091.c
index 49442e9..7fda037 100644
--- a/drivers/media/video/msm/sensors/imx091.c
+++ b/drivers/media/video/msm/sensors/imx091.c
@@ -303,7 +303,7 @@
.sensor_config = msm_sensor_config,
.sensor_power_up = msm_sensor_power_up,
.sensor_power_down = msm_sensor_power_down,
- .sensor_adjust_frame_lines = msm_sensor_adjust_frame_lines,
+ .sensor_adjust_frame_lines = msm_sensor_adjust_frame_lines1,
.sensor_get_csi_params = msm_sensor_get_csi_params,
};
diff --git a/drivers/media/video/msm/sensors/msm_sensor.c b/drivers/media/video/msm/sensors/msm_sensor.c
index 8ab3963..be1efe0 100644
--- a/drivers/media/video/msm/sensors/msm_sensor.c
+++ b/drivers/media/video/msm/sensors/msm_sensor.c
@@ -16,7 +16,7 @@
#include "msm_camera_i2c_mux.h"
/*=============================================================*/
-int32_t msm_sensor_adjust_frame_lines(struct msm_sensor_ctrl_t *s_ctrl,
+int32_t msm_sensor_adjust_frame_lines1(struct msm_sensor_ctrl_t *s_ctrl,
uint16_t res)
{
uint16_t cur_line = 0;
@@ -50,6 +50,45 @@
return 0;
}
+int32_t msm_sensor_adjust_frame_lines2(struct msm_sensor_ctrl_t *s_ctrl,
+ uint16_t res)
+{
+ uint16_t cur_line = 0;
+ uint16_t exp_fl_lines = 0;
+ uint8_t int_time[3];
+ if (s_ctrl->sensor_exp_gain_info) {
+ if (s_ctrl->prev_gain && s_ctrl->prev_line &&
+ s_ctrl->func_tbl->sensor_write_exp_gain)
+ s_ctrl->func_tbl->sensor_write_exp_gain(
+ s_ctrl,
+ s_ctrl->prev_gain,
+ s_ctrl->prev_line);
+
+ msm_camera_i2c_read_seq(s_ctrl->sensor_i2c_client,
+ s_ctrl->sensor_exp_gain_info->coarse_int_time_addr-1,
+ &int_time[0], 3);
+ cur_line |= int_time[0] << 12;
+ cur_line |= int_time[1] << 4;
+ cur_line |= int_time[2] >> 4;
+ exp_fl_lines = cur_line +
+ s_ctrl->sensor_exp_gain_info->vert_offset;
+ if (exp_fl_lines > s_ctrl->msm_sensor_reg->
+ output_settings[res].frame_length_lines)
+ msm_camera_i2c_write(s_ctrl->sensor_i2c_client,
+ s_ctrl->sensor_output_reg_addr->
+ frame_length_lines,
+ exp_fl_lines,
+ MSM_CAMERA_I2C_WORD_DATA);
+ CDBG("%s cur_line %x cur_fl_lines %x, exp_fl_lines %x\n",
+ __func__,
+ cur_line,
+ s_ctrl->msm_sensor_reg->
+ output_settings[res].frame_length_lines,
+ exp_fl_lines);
+ }
+ return 0;
+}
+
int32_t msm_sensor_write_init_settings(struct msm_sensor_ctrl_t *s_ctrl)
{
int32_t rc;
diff --git a/drivers/media/video/msm/sensors/msm_sensor.h b/drivers/media/video/msm/sensors/msm_sensor.h
index b1e584d..a3ddaa7 100644
--- a/drivers/media/video/msm/sensors/msm_sensor.h
+++ b/drivers/media/video/msm/sensors/msm_sensor.h
@@ -236,7 +236,10 @@
int32_t msm_sensor_write_output_settings(struct msm_sensor_ctrl_t *s_ctrl,
uint16_t res);
-int32_t msm_sensor_adjust_frame_lines(struct msm_sensor_ctrl_t *s_ctrl,
+int32_t msm_sensor_adjust_frame_lines1(struct msm_sensor_ctrl_t *s_ctrl,
+ uint16_t res);
+
+int32_t msm_sensor_adjust_frame_lines2(struct msm_sensor_ctrl_t *s_ctrl,
uint16_t res);
int32_t msm_sensor_setting(struct msm_sensor_ctrl_t *s_ctrl,
diff --git a/drivers/media/video/msm/sensors/ov2720.c b/drivers/media/video/msm/sensors/ov2720.c
index 03f1af1..e4c5061 100644
--- a/drivers/media/video/msm/sensors/ov2720.c
+++ b/drivers/media/video/msm/sensors/ov2720.c
@@ -783,7 +783,7 @@
.sensor_config = msm_sensor_config,
.sensor_power_up = msm_sensor_power_up,
.sensor_power_down = msm_sensor_power_down,
- .sensor_adjust_frame_lines = msm_sensor_adjust_frame_lines,
+ .sensor_adjust_frame_lines = msm_sensor_adjust_frame_lines2,
.sensor_get_csi_params = msm_sensor_get_csi_params,
};
diff --git a/drivers/media/video/msm/sensors/s5k3l1yx.c b/drivers/media/video/msm/sensors/s5k3l1yx.c
index d7aeb74..c24da00 100644
--- a/drivers/media/video/msm/sensors/s5k3l1yx.c
+++ b/drivers/media/video/msm/sensors/s5k3l1yx.c
@@ -652,7 +652,7 @@
.sensor_config = msm_sensor_config,
.sensor_power_up = msm_sensor_power_up,
.sensor_power_down = msm_sensor_power_down,
- .sensor_adjust_frame_lines = msm_sensor_adjust_frame_lines,
+ .sensor_adjust_frame_lines = msm_sensor_adjust_frame_lines1,
.sensor_get_csi_params = msm_sensor_get_csi_params,
};
diff --git a/drivers/media/video/msm/server/msm_cam_server.c b/drivers/media/video/msm/server/msm_cam_server.c
index dfa7fbe..e21de29 100644
--- a/drivers/media/video/msm/server/msm_cam_server.c
+++ b/drivers/media/video/msm/server/msm_cam_server.c
@@ -637,6 +637,7 @@
__func__, tmp_cmd.type, (uint32_t)tmp_cmd.value,
tmp_cmd.length, tmp_cmd.status, rc);
kfree(ctrl_data);
+ ctrl_data = NULL;
return rc;
}
@@ -899,6 +900,12 @@
/*for single VFE msms (8660, 8960v1), just populate the session
with our VFE devices that registered*/
pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
+ if (pmctl == NULL) {
+ pr_err("%s: cannot find mctl\n", __func__);
+ msm_mctl_free(pcam);
+ atomic_dec(&ps->number_pcam_active);
+ return -ENODEV;
+ }
pmctl->axi_sdev = ps->axi_device[0];
pmctl->isp_sdev = ps->isp_subdev[0];
return rc;
@@ -1888,8 +1895,17 @@
case GESTURE_DEV:
g_server_dev.gesture_device = sd;
break;
+
case IRQ_ROUTER_DEV:
g_server_dev.irqr_device = sd;
+ break;
+
+ case CPP_DEV:
+ if (index >= MAX_NUM_CPP_DEV) {
+ pr_err("%s Invalid CPP idx %d", __func__, index);
+ err = -EINVAL;
+ break;
+ }
+ g_server_dev.cpp_device[index] = sd;
break;
default:
break;
@@ -2019,7 +2035,7 @@
}
pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
- if (!pmctl->mctl_open) {
+ if (!pmctl || !pmctl->mctl_open) {
D("%s: media contoller is not inited\n",
__func__);
rc = -ENODEV;
@@ -2297,7 +2313,10 @@
/* assume there is only one active camera possible*/
config_cam->p_mctl =
msm_cam_server_get_mctl(g_server_dev.pcam_active->mctl_handle);
-
+ if (!config_cam->p_mctl) {
+ pr_err("%s: cannot find mctl\n", __func__);
+ return -ENODEV;
+ }
INIT_HLIST_HEAD(&config_cam->p_mctl->stats_info.pmem_stats_list);
spin_lock_init(&config_cam->p_mctl->stats_info.pmem_stats_spinlock);
@@ -2387,6 +2406,7 @@
/* Next, copy the userspace event ctrl structure */
if (copy_from_user((void *)&u_isp_event, user_ptr,
sizeof(struct msm_isp_event_ctrl))) {
+ rc = -EFAULT;
break;
}
/* Save the pointer of the user allocated command buffer*/
@@ -2398,6 +2418,7 @@
&ev, fp->f_flags & O_NONBLOCK);
if (rc < 0) {
pr_err("no pending events?");
+ rc = -EFAULT;
break;
}
/* Use k_isp_event to point to the event_ctrl structure
@@ -2427,6 +2448,7 @@
break;
}
kfree(k_msg_value);
+ k_msg_value = NULL;
}
}
}
@@ -2439,6 +2461,7 @@
break;
}
kfree(k_isp_event);
+ k_isp_event = NULL;
/* Copy the v4l2_event structure back to the user*/
if (copy_to_user((void __user *)arg, &ev,
diff --git a/drivers/media/video/msm/wfd/wfd-ioctl.c b/drivers/media/video/msm/wfd/wfd-ioctl.c
index 68a8a7d..c198815 100644
--- a/drivers/media/video/msm/wfd/wfd-ioctl.c
+++ b/drivers/media/video/msm/wfd/wfd-ioctl.c
@@ -288,7 +288,7 @@
mdp_mregion->ion_handle = enc_mregion->ion_handle;
rc = ion_map_iommu(wfd_dev->ion_client, mdp_mregion->ion_handle,
- DISPLAY_WRITE_DOMAIN, GEN_POOL, SZ_4K,
+ DISPLAY_DOMAIN, GEN_POOL, SZ_4K,
0, (unsigned long *)&mdp_mregion->paddr,
(unsigned long *)&mdp_mregion->size, 0, 0);
if (rc) {
@@ -363,7 +363,7 @@
if (mpair->mdp->paddr)
ion_unmap_iommu(wfd_dev->ion_client,
mpair->mdp->ion_handle,
- DISPLAY_WRITE_DOMAIN, GEN_POOL);
+ DISPLAY_DOMAIN, GEN_POOL);
if (mpair->enc->paddr)
ion_unmap_iommu(wfd_dev->ion_client,
diff --git a/drivers/media/video/msm_vidc/msm_venc.c b/drivers/media/video/msm_vidc/msm_venc.c
index ec93628..63f23eb 100644
--- a/drivers/media/video/msm_vidc/msm_venc.c
+++ b/drivers/media/video/msm_vidc/msm_venc.c
@@ -477,7 +477,14 @@
rc = vidc_hal_session_set_property((void *)inst->session,
HAL_PARAM_FRAME_SIZE, &frame_sz);
if (rc) {
- pr_err("Failed to set hal property for framesize\n");
+ pr_err("Failed to set framesize for Output port\n");
+ break;
+ }
+ frame_sz.buffer_type = HAL_BUFFER_OUTPUT;
+ rc = vidc_hal_session_set_property((void *)inst->session,
+ HAL_PARAM_FRAME_SIZE, &frame_sz);
+ if (rc) {
+ pr_err("Failed to set framesize for Capture port\n");
break;
}
rc = msm_comm_try_get_bufreqs(inst);
@@ -743,6 +750,8 @@
venc_profile_level.profile = control.value;
profile_level.level = venc_profile_level.level;
pdata = &profile_level;
+ pr_debug("\nprofile: %d\n",
+ profile_level.profile);
break;
case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
property_id =
@@ -804,6 +813,8 @@
venc_profile_level.level = control.value;
profile_level.profile = venc_profile_level.profile;
pdata = &profile_level;
+ pr_debug("\nLevel: %d\n",
+ profile_level.level);
break;
case V4L2_CID_MPEG_VIDC_VIDEO_ROTATION:
property_id =
@@ -895,7 +906,7 @@
case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA:
property_id =
HAL_PARAM_VENC_H264_DEBLOCK_CONTROL;
- h264_db_control.slicebeta_offset = control.value;
+ h264_db_control.slice_beta_offset = control.value;
pdata = &h264_db_control;
default:
break;
@@ -1164,7 +1175,7 @@
}
rc = vb2_dqbuf(q, b, true);
if (rc)
- pr_err("Failed to qbuf, %d\n", rc);
+ pr_err("Failed to dqbuf, %d\n", rc);
return rc;
}
diff --git a/drivers/media/video/msm_vidc/msm_vidc_common.c b/drivers/media/video/msm_vidc/msm_vidc_common.c
index 9b617aa..ba5fdc4 100644
--- a/drivers/media/video/msm_vidc/msm_vidc_common.c
+++ b/drivers/media/video/msm_vidc/msm_vidc_common.c
@@ -813,7 +813,7 @@
if (rc || state == inst->state)
break;
default:
- pr_err("State not recognized\n");
+ pr_err("State not recognized: %d\n", flipped_state);
rc = -EINVAL;
break;
}
@@ -855,6 +855,7 @@
frame_data.alloc_len = vb->v4l2_planes[0].length;
frame_data.filled_len = vb->v4l2_planes[0].bytesused;
frame_data.device_addr = vb->v4l2_planes[0].m.userptr;
+ frame_data.timestamp = vb->v4l2_buf.timestamp.tv_usec;
frame_data.clnt_data = (u32)vb;
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
frame_data.buffer_type = HAL_BUFFER_INPUT;
@@ -871,10 +872,8 @@
frame_data.filled_len = 0;
frame_data.buffer_type = HAL_BUFFER_OUTPUT;
frame_data.extradata_addr = 0;
- pr_debug("Sending ftb to hal...: Alloc: %d :filled: %d"
- " extradata_addr: %d\n", frame_data.alloc_len,
- frame_data.filled_len,
- frame_data.extradata_addr);
+ pr_debug("Sending ftb to hal..: Alloc: %d :filled: %d\n",
+ frame_data.alloc_len, frame_data.filled_len);
rc = vidc_hal_session_ftb((void *) inst->session,
&frame_data);
} else {
diff --git a/drivers/media/video/msm_vidc/vidc_hal.c b/drivers/media/video/msm_vidc/vidc_hal.c
index 13a319d9..583b5a9 100644
--- a/drivers/media/video/msm_vidc/vidc_hal.c
+++ b/drivers/media/video/msm_vidc/vidc_hal.c
@@ -23,7 +23,7 @@
#define REG_ADDR_OFFSET_BITMASK 0x000FFFFF
/*Workaround for virtio */
-#define HFI_VIRTIO_FW_BIAS 0x34f00000
+#define HFI_VIRTIO_FW_BIAS 0x14f00000
struct hal_device_data hal_ctxt;
@@ -40,7 +40,7 @@
sys_init = (struct hfi_cmd_sys_session_init_packet *)packet;
sess = (struct hal_session *) sys_init->session_id;
- switch (sys_init->packet) {
+ switch (sys_init->packet_type) {
case HFI_CMD_SESSION_EMPTY_BUFFER:
if (sess->is_decoder) {
struct hfi_cmd_session_empty_buffer_compressed_packet
@@ -73,7 +73,7 @@
struct hfi_buffer_info *buff;
buff = (struct hfi_buffer_info *) pkt->rg_buffer_info;
buff->buffer_addr -= HFI_VIRTIO_FW_BIAS;
- buff->extradata_addr -= HFI_VIRTIO_FW_BIAS;
+ buff->extra_data_addr -= HFI_VIRTIO_FW_BIAS;
} else {
for (i = 0; i < pkt->num_buffers; i++)
pkt->rg_buffer_info[i] -= HFI_VIRTIO_FW_BIAS;
@@ -89,7 +89,7 @@
struct hfi_buffer_info *buff;
buff = (struct hfi_buffer_info *) pkt->rg_buffer_info;
buff->buffer_addr -= HFI_VIRTIO_FW_BIAS;
- buff->extradata_addr -= HFI_VIRTIO_FW_BIAS;
+ buff->extra_data_addr -= HFI_VIRTIO_FW_BIAS;
} else {
for (i = 0; i < pkt->num_buffers; i++)
pkt->rg_buffer_info[i] -= HFI_VIRTIO_FW_BIAS;
@@ -640,7 +640,8 @@
goto err_no_dev;
}
pkt.size = sizeof(struct hfi_cmd_sys_init_packet);
- pkt.packet = HFI_CMD_SYS_INIT;
+ pkt.packet_type = HFI_CMD_SYS_INIT;
+ pkt.arch_type = HFI_ARCH_OX_OFFSET;
if (vidc_hal_iface_cmdq_write(dev, &pkt)) {
rc = -ENOTEMPTY;
goto err_write_fail;
@@ -664,8 +665,6 @@
}
write_register(dev->hal_data->register_base_addr,
VIDC_CPU_CS_SCIACMDARG3, 0, 0);
- disable_irq_nosync(dev->hal_data->irq);
- vidc_hal_interface_queues_release(dev);
HAL_MSG_INFO("\nHAL exited\n");
return 0;
}
@@ -742,8 +741,8 @@
switch (resource_hdr->resource_id) {
case VIDC_RESOURCE_OCMEM:
{
- struct hfi_resource_ocmem_type *hfioc_mem =
- (struct hfi_resource_ocmem_type *)
+ struct hfi_resource_ocmem *hfioc_mem =
+ (struct hfi_resource_ocmem *)
&pkt->rg_resource_data[0];
struct vidc_mem_addr *vidc_oc_mem =
(struct vidc_mem_addr *) resource_value;
@@ -751,7 +750,7 @@
pkt->resource_type = HFI_RESOURCE_OCMEM;
hfioc_mem->size = (u32) vidc_oc_mem->mem_size;
hfioc_mem->mem = (u8 *) vidc_oc_mem->align_device_addr;
- pkt->size += sizeof(struct hfi_resource_ocmem_type);
+ pkt->size += sizeof(struct hfi_resource_ocmem);
if (vidc_hal_iface_cmdq_write(dev, pkt))
rc = -ENOTEMPTY;
break;
@@ -807,7 +806,41 @@
rc = -ENOTEMPTY;
return rc;
}
-
+static u32 get_hfi_buffer(int hal_buffer)
+{
+ u32 buffer;
+ switch (hal_buffer) {
+ case HAL_BUFFER_INPUT:
+ buffer = HFI_BUFFER_INPUT;
+ break;
+ case HAL_BUFFER_OUTPUT:
+ buffer = HFI_BUFFER_OUTPUT;
+ break;
+ case HAL_BUFFER_OUTPUT2:
+ buffer = HFI_BUFFER_OUTPUT;
+ break;
+ case HAL_BUFFER_EXTRADATA_INPUT:
+ buffer = HFI_BUFFER_EXTRADATA_INPUT;
+ break;
+ case HAL_BUFFER_EXTRADATA_OUTPUT:
+ buffer = HFI_BUFFER_EXTRADATA_OUTPUT;
+ break;
+ case HAL_BUFFER_EXTRADATA_OUTPUT2:
+ buffer = HFI_BUFFER_EXTRADATA_OUTPUT2;
+ break;
+ case HAL_BUFFER_INTERNAL_SCRATCH:
+ buffer = HFI_BUFFER_INTERNAL_SCRATCH;
+ break;
+ case HAL_BUFFER_INTERNAL_PERSIST:
+ buffer = HFI_BUFFER_INTERNAL_PERSIST;
+ break;
+ default:
+ HAL_MSG_ERROR("Invalid buffer type : 0x%x\n", hal_buffer);
+ buffer = 0;
+ break;
+ }
+ return buffer;
+}
int vidc_hal_session_set_property(void *sess,
enum hal_property ptype, void *pdata)
{
@@ -832,24 +865,37 @@
switch (ptype) {
case HAL_CONFIG_FRAME_RATE:
{
- struct hfi_frame_rate *hfi_fps;
+ struct hfi_frame_rate *hfi;
+ u32 buffer;
+ struct hal_frame_rate *prop =
+ (struct hal_frame_rate *) pdata;
pkt->rg_property_data[0] = HFI_PROPERTY_CONFIG_FRAME_RATE;
- hfi_fps = (struct hfi_frame_rate *) &pkt->rg_property_data[1];
- memcpy(hfi_fps, (struct hfi_frame_rate *)
- pdata, sizeof(struct hfi_frame_rate));
+ hfi = (struct hfi_frame_rate *) &pkt->rg_property_data[1];
+ buffer = get_hfi_buffer(prop->buffer_type);
+ if (buffer)
+ hfi->buffer_type = buffer;
+ else
+ return -EINVAL;
+ hfi->frame_rate = prop->frame_rate;
pkt->size += sizeof(u32) + sizeof(struct hfi_frame_rate);
break;
}
case HAL_PARAM_UNCOMPRESSED_FORMAT_SELECT:
{
- struct hfi_uncompressed_format_select *hfi_buf_fmt;
+ u32 buffer;
+ struct hfi_uncompressed_format_select *hfi;
+ struct hal_uncompressed_format_select *prop =
+ (struct hal_uncompressed_format_select *) pdata;
pkt->rg_property_data[0] =
HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT;
- hfi_buf_fmt =
- (struct hfi_uncompressed_format_select *)
- &pkt->rg_property_data[1];
- memcpy(hfi_buf_fmt, (struct hfi_uncompressed_format_select *)
- pdata, sizeof(struct hfi_uncompressed_format_select));
+ hfi = (struct hfi_uncompressed_format_select *)
+ &pkt->rg_property_data[1];
+ buffer = get_hfi_buffer(prop->buffer_type);
+ if (buffer)
+ hfi->buffer_type = buffer;
+ else
+ return -EINVAL;
+ hfi->format = prop->format;
pkt->size += sizeof(u32) + sizeof(struct
hfi_uncompressed_format_select);
break;
@@ -862,11 +908,18 @@
break;
case HAL_PARAM_FRAME_SIZE:
{
- struct hfi_frame_size *hfi_rect;
+ u32 buffer;
+ struct hfi_frame_size *hfi;
+ struct hal_frame_size *prop = (struct hal_frame_size *) pdata;
pkt->rg_property_data[0] = HFI_PROPERTY_PARAM_FRAME_SIZE;
- hfi_rect = (struct hfi_frame_size *) &pkt->rg_property_data[1];
- memcpy(hfi_rect, (struct hfi_frame_size *) pdata,
- sizeof(struct hfi_frame_size));
+ hfi = (struct hfi_frame_size *) &pkt->rg_property_data[1];
+ buffer = get_hfi_buffer(prop->buffer_type);
+ if (buffer)
+ hfi->buffer_type = buffer;
+ else
+ return -EINVAL;
+ hfi->height = prop->height;
+ hfi->width = prop->width;
pkt->size += sizeof(u32) + sizeof(struct hfi_frame_size);
break;
}
@@ -875,38 +928,85 @@
struct hfi_enable *hfi;
pkt->rg_property_data[0] = HFI_PROPERTY_CONFIG_REALTIME;
hfi = (struct hfi_enable *) &pkt->rg_property_data[1];
- memcpy(hfi, (struct hfi_enable *) pdata,
- sizeof(struct hfi_enable));
- pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
+ hfi->enable = ((struct hfi_enable *) pdata)->enable;
+ pkt->size += sizeof(u32) * 2;
break;
}
case HAL_PARAM_BUFFER_COUNT_ACTUAL:
{
+ u32 buffer;
struct hfi_buffer_count_actual *hfi;
+ struct hal_buffer_count_actual *prop =
+ (struct hal_buffer_count_actual *) pdata;
pkt->rg_property_data[0] =
HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL;
hfi = (struct hfi_buffer_count_actual *)
&pkt->rg_property_data[1];
- memcpy(hfi, (struct hfi_buffer_count_actual *) pdata,
- sizeof(struct hfi_buffer_count_actual));
+ hfi->buffer_count_actual = prop->buffer_count_actual;
+ buffer = get_hfi_buffer(prop->buffer_type);
+ if (buffer)
+ hfi->buffer_type = buffer;
+ else
+ return -EINVAL;
pkt->size += sizeof(u32) + sizeof(struct
hfi_buffer_count_actual);
break;
}
case HAL_PARAM_NAL_STREAM_FORMAT_SELECT:
{
+ struct hal_nal_stream_format_supported *prop =
+ (struct hal_nal_stream_format_supported *)pdata;
pkt->rg_property_data[0] =
HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SELECT;
- pkt->rg_property_data[1] = (enum HFI_NAL_STREAM_FORMAT)pdata;
- pkt->size += sizeof(u32) + sizeof(enum HFI_NAL_STREAM_FORMAT);
+ HAL_MSG_ERROR("\ndata is :%d",
+ prop->nal_stream_format_supported);
+ switch (prop->nal_stream_format_supported) {
+ case HAL_NAL_FORMAT_STARTCODES:
+ pkt->rg_property_data[1] =
+ HFI_NAL_FORMAT_STARTCODES;
+ break;
+ case HAL_NAL_FORMAT_ONE_NAL_PER_BUFFER:
+ pkt->rg_property_data[1] =
+ HFI_NAL_FORMAT_ONE_NAL_PER_BUFFER;
+ break;
+ case HAL_NAL_FORMAT_ONE_BYTE_LENGTH:
+ pkt->rg_property_data[1] =
+ HFI_NAL_FORMAT_ONE_BYTE_LENGTH;
+ break;
+ case HAL_NAL_FORMAT_TWO_BYTE_LENGTH:
+ pkt->rg_property_data[1] =
+ HFI_NAL_FORMAT_TWO_BYTE_LENGTH;
+ break;
+ case HAL_NAL_FORMAT_FOUR_BYTE_LENGTH:
+ pkt->rg_property_data[1] =
+ HFI_NAL_FORMAT_FOUR_BYTE_LENGTH;
+ break;
+ default:
+ HAL_MSG_ERROR("Invalid nal format: 0x%x",
+ prop->nal_stream_format_supported);
+ break;
+ }
+ pkt->size += sizeof(u32) * 2;
break;
}
case HAL_PARAM_VDEC_OUTPUT_ORDER:
{
+ int *data = (int *) pdata;
pkt->rg_property_data[0] =
HFI_PROPERTY_PARAM_VDEC_OUTPUT_ORDER;
- pkt->rg_property_data[1] = (enum HFI_OUTPUT_ORDER)pdata;
- pkt->size += sizeof(u32) + sizeof(enum HFI_OUTPUT_ORDER);
+ switch (*data) {
+ case HAL_OUTPUT_ORDER_DECODE:
+ pkt->rg_property_data[1] = HFI_OUTPUT_ORDER_DECODE;
+ break;
+ case HAL_OUTPUT_ORDER_DISPLAY:
+ pkt->rg_property_data[1] = HFI_OUTPUT_ORDER_DISPLAY;
+ break;
+ default:
+ HAL_MSG_ERROR("invalid output order: 0x%x",
+ *data);
+ break;
+ }
+ pkt->size += sizeof(u32) * 2;
break;
}
case HAL_PARAM_VDEC_PICTURE_TYPE_DECODE:
@@ -916,7 +1016,7 @@
HFI_PROPERTY_PARAM_VDEC_PICTURE_TYPE_DECODE;
hfi = (struct hfi_enable_picture *) &pkt->rg_property_data[1];
hfi->picture_type = (u32) pdata;
- pkt->size += sizeof(u32) + sizeof(struct hfi_enable_picture);
+ pkt->size += sizeof(u32) * 2;
break;
}
case HAL_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO:
@@ -925,9 +1025,8 @@
pkt->rg_property_data[0] =
HFI_PROPERTY_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO;
hfi = (struct hfi_enable *) &pkt->rg_property_data[1];
- memcpy(hfi, (struct hfi_enable *) pdata,
- sizeof(struct hfi_enable));
- pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
+ hfi->enable = ((struct hfi_enable *) pdata)->enable;
+ pkt->size += sizeof(u32) * 2;
break;
}
case HAL_CONFIG_VDEC_POST_LOOP_DEBLOCKER:
@@ -936,41 +1035,64 @@
pkt->rg_property_data[0] =
HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER;
hfi = (struct hfi_enable *) &pkt->rg_property_data[1];
- memcpy(hfi, (struct hfi_enable *) pdata,
- sizeof(struct hfi_enable));
- pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
+ hfi->enable = ((struct hfi_enable *) pdata)->enable;
+ pkt->size += sizeof(u32) * 2;
break;
}
case HAL_PARAM_VDEC_MULTI_STREAM:
{
+ u32 buffer;
struct hfi_multi_stream *hfi;
+ struct hal_multi_stream *prop =
+ (struct hal_multi_stream *) pdata;
pkt->rg_property_data[0] =
HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM;
hfi = (struct hfi_multi_stream *) &pkt->rg_property_data[1];
- memcpy(hfi, (struct hfi_multi_stream *)pdata,
- sizeof(struct hfi_multi_stream));
+ buffer = get_hfi_buffer(prop->buffer_type);
+ if (buffer)
+ hfi->buffer_type = buffer;
+ else
+ return -EINVAL;
+ hfi->enable = prop->enable;
+ hfi->width = prop->width;
+ hfi->height = prop->height;
pkt->size += sizeof(u32) + sizeof(struct hfi_multi_stream);
break;
}
case HAL_PARAM_VDEC_DISPLAY_PICTURE_BUFFER_COUNT:
{
- struct hfi_display_picture_buffer_count *hfi_disp_buf;
+ struct hfi_display_picture_buffer_count *hfi;
+ struct hal_display_picture_buffer_count *prop =
+ (struct hal_display_picture_buffer_count *) pdata;
pkt->rg_property_data[0] =
HFI_PROPERTY_PARAM_VDEC_DISPLAY_PICTURE_BUFFER_COUNT;
- hfi_disp_buf = (struct hfi_display_picture_buffer_count *)
+ hfi = (struct hfi_display_picture_buffer_count *)
&pkt->rg_property_data[1];
- memcpy(hfi_disp_buf,
- (struct hfi_display_picture_buffer_count *)pdata,
- sizeof(struct hfi_display_picture_buffer_count));
+ hfi->count = prop->count;
+ hfi->enable = prop->enable;
pkt->size += sizeof(u32) +
sizeof(struct hfi_display_picture_buffer_count);
break;
}
case HAL_PARAM_DIVX_FORMAT:
{
+ int *data = pdata;
pkt->rg_property_data[0] = HFI_PROPERTY_PARAM_DIVX_FORMAT;
- pkt->rg_property_data[1] = (enum HFI_DIVX_FORMAT)pdata;
- pkt->size += sizeof(u32) + sizeof(enum HFI_DIVX_FORMAT);
+ switch (*data) {
+ case HAL_DIVX_FORMAT_4:
+ pkt->rg_property_data[1] = HFI_DIVX_FORMAT_4;
+ break;
+ case HAL_DIVX_FORMAT_5:
+ pkt->rg_property_data[1] = HFI_DIVX_FORMAT_5;
+ break;
+ case HAL_DIVX_FORMAT_6:
+ pkt->rg_property_data[1] = HFI_DIVX_FORMAT_6;
+ break;
+ default:
+ HAL_MSG_ERROR("Invalid divx format: 0x%x", *data);
+ break;
+ }
+ pkt->size += sizeof(u32) * 2;
break;
}
case HAL_CONFIG_VDEC_MB_ERROR_MAP_REPORTING:
@@ -979,25 +1101,23 @@
pkt->rg_property_data[0] =
HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP_REPORTING;
hfi = (struct hfi_enable *) &pkt->rg_property_data[1];
- memcpy(hfi, (struct hfi_enable *) pdata,
- sizeof(struct hfi_enable));
- pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
+ hfi->enable = ((struct hfi_enable *) pdata)->enable;
+ pkt->size += sizeof(u32) * 2;
break;
}
case HAL_PARAM_VDEC_CONTINUE_DATA_TRANSFER:
{
- struct hfi_enable *enable;
+ struct hfi_enable *hfi;
pkt->rg_property_data[0] =
HFI_PROPERTY_PARAM_VDEC_CONTINUE_DATA_TRANSFER;
- enable = (struct hfi_enable *) &pkt->rg_property_data[1];
- memcpy(enable, (struct hfi_enable *) pdata,
- sizeof(struct hfi_enable));
- pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
+ hfi = (struct hfi_enable *) &pkt->rg_property_data[1];
+ hfi->enable = ((struct hfi_enable *) pdata)->enable;
+ pkt->size += sizeof(u32) * 2;
break;
}
case HAL_CONFIG_VENC_REQUEST_IFRAME:
pkt->rg_property_data[0] =
- HFI_PROPERTY_CONFIG_VENC_REQUEST_IFRAME;
+ HFI_PROPERTY_CONFIG_VENC_REQUEST_SYNC_FRAME;
break;
case HAL_PARAM_VENC_MPEG4_SHORT_HEADER:
break;
@@ -1009,31 +1129,64 @@
pkt->rg_property_data[0] =
HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE;
hfi = (struct hfi_bitrate *) &pkt->rg_property_data[1];
- hfi->bit_rate = ((struct hfi_bitrate *)pdata)->bit_rate;
- pkt->size += sizeof(u32) + sizeof(struct hfi_bitrate);
+ hfi->bit_rate = ((struct hal_bitrate *)pdata)->bit_rate;
+ pkt->size += sizeof(u32) * 2;
break;
}
case HAL_PARAM_PROFILE_LEVEL_CURRENT:
{
- struct hfi_profile_level *hfi_profile_level;
+ struct hfi_profile_level *hfi;
+ struct hal_profile_level *prop =
+ (struct hal_profile_level *) pdata;
pkt->rg_property_data[0] =
HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT;
- hfi_profile_level = (struct hfi_profile_level *)
- &pkt->rg_property_data[1];
- memcpy(hfi_profile_level, (struct hfi_profile_level *) pdata,
- sizeof(struct hfi_profile_level));
+ hfi = (struct hfi_profile_level *)
+ &pkt->rg_property_data[1];
+ hfi->level = (u32) prop->level;
+ hfi->profile = prop->profile;
+ if (!hfi->profile)
+ hfi->profile = HFI_H264_PROFILE_HIGH;
+ if (!hfi->level)
+ hfi->level = 1;
pkt->size += sizeof(u32) + sizeof(struct hfi_profile_level);
break;
}
case HAL_PARAM_VENC_H264_ENTROPY_CONTROL:
{
struct hfi_h264_entropy_control *hfi;
+ struct hal_h264_entropy_control *prop =
+ (struct hal_h264_entropy_control *) pdata;
pkt->rg_property_data[0] =
HFI_PROPERTY_PARAM_VENC_H264_ENTROPY_CONTROL;
hfi = (struct hfi_h264_entropy_control *)
&pkt->rg_property_data[1];
- memcpy(hfi, (struct hfi_h264_entropy_control *) pdata,
- sizeof(struct hfi_h264_entropy_control));
+ switch (prop->entropy_mode) {
+ case HAL_H264_ENTROPY_CAVLC:
+ hfi->cabac_model = HFI_H264_ENTROPY_CAVLC;
+ break;
+ case HAL_H264_ENTROPY_CABAC:
+ hfi->cabac_model = HFI_H264_ENTROPY_CABAC;
+ switch (prop->cabac_model) {
+ case HAL_H264_CABAC_MODEL_0:
+ hfi->cabac_model = HFI_H264_CABAC_MODEL_0;
+ break;
+ case HAL_H264_CABAC_MODEL_1:
+ hfi->cabac_model = HFI_H264_CABAC_MODEL_1;
+ break;
+ case HAL_H264_CABAC_MODEL_2:
+ hfi->cabac_model = HFI_H264_CABAC_MODEL_2;
+ break;
+ default:
+ HAL_MSG_ERROR("Invalid cabac model 0x%x",
+ prop->cabac_model);
+ break;
+ }
+ break;
+ default:
+ HAL_MSG_ERROR("Invalid entropy selected: 0x%x",
+ prop->entropy_mode);
+ break;
+ }
pkt->size += sizeof(u32) + sizeof(
struct hfi_h264_entropy_control);
break;
@@ -1042,8 +1195,28 @@
{
pkt->rg_property_data[0] =
HFI_PROPERTY_PARAM_VENC_RATE_CONTROL;
- pkt->rg_property_data[1] = (enum HFI_RATE_CONTROL)pdata;
- pkt->size += sizeof(u32) + sizeof(enum HFI_RATE_CONTROL);
+ switch ((enum hal_rate_control)pdata) {
+ case HAL_RATE_CONTROL_OFF:
+ pkt->rg_property_data[1] = HFI_RATE_CONTROL_OFF;
+ break;
+ case HAL_RATE_CONTROL_CBR_CFR:
+ pkt->rg_property_data[1] = HFI_RATE_CONTROL_CBR_CFR;
+ break;
+ case HAL_RATE_CONTROL_CBR_VFR:
+ pkt->rg_property_data[1] = HFI_RATE_CONTROL_CBR_VFR;
+ break;
+ case HAL_RATE_CONTROL_VBR_CFR:
+ pkt->rg_property_data[1] = HFI_RATE_CONTROL_VBR_CFR;
+ break;
+ case HAL_RATE_CONTROL_VBR_VFR:
+ pkt->rg_property_data[1] = HFI_RATE_CONTROL_VBR_VFR;
+ break;
+ default:
+ HAL_MSG_ERROR("Invalid Rate control setting: 0x%x",
+ (int) pdata);
+ break;
+ }
+ pkt->size += sizeof(u32) * 2;
break;
}
case HAL_PARAM_VENC_MPEG4_TIME_RESOLUTION:
@@ -1056,8 +1229,7 @@
hfi->time_increment_resolution =
((struct hal_mpeg4_time_resolution *)pdata)->
time_increment_resolution;
- pkt->size += sizeof(u32) + sizeof(
- struct hfi_mpeg4_time_resolution);
+ pkt->size += sizeof(u32) * 2;
break;
}
case HAL_PARAM_VENC_MPEG4_HEADER_EXTENSION:
@@ -1066,20 +1238,36 @@
pkt->rg_property_data[0] =
HFI_PROPERTY_PARAM_VENC_MPEG4_HEADER_EXTENSION;
hfi = (struct hfi_mpeg4_header_extension *)
- &pkt->rg_property_data[1];
+ &pkt->rg_property_data[1];
hfi->header_extension = (u32) pdata;
- pkt->size += sizeof(u32) +
- sizeof(struct hfi_mpeg4_header_extension);
+ pkt->size += sizeof(u32) * 2;
break;
}
case HAL_PARAM_VENC_H264_DEBLOCK_CONTROL:
{
struct hfi_h264_db_control *hfi;
+ struct hal_h264_db_control *prop =
+ (struct hal_h264_db_control *) pdata;
pkt->rg_property_data[0] =
HFI_PROPERTY_PARAM_VENC_H264_DEBLOCK_CONTROL;
hfi = (struct hfi_h264_db_control *) &pkt->rg_property_data[1];
- memcpy(hfi, (struct hfi_h264_db_control *) pdata,
- sizeof(struct hfi_h264_db_control));
+ switch (prop->mode) {
+ case HAL_H264_DB_MODE_DISABLE:
+ hfi->mode = HFI_H264_DB_MODE_DISABLE;
+ break;
+ case HAL_H264_DB_MODE_SKIP_SLICE_BOUNDARY:
+ hfi->mode = HFI_H264_DB_MODE_SKIP_SLICE_BOUNDARY;
+ break;
+ case HAL_H264_DB_MODE_ALL_BOUNDARY:
+ hfi->mode = HFI_H264_DB_MODE_ALL_BOUNDARY;
+ break;
+ default:
+ HAL_MSG_ERROR("Invalid deblocking mode: 0x%x",
+ prop->mode);
+ break;
+ }
+ hfi->slice_alpha_offset = prop->slice_alpha_offset;
+ hfi->slice_beta_offset = prop->slice_beta_offset;
pkt->size += sizeof(u32) +
sizeof(struct hfi_h264_db_control);
break;
@@ -1090,11 +1278,10 @@
pkt->rg_property_data[0] =
HFI_PROPERTY_PARAM_VENC_TEMPORAL_SPATIAL_TRADEOFF;
hfi = (struct hfi_temporal_spatial_tradeoff *)
- &pkt->rg_property_data[1];
+ &pkt->rg_property_data[1];
hfi->ts_factor = ((struct hfi_temporal_spatial_tradeoff *)
pdata)->ts_factor;
- pkt->size += sizeof(u32) +
- sizeof(struct hfi_temporal_spatial_tradeoff);
+ pkt->size += sizeof(u32) * 2;
break;
}
case HAL_PARAM_VENC_SESSION_QP:
@@ -1125,7 +1312,7 @@
pkt->rg_property_data[0] = HFI_PROPERTY_CONFIG_VENC_IDR_PERIOD;
hfi = (struct hfi_idr_period *) &pkt->rg_property_data[1];
hfi->idr_period = ((struct hfi_idr_period *) pdata)->idr_period;
- pkt->size += sizeof(u32) + sizeof(struct hfi_idr_period);
+ pkt->size += sizeof(u32) * 2;
break;
}
case HAL_CONFIG_VPE_OPERATIONS:
@@ -1133,25 +1320,67 @@
case HAL_PARAM_VENC_INTRA_REFRESH:
{
struct hfi_intra_refresh *hfi;
+ struct hal_intra_refresh *prop =
+ (struct hal_intra_refresh *) pdata;
pkt->rg_property_data[0] =
HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH;
hfi = (struct hfi_intra_refresh *) &pkt->rg_property_data[1];
- memcpy(hfi, (struct hfi_intra_refresh *) pdata,
- sizeof(struct hfi_intra_refresh));
+ switch (prop->mode) {
+ case HAL_INTRA_REFRESH_NONE:
+ hfi->mode = HFI_INTRA_REFRESH_NONE;
+ break;
+ case HAL_INTRA_REFRESH_ADAPTIVE:
+ hfi->mode = HFI_INTRA_REFRESH_ADAPTIVE;
+ break;
+ case HAL_INTRA_REFRESH_CYCLIC:
+ hfi->mode = HFI_INTRA_REFRESH_CYCLIC;
+ break;
+ case HAL_INTRA_REFRESH_CYCLIC_ADAPTIVE:
+ hfi->mode = HFI_INTRA_REFRESH_CYCLIC_ADAPTIVE;
+ break;
+ case HAL_INTRA_REFRESH_RANDOM:
+ hfi->mode = HFI_INTRA_REFRESH_RANDOM;
+ break;
+ default:
+ HAL_MSG_ERROR("Invalid intra refresh setting: 0x%x",
+ prop->mode);
+ break;
+ }
+ hfi->air_mbs = prop->air_mbs;
+ hfi->air_ref = prop->air_ref;
+ hfi->cir_mbs = prop->cir_mbs;
pkt->size += sizeof(u32) + sizeof(struct hfi_intra_refresh);
break;
}
case HAL_PARAM_VENC_MULTI_SLICE_CONTROL:
{
struct hfi_multi_slice_control *hfi;
+ struct hal_multi_slice_control *prop =
+ (struct hal_multi_slice_control *) pdata;
pkt->rg_property_data[0] =
HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_CONTROL;
hfi = (struct hfi_multi_slice_control *)
- &pkt->rg_property_data[1];
- memcpy(hfi, (struct hfi_multi_slice_control *) pdata,
- sizeof(struct hfi_multi_slice_control));
+ &pkt->rg_property_data[1];
+ switch (prop->multi_slice) {
+ case HAL_MULTI_SLICE_OFF:
+ hfi->multi_slice = HFI_MULTI_SLICE_OFF;
+ break;
+ case HAL_MULTI_SLICE_GOB:
+ hfi->multi_slice = HFI_MULTI_SLICE_GOB;
+ break;
+ case HAL_MULTI_SLICE_BY_MB_COUNT:
+ hfi->multi_slice = HFI_MULTI_SLICE_BY_MB_COUNT;
+ break;
+ case HAL_MULTI_SLICE_BY_BYTE_COUNT:
+ hfi->multi_slice = HFI_MULTI_SLICE_BY_BYTE_COUNT;
+ break;
+ default:
+ HAL_MSG_ERROR("Invalid slice settings: 0x%x",
+ prop->multi_slice);
+ break;
+ }
pkt->size += sizeof(u32) + sizeof(struct
- hfi_multi_slice_control);
+ hfi_multi_slice_control);
break;
}
case HAL_CONFIG_VPE_DEINTERLACE:
@@ -1161,8 +1390,8 @@
struct hfi_debug_config *hfi;
pkt->rg_property_data[0] = HFI_PROPERTY_SYS_DEBUG_CONFIG;
hfi = (struct hfi_debug_config *) &pkt->rg_property_data[1];
- memcpy(hfi, (struct hfi_debug_config *) pdata,
- sizeof(struct hfi_debug_config));
+ hfi->debug_config = ((struct hal_debug_config *)
+ pdata)->debug_config;
pkt->size = sizeof(struct hfi_cmd_sys_set_property_packet) +
sizeof(struct hfi_debug_config);
break;
@@ -1353,7 +1582,7 @@
new_session->device = dev;
list_add_tail(&new_session->list, &dev->sess_head);
pkt.size = sizeof(struct hfi_cmd_sys_session_init_packet);
- pkt.packet = HFI_CMD_SYS_SESSION_INIT;
+ pkt.packet_type = HFI_CMD_SYS_SESSION_INIT;
pkt.session_id = (u32) new_session;
pkt.session_domain = session_type;
pkt.session_codec = codec_type;
@@ -1363,7 +1592,7 @@
}
static int vidc_hal_send_session_cmd(void *session_id,
- enum HFI_COMMAND pkt_type)
+ int pkt_type)
{
struct vidc_hal_session_cmd_pkt pkt;
int rc = 0;
@@ -1400,6 +1629,7 @@
int vidc_hal_session_set_buffers(void *sess,
struct vidc_buffer_addr_info *buffer_info)
{
+ u32 buffer;
struct hfi_cmd_session_set_buffers_packet *pkt;
u8 packet[VIDC_IFACEQ_VAR_LARGE_PKT_SIZE];
int rc = 0;
@@ -1430,7 +1660,7 @@
if ((buffer_info->buffer_type == HAL_BUFFER_OUTPUT) ||
(buffer_info->buffer_type == HAL_BUFFER_OUTPUT2)) {
struct hfi_buffer_info *buff;
- pkt->extradata_size = buffer_info->extradata_size;
+ pkt->extra_data_size = buffer_info->extradata_size;
pkt->size = sizeof(struct hfi_cmd_session_set_buffers_packet) -
sizeof(u32) + ((buffer_info->num_buffers) *
sizeof(struct hfi_buffer_info));
@@ -1438,25 +1668,23 @@
for (i = 0; i < pkt->num_buffers; i++) {
buff->buffer_addr =
buffer_info->align_device_addr;
- buff->extradata_addr =
+ buff->extra_data_addr =
buffer_info->extradata_addr;
}
} else {
- pkt->extradata_size = 0;
+ pkt->extra_data_size = 0;
pkt->size = sizeof(struct hfi_cmd_session_set_buffers_packet) +
((buffer_info->num_buffers - 1) * sizeof(u32));
for (i = 0; i < pkt->num_buffers; i++)
pkt->rg_buffer_info[i] =
buffer_info->align_device_addr;
}
-
- if (buffer_info->buffer_type == HAL_BUFFER_INTERNAL_SCRATCH)
- pkt->buffer_type = HFI_BUFFER_INTERNAL_SCRATCH;
- else if (buffer_info->buffer_type == HAL_BUFFER_INTERNAL_PERSIST)
- pkt->buffer_type = HFI_BUFFER_INTERNAL_PERSIST;
+ buffer = get_hfi_buffer(buffer_info->buffer_type);
+ if (buffer)
+ pkt->buffer_type = buffer;
else
- pkt->buffer_type = (enum HFI_BUFFER) buffer_info->buffer_type;
-
+ return -EINVAL;
+ HAL_MSG_INFO("set buffers: 0x%x", buffer_info->buffer_type);
if (vidc_hal_iface_cmdq_write(session->device, pkt))
rc = -ENOTEMPTY;
return rc;
@@ -1465,6 +1693,7 @@
int vidc_hal_session_release_buffers(void *sess,
struct vidc_buffer_addr_info *buffer_info)
{
+ u32 buffer;
struct hfi_cmd_session_release_buffer_packet *pkt;
u8 packet[VIDC_IFACEQ_VAR_LARGE_PKT_SIZE];
int rc = 0;
@@ -1486,7 +1715,6 @@
((buffer_info->num_buffers - 1) * sizeof(u32));
pkt->packet_type = HFI_CMD_SESSION_RELEASE_BUFFERS;
pkt->session_id = (u32) session;
- pkt->buffer_type = (enum HFI_BUFFER) buffer_info->buffer_type;
pkt->buffer_size = buffer_info->buffer_size;
pkt->num_buffers = buffer_info->num_buffers;
@@ -1497,10 +1725,10 @@
for (i = 0; i < pkt->num_buffers; i++) {
buff->buffer_addr =
buffer_info->align_device_addr;
- buff->extradata_addr =
+ buff->extra_data_addr =
buffer_info->extradata_addr;
}
- pkt->extradata_size = buffer_info->extradata_size;
+ pkt->extra_data_size = buffer_info->extradata_size;
pkt->size = sizeof(struct hfi_cmd_session_set_buffers_packet) -
sizeof(u32) + ((buffer_info->num_buffers) *
sizeof(struct hfi_buffer_info));
@@ -1508,11 +1736,16 @@
for (i = 0; i < pkt->num_buffers; i++)
pkt->rg_buffer_info[i] =
buffer_info->align_device_addr;
- pkt->extradata_size = 0;
+ pkt->extra_data_size = 0;
pkt->size = sizeof(struct hfi_cmd_session_set_buffers_packet) +
((buffer_info->num_buffers - 1) * sizeof(u32));
}
-
+ buffer = get_hfi_buffer(buffer_info->buffer_type);
+ if (buffer)
+ pkt->buffer_type = buffer;
+ else
+ return -EINVAL;
+ HAL_MSG_INFO("Release buffers: 0x%x", buffer_info->buffer_type);
if (vidc_hal_iface_cmdq_write(session->device, pkt))
rc = -ENOTEMPTY;
return rc;
@@ -1572,8 +1805,8 @@
struct hfi_cmd_session_empty_buffer_compressed_packet);
pkt.packet_type = HFI_CMD_SESSION_EMPTY_BUFFER;
pkt.session_id = (u32) session;
- pkt.timestamp_hi = (int) (((u64)input_frame->timestamp) >> 32);
- pkt.timestamp_lo = (int) input_frame->timestamp;
+ pkt.time_stamp_hi = (int) (((u64)input_frame->timestamp) >> 32);
+ pkt.time_stamp_lo = (int) input_frame->timestamp;
pkt.flags = input_frame->flags;
pkt.mark_target = input_frame->mark_target;
pkt.mark_data = input_frame->mark_data;
@@ -1590,11 +1823,11 @@
pkt;
pkt.size = sizeof(struct
hfi_cmd_session_empty_buffer_uncompressed_plane0_packet);
- pkt.packet = HFI_CMD_SESSION_EMPTY_BUFFER;
+ pkt.packet_type = HFI_CMD_SESSION_EMPTY_BUFFER;
pkt.session_id = (u32) session;
pkt.view_id = 0;
- pkt.timestamp_hi = (u32) (((u64)input_frame->timestamp) >> 32);
- pkt.timestamp_lo = (u32) input_frame->timestamp;
+ pkt.time_stamp_hi = (u32) (((u64)input_frame->timestamp) >> 32);
+ pkt.time_stamp_lo = (u32) input_frame->timestamp;
pkt.flags = input_frame->flags;
pkt.mark_target = input_frame->mark_target;
pkt.mark_data = input_frame->mark_data;
@@ -1734,8 +1967,23 @@
pkt.size = sizeof(struct hfi_cmd_session_flush_packet);
pkt.packet_type = HFI_CMD_SESSION_FLUSH;
pkt.session_id = (u32) session;
- pkt.flush_type = flush_mode;
-
+ switch (flush_mode) {
+ case HAL_FLUSH_INPUT:
+ pkt.flush_type = HFI_FLUSH_INPUT;
+ break;
+ case HAL_FLUSH_OUTPUT:
+ pkt.flush_type = HFI_FLUSH_OUTPUT;
+ break;
+ case HAL_FLUSH_OUTPUT2:
+ pkt.flush_type = HFI_FLUSH_OUTPUT2;
+ break;
+ case HAL_FLUSH_ALL:
+ pkt.flush_type = HFI_FLUSH_ALL;
+ break;
+ default:
+ HAL_MSG_ERROR("Invalid flush mode: 0x%x\n", flush_mode);
+ break;
+ }
if (vidc_hal_iface_cmdq_write(session->device, &pkt))
rc = -ENOTEMPTY;
return rc;
diff --git a/drivers/media/video/msm_vidc/vidc_hal.h b/drivers/media/video/msm_vidc/vidc_hal.h
index 15441f4..a36d7f3 100644
--- a/drivers/media/video/msm_vidc/vidc_hal.h
+++ b/drivers/media/video/msm_vidc/vidc_hal.h
@@ -11,27 +11,28 @@
*
*/
-#ifndef __VIDC_HAL_H__
-#define __VIDC_HAL_H__
+#ifndef __H_VIDC_HAL_H__
+#define __H_VIDC_HAL_H__
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include "vidc_hal_api.h"
#include "msm_smem.h"
+#include "vidc_hal_helper.h"
#ifdef HAL_MSG_LOG
-#define HAL_MSG_LOW(x...) pr_debug(KERN_INFO x)
-#define HAL_MSG_MEDIUM(x...) pr_debug(KERN_INFO x)
-#define HAL_MSG_HIGH(x...) pr_debug(KERN_INFO x)
+#define HAL_MSG_LOW(x...) pr_info(KERN_INFO x)
+#define HAL_MSG_MEDIUM(x...) pr_info(KERN_INFO x)
+#define HAL_MSG_HIGH(x...) pr_info(KERN_INFO x)
#else
#define HAL_MSG_LOW(x...)
#define HAL_MSG_MEDIUM(x...)
#define HAL_MSG_HIGH(x...)
#endif
-#define HAL_MSG_ERROR(x...) pr_err(KERN_INFO x)
-#define HAL_MSG_FATAL(x...) pr_err(KERN_INFO x)
-#define HAL_MSG_INFO(x...) pr_info(KERN_INFO x)
+#define HAL_MSG_ERROR(x...) pr_err(KERN_INFO x)
+#define HAL_MSG_FATAL(x...) pr_err(KERN_INFO x)
+#define HAL_MSG_INFO(x...) pr_info(KERN_INFO x)
#define HFI_MASK_QHDR_TX_TYPE 0xFF000000
#define HFI_MASK_QHDR_RX_TYPE 0x00FF0000
@@ -87,7 +88,7 @@
#define VIDC_IFACEQ_TABLE_SIZE (sizeof(struct hfi_queue_table_header) \
+ sizeof(struct hfi_queue_header) * VIDC_IFACEQ_NUMQ)
-#define VIDC_IFACEQ_QUEUE_SIZE (VIDC_IFACEQ_MAX_PKT_SIZE * \
+#define VIDC_IFACEQ_QUEUE_SIZE (VIDC_IFACEQ_MAX_PKT_SIZE * \
VIDC_IFACEQ_MAX_BUF_COUNT * VIDC_IFACE_MAX_PARALLEL_CLNTS)
#define VIDC_IFACEQ_GET_QHDR_START_ADDR(ptr, i) \
@@ -107,409 +108,162 @@
VIDC_HWREG_HVI_SOFTINTEN = 0xA,
};
-enum HFI_EVENT {
- HFI_EVENT_SYS_ERROR,
- HFI_EVENT_SESSION_ERROR,
- HFI_EVENT_SESSION_SEQUENCE_CHANGED,
- HFI_EVENT_SESSION_PROPERTY_CHANGED,
- HFI_UNUSED_EVENT = 0x10000000,
-};
+#define HFI_EVENT_SESSION_SEQUENCE_CHANGED (HFI_OX_BASE + 0x3)
+#define HFI_EVENT_SESSION_PROPERTY_CHANGED (HFI_OX_BASE + 0x4)
-enum HFI_EVENT_DATA_SEQUENCE_CHANGED {
- HFI_EVENT_DATA_SEQUENCE_CHANGED_SUFFICIENT_BUFFER_RESOURCES,
- HFI_EVENT_DATA_SEQUENCE_CHANGED_INSUFFICIENT_BUFFER_RESOURCES,
- HFI_UNUSED_SEQCHG = 0x10000000,
-};
+#define HFI_EVENT_DATA_SEQUENCE_CHANGED_SUFFICIENT_BUFFER_RESOURCES \
+ (HFI_OX_BASE + 0x1)
+#define HFI_EVENT_DATA_SEQUENCE_CHANGED_INSUFFICIENT_BUFFER_RESOURCES \
+ (HFI_OX_BASE + 0x2)
-#define HFI_BUFFERFLAG_EOS 0x00000001
-#define HFI_BUFFERFLAG_STARTTIME 0x00000002
-#define HFI_BUFFERFLAG_DECODEONLY 0x00000004
-#define HFI_BUFFERFLAG_DATACORRUPT 0x00000008
-#define HFI_BUFFERFLAG_ENDOFFRAME 0x00000010
-#define HFI_BUFFERFLAG_SYNCFRAME 0x00000020
-#define HFI_BUFFERFLAG_EXTRADATA 0x00000040
-#define HFI_BUFFERFLAG_CODECCONFIG 0x00000080
-#define HFI_BUFFERFLAG_TIMESTAMPINVALID 0x00000100
-#define HFI_BUFFERFLAG_READONLY 0x00000200
-#define HFI_BUFFERFLAG_ENDOFSUBFRAME 0x00000400
+#define HFI_BUFFERFLAG_EOS 0x00000001
+#define HFI_BUFFERFLAG_STARTTIME 0x00000002
+#define HFI_BUFFERFLAG_DECODEONLY 0x00000004
+#define HFI_BUFFERFLAG_DATACORRUPT 0x00000008
+#define HFI_BUFFERFLAG_ENDOFFRAME 0x00000010
+#define HFI_BUFFERFLAG_SYNCFRAME 0x00000020
+#define HFI_BUFFERFLAG_EXTRADATA 0x00000040
+#define HFI_BUFFERFLAG_CODECCONFIG 0x00000080
+#define HFI_BUFFERFLAG_TIMESTAMPINVALID 0x00000100
+#define HFI_BUFFERFLAG_READONLY 0x00000200
+#define HFI_BUFFERFLAG_ENDOFSUBFRAME 0x00000400
-enum HFI_ERROR {
- HFI_ERR_NONE = 0,
- HFI_ERR_SYS_UNKNOWN = 0x80000001,
- HFI_ERR_SYS_FATAL = 0x80000002,
- HFI_ERR_SYS_INVALID_PARAMETER = 0x80000003,
- HFI_ERR_SYS_VERSION_MISMATCH = 0x80000004,
- HFI_ERR_SYS_INSUFFICIENT_RESOURCES = 0x80000005,
- HFI_ERR_SYS_MAX_SESSIONS_REACHED = 0x80000006,
- HFI_ERR_SYS_UNSUPPORTED_CODEC = 0x80000007,
- HFI_ERR_SYS_SESSION_IN_USE = 0x80000008,
- HFI_ERR_SYS_SESSION_ID_OUT_OF_RANGE = 0x80000009,
- HFI_ERR_SYS_UNSUPPORTED_DOMAIN = 0x8000000A,
- HFI_ERR_SESSION_START_UNUSED = 0x80001000,
- HFI_ERR_SESSION_UNKNOWN = 0x80001001,
- HFI_ERR_SESSION_FATAL = 0x80001002,
- HFI_ERR_SESSION_INVALID_PARAMETER = 0x80001003,
- HFI_ERR_SESSION_BAD_POINTER = 0x80001004,
- HFI_ERR_SESSION_INVALID_SESSION_ID = 0x80001005,
- HFI_ERR_SESSION_INVALID_STREAM_ID = 0x80001006,
- HFI_ERR_SESSION_INCORRECT_STATE_OPERATION = 0x80001007,
- HFI_ERR_SESSION_UNSUPPORTED_PROPERTY = 0x80001008,
- HFI_ERR_SESSION_UNSUPPORTED_SETTING = 0x80001009,
- HFI_ERR_SESSION_INSUFFICIENT_RESOURCES = 0x8000100A,
- HFI_ERR_SESSION_STREAM_CORRUPT = 0x8000100B,
- HFI_ERR_SESSION_STREAM_CORRUPT_OUTPUT_STALLED = 0x8000100C,
- HFI_ERR_SESSION_SYNC_FRAME_NOT_DETECTED = 0x8000100D,
- HFI_ERR_SESSION_EMPTY_BUFFER_DONE_OUTPUT_PENDING = 0x8000100E,
- HFI_ERR_SESSION_SAME_STATE_OPERATION = 0x8000100F,
- HFI_UNUSED_ERR = 0x10000000,
-};
+#define HFI_ERR_SESSION_EMPTY_BUFFER_DONE_OUTPUT_PENDING \
+ (HFI_OX_BASE + 0x1001)
+#define HFI_ERR_SESSION_SAME_STATE_OPERATION \
+ (HFI_OX_BASE + 0x1002)
+#define HFI_ERR_SESSION_SYNC_FRAME_NOT_DETECTED \
+ (HFI_OX_BASE + 0x1003)
-enum HFI_DOMAIN {
- HFI_VIDEO_DOMAIN_VPE,
- HFI_VIDEO_DOMAIN_ENCODER,
- HFI_VIDEO_DOMAIN_DECODER,
- HFI_UNUSED_DOMAIN = 0x10000000,
-};
+#define HFI_BUFFER_INTERNAL_SCRATCH (HFI_OX_BASE + 0x1)
+#define HFI_BUFFER_EXTRADATA_INPUT (HFI_OX_BASE + 0x2)
+#define HFI_BUFFER_EXTRADATA_OUTPUT (HFI_OX_BASE + 0x3)
+#define HFI_BUFFER_EXTRADATA_OUTPUT2 (HFI_OX_BASE + 0x4)
-enum HFI_VIDEO_CODEC {
- HFI_VIDEO_CODEC_UNKNOWN = 0x00000000,
- HFI_VIDEO_CODEC_H264 = 0x00000002,
- HFI_VIDEO_CODEC_H263 = 0x00000004,
- HFI_VIDEO_CODEC_MPEG1 = 0x00000008,
- HFI_VIDEO_CODEC_MPEG2 = 0x00000010,
- HFI_VIDEO_CODEC_MPEG4 = 0x00000020,
- HFI_VIDEO_CODEC_DIVX_311 = 0x00000040,
- HFI_VIDEO_CODEC_DIVX = 0x00000080,
- HFI_VIDEO_CODEC_VC1 = 0x00000100,
- HFI_VIDEO_CODEC_SPARK = 0x00000200,
- HFI_VIDEO_CODEC_VP6 = 0x00000400,
- HFI_VIDEO_CODEC_VP7 = 0x00000800,
- HFI_VIDEO_CODEC_VP8 = 0x00001000,
- HFI_UNUSED_CODEC = 0x10000000,
-};
+#define HFI_BUFFER_MODE_STATIC (HFI_OX_BASE + 0x1)
+#define HFI_BUFFER_MODE_RING (HFI_OX_BASE + 0x2)
-enum HFI_H263_PROFILE {
- HFI_H263_PROFILE_BASELINE = 0x00000001,
- HFI_H263_PROFILE_H320CODING = 0x00000002,
- HFI_H263_PROFILE_BACKWARDCOMPATIBLE = 0x00000004,
- HFI_H263_PROFILE_ISWV2 = 0x00000008,
- HFI_H263_PROFILE_ISWV3 = 0x00000010,
- HFI_H263_PROFILE_HIGHCOMPRESSION = 0x00000020,
- HFI_H263_PROFILE_INTERNET = 0x00000040,
- HFI_H263_PROFILE_INTERLACE = 0x00000080,
- HFI_H263_PROFILE_HIGHLATENCY = 0x00000100,
- HFI_UNUSED_H263_PROFILE = 0x10000000,
-};
+#define HFI_FLUSH_INPUT (HFI_OX_BASE + 0x1)
+#define HFI_FLUSH_OUTPUT (HFI_OX_BASE + 0x2)
+#define HFI_FLUSH_OUTPUT2 (HFI_OX_BASE + 0x3)
+#define HFI_FLUSH_ALL (HFI_OX_BASE + 0x4)
-enum HFI_H263_LEVEL {
- HFI_H263_LEVEL_10 = 0x00000001,
- HFI_H263_LEVEL_20 = 0x00000002,
- HFI_H263_LEVEL_30 = 0x00000004,
- HFI_H263_LEVEL_40 = 0x00000008,
- HFI_H263_LEVEL_45 = 0x00000010,
- HFI_H263_LEVEL_50 = 0x00000020,
- HFI_H263_LEVEL_60 = 0x00000040,
- HFI_H263_LEVEL_70 = 0x00000080,
- HFI_UNUSED_H263_LEVEL = 0x10000000,
-};
+#define HFI_EXTRADATA_NONE 0x00000000
+#define HFI_EXTRADATA_MB_QUANTIZATION 0x00000001
+#define HFI_EXTRADATA_INTERLACE_VIDEO 0x00000002
+#define HFI_EXTRADATA_VC1_FRAMEDISP 0x00000003
+#define HFI_EXTRADATA_VC1_SEQDISP 0x00000004
+#define HFI_EXTRADATA_TIMESTAMP 0x00000005
+#define HFI_EXTRADATA_S3D_FRAME_PACKING 0x00000006
+#define HFI_EXTRADATA_MULTISLICE_INFO 0x7F100000
+#define HFI_EXTRADATA_NUM_CONCEALED_MB 0x7F100001
+#define HFI_EXTRADATA_INDEX 0x7F100002
+#define HFI_EXTRADATA_METADATA_FILLER 0x7FE00002
-enum HFI_MPEG2_PROFILE {
- HFI_MPEG2_PROFILE_SIMPLE = 0x00000001,
- HFI_MPEG2_PROFILE_MAIN = 0x00000002,
- HFI_MPEG2_PROFILE_422 = 0x00000004,
- HFI_MPEG2_PROFILE_SNR = 0x00000008,
- HFI_MPEG2_PROFILE_SPATIAL = 0x00000010,
- HFI_MPEG2_PROFILE_HIGH = 0x00000020,
- HFI_UNUSED_MPEG2_PROFILE = 0x10000000,
-};
-
-enum HFI_MPEG2_LEVEL {
- HFI_MPEG2_LEVEL_LL = 0x00000001,
- HFI_MPEG2_LEVEL_ML = 0x00000002,
- HFI_MPEG2_LEVEL_H14 = 0x00000004,
- HFI_MPEG2_LEVEL_HL = 0x00000008,
- HFI_UNUSED_MEPG2_LEVEL = 0x10000000,
-};
-
-enum HFI_MPEG4_PROFILE {
- HFI_MPEG4_PROFILE_SIMPLE = 0x00000001,
- HFI_MPEG4_PROFILE_SIMPLESCALABLE = 0x00000002,
- HFI_MPEG4_PROFILE_CORE = 0x00000004,
- HFI_MPEG4_PROFILE_MAIN = 0x00000008,
- HFI_MPEG4_PROFILE_NBIT = 0x00000010,
- HFI_MPEG4_PROFILE_SCALABLETEXTURE = 0x00000020,
- HFI_MPEG4_PROFILE_SIMPLEFACE = 0x00000040,
- HFI_MPEG4_PROFILE_SIMPLEFBA = 0x00000080,
- HFI_MPEG4_PROFILE_BASICANIMATED = 0x00000100,
- HFI_MPEG4_PROFILE_HYBRID = 0x00000200,
- HFI_MPEG4_PROFILE_ADVANCEDREALTIME = 0x00000400,
- HFI_MPEG4_PROFILE_CORESCALABLE = 0x00000800,
- HFI_MPEG4_PROFILE_ADVANCEDCODING = 0x00001000,
- HFI_MPEG4_PROFILE_ADVANCEDCORE = 0x00002000,
- HFI_MPEG4_PROFILE_ADVANCEDSCALABLE = 0x00004000,
- HFI_MPEG4_PROFILE_ADVANCEDSIMPLE = 0x00008000,
- HFI_UNUSED_MPEG4_PROFILE = 0x10000000,
-};
-
-enum HFI_MPEG4_LEVEL {
- HFI_MPEG4_LEVEL_0 = 0x00000001,
- HFI_MPEG4_LEVEL_0b = 0x00000002,
- HFI_MPEG4_LEVEL_1 = 0x00000004,
- HFI_MPEG4_LEVEL_2 = 0x00000008,
- HFI_MPEG4_LEVEL_3 = 0x00000010,
- HFI_MPEG4_LEVEL_4 = 0x00000020,
- HFI_MPEG4_LEVEL_4a = 0x00000040,
- HFI_MPEG4_LEVEL_5 = 0x00000080,
- HFI_MPEG4_LEVEL_VENDOR_START_UNUSED = 0x7F000000,
- HFI_MPEG4_LEVEL_6 = 0x7F000001,
- HFI_MPEG4_LEVEL_7 = 0x7F000002,
- HFI_MPEG4_LEVEL_8 = 0x7F000003,
- HFI_MPEG4_LEVEL_9 = 0x7F000004,
- HFI_MPEG4_LEVEL_3b = 0x7F000005,
- HFI_UNUSED_MPEG4_LEVEL = 0x10000000,
-};
-
-enum HFI_H264_PROFILE {
- HFI_H264_PROFILE_BASELINE = 0x00000001,
- HFI_H264_PROFILE_MAIN = 0x00000002,
- HFI_H264_PROFILE_EXTENDED = 0x00000004,
- HFI_H264_PROFILE_HIGH = 0x00000008,
- HFI_H264_PROFILE_HIGH10 = 0x00000010,
- HFI_H264_PROFILE_HIGH422 = 0x00000020,
- HFI_H264_PROFILE_HIGH444 = 0x00000040,
- HFI_H264_PROFILE_STEREO_HIGH = 0x00000080,
- HFI_H264_PROFILE_MV_HIGH = 0x00000100,
- HFI_UNUSED_H264_PROFILE = 0x10000000,
-};
-
-enum HFI_H264_LEVEL {
- HFI_H264_LEVEL_1 = 0x00000001,
- HFI_H264_LEVEL_1b = 0x00000002,
- HFI_H264_LEVEL_11 = 0x00000004,
- HFI_H264_LEVEL_12 = 0x00000008,
- HFI_H264_LEVEL_13 = 0x00000010,
- HFI_H264_LEVEL_2 = 0x00000020,
- HFI_H264_LEVEL_21 = 0x00000040,
- HFI_H264_LEVEL_22 = 0x00000080,
- HFI_H264_LEVEL_3 = 0x00000100,
- HFI_H264_LEVEL_31 = 0x00000200,
- HFI_H264_LEVEL_32 = 0x00000400,
- HFI_H264_LEVEL_4 = 0x00000800,
- HFI_H264_LEVEL_41 = 0x00001000,
- HFI_H264_LEVEL_42 = 0x00002000,
- HFI_H264_LEVEL_5 = 0x00004000,
- HFI_H264_LEVEL_51 = 0x00008000,
- HFI_UNUSED_H264_LEVEL = 0x10000000,
-};
-
-enum HFI_VPX_PROFILE {
- HFI_VPX_PROFILE_SIMPLE = 0x00000001,
- HFI_VPX_PROFILE_ADVANCED = 0x00000002,
- HFI_VPX_PROFILE_VERSION_0 = 0x00000004,
- HFI_VPX_PROFILE_VERSION_1 = 0x00000008,
- HFI_VPX_PROFILE_VERSION_2 = 0x00000010,
- HFI_VPX_PROFILE_VERSION_3 = 0x00000020,
- HFI_VPX_PROFILE_UNUSED = 0x10000000,
-};
-
-enum HFI_VC1_PROFILE {
- HFI_VC1_PROFILE_SIMPLE = 0x00000001,
- HFI_VC1_PROFILE_MAIN = 0x00000002,
- HFI_VC1_PROFILE_ADVANCED = 0x00000004,
- HFI_UNUSED_VC1_PROFILE = 0x10000000,
-};
-
-enum HFI_VC1_LEVEL {
- HFI_VC1_LEVEL_LOW = 0x00000001,
- HFI_VC1_LEVEL_MEDIUM = 0x00000002,
- HFI_VC1_LEVEL_HIGH = 0x00000004,
- HFI_VC1_LEVEL_0 = 0x00000008,
- HFI_VC1_LEVEL_1 = 0x00000010,
- HFI_VC1_LEVEL_2 = 0x00000020,
- HFI_VC1_LEVEL_3 = 0x00000040,
- HFI_VC1_LEVEL_4 = 0x00000080,
- HFI_UNUSED_VC1_LEVEL = 0x10000000,
-};
-
-enum HFI_DIVX_FORMAT {
- HFI_DIVX_FORMAT_4,
- HFI_DIVX_FORMAT_5,
- HFI_DIVX_FORMAT_6,
- HFI_UNUSED_DIVX_FORMAT = 0x10000000,
-};
-
-enum HFI_DIVX_PROFILE {
- HFI_DIVX_PROFILE_QMOBILE = 0x00000001,
- HFI_DIVX_PROFILE_MOBILE = 0x00000002,
- HFI_DIVX_PROFILE_MT = 0x00000004,
- HFI_DIVX_PROFILE_HT = 0x00000008,
- HFI_DIVX_PROFILE_HD = 0x00000010,
- HFI_UNUSED_DIVX_PROFILE = 0x10000000,
-};
-
-enum HFI_BUFFER {
- HFI_BUFFER_INPUT,
- HFI_BUFFER_OUTPUT,
- HFI_BUFFER_OUTPUT2,
- HFI_BUFFER_EXTRADATA_INPUT,
- HFI_BUFFER_EXTRADATA_OUTPUT,
- HFI_BUFFER_EXTRADATA_OUTPUT2,
- HFI_BUFFER_INTERNAL_SCRATCH = 0x7F000001,
- HFI_BUFFER_INTERNAL_PERSIST = 0x7F000002,
- HFI_UNUSED_BUFFER = 0x10000000,
-};
-
-enum HFI_BUFFER_MODE {
- HFI_BUFFER_MODE_STATIC,
- HFI_BUFFER_MODE_RING,
- HFI_UNUSED_BUFFER_MODE = 0x10000000,
-};
-
-enum HFI_FLUSH {
- HFI_FLUSH_INPUT,
- HFI_FLUSH_OUTPUT,
- HFI_FLUSH_OUTPUT2,
- HFI_FLUSH_ALL,
- HFI_UNUSED_FLUSH = 0x10000000,
-};
-
-enum HFI_EXTRADATA {
- HFI_EXTRADATA_NONE = 0x00000000,
- HFI_EXTRADATA_MB_QUANTIZATION = 0x00000001,
- HFI_EXTRADATA_INTERLACE_VIDEO = 0x00000002,
- HFI_EXTRADATA_VC1_FRAMEDISP = 0x00000003,
- HFI_EXTRADATA_VC1_SEQDISP = 0x00000004,
- HFI_EXTRADATA_TIMESTAMP = 0x00000005,
- HFI_EXTRADATA_MULTISLICE_INFO = 0x7F100000,
- HFI_EXTRADATA_NUM_CONCEALED_MB = 0x7F100001,
- HFI_EXTRADATA_INDEX = 0x7F100002,
- HFI_EXTRADATA_METADATA_FILLER = 0x7FE00002,
- HFI_UNUSED_EXTRADATA = 0x10000000,
-};
-
-enum HFI_EXTRADATA_INDEX_TYPE {
- HFI_INDEX_EXTRADATA_INPUT_CROP = 0x0700000E,
- HFI_INDEX_EXTRADATA_DIGITAL_ZOOM = 0x07000010,
- HFI_INDEX_EXTRADATA_ASPECT_RATIO = 0x7F100003,
-};
+#define HFI_INDEX_EXTRADATA_INPUT_CROP 0x0700000E
+#define HFI_INDEX_EXTRADATA_DIGITAL_ZOOM 0x07000010
+#define HFI_INDEX_EXTRADATA_ASPECT_RATIO 0x7F100003
struct hfi_extradata_header {
u32 size;
u32 version;
- u32 port_tndex;
- enum HFI_EXTRADATA type;
+ u32 port_index;
+ u32 type;
u32 data_size;
u8 rg_data[1];
};
-enum HFI_INTERLACE_FORMAT {
- HFI_INTERLACE_FRAME_PROGRESSIVE = 0x01,
- HFI_INTERLACE_INTERLEAVE_FRAME_TOPFIELDFIRST = 0x02,
- HFI_INTERLACE_INTERLEAVE_FRAME_BOTTOMFIELDFIRST = 0x04,
- HFI_INTERLACE_FRAME_TOPFIELDFIRST = 0x08,
- HFI_INTERLACE_FRAME_BOTTOMFIELDFIRST = 0x10,
- HFI_UNUSED_INTERLACE = 0x10000000,
-};
+#define HFI_INTERLACE_FRAME_PROGRESSIVE 0x01
+#define HFI_INTERLACE_INTERLEAVE_FRAME_TOPFIELDFIRST 0x02
+#define HFI_INTERLACE_INTERLEAVE_FRAME_BOTTOMFIELDFIRST 0x04
+#define HFI_INTERLACE_FRAME_TOPFIELDFIRST 0x08
+#define HFI_INTERLACE_FRAME_BOTTOMFIELDFIRST 0x10
-enum HFI_PROPERTY {
- HFI_PROPERTY_SYS_UNUSED = 0x08000000,
- HFI_PROPERTY_SYS_IDLE_INDICATOR,
- HFI_PROPERTY_SYS_DEBUG_CONFIG,
- HFI_PROPERTY_SYS_RESOURCE_OCMEM_REQUIREMENT_INFO,
- HFI_PROPERTY_PARAM_UNUSED = 0x04000000,
- HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL,
- HFI_PROPERTY_PARAM_FRAME_SIZE,
- HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT,
- HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED,
- HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO,
- HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO,
- HFI_PROPERTY_PARAM_INTERLACE_FORMAT_SUPPORTED,
- HFI_PROPERTY_PARAM_CHROMA_SITE,
- HFI_PROPERTY_PARAM_EXTRA_DATA_HEADER_CONFIG,
- HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT,
- HFI_PROPERTY_PARAM_PROFILE_LEVEL_SUPPORTED,
- HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED,
- HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SUPPORTED,
- HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SELECT,
- HFI_PROPERTY_PARAM_MULTI_VIEW_FORMAT,
- HFI_PROPERTY_PARAM_PROPERTIES_SUPPORTED,
- HFI_PROPERTY_PARAM_MAX_SEQUENCE_HEADER_SIZE,
- HFI_PROPERTY_PARAM_CODEC_SUPPORTED,
- HFI_PROPERTY_PARAM_DIVX_FORMAT,
+#define HFI_PROPERTY_SYS_OX_START \
+ (HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x0000)
+#define HFI_PROPERTY_SYS_IDLE_INDICATOR \
+ (HFI_PROPERTY_SYS_OX_START + 0x001)
- HFI_PROPERTY_CONFIG_UNUSED = 0x02000000,
- HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS,
- HFI_PROPERTY_CONFIG_REALTIME,
- HFI_PROPERTY_CONFIG_PRIORITY,
- HFI_PROPERTY_CONFIG_BATCH_INFO,
- HFI_PROPERTY_CONFIG_FRAME_RATE,
+#define HFI_PROPERTY_PARAM_OX_START \
+ (HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x1000)
+#define HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL \
+ (HFI_PROPERTY_PARAM_OX_START + 0x001)
+#define HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO \
+ (HFI_PROPERTY_PARAM_OX_START + 0x002)
+#define HFI_PROPERTY_PARAM_INTERLACE_FORMAT_SUPPORTED \
+ (HFI_PROPERTY_PARAM_OX_START + 0x003)
+#define HFI_PROPERTY_PARAM_CHROMA_SITE \
+	(HFI_PROPERTY_PARAM_OX_START + 0x004)
+#define HFI_PROPERTY_PARAM_EXTRA_DATA_HEADER_CONFIG \
+ (HFI_PROPERTY_PARAM_OX_START + 0x005)
+#define HFI_PROPERTY_PARAM_MAX_SEQUENCE_HEADER_SIZE \
+ (HFI_PROPERTY_PARAM_OX_START + 0x006)
+#define HFI_PROPERTY_PARAM_DIVX_FORMAT \
+ (HFI_PROPERTY_PARAM_OX_START + 0x007)
- HFI_PROPERTY_PARAM_VDEC_UNUSED = 0x01000000,
- HFI_PROPERTY_PARAM_VDEC_CONTINUE_DATA_TRANSFER,
- HFI_PROPERTY_PARAM_VDEC_DISPLAY_PICTURE_BUFFER_COUNT,
- HFI_PROPERTY_PARAM_VDEC_MULTI_VIEW_SELECT,
- HFI_PROPERTY_PARAM_VDEC_PICTURE_TYPE_DECODE,
- HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM,
- HFI_PROPERTY_PARAM_VDEC_OUTPUT_ORDER,
- HFI_PROPERTY_PARAM_VDEC_MB_QUANTIZATION,
- HFI_PROPERTY_PARAM_VDEC_NUM_CONCEALED_MB,
- HFI_PROPERTY_PARAM_VDEC_H264_ENTROPY_SWITCHING,
- HFI_PROPERTY_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO,
+#define HFI_PROPERTY_CONFIG_OX_START \
+ (HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x02000)
+#define HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS \
+ (HFI_PROPERTY_CONFIG_OX_START + 0x001)
+#define HFI_PROPERTY_CONFIG_REALTIME \
+ (HFI_PROPERTY_CONFIG_OX_START + 0x002)
+#define HFI_PROPERTY_CONFIG_PRIORITY \
+ (HFI_PROPERTY_CONFIG_OX_START + 0x003)
+#define HFI_PROPERTY_CONFIG_BATCH_INFO \
+ (HFI_PROPERTY_CONFIG_OX_START + 0x004)
- HFI_PROPERTY_CONFIG_VDEC_UNUSED = 0x00800000,
- HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER,
- HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP_REPORTING,
- HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP,
+#define HFI_PROPERTY_PARAM_VDEC_OX_START \
+ (HFI_DOMAIN_BASE_VDEC + HFI_ARCH_OX_OFFSET + 0x3000)
+#define HFI_PROPERTY_PARAM_VDEC_CONTINUE_DATA_TRANSFER \
+ (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x001)
+#define HFI_PROPERTY_PARAM_VDEC_DISPLAY_PICTURE_BUFFER_COUNT \
+ (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x002)
+#define HFI_PROPERTY_PARAM_VDEC_MULTI_VIEW_SELECT \
+ (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x003)
+#define HFI_PROPERTY_PARAM_VDEC_PICTURE_TYPE_DECODE \
+ (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x004)
+#define HFI_PROPERTY_PARAM_VDEC_OUTPUT_ORDER \
+ (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x005)
+#define HFI_PROPERTY_PARAM_VDEC_MB_QUANTIZATION \
+ (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x006)
+#define HFI_PROPERTY_PARAM_VDEC_NUM_CONCEALED_MB \
+ (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x007)
+#define HFI_PROPERTY_PARAM_VDEC_H264_ENTROPY_SWITCHING \
+ (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x008)
+#define HFI_PROPERTY_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO \
+ (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x009)
- HFI_PROPERTY_PARAM_VENC_UNUSED = 0x00400000,
- HFI_PROPERTY_PARAM_VENC_SLICE_DELIVERY_MODE,
- HFI_PROPERTY_PARAM_VENC_H264_ENTROPY_CONTROL,
- HFI_PROPERTY_PARAM_VENC_H264_DEBLOCK_CONTROL,
- HFI_PROPERTY_PARAM_VENC_RATE_CONTROL,
- HFI_PROPERTY_PARAM_VENC_TEMPORAL_SPATIAL_TRADEOFF,
- HFI_PROPERTY_PARAM_VENC_SESSION_QP,
- HFI_PROPERTY_PARAM_VENC_MPEG4_AC_PREDICTION,
- HFI_PROPERTY_PARAM_VENC_MPEG4_DATA_PARTITIONING,
- HFI_PROPERTY_PARAM_VENC_MPEG4_TIME_RESOLUTION,
- HFI_PROPERTY_PARAM_VENC_MPEG4_SHORT_HEADER,
- HFI_PROPERTY_PARAM_VENC_MPEG4_HEADER_EXTENSION,
- HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_INFO,
- HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH,
- HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_CONTROL,
+#define HFI_PROPERTY_CONFIG_VDEC_OX_START \
+ (HFI_DOMAIN_BASE_VDEC + HFI_ARCH_OX_OFFSET + 0x0000)
+#define HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER \
+ (HFI_PROPERTY_CONFIG_VDEC_OX_START + 0x001)
+#define HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP_REPORTING \
+ (HFI_PROPERTY_CONFIG_VDEC_OX_START + 0x002)
+#define HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP \
+ (HFI_PROPERTY_CONFIG_VDEC_OX_START + 0x003)
- HFI_PROPERTY_CONFIG_VENC_UNUSED = 0x00200000,
- HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE,
- HFI_PROPERTY_CONFIG_VENC_IDR_PERIOD,
- HFI_PROPERTY_CONFIG_VENC_INTRA_PERIOD,
- HFI_PROPERTY_CONFIG_VENC_REQUEST_IFRAME,
- HFI_PROPERTY_CONFIG_VENC_TIMESTAMP_SCALE,
- HFI_PROPERTY_PARAM_VENC_MPEG4_QPEL,
- HFI_PROPERTY_PARAM_VENC_ADVANCED,
+#define HFI_PROPERTY_PARAM_VENC_OX_START \
+ (HFI_DOMAIN_BASE_VENC + HFI_ARCH_OX_OFFSET + 0x5000)
+#define HFI_PROPERTY_CONFIG_VENC_OX_START \
+ (HFI_DOMAIN_BASE_VENC + HFI_ARCH_OX_OFFSET + 0x6000)
- HFI_PROPERTY_PARAM_VPE_UNUSED = 0x00100000,
-
- HFI_PROPERTY_CONFIG_VPE_UNUSED = 0x00080000,
- HFI_PROPERTY_CONFIG_VPE_DEINTERLACE,
- HFI_PROPERTY_CONFIG_VPE_OPERATIONS,
- HFI_PROPERTY_UNUSED = 0x10000000,
-};
+#define HFI_PROPERTY_PARAM_VPE_OX_START \
+ (HFI_DOMAIN_BASE_VPE + HFI_ARCH_OX_OFFSET + 0x7000)
+#define HFI_PROPERTY_CONFIG_VPE_OX_START \
+ (HFI_DOMAIN_BASE_VPE + HFI_ARCH_OX_OFFSET + 0x8000)
struct hfi_batch_info {
u32 input_batch_count;
u32 output_batch_count;
};
-struct hfi_bitrate {
- u32 bit_rate;
-};
-
struct hfi_buffer_count_actual {
- enum HFI_BUFFER buffer;
+ u32 buffer_type;
u32 buffer_count_actual;
};
struct hfi_buffer_requirements {
- enum HFI_BUFFER buffer;
+ u32 buffer_type;
u32 buffer_size;
u32 buffer_region_size;
u32 buffer_hold_count;
@@ -519,35 +273,12 @@
u32 buffer_alignment;
};
-enum HFI_CAPABILITY {
- HFI_CAPABILITY_FRAME_WIDTH,
- HFI_CAPABILITY_FRAME_HEIGHT,
- HFI_CAPABILITY_MBS_PER_FRAME,
- HFI_CAPABILITY_MBS_PER_SECOND,
- HFI_CAPABILITY_FRAMERATE,
- HFI_CAPABILITY_SCALE_X,
- HFI_CAPABILITY_SCALE_Y,
- HFI_CAPABILITY_BITRATE,
- HFI_UNUSED_CAPABILITY = 0x10000000,
-};
-
-struct hfi_capability_supported {
- enum HFI_CAPABILITY eCapabilityType;
- u32 min;
- u32 max;
- u32 step_size;
-};
-
-struct hfi_capability_supported_INFO {
- u32 num_capabilities;
- struct hfi_capability_supported rg_data[1];
-};
-
-enum HFI_CHROMA_SITE {
- HFI_CHROMA_SITE_0,
- HFI_CHROMA_SITE_1,
- HFI_UNUSED_CHROMA = 0x10000000,
-};
+#define HFI_CHROMA_SITE_0 (HFI_OX_BASE + 0x1)
+#define HFI_CHROMA_SITE_1 (HFI_OX_BASE + 0x2)
+#define HFI_CHROMA_SITE_2 (HFI_OX_BASE + 0x3)
+#define HFI_CHROMA_SITE_3 (HFI_OX_BASE + 0x4)
+#define HFI_CHROMA_SITE_4 (HFI_OX_BASE + 0x5)
+#define HFI_CHROMA_SITE_5 (HFI_OX_BASE + 0x6)
struct hfi_data_payload {
u32 size;
@@ -567,86 +298,17 @@
u32 count;
};
-struct hfi_enable {
- int enable;
-};
-
-enum HFI_H264_DB_MODE {
- HFI_H264_DB_MODE_DISABLE,
- HFI_H264_DB_MODE_SKIP_SLICE_BOUNDARY,
- HFI_H264_DB_MODE_ALL_BOUNDARY,
- HFI_UNUSED_H264_DB = 0x10000000,
-};
-
-struct hfi_h264_db_control {
- enum HFI_H264_DB_MODE mode;
- int slice_alpha_offset;
- int slice_beta_offset;
-};
-
-enum HFI_H264_ENTROPY {
- HFI_H264_ENTROPY_CAVLC,
- HFI_H264_ENTROPY_CABAC,
- HFI_UNUSED_ENTROPY = 0x10000000,
-};
-
-enum HFI_H264_CABAC_MODEL {
- HFI_H264_CABAC_MODEL_0,
- HFI_H264_CABAC_MODEL_1,
- HFI_H264_CABAC_MODEL_2,
- HFI_UNUSED_CABAC = 0x10000000,
-};
-
-struct hfi_h264_entropy_control {
- enum HFI_H264_ENTROPY entropy_mode;
- enum HFI_H264_CABAC_MODEL cabac_model;
-};
-
struct hfi_extra_data_header_config {
u32 type;
- enum HFI_BUFFER buffer_type;
+ u32 buffer_type;
u32 version;
u32 port_index;
- u32 client_extradata_id;
-};
-
-struct hfi_frame_rate {
- enum HFI_BUFFER buffer_type;
- u32 frame_rate;
+ u32 client_extra_data_id;
};
struct hfi_interlace_format_supported {
- enum HFI_BUFFER buffer;
- enum HFI_INTERLACE_FORMAT format;
-};
-
-enum hfi_intra_refresh_mode {
- HFI_INTRA_REFRESH_NONE,
- HFI_INTRA_REFRESH_CYCLIC,
- HFI_INTRA_REFRESH_ADAPTIVE,
- HFI_INTRA_REFRESH_CYCLIC_ADAPTIVE,
- HFI_INTRA_REFRESH_RANDOM,
- HFI_UNUSED_INTRA = 0x10000000,
-};
-
-struct hfi_intra_refresh {
- enum hfi_intra_refresh_mode mode;
- u32 air_mbs;
- u32 air_ref;
- u32 cir_mbs;
-};
-
-struct hfi_idr_period {
- u32 idr_period;
-};
-
-struct hfi_intra_period {
- u32 pframes;
- u32 bframes;
-};
-
-struct hfi_timestamp_scale {
- u32 time_stamp_scale;
+ u32 buffer_type;
+ u32 format;
};
struct hfi_mb_error_map {
@@ -659,424 +321,110 @@
u32 size;
};
-struct hfi_mpeg4_header_extension {
- u32 header_extension;
-};
-
-struct hfi_mpeg4_time_resolution {
- u32 time_increment_resolution;
-};
-
-enum HFI_MULTI_SLICE {
- HFI_MULTI_SLICE_OFF,
- HFI_MULTI_SLICE_BY_MB_COUNT,
- HFI_MULTI_SLICE_BY_BYTE_COUNT,
- HFI_MULTI_SLICE_GOB,
- HFI_UNUSED_SLICE = 0x10000000,
-};
-
-struct hfi_multi_slice_control {
- enum HFI_MULTI_SLICE multi_slice;
- u32 slice_size;
-};
-
-struct hfi_multi_stream {
- enum HFI_BUFFER buffer;
- u32 enable;
- u32 width;
- u32 height;
-};
-
-struct hfi_multi_view_format {
- u32 views;
- u32 rg_view_order[1];
-};
-
struct hfi_multi_view_select {
u32 view_index;
};
-enum HFI_NAL_STREAM_FORMAT {
- HFI_NAL_FORMAT_STARTCODES = 0x00000001,
- HFI_NAL_FORMAT_ONE_NAL_PER_BUFFER = 0x00000002,
- HFI_NAL_FORMAT_ONE_BYTE_LENGTH = 0x00000004,
- HFI_NAL_FORMAT_TWO_BYTE_LENGTH = 0x00000008,
- HFI_NAL_FORMAT_FOUR_BYTE_LENGTH = 0x00000010,
- HFI_UNUSED_NAL = 0x10000000,
-};
+#define HFI_PRIORITY_LOW 10
+#define HFI_PRIOIRTY_MEDIUM 20
+#define HFI_PRIORITY_HIGH 30
-struct hfi_nal_stream_format_supported {
- u32 nal_stream_format_supported;
-};
+#define HFI_OUTPUT_ORDER_DISPLAY (HFI_OX_BASE + 0x1)
+#define HFI_OUTPUT_ORDER_DECODE (HFI_OX_BASE + 0x2)
-enum HFI_PICTURE {
- HFI_PICTURE_I = 0x01,
- HFI_PICTURE_P = 0x02,
- HFI_PICTURE_B = 0x04,
- HFI_PICTURE_IDR = 0x7F001000,
- HFI_UNUSED_PICT = 0x10000000,
-};
-
-enum HFI_PRIORITY {
- HFI_PRIORITY_LOW = 10,
- HFI_PRIOIRTY_MEDIUM = 20,
- HFI_PRIORITY_HIGH = 30,
- HFI_UNUSED_PRIORITY = 0x10000000,
-};
-
-struct hfi_profile_level {
- u32 profile;
- u32 level;
-};
-
-struct hfi_profile_level_supported {
- u32 profile_count;
- struct hfi_profile_level rg_profile_level[1];
-};
-
-enum HFI_ROTATE {
- HFI_ROTATE_NONE,
- HFI_ROTATE_90,
- HFI_ROTATE_180,
- HFI_ROTATE_270,
- HFI_UNUSED_ROTATE = 0x10000000,
-};
-
-enum HFI_FLIP {
- HFI_FLIP_NONE,
- HFI_FLIP_HORIZONTAL,
- HFI_FLIP_VERTICAL,
- HFI_UNUSED_FLIP = 0x10000000,
-};
-
-struct hfi_operations {
- enum HFI_ROTATE rotate;
- enum HFI_FLIP flip;
-};
-
-enum HFI_OUTPUT_ORDER {
- HFI_OUTPUT_ORDER_DISPLAY,
- HFI_OUTPUT_ORDER_DECODE,
- HFI_UNUSED_OUTPUT = 0x10000000,
-};
-
-struct hfi_quantization {
- u32 qp_i;
- u32 qp_p;
- u32 qp_b;
-};
-
-enum HFI_RATE_CONTROL {
- HFI_RATE_CONTROL_OFF,
- HFI_RATE_CONTROL_VBR_VFR,
- HFI_RATE_CONTROL_VBR_CFR,
- HFI_RATE_CONTROL_CBR_VFR,
- HFI_RATE_CONTROL_CBR_CFR,
- HFI_UNUSED_RC = 0x10000000,
-};
-
-struct hfi_slice_delivery_mode {
- int enable;
-};
-
-struct hfi_temporal_spatial_tradeoff {
- u32 ts_factor;
-};
-
-struct hfi_frame_size {
- enum HFI_BUFFER buffer;
- u32 width;
- u32 height;
-};
-
-enum HFI_UNCOMPRESSED_FORMAT {
- HFI_COLOR_FORMAT_MONOCHROME,
- HFI_COLOR_FORMAT_NV12,
- HFI_COLOR_FORMAT_NV21,
- HFI_COLOR_FORMAT_NV12_4x4TILE,
- HFI_COLOR_FORMAT_NV21_4x4TILE,
- HFI_COLOR_FORMAT_YUYV,
- HFI_COLOR_FORMAT_YVYU,
- HFI_COLOR_FORMAT_UYVY,
- HFI_COLOR_FORMAT_VYUY,
- HFI_COLOR_FORMAT_RGB565,
- HFI_COLOR_FORMAT_BGR565,
- HFI_COLOR_FORMAT_RGB888,
- HFI_COLOR_FORMAT_BGR888,
- HFI_UNUSED_COLOR = 0x10000000,
-};
-
-struct hfi_uncompressed_format_select {
- enum HFI_BUFFER buffer;
- enum HFI_UNCOMPRESSED_FORMAT format;
-};
-
-struct hfi_uncompressed_format_supported {
- enum HFI_BUFFER buffer;
- u32 format_entries;
- u32 rg_format_info[1];
-};
-
-struct hfi_uncompressed_plane_actual {
- int actual_stride;
- u32 actual_plane_buffer_height;
-};
-
-struct hfi_uncompressed_plane_actual_info {
- enum HFI_BUFFER buffer;
- u32 num_planes;
- struct hfi_uncompressed_plane_actual rg_plane_format[1];
-};
-
-struct hfi_uncompressed_plane_constraints {
- u32 stride_multiples;
- u32 max_stride;
- u32 min_plane_buffer_height_multiple;
- u32 buffer_alignment;
-};
-
-struct hfi_uncompressed_plane_info {
- enum HFI_UNCOMPRESSED_FORMAT format;
- u32 num_planes;
- struct hfi_uncompressed_plane_constraints rg_plane_format[1];
-};
+#define HFI_RATE_CONTROL_OFF (HFI_OX_BASE + 0x1)
+#define HFI_RATE_CONTROL_VBR_VFR (HFI_OX_BASE + 0x2)
+#define HFI_RATE_CONTROL_VBR_CFR (HFI_OX_BASE + 0x3)
+#define HFI_RATE_CONTROL_CBR_VFR (HFI_OX_BASE + 0x4)
+#define HFI_RATE_CONTROL_CBR_CFR (HFI_OX_BASE + 0x5)
struct hfi_uncompressed_plane_actual_constraints_info {
- enum HFI_BUFFER buffer;
+ u32 buffer_type;
u32 num_planes;
struct hfi_uncompressed_plane_constraints rg_plane_format[1];
};
-struct hfi_codec_supported {
- u32 decoder_codec_supported;
- u32 encoder_codec_supported;
-};
+#define HFI_CMD_SYS_OX_START \
+ (HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x0000)
+#define HFI_CMD_SYS_SESSION_ABORT (HFI_CMD_SYS_OX_START + 0x001)
+#define HFI_CMD_SYS_PING (HFI_CMD_SYS_OX_START + 0x002)
-enum HFI_DEBUG_MSG {
- HFI_DEBUG_MSG_LOW = 0x00000001,
- HFI_DEBUG_MSG_MEDIUM = 0x00000002,
- HFI_DEBUG_MSG_HIGH = 0x00000004,
- HFI_DEBUG_MSG_ERROR = 0x00000008,
- HFI_DEBUG_MSG_FATAL = 0x00000010,
- HFI_UNUSED_DEBUG_MSG = 0x10000000,
-};
+#define HFI_CMD_SESSION_OX_START \
+ (HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x1000)
+#define HFI_CMD_SESSION_LOAD_RESOURCES (HFI_CMD_SESSION_OX_START + 0x001)
+#define HFI_CMD_SESSION_START (HFI_CMD_SESSION_OX_START + 0x002)
+#define HFI_CMD_SESSION_STOP (HFI_CMD_SESSION_OX_START + 0x003)
+#define HFI_CMD_SESSION_EMPTY_BUFFER (HFI_CMD_SESSION_OX_START + 0x004)
+#define HFI_CMD_SESSION_FILL_BUFFER (HFI_CMD_SESSION_OX_START + 0x005)
+#define HFI_CMD_SESSION_SUSPEND (HFI_CMD_SESSION_OX_START + 0x006)
+#define HFI_CMD_SESSION_RESUME (HFI_CMD_SESSION_OX_START + 0x007)
+#define HFI_CMD_SESSION_FLUSH (HFI_CMD_SESSION_OX_START + 0x008)
+#define HFI_CMD_SESSION_GET_PROPERTY (HFI_CMD_SESSION_OX_START + 0x009)
+#define HFI_CMD_SESSION_PARSE_SEQUENCE_HEADER \
+ (HFI_CMD_SESSION_OX_START + 0x00A)
+#define HFI_CMD_SESSION_RELEASE_BUFFERS \
+ (HFI_CMD_SESSION_OX_START + 0x00B)
+#define HFI_CMD_SESSION_RELEASE_RESOURCES \
+ (HFI_CMD_SESSION_OX_START + 0x00C)
-struct hfi_debug_config {
- u32 debug_config;
-};
+#define HFI_MSG_SYS_OX_START \
+ (HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x0000)
+#define HFI_MSG_SYS_IDLE (HFI_MSG_SYS_OX_START + 0x1)
+#define HFI_MSG_SYS_PING_ACK (HFI_MSG_SYS_OX_START + 0x2)
+#define HFI_MSG_SYS_PROPERTY_INFO (HFI_MSG_SYS_OX_START + 0x3)
+#define HFI_MSG_SYS_SESSION_ABORT_DONE (HFI_MSG_SYS_OX_START + 0x4)
-struct hfi_properties_supported {
- u32 num_properties;
- u32 rg_properties[1];
-};
-
-enum HFI_RESOURCE {
- HFI_RESOURCE_OCMEM = 0x00000001,
- HFI_UNUSED_RESOURCE = 0x10000000,
-};
-
-struct hfi_resource_ocmem_type {
- u32 size;
- u8 *mem;
-};
-
-struct hfi_resource_ocmem_requirement {
- enum HFI_DOMAIN session_domain;
- u32 width;
- u32 height;
- u32 size;
-};
-
-struct hfi_resource_ocmem_requirement_info {
- u32 num_entries;
- struct hfi_resource_ocmem_requirement rg_requirements[1];
-};
-
-struct hfi_venc_config_advanced {
- u8 pipe2d;
- u8 hw_mode;
- u8 low_delay_enforce;
- int h264_constrain_intra_pred;
- int h264_transform_8x8_flag;
- int mpeg4_qpel_enable;
- int multi_refP_en;
- int qmatrix_en;
- u8 vpp_info_packet_mode;
- u8 ref_tile_mode;
- u8 bitstream_flush_mode;
- u32 ds_display_frame_width;
- u32 ds_display_frame_height;
- u32 perf_tune_param_ptr;
-};
-
-enum HFI_COMMAND {
- HFI_CMD_SYS_UNUSED = 0x01000000,
- HFI_CMD_SYS_INIT,
- HFI_CMD_SYS_SESSION_INIT,
- HFI_CMD_SYS_SESSION_END,
- HFI_CMD_SYS_SESSION_ABORT,
- HFI_CMD_SYS_SET_RESOURCE,
- HFI_CMD_SYS_RELEASE_RESOURCE,
- HFI_CMD_SYS_PING,
- HFI_CMD_SYS_PC_PREP,
- HFI_CMD_SYS_SET_PROPERTY,
- HFI_CMD_SYS_GET_PROPERTY,
-
- HFI_CMD_SESSION_UNUSED = 0x02000000,
- HFI_CMD_SESSION_LOAD_RESOURCES,
- HFI_CMD_SESSION_START,
- HFI_CMD_SESSION_STOP,
- HFI_CMD_SESSION_EMPTY_BUFFER,
- HFI_CMD_SESSION_FILL_BUFFER,
- HFI_CMD_SESSION_FLUSH,
- HFI_CMD_SESSION_SUSPEND,
- HFI_CMD_SESSION_RESUME,
- HFI_CMD_SESSION_SET_PROPERTY,
- HFI_CMD_SESSION_GET_PROPERTY,
- HFI_CMD_SESSION_PARSE_SEQUENCE_HEADER,
- HFI_CMD_SESSION_GET_SEQUENCE_HEADER,
- HFI_CMD_SESSION_SET_BUFFERS,
- HFI_CMD_SESSION_RELEASE_BUFFERS,
- HFI_CMD_SESSION_RELEASE_RESOURCES,
-
- HFI_CMD_UNUSED = 0x10000000,
-};
-
-enum HFI_MESSAGE {
- HFI_MSG_SYS_UNUSED = 0x01000000,
- HFI_MSG_SYS_IDLE,
- HFI_MSG_SYS_PC_PREP_DONE,
- HFI_MSG_SYS_RELEASE_RESOURCE,
- HFI_MSG_SYS_PING_ACK,
- HFI_MSG_SYS_DEBUG,
- HFI_MSG_SYS_INIT_DONE,
- HFI_MSG_SYS_PROPERTY_INFO,
- HFI_MSG_SESSION_UNUSED = 0x02000000,
- HFI_MSG_EVENT_NOTIFY,
- HFI_MSG_SYS_SESSION_INIT_DONE,
- HFI_MSG_SYS_SESSION_END_DONE,
- HFI_MSG_SYS_SESSION_ABORT_DONE,
- HFI_MSG_SESSION_LOAD_RESOURCES_DONE,
- HFI_MSG_SESSION_START_DONE,
- HFI_MSG_SESSION_STOP_DONE,
- HFI_MSG_SESSION_SUSPEND_DONE,
- HFI_MSG_SESSION_RESUME_DONE,
- HFI_MSG_SESSION_EMPTY_BUFFER_DONE,
- HFI_MSG_SESSION_FILL_BUFFER_DONE,
- HFI_MSG_SESSION_FLUSH_DONE,
- HFI_MSG_SESSION_PROPERTY_INFO,
- HFI_MSG_SESSION_RELEASE_RESOURCES_DONE,
- HFI_MSG_SESSION_PARSE_SEQUENCE_HEADER_DONE,
- HFI_MSG_SESSION_GET_SEQUENCE_HEADER_DONE,
- HFI_MSG_UNUSED = 0x10000000,
-};
-
-struct vidc_hal_msg_pkt_hdr {
- u32 size;
- enum HFI_MESSAGE packet;
-};
-
-struct vidc_hal_session_cmd_pkt {
- u32 size;
- enum HFI_COMMAND packet_type;
- u32 session_id;
-};
-
-enum HFI_STATUS {
- HFI_FAIL = 0,
- HFI_SUCCESS,
- HFI_UNUSED_STATUS = 0x10000000,
-};
-
-struct hfi_cmd_sys_init_packet {
- u32 size;
- enum HFI_COMMAND packet;
-};
-
-struct hfi_cmd_sys_session_init_packet {
- u32 size;
- enum HFI_COMMAND packet;
- u32 session_id;
- enum HFI_DOMAIN session_domain;
- enum HFI_VIDEO_CODEC session_codec;
-};
-
-struct hfi_cmd_sys_session_end_packet {
- u32 size;
- enum HFI_COMMAND packet_type;
- u32 session_id;
-};
+#define HFI_MSG_SESSION_OX_START \
+ (HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x1000)
+#define HFI_MSG_SESSION_LOAD_RESOURCES_DONE (HFI_MSG_SESSION_OX_START + 0x1)
+#define HFI_MSG_SESSION_START_DONE (HFI_MSG_SESSION_OX_START + 0x2)
+#define HFI_MSG_SESSION_STOP_DONE (HFI_MSG_SESSION_OX_START + 0x3)
+#define HFI_MSG_SESSION_SUSPEND_DONE (HFI_MSG_SESSION_OX_START + 0x4)
+#define HFI_MSG_SESSION_RESUME_DONE (HFI_MSG_SESSION_OX_START + 0x5)
+#define HFI_MSG_SESSION_FLUSH_DONE (HFI_MSG_SESSION_OX_START + 0x6)
+#define HFI_MSG_SESSION_EMPTY_BUFFER_DONE (HFI_MSG_SESSION_OX_START + 0x7)
+#define HFI_MSG_SESSION_FILL_BUFFER_DONE (HFI_MSG_SESSION_OX_START + 0x8)
+#define HFI_MSG_SESSION_PROPERTY_INFO (HFI_MSG_SESSION_OX_START + 0x9)
+#define HFI_MSG_SESSION_RELEASE_RESOURCES_DONE (HFI_MSG_SESSION_OX_START + 0xA)
+#define HFI_MSG_SESSION_PARSE_SEQUENCE_HEADER_DONE \
+ (HFI_MSG_SESSION_OX_START + 0xB)
struct hfi_cmd_sys_session_abort_packet {
u32 size;
- enum HFI_COMMAND packet_type;
+ u32 packet_type;
u32 session_id;
};
-struct hfi_cmd_sys_pc_prep_packet {
- u32 size;
- enum HFI_COMMAND packet_type;
-};
-
-struct hfi_cmd_sys_set_resource_packet {
- u32 size;
- enum HFI_COMMAND packet_type;
- u32 resource_handle;
- enum HFI_RESOURCE resource_type;
- u32 rg_resource_data[1];
-};
-
-struct hfi_cmd_sys_release_resource_packet {
- u32 size;
- enum HFI_COMMAND packet_type;
- enum HFI_RESOURCE resource_type;
- u32 resource_handle;
-};
-
struct hfi_cmd_sys_ping_packet {
u32 size;
- enum HFI_COMMAND packet_type;
+ u32 packet_type;
u32 client_data;
};
-struct hfi_cmd_sys_set_property_packet {
- u32 size;
- enum HFI_COMMAND packet_type;
- u32 num_properties;
- u32 rg_property_data[1];
-};
-
-struct hfi_cmd_sys_get_property_packet {
- u32 size;
- enum HFI_COMMAND packet_type;
- u32 num_properties;
- enum HFI_PROPERTY rg_property_data[1];
-};
-
struct hfi_cmd_session_load_resources_packet {
u32 size;
- enum HFI_COMMAND packet_type;
+ u32 packet_type;
u32 session_id;
};
struct hfi_cmd_session_start_packet {
u32 size;
- enum HFI_COMMAND packet_type;
+ u32 packet_type;
u32 session_id;
};
struct hfi_cmd_session_stop_packet {
u32 size;
- enum HFI_COMMAND packet_type;
+ u32 packet_type;
u32 session_id;
};
struct hfi_cmd_session_empty_buffer_compressed_packet {
u32 size;
- enum HFI_COMMAND packet_type;
+ u32 packet_type;
u32 session_id;
- u32 timestamp_hi;
- u32 timestamp_lo;
+ u32 time_stamp_hi;
+ u32 time_stamp_lo;
u32 flags;
u32 mark_target;
u32 mark_data;
@@ -1085,15 +433,16 @@
u32 filled_len;
u32 input_tag;
u8 *packet_buffer;
+ u8 *extra_data_buffer;
};
struct hfi_cmd_session_empty_buffer_uncompressed_plane0_packet {
u32 size;
- enum HFI_COMMAND packet;
+ u32 packet_type;
u32 session_id;
u32 view_id;
- u32 timestamp_hi;
- u32 timestamp_lo;
+ u32 time_stamp_hi;
+ u32 time_stamp_lo;
u32 flags;
u32 mark_target;
u32 mark_data;
@@ -1102,6 +451,7 @@
u32 offset;
u32 input_tag;
u8 *packet_buffer;
+ u8 *extra_data_buffer;
};
struct hfi_cmd_session_empty_buffer_uncompressed_plane1_packet {
@@ -1122,234 +472,153 @@
struct hfi_cmd_session_fill_buffer_packet {
u32 size;
- enum HFI_COMMAND packet_type;
+ u32 packet_type;
u32 session_id;
u32 stream_id;
+ u32 output_tag;
u8 *packet_buffer;
u8 *extra_data_buffer;
};
struct hfi_cmd_session_flush_packet {
u32 size;
- enum HFI_COMMAND packet_type;
+ u32 packet_type;
u32 session_id;
- enum HFI_FLUSH flush_type;
+ u32 flush_type;
};
struct hfi_cmd_session_suspend_packet {
u32 size;
- enum HFI_COMMAND packet;
+ u32 packet_type;
u32 session_id;
};
struct hfi_cmd_session_resume_packet {
u32 size;
- enum HFI_COMMAND packet_type;
+ u32 packet_type;
u32 session_id;
};
-struct hfi_cmd_session_set_property_packet {
- u32 size;
- enum HFI_COMMAND packet_type;
- u32 session_id;
- u32 num_properties;
- u32 rg_property_data[0];
-};
-
struct hfi_cmd_session_get_property_packet {
u32 size;
- enum HFI_COMMAND packet_type;
+ u32 packet_type;
u32 session_id;
u32 num_properties;
- enum HFI_PROPERTY rg_property_data[1];
-};
-
-struct hfi_buffer_info {
- u32 buffer_addr;
- u32 extradata_addr;
-};
-
-struct hfi_cmd_session_set_buffers_packet {
- u32 size;
- enum HFI_COMMAND packet_type;
- u32 session_id;
- enum HFI_BUFFER buffer_type;
- enum HFI_BUFFER_MODE buffer_mode;
- u32 buffer_size;
- u32 extradata_size;
- u32 min_buffer_size;
- u32 num_buffers;
- u32 rg_buffer_info[1];
+ u32 rg_property_data[1];
};
struct hfi_cmd_session_release_buffer_packet {
u32 size;
- enum HFI_COMMAND packet_type;
+ u32 packet_type;
u32 session_id;
- enum HFI_BUFFER buffer_type;
+ u32 buffer_type;
u32 buffer_size;
- u32 extradata_size;
+ u32 extra_data_size;
u32 num_buffers;
u32 rg_buffer_info[1];
};
struct hfi_cmd_session_release_resources_packet {
u32 size;
- enum HFI_COMMAND packet_type;
+ u32 packet_type;
u32 session_id;
};
struct hfi_cmd_session_parse_sequence_header_packet {
u32 size;
- enum HFI_COMMAND packet_type;
+ u32 packet_type;
u32 session_id;
u32 header_len;
u8 *packet_buffer;
};
-struct hfi_cmd_session_get_sequence_header_packet {
- u32 size;
- enum HFI_COMMAND packet_type;
- u32 session_id;
- u32 buffer_len;
- u8 *packet_buffer;
-};
-
-struct hfi_msg_event_notify_packet {
- u32 size;
- enum HFI_MESSAGE packet_type;
- u32 session_id;
- enum HFI_EVENT event_id;
- u32 event_data1;
- u32 event_data2;
- u32 rg_ext_event_data[1];
-};
-
-struct hfi_msg_sys_init_done_packet {
- u32 size;
- enum HFI_MESSAGE packet_type;
- enum HFI_ERROR error_type;
- u32 num_properties;
- u32 rg_property_data[1];
-};
-
-struct hfi_msg_sys_session_init_done_packet {
- u32 size;
- enum HFI_MESSAGE packet_type;
- u32 session_id;
- enum HFI_ERROR error_type;
- u32 num_properties;
- u32 rg_property_data[1];
-};
-
-struct hfi_msg_sys_session_end_done_packet {
- u32 size;
- enum HFI_MESSAGE packet_type;
- u32 session_id;
- enum HFI_ERROR error_type;
-};
-
struct hfi_msg_sys_session_abort_done_packet {
u32 size;
- enum HFI_MESSAGE packet_type;
+ u32 packet_type;
u32 session_id;
- enum HFI_ERROR error_type;
+ u32 error_type;
};
struct hfi_msg_sys_idle_packet {
u32 size;
- enum HFI_MESSAGE packet_type;
-};
-
-struct hfi_msg_sys_pc_prep_done_packet {
- u32 size;
- enum HFI_MESSAGE packet_type;
- enum HFI_ERROR error_type;
-};
-
-struct hfi_msg_sys_release_resource_done_packet {
- u32 size;
- enum HFI_MESSAGE packet_type;
- u32 resource_handle;
- enum HFI_ERROR error_type;
+ u32 packet_type;
};
struct hfi_msg_sys_ping_ack_packet {
u32 size;
- enum HFI_MESSAGE packet_type;
+ u32 packet_type;
u32 client_data;
};
-struct hfi_msg_sys_debug_packet {
- u32 size;
- enum HFI_MESSAGE packet_type;
- enum HFI_DEBUG_MSG msg_type;
- u32 msg_size;
- u32 timestamp_hi;
- u32 timestamp_lo;
- u8 rg_msg_data[1];
-};
-
struct hfi_msg_sys_property_info_packet {
- u32 nsize;
- enum HFI_MESSAGE packet_type;
+ u32 size;
+ u32 packet_type;
u32 num_properties;
u32 rg_property_data[1];
};
struct hfi_msg_session_load_resources_done_packet {
u32 size;
- enum HFI_MESSAGE packet_type;
+ u32 packet_type;
u32 session_id;
- enum HFI_ERROR error_type;
+ u32 error_type;
};
struct hfi_msg_session_start_done_packet {
u32 size;
- enum HFI_MESSAGE packet_type;
+ u32 packet_type;
u32 session_id;
- enum HFI_ERROR error_type;
+ u32 error_type;
};
struct hfi_msg_session_stop_done_packet {
u32 size;
- enum HFI_MESSAGE packet_type;
+ u32 packet_type;
u32 session_id;
- enum HFI_ERROR error_type;
+ u32 error_type;
};
struct hfi_msg_session_suspend_done_packet {
u32 size;
- enum HFI_MESSAGE packet_type;
+ u32 packet_type;
u32 session_id;
- enum HFI_ERROR error_type;
+ u32 error_type;
};
struct hfi_msg_session_resume_done_packet {
u32 size;
- enum HFI_MESSAGE packet_type;
+ u32 packet_type;
u32 session_id;
- enum HFI_ERROR error_type;
+ u32 error_type;
+};
+
+struct hfi_msg_session_flush_done_packet {
+ u32 size;
+ u32 packet_type;
+ u32 session_id;
+ u32 error_type;
+ u32 flush_type;
};
struct hfi_msg_session_empty_buffer_done_packet {
u32 size;
- enum HFI_MESSAGE packet_type;
+ u32 packet_type;
u32 session_id;
- enum HFI_ERROR error_type;
+ u32 error_type;
u32 offset;
u32 filled_len;
u32 input_tag;
u8 *packet_buffer;
+ u8 *extra_data_buffer;
};
struct hfi_msg_session_fill_buffer_done_compressed_packet {
u32 size;
- enum HFI_MESSAGE packet_type;
+ u32 packet_type;
u32 session_id;
- u32 timestamp_hi;
- u32 timestamp_lo;
- enum HFI_ERROR error_type;
+ u32 time_stamp_hi;
+ u32 time_stamp_lo;
+ u32 error_type;
u32 flags;
u32 mark_target;
u32 mark_data;
@@ -1358,34 +627,36 @@
u32 alloc_len;
u32 filled_len;
u32 input_tag;
- enum HFI_PICTURE picture_type;
+ u32 output_tag;
+ u32 picture_type;
u8 *packet_buffer;
u8 *extra_data_buffer;
};
struct hfi_msg_session_fbd_uncompressed_plane0_packet {
u32 size;
- enum HFI_MESSAGE packet_type;
+ u32 packet_type;
u32 session_id;
u32 stream_id;
u32 view_id;
- enum HFI_ERROR error_type;
- u32 timestamp_hi;
- u32 timestamp_lo;
+ u32 error_type;
+ u32 time_stamp_hi;
+ u32 time_stamp_lo;
u32 flags;
u32 mark_target;
u32 mark_data;
u32 stats;
u32 alloc_len;
u32 filled_len;
- u32 oofset;
+ u32 offset;
u32 frame_width;
u32 frame_height;
- u32 start_xCoord;
- u32 start_yCoord;
+ u32 start_x_coord;
+ u32 start_y_coord;
u32 input_tag;
- u32 input_tag1;
- enum HFI_PICTURE picture_type;
+ u32 input_tag2;
+ u32 output_tag;
+ u32 picture_type;
u8 *packet_buffer;
u8 *extra_data_buffer;
};
@@ -1395,7 +666,7 @@
u32 alloc_len;
u32 filled_len;
u32 offset;
- u8 *packet_buffer;
+ u8 *packet_buffer2;
};
struct hfi_msg_session_fill_buffer_done_uncompressed_plane2_packet {
@@ -1403,38 +674,21 @@
u32 alloc_len;
u32 filled_len;
u32 offset;
- u8 *packet_buffer;
-};
-
-struct hfi_msg_session_flush_done_packet {
- u32 size;
- enum HFI_MESSAGE packet_type;
- u32 session_id;
- enum HFI_ERROR error_type;
- enum HFI_FLUSH flush_type;
+ u8 *packet_buffer3;
};
struct hfi_msg_session_parse_sequence_header_done_packet {
u32 size;
- enum HFI_MESSAGE packet_type;
+ u32 packet_type;
u32 session_id;
- enum HFI_ERROR error_type;
+ u32 error_type;
u32 num_properties;
u32 rg_property_data[1];
};
-struct hfi_msg_session_get_sequence_header_done_packet {
- u32 size;
- enum HFI_MESSAGE packet_type;
- u32 session_id;
- enum HFI_ERROR error_type;
- u32 header_len;
- u8 *sequence_header;
-};
-
struct hfi_msg_session_property_info_packet {
u32 size;
- enum HFI_MESSAGE packet_type;
+ u32 packet_type;
u32 session_id;
u32 num_properties;
u32 rg_property_data[1];
@@ -1442,9 +696,9 @@
struct hfi_msg_session_release_resources_done_packet {
u32 size;
- enum HFI_MESSAGE packet_type;
+ u32 packet_type;
u32 session_id;
- enum HFI_ERROR error_type;
+ u32 error_type;
};
struct hfi_extradata_mb_quantization_payload {
@@ -1453,7 +707,7 @@
struct hfi_extradata_vc1_pswnd {
u32 ps_wnd_h_offset;
- u32 ps_wndv_offset;
+ u32 ps_wnd_v_offset;
u32 ps_wnd_width;
u32 ps_wnd_height;
};
@@ -1481,12 +735,8 @@
};
struct hfi_extradata_timestamp_payload {
- u32 timestamp_low;
- u32 timestamp_high;
-};
-
-struct hfi_extradata_interlace_video_payload {
- enum HFI_INTERLACE_FORMAT format;
+ u32 time_stamp_low;
+ u32 time_stamp_high;
};
enum HFI_S3D_FP_LAYOUT {
@@ -1496,14 +746,14 @@
HFI_S3D_FP_LAYOUT_INTRLV_ROW,
HFI_S3D_FP_LAYOUT_SIDEBYSIDE,
HFI_S3D_FP_LAYOUT_TOPBOTTOM,
- HFI_S3D_FP_LAYOUT_UNUSED = 0x10000000,
+ HFI_S3D_FP_LAYOUT_UNUSED = 0x10000000
};
enum HFI_S3D_FP_VIEW_ORDER {
HFI_S3D_FP_LEFTVIEW_FIRST,
HFI_S3D_FP_RIGHTVIEW_FIRST,
HFI_S3D_FP_UNKNOWN,
- HFI_S3D_FP_VIEWORDER_UNUSED = 0x10000000,
+ HFI_S3D_FP_VIEWORDER_UNUSED = 0x10000000
};
enum HFI_S3D_FP_FLIP {
@@ -1512,18 +762,22 @@
HFI_S3D_FP_FLIP_LEFT_VERT,
HFI_S3D_FP_FLIP_RIGHT_HORIZ,
HFI_S3D_FP_FLIP_RIGHT_VERT,
- HFI_S3D_FP_FLIP_UNUSED = 0x10000000,
+ HFI_S3D_FP_FLIP_UNUSED = 0x10000000
};
struct hfi_extradata_s3d_frame_packing_payload {
- enum HFI_S3D_FP_LAYOUT eLayout;
- enum HFI_S3D_FP_VIEW_ORDER eOrder;
- enum HFI_S3D_FP_FLIP eFlip;
- int bQuinCunx;
- u32 nLeftViewLumaSiteX;
- u32 nLeftViewLumaSiteY;
- u32 nRightViewLumaSiteX;
- u32 nRightViewLumaSiteY;
+ enum HFI_S3D_FP_LAYOUT layout;
+ enum HFI_S3D_FP_VIEW_ORDER order;
+ enum HFI_S3D_FP_FLIP flip;
+ int quin_cunx;
+ u32 left_view_luma_site_x;
+ u32 left_view_luma_site_y;
+ u32 right_view_luma_site_x;
+ u32 right_view_luma_site_y;
+};
+
+struct hfi_extradata_interlace_video_payload {
+ u32 format;
};
struct hfi_extradata_num_concealed_mb_payload {
@@ -1615,4 +869,4 @@
/* Interrupt Processing:*/
void vidc_hal_response_handler(struct hal_device *device);
-#endif /*__VIDC_HAL_H__ */
+#endif
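
The diff above replaces the old fixed-value HFI enums with IDs composed from a domain base, an architecture offset, and a small ordinal. The following standalone sketch is an editorial illustration and not part of the patch: it mirrors a few macro values taken verbatim from this header and from the new vidc_hal_helper.h added later in the patch, only to show the arithmetic the composed #defines expand to.

/*
 * Illustrative only: mirrors base/offset values from the patch below
 * (HFI_DOMAIN_BASE_*, HFI_ARCH_OX_OFFSET) to show how the new scheme
 * composes numeric IDs, replacing the old fixed-value enums.
 */
#include <stdio.h>

#define HFI_DOMAIN_BASE_COMMON	0x00000000
#define HFI_DOMAIN_BASE_VDEC	0x01000000
#define HFI_ARCH_OX_OFFSET	0x00200000

#define HFI_CMD_SESSION_OX_START \
	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x1000)
#define HFI_CMD_SESSION_START	(HFI_CMD_SESSION_OX_START + 0x002)

#define HFI_PROPERTY_PARAM_VDEC_OX_START \
	(HFI_DOMAIN_BASE_VDEC + HFI_ARCH_OX_OFFSET + 0x3000)
#define HFI_PROPERTY_PARAM_VDEC_OUTPUT_ORDER \
	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x005)

int main(void)
{
	/* Expands to 0x00201002 and 0x01203005 with the values above. */
	printf("HFI_CMD_SESSION_START                = 0x%08x\n",
	       HFI_CMD_SESSION_START);
	printf("HFI_PROPERTY_PARAM_VDEC_OUTPUT_ORDER = 0x%08x\n",
	       HFI_PROPERTY_PARAM_VDEC_OUTPUT_ORDER);
	return 0;
}
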
diff --git a/drivers/media/video/msm_vidc/vidc_hal_api.h b/drivers/media/video/msm_vidc/vidc_hal_api.h
index 036091b..b3ea92a 100644
--- a/drivers/media/video/msm_vidc/vidc_hal_api.h
+++ b/drivers/media/video/msm_vidc/vidc_hal_api.h
@@ -40,6 +40,12 @@
#define HAL_BUFFERFLAG_READONLY 0x00000200
#define HAL_BUFFERFLAG_ENDOFSUBFRAME 0x00000400
+#define HAL_DEBUG_MSG_LOW 0x00000001
+#define HAL_DEBUG_MSG_MEDIUM 0x00000002
+#define HAL_DEBUG_MSG_HIGH 0x00000004
+#define HAL_DEBUG_MSG_ERROR 0x00000008
+#define HAL_DEBUG_MSG_FATAL 0x00000010
+
enum vidc_status {
VIDC_ERR_NONE = 0x0,
VIDC_ERR_FAIL = 0x80000000,
@@ -242,11 +248,12 @@
enum hal_h264_profile {
HAL_H264_PROFILE_BASELINE = 0x00000001,
HAL_H264_PROFILE_MAIN = 0x00000002,
- HAL_H264_PROFILE_EXTENDED = 0x00000004,
- HAL_H264_PROFILE_HIGH = 0x00000008,
+ HAL_H264_PROFILE_HIGH = 0x00000004,
+ HAL_H264_PROFILE_EXTENDED = 0x00000008,
HAL_H264_PROFILE_HIGH10 = 0x00000010,
HAL_H264_PROFILE_HIGH422 = 0x00000020,
HAL_H264_PROFILE_HIGH444 = 0x00000040,
+ HAL_H264_PROFILE_CONSTRAINED_HIGH = 0x00000080,
HAL_UNUSED_H264_PROFILE = 0x10000000,
};
@@ -541,7 +548,7 @@
struct hal_h264_db_control {
enum hal_h264_db_mode mode;
int slice_alpha_offset;
- int slicebeta_offset;
+ int slice_beta_offset;
};
struct hal_temporal_spatial_tradeoff {
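
The HAL_DEBUG_MSG_* values introduced in this file are single-bit constants, so they can plausibly be OR-ed into one mask such as the u32 debug_config carried by struct hfi_debug_config in the helper header below; that usage is an assumption made for illustration, not something this patch spells out. A minimal sketch under that assumption:

/* Minimal sketch, assuming HAL_DEBUG_MSG_* form a bitmask; the values
 * are copied from the patch, the usage itself is an assumption. */
#include <stdio.h>

#define HAL_DEBUG_MSG_LOW	0x00000001
#define HAL_DEBUG_MSG_MEDIUM	0x00000002
#define HAL_DEBUG_MSG_HIGH	0x00000004
#define HAL_DEBUG_MSG_ERROR	0x00000008
#define HAL_DEBUG_MSG_FATAL	0x00000010

int main(void)
{
	/* Enable only error and fatal messages in a single mask. */
	unsigned int debug_config = HAL_DEBUG_MSG_ERROR | HAL_DEBUG_MSG_FATAL;

	if (debug_config & HAL_DEBUG_MSG_ERROR)
		printf("error messages enabled (mask 0x%08x)\n", debug_config);
	return 0;
}
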
diff --git a/drivers/media/video/msm_vidc/vidc_hal_helper.h b/drivers/media/video/msm_vidc/vidc_hal_helper.h
new file mode 100644
index 0000000..d4e2619
--- /dev/null
+++ b/drivers/media/video/msm_vidc/vidc_hal_helper.h
@@ -0,0 +1,832 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __H_VIDC_HAL_HELPER_H__
+#define __H_VIDC_HAL_HELPER_H__
+
+#define HFI_NV12_IL_CALC_Y_STRIDE(stride, frame_width, stride_multiple) \
+ { stride = (frame_width + stride_multiple - 1) & \
+ (0xffffffff - (stride_multiple - 1))}
+
+#define HFI_NV12_IL_CALC_Y_BUFHEIGHT(buf_height, frame_height,\
+ min_buf_height_multiple) \
+ { buf_height = (frame_height + min_buf_height_multiple - 1) & \
+ (0xffffffff - (min_buf_height_multiple - 1)) }
+
+#define HFI_NV12_IL_CALC_UV_STRIDE(stride, frame_width, stride_multiple) \
+ { stride = ((((frame_width + 1) >> 1) + stride_multiple - 1) & \
+ (0xffffffff - (stride_multiple - 1))) << 1 }
+
+#define HFI_NV12_IL_CALC_UV_BUFHEIGHT(buf_height, frame_height,\
+ min_buf_height_multiple) \
+ { buf_height = ((((frame_height + 1) >> 1) + \
+ min_buf_height_multiple - 1) & (0xffffffff - \
+ (min_buf_height_multiple - 1))) }
+
+#define HFI_NV12_IL_CALC_BUF_SIZE(buf_size, y_buf_size, y_stride, \
+ y_buf_height, uv_buf_size, uv_stride, uv_buf_height, uv_alignment) \
+ { y_buf_size = (y_stride * y_buf_height); \
+ uv_buf_size = (uv_stride * uv_buf_height) + uv_alignment; \
+ buf_size = y_buf_size + uv_buf_size }
+
+#define HFI_YUYV_CALC_STRIDE(stride, frame_width, stride_multiple) \
+ { stride = ((frame_width << 1) + stride_multiple - 1) & \
+ (0xffffffff - (stride_multiple - 1)) }
+
+#define HFI_YUYV_CALC_BUFHEIGHT(buf_height, frame_height,\
+ min_buf_height_multiple) \
+ { buf_height = ((frame_height + min_buf_height_multiple - 1) & \
+ (0xffffffff - (min_buf_height_multiple - 1))) }
+
+#define HFI_YUYV_CALC_BUF_SIZE(buf_size, stride, buf_height) \
+ { buf_size = stride * buf_height }
+
+#define HFI_RGB888_CALC_STRIDE(stride, frame_width, stride_multiple) \
+ { stride = ((frame_width * 3) + stride_multiple - 1) & \
+ (0xffffffff - (stride_multiple - 1)) }
+
+#define HFI_RGB888_CALC_BUFHEIGHT(buf_height, frame_height,\
+ min_buf_height_multiple) \
+ { buf_height = ((frame_height + min_buf_height_multiple - 1) & \
+ (0xffffffff - (min_buf_height_multiple - 1))) }
+
+#define HFI_RGB888_CALC_BUF_SIZE(buf_size, stride, buf_height) \
+ { buf_size = (stride * buf_height) }
+
+#define HFI_COMMON_BASE (0)
+#define HFI_OX_BASE (0x01000000)
+
+#define HFI_VIDEO_DOMAIN_ENCODER (HFI_COMMON_BASE + 0x1)
+#define HFI_VIDEO_DOMAIN_DECODER (HFI_COMMON_BASE + 0x2)
+#define HFI_VIDEO_DOMAIN_VPE (HFI_COMMON_BASE + 0x3)
+#define HFI_VIDEO_DOMAIN_MBI (HFI_COMMON_BASE + 0x4)
+
+#define HFI_DOMAIN_BASE_COMMON (HFI_COMMON_BASE + 0)
+#define HFI_DOMAIN_BASE_VDEC (HFI_COMMON_BASE + 0x01000000)
+#define HFI_DOMAIN_BASE_VENC (HFI_COMMON_BASE + 0x02000000)
+#define HFI_DOMAIN_BASE_VPE (HFI_COMMON_BASE + 0x03000000)
+
+#define HFI_VIDEO_ARCH_OX (HFI_COMMON_BASE + 0x1)
+
+#define HFI_ARCH_COMMON_OFFSET (0)
+#define HFI_ARCH_OX_OFFSET (0x00200000)
+
+#define HFI_ERR_NONE HFI_COMMON_BASE
+#define HFI_ERR_SYS_FATAL (HFI_COMMON_BASE + 0x1)
+#define HFI_ERR_SYS_INVALID_PARAMETER (HFI_COMMON_BASE + 0x2)
+#define HFI_ERR_SYS_VERSION_MISMATCH (HFI_COMMON_BASE + 0x3)
+#define HFI_ERR_SYS_INSUFFICIENT_RESOURCES (HFI_COMMON_BASE + 0x4)
+#define HFI_ERR_SYS_MAX_SESSIONS_REACHED (HFI_COMMON_BASE + 0x5)
+#define HFI_ERR_SYS_UNSUPPORTED_CODEC (HFI_COMMON_BASE + 0x6)
+#define HFI_ERR_SYS_SESSION_IN_USE (HFI_COMMON_BASE + 0x7)
+#define HFI_ERR_SYS_SESSION_ID_OUT_OF_RANGE (HFI_COMMON_BASE + 0x8)
+#define HFI_ERR_SYS_UNSUPPORTED_DOMAIN (HFI_COMMON_BASE + 0x9)
+
+#define HFI_ERR_SESSION_FATAL (HFI_COMMON_BASE + 0x1001)
+#define HFI_ERR_SESSION_INVALID_PARAMETER (HFI_COMMON_BASE + 0x1002)
+#define HFI_ERR_SESSION_BAD_POINTER (HFI_COMMON_BASE + 0x1003)
+#define HFI_ERR_SESSION_INVALID_SESSION_ID (HFI_COMMON_BASE + 0x1004)
+#define HFI_ERR_SESSION_INVALID_STREAM_ID (HFI_COMMON_BASE + 0x1005)
+#define HFI_ERR_SESSION_INCORRECT_STATE_OPERATION \
+ (HFI_COMMON_BASE + 0x1006)
+#define HFI_ERR_SESSION_UNSUPPORTED_PROPERTY (HFI_COMMON_BASE + 0x1007)
+
+#define HFI_ERR_SESSION_UNSUPPORTED_SETTING (HFI_COMMON_BASE + 0x1008)
+
+#define HFI_ERR_SESSION_INSUFFICIENT_RESOURCES (HFI_COMMON_BASE + 0x1009)
+
+#define HFI_ERR_SESSION_STREAM_CORRUPT_OUTPUT_STALLED \
+ (HFI_COMMON_BASE + 0x100A)
+
+#define HFI_ERR_SESSION_STREAM_CORRUPT (HFI_COMMON_BASE + 0x100B)
+#define HFI_ERR_SESSION_ENC_OVERFLOW (HFI_COMMON_BASE + 0x100C)
+
+#define HFI_EVENT_SYS_ERROR (HFI_COMMON_BASE + 0x1)
+#define HFI_EVENT_SESSION_ERROR (HFI_COMMON_BASE + 0x2)
+
+#define HFI_VIDEO_CODEC_H264 0x00000002
+#define HFI_VIDEO_CODEC_H263 0x00000004
+#define HFI_VIDEO_CODEC_MPEG1 0x00000008
+#define HFI_VIDEO_CODEC_MPEG2 0x00000010
+#define HFI_VIDEO_CODEC_MPEG4 0x00000020
+#define HFI_VIDEO_CODEC_DIVX_311 0x00000040
+#define HFI_VIDEO_CODEC_DIVX 0x00000080
+#define HFI_VIDEO_CODEC_VC1 0x00000100
+#define HFI_VIDEO_CODEC_SPARK 0x00000200
+#define HFI_VIDEO_CODEC_VP8 0x00001000
+
+#define HFI_H264_PROFILE_BASELINE 0x00000001
+#define HFI_H264_PROFILE_MAIN 0x00000002
+#define HFI_H264_PROFILE_HIGH 0x00000004
+#define HFI_H264_PROFILE_STEREO_HIGH 0x00000008
+#define HFI_H264_PROFILE_MULTIVIEW_HIGH 0x00000010
+#define HFI_H264_PROFILE_CONSTRAINED_HIGH 0x00000020
+
+#define HFI_H264_LEVEL_1 0x00000001
+#define HFI_H264_LEVEL_1b 0x00000002
+#define HFI_H264_LEVEL_11 0x00000004
+#define HFI_H264_LEVEL_12 0x00000008
+#define HFI_H264_LEVEL_13 0x00000010
+#define HFI_H264_LEVEL_2 0x00000020
+#define HFI_H264_LEVEL_21 0x00000040
+#define HFI_H264_LEVEL_22 0x00000080
+#define HFI_H264_LEVEL_3 0x00000100
+#define HFI_H264_LEVEL_31 0x00000200
+#define HFI_H264_LEVEL_32 0x00000400
+#define HFI_H264_LEVEL_4 0x00000800
+#define HFI_H264_LEVEL_41 0x00001000
+#define HFI_H264_LEVEL_42 0x00002000
+#define HFI_H264_LEVEL_5 0x00004000
+#define HFI_H264_LEVEL_51 0x00008000
+
+#define HFI_H263_PROFILE_BASELINE 0x00000001
+
+#define HFI_H263_LEVEL_10 0x00000001
+#define HFI_H263_LEVEL_20 0x00000002
+#define HFI_H263_LEVEL_30 0x00000004
+#define HFI_H263_LEVEL_40 0x00000008
+#define HFI_H263_LEVEL_45 0x00000010
+#define HFI_H263_LEVEL_50 0x00000020
+#define HFI_H263_LEVEL_60 0x00000040
+#define HFI_H263_LEVEL_70 0x00000080
+
+#define HFI_MPEG2_PROFILE_SIMPLE 0x00000001
+#define HFI_MPEG2_PROFILE_MAIN 0x00000002
+#define HFI_MPEG2_PROFILE_422 0x00000004
+#define HFI_MPEG2_PROFILE_SNR 0x00000008
+#define HFI_MPEG2_PROFILE_SPATIAL 0x00000010
+#define HFI_MPEG2_PROFILE_HIGH 0x00000020
+
+#define HFI_MPEG2_LEVEL_LL 0x00000001
+#define HFI_MPEG2_LEVEL_ML 0x00000002
+#define HFI_MPEG2_LEVEL_H14 0x00000004
+#define HFI_MPEG2_LEVEL_HL 0x00000008
+
+#define HFI_MPEG4_PROFILE_SIMPLE 0x00000001
+#define HFI_MPEG4_PROFILE_ADVANCEDSIMPLE 0x00000002
+
+#define HFI_MPEG4_LEVEL_0 0x00000001
+#define HFI_MPEG4_LEVEL_0b 0x00000002
+#define HFI_MPEG4_LEVEL_1 0x00000004
+#define HFI_MPEG4_LEVEL_2 0x00000008
+#define HFI_MPEG4_LEVEL_3 0x00000010
+#define HFI_MPEG4_LEVEL_4 0x00000020
+#define HFI_MPEG4_LEVEL_4a 0x00000040
+#define HFI_MPEG4_LEVEL_5 0x00000080
+#define HFI_MPEG4_LEVEL_6 0x00000100
+#define HFI_MPEG4_LEVEL_7 0x00000200
+#define HFI_MPEG4_LEVEL_8 0x00000400
+#define HFI_MPEG4_LEVEL_9 0x00000800
+#define HFI_MPEG4_LEVEL_3b 0x00001000
+
+#define HFI_VC1_PROFILE_SIMPLE 0x00000001
+#define HFI_VC1_PROFILE_MAIN 0x00000002
+#define HFI_VC1_PROFILE_ADVANCED 0x00000004
+
+#define HFI_VC1_LEVEL_LOW 0x00000001
+#define HFI_VC1_LEVEL_MEDIUM 0x00000002
+#define HFI_VC1_LEVEL_HIGH 0x00000004
+#define HFI_VC1_LEVEL_0 0x00000008
+#define HFI_VC1_LEVEL_1 0x00000010
+#define HFI_VC1_LEVEL_2 0x00000020
+#define HFI_VC1_LEVEL_3 0x00000040
+#define HFI_VC1_LEVEL_4 0x00000080
+
+#define HFI_VPX_PROFILE_SIMPLE 0x00000001
+#define HFI_VPX_PROFILE_ADVANCED 0x00000002
+#define HFI_VPX_PROFILE_VERSION_0 0x00000004
+#define HFI_VPX_PROFILE_VERSION_1 0x00000008
+#define HFI_VPX_PROFILE_VERSION_2 0x00000010
+#define HFI_VPX_PROFILE_VERSION_3 0x00000020
+
+#define HFI_DIVX_FORMAT_4 (HFI_COMMON_BASE + 0x1)
+#define HFI_DIVX_FORMAT_5 (HFI_COMMON_BASE + 0x2)
+#define HFI_DIVX_FORMAT_6 (HFI_COMMON_BASE + 0x3)
+
+#define HFI_DIVX_PROFILE_QMOBILE 0x00000001
+#define HFI_DIVX_PROFILE_MOBILE 0x00000002
+#define HFI_DIVX_PROFILE_MT 0x00000004
+#define HFI_DIVX_PROFILE_HT 0x00000008
+#define HFI_DIVX_PROFILE_HD 0x00000010
+
+#define HFI_BUFFER_INPUT (HFI_COMMON_BASE + 0x1)
+#define HFI_BUFFER_OUTPUT (HFI_COMMON_BASE + 0x2)
+#define HFI_BUFFER_OUTPUT2 (HFI_COMMON_BASE + 0x3)
+#define HFI_BUFFER_INTERNAL_PERSIST (HFI_COMMON_BASE + 0x4)
+
+struct hfi_buffer_info {
+ u32 buffer_addr;
+ u32 extra_data_addr;
+};
+
+#define HFI_PROPERTY_SYS_COMMON_START \
+ (HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x0000)
+#define HFI_PROPERTY_SYS_DEBUG_CONFIG \
+ (HFI_PROPERTY_SYS_COMMON_START + 0x001)
+#define HFI_PROPERTY_SYS_RESOURCE_OCMEM_REQUIREMENT_INFO \
+(HFI_PROPERTY_SYS_COMMON_START + 0x002)
+#define HFI_PROPERTY_PARAM_COMMON_START \
+ (HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x1000)
+#define HFI_PROPERTY_PARAM_FRAME_SIZE \
+ (HFI_PROPERTY_PARAM_COMMON_START + 0x001)
+#define HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO \
+ (HFI_PROPERTY_PARAM_COMMON_START + 0x002)
+#define HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT \
+ (HFI_PROPERTY_PARAM_COMMON_START + 0x003)
+#define HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED \
+ (HFI_PROPERTY_PARAM_COMMON_START + 0x004)
+#define HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT \
+ (HFI_PROPERTY_PARAM_COMMON_START + 0x005)
+#define HFI_PROPERTY_PARAM_PROFILE_LEVEL_SUPPORTED \
+ (HFI_PROPERTY_PARAM_COMMON_START + 0x006)
+#define HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED \
+ (HFI_PROPERTY_PARAM_COMMON_START + 0x007)
+#define HFI_PROPERTY_PARAM_PROPERTIES_SUPPORTED \
+ (HFI_PROPERTY_PARAM_COMMON_START + 0x008)
+#define HFI_PROPERTY_PARAM_CODEC_SUPPORTED \
+ (HFI_PROPERTY_PARAM_COMMON_START + 0x009)
+#define HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SUPPORTED \
+ (HFI_PROPERTY_PARAM_COMMON_START + 0x00A)
+#define HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SELECT \
+ (HFI_PROPERTY_PARAM_COMMON_START + 0x00B)
+#define HFI_PROPERTY_PARAM_MULTI_VIEW_FORMAT \
+ (HFI_PROPERTY_PARAM_COMMON_START + 0x00C)
+
+#define HFI_PROPERTY_CONFIG_COMMON_START \
+ (HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x2000)
+#define HFI_PROPERTY_CONFIG_FRAME_RATE \
+ (HFI_PROPERTY_CONFIG_COMMON_START + 0x001)
+
+#define HFI_PROPERTY_PARAM_VDEC_COMMON_START \
+ (HFI_DOMAIN_BASE_VDEC + HFI_ARCH_COMMON_OFFSET + 0x3000)
+#define HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM \
+ (HFI_PROPERTY_PARAM_VDEC_COMMON_START + 0x001)
+
+#define HFI_PROPERTY_CONFIG_VDEC_COMMON_START \
+ (HFI_DOMAIN_BASE_VDEC + HFI_ARCH_COMMON_OFFSET + 0x4000)
+
+#define HFI_PROPERTY_PARAM_VENC_COMMON_START \
+ (HFI_DOMAIN_BASE_VENC + HFI_ARCH_COMMON_OFFSET + 0x5000)
+#define HFI_PROPERTY_PARAM_VENC_SLICE_DELIVERY_MODE \
+ (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x001)
+#define HFI_PROPERTY_PARAM_VENC_H264_ENTROPY_CONTROL \
+ (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x002)
+#define HFI_PROPERTY_PARAM_VENC_H264_DEBLOCK_CONTROL \
+ (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x003)
+#define HFI_PROPERTY_PARAM_VENC_RATE_CONTROL \
+ (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x004)
+#define HFI_PROPERTY_PARAM_VENC_TEMPORAL_SPATIAL_TRADEOFF \
+ (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x005)
+#define HFI_PROPERTY_PARAM_VENC_QUALITY_VS_SPEED \
+ (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x010)
+#define HFI_PROPERTY_PARAM_VENC_SESSION_QP \
+ (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x006)
+#define HFI_PROPERTY_PARAM_VENC_MPEG4_AC_PREDICTION \
+ (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x007)
+#define HFI_PROPERTY_PARAM_VENC_MPEG4_DATA_PARTITIONING \
+ (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x008)
+#define HFI_PROPERTY_PARAM_VENC_MPEG4_TIME_RESOLUTION \
+ (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x009)
+#define HFI_PROPERTY_PARAM_VENC_MPEG4_SHORT_HEADER \
+ (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00A)
+#define HFI_PROPERTY_PARAM_VENC_MPEG4_HEADER_EXTENSION \
+ (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00B)
+#define HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_INFO \
+ (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00C)
+#define HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH \
+ (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00D)
+#define HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_CONTROL \
+ (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00E)
+#define HFI_PROPERTY_PARAM_VENC_VBVBUFFER_SIZE \
+ (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00F)
+#define HFI_PROPERTY_PARAM_VENC_MPEG4_QPEL \
+ (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x011)
+#define HFI_PROPERTY_PARAM_VENC_ADVANCED \
+ (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x012)
+#define HFI_PROPERTY_PARAM_VENC_SYNC_FRAME_SEQUENCE_HEADER \
+ (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x013)
+#define HFI_PROPERTY_PARAM_VENC_H264_IDR_S3D_FRAME_PACKING_NAL \
+ (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x014)
+
+#define HFI_PROPERTY_CONFIG_VENC_COMMON_START \
+ (HFI_DOMAIN_BASE_VENC + HFI_ARCH_COMMON_OFFSET + 0x6000)
+#define HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE \
+ (HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x001)
+#define HFI_PROPERTY_CONFIG_VENC_IDR_PERIOD \
+ (HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x002)
+#define HFI_PROPERTY_CONFIG_VENC_INTRA_PERIOD \
+ (HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x003)
+#define HFI_PROPERTY_CONFIG_VENC_REQUEST_SYNC_FRAME \
+ (HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x004)
+#define HFI_PROPERTY_CONFIG_VENC_TIMESTAMP_SCALE \
+ (HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x005)
+#define HFI_PROPERTY_CONFIG_VENC_FRAME_QP \
+ (HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x006)
+#define HFI_PROPERTY_CONFIG_VENC_MAX_BITRATE \
+ (HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x007)
+
+#define HFI_PROPERTY_PARAM_VPE_COMMON_START \
+ (HFI_DOMAIN_BASE_VPE + HFI_ARCH_COMMON_OFFSET + 0x7000)
+
+#define HFI_PROPERTY_CONFIG_VPE_COMMON_START \
+ (HFI_DOMAIN_BASE_VPE + HFI_ARCH_COMMON_OFFSET + 0x8000)
+#define HFI_PROPERTY_CONFIG_VPE_DEINTERLACE \
+ (HFI_PROPERTY_CONFIG_VPE_COMMON_START + 0x001)
+#define HFI_PROPERTY_CONFIG_VPE_OPERATIONS \
+ (HFI_PROPERTY_CONFIG_VPE_COMMON_START + 0x002)
+
+struct hfi_bitrate {
+ u32 bit_rate;
+};
+
+#define HFI_CAPABILITY_FRAME_WIDTH (HFI_COMMON_BASE + 0x1)
+#define HFI_CAPABILITY_FRAME_HEIGHT (HFI_COMMON_BASE + 0x2)
+#define HFI_CAPABILITY_MBS_PER_FRAME (HFI_COMMON_BASE + 0x3)
+#define HFI_CAPABILITY_MBS_PER_SECOND (HFI_COMMON_BASE + 0x4)
+#define HFI_CAPABILITY_FRAMERATE (HFI_COMMON_BASE + 0x5)
+#define HFI_CAPABILITY_SCALE_X (HFI_COMMON_BASE + 0x6)
+#define HFI_CAPABILITY_SCALE_Y (HFI_COMMON_BASE + 0x7)
+#define HFI_CAPABILITY_BITRATE (HFI_COMMON_BASE + 0x8)
+
+struct hfi_capability_supported {
+ u32 capability_type;
+ u32 min;
+ u32 max;
+ u32 step_size;
+};
+
+struct hfi_capability_supported_info {
+ u32 num_capabilities;
+ struct hfi_capability_supported rg_data[1];
+};
+
+#define HFI_DEBUG_MSG_LOW 0x00000001
+#define HFI_DEBUG_MSG_MEDIUM 0x00000002
+#define HFI_DEBUG_MSG_HIGH 0x00000004
+#define HFI_DEBUG_MSG_ERROR 0x00000008
+#define HFI_DEBUG_MSG_FATAL 0x00000010
+
+struct hfi_debug_config {
+ u32 debug_config;
+};
+
+struct hfi_enable {
+ int enable;
+};
+
+#define HFI_H264_DB_MODE_DISABLE (HFI_COMMON_BASE + 0x1)
+#define HFI_H264_DB_MODE_SKIP_SLICE_BOUNDARY (HFI_COMMON_BASE + 0x2)
+#define HFI_H264_DB_MODE_ALL_BOUNDARY (HFI_COMMON_BASE + 0x3)
+
+struct hfi_h264_db_control {
+ u32 mode;
+ int slice_alpha_offset;
+ int slice_beta_offset;
+};
+
+#define HFI_H264_ENTROPY_CAVLC (HFI_COMMON_BASE + 0x1)
+#define HFI_H264_ENTROPY_CABAC (HFI_COMMON_BASE + 0x2)
+
+#define HFI_H264_CABAC_MODEL_0 (HFI_COMMON_BASE + 0x1)
+#define HFI_H264_CABAC_MODEL_1 (HFI_COMMON_BASE + 0x2)
+#define HFI_H264_CABAC_MODEL_2 (HFI_COMMON_BASE + 0x3)
+
+struct hfi_h264_entropy_control {
+ u32 entropy_mode;
+ u32 cabac_model;
+};
+
+struct hfi_frame_rate {
+ u32 buffer_type;
+ u32 frame_rate;
+};
+
+#define HFI_INTRA_REFRESH_NONE (HFI_COMMON_BASE + 0x1)
+#define HFI_INTRA_REFRESH_CYCLIC (HFI_COMMON_BASE + 0x2)
+#define HFI_INTRA_REFRESH_ADAPTIVE (HFI_COMMON_BASE + 0x3)
+#define HFI_INTRA_REFRESH_CYCLIC_ADAPTIVE (HFI_COMMON_BASE + 0x4)
+#define HFI_INTRA_REFRESH_RANDOM (HFI_COMMON_BASE + 0x5)
+
+struct hfi_intra_refresh {
+ u32 mode;
+ u32 air_mbs;
+ u32 air_ref;
+ u32 cir_mbs;
+};
+
+struct hfi_idr_period {
+ u32 idr_period;
+};
+
+struct hfi_intra_period {
+ u32 pframes;
+ u32 bframes;
+};
+
+struct hfi_timestamp_scale {
+ u32 time_stamp_scale;
+};
+
+struct hfi_mpeg4_header_extension {
+ u32 header_extension;
+};
+
+struct hfi_mpeg4_time_resolution {
+ u32 time_increment_resolution;
+};
+
+struct hfi_multi_stream {
+ u32 buffer_type;
+ u32 enable;
+ u32 width;
+ u32 height;
+};
+
+struct hfi_multi_view_format {
+ u32 views;
+ u32 rg_view_order[1];
+};
+
+#define HFI_MULTI_SLICE_OFF (HFI_COMMON_BASE + 0x1)
+#define HFI_MULTI_SLICE_BY_MB_COUNT (HFI_COMMON_BASE + 0x2)
+#define HFI_MULTI_SLICE_BY_BYTE_COUNT (HFI_COMMON_BASE + 0x3)
+#define HFI_MULTI_SLICE_GOB (HFI_COMMON_BASE + 0x4)
+
+struct hfi_multi_slice_control {
+ u32 multi_slice;
+ u32 slice_size;
+};
+
+#define HFI_NAL_FORMAT_STARTCODES 0x00000001
+#define HFI_NAL_FORMAT_ONE_NAL_PER_BUFFER 0x00000002
+#define HFI_NAL_FORMAT_ONE_BYTE_LENGTH 0x00000004
+#define HFI_NAL_FORMAT_TWO_BYTE_LENGTH 0x00000008
+#define HFI_NAL_FORMAT_FOUR_BYTE_LENGTH 0x00000010
+
+struct hfi_nal_stream_format_supported {
+ u32 nal_stream_format_supported;
+};
+
+#define HFI_PICTURE_TYPE_I 0x01
+#define HFI_PICTURE_TYPE_P 0x02
+#define HFI_PICTURE_TYPE_B 0x04
+#define HFI_PICTURE_TYPE_IDR 0x08
+
+struct hfi_profile_level {
+ u32 profile;
+ u32 level;
+};
+
+struct hfi_profile_level_supported {
+ u32 profile_count;
+ struct hfi_profile_level rg_profile_level[1];
+};
+
+struct hfi_quantization {
+ u32 qp_i;
+ u32 qp_p;
+ u32 qp_b;
+ u32 layer_id;
+};
+
+struct hfi_temporal_spatial_tradeoff {
+ u32 ts_factor;
+};
+
+struct hfi_frame_size {
+ u32 buffer_type;
+ u32 width;
+ u32 height;
+};
+
+#define HFI_COLOR_FORMAT_MONOCHROME (HFI_COMMON_BASE + 0x1)
+#define HFI_COLOR_FORMAT_NV12 (HFI_COMMON_BASE + 0x2)
+#define HFI_COLOR_FORMAT_NV21 (HFI_COMMON_BASE + 0x3)
+#define HFI_COLOR_FORMAT_NV12_4x4TILE (HFI_COMMON_BASE + 0x4)
+#define HFI_COLOR_FORMAT_NV21_4x4TILE (HFI_COMMON_BASE + 0x5)
+#define HFI_COLOR_FORMAT_YUYV (HFI_COMMON_BASE + 0x6)
+#define HFI_COLOR_FORMAT_YVYU (HFI_COMMON_BASE + 0x7)
+#define HFI_COLOR_FORMAT_UYVY (HFI_COMMON_BASE + 0x8)
+#define HFI_COLOR_FORMAT_VYUY (HFI_COMMON_BASE + 0x9)
+#define HFI_COLOR_FORMAT_RGB565 (HFI_COMMON_BASE + 0xA)
+#define HFI_COLOR_FORMAT_BGR565 (HFI_COMMON_BASE + 0xB)
+#define HFI_COLOR_FORMAT_RGB888 (HFI_COMMON_BASE + 0xC)
+#define HFI_COLOR_FORMAT_BGR888 (HFI_COMMON_BASE + 0xD)
+
+struct hfi_uncompressed_format_select {
+ u32 buffer_type;
+ u32 format;
+};
+
+struct hfi_uncompressed_format_supported {
+ u32 buffer_type;
+ u32 format_entries;
+ u32 rg_format_info[1];
+};
+
+struct hfi_uncompressed_plane_actual {
+ int actual_stride;
+ u32 actual_plane_buffer_height;
+};
+
+struct hfi_uncompressed_plane_actual_info {
+ u32 buffer_type;
+ u32 num_planes;
+ struct hfi_uncompressed_plane_actual rg_plane_format[1];
+};
+
+struct hfi_uncompressed_plane_constraints {
+ u32 stride_multiples;
+ u32 max_stride;
+ u32 min_plane_buffer_height_multiple;
+ u32 buffer_alignment;
+};
+
+struct hfi_uncompressed_plane_info {
+ u32 format;
+ u32 num_planes;
+ struct hfi_uncompressed_plane_constraints rg_plane_format[1];
+};
+
+struct hfi_codec_supported {
+ u32 decoder_codec_supported;
+ u32 encoder_codec_supported;
+};
+
+struct hfi_properties_supported {
+ u32 num_properties;
+ u32 rg_properties[1];
+};
+
+#define HFI_ROTATE_NONE (HFI_COMMON_BASE + 0x1)
+#define HFI_ROTATE_90 (HFI_COMMON_BASE + 0x2)
+#define HFI_ROTATE_180 (HFI_COMMON_BASE + 0x3)
+#define HFI_ROTATE_270 (HFI_COMMON_BASE + 0x4)
+
+#define HFI_FLIP_NONE (HFI_COMMON_BASE + 0x1)
+#define HFI_FLIP_HORIZONTAL (HFI_COMMON_BASE + 0x2)
+#define HFI_FLIP_VERTICAL (HFI_COMMON_BASE + 0x3)
+
+struct hfi_operations {
+ u32 rotate;
+ u32 flip;
+};
+
+#define HFI_RESOURCE_OCMEM 0x00000001
+
+struct hfi_resource_ocmem {
+ u32 size;
+ u8 *mem;
+};
+
+struct hfi_resource_ocmem_requirement {
+ u32 session_domain;
+ u32 width;
+ u32 height;
+ u32 size;
+};
+
+struct hfi_resource_ocmem_requirement_info {
+ u32 num_entries;
+ struct hfi_resource_ocmem_requirement rg_requirements[1];
+};
+
+struct hfi_venc_config_advanced {
+ u8 pipe2d;
+ u8 hw_mode;
+ u8 low_delay_enforce;
+ int h264_constrain_intra_pred;
+ int h264_transform_8x8_flag;
+ int mpeg4_qpel_enable;
+ int multi_refp_en;
+ int qmatrix_en;
+ u8 vpp_info_packet_mode;
+ u8 ref_tile_mode;
+ u8 bitstream_flush_mode;
+ u32 ds_display_frame_width;
+ u32 ds_display_frame_height;
+ u32 perf_tune_param_ptr;
+ u32 input_x_offset;
+ u32 input_y_offset;
+ u32 input_roi_width;
+ u32 input_roi_height;
+ u32 vsp_fifo_dma_sel;
+ u32 h264_num_ref_frames;
+};
+
+#define HFI_CMD_SYS_COMMON_START \
+ (HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x0000)
+#define HFI_CMD_SYS_INIT (HFI_CMD_SYS_COMMON_START + 0x001)
+#define HFI_CMD_SYS_PC_PREP (HFI_CMD_SYS_COMMON_START + 0x002)
+#define HFI_CMD_SYS_SET_RESOURCE (HFI_CMD_SYS_COMMON_START + 0x003)
+#define HFI_CMD_SYS_RELEASE_RESOURCE (HFI_CMD_SYS_COMMON_START + 0x004)
+#define HFI_CMD_SYS_SET_PROPERTY (HFI_CMD_SYS_COMMON_START + 0x005)
+#define HFI_CMD_SYS_GET_PROPERTY (HFI_CMD_SYS_COMMON_START + 0x006)
+#define HFI_CMD_SYS_SESSION_INIT (HFI_CMD_SYS_COMMON_START + 0x007)
+#define HFI_CMD_SYS_SESSION_END (HFI_CMD_SYS_COMMON_START + 0x008)
+#define HFI_CMD_SYS_SET_BUFFERS (HFI_CMD_SYS_COMMON_START + 0x009)
+
+#define HFI_CMD_SESSION_COMMON_START \
+ (HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x1000)
+#define HFI_CMD_SESSION_SET_PROPERTY \
+ (HFI_CMD_SESSION_COMMON_START + 0x001)
+#define HFI_CMD_SESSION_SET_BUFFERS \
+ (HFI_CMD_SESSION_COMMON_START + 0x002)
+#define HFI_CMD_SESSION_GET_SEQUENCE_HEADER \
+ (HFI_CMD_SESSION_COMMON_START + 0x003)
+
+#define HFI_MSG_SYS_COMMON_START \
+ (HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x0000)
+#define HFI_MSG_SYS_INIT_DONE (HFI_MSG_SYS_COMMON_START + 0x1)
+#define HFI_MSG_SYS_PC_PREP_DONE (HFI_MSG_SYS_COMMON_START + 0x2)
+#define HFI_MSG_SYS_RELEASE_RESOURCE (HFI_MSG_SYS_COMMON_START + 0x3)
+#define HFI_MSG_SYS_DEBUG (HFI_MSG_SYS_COMMON_START + 0x4)
+#define HFI_MSG_SYS_SESSION_INIT_DONE (HFI_MSG_SYS_COMMON_START + 0x6)
+#define HFI_MSG_SYS_SESSION_END_DONE (HFI_MSG_SYS_COMMON_START + 0x7)
+
+#define HFI_MSG_SESSION_COMMON_START \
+ (HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x1000)
+#define HFI_MSG_EVENT_NOTIFY (HFI_MSG_SESSION_COMMON_START + 0x1)
+#define HFI_MSG_SESSION_GET_SEQUENCE_HEADER_DONE \
+ (HFI_MSG_SESSION_COMMON_START + 0x2)
+
+struct vidc_hal_msg_pkt_hdr {
+ u32 size;
+ u32 packet;
+};
+
+struct vidc_hal_session_cmd_pkt {
+ u32 size;
+ u32 packet_type;
+ u32 session_id;
+};
+
+struct hfi_cmd_sys_init_packet {
+ u32 size;
+ u32 packet_type;
+ u32 arch_type;
+};
+
+struct hfi_cmd_sys_pc_prep_packet {
+ u32 size;
+ u32 packet_type;
+};
+
+struct hfi_cmd_sys_set_resource_packet {
+ u32 size;
+ u32 packet_type;
+ u32 resource_handle;
+ u32 resource_type;
+ u32 rg_resource_data[1];
+};
+
+struct hfi_cmd_sys_release_resource_packet {
+ u32 size;
+ u32 packet_type;
+ u32 resource_type;
+ u32 resource_handle;
+};
+
+struct hfi_cmd_sys_set_property_packet {
+ u32 size;
+ u32 packet_type;
+ u32 num_properties;
+ u32 rg_property_data[1];
+};
+
+struct hfi_cmd_sys_get_property_packet {
+ u32 size;
+ u32 packet_type;
+ u32 num_properties;
+ u32 rg_property_data[1];
+};
+
+struct hfi_cmd_sys_session_init_packet {
+ u32 size;
+ u32 packet_type;
+ u32 session_id;
+ u32 session_domain;
+ u32 session_codec;
+};
+
+struct hfi_cmd_sys_session_end_packet {
+ u32 size;
+ u32 packet_type;
+ u32 session_id;
+};
+
+struct hfi_cmd_sys_set_buffers_packet {
+ u32 size;
+ u32 packet_type;
+ u32 buffer_type;
+ u32 buffer_size;
+ u32 num_buffers;
+ u32 rg_buffer_addr[1];
+};
+
+struct hfi_cmd_session_set_property_packet {
+ u32 size;
+ u32 packet_type;
+ u32 session_id;
+ u32 num_properties;
+ u32 rg_property_data[0];
+};
+
+struct hfi_cmd_session_set_buffers_packet {
+ u32 size;
+ u32 packet_type;
+ u32 session_id;
+ u32 buffer_type;
+ u32 buffer_mode;
+ u32 buffer_size;
+ u32 extra_data_size;
+ u32 min_buffer_size;
+ u32 num_buffers;
+ u32 rg_buffer_info[1];
+};
+
+struct hfi_cmd_session_get_sequence_header_packet {
+ u32 size;
+ u32 packet_type;
+ u32 session_id;
+ u32 buffer_len;
+ u8 *packet_buffer;
+};
+
+struct hfi_msg_event_notify_packet {
+ u32 size;
+ u32 packet_type;
+ u32 session_id;
+ u32 event_id;
+ u32 event_data1;
+ u32 event_data2;
+ u32 rg_ext_event_data[1];
+};
+
+struct hfi_msg_sys_init_done_packet {
+ u32 size;
+ u32 packet_type;
+ u32 error_type;
+ u32 num_properties;
+ u32 rg_property_data[1];
+};
+
+struct hfi_msg_sys_pc_prep_done_packet {
+ u32 size;
+ u32 packet_type;
+ u32 error_type;
+};
+
+struct hfi_msg_sys_release_resource_done_packet {
+ u32 size;
+ u32 packet_type;
+ u32 resource_handle;
+ u32 error_type;
+};
+
+struct hfi_msg_sys_session_init_done_packet {
+ u32 size;
+ u32 packet_type;
+ u32 session_id;
+ u32 error_type;
+ u32 num_properties;
+ u32 rg_property_data[1];
+};
+
+struct hfi_msg_sys_session_end_done_packet {
+ u32 size;
+ u32 packet_type;
+ u32 session_id;
+ u32 error_type;
+};
+
+struct hfi_msg_session_get_sequence_header_done_packet {
+ u32 size;
+ u32 packet_type;
+ u32 session_id;
+ u32 error_type;
+ u32 header_len;
+ u8 *sequence_header;
+};
+
+struct hfi_msg_sys_debug_packet {
+ u32 size;
+ u32 packet_type;
+ u32 msg_type;
+ u32 msg_size;
+ u32 time_stamp_hi;
+ u32 time_stamp_lo;
+ u8 rg_msg_data[1];
+};
+
+#endif
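
Most of the command and message packets defined above share one layout: a size word, a packet_type identifier, and often a variable-length tail such as rg_property_data[]. The following is a minimal sketch, not part of this patch, of how a single-property session command might be filled from these definitions; the helper name and the placement of the payload directly after the property id are assumptions for illustration only.

/*
 * Illustrative sketch only -- not part of this patch. It uses the packet and
 * property definitions above; the helper name, the caller-supplied buffer and
 * the exact payload layout are assumptions.
 */
static int fill_target_bitrate_pkt(void *buf, u32 buf_size,
				   u32 session_id, u32 bitrate)
{
	struct hfi_cmd_session_set_property_packet *pkt = buf;
	struct hfi_bitrate *payload;
	u32 size = sizeof(*pkt) + sizeof(u32) + sizeof(*payload);

	if (!buf || buf_size < size)
		return -EINVAL;

	pkt->size = size;
	pkt->packet_type = HFI_CMD_SESSION_SET_PROPERTY;
	pkt->session_id = session_id;
	pkt->num_properties = 1;
	/* property id first, its payload immediately after */
	pkt->rg_property_data[0] = HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE;
	payload = (struct hfi_bitrate *)&pkt->rg_property_data[1];
	payload->bit_rate = bitrate;

	return 0;
}
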
diff --git a/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c b/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c
index 02b9699..b604d0a 100644
--- a/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c
+++ b/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c
@@ -15,7 +15,7 @@
#include <linux/list.h>
#include "vidc_hal.h"
-static enum vidc_status vidc_map_hal_err_status(enum HFI_ERROR hfi_err)
+static enum vidc_status vidc_map_hal_err_status(int hfi_err)
{
enum vidc_status vidc_err;
switch (hfi_err) {
@@ -64,8 +64,6 @@
case HFI_ERR_SESSION_SYNC_FRAME_NOT_DETECTED:
vidc_err = VIDC_ERR_IFRAME_EXPECTED;
break;
- case HFI_ERR_SYS_UNKNOWN:
- case HFI_ERR_SESSION_UNKNOWN:
case HFI_ERR_SESSION_EMPTY_BUFFER_DONE_OUTPUT_PENDING:
default:
vidc_err = VIDC_ERR_FAIL;
@@ -82,7 +80,7 @@
int num_properties_changed;
struct hfi_frame_size frame_sz;
u8 *data_ptr;
- enum HFI_PROPERTY prop_id;
+ int prop_id;
HAL_MSG_LOW("RECEIVED:EVENT_NOTIFY");
if (sizeof(struct hfi_msg_event_notify_packet)
> pkt->size) {
@@ -103,12 +101,11 @@
if (num_properties_changed) {
data_ptr = (u8 *) &pkt->rg_ext_event_data[0];
do {
- prop_id = (enum HFI_PROPERTY) *((u32 *)data_ptr);
+ prop_id = (int) *((u32 *)data_ptr);
switch (prop_id) {
case HFI_PROPERTY_PARAM_FRAME_SIZE:
- frame_sz.buffer =
- (enum HFI_BUFFER)
- *((((u32 *)data_ptr)+1));
+ frame_sz.buffer_type =
+ (int) *((((u32 *)data_ptr)+1));
frame_sz.width =
event_notify.width =
*((((u32 *)data_ptr)+2));
@@ -165,7 +162,7 @@
struct vidc_hal_sys_init_done sys_init_done;
u32 rem_bytes, bytes_read = 0, num_properties;
u8 *data_ptr;
- enum HFI_PROPERTY prop_id;
+ int prop_id;
enum vidc_status status = VIDC_ERR_NONE;
HAL_MSG_LOW("RECEIVED:SYS_INIT_DONE");
@@ -202,7 +199,7 @@
num_properties = pkt->num_properties;
while ((num_properties != 0) && (rem_bytes >= sizeof(u32))) {
- prop_id = (enum HFI_PROPERTY) *((u32 *)data_ptr);
+ prop_id = *((u32 *)data_ptr);
data_ptr = data_ptr + 4;
switch (prop_id) {
@@ -282,8 +279,8 @@
rc = VIDC_ERR_FAIL;
}
HAL_MSG_LOW("got buffer requirements for: %d",
- hfi_buf_req->buffer);
- switch (hfi_buf_req->buffer) {
+ hfi_buf_req->buffer_type);
+ switch (hfi_buf_req->buffer_type) {
case HFI_BUFFER_INPUT:
memcpy(&buffreq->buffer[0], hfi_buf_req,
sizeof(struct hfi_buffer_requirements));
@@ -330,8 +327,8 @@
HAL_BUFFER_INTERNAL_PERSIST;
break;
default:
- HAL_MSG_ERROR("hal_process_sess_get_prop_buf_req:"
- "bad_buffer_type: %d", hfi_buf_req->buffer);
+ HAL_MSG_ERROR("%s: bad_buffer_type: %d",
+ __func__, hfi_buf_req->buffer_type);
break;
}
req_bytes -= sizeof(struct hfi_buffer_requirements);
@@ -525,8 +522,8 @@
data_done.size = sizeof(struct msm_vidc_cb_data_done);
data_done.clnt_data = (void *) pkt->input_tag;
- data_done.output_done.timestamp_hi = pkt->timestamp_hi;
- data_done.output_done.timestamp_lo = pkt->timestamp_lo;
+ data_done.output_done.timestamp_hi = pkt->time_stamp_hi;
+ data_done.output_done.timestamp_lo = pkt->time_stamp_lo;
data_done.output_done.flags1 = pkt->flags;
data_done.output_done.mark_target = pkt->mark_target;
data_done.output_done.mark_data = pkt->mark_data;
@@ -559,20 +556,20 @@
data_done.output_done.stream_id = pkt->stream_id;
data_done.output_done.view_id = pkt->view_id;
- data_done.output_done.timestamp_hi = pkt->timestamp_hi;
- data_done.output_done.timestamp_lo = pkt->timestamp_lo;
+ data_done.output_done.timestamp_hi = pkt->time_stamp_hi;
+ data_done.output_done.timestamp_lo = pkt->time_stamp_lo;
data_done.output_done.flags1 = pkt->flags;
data_done.output_done.mark_target = pkt->mark_target;
data_done.output_done.mark_data = pkt->mark_data;
data_done.output_done.stats = pkt->stats;
data_done.output_done.alloc_len1 = pkt->alloc_len;
data_done.output_done.filled_len1 = pkt->filled_len;
- data_done.output_done.offset1 = pkt->oofset;
+ data_done.output_done.offset1 = pkt->offset;
data_done.output_done.frame_width = pkt->frame_width;
data_done.output_done.frame_height = pkt->frame_height;
- data_done.output_done.start_xCoord = pkt->start_xCoord;
- data_done.output_done.start_yCoord = pkt->start_yCoord;
- data_done.output_done.input_tag1 = pkt->input_tag1;
+ data_done.output_done.start_xCoord = pkt->start_x_coord;
+ data_done.output_done.start_yCoord = pkt->start_y_coord;
+ data_done.output_done.input_tag1 = pkt->input_tag;
data_done.output_done.picture_type = pkt->picture_type;
data_done.output_done.packet_buffer1 = pkt->packet_buffer;
data_done.output_done.extra_data_buffer =
diff --git a/drivers/mfd/pm8038-core.c b/drivers/mfd/pm8038-core.c
index 8fef786..b32932b 100644
--- a/drivers/mfd/pm8038-core.c
+++ b/drivers/mfd/pm8038-core.c
@@ -327,6 +327,17 @@
.pdata_size = sizeof(struct pm8xxx_tm_core_data),
};
+static const struct resource ccadc_cell_resources[] __devinitconst = {
+ SINGLE_IRQ_RESOURCE("PM8921_BMS_CCADC_EOC", PM8921_BMS_CCADC_EOC),
+};
+
+static struct mfd_cell ccadc_cell __devinitdata = {
+ .name = PM8XXX_CCADC_DEV_NAME,
+ .id = -1,
+ .resources = ccadc_cell_resources,
+ .num_resources = ARRAY_SIZE(ccadc_cell_resources),
+};
+
static struct pm8xxx_vreg regulator_data[] = {
/* name pc_name ctrl test hpm_min */
NLDO1200("8038_l1", 0x0AE, 0x0AF, LDO_1200),
@@ -641,6 +652,19 @@
goto bail;
}
+ if (pdata->ccadc_pdata) {
+ ccadc_cell.platform_data = pdata->ccadc_pdata;
+ ccadc_cell.pdata_size =
+ sizeof(struct pm8xxx_ccadc_platform_data);
+
+ ret = mfd_add_devices(pmic->dev, 0, &ccadc_cell, 1, NULL,
+ irq_base);
+ if (ret) {
+ pr_err("Failed to add ccadc subdevice ret=%d\n", ret);
+ goto bail;
+ }
+ }
+
return 0;
bail:
if (pmic->irq_chip) {
diff --git a/drivers/mfd/wcd9xxx-slimslave.c b/drivers/mfd/wcd9xxx-slimslave.c
index 889c416..789242d 100644
--- a/drivers/mfd/wcd9xxx-slimslave.c
+++ b/drivers/mfd/wcd9xxx-slimslave.c
@@ -537,3 +537,18 @@
return ret;
}
EXPORT_SYMBOL_GPL(wcd9xxx_close_slim_sch_tx);
+
+int wcd9xxx_get_slave_port(unsigned int ch_num)
+{
+ int ret = 0;
+
+ pr_debug("%s: ch_num[%d]\n", __func__, ch_num);
+ ret = (ch_num - BASE_CH_NUM);
+ if (ret < 0) {
+ pr_err("%s: Error:- Invalid slave port found = %d\n",
+ __func__, ret);
+ return -EINVAL;
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(wcd9xxx_get_slave_port);
diff --git a/drivers/misc/isa1200.c b/drivers/misc/isa1200.c
index 555dfdd..6c3e787 100644
--- a/drivers/misc/isa1200.c
+++ b/drivers/misc/isa1200.c
@@ -20,6 +20,7 @@
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
#include <linux/i2c/isa1200.h>
#include "../staging/android/timed_output.h"
@@ -48,6 +49,7 @@
struct regulator **regs;
bool clk_on;
u8 hctrl0_val;
+ struct clk *pwm_clk;
};
static int isa1200_read_reg(struct i2c_client *client, int reg)
@@ -107,13 +109,23 @@
goto chip_dwn;
}
} else if (haptic->pdata->mode_ctrl == PWM_GEN_MODE) {
- /* vote for clock */
- if (haptic->pdata->clk_enable && !haptic->clk_on) {
+ /* check for board specific clk callback */
+ if (haptic->pdata->clk_enable) {
rc = haptic->pdata->clk_enable(true);
if (rc < 0) {
+ pr_err("%s: clk enable cb failed\n",
+ __func__);
+ goto chip_dwn;
+ }
+ }
+
+ /* vote for clock */
+ if (haptic->pdata->need_pwm_clk && !haptic->clk_on) {
+ rc = clk_enable(haptic->pwm_clk);
+ if (rc < 0) {
pr_err("%s: clk enable failed\n",
__func__);
- goto chip_dwn;
+ goto dis_clk_cb;
}
haptic->clk_on = true;
}
@@ -150,29 +162,35 @@
pr_err("%s: stop vibartion fail\n", __func__);
/* de-vote clock */
- if (haptic->pdata->clk_enable && haptic->clk_on) {
- rc = haptic->pdata->clk_enable(false);
- if (rc < 0) {
- pr_err("%s: clk disable failed\n",
- __func__);
- return;
- }
+ if (haptic->pdata->need_pwm_clk && haptic->clk_on) {
+ clk_disable(haptic->pwm_clk);
haptic->clk_on = false;
}
+ /* check for board specific clk callback */
+ if (haptic->pdata->clk_enable) {
+ rc = haptic->pdata->clk_enable(false);
+ if (rc < 0)
+ pr_err("%s: clk disable cb failed\n",
+ __func__);
+ }
}
}
return;
dis_clk:
- if (haptic->pdata->clk_enable && haptic->clk_on) {
- rc = haptic->pdata->clk_enable(false);
- if (rc < 0) {
- pr_err("%s: clk disable failed\n", __func__);
- return;
- }
+ if (haptic->pdata->need_pwm_clk && haptic->clk_on) {
+ clk_disable(haptic->pwm_clk);
haptic->clk_on = false;
}
+
+dis_clk_cb:
+ if (haptic->pdata->clk_enable) {
+ rc = haptic->pdata->clk_enable(false);
+ if (rc < 0)
+ pr_err("%s: clk disable cb failed\n", __func__);
+ }
+
chip_dwn:
if (haptic->is_len_gpio_valid == true)
gpio_set_value_cansleep(haptic->pdata->hap_en_gpio, 0);
@@ -545,6 +563,13 @@
ret = PTR_ERR(haptic->pwm);
goto reset_hctrl0;
}
+ } else if (haptic->pdata->need_pwm_clk) {
+ haptic->pwm_clk = clk_get(&client->dev, "pwm_clk");
+ if (IS_ERR(haptic->pwm_clk)) {
+ dev_err(&client->dev, "pwm_clk get failed\n");
+ ret = PTR_ERR(haptic->pwm_clk);
+ goto reset_hctrl0;
+ }
}
printk(KERN_INFO "%s: %s registered\n", __func__, id->name);
diff --git a/drivers/misc/tsif.c b/drivers/misc/tsif.c
index 2b09d7c..aeda38c 100644
--- a/drivers/misc/tsif.c
+++ b/drivers/misc/tsif.c
@@ -304,7 +304,9 @@
for (i = size-1; i >= 0; i--) {
int tmp;
g = table + i;
- tmp = gpio_tlmm_config(g->gpio_cfg, GPIO_CFG_DISABLE);
+ tmp = gpio_tlmm_config(GPIO_CFG(GPIO_PIN(g->gpio_cfg),
+ 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA),
+ GPIO_CFG_DISABLE);
if (tmp) {
pr_err("gpio_tlmm_config(0x%08x, GPIO_CFG_DISABLE)"
" <%s> failed: %d\n",
@@ -681,7 +683,7 @@
while (tsif_device->xfer[0].busy ||
tsif_device->xfer[1].busy) {
msm_dmov_flush(tsif_device->dma, 1);
- msleep(10);
+ usleep(10000);
}
}
tsif_device->state = tsif_state_stopped;
@@ -1031,6 +1033,15 @@
return rc;
}
tsif_device->state = tsif_state_running;
+
+	/* make sure the GPIOs are set up */
+ rc = tsif_start_gpios(tsif_device);
+ if (rc) {
+ dev_err(&tsif_device->pdev->dev, "failed to start GPIOs\n");
+ tsif_dma_exit(tsif_device);
+ return rc;
+ }
+
/*
* DMA should be scheduled prior to TSIF hardware initialization,
* otherwise "bus error" will be reported by Data Mover
@@ -1046,6 +1057,7 @@
rc = tsif_start_hw(tsif_device);
if (rc) {
dev_err(&tsif_device->pdev->dev, "Unable to start HW\n");
+ tsif_stop_gpios(tsif_device);
tsif_dma_exit(tsif_device);
tsif_clock(tsif_device, 0);
return rc;
@@ -1067,10 +1079,19 @@
{
dev_info(&tsif_device->pdev->dev, "%s, state %d\n", __func__,
(int)tsif_device->state);
- /*
- * DMA should be flushed/stopped prior to TSIF hardware stop,
- * otherwise "bus error" will be reported by Data Mover
+
+	/* turn off the GPIOs to prevent new data from entering */
+ tsif_stop_gpios(tsif_device);
+
+	/* We unfortunately must sleep here to give the ADM time to
+	 * complete any outstanding reads after the GPIOs are turned
+ * off. There is no indication from the ADM hardware that
+ * there are any outstanding reads on the bus, and if we
+ * stop the TSIF too quickly, it can cause a bus error.
*/
+ msleep(100);
+
+ /* now we can stop the core */
tsif_stop_hw(tsif_device);
tsif_dma_exit(tsif_device);
tsif_clock(tsif_device, 0);
@@ -1317,9 +1338,6 @@
}
dev_info(&pdev->dev, "remapped phys 0x%08x => virt %p\n",
tsif_device->memres->start, tsif_device->base);
- rc = tsif_start_gpios(tsif_device);
- if (rc)
- goto err_gpio;
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
@@ -1355,8 +1373,6 @@
free_irq(tsif_device->irq, tsif_device);
err_irq:
tsif_debugfs_exit(tsif_device);
- tsif_stop_gpios(tsif_device);
-err_gpio:
iounmap(tsif_device->base);
err_ioremap:
err_rgn:
diff --git a/drivers/misc/tspp.c b/drivers/misc/tspp.c
index 81c6b65..4d7553e 100644
--- a/drivers/misc/tspp.c
+++ b/drivers/misc/tspp.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1687,7 +1687,7 @@
/* map clocks */
if (data->tsif_pclk) {
- device->tsif_pclk = clk_get(NULL, data->tsif_pclk);
+ device->tsif_pclk = clk_get(&pdev->dev, data->tsif_pclk);
if (IS_ERR(device->tsif_pclk)) {
pr_err("tspp: failed to get %s",
data->tsif_pclk);
@@ -1697,7 +1697,7 @@
}
}
if (data->tsif_ref_clk) {
- device->tsif_ref_clk = clk_get(NULL, data->tsif_ref_clk);
+ device->tsif_ref_clk = clk_get(&pdev->dev, data->tsif_ref_clk);
if (IS_ERR(device->tsif_ref_clk)) {
pr_err("tspp: failed to get %s",
data->tsif_ref_clk);
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index edf4400..0659b79 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -378,8 +378,7 @@
host->curr.got_dataend = 0;
host->curr.wait_for_auto_prog_done = false;
host->curr.got_auto_prog_done = false;
- writel_relaxed(readl_relaxed(host->base + MMCIDATACTRL) &
- (~(MCI_DPSM_ENABLE)), host->base + MMCIDATACTRL);
+ writel_relaxed(0, host->base + MMCIDATACTRL);
msmsdcc_sync_reg_wr(host); /* Allow the DPSM to be reset */
}
@@ -1683,7 +1682,7 @@
msmsdcc_delay(host);
}
- if (!host->clks_on) {
+ if (!atomic_read(&host->clks_on)) {
pr_debug("%s: %s: SDIO async irq received\n",
mmc_hostname(host->mmc), __func__);
@@ -1993,7 +1992,8 @@
/*
* Don't start the request if SDCC is not in proper state to handle it
*/
- if (!host->pwr || !host->clks_on || host->sdcc_irq_disabled) {
+ if (!host->pwr || !atomic_read(&host->clks_on)
+ || host->sdcc_irq_disabled) {
WARN(1, "%s: %s: SDCC is in bad state. don't process"
" new request (CMD%d)\n", mmc_hostname(host->mmc),
__func__, mrq->cmd->opcode);
@@ -2203,7 +2203,7 @@
return rc;
}
-static int msmsdcc_vreg_disable(struct msm_mmc_reg_data *vreg)
+static int msmsdcc_vreg_disable(struct msm_mmc_reg_data *vreg, bool is_init)
{
int rc = 0;
@@ -2225,17 +2225,33 @@
rc = msmsdcc_vreg_set_voltage(vreg, 0, vreg->high_vol_level);
if (rc)
goto out;
- } else if (vreg->is_enabled && vreg->always_on && vreg->lpm_sup) {
- /* Put always_on regulator in LPM (low power mode) */
- rc = msmsdcc_vreg_set_optimum_mode(vreg, vreg->lpm_uA);
- if (rc < 0)
- goto out;
+ } else if (vreg->is_enabled && vreg->always_on) {
+ if (!is_init && vreg->lpm_sup) {
+ /* Put always_on regulator in LPM (low power mode) */
+ rc = msmsdcc_vreg_set_optimum_mode(vreg, vreg->lpm_uA);
+ if (rc < 0)
+ goto out;
+ } else if (is_init && vreg->reset_at_init) {
+ /**
+ * The regulator might not actually be disabled if it
+ * is shared and in use by other drivers.
+ */
+ rc = regulator_disable(vreg->reg);
+ if (rc) {
+ pr_err("%s: regulator_disable(%s) failed at " \
+ "bootup. rc=%d\n", __func__,
+ vreg->name, rc);
+ goto out;
+ }
+ vreg->is_enabled = false;
+ }
}
out:
return rc;
}
-static int msmsdcc_setup_vreg(struct msmsdcc_host *host, bool enable)
+static int msmsdcc_setup_vreg(struct msmsdcc_host *host, bool enable,
+ bool is_init)
{
int rc = 0, i;
struct msm_mmc_slot_reg_data *curr_slot;
@@ -2253,7 +2269,8 @@
if (enable)
rc = msmsdcc_vreg_enable(vreg_table[i]);
else
- rc = msmsdcc_vreg_disable(vreg_table[i]);
+ rc = msmsdcc_vreg_disable(vreg_table[i],
+ is_init);
if (rc)
goto out;
}
@@ -2270,10 +2287,10 @@
{
int rc;
- rc = msmsdcc_setup_vreg(host, 1);
+ rc = msmsdcc_setup_vreg(host, 1, true);
if (rc)
return rc;
- rc = msmsdcc_setup_vreg(host, 0);
+ rc = msmsdcc_setup_vreg(host, 0, true);
return rc;
}
@@ -2338,17 +2355,37 @@
* Any function calling msmsdcc_setup_clocks must
* acquire clk_mutex. May sleep.
*/
-static inline void msmsdcc_setup_clocks(struct msmsdcc_host *host, bool enable)
+static int msmsdcc_setup_clocks(struct msmsdcc_host *host, bool enable)
{
- if (enable) {
- if (!IS_ERR_OR_NULL(host->bus_clk))
- clk_prepare_enable(host->bus_clk);
- if (!IS_ERR(host->pclk))
- clk_prepare_enable(host->pclk);
- clk_prepare_enable(host->clk);
+ int rc = 0;
+
+ if (enable && !atomic_read(&host->clks_on)) {
+ if (!IS_ERR_OR_NULL(host->bus_clk)) {
+ rc = clk_prepare_enable(host->bus_clk);
+ if (rc) {
+ pr_err("%s: %s: failed to enable the bus-clock with error %d\n",
+ mmc_hostname(host->mmc), __func__, rc);
+ goto out;
+ }
+ }
+ if (!IS_ERR(host->pclk)) {
+ rc = clk_prepare_enable(host->pclk);
+ if (rc) {
+ pr_err("%s: %s: failed to enable the pclk with error %d\n",
+ mmc_hostname(host->mmc), __func__, rc);
+ goto disable_bus;
+ }
+ }
+ rc = clk_prepare_enable(host->clk);
+ if (rc) {
+ pr_err("%s: %s: failed to enable the host-clk with error %d\n",
+ mmc_hostname(host->mmc), __func__, rc);
+ goto disable_pclk;
+ }
mb();
msmsdcc_delay(host);
- } else {
+ atomic_set(&host->clks_on, 1);
+ } else if (!enable && atomic_read(&host->clks_on)) {
mb();
msmsdcc_delay(host);
clk_disable_unprepare(host->clk);
@@ -2356,7 +2393,18 @@
clk_disable_unprepare(host->pclk);
if (!IS_ERR_OR_NULL(host->bus_clk))
clk_disable_unprepare(host->bus_clk);
+ atomic_set(&host->clks_on, 0);
}
+ goto out;
+
+disable_pclk:
+ if (!IS_ERR_OR_NULL(host->pclk))
+ clk_disable_unprepare(host->pclk);
+disable_bus:
+ if (!IS_ERR_OR_NULL(host->bus_clk))
+ clk_disable_unprepare(host->bus_clk);
+out:
+ return rc;
}
static inline unsigned int msmsdcc_get_sup_clk_rate(struct msmsdcc_host *host,
@@ -2535,7 +2583,7 @@
if (host->plat->translate_vdd && !host->sdio_gpio_lpm)
ret = host->plat->translate_vdd(mmc_dev(mmc), ios->vdd);
else if (!host->plat->translate_vdd && !host->sdio_gpio_lpm)
- ret = msmsdcc_setup_vreg(host, !!ios->vdd);
+ ret = msmsdcc_setup_vreg(host, !!ios->vdd, false);
if (ret) {
pr_err("%s: Failed to setup voltage regulators\n",
@@ -2889,18 +2937,16 @@
spin_lock_irqsave(&host->lock, flags);
if (ios->clock) {
- if (!host->clks_on) {
- spin_unlock_irqrestore(&host->lock, flags);
- msmsdcc_setup_clocks(host, true);
- spin_lock_irqsave(&host->lock, flags);
- host->clks_on = 1;
- writel_relaxed(host->mci_irqenable,
- host->base + MMCIMASK0);
- mb();
- msmsdcc_cfg_sdio_wakeup(host, false);
- }
-
+ spin_unlock_irqrestore(&host->lock, flags);
+ rc = msmsdcc_setup_clocks(host, true);
+ if (rc)
+ goto out;
+ spin_lock_irqsave(&host->lock, flags);
+ writel_relaxed(host->mci_irqenable, host->base + MMCIMASK0);
+ mb();
+ msmsdcc_cfg_sdio_wakeup(host, false);
clock = msmsdcc_get_sup_clk_rate(host, ios->clock);
+
/*
* For DDR50 mode, controller needs clock rate to be
* double than what is required on the SD card CLK pin.
@@ -2943,7 +2989,6 @@
msmsdcc_delay(host);
clk |= MCI_CLK_ENABLE;
}
-
if (ios->bus_width == MMC_BUS_WIDTH_8)
clk |= MCI_CLK_WIDEBUS_8;
else if (ios->bus_width == MMC_BUS_WIDTH_4)
@@ -2982,7 +3027,7 @@
clk |= IO_PAD_PWR_SWITCH;
/* Don't write into registers if clocks are disabled */
- if (host->clks_on) {
+ if (atomic_read(&host->clks_on)) {
if (readl_relaxed(host->base + MMCICLOCK) != clk) {
writel_relaxed(clk, host->base + MMCICLOCK);
msmsdcc_sync_reg_wr(host);
@@ -2994,7 +3039,7 @@
}
}
- if (!(clk & MCI_CLK_ENABLE) && host->clks_on) {
+ if (!(clk & MCI_CLK_ENABLE) && atomic_read(&host->clks_on)) {
msmsdcc_cfg_sdio_wakeup(host, true);
spin_unlock_irqrestore(&host->lock, flags);
/*
@@ -3003,11 +3048,10 @@
*/
msmsdcc_setup_clocks(host, false);
spin_lock_irqsave(&host->lock, flags);
- host->clks_on = 0;
}
if (host->tuning_in_progress)
- WARN(!host->clks_on,
+ WARN(!atomic_read(&host->clks_on),
"tuning_in_progress but SDCC clocks are OFF\n");
/* Let interrupts be disabled if the host is powered off */
@@ -3015,8 +3059,8 @@
enable_irq(host->core_irqres->start);
host->sdcc_irq_disabled = 0;
}
-
spin_unlock_irqrestore(&host->lock, flags);
+out:
mutex_unlock(&host->clk_mutex);
}
@@ -3089,14 +3133,14 @@
spin_lock_irqsave(&host->lock, flags);
if (enable) {
host->mci_irqenable |= MCI_SDIOINTOPERMASK;
- if (host->clks_on) {
+ if (atomic_read(&host->clks_on)) {
writel_relaxed(readl_relaxed(host->base + MMCIMASK0) |
MCI_SDIOINTOPERMASK, host->base + MMCIMASK0);
mb();
}
} else {
host->mci_irqenable &= ~MCI_SDIOINTOPERMASK;
- if (host->clks_on) {
+ if (atomic_read(&host->clks_on)) {
writel_relaxed(readl_relaxed(host->base + MMCIMASK0) &
~MCI_SDIOINTOPERMASK, host->base + MMCIMASK0);
mb();
@@ -3218,20 +3262,14 @@
}
mutex_lock(&host->clk_mutex);
- spin_lock_irqsave(&host->lock, flags);
- if (!host->clks_on) {
- spin_unlock_irqrestore(&host->lock, flags);
- msmsdcc_setup_clocks(host, true);
- spin_lock_irqsave(&host->lock, flags);
- host->clks_on = 1;
- }
- spin_unlock_irqrestore(&host->lock, flags);
+ rc = msmsdcc_setup_clocks(host, true);
mutex_unlock(&host->clk_mutex);
out:
if (rc < 0) {
pr_info("%s: %s: failed with error %d", mmc_hostname(mmc),
__func__, rc);
+ msmsdcc_pm_qos_update_latency(host, 0);
return rc;
}
msmsdcc_msm_bus_cancel_work_and_set_vote(host, &mmc->ios);
@@ -3242,6 +3280,7 @@
{
struct msmsdcc_host *host = mmc_priv(mmc);
unsigned long flags;
+ int rc = 0;
msmsdcc_pm_qos_update_latency(host, 0);
@@ -3249,19 +3288,16 @@
goto out;
mutex_lock(&host->clk_mutex);
- spin_lock_irqsave(&host->lock, flags);
- if (host->clks_on) {
- spin_unlock_irqrestore(&host->lock, flags);
- msmsdcc_setup_clocks(host, false);
- spin_lock_irqsave(&host->lock, flags);
- host->clks_on = 0;
- }
- spin_unlock_irqrestore(&host->lock, flags);
+ rc = msmsdcc_setup_clocks(host, false);
mutex_unlock(&host->clk_mutex);
+ if (rc) {
+ msmsdcc_pm_qos_update_latency(host, 1);
+ return rc;
+ }
out:
msmsdcc_msm_bus_queue_work(host);
- return 0;
+ return rc;
}
#endif
@@ -3718,7 +3754,7 @@
spin_lock_irqsave(&host->lock, flags);
WARN(!host->pwr, "SDCC power is turned off\n");
- WARN(!host->clks_on, "SDCC clocks are turned off\n");
+ WARN(!atomic_read(&host->clks_on), "SDCC clocks are turned off\n");
WARN(host->sdcc_irq_disabled, "SDCC IRQ is disabled\n");
host->tuning_in_progress = 1;
@@ -4463,13 +4499,14 @@
pr_info("%s: SDCC PWR is %s\n", mmc_hostname(host->mmc),
(host->pwr ? "ON" : "OFF"));
pr_info("%s: SDCC clks are %s, MCLK rate=%d\n",
- mmc_hostname(host->mmc), (host->clks_on ? "ON" : "OFF"),
+ mmc_hostname(host->mmc),
+ (atomic_read(&host->clks_on) ? "ON" : "OFF"),
(u32)clk_get_rate(host->clk));
pr_info("%s: SDCC irq is %s\n", mmc_hostname(host->mmc),
(host->sdcc_irq_disabled ? "disabled" : "enabled"));
/* Now dump SDCC registers. Don't print FIFO registers */
- if (host->clks_on)
+ if (atomic_read(&host->clks_on))
msmsdcc_print_regs("SDCC-CORE", host->base,
host->core_memres->start, 28);
@@ -4481,7 +4518,7 @@
mmc_hostname(host->mmc), host->dma.busy,
host->dma.channel, host->dma.crci);
else if (host->is_sps_mode) {
- if (host->sps.busy && host->clks_on)
+ if (host->sps.busy && atomic_read(&host->clks_on))
msmsdcc_print_regs("SDCC-DML", host->dml_base,
host->dml_memres->start,
16);
@@ -4892,7 +4929,7 @@
(1 + ((3 * USEC_PER_SEC) /
msmsdcc_get_min_sup_clk_rate(host)));
- host->clks_on = 1;
+ atomic_set(&host->clks_on, 1);
/* Apply Hard reset to SDCC to put it in power on default state */
msmsdcc_hard_reset(host);
@@ -5330,6 +5367,7 @@
{
struct msmsdcc_host *host = mmc_priv(mmc);
unsigned long flags;
+ int rc = 0;
mutex_lock(&host->clk_mutex);
spin_lock_irqsave(&host->lock, flags);
@@ -5342,13 +5380,9 @@
disable_irq_nosync(host->core_irqres->start);
host->sdcc_irq_disabled = 1;
}
-
- if (host->clks_on) {
- spin_unlock_irqrestore(&host->lock, flags);
- msmsdcc_setup_clocks(host, false);
- spin_lock_irqsave(&host->lock, flags);
- host->clks_on = 0;
- }
+ rc = msmsdcc_setup_clocks(host, false);
+ if (rc)
+ goto out;
if (host->plat->sdio_lpm_gpio_setup &&
!host->sdio_gpio_lpm) {
@@ -5364,6 +5398,10 @@
host->sdio_wakeupirq_disabled = 0;
}
} else {
+ rc = msmsdcc_setup_clocks(host, true);
+ if (rc)
+ goto out;
+
if (!host->sdio_wakeupirq_disabled) {
disable_irq_nosync(host->plat->sdiowakeup_irq);
host->sdio_wakeupirq_disabled = 1;
@@ -5378,14 +5416,7 @@
host->sdio_gpio_lpm = 0;
}
- if (!host->clks_on) {
- spin_unlock_irqrestore(&host->lock, flags);
- msmsdcc_setup_clocks(host, true);
- spin_lock_irqsave(&host->lock, flags);
- host->clks_on = 1;
- }
-
- if (host->sdcc_irq_disabled) {
+ if (host->sdcc_irq_disabled && atomic_read(&host->clks_on)) {
writel_relaxed(host->mci_irqenable,
host->base + MMCIMASK0);
mb();
@@ -5393,9 +5424,10 @@
host->sdcc_irq_disabled = 0;
}
}
+out:
spin_unlock_irqrestore(&host->lock, flags);
mutex_unlock(&host->clk_mutex);
- return 0;
+ return rc;
}
#else
int msmsdcc_sdio_al_lpm(struct mmc_host *mmc, bool enable)
@@ -5607,7 +5639,7 @@
* during suspend and not allowing TCXO.
*/
- if (host->clks_on && !host->plat->is_sdio_al_client) {
+ if (atomic_read(&host->clks_on) && !host->plat->is_sdio_al_client) {
pr_warn("%s: clocks are on after suspend, aborting system "
"suspend\n", mmc_hostname(mmc));
rc = -EAGAIN;
diff --git a/drivers/mmc/host/msm_sdcc.h b/drivers/mmc/host/msm_sdcc.h
index 5531f06..dc32d1c 100644
--- a/drivers/mmc/host/msm_sdcc.h
+++ b/drivers/mmc/host/msm_sdcc.h
@@ -351,7 +351,7 @@
struct clk *clk; /* main MMC bus clock */
struct clk *pclk; /* SDCC peripheral bus clock */
struct clk *bus_clk; /* SDCC bus voter clock */
- unsigned int clks_on; /* set if clocks are enabled */
+ atomic_t clks_on; /* set if clocks are enabled */
unsigned int eject; /* eject state */
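
The msm_sdcc changes above stop toggling clks_on under the host spinlock around every msmsdcc_setup_clocks() call; the function itself now checks and updates an atomic_t and reports failures from clk_prepare_enable(), so the (sleeping) clock calls run with only clk_mutex held. A minimal sketch of that pattern, with hypothetical names, not part of this patch:

/* Minimal sketch of the pattern used above; names are hypothetical. */
#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/mutex.h>

struct host {
	struct clk *clk;
	atomic_t clks_on;	/* 1 while the clock is prepared and enabled */
	struct mutex clk_mutex;	/* serialises enable/disable */
};

/* Caller holds clk_mutex; may sleep, never called under a spinlock. */
static int setup_clocks(struct host *h, bool enable)
{
	int rc = 0;

	if (enable && !atomic_read(&h->clks_on)) {
		rc = clk_prepare_enable(h->clk);
		if (!rc)
			atomic_set(&h->clks_on, 1);
	} else if (!enable && atomic_read(&h->clks_on)) {
		clk_disable_unprepare(h->clk);
		atomic_set(&h->clks_on, 0);
	}
	return rc;
}
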
diff --git a/drivers/net/usb/rmnet_usb_ctrl.c b/drivers/net/usb/rmnet_usb_ctrl.c
index c2085c9..2972af0 100644
--- a/drivers/net/usb/rmnet_usb_ctrl.c
+++ b/drivers/net/usb/rmnet_usb_ctrl.c
@@ -111,7 +111,7 @@
{
if (dev) {
mutex_lock(&dev->dev_lock);
- if (!dev->intf) {
+ if (!dev->is_connected) {
mutex_unlock(&dev->dev_lock);
return 0;
}
@@ -521,8 +521,6 @@
dev->is_opened = 0;
mutex_unlock(&dev->dev_lock);
- rmnet_usb_ctrl_stop_rx(dev);
-
if (is_dev_connected(dev))
usb_kill_anchored_urbs(&dev->tx_submitted);
@@ -761,10 +759,17 @@
dev->tx_ctrl_err_cnt = 0;
dev->set_ctrl_line_state_cnt = 0;
- ret = rmnet_usb_ctrl_write_cmd(dev);
+ ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+ USB_CDC_REQ_SET_CONTROL_LINE_STATE,
+ (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE),
+ dev->cbits_tomdm,
+ dev->intf->cur_altsetting->desc.bInterfaceNumber,
+ NULL, 0, USB_CTRL_SET_TIMEOUT);
if (ret < 0)
return ret;
+ dev->set_ctrl_line_state_cnt++;
+
dev->inturb = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->inturb) {
dev_err(dev->devicep, "Error allocating int urb\n");
@@ -800,7 +805,11 @@
notification_available_cb, dev, interval);
usb_mark_last_busy(udev);
- return rmnet_usb_ctrl_start_rx(dev);
+ ret = rmnet_usb_ctrl_start_rx(dev);
+ if (!ret)
+ dev->is_connected = true;
+
+ return ret;
}
void rmnet_usb_ctrl_disconnect(struct rmnet_ctrl_dev *dev)
@@ -813,7 +822,7 @@
dev->cbits_tolocal = ~ACM_CTRL_CD;
dev->cbits_tomdm = ~ACM_CTRL_DTR;
- dev->intf = NULL;
+ dev->is_connected = false;
mutex_unlock(&dev->dev_lock);
wake_up(&dev->read_wait_queue);
diff --git a/drivers/net/usb/rmnet_usb_ctrl.h b/drivers/net/usb/rmnet_usb_ctrl.h
index bc07726..3259940 100644
--- a/drivers/net/usb/rmnet_usb_ctrl.h
+++ b/drivers/net/usb/rmnet_usb_ctrl.h
@@ -46,6 +46,8 @@
unsigned is_opened;
+ bool is_connected;
+
/*input control lines (DSR, CTS, CD, RI)*/
unsigned int cbits_tolocal;
diff --git a/drivers/net/usb/rmnet_usb_data.c b/drivers/net/usb/rmnet_usb_data.c
index 9e1e252..55020a1 100644
--- a/drivers/net/usb/rmnet_usb_data.c
+++ b/drivers/net/usb/rmnet_usb_data.c
@@ -604,6 +604,7 @@
static const struct driver_info rmnet_info_pid9034 = {
.description = "RmNET net device",
+ .flags = FLAG_SEND_ZLP,
.bind = rmnet_usb_bind,
.tx_fixup = rmnet_usb_tx_fixup,
.rx_fixup = rmnet_usb_rx_fixup,
@@ -613,6 +614,7 @@
static const struct driver_info rmnet_info_pid9048 = {
.description = "RmNET net device",
+ .flags = FLAG_SEND_ZLP,
.bind = rmnet_usb_bind,
.tx_fixup = rmnet_usb_tx_fixup,
.rx_fixup = rmnet_usb_rx_fixup,
@@ -622,6 +624,7 @@
static const struct driver_info rmnet_info_pid904c = {
.description = "RmNET net device",
+ .flags = FLAG_SEND_ZLP,
.bind = rmnet_usb_bind,
.tx_fixup = rmnet_usb_tx_fixup,
.rx_fixup = rmnet_usb_rx_fixup,
diff --git a/drivers/of/of_spmi.c b/drivers/of/of_spmi.c
index 61085c9..0c23db5 100644
--- a/drivers/of/of_spmi.c
+++ b/drivers/of/of_spmi.c
@@ -43,27 +43,12 @@
}
/*
- * Allocate dev_node array for spmi_device
- */
-static inline int of_spmi_alloc_device_store(struct of_spmi_dev_info *d_info,
- uint32_t num_dev_node)
-{
- d_info->b_info.num_dev_node = num_dev_node;
- d_info->b_info.dev_node = kzalloc(sizeof(struct spmi_resource) *
- num_dev_node, GFP_KERNEL);
- if (!d_info->b_info.dev_node)
- return -ENOMEM;
-
- return 0;
-}
-
-/*
* Calculate the number of resources to allocate
*
* The caller is responsible for initializing the of_spmi_res_info structure.
*/
-static void of_spmi_sum_node_resources(struct of_spmi_res_info *r_info,
- bool has_reg)
+static void of_spmi_sum_resources(struct of_spmi_res_info *r_info,
+ bool has_reg)
{
struct of_irq oirq;
uint64_t size;
@@ -92,58 +77,48 @@
}
/*
- * free spmi_resource for the spmi_device
+ * Allocate dev_node array for spmi_device - used with spmi-dev-container
*/
-static void of_spmi_free_device_resources(struct of_spmi_dev_info *d_info)
+static inline int of_spmi_alloc_devnode_store(struct of_spmi_dev_info *d_info,
+ uint32_t num_dev_node)
{
- int i;
+ d_info->b_info.num_dev_node = num_dev_node;
+ d_info->b_info.dev_node = kzalloc(sizeof(struct spmi_resource) *
+ num_dev_node, GFP_KERNEL);
+ if (!d_info->b_info.dev_node)
+ return -ENOMEM;
- for (i = 0; i < d_info->b_info.num_dev_node; i++)
- kfree(d_info->b_info.dev_node[i].resource);
-
- kfree(d_info->b_info.dev_node);
-}
-
-/*
- * Gather node resources and populate
- */
-static void of_spmi_populate_node_resources(struct of_spmi_dev_info *d_info,
- struct of_spmi_res_info *r_info,
- int idx)
-
-{
- uint32_t num_irq = r_info->num_irq, num_reg = r_info->num_reg;
- int i;
- struct resource *res;
- const __be32 *addrp;
- uint64_t size;
- uint32_t flags;
-
- res = d_info->b_info.dev_node[idx].resource;
- d_info->b_info.dev_node[idx].of_node = r_info->node;
-
- if ((num_irq || num_reg) && (res != NULL)) {
- for (i = 0; i < num_reg; i++, res++) {
- /* Addresses are always 16 bits */
- addrp = of_get_address(r_info->node, i, &size, &flags);
- BUG_ON(!addrp);
- res->start = be32_to_cpup(addrp);
- res->end = res->start + size - 1;
- res->flags = flags;
- }
- WARN_ON(of_irq_to_resource_table(r_info->node, res, num_irq) !=
- num_irq);
- }
+ return 0;
}
/*
* Allocate enough memory to handle the resources associated with the
- * device_node. The number of device nodes included in this allocation
- * depends on whether the spmi-dev-container flag is specified or not.
+ * primary node.
*/
static int of_spmi_allocate_node_resources(struct of_spmi_dev_info *d_info,
- struct of_spmi_res_info *r_info,
- uint32_t idx)
+ struct of_spmi_res_info *r_info)
+{
+ uint32_t num_irq = r_info->num_irq, num_reg = r_info->num_reg;
+ struct resource *res = NULL;
+
+ if (num_irq || num_reg) {
+ res = kzalloc(sizeof(*res) * (num_irq + num_reg), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+ }
+ d_info->b_info.res.num_resources = num_reg + num_irq;
+ d_info->b_info.res.resource = res;
+
+ return 0;
+}
+
+/*
+ * Allocate enough memory to handle the resources associated with the
+ * spmi-dev-container nodes.
+ */
+static int of_spmi_allocate_devnode_resources(struct of_spmi_dev_info *d_info,
+ struct of_spmi_res_info *r_info,
+ uint32_t idx)
{
uint32_t num_irq = r_info->num_irq, num_reg = r_info->num_reg;
struct resource *res = NULL;
@@ -160,6 +135,87 @@
}
/*
+ * free node resources - used with primary node
+ */
+static void of_spmi_free_node_resources(struct of_spmi_dev_info *d_info)
+{
+ kfree(d_info->b_info.res.resource);
+}
+
+/*
+ * free devnode resources - used with spmi-dev-container
+ */
+static void of_spmi_free_devnode_resources(struct of_spmi_dev_info *d_info)
+{
+ int i;
+
+ for (i = 0; i < d_info->b_info.num_dev_node; i++)
+ kfree(d_info->b_info.dev_node[i].resource);
+
+ kfree(d_info->b_info.dev_node);
+}
+
+static void of_spmi_populate_resources(struct of_spmi_dev_info *d_info,
+ struct of_spmi_res_info *r_info,
+ struct resource *res)
+
+{
+ uint32_t num_irq = r_info->num_irq, num_reg = r_info->num_reg;
+ int i;
+ const __be32 *addrp;
+ uint64_t size;
+ uint32_t flags;
+
+ if ((num_irq || num_reg) && (res != NULL)) {
+ for (i = 0; i < num_reg; i++, res++) {
+ /* Addresses are always 16 bits */
+ addrp = of_get_address(r_info->node, i, &size, &flags);
+ BUG_ON(!addrp);
+ res->start = be32_to_cpup(addrp);
+ res->end = res->start + size - 1;
+ res->flags = flags;
+ of_property_read_string_index(r_info->node, "reg-names",
+ i, &res->name);
+ }
+ WARN_ON(of_irq_to_resource_table(r_info->node, res, num_irq) !=
+ num_irq);
+ }
+}
+
+/*
+ * Gather primary node resources and populate.
+ */
+static void of_spmi_populate_node_resources(struct of_spmi_dev_info *d_info,
+ struct of_spmi_res_info *r_info)
+
+{
+ struct resource *res;
+
+ res = d_info->b_info.res.resource;
+ d_info->b_info.res.of_node = r_info->node;
+ of_property_read_string(r_info->node, "label",
+ &d_info->b_info.res.label);
+ of_spmi_populate_resources(d_info, r_info, res);
+}
+
+/*
+ * Gather node devnode resources and populate - used with spmi-dev-container.
+ */
+static void of_spmi_populate_devnode_resources(struct of_spmi_dev_info *d_info,
+ struct of_spmi_res_info *r_info,
+ int idx)
+
+{
+ struct resource *res;
+
+ res = d_info->b_info.dev_node[idx].resource;
+ d_info->b_info.dev_node[idx].of_node = r_info->node;
+ of_property_read_string(r_info->node, "label",
+ &d_info->b_info.dev_node[idx].label);
+ of_spmi_populate_resources(d_info, r_info, res);
+}
+
+/*
* create a single spmi_device
*/
static int of_spmi_create_device(struct of_spmi_dev_info *d_info,
@@ -216,10 +272,10 @@
num_dev_node++;
}
- rc = of_spmi_alloc_device_store(d_info, num_dev_node);
+ rc = of_spmi_alloc_devnode_store(d_info, num_dev_node);
if (rc) {
- dev_err(&ctrl->dev, "%s: unable to allocate"
- " device resources\n", __func__);
+ dev_err(&ctrl->dev, "%s: unable to allocate devnode resources\n",
+ __func__);
return;
}
@@ -228,23 +284,36 @@
if (!of_device_is_available(node))
continue;
of_spmi_init_resource(&r_info, node);
- of_spmi_sum_node_resources(&r_info, 1);
- rc = of_spmi_allocate_node_resources(d_info, &r_info, i);
+ of_spmi_sum_resources(&r_info, true);
+ rc = of_spmi_allocate_devnode_resources(d_info, &r_info, i);
if (rc) {
dev_err(&ctrl->dev, "%s: unable to allocate"
" resources\n", __func__);
- of_spmi_free_device_resources(d_info);
+ of_spmi_free_devnode_resources(d_info);
return;
}
- of_spmi_populate_node_resources(d_info, &r_info, i);
+ of_spmi_populate_devnode_resources(d_info, &r_info, i);
i++;
}
+ of_spmi_init_resource(&r_info, container);
+ of_spmi_sum_resources(&r_info, true);
+
+ rc = of_spmi_allocate_node_resources(d_info, &r_info);
+ if (rc) {
+ dev_err(&ctrl->dev, "%s: unable to allocate resources\n",
+ __func__);
+ of_spmi_free_node_resources(d_info);
+ }
+
+ of_spmi_populate_node_resources(d_info, &r_info);
+
+
rc = of_spmi_create_device(d_info, container);
if (rc) {
dev_err(&ctrl->dev, "%s: unable to create device for"
" node %s\n", __func__, container->full_name);
- of_spmi_free_device_resources(d_info);
+ of_spmi_free_devnode_resources(d_info);
return;
}
}
@@ -255,7 +324,7 @@
* point all share the same slave_id.
*/
static void of_spmi_walk_slave_container(struct of_spmi_dev_info *d_info,
- struct device_node *container)
+ struct device_node *container)
{
struct spmi_controller *ctrl = d_info->ctrl;
struct device_node *node;
@@ -276,24 +345,17 @@
continue;
}
- rc = of_spmi_alloc_device_store(d_info, 1);
- if (rc) {
- dev_err(&ctrl->dev, "%s: unable to allocate"
- " device resources\n", __func__);
- goto slave_err;
- }
-
of_spmi_init_resource(&r_info, node);
- of_spmi_sum_node_resources(&r_info, 1);
+ of_spmi_sum_resources(&r_info, true);
- rc = of_spmi_allocate_node_resources(d_info, &r_info, 0);
+ rc = of_spmi_allocate_node_resources(d_info, &r_info);
if (rc) {
dev_err(&ctrl->dev, "%s: unable to allocate"
" resources\n", __func__);
goto slave_err;
}
- of_spmi_populate_node_resources(d_info, &r_info, 0);
+ of_spmi_populate_node_resources(d_info, &r_info);
rc = of_spmi_create_device(d_info, node);
if (rc) {
@@ -305,7 +367,7 @@
return;
slave_err:
- of_spmi_free_device_resources(d_info);
+ of_spmi_free_node_resources(d_info);
}
int of_spmi_register_devices(struct spmi_controller *ctrl)
@@ -370,31 +432,23 @@
if (!of_device_is_available(node))
continue;
- rc = of_spmi_alloc_device_store(&d_info, 1);
- if (rc) {
- dev_err(&ctrl->dev, "%s: unable to allocate"
- " device resources\n", __func__);
- continue;
- }
-
of_spmi_init_resource(&r_info, node);
- of_spmi_sum_node_resources(&r_info, 0);
- rc = of_spmi_allocate_node_resources(&d_info,
- &r_info, 0);
+ of_spmi_sum_resources(&r_info, false);
+ rc = of_spmi_allocate_node_resources(&d_info, &r_info);
if (rc) {
dev_err(&ctrl->dev, "%s: unable to allocate"
" resources\n", __func__);
- of_spmi_free_device_resources(&d_info);
+ of_spmi_free_node_resources(&d_info);
continue;
}
- of_spmi_populate_node_resources(&d_info, &r_info, 0);
+ of_spmi_populate_node_resources(&d_info, &r_info);
rc = of_spmi_create_device(&d_info, node);
if (rc) {
dev_err(&ctrl->dev, "%s: unable to create"
" device\n", __func__);
- of_spmi_free_device_resources(&d_info);
+ of_spmi_free_node_resources(&d_info);
continue;
}
}
@@ -404,4 +458,4 @@
}
EXPORT_SYMBOL(of_spmi_register_devices);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index be6ba04..e87b4bd 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -64,6 +64,24 @@
EXPORT_SYMBOL_GPL(power_supply_set_online);
/**
+ * power_supply_set_scope - set scope of the power supply
+ * @psy: the power supply to control
+ * @scope: value to set the scope property to, should be from
+ * the SCOPE enum in power_supply.h
+ */
+int power_supply_set_scope(struct power_supply *psy, int scope)
+{
+ const union power_supply_propval ret = {scope, };
+
+ if (psy->set_property)
+ return psy->set_property(psy, POWER_SUPPLY_PROP_SCOPE,
+ &ret);
+
+ return -ENXIO;
+}
+EXPORT_SYMBOL_GPL(power_supply_set_scope);
+
+/**
* power_supply_set_charge_type - set charge type of the power supply
* @psy: the power supply to control
* @enable: sets charge type property of power supply
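
power_supply_set_scope() added above simply forwards a value from the SCOPE enum to the supply's set_property callback and returns -ENXIO when none is registered. A hypothetical caller, assuming the matching declaration lands in power_supply.h, might look like this (illustrative only, not part of this patch):

/* Illustrative only -- the caller and its psy pointer are hypothetical. */
#include <linux/power_supply.h>

static void mark_supply_device_scope(struct power_supply *psy)
{
	int rc = power_supply_set_scope(psy, POWER_SUPPLY_SCOPE_DEVICE);

	if (rc)
		pr_warn("failed to set scope on %s: %d\n", psy->name, rc);
}
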
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 2bbc796..6b0916e 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -419,8 +419,8 @@
constraint checking while the real driver is being developed.
config REGULATOR_QPNP
+ depends on SPMI
depends on OF_SPMI
- depends on MSM_QPNP
tristate "Qualcomm QPNP regulator support"
help
This driver supports voltage regulators in Qualcomm PMIC chips which
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 986d55b..7cb4a51 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -302,11 +302,11 @@
static int regulator_check_drms(struct regulator_dev *rdev)
{
if (!rdev->constraints) {
- rdev_err(rdev, "no constraints\n");
+ rdev_dbg(rdev, "no constraints\n");
return -ENODEV;
}
if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS)) {
- rdev_err(rdev, "operation not allowed\n");
+ rdev_dbg(rdev, "operation not allowed\n");
return -EPERM;
}
return 0;
diff --git a/drivers/regulator/qpnp-regulator.c b/drivers/regulator/qpnp-regulator.c
index 120d17e..8d592fb 100644
--- a/drivers/regulator/qpnp-regulator.c
+++ b/drivers/regulator/qpnp-regulator.c
@@ -29,8 +29,6 @@
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/qpnp-regulator.h>
-#include <mach/qpnp.h>
-
/* Debug Flag Definitions */
enum {
QPNP_VREG_DEBUG_REQUEST = BIT(0), /* Show requests */
@@ -1189,7 +1187,7 @@
pdata->init_data.constraints.input_uV
= pdata->init_data.constraints.max_uV;
- res = qpnp_get_resource(spmi, 0, IORESOURCE_MEM, 0);
+ res = spmi_get_resource(spmi, NULL, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&spmi->dev, "%s: node is missing base address\n",
__func__);
diff --git a/drivers/slimbus/slimbus.c b/drivers/slimbus/slimbus.c
index 24da4d1..36414e0 100644
--- a/drivers/slimbus/slimbus.c
+++ b/drivers/slimbus/slimbus.c
@@ -1979,7 +1979,7 @@
}
}
/* Leave some slots for messaging space */
- if (opensl1[1] == 0 && opensl1[0] == 0)
+ if (opensl1[1] <= 0 && opensl1[0] <= 0)
return -EXFULL;
if (opensl1[1] > opensl1[0]) {
int temp = opensl1[0];
@@ -2184,7 +2184,7 @@
}
}
/* Leave some slots for messaging space */
- if (opensl3[1] == 0 && opensl3[0] == 0)
+ if (opensl3[1] <= 0 && opensl3[0] <= 0)
return -EXFULL;
/* swap 1st and 2nd bucket if 2nd bucket has more open slots */
if (opensl3[1] > opensl3[0]) {
diff --git a/drivers/spi/spi_qsd.c b/drivers/spi/spi_qsd.c
index 2c86e83..f2c881d 100644
--- a/drivers/spi/spi_qsd.c
+++ b/drivers/spi/spi_qsd.c
@@ -405,9 +405,17 @@
bytes_sent = 0;
}
- /* We'll send in chunks of SPI_MAX_LEN if larger */
- bytes_to_send = dd->tx_bytes_remaining / SPI_MAX_LEN ?
- SPI_MAX_LEN : dd->tx_bytes_remaining;
+ /* We'll send in chunks of SPI_MAX_LEN if larger than
+ * 4K bytes for targets that doesn't support infinite
+ * mode. Make sure this doesn't happen on targets that
+ * support infinite mode.
+ */
+ if (!dd->pdata->infinite_mode)
+ bytes_to_send = dd->tx_bytes_remaining / SPI_MAX_LEN ?
+ SPI_MAX_LEN : dd->tx_bytes_remaining;
+ else
+ bytes_to_send = dd->tx_bytes_remaining;
+
num_transfers = DIV_ROUND_UP(bytes_to_send, dd->bytes_per_word);
dd->unaligned_len = bytes_to_send % dd->burst_size;
num_rows = bytes_to_send / dd->burst_size;
@@ -512,12 +520,11 @@
msm_dmov_enqueue_cmd(dd->rx_dma_chan, &dd->rx_hdr);
}
-/* SPI core can send maximum of 4K transfers, because there is HW problem
- with infinite mode.
- Therefore, we are sending several chunks of 3K or less (depending on how
- much is left).
- Upon completion we send the next chunk, or complete the transfer if
- everything is finished.
+/* SPI core on targets that does not support infinite mode can send maximum of
+ 4K transfers, Therefore, we are sending several chunks of 3K or less
+ (depending on how much is left). Upon completion we send the next chunk,
+ or complete the transfer if everything is finished. On targets that support
+ infinite mode, we send all the bytes in as single chunk.
*/
static int msm_spi_dm_send_next(struct msm_spi *dd)
{
@@ -527,8 +534,10 @@
if (dd->mode != SPI_DMOV_MODE)
return 0;
- /* We need to send more chunks, if we sent max last time */
- if (dd->tx_bytes_remaining > SPI_MAX_LEN) {
+ /* On targets which does not support infinite mode,
+ We need to send more chunks, if we sent max last time */
+ if ((!dd->pdata->infinite_mode) &&
+ (dd->tx_bytes_remaining > SPI_MAX_LEN)) {
dd->tx_bytes_remaining -= SPI_MAX_LEN;
if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
return 0;
@@ -1766,6 +1775,8 @@
of_property_read_u32(node, "spi-max-frequency",
&pdata->max_clock_speed);
+ of_property_read_u32(node, "infinite_mode",
+ &pdata->infinite_mode);
return pdata;
}
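
The comments above carry the reasoning: controllers without infinite mode can only program up to 4K bytes per transfer, so larger transfers are split into SPI_MAX_LEN chunks, while controllers whose platform data sets infinite_mode (now also readable from the device tree) send everything at once. A compact sketch of that decision, with a placeholder SPI_MAX_LEN value and a hypothetical helper name:

#include <linux/types.h>

/* Sketch of the chunking decision above; SPI_MAX_LEN's real value lives in
 * the driver headers, 3K here is only a placeholder. */
#define SPI_MAX_LEN (3 * 1024)

static u32 next_chunk_len(u32 bytes_remaining, bool infinite_mode)
{
	if (infinite_mode)
		return bytes_remaining;	/* HW can stream the whole transfer */
	/* otherwise cap each DMA chunk */
	return bytes_remaining > SPI_MAX_LEN ? SPI_MAX_LEN : bytes_remaining;
}
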
diff --git a/drivers/spmi/Kconfig b/drivers/spmi/Kconfig
index 84fd462..7026ee8 100644
--- a/drivers/spmi/Kconfig
+++ b/drivers/spmi/Kconfig
@@ -19,18 +19,11 @@
This is required for communicating with Qualcomm PMICs and
other devices that have the SPMI interface.
-config MSM_QPNP
- depends on ARCH_MSMCOPPER
- depends on OF_SPMI
- bool "MSM QPNP"
- help
- Say 'y' here to include support for the Qualcomm QPNP
-
config MSM_QPNP_INT
depends on SPARSE_IRQ
depends on ARCH_MSMCOPPER
+ depends on SPMI
depends on OF_SPMI
- depends on MSM_QPNP
bool "MSM QPNP INT"
help
Say 'y' here to include support for the Qualcomm QPNP interrupt
diff --git a/drivers/spmi/Makefile b/drivers/spmi/Makefile
index d59a610..becd823 100644
--- a/drivers/spmi/Makefile
+++ b/drivers/spmi/Makefile
@@ -1,7 +1,6 @@
#
# Makefile for kernel SPMI framework.
#
-obj-$(CONFIG_SPMI) += spmi.o
+obj-$(CONFIG_SPMI) += spmi.o spmi-resources.o
obj-$(CONFIG_SPMI_MSM_PMIC_ARB) += spmi-pmic-arb.o
-obj-$(CONFIG_MSM_QPNP) += qpnp.o
obj-$(CONFIG_MSM_QPNP_INT) += qpnp-int.o
diff --git a/drivers/spmi/qpnp-int.c b/drivers/spmi/qpnp-int.c
index 2998c01..b6dfd51 100644
--- a/drivers/spmi/qpnp-int.c
+++ b/drivers/spmi/qpnp-int.c
@@ -31,8 +31,6 @@
#include <asm/mach/irq.h>
#include <mach/qpnp-int.h>
-#define QPNPINT_MAX_BUSSES 1
-
/* 16 slave_ids, 256 per_ids per slave, and 8 ints per per_id */
#define QPNPINT_NR_IRQS (16 * 256 * 8)
@@ -66,13 +64,18 @@
struct q_chip_data {
int bus_nr;
- struct irq_domain domain;
+ struct irq_domain *domain;
struct qpnp_local_int cb;
struct spmi_controller *spmi_ctrl;
struct radix_tree_root per_tree;
+ struct list_head list;
};
-static struct q_chip_data chip_data[QPNPINT_MAX_BUSSES] __read_mostly;
+static LIST_HEAD(qpnpint_chips);
+static DEFINE_MUTEX(qpnpint_chips_mutex);
+
+#define QPNPINT_MAX_BUSSES 4
+struct q_chip_data *chip_lookup[QPNPINT_MAX_BUSSES];
/**
* qpnpint_encode_hwirq - translate between qpnp_irq_spec and
@@ -138,8 +141,7 @@
if (chip_d->cb.mask) {
rc = qpnpint_decode_hwirq(d->hwirq, &q_spec);
if (rc)
- pr_err("%s: decode failed on hwirq %lu\n",
- __func__, d->hwirq);
+ pr_err("decode failed on hwirq %lu\n", d->hwirq);
else
chip_d->cb.mask(chip_d->spmi_ctrl, &q_spec,
irq_d->priv_d);
@@ -150,8 +152,7 @@
rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_EN_CLR,
(u8 *)&irq_d->mask_shift, 1);
if (rc)
- pr_err("%s: spmi failure on irq %d\n",
- __func__, d->irq);
+ pr_err("spmi failure on irq %d\n", d->irq);
}
static void qpnpint_irq_mask_ack(struct irq_data *d)
@@ -168,8 +169,7 @@
if (chip_d->cb.mask) {
rc = qpnpint_decode_hwirq(d->hwirq, &q_spec);
if (rc)
- pr_err("%s: decode failed on hwirq %lu\n",
- __func__, d->hwirq);
+ pr_err("decode failed on hwirq %lu\n", d->hwirq);
else
chip_d->cb.mask(chip_d->spmi_ctrl, &q_spec,
irq_d->priv_d);
@@ -180,14 +180,12 @@
rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_EN_CLR,
&irq_d->mask_shift, 1);
if (rc)
- pr_err("%s: spmi failure on irq %d\n",
- __func__, d->irq);
+ pr_err("spmi failure on irq %d\n", d->irq);
rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_LATCHED_CLR,
&irq_d->mask_shift, 1);
if (rc)
- pr_err("%s: spmi failure on irq %d\n",
- __func__, d->irq);
+ pr_err("spmi failure on irq %d\n", d->irq);
}
static void qpnpint_irq_unmask(struct irq_data *d)
@@ -203,8 +201,7 @@
if (chip_d->cb.unmask) {
rc = qpnpint_decode_hwirq(d->hwirq, &q_spec);
if (rc)
- pr_err("%s: decode failed on hwirq %lu\n",
- __func__, d->hwirq);
+ pr_err("decode failed on hwirq %lu\n", d->hwirq);
else
chip_d->cb.unmask(chip_d->spmi_ctrl, &q_spec,
irq_d->priv_d);
@@ -214,8 +211,7 @@
rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_EN_SET,
&irq_d->mask_shift, 1);
if (rc)
- pr_err("%s: spmi failure on irq %d\n",
- __func__, d->irq);
+ pr_err("spmi failure on irq %d\n", d->irq);
}
static int qpnpint_irq_set_type(struct irq_data *d, unsigned int flow_type)
@@ -244,7 +240,7 @@
if (flow_type & IRQF_TRIGGER_HIGH)
per_d->pol_high |= irq_d->mask_shift;
else
- per_d->pol_high &= ~irq_d->mask_shift;
+ per_d->pol_low |= irq_d->mask_shift;
}
buf[0] = per_d->type;
@@ -253,8 +249,7 @@
rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_SET_TYPE, &buf, 3);
if (rc)
- pr_err("%s: spmi failure on irq %d\n",
- __func__, d->irq);
+ pr_err("spmi failure on irq %d\n", d->irq);
return rc;
}
@@ -279,13 +274,16 @@
return rc;
irq_d->spmi_slave = q_spec.slave;
irq_d->spmi_offset = q_spec.per << 8;
- irq_d->per_d->use_count++;
irq_d->chip_d = chip_d;
if (chip_d->cb.register_priv_data)
rc = chip_d->cb.register_priv_data(chip_d->spmi_ctrl, &q_spec,
&irq_d->priv_d);
- return rc;
+ if (rc)
+ return rc;
+
+ irq_d->per_d->use_count++;
+ return 0;
}
static struct q_irq_data *qpnpint_alloc_irq_data(
@@ -307,8 +305,10 @@
per_d = radix_tree_lookup(&chip_d->per_tree, (hwirq & ~0x7));
if (!per_d) {
per_d = kzalloc(sizeof(struct q_perip_data), GFP_KERNEL);
- if (!per_d)
+ if (!per_d) {
+ kfree(irq_d);
return ERR_PTR(-ENOMEM);
+ }
radix_tree_insert(&chip_d->per_tree,
(hwirq & ~0x7), per_d);
}
@@ -317,74 +317,6 @@
return irq_d;
}
-static int qpnpint_register_int(uint32_t busno, unsigned long hwirq)
-{
- int irq, rc;
- struct irq_domain *domain;
- struct q_irq_data *irq_d;
-
- pr_debug("busno = %u hwirq = %lu\n", busno, hwirq);
-
- if (hwirq < 0 || hwirq >= 32768) {
- pr_err("%s: hwirq %lu out of qpnp interrupt bounds\n",
- __func__, hwirq);
- return -EINVAL;
- }
-
- if (busno < 0 || busno > QPNPINT_MAX_BUSSES) {
- pr_err("%s: invalid bus number %d\n", __func__, busno);
- return -EINVAL;
- }
-
- domain = &chip_data[busno].domain;
- irq = irq_domain_to_irq(domain, hwirq);
-
- rc = irq_alloc_desc_at(irq, numa_node_id());
- if (rc < 0) {
- if (rc != -EEXIST)
- pr_err("%s: failed to alloc irq at %d with "
- "rc %d\n", __func__, irq, rc);
- return rc;
- }
- irq_d = qpnpint_alloc_irq_data(&chip_data[busno], hwirq);
- if (IS_ERR(irq_d)) {
- pr_err("%s: failed to alloc irq data %d with "
- "rc %d\n", __func__, irq, rc);
- rc = PTR_ERR(irq_d);
- goto register_err_cleanup;
- }
- rc = qpnpint_init_irq_data(&chip_data[busno], irq_d, hwirq);
- if (rc) {
- pr_err("%s: failed to init irq data %d with "
- "rc %d\n", __func__, irq, rc);
- goto register_err_cleanup;
- }
-
- irq_domain_register_irq(domain, hwirq);
-
- irq_set_chip_and_handler(irq,
- &qpnpint_chip,
- handle_level_irq);
- irq_set_chip_data(irq, irq_d);
-#ifdef CONFIG_ARM
- set_irq_flags(irq, IRQF_VALID);
-#else
- irq_set_noprobe(irq);
-#endif
- return 0;
-
-register_err_cleanup:
- irq_free_desc(irq);
- if (!IS_ERR(irq_d)) {
- if (irq_d->per_d->use_count == 1)
- kfree(irq_d->per_d);
- else
- irq_d->per_d->use_count--;
- kfree(irq_d);
- }
- return rc;
-}
-
static int qpnpint_irq_domain_dt_translate(struct irq_domain *d,
struct device_node *controller,
const u32 *intspec, unsigned int intsize,
@@ -392,11 +324,10 @@
unsigned int *out_type)
{
struct qpnp_irq_spec addr;
- struct q_chip_data *chip_d = d->priv;
int ret;
- pr_debug("%s: intspec[0] 0x%x intspec[1] 0x%x intspec[2] 0x%x\n",
- __func__, intspec[0], intspec[1], intspec[2]);
+ pr_debug("intspec[0] 0x%x intspec[1] 0x%x intspec[2] 0x%x\n",
+ intspec[0], intspec[1], intspec[2]);
if (d->of_node != controller)
return -EINVAL;
@@ -409,41 +340,102 @@
ret = qpnpint_encode_hwirq(&addr);
if (ret < 0) {
- pr_err("%s: invalid intspec\n", __func__);
+ pr_err("invalid intspec\n");
return ret;
}
*out_hwirq = ret;
*out_type = IRQ_TYPE_NONE;
- /**
- * Register the interrupt if it's not already registered.
- * This implies that mapping a qpnp interrupt allocates
- * resources.
- */
- ret = qpnpint_register_int(chip_d->bus_nr, *out_hwirq);
- if (ret && ret != -EEXIST) {
- pr_err("%s: Cannot register hwirq %lu\n", __func__, *out_hwirq);
- return ret;
- }
-
return 0;
}
+static void qpnpint_free_irq_data(struct q_irq_data *irq_d)
+{
+ if (irq_d->per_d->use_count == 1)
+ kfree(irq_d->per_d);
+ else
+ irq_d->per_d->use_count--;
+ kfree(irq_d);
+}
+
+static int qpnpint_irq_domain_map(struct irq_domain *d,
+ unsigned int virq, irq_hw_number_t hwirq)
+{
+ struct q_chip_data *chip_d = d->host_data;
+ struct q_irq_data *irq_d;
+ int rc;
+
+ pr_debug("hwirq = %lu\n", hwirq);
+
+ if (hwirq < 0 || hwirq >= 32768) {
+ pr_err("hwirq %lu out of bounds\n", hwirq);
+ return -EINVAL;
+ }
+
+ irq_radix_revmap_insert(d, virq, hwirq);
+
+ irq_d = qpnpint_alloc_irq_data(chip_d, hwirq);
+ if (IS_ERR(irq_d)) {
+ pr_err("failed to alloc irq data for hwirq %lu\n", hwirq);
+ return PTR_ERR(irq_d);
+ }
+
+ rc = qpnpint_init_irq_data(chip_d, irq_d, hwirq);
+ if (rc) {
+ pr_err("failed to init irq data for hwirq %lu\n", hwirq);
+ goto map_err;
+ }
+
+ irq_set_chip_and_handler(virq,
+ &qpnpint_chip,
+ handle_level_irq);
+ irq_set_chip_data(virq, irq_d);
+#ifdef CONFIG_ARM
+ set_irq_flags(virq, IRQF_VALID);
+#else
+ irq_set_noprobe(virq);
+#endif
+ return 0;
+
+map_err:
+ qpnpint_free_irq_data(irq_d);
+ return rc;
+}
+
+void qpnpint_irq_domain_unmap(struct irq_domain *d, unsigned int virq)
+{
+ struct q_irq_data *irq_d = irq_get_chip_data(virq);
+
+ if (WARN_ON(!irq_d))
+ return;
+
+ qpnpint_free_irq_data(irq_d);
+}
+
const struct irq_domain_ops qpnpint_irq_domain_ops = {
- .dt_translate = qpnpint_irq_domain_dt_translate,
+ .map = qpnpint_irq_domain_map,
+ .unmap = qpnpint_irq_domain_unmap,
+ .xlate = qpnpint_irq_domain_dt_translate,
};
-int qpnpint_register_controller(unsigned int busno,
+int qpnpint_register_controller(struct device_node *node,
+ struct spmi_controller *ctrl,
struct qpnp_local_int *li_cb)
{
- if (busno >= QPNPINT_MAX_BUSSES)
- return -EINVAL;
- chip_data[busno].cb = *li_cb;
- chip_data[busno].spmi_ctrl = spmi_busnum_to_ctrl(busno);
- if (!chip_data[busno].spmi_ctrl)
- return -ENOENT;
+ struct q_chip_data *chip_d;
- return 0;
+ if (!node || !ctrl || ctrl->nr >= QPNPINT_MAX_BUSSES)
+ return -EINVAL;
+
+ list_for_each_entry(chip_d, &qpnpint_chips, list)
+ if (node == chip_d->domain->of_node) {
+ chip_d->cb = *li_cb;
+ chip_d->spmi_ctrl = ctrl;
+ chip_lookup[ctrl->nr] = chip_d;
+ return 0;
+ }
+
+ return -ENOENT;
}
EXPORT_SYMBOL(qpnpint_register_controller);
@@ -457,21 +449,18 @@
pr_debug("spec slave = %u per = %u irq = %u\n",
spec->slave, spec->per, spec->irq);
- if (!spec || !spmi_ctrl)
- return -EINVAL;
-
busno = spmi_ctrl->nr;
- if (busno >= QPNPINT_MAX_BUSSES)
+ if (!spec || !spmi_ctrl || busno >= QPNPINT_MAX_BUSSES)
return -EINVAL;
hwirq = qpnpint_encode_hwirq(spec);
if (hwirq < 0) {
- pr_err("%s: invalid irq spec passed\n", __func__);
+ pr_err("invalid irq spec passed\n");
return -EINVAL;
}
- domain = &chip_data[busno].domain;
- irq = irq_domain_to_irq(domain, hwirq);
+ domain = chip_lookup[busno]->domain;
+ irq = irq_radix_revmap_lookup(domain, hwirq);
generic_handle_irq(irq);
@@ -479,31 +468,24 @@
}
EXPORT_SYMBOL(qpnpint_handle_irq);
-/**
- * This assumes that there's a relationship between the order of the interrupt
- * controllers specified to of_irq_match() is the SPMI device topology. If
- * this ever turns out to be a bad assumption, then of_irq_init_cb_t should
- * be modified to pass a parameter to this function.
- */
-static int qpnpint_cnt __initdata;
-
int __init qpnpint_of_init(struct device_node *node, struct device_node *parent)
{
- struct q_chip_data *chip_d = &chip_data[qpnpint_cnt];
- struct irq_domain *domain = &chip_d->domain;
+ struct q_chip_data *chip_d;
+
+ chip_d = kzalloc(sizeof(struct q_chip_data), GFP_KERNEL);
+ if (!chip_d)
+ return -ENOMEM;
+
+ chip_d->domain = irq_domain_add_tree(node,
+ &qpnpint_irq_domain_ops, chip_d);
+ if (!chip_d->domain) {
+ pr_err("Unable to allocate irq_domain\n");
+ kfree(chip_d);
+ return -ENOMEM;
+ }
INIT_RADIX_TREE(&chip_d->per_tree, GFP_ATOMIC);
-
- domain->irq_base = irq_domain_find_free_range(0, QPNPINT_NR_IRQS);
- domain->nr_irq = QPNPINT_NR_IRQS;
- domain->of_node = of_node_get(node);
- domain->priv = chip_d;
- domain->ops = &qpnpint_irq_domain_ops;
- irq_domain_add(domain);
-
- pr_info("irq_base = %d\n", domain->irq_base);
-
- qpnpint_cnt++;
+ list_add(&chip_d->list, &qpnpint_chips);
return 0;
}
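For orientation, a hedged sketch of how the reworked path is driven once a controller has called qpnpint_register_controller() for its device-tree node (as spmi-pmic-arb.c does below). Only the qpnpint_* call and the qpnp_irq_spec fields come from this patch; the helper, its parameters and their types are illustrative assumptions.
/*
 * Illustrative only: forwarding a decoded peripheral interrupt into the
 * qpnp-int layer.  qpnpint_handle_irq() encodes the spec into a hwirq,
 * resolves the Linux IRQ via irq_radix_revmap_lookup() on the per-node
 * domain and calls generic_handle_irq() on it.
 */
static void example_forward_peripheral_irq(struct spmi_controller *ctrl,
u8 slave, u8 per, u8 irq_bit)
{
struct qpnp_irq_spec spec = {
.slave = slave,
.per = per,
.irq = irq_bit,
};
qpnpint_handle_irq(ctrl, &spec);
}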
diff --git a/drivers/spmi/qpnp.c b/drivers/spmi/qpnp.c
deleted file mode 100644
index a164efb..0000000
--- a/drivers/spmi/qpnp.c
+++ /dev/null
@@ -1,56 +0,0 @@
-/* Copyright (c) 2002-3 Patrick Mochel
- * Copyright (c) 2002-3 Open Source Development Labs
- * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Resource handling based on platform.c.
- */
-
-#include <linux/export.h>
-#include <mach/qpnp.h>
-
-/**
- * qpnp_get_resource - get a resource for a device
- * @dev: qpnp device
- * @type: resource type
- * @num: resource index
- */
-struct resource *qpnp_get_resource(struct spmi_device *dev,
- unsigned int node_idx, unsigned int type,
- unsigned int res_num)
-{
- int i;
-
- for (i = 0; i < dev->dev_node[node_idx].num_resources; i++) {
- struct resource *r = &dev->dev_node[node_idx].resource[i];
-
- if (type == resource_type(r) && res_num-- == 0)
- return r;
- }
- return NULL;
-}
-EXPORT_SYMBOL_GPL(qpnp_get_resource);
-
-/**
- * qpnp_get_irq - get an IRQ for a device
- * @dev: qpnp device
- * @num: IRQ number index
- */
-int qpnp_get_irq(struct spmi_device *dev, unsigned int node_idx,
- unsigned int res_num)
-{
- struct resource *r = qpnp_get_resource(dev, node_idx,
- IORESOURCE_IRQ, res_num);
-
- return r ? r->start : -ENXIO;
-}
-EXPORT_SYMBOL_GPL(qpnp_get_irq);
-
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index f22b900..422e99e 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -664,7 +664,14 @@
goto err_add_controller;
/* Register the interrupt enable/disable functions */
- qpnpint_register_controller(cell_index, &spmi_pmic_arb_intr_cb);
+ ret = qpnpint_register_controller(pmic_arb->controller.dev.of_node,
+ &pmic_arb->controller,
+ &spmi_pmic_arb_intr_cb);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to register controller %d\n",
+ cell_index);
+ goto err_reg_controller;
+ }
/* Register device(s) from the device tree */
of_spmi_register_devices(&pmic_arb->controller);
@@ -674,6 +681,8 @@
return 0;
+err_reg_controller:
+ spmi_del_controller(&pmic_arb->controller);
err_add_controller:
platform_set_drvdata(pdev, NULL);
return ret;
diff --git a/drivers/spmi/spmi-resources.c b/drivers/spmi/spmi-resources.c
new file mode 100644
index 0000000..97f15ae
--- /dev/null
+++ b/drivers/spmi/spmi-resources.c
@@ -0,0 +1,151 @@
+/* Copyright (c) 2002-3 Patrick Mochel
+ * Copyright (c) 2002-3 Open Source Development Labs
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Resource handling based on platform.c.
+ */
+
+#include <linux/export.h>
+#include <linux/spmi.h>
+#include <linux/string.h>
+
+/**
+ * spmi_get_resource - get a resource for a device
+ * @dev: spmi device
+ * @node: device node resource
+ * @type: resource type
+ * @res_num: resource index
+ *
+ * If 'node' is specified as NULL, then the API treats this as a special
+ * case to assume the first devnode. For configurations that do not use
+ * spmi-dev-container, there is only one node to begin with, so NULL should
+ * be passed in this case.
+ *
+ * Returns
+ * NULL on failure.
+ */
+struct resource *spmi_get_resource(struct spmi_device *dev,
+ struct spmi_resource *node,
+ unsigned int type, unsigned int res_num)
+{
+ int i;
+
+ /* if a node is not specified, default to the first node */
+ if (!node)
+ node = &dev->res;
+
+ for (i = 0; i < node->num_resources; i++) {
+ struct resource *r = &node->resource[i];
+
+ if (type == resource_type(r) && res_num-- == 0)
+ return r;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(spmi_get_resource);
+
+#define SPMI_MAX_RES_NAME 256
+
+/**
+ * spmi_get_resource_byname - get a resource for a device given a name
+ * @dev: spmi device handle
+ * @node: device node resource
+ * @type: resource type
+ * @name: resource name to lookup
+ */
+struct resource *spmi_get_resource_byname(struct spmi_device *dev,
+ struct spmi_resource *node,
+ unsigned int type,
+ const char *name)
+{
+ int i;
+
+ /* if a node is not specified, default to the first node */
+ if (!node)
+ node = &dev->res;
+
+ for (i = 0; i < node->num_resources; i++) {
+ struct resource *r = &node->resource[i];
+
+ if (type == resource_type(r) && r->name &&
+ !strncmp(r->name, name, SPMI_MAX_RES_NAME))
+ return r;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(spmi_get_resource_byname);
+
+/**
+ * spmi_get_irq - get an IRQ for a device
+ * @dev: spmi device
+ * @node: device node resource
+ * @res_num: IRQ number index
+ *
+ * Returns
+ * -ENXIO on failure.
+ */
+int spmi_get_irq(struct spmi_device *dev, struct spmi_resource *node,
+ unsigned int res_num)
+{
+ struct resource *r = spmi_get_resource(dev, node,
+ IORESOURCE_IRQ, res_num);
+
+ return r ? r->start : -ENXIO;
+}
+EXPORT_SYMBOL_GPL(spmi_get_irq);
+
+/**
+ * spmi_get_irq_byname - get an IRQ for a device given a name
+ * @dev: spmi device handle
+ * @node: device node resource
+ * @name: resource name to lookup
+ *
+ * Returns -ENXIO on failure
+ */
+int spmi_get_irq_byname(struct spmi_device *dev,
+ struct spmi_resource *node, const char *name)
+{
+ struct resource *r = spmi_get_resource_byname(dev, node,
+ IORESOURCE_IRQ, name);
+ return r ? r->start : -ENXIO;
+}
+EXPORT_SYMBOL_GPL(spmi_get_irq_byname);
+
+/**
+ * spmi_get_dev_container_byname - get a device node resource
+ * @dev: spmi device handle
+ * @label: device name to lookup
+ *
+ * Only usable in spmi-dev-container configurations. Given a name,
+ * find the associated spmi_resource that matches the name.
+ *
+ * Return NULL if the spmi_device is not a dev-container,
+ * or if the lookup fails.
+ */
+struct spmi_resource *spmi_get_dev_container_byname(struct spmi_device *dev,
+ const char *label)
+{
+ int i;
+
+ if (!label)
+ return NULL;
+
+ for (i = 0; i < dev->num_dev_node; i++) {
+ struct spmi_resource *r = &dev->dev_node[i];
+
+ if (r && r->label && !strncmp(r->label,
+ label, SPMI_MAX_RES_NAME))
+ return r;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(spmi_get_dev_container_byname);
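As a usage illustration of the relocated helpers, a minimal client probe might look like the sketch below; the "pon" label, the "pon-irq" resource name and the probe function itself are placeholders, not part of this patch.
/*
 * Illustrative only: an SPMI peripheral driver pulling its resources.
 * For non-container devices a NULL node selects the first (only) node,
 * per the spmi_get_resource() comment above.
 */
static int example_spmi_probe(struct spmi_device *spmi)
{
struct spmi_resource *node;
struct resource *mem;
int irq;
/* dev-container configurations: pick the child node by label */
node = spmi_get_dev_container_byname(spmi, "pon");
mem = spmi_get_resource(spmi, node, IORESOURCE_MEM, 0);
irq = spmi_get_irq_byname(spmi, node, "pon-irq");
if (!mem || irq < 0)
return -ENODEV;
return 0;
}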
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
index 0342b97..914df95 100644
--- a/drivers/spmi/spmi.c
+++ b/drivers/spmi/spmi.c
@@ -238,6 +238,7 @@
spmidev->dev.platform_data = (void *)info->platform_data;
spmidev->num_dev_node = info->num_dev_node;
spmidev->dev_node = info->dev_node;
+ spmidev->res = info->res;
rc = spmi_add_device(spmidev);
if (rc < 0) {
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 8390f5d..5d79bd2 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -631,24 +631,32 @@
{
unsigned int physaddr = 0;
pgd_t *pgd_ptr = NULL;
+ pud_t *pud_ptr = NULL;
pmd_t *pmd_ptr = NULL;
pte_t *pte_ptr = NULL, pte;
spin_lock(&current->mm->page_table_lock);
pgd_ptr = pgd_offset(current->mm, virtaddr);
- if (pgd_none(*pgd) || pgd_bad(*pgd)) {
+ if (pgd_none(*pgd_ptr) || pgd_bad(*pgd_ptr)) {
pr_err("Failed to convert virtaddr %x to pgd_ptr\n",
virtaddr);
goto done;
}
- pmd_ptr = pmd_offset(pgd_ptr, virtaddr);
- if (pmd_none(*pmd_ptr) || pmd_bad(*pmd_ptr)) {
- pr_err("Failed to convert pgd_ptr %p to pmd_ptr\n",
+ pud_ptr = pud_offset(pgd_ptr, virtaddr);
+ if (pud_none(*pud_ptr) || pud_bad(*pud_ptr)) {
+ pr_err("Failed to convert pgd_ptr %p to pud_ptr\n",
(void *)pgd_ptr);
goto done;
}
+ pmd_ptr = pmd_offset(pud_ptr, virtaddr);
+ if (pmd_none(*pmd_ptr) || pmd_bad(*pmd_ptr)) {
+ pr_err("Failed to convert pud_ptr %p to pmd_ptr\n",
+ (void *)pud_ptr);
+ goto done;
+ }
+
pte_ptr = pte_offset_map(pmd_ptr, virtaddr);
if (!pte_ptr) {
pr_err("Failed to convert pmd_ptr %p to pte_ptr\n",
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 72bc8de..c483bb45 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -73,6 +73,7 @@
#ifdef CONFIG_SERIAL_MSM_RX_WAKEUP
struct msm_wakeup wakeup;
#endif
+ int uim;
};
#define UART_TO_MSM(uart_port) ((struct msm_port *) uart_port)
@@ -500,7 +501,22 @@
msm_port->clk_state = MSM_CLK_ON;
#endif
- if (port->uartclk == 19200000) {
+ if (msm_port->uim) {
+ msm_write(port,
+ UART_SIM_CFG_UIM_TX_MODE |
+ UART_SIM_CFG_UIM_RX_MODE |
+ UART_SIM_CFG_STOP_BIT_LEN_N(1) |
+ UART_SIM_CFG_SIM_CLK_ON |
+ UART_SIM_CFG_SIM_CLK_STOP_HIGH |
+ UART_SIM_CFG_SIM_SEL,
+ UART_SIM_CFG);
+
+ /* (TCXO * 16) / (5 * 372) = TCXO * 16 / 1860 */
+ msm_write(port, 0x08, UART_MREG);
+ msm_write(port, 0x19, UART_NREG);
+ msm_write(port, 0xe8, UART_DREG);
+ msm_write(port, 0x0e, UART_MNDREG);
+ } else if (port->uartclk == 19200000) {
/* clock is TCXO (19.2MHz) */
msm_write(port, 0x06, UART_MREG);
msm_write(port, 0xF1, UART_NREG);
@@ -603,6 +619,11 @@
{
struct msm_port *msm_port = UART_TO_MSM(port);
+ if (msm_port->uim)
+ msm_write(port,
+ UART_SIM_CFG_SIM_CLK_STOP_HIGH,
+ UART_SIM_CFG);
+
msm_port->imr = 0;
msm_write(port, 0, UART_IMR); /* disable interrupts */
@@ -1040,6 +1061,39 @@
return uart_add_one_port(&msm_uart_driver, port);
}
+static int __init msm_uim_probe(struct platform_device *pdev)
+{
+ struct msm_port *msm_port;
+ struct resource *resource;
+ struct uart_port *port;
+ int irq;
+
+ if (unlikely(pdev->id < 0 || pdev->id >= UART_NR))
+ return -ENXIO;
+
+ pr_info("msm_uim: detected port #%d\n", pdev->id);
+
+ port = get_port_from_line(pdev->id);
+ port->dev = &pdev->dev;
+ msm_port = UART_TO_MSM(port);
+
+ msm_port->uim = true;
+
+ resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (unlikely(!resource))
+ return -ENXIO;
+ port->mapbase = resource->start;
+
+ irq = platform_get_irq(pdev, 0);
+ if (unlikely(irq < 0))
+ return -ENXIO;
+ port->irq = irq;
+
+ platform_set_drvdata(pdev, port);
+
+ return uart_add_one_port(&msm_uart_driver, port);
+}
+
static int __devexit msm_serial_remove(struct platform_device *pdev)
{
struct msm_port *msm_port = platform_get_drvdata(pdev);
@@ -1125,6 +1179,14 @@
},
};
+static struct platform_driver msm_platform_uim_driver = {
+ .remove = msm_serial_remove,
+ .driver = {
+ .name = "msm_uim",
+ .owner = THIS_MODULE,
+ },
+};
+
static int __init msm_serial_init(void)
{
int ret;
@@ -1137,6 +1199,8 @@
if (unlikely(ret))
uart_unregister_driver(&msm_uart_driver);
+ platform_driver_probe(&msm_platform_uim_driver, msm_uim_probe);
+
printk(KERN_INFO "msm_serial: driver initialized\n");
return ret;
diff --git a/drivers/tty/serial/msm_serial.h b/drivers/tty/serial/msm_serial.h
index 65d0e30..a769825 100644
--- a/drivers/tty/serial/msm_serial.h
+++ b/drivers/tty/serial/msm_serial.h
@@ -1,7 +1,7 @@
/*
* Copyright (C) 2007 Google, Inc.
* Author: Robert Love <rlove@google.com>
- * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -100,6 +100,16 @@
#define UART_DREG 0x0030
#define UART_MNDREG 0x0034
#define UART_IRDA 0x0038
+
+#define UART_SIM_CFG 0x003c
+#define UART_SIM_CFG_UIM_TX_MODE (1 << 17)
+#define UART_SIM_CFG_UIM_RX_MODE (1 << 16)
+#define UART_SIM_CFG_STOP_BIT_LEN_N(n) ((n) << 8)
+#define UART_SIM_CFG_SIM_CLK_ON (1 << 7)
+#define UART_SIM_CFG_SIM_CLK_TD8_SEL (1 << 6)
+#define UART_SIM_CFG_SIM_CLK_STOP_HIGH (1 << 5)
+#define UART_SIM_CFG_SIM_SEL (1 << 0)
+
#define UART_MISR_MODE 0x0040
#define UART_MISR_RESET 0x0044
#define UART_MISR_EXPORT 0x0048
diff --git a/drivers/tty/serial/msm_serial_hs_lite.c b/drivers/tty/serial/msm_serial_hs_lite.c
index 5735534..ce197be 100644
--- a/drivers/tty/serial/msm_serial_hs_lite.c
+++ b/drivers/tty/serial/msm_serial_hs_lite.c
@@ -122,9 +122,15 @@
},
{}
};
+
+#ifdef CONFIG_SERIAL_MSM_HSL_CONSOLE
+static int get_console_state(struct uart_port *port);
+#else
+static inline int get_console_state(struct uart_port *port) { return -ENODEV; };
+#endif
+
static struct dentry *debug_base;
static inline void wait_for_xmitr(struct uart_port *port);
-static int get_console_state(struct uart_port *port);
static inline void msm_hsl_write(struct uart_port *port,
unsigned int val, unsigned int off)
{
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 5dceb41..d97d548 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2808,6 +2808,7 @@
int usb_remote_wakeup(struct usb_device *udev)
{
int status = 0;
+ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
if (udev->state == USB_STATE_SUSPENDED) {
dev_dbg(&udev->dev, "usb %sresume\n", "wakeup-");
@@ -2816,7 +2817,11 @@
/* Let the drivers do their thing, then... */
usb_autosuspend_device(udev);
}
+ } else {
+ dev_dbg(&udev->dev, "usb not suspended\n");
+ clear_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
}
+
return status;
}
@@ -3152,7 +3157,9 @@
* value.
*/
for (i = 0; i < GET_DESCRIPTOR_TRIES; (++i, msleep(100))) {
- if (USE_NEW_SCHEME(retry_counter) && !(hcd->driver->flags & HCD_USB3)) {
+ if (USE_NEW_SCHEME(retry_counter) &&
+ !(hcd->driver->flags & HCD_USB3) &&
+ !(hcd->driver->flags & HCD_OLD_ENUM)) {
struct usb_device_descriptor *buf;
int r = 0;
@@ -3252,7 +3259,9 @@
* - read ep0 maxpacket even for high and low speed,
*/
msleep(10);
- if (USE_NEW_SCHEME(retry_counter) && !(hcd->driver->flags & HCD_USB3))
+ if (USE_NEW_SCHEME(retry_counter) &&
+ !(hcd->driver->flags & HCD_USB3) &&
+ !(hcd->driver->flags & HCD_OLD_ENUM))
break;
}
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index d216f17..05f1a60 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -147,13 +147,6 @@
#define USB_PHY_VDD_DIG_VOL_MIN 1045000 /* uV */
#define USB_PHY_VDD_DIG_VOL_MAX 1320000 /* uV */
-enum usb_vdd_value {
- VDD_NONE = 0,
- VDD_MIN,
- VDD_MAX,
- VDD_VAL_MAX,
-};
-
static const int vdd_val[VDD_TYPE_MAX][VDD_VAL_MAX] = {
{ /* VDD_CX CORNER Voting */
[VDD_NONE] = RPM_VREG_CORNER_NONE,
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index 8cdc2e9..f82c2fe 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -48,7 +48,6 @@
* - Handle requests which spawns into several TDs
* - GET_STATUS(device) - always reports 0
* - Gadget API (majority of optional features)
- * - Suspend & Remote Wakeup
*/
#include <linux/delay.h>
#include <linux/device.h>
@@ -170,6 +169,8 @@
#define CAP_ENDPTCTRL (hw_bank.lpm ? 0x0ECUL : 0x080UL)
#define CAP_LAST (hw_bank.lpm ? 0x12CUL : 0x0C0UL)
+#define REMOTE_WAKEUP_DELAY msecs_to_jiffies(200)
+
/* maximum number of enpoints: valid only after hw_device_reset() */
static unsigned hw_ep_max;
@@ -1523,6 +1524,24 @@
return ret;
}
+static void usb_do_remote_wakeup(struct work_struct *w)
+{
+ struct ci13xxx *udc = _udc;
+ unsigned long flags;
+ bool do_wake;
+
+ /*
+ * This work cannot be canceled from the interrupt handler. Check
+ * if wakeup conditions are still met.
+ */
+ spin_lock_irqsave(udc->lock, flags);
+ do_wake = udc->suspended && udc->remote_wakeup;
+ spin_unlock_irqrestore(udc->lock, flags);
+
+ if (do_wake)
+ ci13xxx_wakeup(&udc->gadget);
+}
+
static ssize_t usb_remote_wakeup(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
@@ -1652,6 +1671,7 @@
unsigned i;
int ret = 0;
unsigned length = mReq->req.length;
+ struct ci13xxx *udc = _udc;
trace("%p, %p", mEp, mReq);
@@ -1728,6 +1748,18 @@
mReq->ptr->page[i] =
(mReq->req.dma + i * CI13XXX_PAGE_SIZE) & ~TD_RESERVED_MASK;
+ /* Remote Wakeup */
+ if (udc->suspended) {
+ if (!udc->remote_wakeup) {
+ mReq->req.status = -EAGAIN;
+ dev_dbg(mEp->device, "%s: queue failed (suspend) ept #%d\n",
+ __func__, mEp->num);
+ return -EAGAIN;
+ }
+ usb_phy_set_suspend(udc->transceiver, 0);
+ schedule_delayed_work(&udc->rw_work, REMOTE_WAKEUP_DELAY);
+ }
+
if (!list_empty(&mEp->qh.queue)) {
struct ci13xxx_req *mReqPrev;
int n = hw_ep_bit(mEp->num, mEp->dir);
@@ -2207,8 +2239,11 @@
trace("%p", udc);
mEp = (udc->ep0_dir == TX) ? &udc->ep0out : &udc->ep0in;
- udc->status->context = udc;
- udc->status->complete = isr_setup_status_complete;
+ if (udc->status) {
+ udc->status->context = udc;
+ udc->status->complete = isr_setup_status_complete;
+ } else
+ return -EINVAL;
spin_unlock(mEp->lock);
retval = usb_ep_queue(&mEp->ep, udc->status, GFP_ATOMIC);
@@ -3341,6 +3376,8 @@
}
}
+ INIT_DELAYED_WORK(&udc->rw_work, usb_do_remote_wakeup);
+
retval = hw_device_init(regs);
if (retval < 0)
goto put_transceiver;
diff --git a/drivers/usb/gadget/ci13xxx_udc.h b/drivers/usb/gadget/ci13xxx_udc.h
index 4376804..a189b45 100644
--- a/drivers/usb/gadget/ci13xxx_udc.h
+++ b/drivers/usb/gadget/ci13xxx_udc.h
@@ -149,6 +149,7 @@
u8 configured; /* is device configured */
u8 test_mode; /* the selected test mode */
+ struct delayed_work rw_work; /* remote wakeup delayed work */
struct usb_gadget_driver *driver; /* 3rd party gadget driver */
struct ci13xxx_udc_driver *udc_driver; /* device controller driver */
int vbus_active; /* is VBUS active */
diff --git a/drivers/usb/gadget/msm72k_udc.c b/drivers/usb/gadget/msm72k_udc.c
index a025d95..3e40552 100644
--- a/drivers/usb/gadget/msm72k_udc.c
+++ b/drivers/usb/gadget/msm72k_udc.c
@@ -119,6 +119,7 @@
unsigned long dTD_update_fail_count;
unsigned long false_prime_fail_count;
unsigned actual_prime_fail_count;
+ unsigned long dTD_workaround_fail_count;
unsigned wedged:1;
/* pointers to DMA transfer list area */
@@ -199,6 +200,7 @@
unsigned phy_fail_count;
unsigned prime_fail_count;
unsigned long dTD_update_fail_count;
+ unsigned long dTD_workaround_fail_count;
struct usb_gadget gadget;
struct usb_gadget_driver *driver;
@@ -1110,6 +1112,8 @@
struct msm_request *req;
unsigned long flags;
int req_dequeue = 1;
+ int dtd_update_fail_count_chk = 10;
+ int check_bit = 0;
unsigned info;
/*
@@ -1136,12 +1140,22 @@
/* if the transaction is still in-flight, stop here */
if (info & INFO_ACTIVE) {
if (req_dequeue) {
- req_dequeue = 0;
ui->dTD_update_fail_count++;
ept->dTD_update_fail_count++;
- udelay(10);
+ udelay(1);
+ if (!dtd_update_fail_count_chk--) {
+ req_dequeue = 0;
+ check_bit = 1;
+ }
goto dequeue;
} else {
+ if (check_bit) {
+ pr_debug("%s: Delay Workaround Failed\n",
+ __func__);
+ check_bit = 0;
+ ui->dTD_workaround_fail_count++;
+ ept->dTD_workaround_fail_count++;
+ }
break;
}
}
@@ -1965,11 +1979,14 @@
continue;
i += scnprintf(buf + i, PAGE_SIZE - i,
- "ept%d %s false_prime_count=%lu prime_fail_count=%d dtd_fail_count=%lu\n",
+ "ept%d %s false_prime_count=%lu prime_fail_count=%d "
+ "dtd_fail_count=%lu "
+ "dTD_workaround_fail_count=%lu\n",
ept->num, (ept->flags & EPT_FLAG_IN) ? "in " : "out",
ept->false_prime_fail_count,
ept->actual_prime_fail_count,
- ept->dTD_update_fail_count);
+ ept->dTD_update_fail_count,
+ ept->dTD_workaround_fail_count);
}
i += scnprintf(buf + i, PAGE_SIZE - i,
@@ -1979,6 +1996,10 @@
i += scnprintf(buf + i, PAGE_SIZE - i,
"prime_fail count: %d\n", ui->prime_fail_count);
+ i += scnprintf(buf + i, PAGE_SIZE - i,
+ "dtd_workaround_fail count: %lu\n",
+ ui->dTD_workaround_fail_count);
+
spin_unlock_irqrestore(&ui->lock, flags);
return simple_read_from_buffer(ubuf, count, ppos, buf, i);
diff --git a/drivers/usb/gadget/u_sdio.c b/drivers/usb/gadget/u_sdio.c
index 8c4b4c7..5e9b0ec 100644
--- a/drivers/usb/gadget/u_sdio.c
+++ b/drivers/usb/gadget/u_sdio.c
@@ -1140,18 +1140,6 @@
goto free_sdio_ports;
}
-#ifdef DEBUG
- /* REVISIT: create one file per port
- * or do not create any file
- */
- if (i == 0) {
- ret = device_create_file(&g->dev, &dev_attr_input);
- if (ret)
- pr_err("%s: unable to create device file\n",
- __func__);
- }
-#endif
-
}
gsdio_debugfs_init();
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index caf86ca..fff9465 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -380,6 +380,9 @@
ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
ehci_writel(ehci, (u32) ehci->async->qh_dma, &ehci->regs->async_next);
+ /* CMD_RUN will be set after PORT_RESUME gets cleared */
+ if (ehci->resume_sof_bug)
+ ehci->command &= ~CMD_RUN;
/* restore CMD_RUN, framelist size, and irq threshold */
ehci_writel(ehci, ehci->command, &ehci->regs->command);
ehci->rh_state = EHCI_RH_RUNNING;
@@ -422,6 +425,17 @@
ehci_writel(ehci, temp, &ehci->regs->port_status [i]);
}
+ if (ehci->resume_sof_bug && resume_needed) {
+ /* root hub has only one port.
+ * PORT_RESUME gets cleared automatically. */
+ handshake(ehci, &ehci->regs->port_status[0], PORT_RESUME, 0,
+ 20000);
+ ehci_writel(ehci, ehci_readl(ehci,
+ &ehci->regs->command) | CMD_RUN,
+ &ehci->regs->command);
+ goto skip_clear_resume;
+ }
+
/* msleep for 20ms only if code is trying to resume port */
if (resume_needed) {
spin_unlock_irq(&ehci->lock);
@@ -438,6 +452,8 @@
ehci_vdbg (ehci, "resumed port %d\n", i + 1);
}
}
+
+skip_clear_resume:
(void) ehci_readl(ehci, &ehci->regs->command);
/* maybe re-activate the schedule(s) */
@@ -823,7 +839,7 @@
u32 __iomem *status_reg = &ehci->regs->port_status[
(wIndex & 0xff) - 1];
u32 __iomem *hostpc_reg = NULL;
- u32 temp, temp1, status;
+ u32 temp, temp1, status, cmd = 0;
unsigned long flags;
int retval = 0;
unsigned selector;
@@ -1202,7 +1218,32 @@
ehci->reset_done [wIndex] = jiffies
+ msecs_to_jiffies (50);
}
+
+ if (ehci->reset_sof_bug && (temp & PORT_RESET)) {
+ cmd = ehci_readl(ehci, &ehci->regs->command);
+ cmd &= ~CMD_RUN;
+ ehci_writel(ehci, cmd, &ehci->regs->command);
+ if (handshake(ehci, &ehci->regs->status,
+ STS_HALT, STS_HALT, 16 * 125))
+ ehci_info(ehci,
+ "controller halt failed\n");
+ }
ehci_writel(ehci, temp, status_reg);
+ if (ehci->reset_sof_bug && (temp & PORT_RESET)
+ && hcd->driver->enable_ulpi_control) {
+ hcd->driver->enable_ulpi_control(hcd,
+ PORT_RESET);
+ spin_unlock_irqrestore(&ehci->lock, flags);
+ usleep_range(50000, 55000);
+ if (handshake(ehci, status_reg,
+ PORT_RESET, 0, 10 * 1000))
+ ehci_info(ehci,
+ "failed to clear reset\n");
+ spin_lock_irqsave(&ehci->lock, flags);
+ hcd->driver->disable_ulpi_control(hcd);
+ cmd |= CMD_RUN;
+ ehci_writel(ehci, cmd, &ehci->regs->command);
+ }
break;
/* For downstream facing ports (these): one hub port is put
diff --git a/drivers/usb/host/ehci-msm-hsic.c b/drivers/usb/host/ehci-msm-hsic.c
index a6b7dee..874c728 100644
--- a/drivers/usb/host/ehci-msm-hsic.c
+++ b/drivers/usb/host/ehci-msm-hsic.c
@@ -328,6 +328,29 @@
}
+static int ulpi_read(struct msm_hsic_hcd *mehci, u32 reg)
+{
+ struct usb_hcd *hcd = hsic_to_hcd(mehci);
+ unsigned long timeout;
+
+ /* initiate read operation */
+ writel_relaxed(ULPI_RUN | ULPI_READ | ULPI_ADDR(reg),
+ USB_ULPI_VIEWPORT);
+
+ /* wait for completion */
+ timeout = jiffies + usecs_to_jiffies(ULPI_IO_TIMEOUT_USEC);
+ while (readl_relaxed(USB_ULPI_VIEWPORT) & ULPI_RUN) {
+ if (time_after(jiffies, timeout)) {
+ dev_err(mehci->dev, "ulpi_read: timeout %08x\n",
+ readl_relaxed(USB_ULPI_VIEWPORT));
+ return -ETIMEDOUT;
+ }
+ udelay(1);
+ }
+
+ return ULPI_DATA_READ(readl_relaxed(USB_ULPI_VIEWPORT));
+}
+
static int ulpi_write(struct msm_hsic_hcd *mehci, u32 val, u32 reg)
{
struct usb_hcd *hcd = hsic_to_hcd(mehci);
@@ -354,6 +377,37 @@
return 0;
}
+#define HSIC_DBG1 0X38
+#define ULPI_MANUAL_ENABLE BIT(4)
+#define ULPI_LINESTATE_DATA BIT(5)
+#define ULPI_LINESTATE_STROBE BIT(6)
+static void ehci_msm_enable_ulpi_control(struct usb_hcd *hcd, u32 linestate)
+{
+ struct msm_hsic_hcd *mehci = hcd_to_hsic(hcd);
+ int val;
+
+ switch (linestate) {
+ case PORT_RESET:
+ val = ulpi_read(mehci, HSIC_DBG1);
+ val |= ULPI_MANUAL_ENABLE;
+ val &= ~(ULPI_LINESTATE_DATA | ULPI_LINESTATE_STROBE);
+ ulpi_write(mehci, val, HSIC_DBG1);
+ break;
+ default:
+ pr_info("%s: Unknown linestate:%0x\n", __func__, linestate);
+ }
+}
+
+static void ehci_msm_disable_ulpi_control(struct usb_hcd *hcd)
+{
+ struct msm_hsic_hcd *mehci = hcd_to_hsic(hcd);
+ int val;
+
+ val = ulpi_read(mehci, HSIC_DBG1);
+ val &= ~ULPI_MANUAL_ENABLE;
+ ulpi_write(mehci, val, HSIC_DBG1);
+}
+
static int msm_hsic_config_gpios(struct msm_hsic_hcd *mehci, int gpio_en)
{
int rc = 0;
@@ -406,50 +460,28 @@
return rc;
}
-static int msm_hsic_phy_clk_reset(struct msm_hsic_hcd *mehci)
+static void msm_hsic_clk_reset(struct msm_hsic_hcd *mehci)
{
int ret;
- clk_prepare_enable(mehci->alt_core_clk);
-
ret = clk_reset(mehci->core_clk, CLK_RESET_ASSERT);
if (ret) {
- clk_disable_unprepare(mehci->alt_core_clk);
- dev_err(mehci->dev, "usb phy clk assert failed\n");
- return ret;
+ dev_err(mehci->dev, "hsic clk assert failed:%d\n", ret);
+ return;
}
- usleep_range(10000, 12000);
- clk_disable_unprepare(mehci->alt_core_clk);
+ clk_disable(mehci->core_clk);
ret = clk_reset(mehci->core_clk, CLK_RESET_DEASSERT);
if (ret)
- dev_err(mehci->dev, "usb phy clk deassert failed\n");
+ dev_err(mehci->dev, "hsic clk deassert failed:%d\n", ret);
- return ret;
+ usleep_range(10000, 12000);
+
+ clk_enable(mehci->core_clk);
}
-static int msm_hsic_phy_reset(struct msm_hsic_hcd *mehci)
-{
- struct usb_hcd *hcd = hsic_to_hcd(mehci);
- u32 val;
- int ret;
-
- ret = msm_hsic_phy_clk_reset(mehci);
- if (ret)
- return ret;
-
- val = readl_relaxed(USB_PORTSC) & ~PORTSC_PTS_MASK;
- writel_relaxed(val | PORTSC_PTS_ULPI, USB_PORTSC);
-
- /* Ensure that RESET operation is completed before turning off clock */
- mb();
- dev_dbg(mehci->dev, "phy_reset: success\n");
-
- return 0;
-}
-
-#define HSIC_GPIO150_PAD_CTL (MSM_TLMM_BASE+0x20C0)
-#define HSIC_GPIO151_PAD_CTL (MSM_TLMM_BASE+0x20C4)
+#define HSIC_STROBE_GPIO_PAD_CTL (MSM_TLMM_BASE+0x20C0)
+#define HSIC_DATA_GPIO_PAD_CTL (MSM_TLMM_BASE+0x20C4)
#define HSIC_CAL_PAD_CTL (MSM_TLMM_BASE+0x20C8)
#define HSIC_LV_MODE 0x04
#define HSIC_PAD_CALIBRATION 0xA8
@@ -458,33 +490,15 @@
static int msm_hsic_reset(struct msm_hsic_hcd *mehci)
{
struct usb_hcd *hcd = hsic_to_hcd(mehci);
- int cnt = 0;
int ret;
struct msm_hsic_host_platform_data *pdata = mehci->dev->platform_data;
- ret = msm_hsic_phy_reset(mehci);
- if (ret) {
- dev_err(mehci->dev, "phy_reset failed\n");
- return ret;
- }
+ msm_hsic_clk_reset(mehci);
- writel_relaxed(USBCMD_RESET, USB_USBCMD);
- while (cnt < LINK_RESET_TIMEOUT_USEC) {
- if (!(readl_relaxed(USB_USBCMD) & USBCMD_RESET))
- break;
- udelay(1);
- cnt++;
- }
- if (cnt >= LINK_RESET_TIMEOUT_USEC)
- return -ETIMEDOUT;
-
- /* Reset PORTSC and select ULPI phy */
+ /* select ulpi phy */
writel_relaxed(0x80000000, USB_PORTSC);
- /* TODO: Need to confirm if HSIC PHY also requires delay after RESET */
- msleep(100);
-
- /* HSIC PHY Initialization */
+ mb();
/* HSIC init sequence when HSIC signals (Strobe/Data) are
routed via GPIOs */
@@ -493,6 +507,8 @@
/* Enable LV_MODE in HSIC_CAL_PAD_CTL register */
writel_relaxed(HSIC_LV_MODE, HSIC_CAL_PAD_CTL);
+ mb();
+
/*set periodic calibration interval to ~2.048sec in
HSIC_IO_CAL_REG */
ulpi_write(mehci, 0xFF, 0x33);
@@ -500,16 +516,18 @@
/* Enable periodic IO calibration in HSIC_CFG register */
ulpi_write(mehci, HSIC_PAD_CALIBRATION, 0x30);
- /* Configure GPIO 150/151 pins for HSIC functionality mode */
+ /* Configure GPIO pins for HSIC functionality mode */
ret = msm_hsic_config_gpios(mehci, 1);
if (ret) {
dev_err(mehci->dev, " gpio configuarion failed\n");
return ret;
}
- /* Set LV_MODE=0x1 and DCC=0x2 in HSIC_GPIO150/151_PAD_CTL
- register */
- writel_relaxed(HSIC_GPIO_PAD_VAL, HSIC_GPIO150_PAD_CTL);
- writel_relaxed(HSIC_GPIO_PAD_VAL, HSIC_GPIO151_PAD_CTL);
+ /* Set LV_MODE=0x1 and DCC=0x2 in HSIC_GPIO PAD_CTL register */
+ writel_relaxed(HSIC_GPIO_PAD_VAL, HSIC_STROBE_GPIO_PAD_CTL);
+ writel_relaxed(HSIC_GPIO_PAD_VAL, HSIC_DATA_GPIO_PAD_CTL);
+
+ mb();
+
/* Enable HSIC mode in HSIC_CFG register */
ulpi_write(mehci, 0x01, 0x31);
} else {
@@ -712,13 +730,6 @@
skip_phy_resume:
- if (!(readl_relaxed(USB_USBCMD) & CMD_RUN) &&
- (readl_relaxed(USB_PORTSC) & PORT_SUSPEND)) {
- writel_relaxed(readl_relaxed(USB_USBCMD) | CMD_RUN ,
- USB_USBCMD);
- dbg_log_event(NULL, "Set RS", readl_relaxed(USB_USBCMD));
- }
-
usb_hcd_resume_root_hub(hcd);
atomic_set(&mehci->in_lpm, 0);
@@ -824,7 +835,7 @@
* generic hardware linkage
*/
.irq = msm_hsic_irq,
- .flags = HCD_USB2 | HCD_MEMORY,
+ .flags = HCD_USB2 | HCD_MEMORY | HCD_OLD_ENUM,
.reset = ehci_hsic_reset,
.start = ehci_run,
@@ -861,6 +872,9 @@
.bus_resume = ehci_hsic_bus_resume,
.log_urb_complete = dbg_log_event,
+
+ .enable_ulpi_control = ehci_msm_enable_ulpi_control,
+ .disable_ulpi_control = ehci_msm_disable_ulpi_control,
};
static int msm_hsic_init_clocks(struct msm_hsic_hcd *mehci, u32 init)
@@ -1223,6 +1237,9 @@
mehci->dev = &pdev->dev;
mehci->ehci.susp_sof_bug = 1;
+ mehci->ehci.reset_sof_bug = 1;
+
+ mehci->ehci.resume_sof_bug = 1;
mehci->ehci.max_log2_irq_thresh = 6;
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 6afb70b..a0f995c 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -152,6 +152,8 @@
unsigned has_synopsys_hc_bug:1; /* Synopsys HC */
unsigned frame_index_bug:1; /* MosChip (AKA NetMos) */
unsigned susp_sof_bug:1; /*Chip Idea HC*/
+ unsigned resume_sof_bug:1;/*Chip Idea HC*/
+ unsigned reset_sof_bug:1; /*Chip Idea HC*/
/* required for usb32 quirk */
#define OHCI_CTRL_HCFS (3 << 6)
diff --git a/drivers/usb/misc/diag_bridge.c b/drivers/usb/misc/diag_bridge.c
index 8b762a2..6d5544a 100644
--- a/drivers/usb/misc/diag_bridge.c
+++ b/drivers/usb/misc/diag_bridge.c
@@ -255,6 +255,7 @@
pipe = usb_sndbulkpipe(dev->udev, dev->out_epAddr);
usb_fill_bulk_urb(urb, dev->udev, pipe, data, size,
diag_bridge_write_cb, dev);
+ urb->transfer_flags |= URB_ZERO_PACKET;
usb_anchor_urb(urb, &dev->submitted);
dev->pending_writes++;
diff --git a/drivers/usb/misc/mdm_data_bridge.c b/drivers/usb/misc/mdm_data_bridge.c
index db2f40a..1c9de07 100644
--- a/drivers/usb/misc/mdm_data_bridge.c
+++ b/drivers/usb/misc/mdm_data_bridge.c
@@ -497,6 +497,8 @@
usb_fill_bulk_urb(txurb, dev->udev, dev->bulk_out,
skb->data, skb->len, data_bridge_write_cb, skb);
+ txurb->transfer_flags |= URB_ZERO_PACKET;
+
if (test_bit(SUSPENDED, &dev->flags)) {
usb_anchor_urb(txurb, &dev->delayed);
goto free_urb;
diff --git a/drivers/usb/otg/msm_otg.c b/drivers/usb/otg/msm_otg.c
index dedad53..1d9c84f 100644
--- a/drivers/usb/otg/msm_otg.c
+++ b/drivers/usb/otg/msm_otg.c
@@ -670,6 +670,9 @@
if (aca_enabled())
return 0;
+ if (atomic_read(&motg->in_lpm) == suspend)
+ return 0;
+
if (suspend) {
switch (phy->state) {
case OTG_STATE_A_WAIT_BCON:
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 1f6d915..a749a6d 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -284,6 +284,8 @@
.write = usb_wwan_write,
.write_room = usb_wwan_write_room,
.chars_in_buffer = usb_wwan_chars_in_buffer,
+ .throttle = usb_wwan_throttle,
+ .unthrottle = usb_wwan_unthrottle,
.attach = usb_wwan_startup,
.disconnect = usb_wwan_disconnect,
.release = qc_release,
diff --git a/drivers/usb/serial/usb-wwan.h b/drivers/usb/serial/usb-wwan.h
index 9811a82..98b399f 100644
--- a/drivers/usb/serial/usb-wwan.h
+++ b/drivers/usb/serial/usb-wwan.h
@@ -24,6 +24,8 @@
extern int usb_wwan_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
extern int usb_wwan_chars_in_buffer(struct tty_struct *tty);
+extern void usb_wwan_throttle(struct tty_struct *tty);
+extern void usb_wwan_unthrottle(struct tty_struct *tty);
#ifdef CONFIG_PM
extern int usb_wwan_suspend(struct usb_serial *serial, pm_message_t message);
extern int usb_wwan_resume(struct usb_serial *serial);
@@ -33,7 +35,7 @@
#define N_IN_URB 5
#define N_OUT_URB 5
-#define IN_BUFLEN 65536
+#define IN_BUFLEN 16384
#define OUT_BUFLEN 65536
struct usb_wwan_intf_private {
@@ -55,6 +57,10 @@
int opened;
struct usb_anchor submitted;
struct usb_anchor delayed;
+ struct list_head in_urb_list;
+ spinlock_t in_lock;
+ ssize_t n_read;
+ struct work_struct in_work;
/* Settings for the port */
int rts_state; /* Handshaking pins (outputs) */
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index 0c58554..bf30c0b 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -279,15 +279,78 @@
}
EXPORT_SYMBOL(usb_wwan_write);
+static void usb_wwan_in_work(struct work_struct *w)
+{
+ struct usb_wwan_port_private *portdata =
+ container_of(w, struct usb_wwan_port_private, in_work);
+ struct list_head *q = &portdata->in_urb_list;
+ struct urb *urb;
+ unsigned char *data;
+ struct tty_struct *tty;
+ struct usb_serial_port *port;
+ int err;
+ ssize_t len;
+ ssize_t count;
+ unsigned long flags;
+
+ spin_lock_irqsave(&portdata->in_lock, flags);
+ while (!list_empty(q)) {
+ urb = list_first_entry(q, struct urb, urb_list);
+ port = urb->context;
+ if (port->throttle_req || port->throttled)
+ break;
+
+ tty = tty_port_tty_get(&port->port);
+ if (!tty)
+ continue;
+
+ list_del_init(&urb->urb_list);
+
+ spin_unlock_irqrestore(&portdata->in_lock, flags);
+
+ len = urb->actual_length - portdata->n_read;
+ data = urb->transfer_buffer + portdata->n_read;
+ count = tty_insert_flip_string(tty, data, len);
+ tty_flip_buffer_push(tty);
+ tty_kref_put(tty);
+
+ if (count < len) {
+ dbg("%s: len:%d count:%d n_read:%d\n", __func__,
+ len, count, portdata->n_read);
+ portdata->n_read += count;
+ port->throttled = true;
+
+ /* add request back to list */
+ spin_lock_irqsave(&portdata->in_lock, flags);
+ list_add(&urb->urb_list, q);
+ spin_unlock_irqrestore(&portdata->in_lock, flags);
+ return;
+ }
+ portdata->n_read = 0;
+
+ usb_anchor_urb(urb, &portdata->submitted);
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err) {
+ usb_unanchor_urb(urb);
+ if (err != -EPERM)
+ pr_err("%s: submit read urb failed:%d",
+ __func__, err);
+ }
+
+ usb_mark_last_busy(port->serial->dev);
+ spin_lock_irqsave(&portdata->in_lock, flags);
+ }
+ spin_unlock_irqrestore(&portdata->in_lock, flags);
+}
+
static void usb_wwan_indat_callback(struct urb *urb)
{
int err;
int endpoint;
struct usb_wwan_port_private *portdata;
struct usb_serial_port *port;
- struct tty_struct *tty;
- unsigned char *data = urb->transfer_buffer;
int status = urb->status;
+ unsigned long flags;
dbg("%s: %p", __func__, urb);
@@ -295,38 +358,30 @@
port = urb->context;
portdata = usb_get_serial_port_data(port);
- if (status) {
- dbg("%s: nonzero status: %d on endpoint %02x.",
- __func__, status, endpoint);
- } else {
- tty = tty_port_tty_get(&port->port);
- if (tty) {
- if (urb->actual_length) {
- tty_insert_flip_string(tty, data,
- urb->actual_length);
- tty_flip_buffer_push(tty);
- } else
- dbg("%s: empty read urb received", __func__);
- tty_kref_put(tty);
- }
+ usb_mark_last_busy(port->serial->dev);
- /* Resubmit urb so we continue receiving */
- if (status != -ESHUTDOWN) {
- usb_anchor_urb(urb, &portdata->submitted);
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err) {
- usb_unanchor_urb(urb);
- if (err != -EPERM) {
- printk(KERN_ERR "%s: resubmit read urb failed. "
- "(%d)", __func__, err);
- /* busy also in error unless we are killed */
- usb_mark_last_busy(port->serial->dev);
- }
- } else {
- usb_mark_last_busy(port->serial->dev);
- }
- }
+ if (!status && urb->actual_length) {
+ spin_lock_irqsave(&portdata->in_lock, flags);
+ list_add_tail(&urb->urb_list, &portdata->in_urb_list);
+ spin_unlock_irqrestore(&portdata->in_lock, flags);
+ schedule_work(&portdata->in_work);
+
+ return;
+ }
+
+ dbg("%s: nonzero status: %d on endpoint %02x.",
+ __func__, status, endpoint);
+
+ if (status != -ESHUTDOWN) {
+ usb_anchor_urb(urb, &portdata->submitted);
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err) {
+ usb_unanchor_urb(urb);
+ if (err != -EPERM)
+ pr_err("%s: submit read urb failed:%d",
+ __func__, err);
+ }
}
}
@@ -401,6 +456,31 @@
}
EXPORT_SYMBOL(usb_wwan_chars_in_buffer);
+void usb_wwan_throttle(struct tty_struct *tty)
+{
+ struct usb_serial_port *port = tty->driver_data;
+
+ port->throttle_req = true;
+
+ dbg("%s:\n", __func__);
+}
+EXPORT_SYMBOL(usb_wwan_throttle);
+
+void usb_wwan_unthrottle(struct tty_struct *tty)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ struct usb_wwan_port_private *portdata;
+
+ portdata = usb_get_serial_port_data(port);
+
+ dbg("%s:\n", __func__);
+ port->throttle_req = false;
+ port->throttled = false;
+
+ schedule_work(&portdata->in_work);
+}
+EXPORT_SYMBOL(usb_wwan_unthrottle);
+
int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct usb_wwan_port_private *portdata;
@@ -560,6 +640,9 @@
}
init_usb_anchor(&portdata->delayed);
init_usb_anchor(&portdata->submitted);
+ INIT_WORK(&portdata->in_work, usb_wwan_in_work);
+ INIT_LIST_HEAD(&portdata->in_urb_list);
+ spin_lock_init(&portdata->in_lock);
for (j = 0; j < N_IN_URB; j++) {
buffer = kmalloc(IN_BUFLEN, GFP_KERNEL);
@@ -624,14 +707,25 @@
int i, j;
struct usb_serial_port *port;
struct usb_wwan_port_private *portdata;
-
- dbg("%s", __func__);
+ struct urb *urb;
+ struct list_head *q;
+ unsigned long flags;
/* Now free them */
for (i = 0; i < serial->num_ports; ++i) {
port = serial->port[i];
portdata = usb_get_serial_port_data(port);
+ cancel_work_sync(&portdata->in_work);
+ /* TBD: do we really need this */
+ spin_lock_irqsave(&portdata->in_lock, flags);
+ q = &portdata->in_urb_list;
+ while (!list_empty(q)) {
+ urb = list_first_entry(q, struct urb, urb_list);
+ list_del_init(&urb->urb_list);
+ }
+ spin_unlock_irqrestore(&portdata->in_lock, flags);
+
for (j = 0; j < N_IN_URB; j++) {
usb_free_urb(portdata->in_urbs[j]);
kfree(portdata->in_buffer[j]);
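The receive path above defers tty pushes to a work item gated by two per-port flags that the usb-serial core already provides; the hypothetical helper below just restates the condition the work loop checks, with the flag semantics spelled out (the helper itself is not part of the patch).
/*
 * Illustrative only.  throttle_req is set by usb_wwan_throttle() when the
 * tty layer asks the driver to stop; throttled is set by the work function
 * once tty_insert_flip_string() accepts fewer bytes than offered, with
 * n_read recording how far into the head urb the push got.
 * usb_wwan_unthrottle() clears both flags and reschedules the work, which
 * then resumes from transfer_buffer + n_read.
 */
static bool example_rx_may_push(struct usb_serial_port *port)
{
return !port->throttle_req && !port->throttled;
}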
diff --git a/drivers/video/msm/hdmi_msm.c b/drivers/video/msm/hdmi_msm.c
index 03243ac..2526d76 100644
--- a/drivers/video/msm/hdmi_msm.c
+++ b/drivers/video/msm/hdmi_msm.c
@@ -4187,39 +4187,6 @@
}
#endif
-static void hdmi_msm_hpd_read_work(struct work_struct *work)
-{
- uint32 hpd_ctrl;
-
- clk_prepare_enable(hdmi_msm_state->hdmi_app_clk);
- hdmi_msm_state->pd->core_power(1, 1);
- hdmi_msm_state->pd->enable_5v(1);
- hdmi_msm_set_mode(FALSE);
- hdmi_msm_init_phy(external_common_state->video_resolution);
- /* HDMI_USEC_REFTIMER[0x0208] */
- HDMI_OUTP(0x0208, 0x0001001B);
- hpd_ctrl = (HDMI_INP(0x0258) & ~0xFFF) | 0xFFF;
-
- /* Toggle HPD circuit to trigger HPD sense */
- HDMI_OUTP(0x0258, ~(1 << 28) & hpd_ctrl);
- HDMI_OUTP(0x0258, (1 << 28) | hpd_ctrl);
-
- hdmi_msm_set_mode(TRUE);
- msleep(1000);
- external_common_state->hpd_state = (HDMI_INP(0x0250) & 0x2) >> 1;
- if (external_common_state->hpd_state) {
- hdmi_msm_read_edid();
- DEV_DBG("%s: sense CONNECTED: send ONLINE\n", __func__);
- kobject_uevent(external_common_state->uevent_kobj,
- KOBJ_ONLINE);
- }
- hdmi_msm_hpd_off();
- hdmi_msm_set_mode(FALSE);
- hdmi_msm_state->pd->core_power(0, 1);
- hdmi_msm_state->pd->enable_5v(0);
- clk_disable_unprepare(hdmi_msm_state->hdmi_app_clk);
-}
-
static void hdmi_msm_hpd_off(void)
{
int rc = 0;
@@ -4583,8 +4550,6 @@
#endif
}
- queue_work(hdmi_work_queue, &hdmi_msm_state->hpd_read_work);
-
/* Initialize hdmi node and register with switch driver */
if (hdmi_prim_display)
external_common_state->sdev.name = "hdmi_as_primary";
@@ -4754,7 +4719,6 @@
hdmi_common_init_panel_info(&hdmi_msm_panel_data.panel_info);
init_completion(&hdmi_msm_state->ddc_sw_done);
INIT_WORK(&hdmi_msm_state->hpd_state_work, hdmi_msm_hpd_state_work);
- INIT_WORK(&hdmi_msm_state->hpd_read_work, hdmi_msm_hpd_read_work);
#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT
init_completion(&hdmi_msm_state->hdcp_success_done);
INIT_WORK(&hdmi_msm_state->hdcp_reauth_work, hdmi_msm_hdcp_reauth_work);
diff --git a/drivers/video/msm/hdmi_msm.h b/drivers/video/msm/hdmi_msm.h
index 5195f2c..06ebb06 100644
--- a/drivers/video/msm/hdmi_msm.h
+++ b/drivers/video/msm/hdmi_msm.h
@@ -61,7 +61,7 @@
boolean hpd_cable_chg_detected;
boolean full_auth_done;
boolean hpd_during_auth;
- struct work_struct hpd_state_work, hpd_read_work;
+ struct work_struct hpd_state_work;
struct timer_list hpd_state_timer;
struct completion ddc_sw_done;
diff --git a/drivers/video/msm/mdp.c b/drivers/video/msm/mdp.c
index cad6e02..2bc7f5b 100644
--- a/drivers/video/msm/mdp.c
+++ b/drivers/video/msm/mdp.c
@@ -51,7 +51,7 @@
int mdp_rev;
static struct platform_device *mdp_init_pdev;
-static struct regulator *footswitch;
+static struct regulator *footswitch, *hdmi_pll_fs;
static unsigned int mdp_footswitch_on;
struct completion mdp_ppp_comp;
@@ -1099,31 +1099,21 @@
goto error;
}
- if (mgmt->hist == NULL) {
- if ((mgmt->mdp_is_hist_init == TRUE) &&
- ((!completion_done(&mgmt->mdp_hist_comp)) &&
- waitqueue_active(&mgmt->mdp_hist_comp.wait)))
- pr_err("mgmt->hist invalid NULL\n");
+ switch (mgmt->block) {
+ case MDP_BLOCK_DMA_P:
+ case MDP_BLOCK_DMA_S:
+ ret = _mdp_histogram_read_dma_data(mgmt);
+ break;
+ case MDP_BLOCK_VG_1:
+ case MDP_BLOCK_VG_2:
+ ret = _mdp_histogram_read_vg_data(mgmt);
+ break;
+ default:
+ pr_err("%s, invalid MDP block = %d\n", __func__, mgmt->block);
ret = -EINVAL;
+ goto error;
}
- if (!ret) {
- switch (mgmt->block) {
- case MDP_BLOCK_DMA_P:
- case MDP_BLOCK_DMA_S:
- ret = _mdp_histogram_read_dma_data(mgmt);
- break;
- case MDP_BLOCK_VG_1:
- case MDP_BLOCK_VG_2:
- ret = _mdp_histogram_read_vg_data(mgmt);
- break;
- default:
- pr_err("%s, invalid MDP block = %d\n", __func__,
- mgmt->block);
- ret = -EINVAL;
- goto error;
- }
- }
/*
* if read was triggered by an underrun or failed copying,
* don't wake up readers
@@ -1624,7 +1614,16 @@
__mdp_histogram_kickoff(mgmt);
if (isr & INTR_HIST_DONE) {
- queue_work(mdp_hist_wq, &mgmt->mdp_histogram_worker);
+ if ((waitqueue_active(&mgmt->mdp_hist_comp.wait))
+ && (mgmt->hist != NULL)) {
+ if (!queue_work(mdp_hist_wq,
+ &mgmt->mdp_histogram_worker)) {
+ pr_err("%s %d- can't queue hist_read\n",
+ __func__, mgmt->block);
+ }
+ } else {
+ __mdp_histogram_reset(mgmt);
+ }
}
}
@@ -2123,10 +2122,16 @@
}
disable_irq(mdp_irq);
+ hdmi_pll_fs = regulator_get(&pdev->dev, "hdmi_pll_fs");
+ if (IS_ERR(hdmi_pll_fs))
+ hdmi_pll_fs = NULL;
+
footswitch = regulator_get(&pdev->dev, "vdd");
if (IS_ERR(footswitch))
footswitch = NULL;
else {
+ if (hdmi_pll_fs)
+ regulator_enable(hdmi_pll_fs);
regulator_enable(footswitch);
mdp_footswitch_on = 1;
@@ -2135,6 +2140,8 @@
msleep(20);
regulator_enable(footswitch);
}
+ if (hdmi_pll_fs)
+ regulator_disable(hdmi_pll_fs);
}
mdp_clk = clk_get(&pdev->dev, "core_clk");
@@ -2626,6 +2633,9 @@
return;
}
+ if (hdmi_pll_fs)
+ regulator_enable(hdmi_pll_fs);
+
if (on && !mdp_footswitch_on) {
pr_debug("Enable MDP FS\n");
regulator_enable(footswitch);
@@ -2636,6 +2646,9 @@
mdp_footswitch_on = 0;
}
+ if (hdmi_pll_fs)
+ regulator_disable(hdmi_pll_fs);
+
mutex_unlock(&mdp_suspend_mutex);
}
diff --git a/drivers/video/msm/mdp.h b/drivers/video/msm/mdp.h
index b232e0a..e60b24e 100644
--- a/drivers/video/msm/mdp.h
+++ b/drivers/video/msm/mdp.h
@@ -74,8 +74,7 @@
struct mdp_buf_type {
struct ion_handle *ihdl;
- u32 write_addr;
- u32 read_addr;
+ u32 phys_addr;
u32 size;
};
diff --git a/drivers/video/msm/mdp4.h b/drivers/video/msm/mdp4.h
index 59404d0..1557eed 100644
--- a/drivers/video/msm/mdp4.h
+++ b/drivers/video/msm/mdp4.h
@@ -337,8 +337,7 @@
uint32 element1; /* 0 = C0, 1 = C1, 2 = C2, 3 = C3 */
uint32 element0; /* 0 = C0, 1 = C1, 2 = C2, 3 = C3 */
struct completion comp;
- ulong ov_blt_addr; /* blt mode addr */
- ulong dma_blt_addr; /* blt mode addr */
+ ulong blt_addr; /* blt mode addr */
ulong blt_base;
ulong blt_offset;
uint32 blt_cnt;
diff --git a/drivers/video/msm/mdp4_overlay.c b/drivers/video/msm/mdp4_overlay.c
index 287b564..1287743 100644
--- a/drivers/video/msm/mdp4_overlay.c
+++ b/drivers/video/msm/mdp4_overlay.c
@@ -126,7 +126,7 @@
pr_debug("mixer %u, pipe %u, plane %u\n", pipe->mixer_num,
pipe->pipe_ndx, plane);
if (ion_map_iommu(display_iclient, *srcp_ihdl,
- DISPLAY_READ_DOMAIN, GEN_POOL, SZ_4K, 0, start,
+ DISPLAY_DOMAIN, GEN_POOL, SZ_4K, 0, start,
len, 0, ION_IOMMU_UNMAP_DELAYED)) {
ion_free(display_iclient, *srcp_ihdl);
pr_err("ion_map_iommu() failed\n");
@@ -140,7 +140,7 @@
if (iom_pipe_info->prev_ihdl[plane]) {
ion_unmap_iommu(display_iclient,
iom_pipe_info->prev_ihdl[plane],
- DISPLAY_READ_DOMAIN, GEN_POOL);
+ DISPLAY_DOMAIN, GEN_POOL);
ion_free(display_iclient,
iom_pipe_info->prev_ihdl[plane]);
pr_debug("Previous: mixer %u, pipe %u, plane %u, "
@@ -175,7 +175,7 @@
iom_pipe_info->prev_ihdl[i]);
ion_unmap_iommu(display_iclient,
iom_pipe_info->prev_ihdl[i],
- DISPLAY_READ_DOMAIN, GEN_POOL);
+ DISPLAY_DOMAIN, GEN_POOL);
ion_free(display_iclient,
iom_pipe_info->prev_ihdl[i]);
iom_pipe_info->prev_ihdl[i] = NULL;
@@ -191,7 +191,7 @@
iom_pipe_info->ihdl[i]);
ion_unmap_iommu(display_iclient,
iom_pipe_info->ihdl[i],
- DISPLAY_READ_DOMAIN, GEN_POOL);
+ DISPLAY_DOMAIN, GEN_POOL);
ion_free(display_iclient,
iom_pipe_info->ihdl[i]);
iom_pipe_info->ihdl[i] = NULL;
@@ -346,7 +346,7 @@
MDP_OUTP(MDP_BASE + 0xb0004,
(pipe->src_height << 16 | pipe->src_width));
- if (pipe->dma_blt_addr) {
+ if (pipe->blt_addr) {
uint32 off, bpp;
#ifdef BLT_RGB565
bpp = 2; /* overlay ouput is RGB565 */
@@ -356,7 +356,7 @@
off = 0;
if (pipe->ov_cnt & 0x01)
off = pipe->src_height * pipe->src_width * bpp;
- MDP_OUTP(MDP_BASE + 0xb0008, pipe->dma_blt_addr + off);
+ MDP_OUTP(MDP_BASE + 0xb0008, pipe->blt_addr + off);
/* RGB888, output of overlay blending */
MDP_OUTP(MDP_BASE + 0xb000c, pipe->src_width * bpp);
} else {
@@ -427,7 +427,7 @@
/* dma_p source */
MDP_OUTP(MDP_BASE + 0x90004,
(pipe->src_height << 16 | pipe->src_width));
- if (pipe->dma_blt_addr) {
+ if (pipe->blt_addr) {
#ifdef BLT_RGB565
bpp = 2; /* overlay ouput is RGB565 */
#else
@@ -436,7 +436,7 @@
off = 0;
if (pipe->dmap_cnt & 0x01)
off = pipe->src_height * pipe->src_width * bpp;
- MDP_OUTP(MDP_BASE + 0x90008, pipe->dma_blt_addr + off);
+ MDP_OUTP(MDP_BASE + 0x90008, pipe->blt_addr + off);
/* RGB888, output of overlay blending */
MDP_OUTP(MDP_BASE + 0x9000c, pipe->src_width * bpp);
} else {
@@ -1321,7 +1321,7 @@
/*
* BLT support both primary and external external
*/
- if (pipe->ov_blt_addr) {
+ if (pipe->blt_addr) {
int off, bpp;
#ifdef BLT_RGB565
bpp = 2; /* overlay ouput is RGB565 */
@@ -1338,10 +1338,10 @@
if (pipe->ov_cnt & 0x01)
off = pipe->src_height * pipe->src_width * bpp;
- outpdw(overlay_base + 0x000c, pipe->ov_blt_addr + off);
+ outpdw(overlay_base + 0x000c, pipe->blt_addr + off);
/* overlay ouput is RGB888 */
outpdw(overlay_base + 0x0010, pipe->src_width * bpp);
- outpdw(overlay_base + 0x001c, pipe->ov_blt_addr + off);
+ outpdw(overlay_base + 0x001c, pipe->blt_addr + off);
/* MDDI - BLT + on demand */
outpdw(overlay_base + 0x0004, 0x08);
@@ -1361,19 +1361,19 @@
pipe->src_width * bpp;
outpdw(overlay_base + 0x000c,
- pipe->ov_blt_addr + off);
+ pipe->blt_addr + off);
/* overlay ouput is RGB888 */
outpdw(overlay_base + 0x0010,
((pipe->src_width << 16) |
pipe->src_width));
outpdw(overlay_base + 0x001c,
- pipe->ov_blt_addr + off);
+ pipe->blt_addr + off);
off = pipe->src_height * pipe->src_width;
/* align chroma to 2k address */
off = (off + 2047) & ~2047;
/* UV plane adress */
outpdw(overlay_base + 0x0020,
- pipe->ov_blt_addr + off);
+ pipe->blt_addr + off);
/* MDDI - BLT + on demand */
outpdw(overlay_base + 0x0004, 0x08);
/* pseudo planar + writeback */
@@ -3086,7 +3086,13 @@
mdp4_overlay_rgb_setup(pipe); /* rgb pipe */
}
- mdp4_overlay_reg_flush(pipe, 1);
+ if (pipe->mixer_num != MDP4_MIXER2) {
+ if ((ctrl->panel_mode & MDP4_PANEL_DTV) ||
+ (ctrl->panel_mode & MDP4_PANEL_LCDC) ||
+ (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO))
+ mdp4_overlay_reg_flush(pipe, 1);
+ }
+
mdp4_mixer_stage_up(pipe);
if (pipe->mixer_num == MDP4_MIXER2) {
@@ -3180,25 +3186,25 @@
char *name;
int domain;
} msm_iommu_ctx_names[] = {
- /* Display read*/
+ /* Display */
{
.name = "mdp_port0_cb0",
- .domain = DISPLAY_READ_DOMAIN,
+ .domain = DISPLAY_DOMAIN,
},
- /* Display read*/
+ /* Display */
{
.name = "mdp_port0_cb1",
- .domain = DISPLAY_WRITE_DOMAIN,
+ .domain = DISPLAY_DOMAIN,
},
- /* Display write */
+ /* Display */
{
.name = "mdp_port1_cb0",
- .domain = DISPLAY_READ_DOMAIN,
+ .domain = DISPLAY_DOMAIN,
},
- /* Display write */
+ /* Display */
{
.name = "mdp_port1_cb1",
- .domain = DISPLAY_WRITE_DOMAIN,
+ .domain = DISPLAY_DOMAIN,
},
};
diff --git a/drivers/video/msm/mdp4_overlay_dsi_cmd.c b/drivers/video/msm/mdp4_overlay_dsi_cmd.c
index 0d8fea7..7ba4e75 100644
--- a/drivers/video/msm/mdp4_overlay_dsi_cmd.c
+++ b/drivers/video/msm/mdp4_overlay_dsi_cmd.c
@@ -162,8 +162,7 @@
dsi_pipe = pipe; /* keep it */
mdp4_init_writeback_buf(mfd, MDP4_MIXER0);
- pipe->ov_blt_addr = 0;
- pipe->dma_blt_addr = 0;
+ pipe->blt_addr = 0;
} else {
pipe = dsi_pipe;
@@ -322,25 +321,24 @@
{
unsigned long flag;
- pr_debug("%s: blt_end=%d ov_blt_addr=%x pid=%d\n",
- __func__, dsi_pipe->blt_end, (int)dsi_pipe->ov_blt_addr, current->pid);
+ pr_debug("%s: blt_end=%d blt_addr=%x pid=%d\n",
+ __func__, dsi_pipe->blt_end, (int)dsi_pipe->blt_addr, current->pid);
mdp4_allocate_writeback_buf(mfd, MDP4_MIXER0);
- if (mfd->ov0_wb_buf->write_addr == 0) {
+ if (mfd->ov0_wb_buf->phys_addr == 0) {
pr_info("%s: no blt_base assigned\n", __func__);
return -EBUSY;
}
- if (dsi_pipe->ov_blt_addr == 0) {
+ if (dsi_pipe->blt_addr == 0) {
mdp4_dsi_cmd_dma_busy_wait(mfd);
spin_lock_irqsave(&mdp_spin_lock, flag);
dsi_pipe->blt_end = 0;
dsi_pipe->blt_cnt = 0;
dsi_pipe->ov_cnt = 0;
dsi_pipe->dmap_cnt = 0;
- dsi_pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
- dsi_pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr;
+ dsi_pipe->blt_addr = mfd->ov0_wb_buf->phys_addr;
mdp4_stat.blt_dsi_cmd++;
spin_unlock_irqrestore(&mdp_spin_lock, flag);
return 0;
@@ -354,10 +352,10 @@
unsigned long flag;
- pr_debug("%s: blt_end=%d ov_blt_addr=%x\n",
- __func__, dsi_pipe->blt_end, (int)dsi_pipe->ov_blt_addr);
+ pr_debug("%s: blt_end=%d blt_addr=%x\n",
+ __func__, dsi_pipe->blt_end, (int)dsi_pipe->blt_addr);
- if ((dsi_pipe->blt_end == 0) && dsi_pipe->ov_blt_addr) {
+ if ((dsi_pipe->blt_end == 0) && dsi_pipe->blt_addr) {
spin_lock_irqsave(&mdp_spin_lock, flag);
dsi_pipe->blt_end = 1; /* mark as end */
spin_unlock_irqrestore(&mdp_spin_lock, flag);
@@ -395,7 +393,7 @@
char *overlay_base;
- if (pipe->ov_blt_addr == 0)
+ if (pipe->blt_addr == 0)
return;
@@ -407,7 +405,7 @@
off = 0;
if (pipe->dmap_cnt & 0x01)
off = pipe->src_height * pipe->src_width * bpp;
- addr = pipe->dma_blt_addr + off;
+ addr = pipe->blt_addr + off;
/* dmap */
MDP_OUTP(MDP_BASE + 0x90008, addr);
@@ -415,7 +413,7 @@
off = 0;
if (pipe->ov_cnt & 0x01)
off = pipe->src_height * pipe->src_width * bpp;
- addr2 = pipe->ov_blt_addr + off;
+ addr2 = pipe->blt_addr + off;
/* overlay 0 */
overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
outpdw(overlay_base + 0x000c, addr2);
@@ -443,8 +441,7 @@
spin_unlock(&mdp_spin_lock);
if (dsi_pipe->blt_end) {
dsi_pipe->blt_end = 0;
- dsi_pipe->dma_blt_addr = 0;
- dsi_pipe->ov_blt_addr = 0;
+ dsi_pipe->blt_addr = 0;
pr_debug("%s: END, ov_cnt=%d dmap_cnt=%d\n",
__func__, dsi_pipe->ov_cnt, dsi_pipe->dmap_cnt);
mdp_intr_mask &= ~INTR_DMA_P_DONE;
@@ -482,7 +479,7 @@
{
int diff;
- if (dsi_pipe->ov_blt_addr == 0) {
+ if (dsi_pipe->blt_addr == 0) {
mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, TRUE);
spin_lock(&mdp_spin_lock);
dma->busy = FALSE;
@@ -542,7 +539,7 @@
mipi_dsi_mdp_busy_wait(dsi_mfd);
mdp4_overlay_update_dsi_cmd(dsi_mfd);
- if (dsi_pipe->ov_blt_addr)
+ if (dsi_pipe->blt_addr)
mdp4_dsi_blt_dmap_busy_wait(dsi_mfd);
mdp4_dsi_cmd_overlay_kickoff(dsi_mfd, dsi_pipe);
}
@@ -625,17 +622,17 @@
* to be called before kickoff.
* vice versa for blt disabled.
*/
- if (dsi_pipe->ov_blt_addr && dsi_pipe->blt_cnt == 0)
+ if (dsi_pipe->blt_addr && dsi_pipe->blt_cnt == 0)
mdp4_overlay_update_dsi_cmd(mfd); /* first time */
- else if (dsi_pipe->ov_blt_addr == 0 && dsi_pipe->blt_cnt) {
+ else if (dsi_pipe->blt_addr == 0 && dsi_pipe->blt_cnt) {
mdp4_overlay_update_dsi_cmd(mfd); /* last time */
dsi_pipe->blt_cnt = 0;
}
- pr_debug("%s: ov_blt_addr=%d blt_cnt=%d\n",
- __func__, (int)dsi_pipe->ov_blt_addr, dsi_pipe->blt_cnt);
+ pr_debug("%s: blt_addr=%d blt_cnt=%d\n",
+ __func__, (int)dsi_pipe->blt_addr, dsi_pipe->blt_cnt);
- if (dsi_pipe->ov_blt_addr)
+ if (dsi_pipe->blt_addr)
mdp4_dsi_blt_dmap_busy_wait(dsi_mfd);
mdp4_dsi_cmd_overlay_kickoff(mfd, pipe);
@@ -661,7 +658,7 @@
mipi_dsi_mdp_busy_wait(mfd);
- if (dsi_pipe->ov_blt_addr == 0)
+ if (dsi_pipe->blt_addr == 0)
mipi_dsi_cmd_mdp_start();
mdp4_overlay_dsi_state_set(ST_DSI_PLAYING);
@@ -669,7 +666,7 @@
spin_lock_irqsave(&mdp_spin_lock, flag);
mdp_enable_irq(MDP_OVERLAY0_TERM);
mfd->dma->busy = TRUE;
- if (dsi_pipe->ov_blt_addr)
+ if (dsi_pipe->blt_addr)
mfd->dma->dmap_busy = TRUE;
/* start OVERLAY pipe */
spin_unlock_irqrestore(&mdp_spin_lock, flag);
@@ -703,7 +700,7 @@
if (mfd && mfd->panel_power_on) {
mdp4_dsi_cmd_dma_busy_wait(mfd);
- if (dsi_pipe && dsi_pipe->ov_blt_addr)
+ if (dsi_pipe && dsi_pipe->blt_addr)
mdp4_dsi_blt_dmap_busy_wait(mfd);
mdp4_overlay_update_dsi_cmd(mfd);
diff --git a/drivers/video/msm/mdp4_overlay_dsi_video.c b/drivers/video/msm/mdp4_overlay_dsi_video.c
index 478a8ce..05c6fe8 100644
--- a/drivers/video/msm/mdp4_overlay_dsi_video.c
+++ b/drivers/video/msm/mdp4_overlay_dsi_video.c
@@ -152,8 +152,7 @@
init_completion(&dsi_video_comp);
mdp4_init_writeback_buf(mfd, MDP4_MIXER0);
- pipe->ov_blt_addr = 0;
- pipe->dma_blt_addr = 0;
+ pipe->blt_addr = 0;
} else {
pipe = dsi_pipe;
@@ -416,7 +415,7 @@
char *overlay_base;
- if (pipe->ov_blt_addr == 0)
+ if (pipe->blt_addr == 0)
return;
@@ -428,7 +427,7 @@
off = 0;
if (pipe->ov_cnt & 0x01)
off = pipe->src_height * pipe->src_width * bpp;
- addr = pipe->ov_blt_addr + off;
+ addr = pipe->blt_addr + off;
/* overlay 0 */
overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
@@ -441,7 +440,7 @@
uint32 off, addr;
int bpp;
- if (pipe->ov_blt_addr == 0)
+ if (pipe->blt_addr == 0)
return;
@@ -453,7 +452,7 @@
off = 0;
if (pipe->dmap_cnt & 0x01)
off = pipe->src_height * pipe->src_width * bpp;
- addr = pipe->dma_blt_addr + off;
+ addr = pipe->blt_addr + off;
/* dmap */
MDP_OUTP(MDP_BASE + 0x90008, addr);
@@ -530,7 +529,7 @@
if (pipe->flags & MDP_OV_PLAY_NOWAIT)
return;
- if (dsi_pipe->ov_blt_addr) {
+ if (dsi_pipe->blt_addr) {
mdp4_overlay_dsi_video_dma_busy_wait(mfd);
mdp4_dsi_video_blt_ov_update(dsi_pipe);
@@ -573,7 +572,7 @@
mdp4_overlayproc_cfg(dsi_pipe);
mdp4_overlay_dmap_xy(dsi_pipe);
mdp_is_in_isr = FALSE;
- if (dsi_pipe->ov_blt_addr) {
+ if (dsi_pipe->blt_addr) {
mdp4_dsi_video_blt_ov_update(dsi_pipe);
dsi_pipe->ov_cnt++;
outp32(MDP_INTR_CLEAR, INTR_OVERLAY0_DONE);
@@ -596,7 +595,7 @@
{
spin_lock(&mdp_spin_lock);
dma->busy = FALSE;
- if (dsi_pipe->ov_blt_addr == 0) {
+ if (dsi_pipe->blt_addr == 0) {
spin_unlock(&mdp_spin_lock);
return;
}
@@ -619,23 +618,21 @@
mdp4_allocate_writeback_buf(mfd, MDP4_MIXER0);
- if (mfd->ov0_wb_buf->write_addr == 0) {
+ if (mfd->ov0_wb_buf->phys_addr == 0) {
pr_info("%s: no blt_base assigned\n", __func__);
return;
}
spin_lock_irqsave(&mdp_spin_lock, flag);
- if (enable && dsi_pipe->ov_blt_addr == 0) {
- dsi_pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
- dsi_pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr;
+ if (enable && dsi_pipe->blt_addr == 0) {
+ dsi_pipe->blt_addr = mfd->ov0_wb_buf->phys_addr;
dsi_pipe->blt_cnt = 0;
dsi_pipe->ov_cnt = 0;
dsi_pipe->dmap_cnt = 0;
mdp4_stat.blt_dsi_video++;
change++;
- } else if (enable == 0 && dsi_pipe->ov_blt_addr) {
- dsi_pipe->ov_blt_addr = 0;
- dsi_pipe->dma_blt_addr = 0;
+ } else if (enable == 0 && dsi_pipe->blt_addr) {
+ dsi_pipe->blt_addr = 0;
change++;
}
@@ -644,8 +641,8 @@
return;
}
- pr_debug("%s: enable=%d ov_blt_addr=%x\n", __func__,
- enable, (int)dsi_pipe->ov_blt_addr);
+ pr_debug("%s: enable=%d blt_addr=%x\n", __func__,
+ enable, (int)dsi_pipe->blt_addr);
blt_cfg_changed = 1;
spin_unlock_irqrestore(&mdp_spin_lock, flag);
diff --git a/drivers/video/msm/mdp4_overlay_dtv.c b/drivers/video/msm/mdp4_overlay_dtv.c
index e41f9e8..03b22f1 100644
--- a/drivers/video/msm/mdp4_overlay_dtv.c
+++ b/drivers/video/msm/mdp4_overlay_dtv.c
@@ -376,8 +376,7 @@
return -ENODEV;
mdp4_init_writeback_buf(mfd, MDP4_MIXER1);
- dtv_pipe->ov_blt_addr = 0;
- dtv_pipe->dma_blt_addr = 0;
+ dtv_pipe->blt_addr = 0;
return mdp4_dtv_start(mfd);
}
@@ -408,7 +407,7 @@
int bpp;
char *overlay_base;
- if (pipe->ov_blt_addr == 0)
+ if (pipe->blt_addr == 0)
return;
#ifdef BLT_RGB565
bpp = 2; /* overlay ouput is RGB565 */
@@ -418,7 +417,7 @@
off = (pipe->ov_cnt & 0x01) ?
pipe->src_height * pipe->src_width * bpp : 0;
- addr = pipe->ov_blt_addr + off;
+ addr = pipe->blt_addr + off;
pr_debug("%s overlay addr 0x%x\n", __func__, addr);
/* overlay 1 */
overlay_base = MDP_BASE + MDP4_OVERLAYPROC1_BASE;/* 0x18000 */
@@ -431,7 +430,7 @@
uint32 off, addr;
int bpp;
- if (pipe->ov_blt_addr == 0)
+ if (pipe->blt_addr == 0)
return;
#ifdef BLT_RGB565
@@ -441,7 +440,7 @@
#endif
off = (pipe->dmae_cnt & 0x01) ?
pipe->src_height * pipe->src_width * bpp : 0;
- addr = pipe->dma_blt_addr + off;
+ addr = pipe->blt_addr + off;
MDP_OUTP(MDP_BASE + 0xb0008, addr);
}
@@ -464,7 +463,7 @@
return;
}
- if (dtv_pipe->ov_blt_addr) {
+ if (dtv_pipe->blt_addr) {
mdp4_dtv_blt_ov_update(dtv_pipe);
dtv_pipe->ov_cnt++;
mdp4_overlay_dtv_ov_kick_start();
@@ -521,10 +520,10 @@
}
wait_for_completion_timeout(&dtv_pipe->comp,
- msecs_to_jiffies(VSYNC_PERIOD*2));
+ msecs_to_jiffies(VSYNC_PERIOD * 3));
mdp_disable_irq(MDP_OVERLAY1_TERM);
- if (dtv_pipe->ov_blt_addr)
+ if (dtv_pipe->blt_addr)
mdp4_overlay_dtv_wait4dmae(mfd);
}
@@ -582,7 +581,7 @@
{
if (!dtv_pipe)
return;
- if (dtv_pipe->ov_blt_addr) {
+ if (dtv_pipe->blt_addr) {
mdp4_dtv_blt_dmae_update(dtv_pipe);
dtv_pipe->dmae_cnt++;
}
@@ -643,7 +642,7 @@
unsigned long flag;
int change = 0;
- if (!mfd->ov1_wb_buf->write_addr) {
+ if (!mfd->ov1_wb_buf->phys_addr) {
pr_debug("%s: no writeback buf assigned\n", __func__);
return;
}
@@ -655,18 +654,16 @@
}
spin_lock_irqsave(&mdp_spin_lock, flag);
- if (enable && dtv_pipe->ov_blt_addr == 0) {
- dtv_pipe->ov_blt_addr = mfd->ov1_wb_buf->write_addr;
- dtv_pipe->dma_blt_addr = mfd->ov1_wb_buf->read_addr;
+ if (enable && dtv_pipe->blt_addr == 0) {
+ dtv_pipe->blt_addr = mfd->ov1_wb_buf->phys_addr;
change++;
dtv_pipe->ov_cnt = 0;
dtv_pipe->dmae_cnt = 0;
- } else if (enable == 0 && dtv_pipe->ov_blt_addr) {
- dtv_pipe->ov_blt_addr = 0;
- dtv_pipe->dma_blt_addr = 0;
+ } else if (enable == 0 && dtv_pipe->blt_addr) {
+ dtv_pipe->blt_addr = 0;
change++;
}
- pr_debug("%s: ov_blt_addr=%x\n", __func__, (int)dtv_pipe->ov_blt_addr);
+ pr_debug("%s: blt_addr=%x\n", __func__, (int)dtv_pipe->blt_addr);
spin_unlock_irqrestore(&mdp_spin_lock, flag);
if (!change)
diff --git a/drivers/video/msm/mdp4_overlay_lcdc.c b/drivers/video/msm/mdp4_overlay_lcdc.c
index 1c3bf3f..fd6d365 100644
--- a/drivers/video/msm/mdp4_overlay_lcdc.c
+++ b/drivers/video/msm/mdp4_overlay_lcdc.c
@@ -133,8 +133,8 @@
init_completion(&lcdc_comp);
mdp4_init_writeback_buf(mfd, MDP4_MIXER0);
- pipe->ov_blt_addr = 0;
- pipe->dma_blt_addr = 0;
+ pipe->blt_addr = 0;
+
} else {
pipe = lcdc_pipe;
}
@@ -322,7 +322,7 @@
char *overlay_base;
- if (pipe->ov_blt_addr == 0)
+ if (pipe->blt_addr == 0)
return;
@@ -334,7 +334,7 @@
off = 0;
if (pipe->ov_cnt & 0x01)
off = pipe->src_height * pipe->src_width * bpp;
- addr = pipe->ov_blt_addr + off;
+ addr = pipe->blt_addr + off;
/* overlay 0 */
overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
@@ -347,7 +347,7 @@
uint32 off, addr;
int bpp;
- if (pipe->ov_blt_addr == 0)
+ if (pipe->blt_addr == 0)
return;
@@ -359,7 +359,7 @@
off = 0;
if (pipe->dmap_cnt & 0x01)
off = pipe->src_height * pipe->src_width * bpp;
- addr = pipe->dma_blt_addr + off;
+ addr = pipe->blt_addr + off;
/* dmap */
MDP_OUTP(MDP_BASE + 0x90008, addr);
@@ -436,7 +436,7 @@
if (pipe->flags & MDP_OV_PLAY_NOWAIT)
return;
- if (lcdc_pipe->ov_blt_addr) {
+ if (lcdc_pipe->blt_addr) {
mdp4_overlay_lcdc_dma_busy_wait(mfd);
mdp4_lcdc_blt_ov_update(lcdc_pipe);
@@ -483,7 +483,7 @@
{
spin_lock(&mdp_spin_lock);
dma->busy = FALSE;
- if (lcdc_pipe->ov_blt_addr == 0) {
+ if (lcdc_pipe->blt_addr == 0) {
spin_unlock(&mdp_spin_lock);
return;
}
@@ -498,7 +498,7 @@
{
unsigned long flag;
- if (lcdc_pipe->ov_blt_addr) {
+ if (lcdc_pipe->blt_addr) {
mdp4_overlay_lcdc_dma_busy_wait(mfd);
mdp4_lcdc_blt_ov_update(lcdc_pipe);
@@ -528,26 +528,24 @@
mdp4_allocate_writeback_buf(mfd, MDP4_MIXER0);
- if (!mfd->ov0_wb_buf->write_addr) {
+ if (!mfd->ov0_wb_buf->phys_addr) {
pr_debug("%s: no blt_base assigned\n", __func__);
return;
}
spin_lock_irqsave(&mdp_spin_lock, flag);
- if (enable && lcdc_pipe->ov_blt_addr == 0) {
- lcdc_pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
- lcdc_pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr;
+ if (enable && lcdc_pipe->blt_addr == 0) {
+ lcdc_pipe->blt_addr = mfd->ov0_wb_buf->phys_addr;
change++;
lcdc_pipe->blt_cnt = 0;
lcdc_pipe->ov_cnt = 0;
lcdc_pipe->dmap_cnt = 0;
mdp4_stat.blt_lcdc++;
- } else if (enable == 0 && lcdc_pipe->ov_blt_addr) {
- lcdc_pipe->ov_blt_addr = 0;
- lcdc_pipe->dma_blt_addr = 0;
+ } else if (enable == 0 && lcdc_pipe->blt_addr) {
+ lcdc_pipe->blt_addr = 0;
change++;
}
- pr_info("%s: ov_blt_addr=%x\n", __func__, (int)lcdc_pipe->ov_blt_addr);
+ pr_info("%s: blt_addr=%x\n", __func__, (int)lcdc_pipe->blt_addr);
spin_unlock_irqrestore(&mdp_spin_lock, flag);
if (!change)
@@ -561,7 +559,7 @@
mdp4_overlayproc_cfg(lcdc_pipe);
mdp4_overlay_dmap_xy(lcdc_pipe);
- if (lcdc_pipe->ov_blt_addr) {
+ if (lcdc_pipe->blt_addr) {
mdp4_overlay_lcdc_prefill(mfd);
mdp4_overlay_lcdc_prefill(mfd);
}
diff --git a/drivers/video/msm/mdp4_overlay_mddi.c b/drivers/video/msm/mdp4_overlay_mddi.c
index c4e6793..82864918 100644
--- a/drivers/video/msm/mdp4_overlay_mddi.c
+++ b/drivers/video/msm/mdp4_overlay_mddi.c
@@ -163,8 +163,7 @@
MDP_OUTP(MDP_BASE + 0x00098, 0x01);
mdp4_init_writeback_buf(mfd, MDP4_MIXER0);
- pipe->ov_blt_addr = 0;
- pipe->dma_blt_addr = 0;
+ pipe->blt_addr = 0;
} else {
pipe = mddi_pipe;
}
@@ -255,25 +254,23 @@
unsigned long flag;
pr_debug("%s: blt_end=%d blt_addr=%x pid=%d\n",
- __func__, mddi_pipe->blt_end,
- (int)mddi_pipe->ov_blt_addr, current->pid);
+ __func__, mddi_pipe->blt_end, (int)mddi_pipe->blt_addr, current->pid);
mdp4_allocate_writeback_buf(mfd, MDP4_MIXER0);
- if (mfd->ov0_wb_buf->write_addr == 0) {
+ if (mfd->ov0_wb_buf->phys_addr == 0) {
pr_info("%s: no blt_base assigned\n", __func__);
return -EBUSY;
}
- if (mddi_pipe->ov_blt_addr == 0) {
+ if (mddi_pipe->blt_addr == 0) {
mdp4_mddi_dma_busy_wait(mfd);
spin_lock_irqsave(&mdp_spin_lock, flag);
mddi_pipe->blt_end = 0;
mddi_pipe->blt_cnt = 0;
mddi_pipe->ov_cnt = 0;
mddi_pipe->dmap_cnt = 0;
- mddi_pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
- mddi_pipe->dma_blt_addr = mfd->ov0_wb_buf->write_addr;
+ mddi_pipe->blt_addr = mfd->ov0_wb_buf->phys_addr;
mdp4_stat.blt_mddi++;
spin_unlock_irqrestore(&mdp_spin_lock, flag);
return 0;
@@ -287,9 +284,9 @@
unsigned long flag;
pr_debug("%s: blt_end=%d blt_addr=%x\n",
- __func__, mddi_pipe->blt_end, (int)mddi_pipe->ov_blt_addr);
+ __func__, mddi_pipe->blt_end, (int)mddi_pipe->blt_addr);
- if ((mddi_pipe->blt_end == 0) && mddi_pipe->ov_blt_addr) {
+ if ((mddi_pipe->blt_end == 0) && mddi_pipe->blt_addr) {
spin_lock_irqsave(&mdp_spin_lock, flag);
mddi_pipe->blt_end = 1; /* mark as end */
spin_unlock_irqrestore(&mdp_spin_lock, flag);
@@ -326,7 +323,7 @@
int bpp;
char *overlay_base;
- if (pipe->ov_blt_addr == 0)
+ if (pipe->blt_addr == 0)
return;
@@ -339,7 +336,7 @@
if (pipe->dmap_cnt & 0x01)
off = pipe->src_height * pipe->src_width * bpp;
- addr = pipe->ov_blt_addr + off;
+ addr = pipe->blt_addr + off;
/* dmap */
MDP_OUTP(MDP_BASE + 0x90008, addr);
@@ -347,7 +344,7 @@
off = 0;
if (pipe->ov_cnt & 0x01)
off = pipe->src_height * pipe->src_width * bpp;
- addr2 = pipe->ov_blt_addr + off;
+ addr2 = pipe->blt_addr + off;
/* overlay 0 */
overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
outpdw(overlay_base + 0x000c, addr2);
@@ -374,8 +371,7 @@
if (mddi_pipe->blt_end) {
mddi_pipe->blt_end = 0;
- mddi_pipe->ov_blt_addr = 0;
- mddi_pipe->dma_blt_addr = 0;
+ mddi_pipe->blt_addr = 0;
pr_debug("%s: END, ov_cnt=%d dmap_cnt=%d\n", __func__,
mddi_pipe->ov_cnt, mddi_pipe->dmap_cnt);
mdp_intr_mask &= ~INTR_DMA_P_DONE;
@@ -410,7 +406,7 @@
{
int diff;
- if (mddi_pipe->ov_blt_addr == 0) {
+ if (mddi_pipe->blt_addr == 0) {
mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, TRUE);
spin_lock(&mdp_spin_lock);
dma->busy = FALSE;
@@ -477,7 +473,7 @@
mdp4_mddi_dma_busy_wait(mddi_mfd);
mdp4_overlay_update_lcd(mddi_mfd);
- if (mddi_pipe->ov_blt_addr)
+ if (mddi_pipe->blt_addr)
mdp4_mddi_blt_dmap_busy_wait(mddi_mfd);
mdp4_mddi_overlay_kickoff(mddi_mfd, mddi_pipe);
mddi_mfd->dma_update_flag = 1;
@@ -543,17 +539,17 @@
* to be called before kickoff.
* vice versa for blt disabled.
*/
- if (mddi_pipe->ov_blt_addr && mddi_pipe->blt_cnt == 0)
+ if (mddi_pipe->blt_addr && mddi_pipe->blt_cnt == 0)
mdp4_overlay_update_lcd(mfd); /* first time */
- else if (mddi_pipe->ov_blt_addr == 0 && mddi_pipe->blt_cnt) {
+ else if (mddi_pipe->blt_addr == 0 && mddi_pipe->blt_cnt) {
mdp4_overlay_update_lcd(mfd); /* last time */
mddi_pipe->blt_cnt = 0;
}
pr_debug("%s: blt_addr=%d blt_cnt=%d\n",
- __func__, (int)mddi_pipe->ov_blt_addr, mddi_pipe->blt_cnt);
+ __func__, (int)mddi_pipe->blt_addr, mddi_pipe->blt_cnt);
- if (mddi_pipe->ov_blt_addr)
+ if (mddi_pipe->blt_addr)
mdp4_mddi_blt_dmap_busy_wait(mddi_mfd);
mdp4_mddi_overlay_kickoff(mfd, pipe);
}
@@ -576,7 +572,7 @@
mdp_enable_irq(MDP_OVERLAY0_TERM);
spin_lock_irqsave(&mdp_spin_lock, flag);
mfd->dma->busy = TRUE;
- if (mddi_pipe->ov_blt_addr)
+ if (mddi_pipe->blt_addr)
mfd->dma->dmap_busy = TRUE;
spin_unlock_irqrestore(&mdp_spin_lock, flag);
/* start OVERLAY pipe */
@@ -661,7 +657,7 @@
mdp_enable_irq(MDP_DMA_S_TERM);
- if (mddi_pipe->ov_blt_addr == 0)
+ if (mddi_pipe->blt_addr == 0)
mfd->dma->busy = TRUE;
mfd->ibuf_flushed = TRUE;
@@ -692,7 +688,7 @@
if (mfd && mfd->panel_power_on) {
mdp4_mddi_dma_busy_wait(mfd);
- if (mddi_pipe && mddi_pipe->ov_blt_addr)
+ if (mddi_pipe && mddi_pipe->blt_addr)
mdp4_mddi_blt_dmap_busy_wait(mfd);
mdp4_overlay_update_lcd(mfd);
diff --git a/drivers/video/msm/mdp4_overlay_writeback.c b/drivers/video/msm/mdp4_overlay_writeback.c
index 0174309..8dccf78 100644
--- a/drivers/video/msm/mdp4_overlay_writeback.c
+++ b/drivers/video/msm/mdp4_overlay_writeback.c
@@ -21,7 +21,7 @@
#include <linux/delay.h>
#include <mach/hardware.h>
#include <linux/io.h>
-
+#include <mach/iommu_domains.h>
#include <asm/system.h>
#include <asm/mach-types.h>
#include <linux/semaphore.h>
@@ -272,11 +272,11 @@
}
mutex_unlock(&mfd->writeback_mutex);
- writeback_pipe->ov_blt_addr = (ulong) (node ? node->addr : NULL);
+ writeback_pipe->blt_addr = (ulong) (node ? node->addr : NULL);
- if (!writeback_pipe->ov_blt_addr) {
+ if (!writeback_pipe->blt_addr) {
pr_err("%s: no writeback buffer 0x%x, %p\n", __func__,
- (unsigned int)writeback_pipe->ov_blt_addr, node);
+ (unsigned int)writeback_pipe->blt_addr, node);
mutex_unlock(&mfd->unregister_mutex);
return;
}
@@ -324,13 +324,13 @@
}
mutex_unlock(&mfd->writeback_mutex);
- writeback_pipe->ov_blt_addr = (ulong) (node ? node->addr : NULL);
+ writeback_pipe->blt_addr = (ulong) (node ? node->addr : NULL);
mutex_lock(&mfd->dma->ov_mutex);
pr_debug("%s in writeback\n", __func__);
- if (writeback_pipe && !writeback_pipe->ov_blt_addr) {
+ if (writeback_pipe && !writeback_pipe->blt_addr) {
pr_err("%s: no writeback buffer 0x%x\n", __func__,
- (unsigned int)writeback_pipe->ov_blt_addr);
+ (unsigned int)writeback_pipe->blt_addr);
ret = mdp4_overlay_writeback_update(mfd);
if (ret)
pr_err("%s: update failed writeback pipe NULL\n",
@@ -351,7 +351,7 @@
}
pr_debug("%s: in writeback pan display 0x%x\n", __func__,
- (unsigned int)writeback_pipe->ov_blt_addr);
+ (unsigned int)writeback_pipe->blt_addr);
mdp4_writeback_kickoff_ui(mfd, writeback_pipe);
mdp4_iommu_unmap(writeback_pipe);
@@ -407,11 +407,10 @@
pr_err("%s: out of memory\n", __func__);
goto register_alloc_fail;
}
-
+ temp->ihdl = NULL;
if (data->iova)
temp->addr = (void *)(data->iova + data->offset);
-#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
- else {
+ else if (mfd->iclient) {
struct ion_handle *srcp_ihdl;
ulong len;
srcp_ihdl = ion_import_fd(mfd->iclient,
@@ -420,22 +419,30 @@
pr_err("%s: ion import fd failed\n", __func__);
goto register_ion_fail;
}
- if (ion_phys(mfd->iclient,
- srcp_ihdl,
- (ulong *)&temp->addr,
- (size_t *)&len)) {
- pr_err("%s: unable to get ion phys\n",
+
+ if (ion_map_iommu(mfd->iclient,
+ srcp_ihdl,
+ DISPLAY_DOMAIN,
+ GEN_POOL,
+ SZ_4K,
+ 0,
+ (ulong *)&temp->addr,
+ (ulong *)&len,
+ 0,
+ ION_IOMMU_UNMAP_DELAYED)) {
+ ion_free(mfd->iclient, srcp_ihdl);
+ pr_err("%s: unable to get ion mapping addr\n",
__func__);
goto register_ion_fail;
}
temp->addr += data->offset;
+ temp->ihdl = srcp_ihdl;
}
-#else
else {
pr_err("%s: only support ion memory\n", __func__);
goto register_ion_fail;
}
-#endif
+
memcpy(&temp->buf_info, data, sizeof(struct msmfb_data));
if (mdp4_overlay_writeback_register_buffer(mfd, temp)) {
pr_err("%s: error registering node\n", __func__);
@@ -514,6 +521,15 @@
list_del(&node->active_entry);
node->state = WITH_CLIENT;
memcpy(data, &node->buf_info, sizeof(struct msmfb_data));
+ if (!data->iova && mfd->iclient && node->ihdl) {
+ ion_unmap_iommu(mfd->iclient,
+ node->ihdl,
+ DISPLAY_DOMAIN,
+ GEN_POOL);
+ ion_free(mfd->iclient,
+ node->ihdl);
+ }
} else {
pr_err("node is NULL. Somebody else dequeued?\n");
rc = -ENOBUFS;
diff --git a/drivers/video/msm/mdp4_util.c b/drivers/video/msm/mdp4_util.c
index 208e3ce..f192b12 100644
--- a/drivers/video/msm/mdp4_util.c
+++ b/drivers/video/msm/mdp4_util.c
@@ -2559,14 +2559,13 @@
buf = mfd->ov1_wb_buf;
buf->ihdl = NULL;
- buf->write_addr = 0;
- buf->read_addr = 0;
+ buf->phys_addr = 0;
}
u32 mdp4_allocate_writeback_buf(struct msm_fb_data_type *mfd, u32 mix_num)
{
struct mdp_buf_type *buf;
- ion_phys_addr_t addr, read_addr = 0;
+ ion_phys_addr_t addr;
size_t buffer_size;
unsigned long len;
@@ -2575,7 +2574,7 @@
else
buf = mfd->ov1_wb_buf;
- if (buf->write_addr || !IS_ERR_OR_NULL(buf->ihdl))
+ if (buf->phys_addr || !IS_ERR_OR_NULL(buf->ihdl))
return 0;
if (!buf->size) {
@@ -2592,25 +2591,10 @@
buf->ihdl = ion_alloc(mfd->iclient, buffer_size, SZ_4K,
mfd->mem_hid);
if (!IS_ERR_OR_NULL(buf->ihdl)) {
- if (mfd->mem_hid & ION_SECURE) {
- if (ion_phys(mfd->iclient, buf->ihdl,
- &addr, (unsigned *)&len)) {
- pr_err("%s:%d: ion_phys map failed\n",
- __func__, __LINE__);
- return -ENOMEM;
- }
- } else {
- if (ion_map_iommu(mfd->iclient, buf->ihdl,
- DISPLAY_WRITE_DOMAIN, GEN_POOL, SZ_4K,
- 0, &addr, &len, 0, 0)) {
- pr_err("ion_map_iommu() write failed\n");
- return -ENOMEM;
- }
- }
if (ion_map_iommu(mfd->iclient, buf->ihdl,
- DISPLAY_READ_DOMAIN, GEN_POOL, SZ_4K,
- 0, &read_addr, &len, 0, 0)) {
- pr_err("ion_map_iommu() read failed\n");
+ DISPLAY_DOMAIN, GEN_POOL, SZ_4K, 0, &addr,
+ &len, 0, 0)) {
+ pr_err("ion_map_iommu() failed\n");
return -ENOMEM;
}
} else {
@@ -2625,13 +2609,7 @@
if (addr) {
pr_info("allocating %d bytes at %x for mdp writeback\n",
buffer_size, (u32) addr);
- buf->write_addr = addr;
-
- if (read_addr)
- buf->read_addr = read_addr;
- else
- buf->read_addr = buf->write_addr;
-
+ buf->phys_addr = addr;
return 0;
} else {
pr_err("%s cannot allocate memory for mdp writeback!\n",
@@ -2651,25 +2629,21 @@
if (!IS_ERR_OR_NULL(mfd->iclient)) {
if (!IS_ERR_OR_NULL(buf->ihdl)) {
- if (!(mfd->mem_hid & ION_SECURE))
- ion_unmap_iommu(mfd->iclient, buf->ihdl,
- DISPLAY_WRITE_DOMAIN, GEN_POOL);
ion_unmap_iommu(mfd->iclient, buf->ihdl,
- DISPLAY_READ_DOMAIN, GEN_POOL);
+ DISPLAY_DOMAIN, GEN_POOL);
ion_free(mfd->iclient, buf->ihdl);
pr_debug("%s:%d free writeback imem\n", __func__,
__LINE__);
buf->ihdl = NULL;
}
} else {
- if (buf->write_addr) {
- free_contiguous_memory_by_paddr(buf->write_addr);
+ if (buf->phys_addr) {
+ free_contiguous_memory_by_paddr(buf->phys_addr);
pr_debug("%s:%d free writeback pmem\n", __func__,
__LINE__);
}
}
- buf->write_addr = 0;
- buf->read_addr = 0;
+ buf->phys_addr = 0;
}
static int mdp4_update_pcc_regs(uint32_t offset,
diff --git a/drivers/video/msm/mdss/Makefile b/drivers/video/msm/mdss/Makefile
index 780e0c6..492437e 100644
--- a/drivers/video/msm/mdss/Makefile
+++ b/drivers/video/msm/mdss/Makefile
@@ -4,6 +4,7 @@
mdss-mdp-objs += mdss_mdp_intf_writeback.o
mdss-mdp-objs += mdss_mdp_rotator.o
mdss-mdp-objs += mdss_mdp_overlay.o
+mdss-mdp-objs += mdss_mdp_wb.o
obj-$(CONFIG_FB_MSM_MDSS) += mdss-mdp.o
obj-$(CONFIG_FB_MSM_MDSS) += mdss_fb.o
obj-$(CONFIG_FB_MSM_MDSS_WRITEBACK) += mdss_wb.o
diff --git a/drivers/video/msm/mdss/mdss_fb.c b/drivers/video/msm/mdss/mdss_fb.c
index 0fedb6c..a96bf3a 100644
--- a/drivers/video/msm/mdss/mdss_fb.c
+++ b/drivers/video/msm/mdss/mdss_fb.c
@@ -1158,6 +1158,20 @@
return ret;
}
+struct fb_info *msm_fb_get_writeback_fb(void)
+{
+ int c = 0;
+ for (c = 0; c < fbi_list_index; ++c) {
+ struct msm_fb_data_type *mfd;
+ mfd = (struct msm_fb_data_type *)fbi_list[c]->par;
+ if (mfd->panel.type == WRITEBACK_PANEL)
+ return fbi_list[c];
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(msm_fb_get_writeback_fb);
+
int mdss_register_panel(struct mdss_panel_data *pdata)
{
struct platform_device *mdss_fb_dev = NULL;
diff --git a/drivers/video/msm/mdss/mdss_fb.h b/drivers/video/msm/mdss/mdss_fb.h
index a3f0dbe..ac6c213 100644
--- a/drivers/video/msm/mdss/mdss_fb.h
+++ b/drivers/video/msm/mdss/mdss_fb.h
@@ -55,6 +55,7 @@
int op_enable;
u32 fb_imgType;
+ u32 dst_format;
int hw_refresh;
@@ -90,6 +91,7 @@
struct ion_client *iclient;
struct mdss_mdp_ctl *ctl;
+ struct mdss_mdp_wb *wb;
};
int mdss_fb_get_phys_info(unsigned long *start, unsigned long *len, int fb_num);
diff --git a/drivers/video/msm/mdss/mdss_mdp.c b/drivers/video/msm/mdss/mdss_mdp.c
index 41e0c18..46e49da 100644
--- a/drivers/video/msm/mdss/mdss_mdp.c
+++ b/drivers/video/msm/mdss/mdss_mdp.c
@@ -554,10 +554,10 @@
}
disable_irq(mdss_res->irq);
- mdss_res->fs = regulator_get(NULL, "gdsc_mdss");
+ mdss_res->fs = regulator_get(&pdev->dev, "vdd");
if (IS_ERR_OR_NULL(mdss_res->fs)) {
mdss_res->fs = NULL;
- pr_err("unable to get gdsc_mdss regulator\n");
+ pr_err("unable to get gdsc regulator\n");
goto error;
}
regulator_enable(mdss_res->fs);
diff --git a/drivers/video/msm/mdss/mdss_mdp.h b/drivers/video/msm/mdss/mdss_mdp.h
index 2cdd9f6..4489fbb 100644
--- a/drivers/video/msm/mdss/mdss_mdp.h
+++ b/drivers/video/msm/mdss/mdss_mdp.h
@@ -247,6 +247,12 @@
unsigned long smp[MAX_PLANES];
};
+struct mdss_mdp_writeback_arg {
+ struct mdss_mdp_data *data;
+ void (*callback_fnc) (void *arg);
+ void *priv_data;
+};
+
static inline void mdss_mdp_ctl_write(struct mdss_mdp_ctl *ctl,
u32 reg, u32 val)
{
@@ -313,4 +319,7 @@
int mdss_mdp_get_img(struct ion_client *iclient, struct msmfb_data *img,
struct mdss_mdp_img_data *data);
+int mdss_mdp_wb_kickoff(struct mdss_mdp_ctl *ctl);
+int mdss_mdp_wb_ioctl_handler(struct msm_fb_data_type *mfd, u32 cmd, void *arg);
+
#endif /* MDSS_MDP_H */
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c b/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c
index c1bc58a..af422b7 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c
@@ -39,9 +39,12 @@
u16 height;
u8 rot90;
- struct completion comp;
+ int initialized;
+
struct mdss_mdp_plane_sizes dst_planes;
- struct mdss_mdp_data wb_data;
+
+ void (*callback_fnc) (void *arg);
+ void *callback_arg;
};
static struct mdss_mdp_writeback_ctx wb_ctx_list[MDSS_MDP_MAX_WRITEBACK] = {
@@ -72,8 +75,6 @@
},
};
-static void *videomemory;
-
static int mdss_mdp_writeback_addr_setup(struct mdss_mdp_writeback_ctx *ctx,
struct mdss_mdp_data *data)
{
@@ -101,7 +102,8 @@
{
struct mdss_mdp_format_params *fmt;
u32 dst_format, pattern, ystride0, ystride1, outsize, chroma_samp;
- int off, ret;
+ int off;
+ u32 opmode = ctx->opmode;
pr_debug("wb_num=%d format=%d\n", ctx->wb_num, ctx->format);
@@ -111,11 +113,30 @@
fmt = mdss_mdp_get_format_params(ctx->format);
if (!fmt) {
pr_err("wb format=%d not supported\n", ctx->format);
- return ret;
+ return -EINVAL;
}
chroma_samp = fmt->chroma_sample;
- if (ctx->rot90) {
+
+ if (ctx->type != MDSS_MDP_WRITEBACK_TYPE_ROTATOR && fmt->is_yuv) {
+ mdss_mdp_csc_setup(MDSS_MDP_BLOCK_WB, ctx->wb_num, 0,
+ MDSS_MDP_CSC_RGB2YUV);
+ opmode |= (1 << 8) | /* CSC_EN */
+ (0 << 9) | /* SRC_DATA=RGB */
+ (1 << 10); /* DST_DATA=YCBCR */
+
+ switch (chroma_samp) {
+ case MDSS_MDP_CHROMA_RGB:
+ case MDSS_MDP_CHROMA_420:
+ case MDSS_MDP_CHROMA_H2V1:
+ opmode |= (chroma_samp << 11);
+ break;
+ case MDSS_MDP_CHROMA_H1V2:
+ default:
+ pr_err("unsupported wb chroma samp=%d\n", chroma_samp);
+ return -EINVAL;
+ }
+ } else if (ctx->rot90) {
if (chroma_samp == MDSS_MDP_CHROMA_H2V1)
chroma_samp = MDSS_MDP_CHROMA_H1V2;
else if (chroma_samp == MDSS_MDP_CHROMA_H1V2)
@@ -147,7 +168,7 @@
off = MDSS_MDP_REG_WB_OFFSET(ctx->wb_num);
MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_WB_DST_FORMAT, dst_format);
- MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_WB_DST_OP_MODE, ctx->opmode);
+ MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_WB_DST_OP_MODE, opmode);
MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_WB_DST_PACK_PATTERN, pattern);
MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_WB_DST_YSTRIDE0, ystride0);
MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_WB_DST_YSTRIDE1, ystride1);
@@ -156,32 +177,24 @@
return 0;
}
-static int mdss_mdp_writeback_wfd_setup(struct mdss_mdp_ctl *ctl,
- struct mdss_mdp_writeback_ctx *ctx)
+static int mdss_mdp_writeback_prepare_wfd(struct mdss_mdp_ctl *ctl, void *arg)
{
- struct msm_fb_data_type *mfd;
- struct fb_info *fbi;
+ struct mdss_mdp_writeback_ctx *ctx;
int ret;
- u32 plane_size;
- mfd = ctl->mfd;
- fbi = mfd->fbi;
+ ctx = (struct mdss_mdp_writeback_ctx *) ctl->priv_data;
+ if (!ctx)
+ return -ENODEV;
- pr_debug("setup ctl=%d\n", ctl->num);
+ if (ctx->initialized) /* already set */
+ return 0;
+
+ pr_debug("wfd setup ctl=%d\n", ctl->num);
ctx->opmode = 0;
ctx->format = ctl->dst_format;
- ctx->width = fbi->var.xres;
- ctx->height = fbi->var.yres;
-
- plane_size = ctx->width * ctx->height * fbi->var.bits_per_pixel / 8;
-
- videomemory = (void *) fbi->fix.smem_start + fbi->fix.smem_len -
- plane_size;
-
- ctx->wb_data.num_planes = 1;
- ctx->wb_data.p[0].addr = (u32) videomemory;
- ctx->wb_data.p[0].len = plane_size;
+ ctx->width = ctl->width;
+ ctx->height = ctl->height;
ret = mdss_mdp_writeback_format_setup(ctx);
if (ret) {
@@ -189,16 +202,30 @@
return ret;
}
- ctl->flush_bits |= BIT(16); /* WB */
+ ctx->initialized = true;
return 0;
}
-static int mdss_mdp_writeback_rot_setup(struct mdss_mdp_ctl *ctl,
- struct mdss_mdp_writeback_ctx *ctx,
- struct mdss_mdp_rotator_session *rot)
+static int mdss_mdp_writeback_prepare_rot(struct mdss_mdp_ctl *ctl, void *arg)
{
- pr_debug("rotator wb_num=%d\n", ctx->wb_num);
+ struct mdss_mdp_writeback_ctx *ctx;
+ struct mdss_mdp_writeback_arg *wb_args;
+ struct mdss_mdp_rotator_session *rot;
+
+ ctx = (struct mdss_mdp_writeback_ctx *) ctl->priv_data;
+ if (!ctx)
+ return -ENODEV;
+ wb_args = (struct mdss_mdp_writeback_arg *) arg;
+ if (!wb_args)
+ return -ENOENT;
+
+ rot = (struct mdss_mdp_rotator_session *) wb_args->priv_data;
+ if (!rot) {
+ pr_err("unable to retrieve rot session ctl=%d\n", ctl->num);
+ return -ENODEV;
+ }
+ pr_debug("rot setup wb_num=%d\n", ctx->wb_num);
ctx->opmode = BIT(6); /* ROT EN */
if (ROT_BLK_SIZE == 128)
@@ -238,27 +265,6 @@
return 0;
}
-static int mdss_mdp_writeback_prepare(struct mdss_mdp_ctl *ctl, void *arg)
-{
- struct mdss_mdp_writeback_ctx *ctx;
- ctx = (struct mdss_mdp_writeback_ctx *) ctl->priv_data;
- if (!ctx)
- return -ENODEV;
-
- if (ctx->type == MDSS_MDP_WRITEBACK_TYPE_ROTATOR) {
- struct mdss_mdp_rotator_session *rot;
- rot = (struct mdss_mdp_rotator_session *) arg;
- if (!rot) {
- pr_err("unable to retrieve rot session ctl=%d\n",
- ctl->num);
- return -ENODEV;
- }
- mdss_mdp_writeback_rot_setup(ctl, ctx, rot);
- }
-
- return 0;
-}
-
static void mdss_mdp_writeback_intr_done(void *arg)
{
struct mdss_mdp_writeback_ctx *ctx;
@@ -274,14 +280,14 @@
mdss_mdp_irq_disable_nosync(ctx->intr_type, ctx->intf_num);
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, true);
- complete_all(&ctx->comp);
+ if (ctx->callback_fnc)
+ ctx->callback_fnc(ctx->callback_arg);
}
static int mdss_mdp_writeback_display(struct mdss_mdp_ctl *ctl, void *arg)
{
struct mdss_mdp_writeback_ctx *ctx;
- struct mdss_mdp_rotator_session *rot = NULL;
- struct mdss_mdp_data *wb_data;
+ struct mdss_mdp_writeback_arg *wb_args;
u32 flush_bits;
int ret;
@@ -289,28 +295,22 @@
if (!ctx)
return -ENODEV;
- if (ctx->type == MDSS_MDP_WRITEBACK_TYPE_ROTATOR) {
- rot = (struct mdss_mdp_rotator_session *) arg;
- if (!rot) {
- pr_err("unable to retrieve rot session ctl=%d\n",
- ctl->num);
- return -ENODEV;
- }
- wb_data = rot->dst_data;
- } else {
- wb_data = &ctx->wb_data;
- }
+ wb_args = (struct mdss_mdp_writeback_arg *) arg;
+ if (!wb_args)
+ return -ENOENT;
- ret = mdss_mdp_writeback_addr_setup(ctx, wb_data);
+ ret = mdss_mdp_writeback_addr_setup(ctx, wb_args->data);
if (ret) {
pr_err("writeback data setup error ctl=%d\n", ctl->num);
return ret;
}
+ ctx->callback_fnc = wb_args->callback_fnc;
+ ctx->callback_arg = wb_args->priv_data;
+
flush_bits = BIT(16); /* WB */
mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, flush_bits);
- INIT_COMPLETION(ctx->comp);
mdss_mdp_set_intr_callback(ctx->intr_type, ctx->intf_num,
mdss_mdp_writeback_intr_done, ctx);
mdss_mdp_irq_enable(ctx->intr_type, ctx->intf_num);
@@ -319,17 +319,6 @@
mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_START, 1);
wmb();
- if (rot) {
- pr_debug("rotator kickoff wb_num=%d\n", ctx->wb_num);
- mutex_lock(&rot->lock);
- rot->comp = &ctx->comp;
- rot->busy = 1;
- mutex_unlock(&rot->lock);
- } else {
- pr_debug("writeback kickoff wb_num=%d\n", ctx->wb_num);
- wait_for_completion_interruptible(&ctx->comp);
- }
-
return 0;
}
@@ -355,16 +344,12 @@
}
ctl->priv_data = ctx;
ctx->wb_num = ctl->num; /* wb num should match ctl num */
+ ctx->initialized = false;
- init_completion(&ctx->comp);
-
- if (ctx->type == MDSS_MDP_WRITEBACK_TYPE_WFD)
- ret = mdss_mdp_writeback_wfd_setup(ctl, ctx);
- else if (ctx->type == MDSS_MDP_WRITEBACK_TYPE_ROTATOR)
- ctl->prepare_fnc = mdss_mdp_writeback_prepare;
- else /* line mode not supported */
- return -ENOSYS;
-
+ if (ctx->type == MDSS_MDP_WRITEBACK_TYPE_ROTATOR)
+ ctl->prepare_fnc = mdss_mdp_writeback_prepare_rot;
+ else /* wfd or line mode */
+ ctl->prepare_fnc = mdss_mdp_writeback_prepare_wfd;
ctl->stop_fnc = mdss_mdp_writeback_stop;
ctl->display_fnc = mdss_mdp_writeback_display;
diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c
index f1b158d..43ddb5e 100644
--- a/drivers/video/msm/mdss/mdss_mdp_overlay.c
+++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c
@@ -793,6 +793,11 @@
ret = 0;
}
break;
+
+ default:
+ if (mfd->panel_info.type == WRITEBACK_PANEL)
+ ret = mdss_mdp_wb_ioctl_handler(mfd, cmd, argp);
+ break;
}
return ret;
@@ -809,7 +814,11 @@
mfd->cursor_update = mdss_mdp_hw_cursor_update;
mfd->dma_fnc = mdss_mdp_overlay_pan_display;
mfd->ioctl_handler = mdss_mdp_overlay_ioctl_handler;
- mfd->kickoff_fnc = mdss_mdp_overlay_kickoff;
+
+ if (mfd->panel_info.type == WRITEBACK_PANEL)
+ mfd->kickoff_fnc = mdss_mdp_wb_kickoff;
+ else
+ mfd->kickoff_fnc = mdss_mdp_overlay_kickoff;
return 0;
}
diff --git a/drivers/video/msm/mdss/mdss_mdp_rotator.c b/drivers/video/msm/mdss/mdss_mdp_rotator.c
index 628b7f5..fc3a843 100644
--- a/drivers/video/msm/mdss/mdss_mdp_rotator.c
+++ b/drivers/video/msm/mdss/mdss_mdp_rotator.c
@@ -39,6 +39,7 @@
rot->ref_cnt++;
rot->session_id = i | MDSS_MDP_ROT_SESSION_MASK;
mutex_init(&rot->lock);
+ init_completion(&rot->comp);
break;
}
}
@@ -65,19 +66,6 @@
return NULL;
}
-static int mdss_mdp_rotator_busy_wait(struct mdss_mdp_rotator_session *rot)
-{
- mutex_lock(&rot->lock);
- if (rot->busy) {
- pr_debug("waiting for rot=%d to complete\n", rot->pipe->num);
- wait_for_completion_interruptible(rot->comp);
- rot->busy = 0;
- }
- mutex_unlock(&rot->lock);
-
- return 0;
-}
-
static struct mdss_mdp_pipe *mdss_mdp_rotator_pipe_alloc(void)
{
struct mdss_mdp_mixer *mixer;
@@ -110,6 +98,52 @@
return pipe;
}
+static int mdss_mdp_rotator_busy_wait(struct mdss_mdp_rotator_session *rot)
+{
+ mutex_lock(&rot->lock);
+ if (rot->busy) {
+ pr_debug("waiting for rot=%d to complete\n", rot->pipe->num);
+ wait_for_completion_interruptible(&rot->comp);
+ rot->busy = false;
+
+ }
+ mutex_unlock(&rot->lock);
+
+ return 0;
+}
+
+static void mdss_mdp_rotator_callback(void *arg)
+{
+ struct mdss_mdp_rotator_session *rot;
+
+ rot = (struct mdss_mdp_rotator_session *) arg;
+ if (rot)
+ complete(&rot->comp);
+}
+
+static int mdss_mdp_rotator_kickoff(struct mdss_mdp_ctl *ctl,
+ struct mdss_mdp_rotator_session *rot,
+ struct mdss_mdp_data *dst_data)
+{
+ int ret;
+ struct mdss_mdp_writeback_arg wb_args = {
+ .callback_fnc = mdss_mdp_rotator_callback,
+ .data = dst_data,
+ .priv_data = rot,
+ };
+
+ mutex_lock(&rot->lock);
+ INIT_COMPLETION(rot->comp);
+ rot->busy = true;
+ ret = mdss_mdp_display_commit(ctl, &wb_args);
+ if (ret) {
+ rot->busy = false;
+ pr_err("problem with kickoff rot pipe=%d", rot->pipe->num);
+ }
+ mutex_unlock(&rot->lock);
+ return ret;
+}
+
static int mdss_mdp_rotator_pipe_dequeue(struct mdss_mdp_rotator_session *rot)
{
if (rot->pipe) {
@@ -182,15 +216,13 @@
rot_pipe->params_changed++;
}
- rot->dst_data = dst_data;
-
ret = mdss_mdp_pipe_queue_data(rot->pipe, src_data);
if (ret) {
pr_err("unable to queue rot data\n");
goto done;
}
- ret = mdss_mdp_display_commit(ctl, rot);
+ ret = mdss_mdp_rotator_kickoff(ctl, rot, dst_data);
done:
mutex_unlock(&rotator_lock);
diff --git a/drivers/video/msm/mdss/mdss_mdp_rotator.h b/drivers/video/msm/mdss/mdss_mdp_rotator.h
index 8940c46..1e4b81e0 100644
--- a/drivers/video/msm/mdss/mdss_mdp_rotator.h
+++ b/drivers/video/msm/mdss/mdss_mdp_rotator.h
@@ -33,12 +33,11 @@
u32 bwc_mode;
struct mdss_mdp_pipe *pipe;
- struct mdss_mdp_data *dst_data;
struct mutex lock;
+ struct completion comp;
u8 busy;
u8 no_wait;
- struct completion *comp;
struct list_head head;
};
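
The rotator changes above drop the borrowed completion pointer: each session now owns rot->comp and is signalled through the callback_fnc carried in struct mdss_mdp_writeback_arg. The standalone module below is a minimal sketch of that fire-and-wait pattern only, not part of the change; the demo_* names are invented and a workqueue stands in for the writeback-done interrupt.

/* Sketch: arm a private completion, hand a callback through an argument
 * struct, and let the "done" path signal it (workqueue plays the IRQ). */
#include <linux/completion.h>
#include <linux/module.h>
#include <linux/workqueue.h>

struct demo_args {
	void (*callback_fnc)(void *arg);	/* mirrors mdss_mdp_writeback_arg */
	void *priv_data;
};

static struct demo_args demo;
static struct work_struct demo_work;
static DECLARE_COMPLETION(demo_comp);

static void demo_done(void *arg)		/* "interrupt" side */
{
	complete((struct completion *)arg);
}

static void demo_work_fn(struct work_struct *work)
{
	/* hardware would finish writing the frame here */
	demo.callback_fnc(demo.priv_data);
}

static int __init demo_init(void)
{
	demo.callback_fnc = demo_done;
	demo.priv_data = &demo_comp;

	INIT_WORK(&demo_work, demo_work_fn);
	schedule_work(&demo_work);		/* "kickoff" */

	/* caller blocks until the callback fires, like rotator_busy_wait() */
	if (wait_for_completion_interruptible(&demo_comp))
		pr_info("demo wait interrupted\n");
	return 0;
}

static void __exit demo_exit(void)
{
	flush_work(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
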
diff --git a/drivers/video/msm/mdss/mdss_mdp_wb.c b/drivers/video/msm/mdss/mdss_mdp_wb.c
new file mode 100644
index 0000000..da55edc
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_mdp_wb.c
@@ -0,0 +1,539 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+#include "mdss_mdp.h"
+#include "mdss_fb.h"
+
+#define DEBUG_WRITEBACK
+
+enum mdss_mdp_wb_state {
+ WB_OPEN,
+ WB_START,
+ WB_STOPING,
+ WB_STOP
+};
+
+struct mdss_mdp_wb {
+ u32 fb_ndx;
+ struct mutex lock;
+ struct list_head busy_queue;
+ struct list_head free_queue;
+ struct list_head register_queue;
+ wait_queue_head_t wait_q;
+ u32 state;
+};
+
+enum mdss_mdp_wb_node_state {
+ REGISTERED,
+ IN_FREE_QUEUE,
+ IN_BUSY_QUEUE,
+ WITH_CLIENT
+};
+
+struct mdss_mdp_wb_data {
+ struct list_head registered_entry;
+ struct list_head active_entry;
+ struct msmfb_data buf_info;
+ struct mdss_mdp_data buf_data;
+ int state;
+};
+
+static DEFINE_MUTEX(mdss_mdp_wb_buf_lock);
+static struct mdss_mdp_wb mdss_mdp_wb_info;
+
+#ifdef DEBUG_WRITEBACK
+/* for debugging: writeback output buffer to framebuffer memory */
+static inline
+struct mdss_mdp_data *mdss_mdp_wb_debug_buffer(struct msm_fb_data_type *mfd)
+{
+ static void *videomemory;
+ static void *mdss_wb_mem;
+ static struct mdss_mdp_data buffer = {
+ .num_planes = 1,
+ };
+
+ struct fb_info *fbi;
+ int img_size;
+ int offset;
+
+
+ fbi = mfd->fbi;
+ img_size = fbi->var.xres * fbi->var.yres * fbi->var.bits_per_pixel / 8;
+ offset = fbi->fix.smem_len - img_size;
+
+ videomemory = fbi->screen_base + offset;
+ mdss_wb_mem = (void *)(fbi->fix.smem_start + offset);
+
+ buffer.p[0].addr = fbi->fix.smem_start + offset;
+ buffer.p[0].len = img_size;
+
+ return &buffer;
+}
+#else
+static inline
+struct mdss_mdp_data *mdss_mdp_wb_debug_buffer(struct msm_fb_data_type *mfd)
+{
+ return NULL;
+}
+#endif
+
+static int mdss_mdp_wb_init(struct msm_fb_data_type *mfd)
+{
+ struct mdss_mdp_wb *wb;
+
+ mutex_lock(&mdss_mdp_wb_buf_lock);
+ wb = mfd->wb;
+ if (wb == NULL) {
+ wb = &mdss_mdp_wb_info;
+ wb->fb_ndx = mfd->index;
+ mfd->wb = wb;
+ } else if (mfd->index != wb->fb_ndx) {
+ pr_err("only one writeback intf supported at a time\n");
+ mutex_unlock(&mdss_mdp_wb_buf_lock);
+ return -EMLINK;
+ } else {
+ pr_debug("writeback already initialized\n");
+ }
+
+ pr_debug("init writeback on fb%d\n", wb->fb_ndx);
+
+ mutex_init(&wb->lock);
+ INIT_LIST_HEAD(&wb->free_queue);
+ INIT_LIST_HEAD(&wb->busy_queue);
+ INIT_LIST_HEAD(&wb->register_queue);
+ wb->state = WB_OPEN;
+ init_waitqueue_head(&wb->wait_q);
+
+ mfd->wb = wb;
+ mutex_unlock(&mdss_mdp_wb_buf_lock);
+ return 0;
+}
+
+static int mdss_mdp_wb_terminate(struct msm_fb_data_type *mfd)
+{
+ struct mdss_mdp_wb *wb = mfd->wb;
+
+ if (!wb) {
+ pr_err("unable to terminate, writeback is not initialized\n");
+ return -ENODEV;
+ }
+
+ pr_debug("terminate writeback\n");
+
+ mutex_lock(&mdss_mdp_wb_buf_lock);
+ mutex_lock(&wb->lock);
+ if (!list_empty(&wb->register_queue)) {
+ struct mdss_mdp_wb_data *node, *temp;
+ list_for_each_entry_safe(node, temp, &wb->register_queue,
+ registered_entry) {
+ list_del(&node->registered_entry);
+ kfree(node);
+ }
+ }
+ mutex_unlock(&wb->lock);
+
+ mfd->wb = NULL;
+ mutex_unlock(&mdss_mdp_wb_buf_lock);
+
+ return 0;
+}
+
+static int mdss_mdp_wb_start(struct msm_fb_data_type *mfd)
+{
+ struct mdss_mdp_wb *wb = mfd->wb;
+
+ if (!wb) {
+ pr_err("unable to start, writeback is not initialized\n");
+ return -ENODEV;
+ }
+
+ mutex_lock(&wb->lock);
+ wb->state = WB_START;
+ mutex_unlock(&wb->lock);
+ wake_up(&wb->wait_q);
+
+ return 0;
+}
+
+static int mdss_mdp_wb_stop(struct msm_fb_data_type *mfd)
+{
+ struct mdss_mdp_wb *wb = mfd->wb;
+
+ if (!wb) {
+ pr_err("unable to stop, writeback is not initialized\n");
+ return -ENODEV;
+ }
+
+ mutex_lock(&wb->lock);
+ wb->state = WB_STOPING;
+ mutex_unlock(&wb->lock);
+ wake_up(&wb->wait_q);
+
+ return 0;
+}
+
+static int mdss_mdp_wb_register_node(struct mdss_mdp_wb *wb,
+ struct mdss_mdp_wb_data *node)
+{
+ if (!node) {
+ pr_err("Invalid wb node\n");
+ return -EINVAL;
+ }
+ node->state = REGISTERED;
+ list_add_tail(&node->registered_entry, &wb->register_queue);
+
+ return 0;
+}
+
+static struct mdss_mdp_wb_data *get_local_node(struct mdss_mdp_wb *wb,
+ struct msmfb_data *data) {
+ struct mdss_mdp_wb_data *node;
+ struct mdss_mdp_img_data *buf;
+ int ret;
+
+ if (!data->iova)
+ return NULL;
+
+ if (!list_empty(&wb->register_queue)) {
+ list_for_each_entry(node, &wb->register_queue, registered_entry)
+ if (node->buf_info.iova == data->iova) {
+ pr_debug("found node iova=%x addr=%x\n",
+ data->iova, node->buf_data.p[0].addr);
+ return node;
+ }
+ }
+
+ node = kzalloc(sizeof(struct mdss_mdp_wb_data), GFP_KERNEL);
+ if (node == NULL) {
+ pr_err("out of memory\n");
+ return NULL;
+ }
+
+ node->buf_data.num_planes = 1;
+ buf = &node->buf_data.p[0];
+ buf->addr = (u32) (data->iova + data->offset);
+ buf->len = UINT_MAX; /* trusted source */
+ ret = mdss_mdp_wb_register_node(wb, node);
+ if (IS_ERR_VALUE(ret)) {
+ pr_err("error registering wb node\n");
+ kfree(node);
+ return NULL;
+ }
+
+ pr_debug("register node iova=0x%x addr=0x%x\n", data->iova, buf->addr);
+
+ return node;
+}
+
+static struct mdss_mdp_wb_data *get_user_node(struct msm_fb_data_type *mfd,
+ struct msmfb_data *data) {
+ struct mdss_mdp_wb *wb = mfd->wb;
+ struct mdss_mdp_wb_data *node;
+ struct mdss_mdp_img_data *buf;
+ int ret;
+
+ node = kzalloc(sizeof(struct mdss_mdp_wb_data), GFP_KERNEL);
+ if (node == NULL) {
+ pr_err("out of memory\n");
+ return NULL;
+ }
+
+ node->buf_data.num_planes = 1;
+ buf = &node->buf_data.p[0];
+ ret = mdss_mdp_get_img(mfd->iclient, data, buf);
+ if (IS_ERR_VALUE(ret)) {
+ pr_err("error getting buffer info\n");
+ goto register_fail;
+ }
+ memcpy(&node->buf_info, data, sizeof(*data));
+
+ ret = mdss_mdp_wb_register_node(wb, node);
+ if (IS_ERR_VALUE(ret)) {
+ pr_err("error registering wb node\n");
+ goto register_fail;
+ }
+
+ pr_debug("register node mem_id=%d offset=%u addr=0x%x len=%d\n",
+ data->memory_id, data->offset, buf->addr, buf->len);
+
+ return node;
+
+register_fail:
+ kfree(node);
+ return NULL;
+}
+
+static int mdss_mdp_wb_queue(struct msm_fb_data_type *mfd,
+ struct msmfb_data *data, int local)
+{
+ struct mdss_mdp_wb *wb = mfd->wb;
+ struct mdss_mdp_wb_data *node = NULL;
+ int ret = 0;
+
+ if (!wb) {
+ pr_err("unable to queue, writeback is not initialized\n");
+ return -ENODEV;
+ }
+
+ pr_debug("fb%d queue\n", wb->fb_ndx);
+
+ mutex_lock(&wb->lock);
+ if (local)
+ node = get_local_node(wb, data);
+ if (node == NULL)
+ node = get_user_node(mfd, data);
+
+ if (!node || node->state == IN_BUSY_QUEUE ||
+ node->state == IN_FREE_QUEUE) {
+ pr_err("memory not registered or Buffer already with us\n");
+ ret = -EINVAL;
+ } else {
+ list_add_tail(&node->active_entry, &wb->free_queue);
+ node->state = IN_FREE_QUEUE;
+ }
+ mutex_unlock(&wb->lock);
+
+ return ret;
+}
+
+static int is_buffer_ready(struct mdss_mdp_wb *wb)
+{
+ int rc;
+ mutex_lock(&wb->lock);
+ rc = !list_empty(&wb->busy_queue) || (wb->state == WB_STOPING);
+ mutex_unlock(&wb->lock);
+
+ return rc;
+}
+
+static int mdss_mdp_wb_dequeue(struct msm_fb_data_type *mfd,
+ struct msmfb_data *data)
+{
+ struct mdss_mdp_wb *wb = mfd->wb;
+ struct mdss_mdp_wb_data *node = NULL;
+ int ret;
+
+ if (!wb) {
+ pr_err("unable to dequeue, writeback is not initialized\n");
+ return -ENODEV;
+ }
+
+ ret = wait_event_interruptible(wb->wait_q, is_buffer_ready(wb));
+ if (ret) {
+ pr_err("failed to get dequeued buffer\n");
+ return -ENOBUFS;
+ }
+
+ mutex_lock(&wb->lock);
+ if (wb->state == WB_STOPING) {
+ pr_debug("wfd stopped\n");
+ wb->state = WB_STOP;
+ ret = -ENOBUFS;
+ } else if (!list_empty(&wb->busy_queue)) {
+ struct mdss_mdp_img_data *buf;
+ node = list_first_entry(&wb->busy_queue,
+ struct mdss_mdp_wb_data,
+ active_entry);
+ list_del(&node->active_entry);
+ node->state = WITH_CLIENT;
+ memcpy(data, &node->buf_info, sizeof(*data));
+
+ buf = &node->buf_data.p[0];
+ pr_debug("found node addr=%x len=%d\n", buf->addr, buf->len);
+ } else {
+ pr_debug("node is NULL, wait for next\n");
+ ret = -ENOBUFS;
+ }
+ mutex_unlock(&wb->lock);
+ return ret;
+}
+
+static void mdss_mdp_wb_callback(void *arg)
+{
+ if (arg)
+ complete((struct completion *) arg);
+}
+
+int mdss_mdp_wb_kickoff(struct mdss_mdp_ctl *ctl)
+{
+ struct mdss_mdp_wb *wb;
+ struct mdss_mdp_wb_data *node = NULL;
+ int ret = 0;
+ DECLARE_COMPLETION_ONSTACK(comp);
+ struct mdss_mdp_writeback_arg wb_args = {
+ .callback_fnc = mdss_mdp_wb_callback,
+ .priv_data = &comp,
+ };
+
+ if (!ctl || !ctl->mfd)
+ return -ENODEV;
+
+ mutex_lock(&mdss_mdp_wb_buf_lock);
+ wb = ctl->mfd->wb;
+ if (wb) {
+ mutex_lock(&wb->lock);
+ if (!list_empty(&wb->free_queue) && wb->state != WB_STOPING &&
+ wb->state != WB_STOP) {
+ node = list_first_entry(&wb->free_queue,
+ struct mdss_mdp_wb_data,
+ active_entry);
+ list_del(&node->active_entry);
+ node->state = IN_BUSY_QUEUE;
+ wb_args.data = &node->buf_data;
+ } else {
+ pr_debug("unable to get buf wb state=%d\n", wb->state);
+ }
+ mutex_unlock(&wb->lock);
+ }
+
+ if (wb_args.data == NULL)
+ wb_args.data = mdss_mdp_wb_debug_buffer(ctl->mfd);
+
+ if (wb_args.data == NULL) {
+ pr_err("unable to get writeback buf ctl=%d\n", ctl->num);
+ ret = -ENOMEM;
+ goto kickoff_fail;
+ }
+
+ ret = mdss_mdp_display_commit(ctl, &wb_args);
+ if (ret) {
+ pr_err("error on commit ctl=%d\n", ctl->num);
+ goto kickoff_fail;
+ }
+
+ wait_for_completion_interruptible(&comp);
+ if (wb && node) {
+ mutex_lock(&wb->lock);
+ list_add_tail(&node->active_entry, &wb->busy_queue);
+ mutex_unlock(&wb->lock);
+ wake_up(&wb->wait_q);
+ }
+
+kickoff_fail:
+ mutex_unlock(&mdss_mdp_wb_buf_lock);
+ return ret;
+}
+
+int mdss_mdp_wb_ioctl_handler(struct msm_fb_data_type *mfd, u32 cmd, void *arg)
+{
+ struct msmfb_data data;
+ int ret = -ENOSYS;
+
+ switch (cmd) {
+ case MSMFB_WRITEBACK_INIT:
+ ret = mdss_mdp_wb_init(mfd);
+ break;
+ case MSMFB_WRITEBACK_START:
+ ret = mdss_mdp_wb_start(mfd);
+ break;
+ case MSMFB_WRITEBACK_STOP:
+ ret = mdss_mdp_wb_stop(mfd);
+ break;
+ case MSMFB_WRITEBACK_QUEUE_BUFFER:
+ if (!copy_from_user(&data, arg, sizeof(data))) {
+ ret = mdss_mdp_wb_queue(mfd, &data, false);
+ } else {
+ pr_err("wb queue buf failed on copy_from_user\n");
+ ret = -EFAULT;
+ }
+ break;
+ case MSMFB_WRITEBACK_DEQUEUE_BUFFER:
+ if (!copy_from_user(&data, arg, sizeof(data))) {
+ ret = mdss_mdp_wb_dequeue(mfd, &data);
+ if (!ret && copy_to_user(arg, &data, sizeof(data)))
+ ret = -EFAULT;
+ } else {
+ pr_err("wb dequeue buf failed on copy_from_user\n");
+ ret = -EFAULT;
+ }
+ break;
+ case MSMFB_WRITEBACK_TERMINATE:
+ ret = mdss_mdp_wb_terminate(mfd);
+ break;
+ }
+
+ return ret;
+}
+
+int msm_fb_writeback_start(struct fb_info *info)
+{
+ struct msm_fb_data_type *mfd = (struct msm_fb_data_type *) info->par;
+
+ if (!mfd)
+ return -ENODEV;
+
+ return mdss_mdp_wb_start(mfd);
+}
+EXPORT_SYMBOL(msm_fb_writeback_start);
+
+int msm_fb_writeback_queue_buffer(struct fb_info *info,
+ struct msmfb_data *data)
+{
+ struct msm_fb_data_type *mfd = (struct msm_fb_data_type *) info->par;
+
+ if (!mfd)
+ return -ENODEV;
+
+ return mdss_mdp_wb_queue(mfd, data, true);
+}
+EXPORT_SYMBOL(msm_fb_writeback_queue_buffer);
+
+int msm_fb_writeback_dequeue_buffer(struct fb_info *info,
+ struct msmfb_data *data)
+{
+ struct msm_fb_data_type *mfd = (struct msm_fb_data_type *) info->par;
+
+ if (!mfd)
+ return -ENODEV;
+
+ return mdss_mdp_wb_dequeue(mfd, data);
+}
+EXPORT_SYMBOL(msm_fb_writeback_dequeue_buffer);
+
+int msm_fb_writeback_stop(struct fb_info *info)
+{
+ struct msm_fb_data_type *mfd = (struct msm_fb_data_type *) info->par;
+
+ if (!mfd)
+ return -ENODEV;
+
+ return mdss_mdp_wb_stop(mfd);
+}
+EXPORT_SYMBOL(msm_fb_writeback_stop);
+
+int msm_fb_writeback_init(struct fb_info *info)
+{
+ struct msm_fb_data_type *mfd = (struct msm_fb_data_type *) info->par;
+
+ if (!mfd)
+ return -ENODEV;
+
+ return mdss_mdp_wb_init(mfd);
+}
+EXPORT_SYMBOL(msm_fb_writeback_init);
+
+int msm_fb_writeback_terminate(struct fb_info *info)
+{
+ struct msm_fb_data_type *mfd = (struct msm_fb_data_type *) info->par;
+
+ if (!mfd)
+ return -ENODEV;
+
+ return mdss_mdp_wb_terminate(mfd);
+}
+EXPORT_SYMBOL(msm_fb_writeback_terminate);
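
The ioctl handler and the msm_fb_writeback_* exports above give userspace (WFD) and in-kernel clients two entry points into this writeback path. The fragment below is a hypothetical userspace sketch of the ioctl sequence only, not part of the change: the fb node path, the pre-allocated ION buffer fd and the <linux/msm_mdp.h> definitions of struct msmfb_data and the MSMFB_WRITEBACK_* numbers are assumptions about the surrounding tree, and an overlay commit on the writeback fb is still needed to actually produce the frame.

/* Hypothetical userspace sketch: one capture via the ioctls handled by
 * mdss_mdp_wb_ioctl_handler().  fb_path and buf_fd are placeholders. */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/msm_mdp.h>	/* struct msmfb_data, MSMFB_WRITEBACK_* */

int capture_one_frame(const char *fb_path, int buf_fd)
{
	struct msmfb_data buf;
	int ret, fb = open(fb_path, O_RDWR);

	if (fb < 0)
		return -1;

	ioctl(fb, MSMFB_WRITEBACK_INIT, NULL);
	ioctl(fb, MSMFB_WRITEBACK_START, NULL);

	memset(&buf, 0, sizeof(buf));
	buf.memory_id = buf_fd;	/* resolved in-kernel by mdss_mdp_get_img() */

	ret = ioctl(fb, MSMFB_WRITEBACK_QUEUE_BUFFER, &buf);
	if (!ret)
		/* blocks until the frame reaches the busy queue (or on stop) */
		ret = ioctl(fb, MSMFB_WRITEBACK_DEQUEUE_BUFFER, &buf);

	ioctl(fb, MSMFB_WRITEBACK_STOP, NULL);
	ioctl(fb, MSMFB_WRITEBACK_TERMINATE, NULL);
	close(fb);
	return ret;
}
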
diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c
index 18ee3e6..81a6e50 100644
--- a/drivers/video/msm/msm_fb.c
+++ b/drivers/video/msm/msm_fb.c
@@ -1326,7 +1326,7 @@
fbi->fix.smem_start = (unsigned long)fbram_phys;
msm_iommu_map_contig_buffer(fbi->fix.smem_start,
- DISPLAY_READ_DOMAIN,
+ DISPLAY_DOMAIN,
GEN_POOL,
fbi->fix.smem_len,
SZ_4K,
@@ -1334,7 +1334,7 @@
&(mfd->display_iova));
msm_iommu_map_contig_buffer(fbi->fix.smem_start,
- ROTATOR_SRC_DOMAIN,
+ ROTATOR_DOMAIN,
GEN_POOL,
fbi->fix.smem_len,
SZ_4K,
diff --git a/drivers/video/msm/msm_fb.h b/drivers/video/msm/msm_fb.h
index c9eb7dd..0658365 100644
--- a/drivers/video/msm/msm_fb.h
+++ b/drivers/video/msm/msm_fb.h
@@ -60,6 +60,7 @@
struct list_head registered_entry;
struct list_head active_entry;
void *addr;
+ struct ion_handle *ihdl;
struct file *pmem_file;
struct msmfb_data buf_info;
struct msmfb_img img;
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c
index 6fd5656..d7ebd54 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c
@@ -168,6 +168,28 @@
} else if (operation == DDL_DPB_OP_MARK_FREE) {
dpb_mask->client_mask |= (0x1 << loopc);
*found_frame = *in_out_frame;
+ if ((decoder->meta_data_enable_flag) &&
+ (in_out_frame->vcd_frm.buff_ion_handle)) {
+ struct ddl_context *ddl_context =
+ ddl_get_context();
+ unsigned long *vaddr =
+ (unsigned long *)((u32)
+ in_out_frame->vcd_frm.virtual +
+ decoder->meta_data_offset);
+ DDL_MSG_LOW("%s: Cache clean: vaddr"\
+ " (%p), offset %u, size %u",
+ __func__,
+ in_out_frame->vcd_frm.virtual,
+ decoder->meta_data_offset,
+ decoder->suffix);
+ msm_ion_do_cache_op(
+ ddl_context->video_ion_client,
+ in_out_frame->vcd_frm.\
+ buff_ion_handle,
+ vaddr,
+ (unsigned long)decoder->suffix,
+ ION_IOC_CLEAN_CACHES);
+ }
}
} else {
in_out_frame->vcd_frm.physical = NULL;
diff --git a/drivers/video/msm/vidc/720p/ddl/vcd_ddl_errors.c b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_errors.c
index ac5bce9..91136f3 100644
--- a/drivers/video/msm/vidc/720p/ddl/vcd_ddl_errors.c
+++ b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_errors.c
@@ -391,8 +391,10 @@
ddl->decoding &&
!ddl->codec_data.decoder.header_in_start &&
!ddl->codec_data.decoder.dec_disp_info.img_size_x &&
- !ddl->codec_data.decoder.dec_disp_info.img_size_y
- ) {
+ !ddl->codec_data.decoder.dec_disp_info.img_size_y &&
+ !eos) {
+ DBG("Treat header in start error %u as success",
+ vcd_status);
/* this is first frame seq. header only case */
vcd_status = VCD_S_SUCCESS;
ddl->input_frame.vcd_frm.flags |=
@@ -426,9 +428,10 @@
}
/* if it is decoder EOS case */
- if (ddl->decoding && eos)
+ if (ddl->decoding && eos) {
+ DBG("DEC-EOS_RUN");
ddl_decode_eos_run(ddl);
- else
+ } else
DDL_IDLE(ddl_context);
return true;
diff --git a/drivers/video/msm/vidc/720p/ddl/vcd_ddl_interrupt_handler.c b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_interrupt_handler.c
index 5fa9b09..fe71dc1 100644
--- a/drivers/video/msm/vidc/720p/ddl/vcd_ddl_interrupt_handler.c
+++ b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_interrupt_handler.c
@@ -290,8 +290,6 @@
decoder->client_output_buf_req.actual_count
&& decoder->progressive_only)
need_reconfig = false;
- if (input_vcd_frm->flags & VCD_FRAME_FLAG_EOS)
- need_reconfig = false;
if ((input_vcd_frm->data_len <= seq_hdr_info.dec_frm_size ||
(input_vcd_frm->flags & VCD_FRAME_FLAG_CODECCONFIG)) &&
(!need_reconfig ||
diff --git a/drivers/video/msm/vidc/common/dec/vdec.c b/drivers/video/msm/vidc/common/dec/vdec.c
index 3076aa1..ed8b452 100644
--- a/drivers/video/msm/vidc/common/dec/vdec.c
+++ b/drivers/video/msm/vidc/common/dec/vdec.c
@@ -348,10 +348,15 @@
pmem_fd, kernel_vaddr, buffer_index,
&buff_handle);
if (ion_flag == CACHED && buff_handle) {
+ DBG("%s: Cache invalidate: vaddr (%p), "\
+ "size %u\n", __func__,
+ (void *)kernel_vaddr,
+ vcd_frame_data->alloc_len);
msm_ion_do_cache_op(client_ctx->user_ion_client,
buff_handle,
(unsigned long *) kernel_vaddr,
- (unsigned long)vcd_frame_data->data_len,
+ (unsigned long)vcd_frame_data->\
+ alloc_len,
ION_IOC_INV_CACHES);
}
}
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_sub.c b/drivers/video/msm/vidc/common/vcd/vcd_sub.c
index f79a147..5b64f20 100644
--- a/drivers/video/msm/vidc/common/vcd/vcd_sub.c
+++ b/drivers/video/msm/vidc/common/vcd/vcd_sub.c
@@ -2045,10 +2045,12 @@
transc->in_use = true;
if ((codec_config &&
(status != VCD_ERR_BITSTREAM_ERR)) ||
- ((status == VCD_ERR_BITSTREAM_ERR) &&
+ (codec_config && (status == VCD_ERR_BITSTREAM_ERR) &&
!(cctxt->status.mask & VCD_FIRST_IP_DONE) &&
- (core_type == VCD_CORE_720P)))
+ (core_type == VCD_CORE_720P))) {
+ VCD_MSG_HIGH("handle EOS for codec config");
vcd_handle_eos_done(cctxt, transc, VCD_S_SUCCESS);
+ }
}
return rc;
}
diff --git a/include/linux/android_pmem.h b/include/linux/android_pmem.h
index ab96379..cfca491 100644
--- a/include/linux/android_pmem.h
+++ b/include/linux/android_pmem.h
@@ -108,26 +108,6 @@
PMEM_ALLOCATORTYPE_MAX,
};
-#define PMEM_MEMTYPE_MASK 0x7
-#define PMEM_INVALID_MEMTYPE 0x0
-#define PMEM_MEMTYPE_EBI1 0x1
-#define PMEM_MEMTYPE_SMI 0x2
-#define PMEM_MEMTYPE_RESERVED_INVALID2 0x3
-#define PMEM_MEMTYPE_RESERVED_INVALID3 0x4
-#define PMEM_MEMTYPE_RESERVED_INVALID4 0x5
-#define PMEM_MEMTYPE_RESERVED_INVALID5 0x6
-#define PMEM_MEMTYPE_RESERVED_INVALID6 0x7
-
-#define PMEM_ALIGNMENT_MASK 0x18
-#define PMEM_ALIGNMENT_RESERVED_INVALID1 0x0
-#define PMEM_ALIGNMENT_4K 0x8 /* the default */
-#define PMEM_ALIGNMENT_1M 0x10
-#define PMEM_ALIGNMENT_RESERVED_INVALID2 0x18
-
-/* flags in the following function defined as above. */
-int32_t pmem_kalloc(const size_t size, const uint32_t flags);
-int32_t pmem_kfree(const int32_t physaddr);
-
/* kernel api names for board specific data structures */
#define PMEM_KERNEL_EBI1_DATA_NAME "pmem_kernel_ebi1"
#define PMEM_KERNEL_SMI_DATA_NAME "pmem_kernel_smi"
diff --git a/arch/arm/mach-msm/include/mach/stm.h b/include/linux/coresight-stm.h
similarity index 100%
rename from arch/arm/mach-msm/include/mach/stm.h
rename to include/linux/coresight-stm.h
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
new file mode 100644
index 0000000..f03a493
--- /dev/null
+++ b/include/linux/coresight.h
@@ -0,0 +1,132 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_CORESIGHT_H
+#define _LINUX_CORESIGHT_H
+
+#include <linux/device.h>
+
+/* Peripheral id registers (0xFD0-0xFEC) */
+#define CORESIGHT_PERIPHIDR4 (0xFD0)
+#define CORESIGHT_PERIPHIDR5 (0xFD4)
+#define CORESIGHT_PERIPHIDR6 (0xFD8)
+#define CORESIGHT_PERIPHIDR7 (0xFDC)
+#define CORESIGHT_PERIPHIDR0 (0xFE0)
+#define CORESIGHT_PERIPHIDR1 (0xFE4)
+#define CORESIGHT_PERIPHIDR2 (0xFE8)
+#define CORESIGHT_PERIPHIDR3 (0xFEC)
+/* Component id registers (0xFF0-0xFFC) */
+#define CORESIGHT_COMPIDR0 (0xFF0)
+#define CORESIGHT_COMPIDR1 (0xFF4)
+#define CORESIGHT_COMPIDR2 (0xFF8)
+#define CORESIGHT_COMPIDR3 (0xFFC)
+
+/* DBGv7 with baseline CP14 registers implemented */
+#define ARM_DEBUG_ARCH_V7B (0x3)
+/* DBGv7 with all CP14 registers implemented */
+#define ARM_DEBUG_ARCH_V7 (0x4)
+#define ARM_DEBUG_ARCH_V7_1 (0x5)
+#define ETM_ARCH_V3_3 (0x23)
+#define PFT_ARCH_V1_1 (0x31)
+
+enum coresight_clk_rate {
+ CORESIGHT_CLK_RATE_OFF,
+ CORESIGHT_CLK_RATE_TRACE,
+ CORESIGHT_CLK_RATE_HSTRACE,
+};
+
+enum coresight_dev_type {
+ CORESIGHT_DEV_TYPE_SINK,
+ CORESIGHT_DEV_TYPE_LINK,
+ CORESIGHT_DEV_TYPE_SOURCE,
+ CORESIGHT_DEV_TYPE_MAX,
+};
+
+struct coresight_connection {
+ int child_id;
+ int child_port;
+ struct coresight_device *child_dev;
+ struct list_head link;
+};
+
+struct coresight_device {
+ int id;
+ struct coresight_connection *conns;
+ int nr_conns;
+ const struct coresight_ops *ops;
+ struct device dev;
+ struct mutex mutex;
+ int *refcnt;
+ struct list_head link;
+ struct module *owner;
+ bool enable;
+};
+
+#define to_coresight_device(d) container_of(d, struct coresight_device, dev)
+
+struct coresight_ops {
+ int (*enable)(struct coresight_device *csdev, int port);
+ void (*disable)(struct coresight_device *csdev, int port);
+};
+
+struct coresight_platform_data {
+ int id;
+ const char *name;
+ int nr_ports;
+ int *child_ids;
+ int *child_ports;
+ int nr_children;
+};
+
+struct coresight_desc {
+ enum coresight_dev_type type;
+ const struct coresight_ops *ops;
+ struct coresight_platform_data *pdata;
+ struct device *dev;
+ const struct attribute_group **groups;
+ struct module *owner;
+};
+
+struct qdss_source {
+ struct list_head link;
+ const char *name;
+ uint32_t fport_mask;
+};
+
+struct msm_qdss_platform_data {
+ struct qdss_source *src_table;
+ size_t size;
+ uint8_t afamily;
+};
+
+
+extern struct coresight_device *
+coresight_register(struct coresight_desc *desc);
+extern void coresight_unregister(struct coresight_device *csdev);
+extern int coresight_enable(struct coresight_device *csdev, int port);
+extern void coresight_disable(struct coresight_device *csdev, int port);
+
+#ifdef CONFIG_MSM_QDSS
+extern struct qdss_source *qdss_get(const char *name);
+extern void qdss_put(struct qdss_source *src);
+extern int qdss_enable(struct qdss_source *src);
+extern void qdss_disable(struct qdss_source *src);
+extern void qdss_disable_sink(void);
+#else
+static inline struct qdss_source *qdss_get(const char *name) { return NULL; }
+static inline void qdss_put(struct qdss_source *src) {}
+static inline int qdss_enable(struct qdss_source *src) { return -ENOSYS; }
+static inline void qdss_disable(struct qdss_source *src) {}
+static inline void qdss_disable_sink(void) {}
+#endif
+
+#endif
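As a rough usage sketch of the interface above (not something this patch adds), a hypothetical trace-sink driver could register itself with the coresight framework roughly as follows; the foo_* names, the platform_device probe context, and the NULL-on-failure convention for coresight_register() are assumptions:

#include <linux/coresight.h>
#include <linux/platform_device.h>

static int foo_sink_enable(struct coresight_device *csdev, int port)
{
	/* program the sink hardware to start capturing trace */
	return 0;
}

static void foo_sink_disable(struct coresight_device *csdev, int port)
{
	/* stop the sink hardware */
}

static const struct coresight_ops foo_sink_cs_ops = {
	.enable		= foo_sink_enable,
	.disable	= foo_sink_disable,
};

static int foo_sink_probe(struct platform_device *pdev)
{
	struct coresight_desc desc = {
		.type	= CORESIGHT_DEV_TYPE_SINK,
		.ops	= &foo_sink_cs_ops,
		.pdata	= pdev->dev.platform_data,
		.dev	= &pdev->dev,
		.owner	= THIS_MODULE,
	};
	struct coresight_device *csdev;

	csdev = coresight_register(&desc);
	if (!csdev)		/* assumed failure convention */
		return -ENODEV;
	/* keep csdev around so remove() can call coresight_unregister() */
	platform_set_drvdata(pdev, csdev);
	return 0;
}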
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
index 537960b..4ff1147 100644
--- a/include/linux/diagchar.h
+++ b/include/linux/diagchar.h
@@ -108,10 +108,10 @@
/* This needs to be modified manually now, when we add
a new RANGE of SSIDs to the msg_mask_tbl */
#define MSG_MASK_TBL_CNT 23
-#define EVENT_LAST_ID 0x083F
+#define EVENT_LAST_ID 0x08AD
#define MSG_SSID_0 0
-#define MSG_SSID_0_LAST 90
+#define MSG_SSID_0_LAST 91
#define MSG_SSID_1 500
#define MSG_SSID_1_LAST 506
#define MSG_SSID_2 1000
@@ -125,7 +125,7 @@
#define MSG_SSID_6 4500
#define MSG_SSID_6_LAST 4526
#define MSG_SSID_7 4600
-#define MSG_SSID_7_LAST 4612
+#define MSG_SSID_7_LAST 4613
#define MSG_SSID_8 5000
#define MSG_SSID_8_LAST 5029
#define MSG_SSID_9 5500
@@ -271,6 +271,7 @@
MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
MSG_LVL_LOW,
MSG_LVL_MED,
+ MSG_LVL_LOW,
MSG_LVL_LOW
};
@@ -383,6 +384,7 @@
MSG_LVL_MED,
MSG_LVL_MED,
MSG_LVL_MED,
+ MSG_LVL_LOW,
MSG_LVL_LOW
};
diff --git a/include/linux/i2c/isa1200.h b/include/linux/i2c/isa1200.h
index 9dab3eb..ffadf96 100644
--- a/include/linux/i2c/isa1200.h
+++ b/include/linux/i2c/isa1200.h
@@ -49,6 +49,7 @@
bool smart_en; /* smart mode enable/disable */
bool is_erm;
bool ext_clk_en;
+ bool need_pwm_clk;
unsigned int chip_en;
unsigned int duty;
struct isa1200_regulator *regulator_info;
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index ed6bb39..c65740d 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -9,106 +9,178 @@
* representation into a hardware irq number that can be mapped back to a
* Linux irq number without any extra platform support code.
*
- * irq_domain is expected to be embedded in an interrupt controller's private
- * data structure.
+ * Interrupt controller "domain" data structure. This could be defined as an
+ * irq domain controller. That is, it handles the mapping between hardware
+ * and virtual interrupt numbers for a given interrupt domain. The domain
+ * structure is generally created by the PIC code for a given PIC instance
+ * (though a domain can cover more than one PIC if they have a flat number
+ * model). It's the domain callbacks that are responsible for setting the
+ * irq_chip on a given irq_desc after it's been mapped.
+ *
+ * The host code and data structures are agnostic to whether or not
+ * we use an open firmware device-tree. We do have references to struct
+ * device_node in two places: in irq_find_host() to find the host matching
+ * a given interrupt controller node, and of course as an argument to its
+ * counterpart domain->ops->match() callback. However, those are treated as
+ * generic pointers by the core and the fact that it's actually a device-node
+ * pointer is purely a convention between callers and implementation. This
+ * code could thus be used on other architectures by replacing those two
+ * by some sort of arch-specific void * "token" used to identify interrupt
+ * controllers.
*/
+
#ifndef _LINUX_IRQDOMAIN_H
#define _LINUX_IRQDOMAIN_H
-#include <linux/irq.h>
-#include <linux/mod_devicetable.h>
+#include <linux/types.h>
+#include <linux/radix-tree.h>
-#ifdef CONFIG_IRQ_DOMAIN
struct device_node;
struct irq_domain;
+struct of_device_id;
+
+/* Number of irqs reserved for a legacy isa controller */
+#define NUM_ISA_INTERRUPTS 16
/**
* struct irq_domain_ops - Methods for irq_domain objects
- * @to_irq: (optional) given a local hardware irq number, return the linux
- * irq number. If to_irq is not implemented, then the irq_domain
- * will use this translation: irq = (domain->irq_base + hwirq)
- * @dt_translate: Given a device tree node and interrupt specifier, decode
- * the hardware irq number and linux irq type value.
+ * @match: Match an interrupt controller device node to a host, returns
+ * 1 on a match
+ * @map: Create or update a mapping between a virtual irq number and a hw
+ * irq number. This is called only once for a given mapping.
+ * @unmap: Dispose of such a mapping
+ * @xlate: Given a device tree node and interrupt specifier, decode
+ * the hardware irq number and linux irq type value.
+ *
+ * Functions below are provided by the driver and called whenever a new mapping
+ * is created or an old mapping is disposed. The driver can then proceed to
+ * whatever internal data structures management is required. It also needs
+ * to setup the irq_desc when returning from map().
*/
struct irq_domain_ops {
- unsigned int (*to_irq)(struct irq_domain *d, unsigned long hwirq);
-
-#ifdef CONFIG_OF
- int (*dt_translate)(struct irq_domain *d, struct device_node *node,
- const u32 *intspec, unsigned int intsize,
- unsigned long *out_hwirq, unsigned int *out_type);
-#endif /* CONFIG_OF */
+ int (*match)(struct irq_domain *d, struct device_node *node);
+ int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw);
+ void (*unmap)(struct irq_domain *d, unsigned int virq);
+ int (*xlate)(struct irq_domain *d, struct device_node *node,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq, unsigned int *out_type);
};
/**
* struct irq_domain - Hardware interrupt number translation object
- * @list: Element in global irq_domain list.
+ * @link: Element in global irq_domain list.
+ * @revmap_type: Method used for reverse mapping hwirq numbers to linux irq. This
+ * will be one of the IRQ_DOMAIN_MAP_* values.
+ * @revmap_data: Revmap method specific data.
+ * @ops: pointer to irq_domain methods
+ * @host_data: private data pointer for use by owner. Not touched by irq_domain
+ * core code.
* @irq_base: Start of irq_desc range assigned to the irq_domain. The creator
* of the irq_domain is responsible for allocating the array of
* irq_desc structures.
* @nr_irq: Number of irqs managed by the irq domain
* @hwirq_base: Starting number for hwirqs managed by the irq domain
- * @ops: pointer to irq_domain methods
- * @priv: private data pointer for use by owner. Not touched by irq_domain
- * core code.
* @of_node: (optional) Pointer to device tree nodes associated with the
* irq_domain. Used when decoding device tree interrupt specifiers.
*/
struct irq_domain {
- struct list_head list;
- unsigned int irq_base;
- unsigned int nr_irq;
- unsigned int hwirq_base;
+ struct list_head link;
+
+ /* type of reverse mapping technique */
+ unsigned int revmap_type;
+ union {
+ struct {
+ unsigned int size;
+ unsigned int first_irq;
+ irq_hw_number_t first_hwirq;
+ } legacy;
+ struct {
+ unsigned int size;
+ unsigned int *revmap;
+ } linear;
+ struct {
+ unsigned int max_irq;
+ } nomap;
+ struct radix_tree_root tree;
+ } revmap_data;
const struct irq_domain_ops *ops;
- void *priv;
+ void *host_data;
+ irq_hw_number_t inval_irq;
+
+ /* Optional device node pointer */
struct device_node *of_node;
};
-/**
- * irq_domain_to_irq() - Translate from a hardware irq to a linux irq number
- *
- * Returns the linux irq number associated with a hardware irq. By default,
- * the mapping is irq == domain->irq_base + hwirq, but this mapping can
- * be overridden if the irq_domain implements a .to_irq() hook.
- */
-static inline unsigned int irq_domain_to_irq(struct irq_domain *d,
- unsigned long hwirq)
+#ifdef CONFIG_IRQ_DOMAIN
+struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
+ unsigned int size,
+ unsigned int first_irq,
+ irq_hw_number_t first_hwirq,
+ const struct irq_domain_ops *ops,
+ void *host_data);
+struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
+ unsigned int size,
+ const struct irq_domain_ops *ops,
+ void *host_data);
+struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
+ unsigned int max_irq,
+ const struct irq_domain_ops *ops,
+ void *host_data);
+struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
+ const struct irq_domain_ops *ops,
+ void *host_data);
+
+extern struct irq_domain *irq_find_host(struct device_node *node);
+extern void irq_set_default_host(struct irq_domain *host);
+
+static inline struct irq_domain *irq_domain_add_legacy_isa(
+ struct device_node *of_node,
+ const struct irq_domain_ops *ops,
+ void *host_data)
{
- if (d->ops->to_irq)
- return d->ops->to_irq(d, hwirq);
- if (WARN_ON(hwirq < d->hwirq_base))
- return 0;
- return d->irq_base + hwirq - d->hwirq_base;
+ return irq_domain_add_legacy(of_node, NUM_ISA_INTERRUPTS, 0, 0, ops,
+ host_data);
}
+extern struct irq_domain *irq_find_host(struct device_node *node);
+extern void irq_set_default_host(struct irq_domain *host);
-#define irq_domain_for_each_hwirq(d, hw) \
- for (hw = d->hwirq_base; hw < d->hwirq_base + d->nr_irq; hw++)
-#define irq_domain_for_each_irq(d, hw, irq) \
- for (hw = d->hwirq_base, irq = irq_domain_to_irq(d, hw); \
- hw < d->hwirq_base + d->nr_irq; \
- hw++, irq = irq_domain_to_irq(d, hw))
-
+extern unsigned int irq_create_mapping(struct irq_domain *host,
+ irq_hw_number_t hwirq);
extern void irq_dispose_mapping(unsigned int virq);
+extern unsigned int irq_find_mapping(struct irq_domain *host,
+ irq_hw_number_t hwirq);
+extern unsigned int irq_create_direct_mapping(struct irq_domain *host);
+extern void irq_radix_revmap_insert(struct irq_domain *host, unsigned int virq,
+ irq_hw_number_t hwirq);
+extern unsigned int irq_radix_revmap_lookup(struct irq_domain *host,
+ irq_hw_number_t hwirq);
+extern unsigned int irq_linear_revmap(struct irq_domain *host,
+ irq_hw_number_t hwirq);
-extern int irq_domain_add(struct irq_domain *domain);
-extern void irq_domain_del(struct irq_domain *domain);
-extern void irq_domain_register(struct irq_domain *domain);
-extern void irq_domain_register_irq(struct irq_domain *domain, int hwirq);
-extern void irq_domain_unregister(struct irq_domain *domain);
-extern void irq_domain_unregister_irq(struct irq_domain *domain, int hwirq);
-extern int irq_domain_find_free_range(unsigned int from, unsigned int cnt);
+extern const struct irq_domain_ops irq_domain_simple_ops;
-extern struct irq_domain_ops irq_domain_simple_ops;
-#endif /* CONFIG_IRQ_DOMAIN */
+/* stock xlate functions */
+int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_type);
+int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_type);
+int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_type);
-#if defined(CONFIG_IRQ_DOMAIN) && defined(CONFIG_OF_IRQ)
-extern void irq_domain_add_simple(struct device_node *controller, int irq_base);
+#if defined(CONFIG_OF_IRQ)
extern void irq_domain_generate_simple(const struct of_device_id *match,
u64 phys_base, unsigned int irq_start);
-#else /* CONFIG_IRQ_DOMAIN && CONFIG_OF_IRQ */
+#else /* CONFIG_OF_IRQ */
static inline void irq_domain_generate_simple(const struct of_device_id *match,
u64 phys_base, unsigned int irq_start) { }
-#endif /* CONFIG_IRQ_DOMAIN && CONFIG_OF_IRQ */
+#endif /* !CONFIG_OF_IRQ */
+
+#else /* CONFIG_IRQ_DOMAIN */
+static inline void irq_dispose_mapping(unsigned int virq) { }
+#endif /* !CONFIG_IRQ_DOMAIN */
#endif /* _LINUX_IRQDOMAIN_H */
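To make the reworked irq_domain API above concrete, here is a minimal sketch (not part of the patch) of how an interrupt controller driver could create a linear domain and map a hardware irq; foo_irq_chip, the 32-interrupt size, and hwirq 5 are placeholders:

#include <linux/irq.h>
#include <linux/irqdomain.h>

static struct irq_chip foo_irq_chip;	/* placeholder chip implementation */

static int foo_irq_map(struct irq_domain *d, unsigned int virq,
		       irq_hw_number_t hw)
{
	/* called once per mapping: attach the chip and flow handler */
	irq_set_chip_and_handler(virq, &foo_irq_chip, handle_level_irq);
	irq_set_chip_data(virq, d->host_data);
	return 0;
}

static const struct irq_domain_ops foo_domain_ops = {
	.map	= foo_irq_map,
	.xlate	= irq_domain_xlate_onecell,	/* one-cell DT specifiers */
};

static int foo_intc_init(struct device_node *node)
{
	struct irq_domain *domain;

	domain = irq_domain_add_linear(node, 32, &foo_domain_ops, NULL);
	if (!domain)
		return -ENOMEM;

	/* create (or look up) the Linux irq for hwirq 5 */
	return irq_create_mapping(domain, 5);
}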
diff --git a/include/linux/mfd/wcd9xxx/wcd9xxx-slimslave.h b/include/linux/mfd/wcd9xxx/wcd9xxx-slimslave.h
index fcd3bd3..93c21ce 100644
--- a/include/linux/mfd/wcd9xxx/wcd9xxx-slimslave.h
+++ b/include/linux/mfd/wcd9xxx/wcd9xxx-slimslave.h
@@ -99,4 +99,5 @@
int wcd9xxx_get_channel(struct wcd9xxx *wcd9xxx,
unsigned int *rx_ch,
unsigned int *tx_ch);
+int wcd9xxx_get_slave_port(unsigned int ch_num);
#endif /* __WCD9310_SLIMSLAVE_H_ */
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 447fbbb..05a6b5b 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -241,7 +241,6 @@
#define MMC_CAP2_BROKEN_VOLTAGE (1 << 7) /* Use the broken voltage */
#define MMC_CAP2_DETECT_ON_ERR (1 << 8) /* On I/O err check card removal */
#define MMC_CAP2_HC_ERASE_SZ (1 << 9) /* High-capacity erase size */
-#define MMC_CAP2_POWER_OFF_VCCQ_DURING_SUSPEND (1 << 10)
#define MMC_CAP2_PACKED_RD (1 << 10) /* Allow packed read */
#define MMC_CAP2_PACKED_WR (1 << 11) /* Allow packed write */
#define MMC_CAP2_PACKED_CMD (MMC_CAP2_PACKED_RD | \
@@ -250,6 +249,7 @@
#define MMC_CAP2_SANITIZE (1 << 13) /* Support Sanitize */
#define MMC_CAP2_BKOPS (1 << 14) /* BKOPS supported */
#define MMC_CAP2_INIT_BKOPS (1 << 15) /* Need to set BKOPS_EN */
+#define MMC_CAP2_POWER_OFF_VCCQ_DURING_SUSPEND (1 << 16)
mmc_pm_flag_t pm_caps; /* supported pm features */
diff --git a/include/linux/msm_rotator.h b/include/linux/msm_rotator.h
index 6cfbb35..0f15a8b 100644
--- a/include/linux/msm_rotator.h
+++ b/include/linux/msm_rotator.h
@@ -31,7 +31,6 @@
unsigned char rotations;
int enable;
unsigned int downscale_ratio;
- unsigned int secure;
};
struct msm_rotator_data_info {
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 643c80e..32d8ec2 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -216,6 +216,7 @@
extern int power_supply_set_battery_charged(struct power_supply *psy);
extern int power_supply_set_current_limit(struct power_supply *psy, int limit);
extern int power_supply_set_online(struct power_supply *psy, bool enable);
+extern int power_supply_set_scope(struct power_supply *psy, int scope);
extern int power_supply_set_charge_type(struct power_supply *psy, int type);
#if defined(CONFIG_POWER_SUPPLY) || defined(CONFIG_POWER_SUPPLY_MODULE)
diff --git a/include/linux/qpnp/gpio.h b/include/linux/qpnp/gpio.h
deleted file mode 100644
index e7fb53e..0000000
--- a/include/linux/qpnp/gpio.h
+++ /dev/null
@@ -1,118 +0,0 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <mach/qpnp.h>
-
-#define QPNP_GPIO_DIR_IN 0
-#define QPNP_GPIO_DIR_OUT 1
-#define QPNP_GPIO_DIR_BOTH 2
-
-#define QPNP_GPIO_INVERT_DISABLE 0
-#define QPNP_GPIO_INVERT_ENABLE 1
-
-#define QPNP_GPIO_OUT_BUF_CMOS 0
-#define QPNP_GPIO_OUT_BUF_OPEN_DRAIN_NMOS 1
-#define QPNP_GPIO_OUT_BUF_OPEN_DRAIN_PMOS 2
-
-#define QPNP_GPIO_VIN0 0
-#define QPNP_GPIO_VIN1 1
-#define QPNP_GPIO_VIN2 2
-#define QPNP_GPIO_VIN3 3
-#define QPNP_GPIO_VIN4 4
-#define QPNP_GPIO_VIN5 5
-#define QPNP_GPIO_VIN6 6
-#define QPNP_GPIO_VIN7 7
-
-#define QPNP_GPIO_PULL_UP_30 0
-#define QPNP_GPIO_PULL_UP_1P5 1
-#define QPNP_GPIO_PULL_UP_31P5 2
-#define QPNP_GPIO_PULL_UP_1P5_30 3
-#define QPNP_GPIO_PULL_DN 4
-#define QPNP_GPIO_PULL_NO 5
-
-#define QPNP_GPIO_OUT_STRENGTH_LOW 1
-#define QPNP_GPIO_OUT_STRENGTH_MED 2
-#define QPNP_GPIO_OUT_STRENGTH_HIGH 3
-
-#define QPNP_GPIO_SRC_FUNC_NORMAL 0
-#define QPNP_GPIO_SRC_FUNC_PAIRED 1
-#define QPNP_GPIO_SRC_FUNC_1 2
-#define QPNP_GPIO_SRC_FUNC_2 3
-#define QPNP_GPIO_SRC_DTEST1 4
-#define QPNP_GPIO_SRC_DTEST2 5
-#define QPNP_GPIO_SRC_DTEST3 6
-#define QPNP_GPIO_SRC_DTEST4 7
-
-#define QPNP_GPIO_MASTER_DISABLE 0
-#define QPNP_GPIO_MASTER_ENABLE 1
-
-/**
- * struct qpnp_gpio_cfg - structure to specify gpio configurtion values
- * @direction: indicates whether the gpio should be input, output, or
- * both. Should be of the type QPNP_GPIO_DIR_*
- * @output_type: indicates gpio should be configured as CMOS or open
- * drain. Should be of the type QPNP_GPIO_OUT_BUF_*
- * @invert: Invert the signal of the gpio line -
- * QPNP_GPIO_INVERT_DISABLE or QPNP_GPIO_INVERT_ENABLE
- * @pull: Indicates whether a pull up or pull down should be
- * applied. If a pullup is required the current strength
- * needs to be specified. Current values of 30uA, 1.5uA,
- * 31.5uA, 1.5uA with 30uA boost are supported. This value
- * should be one of the QPNP_GPIO_PULL_*
- * @vin_sel: specifies the voltage level when the output is set to 1.
- * For an input gpio specifies the voltage level at which
- * the input is interpreted as a logical 1.
- * @out_strength: the amount of current supplied for an output gpio,
- * should be of the type QPNP_GPIO_STRENGTH_*
- * @source_sel: choose alternate function for the gpio. Certain gpios
- * can be paired (shorted) with each other. Some gpio pin
- * can act as alternate functions. This parameter should
- * be of type QPNP_GPIO_SRC_*.
- * @master_en: QPNP_GPIO_MASTER_ENABLE = Enable features within the
- * GPIO block based on configurations.
- * QPNP_GPIO_MASTER_DISABLE = Completely disable the GPIO
- * block and let the pin float with high impedance
- * regardless of other settings.
- */
-struct qpnp_gpio_cfg {
- unsigned int direction;
- unsigned int output_type;
- unsigned int invert;
- unsigned int pull;
- unsigned int vin_sel;
- unsigned int out_strength;
- unsigned int src_select;
- unsigned int master_en;
-};
-
-/**
- * qpnp_gpio_config - Apply gpio configuration for Linux gpio
- * @gpio: Linux gpio number to configure.
- * @param: parameters to configure.
- *
- * This routine takes a Linux gpio number that corresponds with a
- * PMIC gpio and applies the configuration specified in 'param'.
- * This gpio number can be ascertained by of_get_gpio_flags() or
- * the qpnp_gpio_map_gpio() API.
- */
-int qpnp_gpio_config(int gpio, struct qpnp_gpio_cfg *param);
-
-/**
- * qpnp_gpio_map_gpio - Obtain Linux GPIO number from device spec
- * @slave_id: slave_id of the spmi_device for the gpio in question.
- * @pmic_gpio: PMIC gpio number to lookup.
- *
- * This routine is used in legacy configurations that do not support
- * Device Tree. If you are using Device Tree, you should not use this.
- * For such cases, use of_get_gpio() instead.
- */
-int qpnp_gpio_map_gpio(uint16_t slave_id, uint32_t pmic_gpio);
diff --git a/include/linux/qpnp/pin.h b/include/linux/qpnp/pin.h
new file mode 100644
index 0000000..fa9c30f
--- /dev/null
+++ b/include/linux/qpnp/pin.h
@@ -0,0 +1,190 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* Mode select */
+#define QPNP_PIN_MODE_DIG_IN 0
+#define QPNP_PIN_MODE_DIG_OUT 1
+#define QPNP_PIN_MODE_DIG_IN_OUT 2
+#define QPNP_PIN_MODE_BIDIR 3
+#define QPNP_PIN_MODE_AIN 4
+#define QPNP_PIN_MODE_AOUT 5
+#define QPNP_PIN_MODE_SINK 6
+
+/* Invert source select (GPIO, MPP) */
+#define QPNP_PIN_INVERT_DISABLE 0
+#define QPNP_PIN_INVERT_ENABLE 1
+
+/* Output type (GPIO) */
+#define QPNP_PIN_OUT_BUF_CMOS 0
+#define QPNP_PIN_OUT_BUF_OPEN_DRAIN_NMOS 1
+#define QPNP_PIN_OUT_BUF_OPEN_DRAIN_PMOS 2
+
+/* Voltage select (GPIO, MPP) */
+#define QPNP_PIN_VIN0 0
+#define QPNP_PIN_VIN1 1
+#define QPNP_PIN_VIN2 2
+#define QPNP_PIN_VIN3 3
+#define QPNP_PIN_VIN4 4
+#define QPNP_PIN_VIN5 5
+#define QPNP_PIN_VIN6 6
+#define QPNP_PIN_VIN7 7
+
+/* Pull Up Values (GPIO) */
+#define QPNP_PIN_GPIO_PULL_UP_30 0
+#define QPNP_PIN_GPIO_PULL_UP_1P5 1
+#define QPNP_PIN_GPIO_PULL_UP_31P5 2
+#define QPNP_PIN_GPIO_PULL_UP_1P5_30 3
+#define QPNP_PIN_GPIO_PULL_DN 4
+#define QPNP_PIN_GPIO_PULL_NO 5
+
+/* Pull Up Values (MPP) */
+#define QPNP_PIN_MPP_PULL_UP_0P6KOHM 0
+#define QPNP_PIN_MPP_PULL_UP_OPEN 1
+#define QPNP_PIN_MPP_PULL_UP_10KOHM 2
+#define QPNP_PIN_MPP_PULL_UP_30KOHM 3
+
+/* Out Strength (GPIO) */
+#define QPNP_PIN_OUT_STRENGTH_LOW 1
+#define QPNP_PIN_OUT_STRENGTH_MED 2
+#define QPNP_PIN_OUT_STRENGTH_HIGH 3
+
+/* Source Select (GPIO) / Enable Select (MPP) */
+#define QPNP_PIN_SEL_FUNC_CONSTANT 0
+#define QPNP_PIN_SEL_FUNC_PAIRED 1
+#define QPNP_PIN_SEL_FUNC_1 2
+#define QPNP_PIN_SEL_FUNC_2 3
+#define QPNP_PIN_SEL_DTEST1 4
+#define QPNP_PIN_SEL_DTEST2 5
+#define QPNP_PIN_SEL_DTEST3 6
+#define QPNP_PIN_SEL_DTEST4 7
+
+/* Master enable (GPIO, MPP) */
+#define QPNP_PIN_MASTER_DISABLE 0
+#define QPNP_PIN_MASTER_ENABLE 1
+
+/* Analog Output (MPP) */
+#define QPNP_PIN_AOUT_1V25 0
+#define QPNP_PIN_AOUT_0V625 1
+#define QPNP_PIN_AOUT_0V3125 2
+#define QPNP_PIN_AOUT_MPP 3
+#define QPNP_PIN_AOUT_ABUS1 4
+#define QPNP_PIN_AOUT_ABUS2 5
+#define QPNP_PIN_AOUT_ABUS3 6
+#define QPNP_PIN_AOUT_ABUS4 7
+
+/* Analog Input (MPP) */
+#define QPNP_PIN_AIN_AMUX_CH5 0
+#define QPNP_PIN_AIN_AMUX_CH6 1
+#define QPNP_PIN_AIN_AMUX_CH7 2
+#define QPNP_PIN_AIN_AMUX_CH8 3
+#define QPNP_PIN_AIN_AMUX_ABUS1 4
+#define QPNP_PIN_AIN_AMUX_ABUS2 5
+#define QPNP_PIN_AIN_AMUX_ABUS3 6
+#define QPNP_PIN_AIN_AMUX_ABUS4 7
+
+/* Current Sink (MPP) */
+#define QPNP_PIN_CS_OUT_5MA 0
+#define QPNP_PIN_CS_OUT_10MA 1
+#define QPNP_PIN_CS_OUT_15MA 2
+#define QPNP_PIN_CS_OUT_20MA 3
+#define QPNP_PIN_CS_OUT_25MA 4
+#define QPNP_PIN_CS_OUT_30MA 5
+#define QPNP_PIN_CS_OUT_35MA 6
+#define QPNP_PIN_CS_OUT_40MA 7
+
+/**
+ * struct qpnp_pin_cfg - structure to specify pin configuration values
+ * @mode: indicates whether the pin should be input, output, or
+ * both for gpios. mpp pins also support bidirectional,
+ * analog in, analog out and current sink. This value
+ * should be of type QPNP_PIN_MODE_*.
+ * @output_type: indicates pin should be configured as CMOS or open
+ * drain. Should be of the type QPNP_PIN_OUT_BUF_*. This
+ * setting applies for gpios only.
+ * @invert: Invert the signal of the line -
+ * QPNP_PIN_INVERT_DISABLE or QPNP_PIN_INVERT_ENABLE.
+ * @pull: This parameter should be programmed to different values
+ * depending on whether it's GPIO or MPP.
+ * For GPIO, it indicates whether a pull up or pull down
+ * should be applied. If a pullup is required the
+ * current strength needs to be specified.
+ * Current values of 30uA, 1.5uA, 31.5uA, 1.5uA with 30uA
+ * boost are supported. This value should be one of
+ * the QPNP_PIN_GPIO_PULL_*. Note that the hardware ignores
+ * this configuration if the GPIO is not set to input or
+ * output open-drain mode.
+ * For MPP, it indicates whether a pullup should be
+ * applied for bidirectional mode only. The hardware
+ * ignores the configuration when operating in other modes.
+ * This value should be one of the QPNP_PIN_MPP_PULL_*.
+ * @vin_sel: specifies the voltage level when the output is set to 1.
+ * For an input gpio specifies the voltage level at which
+ * the input is interpreted as a logical 1.
+ * @out_strength: the amount of current supplied for an output gpio,
+ * should be of the type QPNP_PIN_STRENGTH_*.
+ * @select: select alternate function for the pin. Certain pins
+ * can be paired (shorted) with each other. Some pins
+ * can act as alternate functions. In the context of
+ * gpio, this acts as a source select. For mpps,
+ * this is an enable select.
+ * This parameter should be of type QPNP_PIN_SEL_*.
+ * @master_en: QPNP_PIN_MASTER_ENABLE = Enable features within the
+ * pin block based on configurations.
+ * QPNP_PIN_MASTER_DISABLE = Completely disable the pin
+ * block and let the pin float with high impedance
+ * regardless of other settings.
+ * @aout_ref: Set the analog output reference. This parameter should
+ * be of type QPNP_PIN_AOUT_*. This parameter only applies
+ * to mpp pins.
+ * @ain_route: Set the source for analog input. This parameter
+ * should be of type QPNP_PIN_AIN_*. This parameter only
+ * applies to mpp pins.
+ * @cs_out: Set the amount of current to sink in mA. This
+ * parameter should be of type QPNP_PIN_CS_OUT_*. This
+ * parameter only applies to mpp pins.
+ */
+struct qpnp_pin_cfg {
+ int mode;
+ int output_type;
+ int invert;
+ int pull;
+ int vin_sel;
+ int out_strength;
+ int select;
+ int master_en;
+ int aout_ref;
+ int ain_route;
+ int cs_out;
+};
+
+/**
+ * qpnp_pin_config - Apply pin configuration for Linux gpio
+ * @gpio: Linux gpio number to configure.
+ * @param: parameters to configure.
+ *
+ * This routine takes a Linux gpio number that corresponds with a
+ * PMIC pin and applies the configuration specified in 'param'.
+ * This gpio number can be ascertained by of_get_gpio_flags() or
+ * the qpnp_pin_map_gpio() API.
+ */
+int qpnp_pin_config(int gpio, struct qpnp_pin_cfg *param);
+
+/**
+ * qpnp_pin_map - Obtain Linux GPIO number from device spec
+ * @name: Name assigned by the 'label' binding for the primary node.
+ * @pmic_pin: PMIC pin number to lookup.
+ *
+ * This routine is used in legacy configurations that do not support
+ * Device Tree. If you are using Device Tree, you should not use this.
+ * For such cases, use of_get_gpio() or friends instead.
+ */
+int qpnp_pin_map(const char *name, uint32_t pmic_pin);
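A minimal sketch of how a legacy board file might use the API above (not part of the patch); the label string and pin number passed to qpnp_pin_map() are placeholders:

#include <linux/qpnp/pin.h>

static int foo_configure_status_pin(void)
{
	struct qpnp_pin_cfg param = {
		.mode		= QPNP_PIN_MODE_DIG_OUT,
		.output_type	= QPNP_PIN_OUT_BUF_CMOS,
		.invert		= QPNP_PIN_INVERT_DISABLE,
		.pull		= QPNP_PIN_GPIO_PULL_NO,
		.vin_sel	= QPNP_PIN_VIN2,
		.out_strength	= QPNP_PIN_OUT_STRENGTH_LOW,
		.select		= QPNP_PIN_SEL_FUNC_CONSTANT,
		.master_en	= QPNP_PIN_MASTER_ENABLE,
	};
	int gpio;

	/* legacy (non-DT) lookup by the 'label' binding name */
	gpio = qpnp_pin_map("foo-pmic-gpio", 3);
	if (gpio < 0)
		return gpio;

	return qpnp_pin_config(gpio, &param);
}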
diff --git a/include/linux/spmi.h b/include/linux/spmi.h
index 927978a..f94b5c5 100644
--- a/include/linux/spmi.h
+++ b/include/linux/spmi.h
@@ -92,11 +92,19 @@
* @num_resources: number of resources for this device node
* @resources: array of resources for this device_node
* @of_node: device_node of the resource in question
+ * @label: name used to reference the device from the driver
+ *
+ * Note that we explicitly add a 'label' pointer here since per
+ * the ePAPR 2.2.2, the device_node->name should be generic and not
+ * reflect the precise programming model. Thus 'label' enables a
+ * platform specific name to be assigned with the 'label' binding to
+ * allow for unique query names.
*/
struct spmi_resource {
struct resource *resource;
u32 num_resources;
struct device_node *of_node;
+ const char *label;
};
/**
@@ -108,7 +116,8 @@
* @dev: Driver model representation of the device.
* @name: Name of driver to use with this device.
* @ctrl: SPMI controller managing the bus hosting this device.
- * @dev_node: array of SPMI resources - one entry per device_node.
+ * @res: SPMI resource for the primary node
+ * @dev_node: array of SPMI resources when used with spmi-dev-container.
* @num_dev_node: number of device_node structures.
* @sid: Slave Identifier.
*/
@@ -116,6 +125,7 @@
struct device dev;
const char *name;
struct spmi_controller *ctrl;
+ struct spmi_resource res;
struct spmi_resource *dev_node;
u32 num_dev_node;
u8 sid;
@@ -124,10 +134,12 @@
/**
* struct spmi_boardinfo: Declare board info for SPMI device bringup.
+ * @name: Name of driver to use with this device.
* @slave_id: slave identifier.
* @spmi_device: device to be registered with the SPMI framework.
* @of_node: pointer to the OpenFirmware device node.
- * @dev_node: one spmi_resource for each device_node.
+ * @res: SPMI resource for the primary node
+ * @dev_node: array of SPMI resources when used with spmi-dev-container.
* @num_dev_node: number of device_node structures.
* @platform_data: goes to spmi_device.dev.platform_data
*/
@@ -135,6 +147,7 @@
char name[SPMI_NAME_SIZE];
uint8_t slave_id;
struct device_node *of_node;
+ struct spmi_resource res;
struct spmi_resource *dev_node;
u32 num_dev_node;
const void *platform_data;
@@ -417,4 +430,49 @@
* -ETIMEDOUT if the SPMI transaction times out.
*/
extern int spmi_command_shutdown(struct spmi_controller *ctrl, u8 sid);
+
+/**
+ * spmi_for_each_container_dev - iterate over the array of devnode resources.
+ * @res: spmi_resource pointer used as the array cursor
+ * @spmi_dev: spmi_device to iterate
+ *
+ * Only usable in spmi-dev-container configurations.
+ */
+#define spmi_for_each_container_dev(res, spmi_dev) \
+ for (res = ((spmi_dev)->dev_node ? &(spmi_dev)->dev_node[0] : NULL); \
+ (res - (spmi_dev)->dev_node) < (spmi_dev)->num_dev_node; res++)
+
+extern struct resource *spmi_get_resource(struct spmi_device *dev,
+ struct spmi_resource *node,
+ unsigned int type, unsigned int res_num);
+
+struct resource *spmi_get_resource_byname(struct spmi_device *dev,
+ struct spmi_resource *node,
+ unsigned int type,
+ const char *name);
+
+extern int spmi_get_irq(struct spmi_device *dev, struct spmi_resource *node,
+ unsigned int res_num);
+
+extern int spmi_get_irq_byname(struct spmi_device *dev,
+ struct spmi_resource *node, const char *name);
+
+/**
+ * spmi_get_primary_dev_name - return device name for spmi node
+ * @dev: spmi device handle
+ *
+ * Get the primary node name of a spmi_device corresponding with
+ * the 'label' binding.
+ *
+ * Returns NULL if no primary dev name has been assigned to this spmi_device.
+ */
+static inline const char *spmi_get_primary_dev_name(struct spmi_device *dev)
+{
+ if (dev->res.label)
+ return dev->res.label;
+ return NULL;
+}
+
+struct spmi_resource *spmi_get_dev_container_byname(struct spmi_device *dev,
+ const char *label);
#endif
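The container helpers above can be combined as in the following sketch (not part of the patch) of a probe routine for an spmi-dev-container device; the IORESOURCE_MEM type and the "core"/"irq" resource names are assumptions:

#include <linux/ioport.h>
#include <linux/spmi.h>

static int foo_spmi_probe(struct spmi_device *spmi)
{
	struct spmi_resource *node;
	struct resource *res;
	int irq;

	dev_dbg(&spmi->dev, "primary node: %s\n",
		spmi_get_primary_dev_name(spmi));

	spmi_for_each_container_dev(node, spmi) {
		res = spmi_get_resource_byname(spmi, node,
					       IORESOURCE_MEM, "core");
		irq = spmi_get_irq_byname(spmi, node, "irq");
		if (!res || irq < 0)
			continue;
		/* map 'res' and request 'irq' for this sub-function here */
	}
	return 0;
}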
diff --git a/include/linux/test-iosched.h b/include/linux/test-iosched.h
new file mode 100644
index 0000000..8054409
--- /dev/null
+++ b/include/linux/test-iosched.h
@@ -0,0 +1,233 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * The test scheduler allows testing a block device by dispatching
+ * specific requests according to the test case and declaring PASS/FAIL
+ * according to the requests' completion error codes.
+ * Each test is exposed via debugfs and can be triggered by writing to
+ * the debugfs file.
+ *
+ */
+
+#ifndef _LINUX_TEST_IOSCHED_H
+#define _LINUX_TEST_IOSCHED_H
+
+/*
+ * Patterns definitions for read/write requests data
+ */
+#define TEST_PATTERN_SEQUENTIAL -1
+#define TEST_PATTERN_5A 0x5A5A5A5A
+#define TEST_PATTERN_FF 0xFFFFFFFF
+#define TEST_NO_PATTERN 0xDEADBEEF
+#define BIO_U32_SIZE 1024
+
+struct test_data;
+
+typedef int (prepare_test_fn) (struct test_data *);
+typedef int (run_test_fn) (struct test_data *);
+typedef int (check_test_result_fn) (struct test_data *);
+typedef int (post_test_fn) (struct test_data *);
+typedef char* (get_test_case_str_fn) (struct test_data *);
+typedef void (blk_dev_test_init_fn) (void);
+typedef void (blk_dev_test_exit_fn) (void);
+
+/**
+ * enum test_state - defines the state of the test
+ */
+enum test_state {
+ TEST_IDLE,
+ TEST_RUNNING,
+ TEST_COMPLETED,
+};
+
+/**
+ * enum test_results - defines the success or failure of the test
+ */
+enum test_results {
+ TEST_NO_RESULT,
+ TEST_FAILED,
+ TEST_PASSED,
+ TEST_NOT_SUPPORTED,
+};
+
+/**
+ * enum req_unique_type - defines a unique request type
+ */
+enum req_unique_type {
+ REQ_UNIQUE_NONE,
+ REQ_UNIQUE_DISCARD,
+ REQ_UNIQUE_FLUSH,
+};
+
+/**
+ * struct test_debug - debugfs directories
+ * @debug_root: The test-iosched debugfs root directory
+ * @debug_utils_root: test-iosched debugfs utils root
+ * directory
+ * @debug_tests_root: test-iosched debugfs tests root
+ * directory
+ * @debug_test_result: Exposes the test result to the user
+ * space
+ * @start_sector: The start sector for read/write requests
+ */
+struct test_debug {
+ struct dentry *debug_root;
+ struct dentry *debug_utils_root;
+ struct dentry *debug_tests_root;
+ struct dentry *debug_test_result;
+ struct dentry *start_sector;
+};
+
+/**
+ * struct test_request - defines a test request
+ * @queuelist: The test requests list
+ * @bios_buffer: Write/read requests data buffer
+ * @buf_size: Write/read requests data buffer size (in
+ * bytes)
+ * @rq: A block request, to be dispatched
+ * @req_completed: A flag to indicate if the request was
+ * completed
+ * @req_result: Keeps the error code received in the
+ * request completion callback
+ * @is_err_expected: A flag to indicate if the request should
+ * fail
+ * @wr_rd_data_pattern: A pattern written to the write data
+ * buffer. Can be used in read requests to
+ * verify the data
+ * @req_id: A unique ID to identify a test request
+ * to ease the debugging of the test cases
+ */
+struct test_request {
+ struct list_head queuelist;
+ unsigned int *bios_buffer;
+ int buf_size;
+ struct request *rq;
+ bool req_completed;
+ int req_result;
+ int is_err_expected;
+ int wr_rd_data_pattern;
+ int req_id;
+};
+
+/**
+ * struct test_info - specific test information
+ * @testcase: The current running test case
+ * @timeout_msec: Test specific test timeout
+ * @buf_size: Write/read requests data buffer size (in
+ * bytes)
+ * @prepare_test_fn: Test specific test preparation callback
+ * @run_test_fn: Test specific test running callback
+ * @check_test_result_fn: Test specific test result checking
+ * callback
+ * @get_test_case_str_fn: Test specific function to get the test name
+ * @data: Test specific private data
+ */
+struct test_info {
+ int testcase;
+ unsigned timeout_msec;
+ prepare_test_fn *prepare_test_fn;
+ run_test_fn *run_test_fn;
+ check_test_result_fn *check_test_result_fn;
+ post_test_fn *post_test_fn;
+ get_test_case_str_fn *get_test_case_str_fn;
+ void *data;
+};
+
+/**
+ * struct blk_dev_test_type - identifies block device test
+ * @list: list head pointer
+ * @init_fn: block device test init callback
+ * @exit_fn: block device test exit callback
+ */
+struct blk_dev_test_type {
+ struct list_head list;
+ blk_dev_test_init_fn *init_fn;
+ blk_dev_test_exit_fn *exit_fn;
+};
+
+/**
+ * struct test_data - global test iosched data
+ * @queue: The test IO scheduler requests list
+ * @test_queue: The test requests list
+ * @next_req: Points to the next request to be
+ * dispatched from the test requests list
+ * @wait_q: A wait queue for waiting for the test
+ * requests completion
+ * @test_state: Indicates if there is a running test.
+ * Used for dispatch function
+ * @test_result: Indicates if the test passed or failed
+ * @debug: The test debugfs entries
+ * @req_q: The block layer request queue
+ * @num_of_write_bios: The number of write BIOs added to the test requests.
+ * Used to calculate the sector number of
+ * new BIOs.
+ * @start_sector: The address of the first sector that can
+ * be accessed by the test
+ * @timeout_timer: A timer to verify test completion in
+ * case of non-completed requests
+ * @wr_rd_next_req_id: A unique ID to identify WRITE/READ
+ * request to ease the debugging of the
+ * test cases
+ * @unique_next_req_id: A unique ID to identify
+ * FLUSH/DISCARD/SANITIZE request to ease
+ * the debugging of the test cases
+ * @lock: A lock to verify running a single test
+ * at a time
+ * @test_info: A specific test data to be set by the
+ * test invocation function
+ * @ignore_round: A boolean variable indicating that a
+ * test round was disturbed by an external
+ * flush request, therefore disqualifying
+ * the results
+ */
+struct test_data {
+ struct list_head queue;
+ struct list_head test_queue;
+ struct test_request *next_req;
+ wait_queue_head_t wait_q;
+ enum test_state test_state;
+ enum test_results test_result;
+ struct test_debug debug;
+ struct request_queue *req_q;
+ int num_of_write_bios;
+ u32 start_sector;
+ struct timer_list timeout_timer;
+ int wr_rd_next_req_id;
+ int unique_next_req_id;
+ spinlock_t lock;
+ struct test_info test_info;
+ bool fs_wr_reqs_during_test;
+ bool ignore_round;
+};
+
+extern int test_iosched_start_test(struct test_info *t_info);
+extern void test_iosched_mark_test_completion(void);
+extern int test_iosched_add_unique_test_req(int is_err_expcted,
+ enum req_unique_type req_unique,
+ int start_sec, int nr_sects, rq_end_io_fn *end_req_io);
+extern int test_iosched_add_wr_rd_test_req(int is_err_expcted,
+ int direction, int start_sec,
+ int num_bios, int pattern, rq_end_io_fn *end_req_io);
+
+extern struct dentry *test_iosched_get_debugfs_tests_root(void);
+extern struct dentry *test_iosched_get_debugfs_utils_root(void);
+
+extern struct request_queue *test_iosched_get_req_queue(void);
+
+extern void test_iosched_set_test_result(int);
+
+void test_iosched_set_ignore_round(bool ignore_round);
+
+void test_iosched_register(struct blk_dev_test_type *bdt);
+
+void test_iosched_unregister(struct blk_dev_test_type *bdt);
+
+#endif /* _LINUX_TEST_IOSCHED_H */
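As a sketch of how a block device test utility ties into the framework above (not part of the patch); the foo_* names, the test case id, the request count, and the use of the WRITE direction constant are assumptions:

#include <linux/fs.h>		/* WRITE */
#include <linux/test-iosched.h>

static int foo_run_test(struct test_data *td)
{
	/* queue 8 sequential write BIOs from the configured start sector */
	return test_iosched_add_wr_rd_test_req(0, WRITE, td->start_sector,
					       8, TEST_PATTERN_5A, NULL);
}

static struct test_info foo_test_info = {
	.testcase	= 1,		/* placeholder test id */
	.timeout_msec	= 10 * 1000,
	.run_test_fn	= foo_run_test,
};

static void foo_test_init(void)
{
	/* normally: create a trigger file under
	 * test_iosched_get_debugfs_tests_root() whose write handler
	 * calls test_iosched_start_test(&foo_test_info)
	 */
}

static void foo_test_exit(void)
{
	/* remove the debugfs trigger file */
}

static struct blk_dev_test_type foo_bdt = {
	.init_fn	= foo_test_init,
	.exit_fn	= foo_test_exit,
};

static int __init foo_test_module_init(void)
{
	test_iosched_register(&foo_bdt);
	return 0;
}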
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index d9ec332..eabe4e8 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -212,6 +212,7 @@
#define HCD_MEMORY 0x0001 /* HC regs use memory (else I/O) */
#define HCD_LOCAL_MEM 0x0002 /* HC needs local memory */
#define HCD_SHARED 0x0004 /* Two (or more) usb_hcds share HW */
+#define HCD_OLD_ENUM 0x0008 /* HC supports short enumeration */
#define HCD_USB11 0x0010 /* USB 1.1 */
#define HCD_USB2 0x0020 /* USB 2.0 */
#define HCD_USB3 0x0040 /* USB 3.0 */
@@ -348,6 +349,8 @@
/* to log completion events*/
void (*log_urb_complete)(struct urb *urb, char * event,
unsigned extra);
+ void (*enable_ulpi_control)(struct usb_hcd *hcd, u32 linestate);
+ void (*disable_ulpi_control)(struct usb_hcd *hcd);
};
extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb);
diff --git a/include/media/msm_camera.h b/include/media/msm_camera.h
index 3308243..da450fc 100644
--- a/include/media/msm_camera.h
+++ b/include/media/msm_camera.h
@@ -1659,4 +1659,86 @@
#define MSM_IRQROUTER_CFG_COMPIRQ \
_IOWR('V', BASE_VIDIOC_PRIVATE, void __user *)
+#define MAX_NUM_CPP_STRIPS 8
+
+enum msm_cpp_frame_type {
+ MSM_CPP_OFFLINE_FRAME,
+ MSM_CPP_REALTIME_FRAME,
+};
+
+struct msm_cpp_frame_strip_info {
+ int scale_v_en;
+ int scale_h_en;
+
+ int upscale_v_en;
+ int upscale_h_en;
+
+ int src_start_x;
+ int src_end_x;
+ int src_start_y;
+ int src_end_y;
+
+ /* Padding is required for the upscaler because it does not
+ * pad internally like other blocks. It is also needed for rotation:
+ * rotation expects all the blocks in the stripe to be the same size.
+ * Padding is done such that all the extra padded pixels
+ * are on the right and bottom.
+ */
+ int pad_bottom;
+ int pad_top;
+ int pad_right;
+ int pad_left;
+
+ int v_init_phase;
+ int h_init_phase;
+ int h_phase_step;
+ int v_phase_step;
+
+ int prescale_crop_width_first_pixel;
+ int prescale_crop_width_last_pixel;
+ int prescale_crop_height_first_line;
+ int prescale_crop_height_last_line;
+
+ int postscale_crop_height_first_line;
+ int postscale_crop_height_last_line;
+ int postscale_crop_width_first_pixel;
+ int postscale_crop_width_last_pixel;
+
+ int dst_start_x;
+ int dst_end_x;
+ int dst_start_y;
+ int dst_end_y;
+
+ int bytes_per_pixel;
+ unsigned int source_address;
+ unsigned int destination_address;
+ unsigned int src_stride;
+ unsigned int dst_stride;
+ int rotate_270;
+ int horizontal_flip;
+ int vertical_flip;
+ int scale_output_width;
+ int scale_output_height;
+};
+
+struct msm_cpp_frame_info_t {
+ int32_t frame_id;
+ uint32_t inst_id;
+ uint32_t client_id;
+ enum msm_cpp_frame_type frame_type;
+ uint32_t num_strips;
+ struct msm_cpp_frame_strip_info *strip_info;
+};
+
+#define VIDIOC_MSM_CPP_CFG \
+ _IOWR('V', BASE_VIDIOC_PRIVATE, struct msm_camera_v4l2_ioctl_t)
+
+#define VIDIOC_MSM_CPP_GET_EVENTPAYLOAD \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 1, struct msm_camera_v4l2_ioctl_t)
+
+#define VIDIOC_MSM_CPP_GET_INST_INFO \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 2, struct msm_camera_v4l2_ioctl_t)
+
+#define V4L2_EVENT_CPP_FRAME_DONE (V4L2_EVENT_PRIVATE_START + 0)
+
#endif /* __LINUX_MSM_CAMERA_H */
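A sketch of how userspace might describe a single-strip offline frame with the new CPP structures above (not part of the patch); all geometry, stride, and id values are placeholders, and the descriptor would then be handed to the driver through VIDIOC_MSM_CPP_CFG wrapped in struct msm_camera_v4l2_ioctl_t (defined elsewhere in this header):

struct msm_cpp_frame_strip_info strip = {
	.src_start_x	 = 0,
	.src_end_x	 = 639,
	.src_start_y	 = 0,
	.src_end_y	 = 479,
	.dst_start_x	 = 0,
	.dst_end_x	 = 639,
	.dst_start_y	 = 0,
	.dst_end_y	 = 479,
	.bytes_per_pixel = 2,		/* e.g. a 16-bit pixel format */
	.src_stride	 = 1280,
	.dst_stride	 = 1280,
};

struct msm_cpp_frame_info_t frame = {
	.frame_id	= 1,
	.inst_id	= 0,
	.client_id	= 0,
	.frame_type	= MSM_CPP_OFFLINE_FRAME,
	.num_strips	= 1,
	.strip_info	= &strip,
};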
diff --git a/include/media/msm_isp.h b/include/media/msm_isp.h
index 93f6c8b..0309fd5 100644
--- a/include/media/msm_isp.h
+++ b/include/media/msm_isp.h
@@ -60,7 +60,7 @@
#define MSG_ID_OUTPUT_SECONDARY 41
#define MSG_ID_STATS_COMPOSITE 42
#define MSG_ID_OUTPUT_TERTIARY1 43
-
+#define MSG_ID_STOP_LS_ACK 44
/* ISP command IDs */
#define VFE_CMD_DUMMY_0 0
diff --git a/include/media/radio-iris.h b/include/media/radio-iris.h
index b5e8f2e..25a1d84 100644
--- a/include/media/radio-iris.h
+++ b/include/media/radio-iris.h
@@ -489,6 +489,29 @@
__u8 in_det_out;
} __packed;
+#define CLKSPURID_INDEX0 0
+#define CLKSPURID_INDEX1 5
+#define CLKSPURID_INDEX2 10
+#define CLKSPURID_INDEX3 15
+#define CLKSPURID_INDEX4 20
+#define CLKSPURID_INDEX5 25
+
+#define MAX_SPUR_FREQ_LIMIT 30
+#define CKK_SPUR 0x3B
+#define SPUR_DATA_SIZE 0x4
+#define SPUR_ENTRIES_PER_ID 0x5
+
+#define COMPUTE_SPUR(val) ((((val) - (76000)) / (50)))
+#define GET_FREQ(val, bit) ((bit == 1) ? ((val) >> 8) : ((val) & 0xFF))
+#define GET_SPUR_ENTRY_LEVEL(val) ((val) / (5))
+
+struct hci_fm_spur_data {
+ __u32 freq[MAX_SPUR_FREQ_LIMIT];
+ __s8 rmssi[MAX_SPUR_FREQ_LIMIT];
+ __u8 enable[MAX_SPUR_FREQ_LIMIT];
+} __packed;
+
+
/* HCI dev events */
#define RADIO_HCI_DEV_REG 1
#define RADIO_HCI_DEV_WRITE 2
@@ -572,6 +595,10 @@
V4L2_CID_PRIVATE_INTF_HIGH_THRESHOLD,
V4L2_CID_PRIVATE_SINR_THRESHOLD,
V4L2_CID_PRIVATE_SINR_SAMPLES,
+ V4L2_CID_PRIVATE_SPUR_FREQ,
+ V4L2_CID_PRIVATE_SPUR_FREQ_RMSSI,
+ V4L2_CID_PRIVATE_SPUR_SELECTION,
+ V4L2_CID_PRIVATE_UPDATE_SPUR_TABLE,
/*using private CIDs under userclass*/
V4L2_CID_PRIVATE_IRIS_READ_DEFAULT = 0x00980928,
@@ -680,6 +707,14 @@
RDS_AF_JUMP,
};
+enum spur_entry_levels {
+ ENTRY_0,
+ ENTRY_1,
+ ENTRY_2,
+ ENTRY_3,
+ ENTRY_4,
+ ENTRY_5,
+};
/* Band limits */
#define REGION_US_EU_BAND_LOW 87500
@@ -774,6 +809,7 @@
#define RDS_SYNC_INTR (1 << 1)
#define AUDIO_CTRL_INTR (1 << 2)
#define AF_JUMP_ENABLE (1 << 4)
+
int hci_def_data_read(struct hci_fm_def_data_rd_req *arg,
struct radio_hci_dev *hdev);
int hci_def_data_write(struct hci_fm_def_data_wr_req *arg,
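A worked example of the arithmetic performed by the spur helper macros added above (not part of the patch; the kHz unit is inferred from the 76000 base used by COMPUTE_SPUR):

/* spur at 98.1 MHz, expressed in kHz */
unsigned int  spur_idx = COMPUTE_SPUR(98100);       /* (98100 - 76000) / 50 == 442 */
unsigned char freq_msb = GET_FREQ(spur_idx, 1);     /* 442 >> 8   == 1   */
unsigned char freq_lsb = GET_FREQ(spur_idx, 0);     /* 442 & 0xFF == 186 */
unsigned int  level    = GET_SPUR_ENTRY_LEVEL(7);   /* 7 / 5 == 1 (entries grouped by SPUR_ENTRIES_PER_ID) */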
diff --git a/include/media/tavarua.h b/include/media/tavarua.h
index 9943287..adbdada 100644
--- a/include/media/tavarua.h
+++ b/include/media/tavarua.h
@@ -395,6 +395,22 @@
#define FM_TX_PWR_LVL_0 0 /* Lowest power lvl that can be set for Tx */
#define FM_TX_PWR_LVL_MAX 7 /* Max power lvl for Tx */
+
+/* Tone Generator control value */
+#define TONE_GEN_CTRL_BYTE 0x00
+#define TONE_CHANNEL_EN_AND_SCALING_BYTE 0x01
+#define TONE_LEFT_FREQ_BYTE 0x02
+#define TONE_RIGHT_FREQ_BYTE 0x03
+#define TONE_LEFT_PHASE 0x04
+#define TONE_RIGHT_PHASE 0x05
+
+#define TONE_LEFT_CH_ENABLED 0x01
+#define TONE_RIGHT_CH_ENABLED 0x02
+#define TONE_LEFT_RIGHT_CH_ENABLED (TONE_LEFT_CH_ENABLED\
+ | TONE_RIGHT_CH_ENABLED)
+
+#define TONE_SCALING_SHIFT 0x02
+
/* Transfer */
enum tavarua_xfr_ctrl_t {
RDS_PS_0 = 0x01,
@@ -453,6 +469,7 @@
PHY_CONFIG,
PHY_TXBLOCK,
PHY_TCB,
+ XFR_EXT,
XFR_PEEK_MODE = 0x40,
XFR_POKE_MODE = 0xC0,
TAVARUA_XFR_CTRL_MAX
@@ -503,6 +520,7 @@
TWELVE_BYTE,
THIRTEEN_BYTE
};
+
#define XFR_READ (0)
#define XFR_WRITE (1)
#define XFR_MODE_OFFSET (0)
@@ -531,4 +549,28 @@
__u8 data[XFR_REG_NUM];
} __packed;
+enum Internal_tone_gen_vals {
+ ONE_KHZ_LR_EQUA_0DBFS = 1,
+ ONE_KHZ_LEFTONLY_EQUA_0DBFS,
+ ONE_KHZ_RIGHTONLY_EQUA_0DBFS,
+ ONE_KHZ_LR_EQUA_l8DBFS,
+ FIFTEEN_KHZ_LR_EQUA_l8DBFS
+};
+
+enum Tone_scaling_indexes {
+ TONE_SCALE_IND_0,
+ TONE_SCALE_IND_1,
+ TONE_SCALE_IND_2,
+ TONE_SCALE_IND_3,
+ TONE_SCALE_IND_4,
+ TONE_SCALE_IND_5,
+ TONE_SCALE_IND_6,
+ TONE_SCALE_IND_7,
+ TONE_SCALE_IND_8,
+ TONE_SCALE_IND_9,
+ TONE_SCALE_IND_10,
+ TONE_SCALE_IND_11,
+ TONE_SCALE_IND_12
+};
+
#endif /* __LINUX_TAVARUA_H */
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 9a03a12..893aeea 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -534,6 +534,7 @@
__u16 tx_win;
__u16 tx_win_max;
+ __u16 ack_win;
__u8 max_tx;
__u8 amp_pref;
__u16 remote_tx_win;
diff --git a/include/sound/apr_audio-v2.h b/include/sound/apr_audio-v2.h
index 695fea9..8e8778a 100644
--- a/include/sound/apr_audio-v2.h
+++ b/include/sound/apr_audio-v2.h
@@ -2322,6 +2322,9 @@
} __packed;
#define ASM_END_POINT_DEVICE_MATRIX 0
+
+#define PCM_CHANNEL_NULL 0
+
/* Front left channel. */
#define PCM_CHANNEL_FL 1
@@ -2444,7 +2447,7 @@
} __packed;
struct asm_stream_cmd_set_encdec_param {
- u32 param_id;
+ u32 param_id;
/* ID of the parameter. */
u32 param_size;
@@ -2573,9 +2576,6 @@
* - 6 -- 5.1 content
*/
- u16 reserved;
- /* Reserved. Clients must set this field to zero. */
-
u16 total_size_of_PCE_bits;
/* greater or equal to zero. * -In case of RAW formats and
* channel config = 0 (PCE), client can send * the bit stream
@@ -2986,6 +2986,8 @@
u16 enc_options;
/* Options used during encoding. */
+ u16 reserved;
+
} __packed;
#define ASM_MEDIA_FMT_WMA_V8 0x00010D91
@@ -4495,7 +4497,6 @@
struct asm_dec_out_chan_map_param {
struct apr_hdr hdr;
struct asm_stream_cmd_set_encdec_param encdec;
- struct asm_enc_cfg_blk_param_v2 encblk;
u32 num_channels;
/* Number of decoder output channels.
* Supported values: 0 to #MAX_CHAN_MAP_CHANNELS
diff --git a/include/sound/apr_audio.h b/include/sound/apr_audio.h
index 431dedf..c770f13 100644
--- a/include/sound/apr_audio.h
+++ b/include/sound/apr_audio.h
@@ -1127,6 +1127,7 @@
#define AC3_DECODER 0x00010BF6
#define EAC3_DECODER 0x00010C3C
#define DTS 0x00010D88
+#define DTS_LBR 0x00010DBB
#define ATRAC 0x00010D89
#define MAT 0x00010D8A
#define G711_ALAW_FS 0x00010BF7
@@ -1145,6 +1146,13 @@
#define ASM_ENCDEC_IMMDIATE_DECODE 0x00010C14
#define ASM_ENCDEC_CFG_BLK 0x00010C2C
+#define ASM_STREAM_CMD_OPEN_READ_COMPRESSED 0x00010D95
+struct asm_stream_cmd_open_read_compressed {
+ struct apr_hdr hdr;
+ u32 uMode;
+ u32 frame_per_buf;
+} __packed;
+
#define ASM_STREAM_CMD_OPEN_WRITE 0x00010BCA
struct asm_stream_cmd_open_write {
struct apr_hdr hdr;
@@ -1185,6 +1193,17 @@
u16 afe_port_id;
} __packed;
+#define ADM_CMD_CONNECT_AFE_PORT_V2 0x00010332
+
+struct adm_cmd_connect_afe_port_v2 {
+ struct apr_hdr hdr;
+ u8 mode; /* mode represents whether the interface is for RX or TX */
+ u8 session_id; /*ASM session ID*/
+ u16 afe_port_id;
+ u32 num_channels;
+ u32 sampleing_rate;
+} __packed;
+
#define ASM_STREAM_CMD_SET_ENCDEC_PARAM 0x00010C10
#define ASM_STREAM_CMD_GET_ENCDEC_PARAM 0x00010C11
#define ASM_ENCDEC_CFG_BLK_ID 0x00010C2C
diff --git a/include/sound/compress_offload.h b/include/sound/compress_offload.h
index 9769dea..e59d29c 100644
--- a/include/sound/compress_offload.h
+++ b/include/sound/compress_offload.h
@@ -123,6 +123,16 @@
};
/**
+ * struct snd_compr_audio_info: compressed input audio information
+ * @frame_size: length of the encoded frame with valid data
+ * @reserved: reserved for future use
+ */
+struct snd_compr_audio_info {
+ uint32_t frame_size;
+ uint32_t reserved[15];
+};
+
+/**
* compress path ioctl definitions
* SNDRV_COMPRESS_GET_CAPS: Query capability of DSP
* SNDRV_COMPRESS_GET_CODEC_CAPS: Query capability of a codec
diff --git a/include/sound/compress_params.h b/include/sound/compress_params.h
index 5aa7b09..75558bf 100644
--- a/include/sound/compress_params.h
+++ b/include/sound/compress_params.h
@@ -70,10 +70,14 @@
#define SND_AUDIOCODEC_IEC61937 ((__u32) 0x0000000B)
#define SND_AUDIOCODEC_G723_1 ((__u32) 0x0000000C)
#define SND_AUDIOCODEC_G729 ((__u32) 0x0000000D)
-#define SND_AUDIOCODEC_AC3 ((__u32) 0x0000000E)
-#define SND_AUDIOCODEC_DTS ((__u32) 0x0000000F)
-#define SND_AUDIOCODEC_AC3_PASS_THROUGH ((__u32) 0x00000010)
-#define SND_AUDIOCODEC_WMA_PRO ((__u32) 0x00000011)
+#define SND_AUDIOCODEC_AC3 ((__u32) 0x0000000E)
+#define SND_AUDIOCODEC_DTS ((__u32) 0x0000000F)
+#define SND_AUDIOCODEC_AC3_PASS_THROUGH ((__u32) 0x00000010)
+#define SND_AUDIOCODEC_WMA_PRO ((__u32) 0x00000011)
+#define SND_AUDIOCODEC_DTS_PASS_THROUGH ((__u32) 0x00000012)
+#define SND_AUDIOCODEC_DTS_LBR ((__u32) 0x00000013)
+#define SND_AUDIOCODEC_DTS_TRANSCODE_LOOPBACK ((__u32) 0x00000014)
+
/*
* Profile and modes are listed with bit masks. This allows for a
* more compact representation of fields that will not evolve
@@ -241,6 +245,8 @@
__u32 bits_per_sample;
__u32 channelmask;
__u32 encodeopt;
+ __u32 encodeopt1;
+ __u32 encodeopt2;
};
diff --git a/include/sound/q6adm.h b/include/sound/q6adm.h
index 56594d4..8e15955 100644
--- a/include/sound/q6adm.h
+++ b/include/sound/q6adm.h
@@ -43,6 +43,8 @@
int adm_connect_afe_port(int mode, int session_id, int port_id);
int adm_disconnect_afe_port(int mode, int session_id, int port_id);
+void adm_ec_ref_rx_id(int port_id);
+
#ifdef CONFIG_RTAC
int adm_get_copp_id(int port_id);
#endif
diff --git a/include/sound/q6asm-v2.h b/include/sound/q6asm-v2.h
index 7ef15ac..2a555b2 100644
--- a/include/sound/q6asm-v2.h
+++ b/include/sound/q6asm-v2.h
@@ -64,8 +64,11 @@
/* Enable Sample_Rate/Channel_Mode notification event from Decoder */
#define SR_CM_NOTIFY_ENABLE 0x0004
-#define ASYNC_IO_MODE 0x0002
#define SYNC_IO_MODE 0x0001
+#define ASYNC_IO_MODE 0x0002
+#define NT_MODE 0x0400
+
+
#define NO_TIMESTAMP 0xFF00
#define SET_TIMESTAMP 0x0000
@@ -230,6 +233,9 @@
int q6asm_set_encdec_chan_map(struct audio_client *ac,
uint32_t num_channels);
+int q6asm_enc_cfg_blk_pcm_native(struct audio_client *ac,
+ uint32_t rate, uint32_t channels);
+
int q6asm_enable_sbrps(struct audio_client *ac,
uint32_t sbr_ps);
diff --git a/include/sound/q6asm.h b/include/sound/q6asm.h
index ee90797..84e3150 100644
--- a/include/sound/q6asm.h
+++ b/include/sound/q6asm.h
@@ -48,6 +48,7 @@
#define FORMAT_ATRAC 0x0016
#define FORMAT_MAT 0x0017
#define FORMAT_AAC 0x0018
+#define FORMAT_DTS_LBR 0x0019
#define ENCDEC_SBCBITRATE 0x0001
#define ENCDEC_IMMEDIATE_DECODE 0x0002
@@ -180,6 +181,8 @@
int q6asm_open_read(struct audio_client *ac, uint32_t format);
+int q6asm_open_read_compressed(struct audio_client *ac, uint32_t format);
+
int q6asm_open_write(struct audio_client *ac, uint32_t format);
int q6asm_open_write_compressed(struct audio_client *ac, uint32_t format);
@@ -239,6 +242,9 @@
int q6asm_enc_cfg_blk_pcm(struct audio_client *ac,
uint32_t rate, uint32_t channels);
+int q6asm_enc_cfg_blk_pcm_native(struct audio_client *ac,
+ uint32_t rate, uint32_t channels);
+
int q6asm_enc_cfg_blk_multi_ch_pcm(struct audio_client *ac,
uint32_t rate, uint32_t channels);
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 0aa96d3..0e0ba5f 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -1,312 +1,780 @@
+#include <linux/debugfs.h>
+#include <linux/hardirq.h>
+#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqdesc.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/fs.h>
+
+#define IRQ_DOMAIN_MAP_LEGACY 0 /* driver allocated fixed range of irqs.
+ * ie. legacy 8259, gets irqs 1..15 */
+#define IRQ_DOMAIN_MAP_NOMAP 1 /* no fast reverse mapping */
+#define IRQ_DOMAIN_MAP_LINEAR 2 /* linear map of interrupts */
+#define IRQ_DOMAIN_MAP_TREE 3 /* radix tree */
static LIST_HEAD(irq_domain_list);
static DEFINE_MUTEX(irq_domain_mutex);
-/**
- * irq_domain_add() - Register an irq_domain
- * @domain: ptr to initialized irq_domain structure
- *
- * Adds a irq_domain structure. The irq_domain must at a minimum be
- * initialized with an ops structure pointer, and either a ->to_irq hook or
- * a valid irq_base value. The irq range must be mutually exclusive with
- * domains already registered. Everything else is optional.
- */
-int irq_domain_add(struct irq_domain *domain)
-{
- struct irq_domain *curr;
- uint32_t d_highirq = domain->irq_base + domain->nr_irq - 1;
+static DEFINE_MUTEX(revmap_trees_mutex);
+static struct irq_domain *irq_default_domain;
- if (!domain->nr_irq)
- return -EINVAL;
+/**
+ * irq_domain_alloc() - Allocate a new irq_domain data structure
+ * @of_node: optional device-tree node of the interrupt controller
+ * @revmap_type: type of reverse mapping to use
+ * @ops: map/unmap domain callbacks
+ * @host_data: Controller private data pointer
+ *
+ * Allocates and initializes an irq_domain structure. Caller is expected to
+ * register allocated irq_domain with irq_domain_register(). Returns pointer
+ * to IRQ domain, or NULL on failure.
+ */
+static struct irq_domain *irq_domain_alloc(struct device_node *of_node,
+ unsigned int revmap_type,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ struct irq_domain *domain;
+
+ domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+ if (WARN_ON(!domain))
+ return NULL;
+
+ /* Fill structure */
+ domain->revmap_type = revmap_type;
+ domain->ops = ops;
+ domain->host_data = host_data;
+ domain->of_node = of_node_get(of_node);
+
+ return domain;
+}
+
+static void irq_domain_add(struct irq_domain *domain)
+{
+ mutex_lock(&irq_domain_mutex);
+ list_add(&domain->link, &irq_domain_list);
+ mutex_unlock(&irq_domain_mutex);
+ pr_debug("irq: Allocated domain of type %d @0x%p\n",
+ domain->revmap_type, domain);
+}
+
+static unsigned int irq_domain_legacy_revmap(struct irq_domain *domain,
+ irq_hw_number_t hwirq)
+{
+ irq_hw_number_t first_hwirq = domain->revmap_data.legacy.first_hwirq;
+ int size = domain->revmap_data.legacy.size;
+
+ if (WARN_ON(hwirq < first_hwirq || hwirq >= first_hwirq + size))
+ return 0;
+ return hwirq - first_hwirq + domain->revmap_data.legacy.first_irq;
+}
+
+/**
+ * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
+ * @of_node: pointer to interrupt controller's device tree node.
+ * @size: total number of irqs in legacy mapping
+ * @first_irq: first number of irq block assigned to the domain
+ * @first_hwirq: first hwirq number to use for the translation. Should normally
+ * be '0', but a positive integer can be used if the effective
+ * hwirqs numbering does not begin at zero.
+ * @ops: map/unmap domain callbacks
+ * @host_data: Controller private data pointer
+ *
+ * Note: the map() callback will be called before this function returns
+ * for all legacy interrupts except 0 (which is always the invalid irq for
+ * a legacy controller).
+ */
+struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
+ unsigned int size,
+ unsigned int first_irq,
+ irq_hw_number_t first_hwirq,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ struct irq_domain *domain;
+ unsigned int i;
+
+ domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LEGACY, ops, host_data);
+ if (!domain)
+ return NULL;
+
+ domain->revmap_data.legacy.first_irq = first_irq;
+ domain->revmap_data.legacy.first_hwirq = first_hwirq;
+ domain->revmap_data.legacy.size = size;
mutex_lock(&irq_domain_mutex);
- /* insert in ascending order of domain->irq_base */
- list_for_each_entry(curr, &irq_domain_list, list) {
- uint32_t c_highirq = curr->irq_base + curr->nr_irq - 1;
- if (domain->irq_base < curr->irq_base &&
- d_highirq < curr->irq_base) {
- break;
- }
- if (d_highirq <= c_highirq) {
+ /* Verify that all the irqs are available */
+ for (i = 0; i < size; i++) {
+ int irq = first_irq + i;
+ struct irq_data *irq_data = irq_get_irq_data(irq);
+
+ if (WARN_ON(!irq_data || irq_data->domain)) {
mutex_unlock(&irq_domain_mutex);
- return -EINVAL;
+ of_node_put(domain->of_node);
+ kfree(domain);
+ return NULL;
}
}
- list_add_tail(&domain->list, &curr->list);
+
+ /* Claim all of the irqs before registering a legacy domain */
+ for (i = 0; i < size; i++) {
+ struct irq_data *irq_data = irq_get_irq_data(first_irq + i);
+ irq_data->hwirq = first_hwirq + i;
+ irq_data->domain = domain;
+ }
mutex_unlock(&irq_domain_mutex);
+ for (i = 0; i < size; i++) {
+ int irq = first_irq + i;
+ int hwirq = first_hwirq + i;
+
+ /* IRQ0 gets ignored */
+ if (!irq)
+ continue;
+
+ /* Legacy flags are left to default at this point,
+ * one can then use irq_create_mapping() to
+ * explicitly change them
+ */
+ ops->map(domain, irq, hwirq);
+
+ /* Clear norequest flags */
+ irq_clear_status_flags(irq, IRQ_NOREQUEST);
+ }
+
+ irq_domain_add(domain);
+ return domain;
+}
+
+/**
+ * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain.
+ * @of_node: pointer to interrupt controller's device tree node.
+ * @size: total number of irqs in the linear mapping
+ * @ops: map/unmap domain callbacks
+ * @host_data: Controller private data pointer
+ */
+struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
+ unsigned int size,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ struct irq_domain *domain;
+ unsigned int *revmap;
+
+ revmap = kzalloc(sizeof(*revmap) * size, GFP_KERNEL);
+ if (WARN_ON(!revmap))
+ return NULL;
+
+ domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LINEAR, ops, host_data);
+ if (!domain) {
+ kfree(revmap);
+ return NULL;
+ }
+ domain->revmap_data.linear.size = size;
+ domain->revmap_data.linear.revmap = revmap;
+ irq_domain_add(domain);
+ return domain;
+}
+
+struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
+ unsigned int max_irq,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ struct irq_domain *domain = irq_domain_alloc(of_node,
+ IRQ_DOMAIN_MAP_NOMAP, ops, host_data);
+ if (domain) {
+ domain->revmap_data.nomap.max_irq = max_irq ? max_irq : ~0;
+ irq_domain_add(domain);
+ }
+ return domain;
+}
+
+/**
+ * irq_domain_add_tree()
+ * @of_node: pointer to interrupt controller's device tree node.
+ * @ops: map/unmap domain callbacks
+ *
+ * Note: The radix tree will be allocated later during boot automatically
+ * (the reverse mapping will use the slow path until that happens).
+ */
+struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ struct irq_domain *domain = irq_domain_alloc(of_node,
+ IRQ_DOMAIN_MAP_TREE, ops, host_data);
+ if (domain) {
+ INIT_RADIX_TREE(&domain->revmap_data.tree, GFP_KERNEL);
+ irq_domain_add(domain);
+ }
+ return domain;
+}
+
+/**
+ * irq_find_host() - Locates a domain for a given device node
+ * @node: device-tree node of the interrupt controller
+ */
+struct irq_domain *irq_find_host(struct device_node *node)
+{
+ struct irq_domain *h, *found = NULL;
+ int rc;
+
+ /* We might want to match the legacy controller last since
+ * it might potentially be set to match all interrupts in
+ * the absence of a device node. This isn't a problem so far
+ * yet though...
+ */
+ mutex_lock(&irq_domain_mutex);
+ list_for_each_entry(h, &irq_domain_list, link) {
+ if (h->ops->match)
+ rc = h->ops->match(h, node);
+ else
+ rc = (h->of_node != NULL) && (h->of_node == node);
+
+ if (rc) {
+ found = h;
+ break;
+ }
+ }
+ mutex_unlock(&irq_domain_mutex);
+ return found;
+}
+EXPORT_SYMBOL_GPL(irq_find_host);
+
+/**
+ * irq_set_default_host() - Set a "default" irq domain
+ * @domain: default domain pointer
+ *
+ * For convenience, it's possible to set a "default" domain that will be used
+ * whenever NULL is passed to irq_create_mapping(). It makes life easier for
+ * platforms that want to manipulate a few hard coded interrupt numbers that
+ * aren't properly represented in the device-tree.
+ */
+void irq_set_default_host(struct irq_domain *domain)
+{
+ pr_debug("irq: Default domain set to @0x%p\n", domain);
+
+ irq_default_domain = domain;
+}
+
+static int irq_setup_virq(struct irq_domain *domain, unsigned int virq,
+ irq_hw_number_t hwirq)
+{
+ struct irq_data *irq_data = irq_get_irq_data(virq);
+
+ irq_data->hwirq = hwirq;
+ irq_data->domain = domain;
+ if (domain->ops->map(domain, virq, hwirq)) {
+ pr_debug("irq: -> mapping failed, freeing\n");
+ irq_data->domain = NULL;
+ irq_data->hwirq = 0;
+ return -1;
+ }
+
+ irq_clear_status_flags(virq, IRQ_NOREQUEST);
+
return 0;
}
/**
- * irq_domain_register() - Register an entire irq_domain
- * @domain: ptr to initialized irq_domain structure
+ * irq_create_direct_mapping() - Allocate an irq for direct mapping
+ * @domain: domain to allocate the irq for or NULL for default domain
*
- * Registers the entire irq_domain. The irq_domain must at a minimum be
- * initialized with an ops structure pointer, and either a ->to_irq hook or
- * a valid irq_base value. Everything else is optional.
+ * This routine is used for irq controllers which can choose the hardware
+ * interrupt numbers they generate. In such a case it's simplest to use
+ * the linux irq as the hardware interrupt number.
*/
-void irq_domain_register(struct irq_domain *domain)
+unsigned int irq_create_direct_mapping(struct irq_domain *domain)
{
- struct irq_data *d;
- int hwirq, irq;
+ unsigned int virq;
- irq_domain_for_each_irq(domain, hwirq, irq) {
- d = irq_get_irq_data(irq);
- if (!d) {
- WARN(1, "error: assigning domain to non existant irq_desc");
- return;
- }
- if (d->domain) {
- /* things are broken; just report, don't clean up */
- WARN(1, "error: irq_desc already assigned to a domain");
- return;
- }
- d->domain = domain;
- d->hwirq = hwirq;
+ if (domain == NULL)
+ domain = irq_default_domain;
+
+ BUG_ON(domain == NULL);
+ WARN_ON(domain->revmap_type != IRQ_DOMAIN_MAP_NOMAP);
+
+ virq = irq_alloc_desc_from(1, 0);
+ if (!virq) {
+ pr_debug("irq: create_direct virq allocation failed\n");
+ return 0;
}
+ if (virq >= domain->revmap_data.nomap.max_irq) {
+ pr_err("ERROR: no free irqs available below %i maximum\n",
+ domain->revmap_data.nomap.max_irq);
+ irq_free_desc(virq);
+ return 0;
+ }
+ pr_debug("irq: create_direct obtained virq %d\n", virq);
+
+ if (irq_setup_virq(domain, virq, virq)) {
+ irq_free_desc(virq);
+ return 0;
+ }
+
+ return virq;
}
/**
- * irq_domain_register_irq() - Register an irq_domain
- * @domain: ptr to initialized irq_domain structure
- * @hwirq: irq_domain hwirq to register
+ * irq_create_mapping() - Map a hardware interrupt into linux irq space
+ * @domain: domain owning this hardware interrupt or NULL for default domain
+ * @hwirq: hardware irq number in that domain space
*
- * Registers a specific hwirq within the irq_domain. The irq_domain
- * must at a minimum be initialized with an ops structure pointer, and
- * either a ->to_irq hook or a valid irq_base value. Everything else is
- * optional.
+ * Only one mapping per hardware interrupt is permitted. Returns a linux
+ * irq number.
+ * If the sense/trigger is to be specified, set_irq_type() should be called
+ * on the number returned from that call.
*/
-void irq_domain_register_irq(struct irq_domain *domain, int hwirq)
+unsigned int irq_create_mapping(struct irq_domain *domain,
+ irq_hw_number_t hwirq)
{
- struct irq_data *d;
+ unsigned int hint;
+ int virq;
- d = irq_get_irq_data(irq_domain_to_irq(domain, hwirq));
- if (!d) {
- WARN(1, "error: assigning domain to non existant irq_desc");
- return;
+ pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
+
+ /* Look for default domain if necessary */
+ if (domain == NULL)
+ domain = irq_default_domain;
+ if (domain == NULL) {
+ printk(KERN_WARNING "irq_create_mapping called for"
+ " NULL domain, hwirq=%lx\n", hwirq);
+ WARN_ON(1);
+ return 0;
}
- if (d->domain) {
- /* things are broken; just report, don't clean up */
- WARN(1, "error: irq_desc already assigned to a domain");
- return;
+ pr_debug("irq: -> using domain @%p\n", domain);
+
+ /* Check if mapping already exists */
+ virq = irq_find_mapping(domain, hwirq);
+ if (virq) {
+ pr_debug("irq: -> existing mapping on virq %d\n", virq);
+ return virq;
}
- d->domain = domain;
- d->hwirq = hwirq;
-}
-/**
- * irq_domain_del() - Removes a irq_domain from the system
- * @domain: ptr to registered irq_domain.
- */
-void irq_domain_del(struct irq_domain *domain)
-{
- mutex_lock(&irq_domain_mutex);
- list_del(&domain->list);
- mutex_unlock(&irq_domain_mutex);
-}
+ /* Get a virtual interrupt number */
+ if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
+ return irq_domain_legacy_revmap(domain, hwirq);
-/**
- * irq_domain_unregister() - Unregister an irq_domain
- * @domain: ptr to registered irq_domain.
- */
-void irq_domain_unregister(struct irq_domain *domain)
-{
- struct irq_data *d;
- int hwirq, irq;
-
- /* Clear the irq_domain assignments */
- irq_domain_for_each_irq(domain, hwirq, irq) {
- d = irq_get_irq_data(irq);
- d->domain = NULL;
+ /* Allocate a virtual interrupt number */
+ hint = hwirq % nr_irqs;
+ if (hint == 0)
+ hint++;
+ virq = irq_alloc_desc_from(hint, 0);
+ if (virq <= 0)
+ virq = irq_alloc_desc_from(1, 0);
+ if (virq <= 0) {
+ pr_debug("irq: -> virq allocation failed\n");
+ return 0;
}
-}
-/**
- * irq_domain_unregister_irq() - Unregister a hwirq within a irq_domain
- * @domain: ptr to registered irq_domain.
- * @hwirq: irq_domain hwirq to unregister.
- */
-void irq_domain_unregister_irq(struct irq_domain *domain, int hwirq)
-{
- struct irq_data *d;
-
- /* Clear the irq_domain assignment */
- d = irq_get_irq_data(irq_domain_to_irq(domain, hwirq));
- d->domain = NULL;
-}
-
-/**
- * irq_domain_find_free_range() - Find an available irq range
- * @from: lowest logical irq number to request from
- * @cnt: number of interrupts to search for
- *
- * Finds an available logical irq range from the domains specified
- * on the system. The from parameter can be used to allocate a range
- * at least as great as the specified irq number.
- */
-int irq_domain_find_free_range(unsigned int from, unsigned int cnt)
-{
- struct irq_domain *curr, *prev = NULL;
-
- if (list_empty(&irq_domain_list))
- return from;
-
- list_for_each_entry(curr, &irq_domain_list, list) {
- if (prev == NULL) {
- if ((from + cnt - 1) < curr->irq_base)
- return from;
- } else {
- uint32_t p_next_irq = prev->irq_base + prev->nr_irq;
- uint32_t start_irq;
- if (from >= curr->irq_base)
- continue;
- if (from < p_next_irq)
- start_irq = p_next_irq;
- else
- start_irq = from;
- if ((curr->irq_base - start_irq) >= cnt)
- return p_next_irq;
- }
- prev = curr;
+ if (irq_setup_virq(domain, virq, hwirq)) {
+ if (domain->revmap_type != IRQ_DOMAIN_MAP_LEGACY)
+ irq_free_desc(virq);
+ return 0;
}
- curr = list_entry(curr->list.prev, struct irq_domain, list);
- return from > curr->irq_base + curr->nr_irq ?
- from : curr->irq_base + curr->nr_irq;
+ pr_debug("irq: irq %lu on domain %s mapped to virtual irq %u\n",
+ hwirq, domain->of_node ? domain->of_node->full_name : "null", virq);
+
+ return virq;
}
+EXPORT_SYMBOL_GPL(irq_create_mapping);
-#if defined(CONFIG_OF_IRQ)
-/**
- * irq_create_of_mapping() - Map a linux irq number from a DT interrupt spec
- *
- * Used by the device tree interrupt mapping code to translate a device tree
- * interrupt specifier to a valid linux irq number. Returns either a valid
- * linux IRQ number or 0.
- *
- * When the caller no longer need the irq number returned by this function it
- * should arrange to call irq_dispose_mapping().
- */
unsigned int irq_create_of_mapping(struct device_node *controller,
const u32 *intspec, unsigned int intsize)
{
struct irq_domain *domain;
- unsigned long hwirq;
- unsigned int irq, type;
- int rc = -EINVAL;
+ irq_hw_number_t hwirq;
+ unsigned int type = IRQ_TYPE_NONE;
+ unsigned int virq;
- /* Find a domain which can translate the irq spec */
- mutex_lock(&irq_domain_mutex);
- list_for_each_entry(domain, &irq_domain_list, list) {
- if (!domain->ops->dt_translate)
- continue;
-
- rc = domain->ops->dt_translate(domain, controller,
- intspec, intsize, &hwirq, &type);
- if (rc == 0)
- break;
- }
- mutex_unlock(&irq_domain_mutex);
-
- if (rc != 0)
+ domain = controller ? irq_find_host(controller) : irq_default_domain;
+ if (!domain) {
+#ifdef CONFIG_MIPS
+ /*
+ * Workaround to avoid breaking interrupt controller drivers
+ * that don't yet register an irq_domain. This is temporary
+ * code. ~~~gcl, Feb 24, 2012
+ *
+ * Scheduled for removal in Linux v3.6. That should be enough
+ * time.
+ */
+ if (intsize > 0)
+ return intspec[0];
+#endif
+ printk(KERN_WARNING "irq: no irq domain found for %s !\n",
+ controller->full_name);
return 0;
+ }
- irq = irq_domain_to_irq(domain, hwirq);
- if (type != IRQ_TYPE_NONE)
- irq_set_irq_type(irq, type);
- pr_debug("%s: mapped hwirq=%i to irq=%i, flags=%x\n",
- controller->full_name, (int)hwirq, irq, type);
- return irq;
+ /* If domain has no translation, then we assume interrupt line */
+ if (domain->ops->xlate == NULL)
+ hwirq = intspec[0];
+ else {
+ if (domain->ops->xlate(domain, controller, intspec, intsize,
+ &hwirq, &type))
+ return 0;
+ }
+
+ /* Create mapping */
+ virq = irq_create_mapping(domain, hwirq);
+ if (!virq)
+ return virq;
+
+ /* Set type if specified and different than the current one */
+ if (type != IRQ_TYPE_NONE &&
+ type != (irqd_get_trigger_type(irq_get_irq_data(virq))))
+ irq_set_irq_type(virq, type);
+ return virq;
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);
/**
- * irq_dispose_mapping() - Discard a mapping created by irq_create_of_mapping()
- * @irq: linux irq number to be discarded
- *
- * Calling this function indicates the caller no longer needs a reference to
- * the linux irq number returned by a prior call to irq_create_of_mapping().
+ * irq_dispose_mapping() - Unmap an interrupt
+ * @virq: linux irq number of the interrupt to unmap
*/
-void irq_dispose_mapping(unsigned int irq)
+void irq_dispose_mapping(unsigned int virq)
{
- /*
- * nothing yet; will be filled when support for dynamic allocation of
- * irq_descs is added to irq_domain
- */
+ struct irq_data *irq_data = irq_get_irq_data(virq);
+ struct irq_domain *domain;
+ irq_hw_number_t hwirq;
+
+ if (!virq || !irq_data)
+ return;
+
+ domain = irq_data->domain;
+ if (WARN_ON(domain == NULL))
+ return;
+
+ /* Never unmap legacy interrupts */
+ if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
+ return;
+
+ irq_set_status_flags(virq, IRQ_NOREQUEST);
+
+ /* remove chip and handler */
+ irq_set_chip_and_handler(virq, NULL, NULL);
+
+ /* Make sure it's completed */
+ synchronize_irq(virq);
+
+ /* Tell the PIC about it */
+ if (domain->ops->unmap)
+ domain->ops->unmap(domain, virq);
+ smp_mb();
+
+ /* Clear reverse map */
+ hwirq = irq_data->hwirq;
+ switch (domain->revmap_type) {
+ case IRQ_DOMAIN_MAP_LINEAR:
+ if (hwirq < domain->revmap_data.linear.size)
+ domain->revmap_data.linear.revmap[hwirq] = 0;
+ break;
+ case IRQ_DOMAIN_MAP_TREE:
+ mutex_lock(&revmap_trees_mutex);
+ radix_tree_delete(&domain->revmap_data.tree, hwirq);
+ mutex_unlock(&revmap_trees_mutex);
+ break;
+ }
+
+ irq_free_desc(virq);
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);
-int irq_domain_simple_dt_translate(struct irq_domain *d,
- struct device_node *controller,
- const u32 *intspec, unsigned int intsize,
- unsigned long *out_hwirq, unsigned int *out_type)
+/**
+ * irq_find_mapping() - Find a linux irq from an hw irq number.
+ * @domain: domain owning this hardware interrupt
+ * @hwirq: hardware irq number in that domain space
+ *
+ * This is a slow path, for use by generic code. It's expected that an
+ * irq controller implementation directly calls the appropriate low level
+ * mapping function.
+ */
+unsigned int irq_find_mapping(struct irq_domain *domain,
+ irq_hw_number_t hwirq)
{
- if (d->of_node != controller)
- return -EINVAL;
- if (intsize < 1)
- return -EINVAL;
- if (d->nr_irq && ((intspec[0] < d->hwirq_base) ||
- (intspec[0] >= d->hwirq_base + d->nr_irq)))
- return -EINVAL;
+ unsigned int i;
+ unsigned int hint = hwirq % nr_irqs;
- *out_hwirq = intspec[0];
- *out_type = IRQ_TYPE_NONE;
- if (intsize > 1)
- *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
+ /* Look for default domain if necessary */
+ if (domain == NULL)
+ domain = irq_default_domain;
+ if (domain == NULL)
+ return 0;
+
+ /* legacy -> bail early */
+ if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
+ return irq_domain_legacy_revmap(domain, hwirq);
+
+ /* Slow path does a linear search of the map */
+ if (hint == 0)
+ hint = 1;
+ i = hint;
+ do {
+ struct irq_data *data = irq_get_irq_data(i);
+ if (data && (data->domain == domain) && (data->hwirq == hwirq))
+ return i;
+ i++;
+ if (i >= nr_irqs)
+ i = 1;
+ } while (i != hint);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(irq_find_mapping);
+
+/**
+ * irq_radix_revmap_lookup() - Find a linux irq from a hw irq number.
+ * @domain: domain owning this hardware interrupt
+ * @hwirq: hardware irq number in that domain space
+ *
+ * This is a fast path, for use by irq controller code that uses radix tree
+ * revmaps
+ */
+unsigned int irq_radix_revmap_lookup(struct irq_domain *domain,
+ irq_hw_number_t hwirq)
+{
+ struct irq_data *irq_data;
+
+ if (WARN_ON_ONCE(domain->revmap_type != IRQ_DOMAIN_MAP_TREE))
+ return irq_find_mapping(domain, hwirq);
+
+ /*
+ * Freeing an irq can delete nodes along the path to
+ * do the lookup via call_rcu.
+ */
+ rcu_read_lock();
+ irq_data = radix_tree_lookup(&domain->revmap_data.tree, hwirq);
+ rcu_read_unlock();
+
+ /*
+ * If found in radix tree, then fine.
+ * Else fallback to linear lookup - this should not happen in practice
+ * as it means that we failed to insert the node in the radix tree.
+ */
+ return irq_data ? irq_data->irq : irq_find_mapping(domain, hwirq);
+}
+
+/**
+ * irq_radix_revmap_insert() - Insert a hw irq to linux irq number mapping.
+ * @domain: domain owning this hardware interrupt
+ * @virq: linux irq number
+ * @hwirq: hardware irq number in that domain space
+ *
+ * This is for use by irq controllers that use a radix tree reverse
+ * mapping for fast lookup.
+ */
+void irq_radix_revmap_insert(struct irq_domain *domain, unsigned int virq,
+ irq_hw_number_t hwirq)
+{
+ struct irq_data *irq_data = irq_get_irq_data(virq);
+
+ if (WARN_ON(domain->revmap_type != IRQ_DOMAIN_MAP_TREE))
+ return;
+
+ if (virq) {
+ mutex_lock(&revmap_trees_mutex);
+ radix_tree_insert(&domain->revmap_data.tree, hwirq, irq_data);
+ mutex_unlock(&revmap_trees_mutex);
+ }
+}
+
+/**
+ * irq_linear_revmap() - Find a linux irq from a hw irq number.
+ * @domain: domain owning this hardware interrupt
+ * @hwirq: hardware irq number in that domain space
+ *
+ * This is a fast path, for use by irq controller code that uses linear
+ * revmaps. It falls back to the slow path if the revmap doesn't exist
+ * yet, and will create the revmap entry with appropriate locking.
+ */
+unsigned int irq_linear_revmap(struct irq_domain *domain,
+ irq_hw_number_t hwirq)
+{
+ unsigned int *revmap;
+
+ if (WARN_ON_ONCE(domain->revmap_type != IRQ_DOMAIN_MAP_LINEAR))
+ return irq_find_mapping(domain, hwirq);
+
+ /* Check revmap bounds */
+ if (unlikely(hwirq >= domain->revmap_data.linear.size))
+ return irq_find_mapping(domain, hwirq);
+
+ /* Check if revmap was allocated */
+ revmap = domain->revmap_data.linear.revmap;
+ if (unlikely(revmap == NULL))
+ return irq_find_mapping(domain, hwirq);
+
+ /* Fill up revmap with slow path if no mapping found */
+ if (unlikely(!revmap[hwirq]))
+ revmap[hwirq] = irq_find_mapping(domain, hwirq);
+
+ return revmap[hwirq];
+}
+
+#ifdef CONFIG_IRQ_DOMAIN_DEBUG
+static int virq_debug_show(struct seq_file *m, void *private)
+{
+ unsigned long flags;
+ struct irq_desc *desc;
+ const char *p;
+ static const char none[] = "none";
+ void *data;
+ int i;
+
+ seq_printf(m, "%-5s %-7s %-15s %-*s %s\n", "irq", "hwirq",
+ "chip name", (int)(2 * sizeof(void *) + 2), "chip data",
+ "domain name");
+
+ for (i = 1; i < nr_irqs; i++) {
+ desc = irq_to_desc(i);
+ if (!desc)
+ continue;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+
+ if (desc->action && desc->action->handler) {
+ struct irq_chip *chip;
+
+ seq_printf(m, "%5d ", i);
+ seq_printf(m, "0x%05lx ", desc->irq_data.hwirq);
+
+ chip = irq_desc_get_chip(desc);
+ if (chip && chip->name)
+ p = chip->name;
+ else
+ p = none;
+ seq_printf(m, "%-15s ", p);
+
+ data = irq_desc_get_chip_data(desc);
+ seq_printf(m, data ? "0x%p " : " %p ", data);
+
+ if (desc->irq_data.domain && desc->irq_data.domain->of_node)
+ p = desc->irq_data.domain->of_node->full_name;
+ else
+ p = none;
+ seq_printf(m, "%s\n", p);
+ }
+
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ }
+
+ return 0;
+}
+
+static int virq_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, virq_debug_show, inode->i_private);
+}
+
+static const struct file_operations virq_debug_fops = {
+ .open = virq_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init irq_debugfs_init(void)
+{
+ if (debugfs_create_file("irq_domain_mapping", S_IRUGO, NULL,
+ NULL, &virq_debug_fops) == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+__initcall(irq_debugfs_init);
+#endif /* CONFIG_IRQ_DOMAIN_DEBUG */
+
+int irq_domain_simple_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
return 0;
}
/**
- * irq_domain_create_simple() - Set up a 'simple' translation range
+ * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
+ *
+ * Device Tree IRQ specifier translation function which works with one cell
+ * bindings where the cell value maps directly to the hwirq number.
*/
-void irq_domain_add_simple(struct device_node *controller, int irq_base)
+int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq, unsigned int *out_type)
{
- struct irq_domain *domain;
- int rc;
-
- domain = kzalloc(sizeof(*domain), GFP_KERNEL);
- if (!domain) {
- WARN_ON(1);
- return;
- }
-
- domain->irq_base = irq_base;
- domain->of_node = of_node_get(controller);
- domain->ops = &irq_domain_simple_ops;
- rc = irq_domain_add(domain);
- if (rc) {
- WARN(1, "Unable to create irq domain\n");
- return;
- }
- irq_domain_register(domain);
+ if (WARN_ON(intsize < 1))
+ return -EINVAL;
+ *out_hwirq = intspec[0];
+ *out_type = IRQ_TYPE_NONE;
+ return 0;
}
-EXPORT_SYMBOL_GPL(irq_domain_add_simple);
+EXPORT_SYMBOL_GPL(irq_domain_xlate_onecell);
+/**
+ * irq_domain_xlate_twocell() - Generic xlate for direct two cell bindings
+ *
+ * Device Tree IRQ specifier translation function which works with two cell
+ * bindings where the cell values map directly to the hwirq number
+ * and linux irq flags.
+ */
+int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_type)
+{
+ if (WARN_ON(intsize < 2))
+ return -EINVAL;
+ *out_hwirq = intspec[0];
+ *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell);
+
+/**
+ * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings
+ *
+ * Device Tree IRQ specifier translation function which works with either one
+ * or two cell bindings where the cell values map directly to the hwirq number
+ * and linux irq flags.
+ *
+ * Note: don't use this function unless your interrupt controller explicitly
+ * supports both one and two cell bindings. For the majority of controllers
+ * the _onecell() or _twocell() variants above should be used.
+ */
+int irq_domain_xlate_onetwocell(struct irq_domain *d,
+ struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq, unsigned int *out_type)
+{
+ if (WARN_ON(intsize < 1))
+ return -EINVAL;
+ *out_hwirq = intspec[0];
+ *out_type = (intsize > 1) ? intspec[1] : IRQ_TYPE_NONE;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell);
+
+const struct irq_domain_ops irq_domain_simple_ops = {
+ .map = irq_domain_simple_map,
+ .xlate = irq_domain_xlate_onetwocell,
+};
+EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
+
+#ifdef CONFIG_OF_IRQ
void irq_domain_generate_simple(const struct of_device_id *match,
u64 phys_base, unsigned int irq_start)
{
struct device_node *node;
- pr_info("looking for phys_base=%llx, irq_start=%i\n",
+ pr_debug("looking for phys_base=%llx, irq_start=%i\n",
(unsigned long long) phys_base, (int) irq_start);
node = of_find_matching_node_by_address(NULL, match, phys_base);
if (node)
- irq_domain_add_simple(node, irq_start);
- else
- pr_info("no node found\n");
+ irq_domain_add_legacy(node, 32, irq_start, 0,
+ &irq_domain_simple_ops, NULL);
}
EXPORT_SYMBOL_GPL(irq_domain_generate_simple);
-#endif /* CONFIG_OF_IRQ */
-
-struct irq_domain_ops irq_domain_simple_ops = {
-#ifdef CONFIG_OF_IRQ
- .dt_translate = irq_domain_simple_dt_translate,
-#endif /* CONFIG_OF_IRQ */
-};
-EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
+#endif
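
Taken together, the irqdomain rework above gives controller drivers the irq_domain_add_*() constructors plus irq_create_mapping()/irq_find_mapping() in place of the old irq_base bookkeeping. A minimal sketch of how a hypothetical driver might use the new API follows; the "foo" controller, its irq_chip, the init entry point and the hwirq numbers are invented for illustration, and only the irq_domain_* and irq_set_* calls come from the code above.

    #include <linux/irq.h>
    #include <linux/irqdomain.h>
    #include <linux/of.h>
    #include <linux/errno.h>

    static struct irq_chip foo_irq_chip = {
            .name = "foo",                  /* hypothetical controller */
    };

    /* Called by irq_create_mapping() for every hwirq that gets a virq. */
    static int foo_irq_map(struct irq_domain *d, unsigned int virq,
                           irq_hw_number_t hwirq)
    {
            irq_set_chip_and_handler(virq, &foo_irq_chip, handle_level_irq);
            irq_set_chip_data(virq, d->host_data);
            return 0;
    }

    static const struct irq_domain_ops foo_irq_ops = {
            .map   = foo_irq_map,
            .xlate = irq_domain_xlate_onecell,      /* one-cell DT bindings */
    };

    static int foo_init(struct device_node *np)
    {
            struct irq_domain *domain;

            /* One linear domain covering 32 hardware interrupts. */
            domain = irq_domain_add_linear(np, 32, &foo_irq_ops, NULL);
            if (!domain)
                    return -ENOMEM;

            /* Linux irq numbers are then created on demand per hwirq. */
            return irq_create_mapping(domain, 3) ? 0 : -EINVAL;
    }
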
diff --git a/kernel/panic.c b/kernel/panic.c
index b47ca87..8c6babc 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -81,6 +81,14 @@
int state = 0;
/*
+ * Disable local interrupts. This will prevent panic_smp_self_stop
+ * from deadlocking the first cpu that invokes the panic, since
+ * there is nothing to prevent an interrupt handler (that runs
+ * after the panic_lock is acquired) from invoking panic again.
+ */
+ local_irq_disable();
+
+ /*
* It's possible to come here directly from a panic-assertion and
* not have preempt disabled. Some functions called from here want
* preempt to be disabled. No point enabling it later though...
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 980b846..e9e8521 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1993,10 +1993,10 @@
frames_to_ack = 0;
}
- /* Ack now if the tx window is 3/4ths full.
+ /* Ack now if the window is 3/4ths full.
* Calculate without mul or div
*/
- threshold = pi->tx_win;
+ threshold = pi->ack_win;
threshold += threshold << 1;
threshold >>= 2;
@@ -3105,6 +3105,7 @@
pi->tx_win_max = L2CAP_TX_WIN_MAX_ENHANCED;
pi->extended_control = 0;
}
+ pi->ack_win = pi->tx_win;
}
static void l2cap_aggregate_fs(struct hci_ext_fs *cur,
@@ -3844,10 +3845,7 @@
break;
case L2CAP_CONF_EXT_WINDOW:
- pi->tx_win = val;
-
- if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED)
- pi->tx_win = L2CAP_TX_WIN_MAX_ENHANCED;
+ pi->ack_win = min_t(u16, val, pi->ack_win);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW,
2, pi->tx_win);
@@ -3869,6 +3867,10 @@
pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
pi->mps = le16_to_cpu(rfc.max_pdu_size);
+ if (!pi->extended_control) {
+ pi->ack_win = min_t(u16, pi->ack_win,
+ rfc.txwin_size);
+ }
break;
case L2CAP_MODE_STREAMING:
pi->mps = le16_to_cpu(rfc.max_pdu_size);
@@ -3901,6 +3903,7 @@
int type, olen;
unsigned long val;
struct l2cap_conf_rfc rfc;
+ u16 txwin_ext = pi->ack_win;
BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
@@ -3909,6 +3912,7 @@
rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
+ rfc.txwin_size = min_t(u16, pi->ack_win, L2CAP_DEFAULT_TX_WINDOW);
if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
return;
@@ -3920,16 +3924,22 @@
case L2CAP_CONF_RFC:
if (olen == sizeof(rfc))
memcpy(&rfc, (void *)val, olen);
- goto done;
+ break;
+ case L2CAP_CONF_EXT_WINDOW:
+ txwin_ext = val;
+ break;
}
}
-done:
switch (rfc.mode) {
case L2CAP_MODE_ERTM:
pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
pi->mps = le16_to_cpu(rfc.max_pdu_size);
+ if (pi->extended_control)
+ pi->ack_win = min_t(u16, pi->ack_win, txwin_ext);
+ else
+ pi->ack_win = min_t(u16, pi->ack_win, rfc.txwin_size);
break;
case L2CAP_MODE_STREAMING:
pi->mps = le16_to_cpu(rfc.max_pdu_size);
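
The acknowledgement threshold kept above still avoids a multiply and a divide: three quarters of the window is built as w + (w << 1) followed by >> 2, i.e. (3*w)/4 rounded down, and it is now derived from ack_win, which the option parsing clamps with min_t() against the remote window size. A small stand-alone illustration of the arithmetic, not taken from the Bluetooth code itself:

    #include <stdio.h>

    /* 3/4 of the ack window without mul or div, as in the L2CAP ack path. */
    static unsigned int three_quarters(unsigned int win)
    {
            unsigned int threshold = win;

            threshold += threshold << 1;    /* threshold = 3 * win     */
            threshold >>= 2;                /* threshold = 3 * win / 4 */
            return threshold;
    }

    int main(void)
    {
            /* e.g. a negotiated ack window of 63 frames acks after 47 */
            printf("%u\n", three_quarters(63));
            return 0;
    }
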
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 0ad1633..4829e6b 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -1270,6 +1270,7 @@
pi->scid = 0;
pi->dcid = 0;
pi->tx_win_max = L2CAP_TX_WIN_MAX_ENHANCED;
+ pi->ack_win = pi->tx_win;
pi->extended_control = 0;
pi->local_conf.fcs = pi->fcs;
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 5034393..947bd85 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -2779,7 +2779,7 @@
pcm_file = file->private_data;
- if (((cmd >> 8) & 0xff) != 'A')
+ if ((((cmd >> 8) & 0xff) != 'A') && (((cmd >> 8) & 0xff) != 'C'))
return -ENOTTY;
return snd_pcm_capture_ioctl1(file, pcm_file->substream, cmd,
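
In the hunk above, bits 8-15 of the ioctl command encode its type character; the capture path previously accepted only the standard ALSA type 'A' and now also lets type 'C' commands through, presumably the compressed-audio controls added elsewhere in this series. An equivalent decoding using the generic ioctl helpers, shown only for illustration and not part of the patch:

    #include <linux/ioctl.h>

    /* _IOC_TYPE() extracts the same byte as (cmd >> 8) & 0xff. */
    static int snd_ioctl_type_ok(unsigned int cmd)
    {
            unsigned char type = _IOC_TYPE(cmd);

            return type == 'A' || type == 'C';
    }
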
diff --git a/sound/soc/codecs/wcd9310.c b/sound/soc/codecs/wcd9310.c
index e85e9f5..f279f7d 100644
--- a/sound/soc/codecs/wcd9310.c
+++ b/sound/soc/codecs/wcd9310.c
@@ -18,6 +18,7 @@
#include <linux/printk.h>
#include <linux/ratelimit.h>
#include <linux/debugfs.h>
+#include <linux/wait.h>
#include <linux/mfd/wcd9xxx/core.h>
#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
#include <linux/mfd/wcd9xxx/wcd9310_registers.h>
@@ -47,6 +48,8 @@
#define MBHC_FW_READ_ATTEMPTS 15
#define MBHC_FW_READ_TIMEOUT 2000000
+#define SLIM_CLOSE_TIMEOUT 1000
+
enum {
MBHC_USE_HPHL_TRIGGER = 1,
MBHC_USE_MB_TRIGGER = 2
@@ -78,6 +81,8 @@
u32 *ch_num;
u32 ch_act;
u32 ch_tot;
+ u32 ch_mask;
+ wait_queue_head_t dai_wait;
};
#define TABLA_MCLK_RATE_12288KHZ 12288000
@@ -114,6 +119,11 @@
static const DECLARE_TLV_DB_SCALE(analog_gain, 0, 25, 1);
static struct snd_soc_dai_driver tabla_dai[];
static const DECLARE_TLV_DB_SCALE(aux_pga_gain, 0, 2, 0);
+static int tabla_codec_enable_slimrx(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event);
+static int tabla_codec_enable_slimtx(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event);
+
enum tabla_bandgap_type {
TABLA_BANDGAP_OFF = 0,
@@ -3931,50 +3941,277 @@
return 0;
}
+
+static struct snd_soc_dapm_widget tabla_dapm_aif_in_widgets[] = {
+
+ SND_SOC_DAPM_AIF_IN_E("SLIM RX1", "AIF1 Playback", 0, SND_SOC_NOPM, 1,
+ 0, tabla_codec_enable_slimrx,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_AIF_IN_E("SLIM RX2", "AIF1 Playback", 0, SND_SOC_NOPM, 2,
+ 0, tabla_codec_enable_slimrx,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_AIF_IN_E("SLIM RX3", "AIF1 Playback", 0, SND_SOC_NOPM, 3,
+ 0, tabla_codec_enable_slimrx,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_AIF_IN_E("SLIM RX4", "AIF3 Playback", 0, SND_SOC_NOPM, 4,
+ 0, tabla_codec_enable_slimrx,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_AIF_IN_E("SLIM RX5", "AIF3 Playback", 0, SND_SOC_NOPM, 5,
+ 0, tabla_codec_enable_slimrx,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_AIF_IN_E("SLIM RX6", "AIF2 Playback", 0, SND_SOC_NOPM, 6,
+ 0, tabla_codec_enable_slimrx,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_AIF_IN_E("SLIM RX7", "AIF2 Playback", 0, SND_SOC_NOPM, 7,
+ 0, tabla_codec_enable_slimrx,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+};
+
+static struct snd_soc_dapm_widget tabla_dapm_aif_out_widgets[] = {
+
+ SND_SOC_DAPM_AIF_OUT_E("SLIM TX1", "AIF2 Capture", 0, SND_SOC_NOPM, 1,
+ 0, tabla_codec_enable_slimtx,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_AIF_OUT_E("SLIM TX2", "AIF2 Capture", 0, SND_SOC_NOPM, 2,
+ 0, tabla_codec_enable_slimtx,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_AIF_OUT_E("SLIM TX3", "AIF3 Capture", 0, SND_SOC_NOPM, 3,
+ 0, tabla_codec_enable_slimtx,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_AIF_OUT_E("SLIM TX4", "AIF2 Capture", 0, SND_SOC_NOPM, 4,
+ 0, tabla_codec_enable_slimtx,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_AIF_OUT_E("SLIM TX5", "AIF3 Capture", 0, SND_SOC_NOPM, 5,
+ 0, tabla_codec_enable_slimtx,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_AIF_OUT_E("SLIM TX6", "AIF2 Capture", 0, SND_SOC_NOPM, 6,
+ 0, tabla_codec_enable_slimtx,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_AIF_OUT_E("SLIM TX7", "AIF1 Capture", 0, SND_SOC_NOPM, 7,
+ 0, tabla_codec_enable_slimtx,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_AIF_OUT_E("SLIM TX8", "AIF1 Capture", 0, SND_SOC_NOPM, 8,
+ 0, tabla_codec_enable_slimtx,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_AIF_OUT_E("SLIM TX9", "AIF1 Capture", 0, SND_SOC_NOPM, 9,
+ 0, tabla_codec_enable_slimtx,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_AIF_OUT_E("SLIM TX10", "AIF1 Capture", 0, SND_SOC_NOPM, 10,
+ 0, tabla_codec_enable_slimtx,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+};
+
+static int tabla_set_interpolator_rate(struct snd_soc_dai *dai,
+ u8 rx_fs_rate_reg_val, u32 compander_fs, u32 sample_rate)
+{
+ u32 i, j;
+ u8 rx_mix1_inp;
+ u16 rx_mix_1_reg_1, rx_mix_1_reg_2;
+ u16 rx_fs_reg;
+ u8 rx_mix_1_reg_1_val, rx_mix_1_reg_2_val;
+ struct snd_soc_codec *codec = dai->codec;
+ struct tabla_priv *tabla = snd_soc_codec_get_drvdata(codec);
+ struct snd_soc_dapm_widget *w = tabla_dapm_aif_in_widgets;
+
+ for (i = 0; i < ARRAY_SIZE(tabla_dapm_aif_in_widgets); i++) {
+
+ if (strncmp(dai->driver->playback.stream_name, w[i].sname, 13))
+ continue;
+
+ rx_mix1_inp = w[i].shift + 4;
+
+ if ((rx_mix1_inp < 0x5) || (rx_mix1_inp > 0xB)) {
+
+ pr_err("%s: Invalid SLIM RX%u port. widget = %s\n",
+ __func__, rx_mix1_inp - 4, w[i].name);
+ return -EINVAL;
+ }
+
+ rx_mix_1_reg_1 = TABLA_A_CDC_CONN_RX1_B1_CTL;
+
+ for (j = 0; j < NUM_INTERPOLATORS; j++) {
+
+ rx_mix_1_reg_2 = rx_mix_1_reg_1 + 1;
+
+ rx_mix_1_reg_1_val = snd_soc_read(codec,
+ rx_mix_1_reg_1);
+ rx_mix_1_reg_2_val = snd_soc_read(codec,
+ rx_mix_1_reg_2);
+
+ if (((rx_mix_1_reg_1_val & 0x0F) == rx_mix1_inp) ||
+ (((rx_mix_1_reg_1_val >> 4) & 0x0F) == rx_mix1_inp)
+ || ((rx_mix_1_reg_2_val & 0x0F) == rx_mix1_inp)) {
+
+ rx_fs_reg = TABLA_A_CDC_RX1_B5_CTL + 8 * j;
+
+ pr_debug("%s: %s connected to RX%u\n", __func__,
+ w[i].name, j + 1);
+
+ pr_debug("%s: set RX%u sample rate to %u\n",
+ __func__, j + 1, sample_rate);
+
+ snd_soc_update_bits(codec, rx_fs_reg,
+ 0xE0, rx_fs_rate_reg_val);
+
+ if (comp_rx_path[j] < COMPANDER_MAX)
+ tabla->comp_fs[comp_rx_path[j]]
+ = compander_fs;
+ }
+ if (j <= 2)
+ rx_mix_1_reg_1 += 3;
+ else
+ rx_mix_1_reg_1 += 2;
+ }
+ }
+ return 0;
+}
+
+static int tabla_set_decimator_rate(struct snd_soc_dai *dai,
+ u8 tx_fs_rate_reg_val, u32 sample_rate)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct snd_soc_dapm_widget *w = tabla_dapm_aif_out_widgets;
+
+ u32 i, tx_port;
+ u16 tx_port_reg, tx_fs_reg;
+ u8 tx_port_reg_val;
+ s8 decimator;
+
+ for (i = 0; i < ARRAY_SIZE(tabla_dapm_aif_out_widgets); i++) {
+
+ if (strncmp(dai->driver->capture.stream_name, w[i].sname, 12))
+ continue;
+
+ tx_port = w[i].shift;
+
+ if ((tx_port < 1) || (tx_port > NUM_DECIMATORS)) {
+ pr_err("%s: Invalid SLIM TX%u port. widget = %s\n",
+ __func__, tx_port, w[i].name);
+ return -EINVAL;
+ }
+
+ tx_port_reg = TABLA_A_CDC_CONN_TX_SB_B1_CTL + (tx_port - 1);
+ tx_port_reg_val = snd_soc_read(codec, tx_port_reg);
+
+ decimator = 0;
+
+ if ((tx_port >= 1) && (tx_port <= 6)) {
+
+ tx_port_reg_val = tx_port_reg_val & 0x0F;
+ if (tx_port_reg_val == 0x8)
+ decimator = tx_port;
+
+ } else if ((tx_port >= 7) && (tx_port <= NUM_DECIMATORS)) {
+
+ tx_port_reg_val = tx_port_reg_val & 0x1F;
+
+ if ((tx_port_reg_val >= 0x8) &&
+ (tx_port_reg_val <= 0x11)) {
+
+ decimator = (tx_port_reg_val - 0x8) + 1;
+ }
+ }
+
+ if (decimator) { /* SLIM_TX port has a DEC as input */
+
+ tx_fs_reg = TABLA_A_CDC_TX1_CLK_FS_CTL +
+ 8 * (decimator - 1);
+
+ pr_debug("%s: set DEC%u (-> SLIM_TX%u) rate to %u\n",
+ __func__, decimator, tx_port, sample_rate);
+
+ snd_soc_update_bits(codec, tx_fs_reg, 0x07,
+ tx_fs_rate_reg_val);
+
+ } else {
+ if ((tx_port_reg_val >= 0x1) &&
+ (tx_port_reg_val <= 0x7)) {
+
+ pr_debug("%s: RMIX%u going to SLIM TX%u\n",
+ __func__, tx_port_reg_val, tx_port);
+
+ } else if ((tx_port_reg_val >= 0x8) &&
+ (tx_port_reg_val <= 0x11)) {
+
+ pr_err("%s: ERROR: Should not be here\n",
+ __func__);
+ pr_err("%s: ERROR: DEC connected to SLIM TX%u\n"
+ , __func__, tx_port);
+ return -EINVAL;
+
+ } else if (tx_port_reg_val == 0) {
+ pr_debug("%s: no signal to SLIM TX%u\n",
+ __func__, tx_port);
+ } else {
+ pr_err("%s: ERROR: wrong signal to SLIM TX%u\n"
+ , __func__, tx_port);
+ pr_err("%s: ERROR: wrong signal = %u\n"
+ , __func__, tx_port_reg_val);
+ return -EINVAL;
+ }
+ }
+ }
+ return 0;
+}
+
static int tabla_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params,
- struct snd_soc_dai *dai)
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
{
struct snd_soc_codec *codec = dai->codec;
struct tabla_priv *tabla = snd_soc_codec_get_drvdata(dai->codec);
- u8 path, shift;
- u16 tx_fs_reg, rx_fs_reg;
- u8 tx_fs_rate, rx_fs_rate, rx_state, tx_state;
+ u8 tx_fs_rate_reg_val, rx_fs_rate_reg_val;
u32 compander_fs;
+ int ret;
pr_debug("%s: dai_name = %s DAI-ID %x rate %d num_ch %d\n", __func__,
- dai->name, dai->id, params_rate(params),
- params_channels(params));
+ dai->name, dai->id, params_rate(params),
+ params_channels(params));
switch (params_rate(params)) {
case 8000:
- tx_fs_rate = 0x00;
- rx_fs_rate = 0x00;
+ tx_fs_rate_reg_val = 0x00;
+ rx_fs_rate_reg_val = 0x00;
compander_fs = COMPANDER_FS_8KHZ;
break;
case 16000:
- tx_fs_rate = 0x01;
- rx_fs_rate = 0x20;
+ tx_fs_rate_reg_val = 0x01;
+ rx_fs_rate_reg_val = 0x20;
compander_fs = COMPANDER_FS_16KHZ;
break;
case 32000:
- tx_fs_rate = 0x02;
- rx_fs_rate = 0x40;
+ tx_fs_rate_reg_val = 0x02;
+ rx_fs_rate_reg_val = 0x40;
compander_fs = COMPANDER_FS_32KHZ;
break;
case 48000:
- tx_fs_rate = 0x03;
- rx_fs_rate = 0x60;
+ tx_fs_rate_reg_val = 0x03;
+ rx_fs_rate_reg_val = 0x60;
compander_fs = COMPANDER_FS_48KHZ;
break;
case 96000:
- tx_fs_rate = 0x04;
- rx_fs_rate = 0x80;
+ tx_fs_rate_reg_val = 0x04;
+ rx_fs_rate_reg_val = 0x80;
compander_fs = COMPANDER_FS_96KHZ;
break;
case 192000:
- tx_fs_rate = 0x05;
- rx_fs_rate = 0xA0;
+ tx_fs_rate_reg_val = 0x05;
+ rx_fs_rate_reg_val = 0xA0;
compander_fs = COMPANDER_FS_192KHZ;
break;
default:
@@ -3983,105 +4220,76 @@
return -EINVAL;
}
+ switch (substream->stream) {
+ case SNDRV_PCM_STREAM_CAPTURE:
- /**
- * If current dai is a tx dai, set sample rate to
- * all the txfe paths that are currently not active
- */
- if ((dai->id == AIF1_CAP) || (dai->id == AIF2_CAP) ||
- (dai->id == AIF3_CAP)) {
-
- tx_state = snd_soc_read(codec,
- TABLA_A_CDC_CLK_TX_CLK_EN_B1_CTL);
-
- for (path = 1, shift = 0;
- path <= NUM_DECIMATORS; path++, shift++) {
-
- if (path == BITS_PER_REG + 1) {
- shift = 0;
- tx_state = snd_soc_read(codec,
- TABLA_A_CDC_CLK_TX_CLK_EN_B2_CTL);
- }
-
- if (!(tx_state & (1 << shift))) {
- tx_fs_reg = TABLA_A_CDC_TX1_CLK_FS_CTL
- + (BITS_PER_REG*(path-1));
- snd_soc_update_bits(codec, tx_fs_reg,
- 0x07, tx_fs_rate);
- }
+ ret = tabla_set_decimator_rate(dai, tx_fs_rate_reg_val,
+ params_rate(params));
+ if (ret < 0) {
+ pr_err("%s: set decimator rate failed %d\n", __func__,
+ ret);
+ return ret;
}
+
if (tabla->intf_type == WCD9XXX_INTERFACE_TYPE_I2C) {
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
snd_soc_update_bits(codec,
- TABLA_A_CDC_CLK_TX_I2S_CTL,
- 0x20, 0x20);
+ TABLA_A_CDC_CLK_TX_I2S_CTL, 0x20, 0x20);
break;
case SNDRV_PCM_FORMAT_S32_LE:
snd_soc_update_bits(codec,
- TABLA_A_CDC_CLK_TX_I2S_CTL,
- 0x20, 0x00);
+ TABLA_A_CDC_CLK_TX_I2S_CTL, 0x20, 0x00);
break;
default:
- pr_err("invalid format\n");
- break;
+ pr_err("%s: invalid TX format %u\n", __func__,
+ params_format(params));
+ return -EINVAL;
}
snd_soc_update_bits(codec, TABLA_A_CDC_CLK_TX_I2S_CTL,
- 0x07, tx_fs_rate);
+ 0x07, tx_fs_rate_reg_val);
} else {
tabla->dai[dai->id - 1].rate = params_rate(params);
}
- }
- /**
- * TODO: Need to handle case where same RX chain takes 2 or more inputs
- * with varying sample rates
- */
+ break;
- /**
- * If current dai is a rx dai, set sample rate to
- * all the rx paths that are currently not active
- */
- if (dai->id == AIF1_PB || dai->id == AIF2_PB || dai->id == AIF3_PB) {
+ case SNDRV_PCM_STREAM_PLAYBACK:
- rx_state = snd_soc_read(codec,
- TABLA_A_CDC_CLK_RX_B1_CTL);
-
- for (path = 1, shift = 0;
- path <= NUM_INTERPOLATORS; path++, shift++) {
-
- if (!(rx_state & (1 << shift))) {
- rx_fs_reg = TABLA_A_CDC_RX1_B5_CTL
- + (BITS_PER_REG*(path-1));
- snd_soc_update_bits(codec, rx_fs_reg,
- 0xE0, rx_fs_rate);
- if (comp_rx_path[shift] < COMPANDER_MAX)
- tabla->comp_fs[comp_rx_path[shift]]
- = compander_fs;
- }
+ ret = tabla_set_interpolator_rate(dai, rx_fs_rate_reg_val,
+ compander_fs, params_rate(params));
+ if (ret < 0) {
+ pr_err("%s: set interpolator rate failed %d\n", __func__,
+ ret);
+ return ret;
}
+
if (tabla->intf_type == WCD9XXX_INTERFACE_TYPE_I2C) {
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
snd_soc_update_bits(codec,
- TABLA_A_CDC_CLK_RX_I2S_CTL,
- 0x20, 0x20);
+ TABLA_A_CDC_CLK_RX_I2S_CTL, 0x20, 0x20);
break;
case SNDRV_PCM_FORMAT_S32_LE:
snd_soc_update_bits(codec,
- TABLA_A_CDC_CLK_RX_I2S_CTL,
- 0x20, 0x00);
+ TABLA_A_CDC_CLK_RX_I2S_CTL, 0x20, 0x00);
break;
default:
- pr_err("invalid format\n");
- break;
+ pr_err("%s: invalid RX format %u\n", __func__,
+ params_format(params));
+ return -EINVAL;
}
snd_soc_update_bits(codec, TABLA_A_CDC_CLK_RX_I2S_CTL,
- 0x03, (rx_fs_rate >> 0x05));
+ 0x03, (rx_fs_rate_reg_val >> 0x05));
} else {
tabla->dai[dai->id - 1].rate = params_rate(params);
}
- }
+ break;
+ default:
+ pr_err("%s: Invalid stream type %d\n", __func__,
+ substream->stream);
+ return -EINVAL;
+ }
return 0;
}
@@ -4213,6 +4421,41 @@
},
};
+static int tabla_codec_enable_chmask(struct tabla_priv *tabla_p,
+ int event, int index)
+{
+ int ret = 0;
+ u32 k = 0;
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ for (k = 0; k < tabla_p->dai[index].ch_tot; k++) {
+ ret = wcd9xxx_get_slave_port(
+ tabla_p->dai[index].ch_num[k]);
+ if (ret < 0) {
+ pr_err("%s: Invalid slave port ID: %d\n",
+ __func__, ret);
+ ret = -EINVAL;
+ break;
+ }
+ tabla_p->dai[index].ch_mask |= 1 << ret;
+ }
+ ret = 0;
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ ret = wait_event_timeout(tabla_p->dai[index].dai_wait,
+ (tabla_p->dai[index].ch_mask == 0),
+ msecs_to_jiffies(SLIM_CLOSE_TIMEOUT));
+ if (!ret) {
+ pr_err("%s: Slim close tx/rx wait timeout\n",
+ __func__);
+ ret = -EINVAL;
+ }
+ ret = 0;
+ break;
+ }
+ return ret;
+}
+
static int tabla_codec_enable_slimrx(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
@@ -4242,11 +4485,15 @@
break;
}
}
- if (tabla_p->dai[j].ch_act == tabla_p->dai[j].ch_tot)
+ if (tabla_p->dai[j].ch_act == tabla_p->dai[j].ch_tot) {
+ ret = tabla_codec_enable_chmask(tabla_p,
+ SND_SOC_DAPM_POST_PMU,
+ j);
ret = wcd9xxx_cfg_slim_sch_rx(tabla,
tabla_p->dai[j].ch_num,
tabla_p->dai[j].ch_tot,
tabla_p->dai[j].rate);
+ }
break;
case SND_SOC_DAPM_POST_PMD:
for (j = 0; j < ARRAY_SIZE(tabla_dai); j++) {
@@ -4265,11 +4512,13 @@
ret = wcd9xxx_close_slim_sch_rx(tabla,
tabla_p->dai[j].ch_num,
tabla_p->dai[j].ch_tot);
- usleep_range(15000, 15000);
tabla_p->dai[j].rate = 0;
memset(tabla_p->dai[j].ch_num, 0, (sizeof(u32)*
tabla_p->dai[j].ch_tot));
tabla_p->dai[j].ch_tot = 0;
+ ret = tabla_codec_enable_chmask(tabla_p,
+ SND_SOC_DAPM_POST_PMD,
+ j);
}
}
return ret;
@@ -4307,11 +4556,15 @@
break;
}
}
- if (tabla_p->dai[j].ch_act == tabla_p->dai[j].ch_tot)
+ if (tabla_p->dai[j].ch_act == tabla_p->dai[j].ch_tot) {
+ ret = tabla_codec_enable_chmask(tabla_p,
+ SND_SOC_DAPM_POST_PMU,
+ j);
ret = wcd9xxx_cfg_slim_sch_tx(tabla,
tabla_p->dai[j].ch_num,
tabla_p->dai[j].ch_tot,
tabla_p->dai[j].rate);
+ }
break;
case SND_SOC_DAPM_POST_PMD:
for (j = 0; j < ARRAY_SIZE(tabla_dai); j++) {
@@ -4333,6 +4586,9 @@
memset(tabla_p->dai[j].ch_num, 0, (sizeof(u32)*
tabla_p->dai[j].ch_tot));
tabla_p->dai[j].ch_tot = 0;
+ ret = tabla_codec_enable_chmask(tabla_p,
+ SND_SOC_DAPM_POST_PMD,
+ j);
}
}
return ret;
@@ -4350,30 +4606,6 @@
SND_SOC_DAPM_MIXER("DAC1", TABLA_A_RX_EAR_EN, 6, 0, dac1_switch,
ARRAY_SIZE(dac1_switch)),
- SND_SOC_DAPM_AIF_IN_E("SLIM RX1", "AIF1 Playback", 0, SND_SOC_NOPM, 0,
- 0, tabla_codec_enable_slimrx,
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_AIF_IN_E("SLIM RX2", "AIF1 Playback", 0, SND_SOC_NOPM, 0,
- 0, tabla_codec_enable_slimrx,
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_AIF_IN_E("SLIM RX3", "AIF1 Playback", 0, SND_SOC_NOPM, 0,
- 0, tabla_codec_enable_slimrx,
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_AIF_IN_E("SLIM RX4", "AIF3 Playback", 0, SND_SOC_NOPM, 0,
- 0, tabla_codec_enable_slimrx,
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_AIF_IN_E("SLIM RX5", "AIF3 Playback", 0, SND_SOC_NOPM, 0,
- 0, tabla_codec_enable_slimrx,
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_AIF_IN_E("SLIM RX6", "AIF2 Playback", 0, SND_SOC_NOPM, 0,
- 0, tabla_codec_enable_slimrx,
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_AIF_IN_E("SLIM RX7", "AIF2 Playback", 0, SND_SOC_NOPM, 0,
- 0, tabla_codec_enable_slimrx,
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
/* Headphone */
SND_SOC_DAPM_OUTPUT("HEADPHONE"),
SND_SOC_DAPM_PGA_E("HPHL", TABLA_A_RX_HPH_CNP_EN, 5, 0, NULL, 0,
@@ -4654,54 +4886,15 @@
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_MUX("SLIM TX1 MUX", SND_SOC_NOPM, 0, 0, &sb_tx1_mux),
- SND_SOC_DAPM_AIF_OUT_E("SLIM TX1", "AIF2 Capture", 0, SND_SOC_NOPM, 0,
- 0, tabla_codec_enable_slimtx,
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
SND_SOC_DAPM_MUX("SLIM TX2 MUX", SND_SOC_NOPM, 0, 0, &sb_tx2_mux),
- SND_SOC_DAPM_AIF_OUT_E("SLIM TX2", "AIF2 Capture", 0, SND_SOC_NOPM, 0,
- 0, tabla_codec_enable_slimtx,
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
SND_SOC_DAPM_MUX("SLIM TX3 MUX", SND_SOC_NOPM, 0, 0, &sb_tx3_mux),
- SND_SOC_DAPM_AIF_OUT_E("SLIM TX3", "AIF3 Capture", 0, SND_SOC_NOPM, 0,
- 0, tabla_codec_enable_slimtx,
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
SND_SOC_DAPM_MUX("SLIM TX4 MUX", SND_SOC_NOPM, 0, 0, &sb_tx4_mux),
- SND_SOC_DAPM_AIF_OUT_E("SLIM TX4", "AIF2 Capture", 0, SND_SOC_NOPM, 0,
- 0, tabla_codec_enable_slimtx,
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
SND_SOC_DAPM_MUX("SLIM TX5 MUX", SND_SOC_NOPM, 0, 0, &sb_tx5_mux),
- SND_SOC_DAPM_AIF_OUT_E("SLIM TX5", "AIF3 Capture", 0, SND_SOC_NOPM, 0,
- 0, tabla_codec_enable_slimtx,
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
SND_SOC_DAPM_MUX("SLIM TX6 MUX", SND_SOC_NOPM, 0, 0, &sb_tx6_mux),
- SND_SOC_DAPM_AIF_OUT_E("SLIM TX6", "AIF2 Capture", 0, SND_SOC_NOPM, 0,
- 0, tabla_codec_enable_slimtx,
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
SND_SOC_DAPM_MUX("SLIM TX7 MUX", SND_SOC_NOPM, 0, 0, &sb_tx7_mux),
- SND_SOC_DAPM_AIF_OUT_E("SLIM TX7", "AIF1 Capture", 0, SND_SOC_NOPM, 0,
- 0, tabla_codec_enable_slimtx,
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
SND_SOC_DAPM_MUX("SLIM TX8 MUX", SND_SOC_NOPM, 0, 0, &sb_tx8_mux),
- SND_SOC_DAPM_AIF_OUT_E("SLIM TX8", "AIF1 Capture", 0, SND_SOC_NOPM, 0,
- 0, tabla_codec_enable_slimtx,
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
SND_SOC_DAPM_MUX("SLIM TX9 MUX", SND_SOC_NOPM, 0, 0, &sb_tx9_mux),
- SND_SOC_DAPM_AIF_OUT_E("SLIM TX9", "AIF1 Capture", NULL, SND_SOC_NOPM,
- 0, 0, tabla_codec_enable_slimtx,
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
SND_SOC_DAPM_MUX("SLIM TX10 MUX", SND_SOC_NOPM, 0, 0, &sb_tx10_mux),
- SND_SOC_DAPM_AIF_OUT_E("SLIM TX10", "AIF1 Capture", NULL, SND_SOC_NOPM,
- 0, 0, tabla_codec_enable_slimtx,
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
/* Digital Mic Inputs */
SND_SOC_DAPM_ADC_E("DMIC1", NULL, SND_SOC_NOPM, 0, 0,
@@ -7014,7 +7207,8 @@
{
struct tabla_priv *priv = data;
struct snd_soc_codec *codec = priv->codec;
- int i, j;
+ struct tabla_priv *tabla_p = snd_soc_codec_get_drvdata(codec);
+ int i, j, port_id, k, ch_mask_temp;
u8 val;
for (i = 0; i < WCD9XXX_SLIM_NUM_PORT_REG; i++) {
@@ -7029,6 +7223,22 @@
if (val & 0x2)
pr_err_ratelimited("underflow error on port %x,"
" value %x\n", i*8 + j, val);
+ if (val & 0x4) {
+ pr_debug("%s: port %x disconnect value %x\n",
+ __func__, i*8 + j, val);
+ port_id = i*8 + j;
+ for (k = 0; k < ARRAY_SIZE(tabla_dai); k++) {
+ ch_mask_temp = 1 << port_id;
+ if (ch_mask_temp &
+ tabla_p->dai[k].ch_mask) {
+ tabla_p->dai[k].ch_mask &=
+ ~ch_mask_temp;
+ if (!tabla_p->dai[k].ch_mask)
+ wake_up(
+ &tabla_p->dai[k].dai_wait);
+ }
+ }
+ }
}
wcd9xxx_interface_reg_write(codec->control_data,
TABLA_SLIM_PGD_PORT_INT_CLR0 + i, 0xFF);
@@ -7560,6 +7770,13 @@
// snd_soc_dapm_new_controls(dapm, tabla_dapm_widgets,
// ARRAY_SIZE(tabla_dapm_widgets));
+
+ snd_soc_dapm_new_controls(dapm, tabla_dapm_aif_in_widgets,
+ ARRAY_SIZE(tabla_dapm_aif_in_widgets));
+
+ snd_soc_dapm_new_controls(dapm, tabla_dapm_aif_out_widgets,
+ ARRAY_SIZE(tabla_dapm_aif_out_widgets));
+
if (TABLA_IS_1_X(control->version))
snd_soc_dapm_new_controls(dapm, tabla_1_x_dapm_widgets,
ARRAY_SIZE(tabla_1_x_dapm_widgets));
@@ -7678,6 +7895,7 @@
}
tabla->dai[i].ch_num = kzalloc((sizeof(unsigned int)*
ch_cnt), GFP_KERNEL);
+ init_waitqueue_head(&tabla->dai[i].dai_wait);
}
#ifdef CONFIG_DEBUG_FS
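
The wcd9310 changes above replace the fixed usleep_range(15000, 15000) after closing a SLIMbus channel with an event-driven handshake: at power-up each DAI records its active slave ports in ch_mask, and at power-down it sleeps on dai_wait until the port-disconnect interrupt has cleared every bit, or SLIM_CLOSE_TIMEOUT expires. A stripped-down sketch of that pattern, with the driver structures reduced to the two fields involved; the helper names and the error code are illustrative:

    #include <linux/types.h>
    #include <linux/wait.h>
    #include <linux/jiffies.h>
    #include <linux/errno.h>

    struct dai_state {
            u32 ch_mask;                    /* one bit per active slave port  */
            wait_queue_head_t dai_wait;     /* init_waitqueue_head() at probe */
    };

    /* Power-down path: wait until the hardware reports all ports closed. */
    static int wait_ports_closed(struct dai_state *d, unsigned int timeout_ms)
    {
            if (!wait_event_timeout(d->dai_wait, d->ch_mask == 0,
                                    msecs_to_jiffies(timeout_ms)))
                    return -ETIMEDOUT;
            return 0;
    }

    /* Interrupt path: a port signalled "disconnect"; drop its bit and wake. */
    static void port_disconnected(struct dai_state *d, unsigned int port)
    {
            d->ch_mask &= ~(1U << port);
            if (!d->ch_mask)
                    wake_up(&d->dai_wait);
    }
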
diff --git a/sound/soc/msm/mdm9615.c b/sound/soc/msm/mdm9615.c
index 90d8723..dbe5d00 100644
--- a/sound/soc/msm/mdm9615.c
+++ b/sound/soc/msm/mdm9615.c
@@ -188,9 +188,6 @@
#define LPAIF_SPARE_MUX_CTL_PRI_MUX_SEL_BMSK 0x3
#define LPAIF_SPARE_MUX_CTL_PRI_MUX_SEL_SHFT 0x0
-static u32 spare_shadow;
-static u32 sif_shadow;
-
static atomic_t msm9615_auxpcm_ref;
static atomic_t msm9615_sec_auxpcm_ref;
@@ -1066,30 +1063,26 @@
{
struct msm_i2s_ctl *pintf = &msm9x15_i2s_ctl;
if (i2s_intf == MSM_INTF_PRIM) {
- if (i2s_dir == MSM_DIR_RX)
- gpio_free(GPIO_PRIM_I2S_DOUT);
- if (i2s_dir == MSM_DIR_TX)
- gpio_free(GPIO_PRIM_I2S_DIN);
if (pintf->intf_status[i2s_intf][MSM_DIR_TX] == 0 &&
pintf->intf_status[i2s_intf][MSM_DIR_RX] == 0) {
+ gpio_free(GPIO_PRIM_I2S_DIN);
+ gpio_free(GPIO_PRIM_I2S_DOUT);
gpio_free(GPIO_PRIM_I2S_SCK);
gpio_free(GPIO_PRIM_I2S_WS);
}
} else if (i2s_intf == MSM_INTF_SECN) {
- if (i2s_dir == MSM_DIR_RX)
- gpio_free(GPIO_SEC_I2S_DOUT);
- if (i2s_dir == MSM_DIR_TX)
- gpio_free(GPIO_SEC_I2S_DIN);
if (pintf->intf_status[i2s_intf][MSM_DIR_TX] == 0 &&
pintf->intf_status[i2s_intf][MSM_DIR_RX] == 0) {
+ gpio_free(GPIO_SEC_I2S_DOUT);
gpio_free(GPIO_SEC_I2S_WS);
+ gpio_free(GPIO_SEC_I2S_DIN);
gpio_free(GPIO_SEC_I2S_SCK);
}
}
return 0;
}
-int msm9615_i2s_intf_dir_sel(const char *cpu_dai_name,
+static int msm9615_i2s_intf_dir_sel(const char *cpu_dai_name,
u8 *i2s_intf, u8 *i2s_dir)
{
int ret = 0;
@@ -1117,34 +1110,37 @@
return ret;
}
-int msm9615_enable_i2s_gpio(u8 i2s_intf, u8 i2s_dir)
+static int msm9615_enable_i2s_gpio(u8 i2s_intf, u8 i2s_dir)
{
u8 ret = 0;
struct msm_i2s_ctl *pintf = &msm9x15_i2s_ctl;
+
if (i2s_intf == MSM_INTF_PRIM) {
- if (i2s_dir == MSM_DIR_TX) {
+ if (pintf->intf_status[i2s_intf][MSM_DIR_TX] == 0 &&
+ pintf->intf_status[i2s_intf][MSM_DIR_RX] == 0) {
+
+ ret = gpio_request(GPIO_PRIM_I2S_DOUT,
+ "I2S_PRIM_DOUT");
+ if (ret) {
+ pr_err("%s: Failed to request gpio %d\n",
+ __func__, GPIO_PRIM_I2S_DOUT);
+ goto err;
+ }
+
ret = gpio_request(GPIO_PRIM_I2S_DIN, "I2S_PRIM_DIN");
if (ret) {
pr_err("%s: Failed to request gpio %d\n",
- __func__, GPIO_PRIM_I2S_DIN);
+ __func__, GPIO_PRIM_I2S_DIN);
goto err;
}
- } else if (i2s_dir == MSM_DIR_RX) {
- ret = gpio_request(GPIO_PRIM_I2S_DOUT,
- "I2S_PRIM_DOUT");
- if (ret) {
- pr_err("%s: Failed to request gpio %d\n",
- __func__, GPIO_PRIM_I2S_DOUT);
- goto err;
- }
- } else if (pintf->intf_status[i2s_intf][MSM_DIR_TX] == 0 &&
- pintf->intf_status[i2s_intf][MSM_DIR_RX] == 0) {
+
ret = gpio_request(GPIO_PRIM_I2S_SCK, "I2S_PRIM_SCK");
if (ret) {
pr_err("%s: Failed to request gpio %d\n",
__func__, GPIO_PRIM_I2S_SCK);
goto err;
}
+
ret = gpio_request(GPIO_PRIM_I2S_WS, "I2S_PRIM_WS");
if (ret) {
pr_err("%s: Failed to request gpio %d\n",
@@ -1153,28 +1149,30 @@
}
}
} else if (i2s_intf == MSM_INTF_SECN) {
- if (i2s_dir == MSM_DIR_RX) {
- ret = gpio_request(GPIO_SEC_I2S_DOUT, "I2S_SEC_DOUT");
- if (ret) {
- pr_err("%s: Failed to request gpio %d\n",
- __func__, GPIO_SEC_I2S_DOUT);
- goto err;
- }
- } else if (i2s_dir == MSM_DIR_TX) {
+ if (pintf->intf_status[i2s_intf][MSM_DIR_TX] == 0 &&
+ pintf->intf_status[i2s_intf][MSM_DIR_RX] == 0) {
+
ret = gpio_request(GPIO_SEC_I2S_DIN, "I2S_SEC_DIN");
if (ret) {
pr_err("%s: Failed to request gpio %d\n",
__func__, GPIO_SEC_I2S_DIN);
goto err;
}
- } else if (pintf->intf_status[i2s_intf][MSM_DIR_TX] == 0 &&
- pintf->intf_status[i2s_intf][MSM_DIR_RX] == 0) {
+
+ ret = gpio_request(GPIO_SEC_I2S_DOUT, "I2S_SEC_DOUT");
+ if (ret) {
+ pr_err("%s: Failed to request gpio %d\n",
+ __func__, GPIO_SEC_I2S_DOUT);
+ goto err;
+ }
+
ret = gpio_request(GPIO_SEC_I2S_SCK, "I2S_SEC_SCK");
if (ret) {
pr_err("%s: Failed to request gpio %d\n",
__func__, GPIO_SEC_I2S_SCK);
goto err;
}
+
ret = gpio_request(GPIO_SEC_I2S_WS, "I2S_SEC_WS");
if (ret) {
pr_err("%s: Failed to request gpio %d\n",
@@ -1283,20 +1281,33 @@
return ret;
}
-void msm9615_config_i2s_sif_mux(u8 value)
+static void msm9615_config_i2s_sif_mux(u8 value)
{
struct msm_i2s_ctl *pintf = &msm9x15_i2s_ctl;
- sif_shadow = 0x00000;
+ u32 sif_shadow = 0x0000;
+ /* Make this variable global if both the secondary and
+ * primary interfaces need to be supported. This is required
+ * to retain bits in the interface and set only specific
+ * bits in the register. Also set the Sec Intf bits;
+ * the secondary interface bits are 0 and 1.
+ */
sif_shadow = (sif_shadow & LPASS_SIF_MUX_CTL_PRI_MUX_SEL_BMSK) |
(value << LPASS_SIF_MUX_CTL_PRI_MUX_SEL_SHFT);
- iowrite32(sif_shadow, pintf->sif_virt_addr);
+ if (pintf->sif_virt_addr != NULL)
+ iowrite32(sif_shadow, pintf->sif_virt_addr);
/* Dont read SIF register. Device crashes. */
pr_debug("%s() SIF Reg = 0x%x\n", __func__, sif_shadow);
}
-void msm9615_config_i2s_spare_mux(u8 value, u8 i2s_intf)
+static void msm9615_config_i2s_spare_mux(u8 value, u8 i2s_intf)
{
struct msm_i2s_ctl *pintf = &msm9x15_i2s_ctl;
+ u32 spare_shadow = 0x0000;
+ /* Make this variable global if both the secondary and
+ * primary interfaces need to be supported. This is required
+ * to retain bits in the interface and set only specific
+ * bits in the register. Also set the Sec Intf bits.
+ */
if (i2s_intf == MSM_INTF_PRIM) {
/* Configure Primary SIF */
spare_shadow = (spare_shadow & LPAIF_SPARE_MUX_CTL_PRI_MUX_SEL_BMSK
@@ -1307,7 +1318,8 @@
spare_shadow = (spare_shadow & LPAIF_SPARE_MUX_CTL_SEC_MUX_SEL_BMSK
) | (value << LPAIF_SPARE_MUX_CTL_SEC_MUX_SEL_SHFT);
}
- iowrite32(spare_shadow, pintf->spare_virt_addr);
+ if (pintf->spare_virt_addr != NULL)
+ iowrite32(spare_shadow, pintf->spare_virt_addr);
/* Dont read SPARE register. Device crashes. */
pr_debug("%s( ): SPARE Reg =0x%x\n", __func__, spare_shadow);
}
@@ -2214,6 +2226,9 @@
atomic_set(&msm9615_sec_auxpcm_ref, 0);
msm9x15_i2s_ctl.sif_virt_addr = ioremap(LPASS_SIF_MUX_ADDR, 4);
msm9x15_i2s_ctl.spare_virt_addr = ioremap(LPAIF_SPARE_ADDR, 4);
+ if (msm9x15_i2s_ctl.spare_virt_addr == NULL ||
+ msm9x15_i2s_ctl.sif_virt_addr == NULL)
+ pr_err("%s: SIF or Spare mux pointer is NULL\n", __func__);
sif_virt_addr = ioremap(LPASS_SIF_MUX_ADDR, 4);
secpcm_portslc_virt_addr = ioremap(SEC_PCM_PORT_SLC_ADDR, 4);
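
The SIF/SPARE mux helpers above cannot read the hardware back (the comments warn that the device crashes), so the value is assembled in a software shadow and written with iowrite32() only after checking that the ioremap() at probe time succeeded. A generic sketch of that write-only-register pattern; the field mask, shift and helper name are invented, and the clear-then-set masking below is the conventional form rather than a copy of the driver's macros:

    #include <linux/io.h>
    #include <linux/types.h>

    #define MUX_SEL_BMSK    0x3     /* illustrative field mask  */
    #define MUX_SEL_SHFT    0x0     /* illustrative field shift */

    /* Program one field of a write-only register through a software shadow. */
    static void set_mux(void __iomem *reg, u32 *shadow, u8 value)
    {
            if (reg == NULL)        /* ioremap() may have failed at probe */
                    return;

            *shadow &= ~MUX_SEL_BMSK;
            *shadow |= (value << MUX_SEL_SHFT) & MUX_SEL_BMSK;
            iowrite32(*shadow, reg);
            /* Never read the register back; only the shadow is trusted. */
    }
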
diff --git a/sound/soc/msm/mpq8064.c b/sound/soc/msm/mpq8064.c
index 4ecd8df..6685ce5 100644
--- a/sound/soc/msm/mpq8064.c
+++ b/sound/soc/msm/mpq8064.c
@@ -850,7 +850,7 @@
static void msm_mi2s_shutdown(struct snd_pcm_substream *substream)
{
if (mi2s_bit_clk) {
- clk_disable(mi2s_bit_clk);
+ clk_disable_unprepare(mi2s_bit_clk);
clk_put(mi2s_bit_clk);
mi2s_bit_clk = NULL;
}
@@ -892,7 +892,7 @@
if (IS_ERR(mi2s_bit_clk))
return PTR_ERR(mi2s_bit_clk);
clk_set_rate(mi2s_bit_clk, 0);
- ret = clk_enable(mi2s_bit_clk);
+ ret = clk_prepare_enable(mi2s_bit_clk);
if (IS_ERR_VALUE(ret)) {
pr_err("Unable to enable mi2s_bit_clk\n");
clk_put(mi2s_bit_clk);
@@ -1257,8 +1257,8 @@
.cpu_dai_name = "MultiMedia5",
.platform_name = "msm-multi-ch-pcm-dsp",
.dynamic = 1,
- .trigger = {SND_SOC_DPCM_TRIGGER_BESPOKE,
- SND_SOC_DPCM_TRIGGER_BESPOKE},
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
.ignore_suspend = 1,
@@ -1271,8 +1271,8 @@
.cpu_dai_name = "MultiMedia6",
.platform_name = "msm-multi-ch-pcm-dsp",
.dynamic = 1,
- .trigger = {SND_SOC_DPCM_TRIGGER_BESPOKE,
- SND_SOC_DPCM_TRIGGER_BESPOKE},
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
.ignore_suspend = 1,
@@ -1285,8 +1285,8 @@
.cpu_dai_name = "MultiMedia7",
.platform_name = "msm-compr-dsp",
.dynamic = 1,
- .trigger = {SND_SOC_DPCM_TRIGGER_BESPOKE,
- SND_SOC_DPCM_TRIGGER_BESPOKE},
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
.ignore_suspend = 1,
@@ -1299,8 +1299,8 @@
.cpu_dai_name = "MultiMedia8",
.platform_name = "msm-compr-dsp",
.dynamic = 1,
- .trigger = {SND_SOC_DPCM_TRIGGER_BESPOKE,
- SND_SOC_DPCM_TRIGGER_BESPOKE},
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
.ignore_suspend = 1,
diff --git a/sound/soc/msm/msm-compr-q6.c b/sound/soc/msm/msm-compr-q6.c
index d4045e1..c894921 100644
--- a/sound/soc/msm/msm-compr-q6.c
+++ b/sound/soc/msm/msm-compr-q6.c
@@ -34,6 +34,13 @@
#include "msm-compr-q6.h"
#include "msm-pcm-routing.h"
+#define COMPRE_CAPTURE_NUM_PERIODS 16
+/* Allocate the worst case frame size for compressed audio */
+#define COMPRE_CAPTURE_HEADER_SIZE (sizeof(struct snd_compr_audio_info))
+#define COMPRE_CAPTURE_MAX_FRAME_SIZE (6144)
+#define COMPRE_CAPTURE_PERIOD_SIZE (COMPRE_CAPTURE_MAX_FRAME_SIZE + \
+ COMPRE_CAPTURE_HEADER_SIZE)
+
struct snd_msm {
struct msm_audio *prtd;
unsigned volume;
@@ -42,6 +49,27 @@
static struct audio_locks the_locks;
+static struct snd_pcm_hardware msm_compr_hardware_capture = {
+ .info = (SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ .channels_min = 1,
+ .channels_max = 8,
+ .buffer_bytes_max =
+ COMPRE_CAPTURE_PERIOD_SIZE * COMPRE_CAPTURE_NUM_PERIODS,
+ .period_bytes_min = COMPRE_CAPTURE_PERIOD_SIZE,
+ .period_bytes_max = COMPRE_CAPTURE_PERIOD_SIZE,
+ .periods_min = COMPRE_CAPTURE_NUM_PERIODS,
+ .periods_max = COMPRE_CAPTURE_NUM_PERIODS,
+ .fifo_size = 0,
+};
+
static struct snd_pcm_hardware msm_compr_hardware_playback = {
.info = (SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
@@ -81,7 +109,9 @@
struct snd_pcm_substream *substream = prtd->substream;
struct snd_pcm_runtime *runtime = substream->runtime;
struct audio_aio_write_param param;
+ struct audio_aio_read_param read_param;
struct audio_buffer *buf = NULL;
+ uint32_t *ptrmem = (uint32_t *)payload;
int i = 0;
pr_debug("%s opcode =%08x\n", __func__, opcode);
@@ -138,9 +168,53 @@
prtd->cmd_ack = 1;
wake_up(&the_locks.eos_wait);
break;
+ case ASM_DATA_EVENT_READ_DONE: {
+ pr_debug("ASM_DATA_EVENT_READ_DONE\n");
+ pr_debug("buf = %p, data = 0x%X, *data = %p,\n"
+ "prtd->pcm_irq_pos = %d\n",
+ prtd->audio_client->port[OUT].buf,
+ *(uint32_t *)prtd->audio_client->port[OUT].buf->data,
+ prtd->audio_client->port[OUT].buf->data,
+ prtd->pcm_irq_pos);
+
+ memcpy(prtd->audio_client->port[OUT].buf->data +
+ prtd->pcm_irq_pos, (ptrmem + 2),
+ COMPRE_CAPTURE_HEADER_SIZE);
+ pr_debug("buf = %p, updated data = 0x%X, *data = %p\n",
+ prtd->audio_client->port[OUT].buf,
+ *(uint32_t *)(prtd->audio_client->port[OUT].buf->data +
+ prtd->pcm_irq_pos),
+ prtd->audio_client->port[OUT].buf->data);
+ if (!atomic_read(&prtd->start))
+ break;
+ pr_debug("frame size=%d, buffer = 0x%X\n", ptrmem[2],
+ ptrmem[1]);
+ if (ptrmem[2] > COMPRE_CAPTURE_MAX_FRAME_SIZE) {
+ pr_err("Frame length exceeded the max length");
+ break;
+ }
+ buf = prtd->audio_client->port[OUT].buf;
+ pr_debug("pcm_irq_pos=%d, buf[0].phys = 0x%X\n",
+ prtd->pcm_irq_pos, (uint32_t)buf[0].phys);
+ read_param.len = prtd->pcm_count - COMPRE_CAPTURE_HEADER_SIZE;
+ read_param.paddr = (unsigned long)(buf[0].phys) +
+ prtd->pcm_irq_pos + COMPRE_CAPTURE_HEADER_SIZE;
+ prtd->pcm_irq_pos += prtd->pcm_count;
+
+ if (atomic_read(&prtd->start))
+ snd_pcm_period_elapsed(substream);
+
+ q6asm_async_read(prtd->audio_client, &read_param);
+ break;
+ }
case APR_BASIC_RSP_RESULT: {
switch (payload[0]) {
case ASM_SESSION_CMD_RUN: {
+ if (substream->stream
+ != SNDRV_PCM_STREAM_PLAYBACK) {
+ atomic_set(&prtd->start, 1);
+ break;
+ }
if (!atomic_read(&prtd->pending_buffer))
break;
pr_debug("%s:writing %d bytes"
@@ -230,12 +304,15 @@
pr_debug("compressd playback, no need to send"
" the decoder params\n");
break;
+ case SND_AUDIOCODEC_DTS_PASS_THROUGH:
+ pr_debug("compressd DTS playback,dont send the decoder params\n");
+ break;
case SND_AUDIOCODEC_WMA:
pr_debug("SND_AUDIOCODEC_WMA\n");
memset(&wma_cfg, 0x0, sizeof(struct asm_wma_cfg));
wma_cfg.format_tag = compr->info.codec_param.codec.format;
wma_cfg.ch_cfg = runtime->channels;
- wma_cfg.sample_rate = runtime->rate;
+ wma_cfg.sample_rate = compr->info.codec_param.codec.sample_rate;
wma_cfg.avg_bytes_per_sec =
compr->info.codec_param.codec.bit_rate/8;
wma_cfg.block_align = compr->info.codec_param.codec.align;
@@ -255,7 +332,8 @@
memset(&wma_pro_cfg, 0x0, sizeof(struct asm_wmapro_cfg));
wma_pro_cfg.format_tag = compr->info.codec_param.codec.format;
wma_pro_cfg.ch_cfg = compr->info.codec_param.codec.ch_in;
- wma_pro_cfg.sample_rate = runtime->rate;
+ wma_pro_cfg.sample_rate =
+ compr->info.codec_param.codec.sample_rate;
wma_pro_cfg.avg_bytes_per_sec =
compr->info.codec_param.codec.bit_rate/8;
wma_pro_cfg.block_align = compr->info.codec_param.codec.align;
@@ -266,11 +344,25 @@
compr->info.codec_param.codec.options.wma.channelmask;
wma_pro_cfg.encode_opt =
compr->info.codec_param.codec.options.wma.encodeopt;
+ wma_pro_cfg.adv_encode_opt =
+ compr->info.codec_param.codec.options.wma.encodeopt1;
+ wma_pro_cfg.adv_encode_opt2 =
+ compr->info.codec_param.codec.options.wma.encodeopt2;
ret = q6asm_media_format_block_wmapro(prtd->audio_client,
&wma_pro_cfg);
if (ret < 0)
pr_err("%s: CMD Format block failed\n", __func__);
break;
+ case SND_AUDIOCODEC_DTS:
+ case SND_AUDIOCODEC_DTS_LBR:
+ pr_debug("SND_AUDIOCODEC_DTS\n");
+ ret = q6asm_media_format_block(prtd->audio_client,
+ compr->codec);
+ if (ret < 0) {
+ pr_err("%s: CMD Format block failed\n", __func__);
+ return ret;
+ }
+ break;
default:
return -EINVAL;
}
@@ -281,6 +373,44 @@
return 0;
}
+static int msm_compr_capture_prepare(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct compr_audio *compr = runtime->private_data;
+ struct msm_audio *prtd = &compr->prtd;
+ struct audio_buffer *buf = prtd->audio_client->port[OUT].buf;
+ struct audio_aio_read_param read_param;
+ int ret = 0;
+ int i;
+ prtd->pcm_size = snd_pcm_lib_buffer_bytes(substream);
+ prtd->pcm_count = snd_pcm_lib_period_bytes(substream);
+ prtd->pcm_irq_pos = 0;
+
+ /* rate and channels are sent to audio driver */
+ prtd->samp_rate = runtime->rate;
+ prtd->channel_mode = runtime->channels;
+
+ if (prtd->enabled)
+ return ret;
+ read_param.len = prtd->pcm_count - COMPRE_CAPTURE_HEADER_SIZE;
+ pr_debug("%s: Samp_rate = %d, Channel = %d, pcm_size = %d,\n"
+ "pcm_count = %d, periods = %d\n",
+ __func__, prtd->samp_rate, prtd->channel_mode,
+ prtd->pcm_size, prtd->pcm_count, runtime->periods);
+
+ for (i = 0; i < runtime->periods; i++) {
+ read_param.uid = i;
+ read_param.paddr = ((unsigned long)(buf[i].phys) +
+ COMPRE_CAPTURE_HEADER_SIZE);
+ q6asm_async_read(prtd->audio_client, &read_param);
+ }
+ prtd->periods = runtime->periods;
+
+ prtd->enabled = 1;
+
+ return ret;
+}
+
static int msm_compr_trigger(struct snd_pcm_substream *substream, int cmd)
{
int ret = 0;
@@ -293,8 +423,17 @@
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
prtd->pcm_irq_pos = 0;
- if (compr->info.codec_param.codec.id ==
- SND_AUDIOCODEC_AC3_PASS_THROUGH) {
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ if (compr->info.codec_param.codec.id ==
+ SND_AUDIOCODEC_AC3_PASS_THROUGH ||
+ compr->info.codec_param.codec.id ==
+ SND_AUDIOCODEC_DTS_PASS_THROUGH) {
+ msm_pcm_routing_reg_psthr_stream(
+ soc_prtd->dai_link->be_id,
+ prtd->session_id, substream->stream,
+ 1);
+ }
+ } else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
msm_pcm_routing_reg_psthr_stream(
soc_prtd->dai_link->be_id,
prtd->session_id, substream->stream, 1);
@@ -307,11 +446,19 @@
break;
case SNDRV_PCM_TRIGGER_STOP:
pr_debug("SNDRV_PCM_TRIGGER_STOP\n");
- if (compr->info.codec_param.codec.id ==
- SND_AUDIOCODEC_AC3_PASS_THROUGH) {
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ if (compr->info.codec_param.codec.id ==
+ SND_AUDIOCODEC_AC3_PASS_THROUGH) {
+ msm_pcm_routing_reg_psthr_stream(
+ soc_prtd->dai_link->be_id,
+ prtd->session_id, substream->stream,
+ 0);
+ }
+ } else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
msm_pcm_routing_reg_psthr_stream(
soc_prtd->dai_link->be_id,
- prtd->session_id, substream->stream, 0);
+ prtd->session_id, substream->stream,
+ 0);
}
atomic_set(&prtd->start, 0);
break;
@@ -344,6 +491,9 @@
compr->info.compr_cap.codecs[2] = SND_AUDIOCODEC_AC3_PASS_THROUGH;
compr->info.compr_cap.codecs[3] = SND_AUDIOCODEC_WMA;
compr->info.compr_cap.codecs[4] = SND_AUDIOCODEC_WMA_PRO;
+ compr->info.compr_cap.codecs[5] = SND_AUDIOCODEC_DTS;
+ compr->info.compr_cap.codecs[6] = SND_AUDIOCODEC_DTS_LBR;
+ compr->info.compr_cap.codecs[7] = SND_AUDIOCODEC_DTS_PASS_THROUGH;
/* Add new codecs here */
}
@@ -365,10 +515,6 @@
.rampingcurve = SOFT_VOLUME_CURVE_LINEAR,
};
- /* Capture path */
- if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
- return -EINVAL;
-
pr_debug("%s\n", __func__);
compr = kzalloc(sizeof(struct compr_audio), GFP_KERNEL);
if (compr == NULL) {
@@ -384,13 +530,18 @@
kfree(prtd);
return -ENOMEM;
}
- runtime->hw = msm_compr_hardware_playback;
pr_info("%s: session ID %d\n", __func__, prtd->audio_client->session);
prtd->session_id = prtd->audio_client->session;
- prtd->cmd_ack = 1;
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ runtime->hw = msm_compr_hardware_playback;
+ prtd->cmd_ack = 1;
+ } else {
+ runtime->hw = msm_compr_hardware_capture;
+ }
+
ret = snd_pcm_hw_constraint_list(runtime, 0,
SNDRV_PCM_HW_PARAM_RATE,
@@ -405,7 +556,8 @@
prtd->dsp_cnt = 0;
atomic_set(&prtd->pending_buffer, 1);
- compr->codec = FORMAT_MP3;
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ compr->codec = FORMAT_MP3;
populate_codec_list(compr, runtime);
runtime->private_data = compr;
compressed_audio.prtd = &compr->prtd;
@@ -468,6 +620,27 @@
return 0;
}
+static int msm_compr_capture_close(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
+ struct compr_audio *compr = runtime->private_data;
+ struct msm_audio *prtd = &compr->prtd;
+ int dir = OUT;
+
+ pr_debug("%s\n", __func__);
+ atomic_set(&prtd->pending_buffer, 0);
+ q6asm_cmd(prtd->audio_client, CMD_CLOSE);
+ q6asm_audio_client_buf_free_contiguous(dir,
+ prtd->audio_client);
+ msm_pcm_routing_dereg_phy_stream(soc_prtd->dai_link->be_id,
+ SNDRV_PCM_STREAM_CAPTURE);
+ q6asm_audio_client_free(prtd->audio_client);
+ kfree(prtd);
+
+ return 0;
+}
+
static int msm_compr_close(struct snd_pcm_substream *substream)
{
int ret = 0;
@@ -475,7 +648,7 @@
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
ret = msm_compr_playback_close(substream);
else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
- ret = EINVAL;
+ ret = msm_compr_capture_close(substream);
return ret;
}
static int msm_compr_prepare(struct snd_pcm_substream *substream)
@@ -485,7 +658,7 @@
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
ret = msm_compr_playback_prepare(substream);
else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
- ret = EINVAL;
+ ret = msm_compr_capture_prepare(substream);
return ret;
}
@@ -499,7 +672,10 @@
if (prtd->pcm_irq_pos >= prtd->pcm_size)
prtd->pcm_irq_pos = 0;
- pr_debug("pcm_irq_pos = %d\n", prtd->pcm_irq_pos);
+ pr_debug("%s: pcm_irq_pos = %d, pcm_size = %d, sample_bits = %d,\n"
+ "frame_bits = %d\n", __func__, prtd->pcm_irq_pos,
+ prtd->pcm_size, runtime->sample_bits,
+ runtime->frame_bits);
return bytes_to_frames(runtime, (prtd->pcm_irq_pos));
}
@@ -541,28 +717,45 @@
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
dir = IN;
else
- return -EINVAL;
+ dir = OUT;
- switch (compr->info.codec_param.codec.id) {
- case SND_AUDIOCODEC_AC3_PASS_THROUGH:
- ret = q6asm_open_write_compressed(prtd->audio_client,
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ switch (compr->info.codec_param.codec.id) {
+ case SND_AUDIOCODEC_AC3_PASS_THROUGH:
+ case SND_AUDIOCODEC_DTS_PASS_THROUGH:
+ ret = q6asm_open_write_compressed(prtd->audio_client,
compr->codec);
+
+ if (ret < 0) {
+ pr_err("%s: Session out open failed\n",
+ __func__);
+ return -ENOMEM;
+ }
+ break;
+ default:
+ ret = q6asm_open_write(prtd->audio_client,
+ compr->codec);
+ if (ret < 0) {
+ pr_err("%s: Session out open failed\n",
+ __func__);
+ return -ENOMEM;
+ }
+ msm_pcm_routing_reg_phy_stream(
+ soc_prtd->dai_link->be_id,
+ prtd->session_id, substream->stream);
+
+ break;
+ }
+ } else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+ ret = q6asm_open_read_compressed(prtd->audio_client,
+ compr->codec);
+
if (ret < 0) {
pr_err("%s: compressed Session out open failed\n",
- __func__);
+ __func__);
return -ENOMEM;
}
- break;
- default:
- ret = q6asm_open_write(prtd->audio_client, compr->codec);
- if (ret < 0) {
- pr_err("%s: Session out open failed\n", __func__);
- return -ENOMEM;
- }
- msm_pcm_routing_reg_phy_stream(soc_prtd->dai_link->be_id,
- prtd->session_id, substream->stream);
-
- break;
}
ret = q6asm_set_io_mode(prtd->audio_client, ASYNC_IO_MODE);
if (ret < 0) {
@@ -581,13 +774,17 @@
}
buf = prtd->audio_client->port[dir].buf;
- pr_debug("%s:buf = %p\n", __func__, buf);
dma_buf->dev.type = SNDRV_DMA_TYPE_DEV;
dma_buf->dev.dev = substream->pcm->card->dev;
dma_buf->private_data = NULL;
dma_buf->area = buf[0].data;
dma_buf->addr = buf[0].phys;
dma_buf->bytes = runtime->hw.buffer_bytes_max;
+
+ pr_debug("%s: buf[%p]dma_buf->area[%p]dma_buf->addr[%p]\n"
+ "dma_buf->bytes[%d]\n", __func__,
+ (void *)buf, (void *)dma_buf->area,
+ (void *)dma_buf->addr, dma_buf->bytes);
if (!dma_buf->area)
return -ENOMEM;
@@ -669,6 +866,18 @@
pr_debug("SND_AUDIOCODEC_WMA_PRO\n");
compr->codec = FORMAT_WMA_V10PRO;
break;
+ case SND_AUDIOCODEC_DTS_PASS_THROUGH:
+ pr_debug("SND_AUDIOCODEC_DTS_PASS_THROUGH\n");
+ compr->codec = FORMAT_DTS;
+ break;
+ case SND_AUDIOCODEC_DTS:
+ pr_debug("SND_AUDIOCODEC_DTS\n");
+ compr->codec = FORMAT_DTS;
+ break;
+ case SND_AUDIOCODEC_DTS_LBR:
+ pr_debug("SND_AUDIOCODEC_DTS\n");
+ compr->codec = FORMAT_DTS_LBR;
+ break;
default:
pr_debug("FORMAT_LINEAR_PCM\n");
compr->codec = FORMAT_LINEAR_PCM;
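The compressed-capture additions above carve each capture period into a small metadata header followed by a worst-case frame: prepare() queues one asynchronous read per period at the physical address just past the header, and the READ_DONE handler later copies the frame metadata returned by the DSP into that gap. The following is a rough sketch of the offset arithmetic only, assuming one contiguous buffer split into equal periods; the sizes and the read_param struct here are simplified stand-ins for COMPRE_CAPTURE_* and audio_aio_read_param.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the driver's constants. */
    #define HEADER_SIZE     32      /* metadata prepended to each period */
    #define MAX_FRAME_SIZE  6144    /* worst-case compressed frame */
    #define PERIOD_SIZE     (MAX_FRAME_SIZE + HEADER_SIZE)
    #define NUM_PERIODS     16

    struct read_param {             /* conceptual mirror of the AIO read descriptor */
        uint32_t len;
        unsigned long paddr;
        uint32_t uid;
    };

    int main(void)
    {
        unsigned long buf_phys = 0x80000000UL;  /* pretend DMA base address */
        struct read_param rp;
        int i;

        /* Prime one read per period; the DSP writes payload after the header
         * gap, and the READ_DONE handler later fills the gap with metadata. */
        for (i = 0; i < NUM_PERIODS; i++) {
            rp.uid   = i;
            rp.len   = PERIOD_SIZE - HEADER_SIZE;
            rp.paddr = buf_phys + (unsigned long)i * PERIOD_SIZE + HEADER_SIZE;
            printf("period %2d: paddr=0x%lx len=%u\n",
                   i, rp.paddr, (unsigned)rp.len);
        }
        return 0;
    }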
diff --git a/sound/soc/msm/msm-dai-fe.c b/sound/soc/msm/msm-dai-fe.c
index 56e83d5..210cfa9 100644
--- a/sound/soc/msm/msm-dai-fe.c
+++ b/sound/soc/msm/msm-dai-fe.c
@@ -92,7 +92,7 @@
SNDRV_PCM_RATE_KNOT),
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.channels_min = 1,
- .channels_max = 2,
+ .channels_max = 8,
.rate_min = 8000,
.rate_max = 48000,
},
@@ -176,12 +176,24 @@
.rate_min = 8000,
.rate_max = 48000,
},
+ .capture = {
+ .stream_name = "MultiMedia4 Capture",
+ .aif_name = "MM_UL4",
+ .rates = (SNDRV_PCM_RATE_8000_48000 |
+ SNDRV_PCM_RATE_KNOT),
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
.ops = &msm_fe_Multimedia_dai_ops,
.name = "MultiMedia4",
},
{
.playback = {
.stream_name = "MultiMedia5 Playback",
+ .aif_name = "MM_DL5",
.rates = (SNDRV_PCM_RATE_8000_48000 |
SNDRV_PCM_RATE_KNOT),
.formats = SNDRV_PCM_FMTBIT_S16_LE,
@@ -196,6 +208,7 @@
{
.playback = {
.stream_name = "MultiMedia6 Playback",
+ .aif_name = "MM_DL6",
.rates = (SNDRV_PCM_RATE_8000_48000 |
SNDRV_PCM_RATE_KNOT),
.formats = SNDRV_PCM_FMTBIT_S16_LE,
@@ -210,6 +223,7 @@
{
.playback = {
.stream_name = "MultiMedia7 Playback",
+ .aif_name = "MM_DL7",
.rates = (SNDRV_PCM_RATE_8000_48000 |
SNDRV_PCM_RATE_KNOT),
.formats = SNDRV_PCM_FMTBIT_S16_LE,
@@ -224,6 +238,7 @@
{
.playback = {
.stream_name = "MultiMedia8 Playback",
+ .aif_name = "MM_DL8",
.rates = (SNDRV_PCM_RATE_8000_48000 |
SNDRV_PCM_RATE_KNOT),
.formats = SNDRV_PCM_FMTBIT_S16_LE,
diff --git a/sound/soc/msm/msm-dai-q6.c b/sound/soc/msm/msm-dai-q6.c
index b3e5120..147316e 100644
--- a/sound/soc/msm/msm-dai-q6.c
+++ b/sound/soc/msm/msm-dai-q6.c
@@ -40,9 +40,14 @@
union afe_port_config port_config;
};
+struct msm_dai_q6_mi2s_dai_config {
+ u16 pdata_mi2s_lines;
+ struct msm_dai_q6_dai_data mi2s_dai_data;
+};
+
struct msm_dai_q6_mi2s_dai_data {
- struct msm_dai_q6_dai_data tx_dai;
- struct msm_dai_q6_dai_data rx_dai;
+ struct msm_dai_q6_mi2s_dai_config tx_dai;
+ struct msm_dai_q6_mi2s_dai_config rx_dai;
struct snd_pcm_hw_constraint_list rate_constraint;
struct snd_pcm_hw_constraint_list bitwidth_constraint;
};
@@ -86,8 +91,8 @@
static const char *mi2s_format[] = {
"LPCM",
"Compr",
- "60958-LPCM",
- "60958-Compr"};
+ "LPCM-60958",
+ "Compr-60958"};
static const struct soc_enum mi2s_config_enum[] = {
SOC_ENUM_SINGLE_EXT(4, mi2s_format),
@@ -143,21 +148,63 @@
{
struct msm_dai_q6_mi2s_dai_data *mi2s_dai_data =
dev_get_drvdata(dai->dev);
- struct msm_dai_q6_dai_data *dai_data =
+ struct msm_dai_q6_mi2s_dai_config *mi2s_dai_config =
(substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
&mi2s_dai_data->rx_dai : &mi2s_dai_data->tx_dai);
+ struct msm_dai_q6_dai_data *dai_data = &mi2s_dai_config->mi2s_dai_data;
dai_data->channels = params_channels(params);
switch (dai_data->channels) {
- case 2:
- dai_data->port_config.mi2s.channel = MSM_AFE_STEREO;
+ case 8:
+ case 7:
+ if (mi2s_dai_config->pdata_mi2s_lines < AFE_I2S_8CHS)
+ goto error_invalid_data;
+ dai_data->port_config.mi2s.line = AFE_I2S_8CHS;
break;
+ case 6:
+ case 5:
+ if (mi2s_dai_config->pdata_mi2s_lines < AFE_I2S_6CHS)
+ goto error_invalid_data;
+ dai_data->port_config.mi2s.line = AFE_I2S_6CHS;
+ break;
+ case 4:
+ case 3:
+ if (mi2s_dai_config->pdata_mi2s_lines < AFE_I2S_QUAD01)
+ goto error_invalid_data;
+ if (mi2s_dai_config->pdata_mi2s_lines == AFE_I2S_QUAD23)
+ dai_data->port_config.mi2s.line =
+ mi2s_dai_config->pdata_mi2s_lines;
+ else
+ dai_data->port_config.mi2s.line = AFE_I2S_QUAD01;
+ break;
+ case 2:
case 1:
- dai_data->port_config.mi2s.channel = MSM_AFE_MONO;
+ if (mi2s_dai_config->pdata_mi2s_lines < AFE_I2S_SD0)
+ goto error_invalid_data;
+ switch (mi2s_dai_config->pdata_mi2s_lines) {
+ case AFE_I2S_SD0:
+ case AFE_I2S_SD1:
+ case AFE_I2S_SD2:
+ case AFE_I2S_SD3:
+ dai_data->port_config.mi2s.line =
+ mi2s_dai_config->pdata_mi2s_lines;
+ break;
+ case AFE_I2S_QUAD01:
+ case AFE_I2S_6CHS:
+ case AFE_I2S_8CHS:
+ dai_data->port_config.mi2s.line = AFE_I2S_SD0;
+ break;
+ case AFE_I2S_QUAD23:
+ dai_data->port_config.mi2s.line = AFE_I2S_SD2;
+ break;
+ }
+ if (dai_data->channels == 2)
+ dai_data->port_config.mi2s.channel = MSM_AFE_STEREO;
+ else
+ dai_data->port_config.mi2s.channel = MSM_AFE_MONO;
break;
default:
- pr_warn("greater than stereo has not been validated");
- break;
+ goto error_invalid_data;
}
dai_data->rate = params_rate(params);
dai_data->port_config.mi2s.bitwidth = 16;
@@ -166,7 +213,14 @@
mi2s_dai_data->rate_constraint.list = &dai_data->rate;
mi2s_dai_data->bitwidth_constraint.list = &dai_data->bitwidth;
}
+
+ pr_debug("%s: dai_data->channels = %d, line = %d\n", __func__,
+ dai_data->channels, dai_data->port_config.mi2s.line);
return 0;
+error_invalid_data:
+ pr_err("%s: dai_data->channels = %d, line = %d\n", __func__,
+ dai_data->channels, dai_data->port_config.mi2s.line);
+ return -EINVAL;
}
static int msm_dai_q6_mi2s_get_lineconfig(u16 sd_lines, u16 *config_ptr,
@@ -276,7 +330,9 @@
}
if (ch_cnt) {
- dai_data->rx_dai.port_config.mi2s.line = sdline_config;
+ dai_data->rx_dai.mi2s_dai_data.port_config.mi2s.line =
+ sdline_config;
+ dai_data->rx_dai.pdata_mi2s_lines = sdline_config;
dai_driver->playback.channels_min = 1;
dai_driver->playback.channels_max = ch_cnt << 1;
} else {
@@ -292,7 +348,9 @@
}
if (ch_cnt) {
- dai_data->tx_dai.port_config.mi2s.line = sdline_config;
+ dai_data->tx_dai.mi2s_dai_data.port_config.mi2s.line =
+ sdline_config;
+ dai_data->tx_dai.pdata_mi2s_lines = sdline_config;
dai_driver->capture.channels_min = 1;
dai_driver->capture.channels_max = ch_cnt << 1;
} else {
@@ -301,8 +359,8 @@
}
dev_info(&pdev->dev, "%s: playback sdline %x capture sdline %x\n",
- __func__, dai_data->rx_dai.port_config.mi2s.line,
- dai_data->tx_dai.port_config.mi2s.line);
+ __func__, dai_data->rx_dai.pdata_mi2s_lines,
+ dai_data->tx_dai.pdata_mi2s_lines);
dev_info(&pdev->dev, "%s: playback ch_max %d capture ch_mx %d\n",
__func__, dai_driver->playback.channels_max,
dai_driver->capture.channels_max);
@@ -315,8 +373,10 @@
struct msm_dai_q6_mi2s_dai_data *mi2s_dai_data =
dev_get_drvdata(dai->dev);
- if (test_bit(STATUS_PORT_STARTED, mi2s_dai_data->rx_dai.status_mask) ||
- test_bit(STATUS_PORT_STARTED, mi2s_dai_data->rx_dai.status_mask)) {
+ if (test_bit(STATUS_PORT_STARTED,
+ mi2s_dai_data->rx_dai.mi2s_dai_data.status_mask) ||
+ test_bit(STATUS_PORT_STARTED,
+ mi2s_dai_data->tx_dai.mi2s_dai_data.status_mask)) {
dev_err(dai->dev, "%s: err chg i2s mode while dai running",
__func__);
return -EPERM;
@@ -324,12 +384,12 @@
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBS_CFS:
- mi2s_dai_data->rx_dai.port_config.mi2s.ws = 1;
- mi2s_dai_data->tx_dai.port_config.mi2s.ws = 1;
+ mi2s_dai_data->rx_dai.mi2s_dai_data.port_config.mi2s.ws = 1;
+ mi2s_dai_data->tx_dai.mi2s_dai_data.port_config.mi2s.ws = 1;
break;
case SND_SOC_DAIFMT_CBM_CFM:
- mi2s_dai_data->rx_dai.port_config.mi2s.ws = 0;
- mi2s_dai_data->tx_dai.port_config.mi2s.ws = 0;
+ mi2s_dai_data->rx_dai.mi2s_dai_data.port_config.mi2s.ws = 0;
+ mi2s_dai_data->tx_dai.mi2s_dai_data.port_config.mi2s.ws = 0;
break;
default:
return -EINVAL;
@@ -345,7 +405,8 @@
dev_get_drvdata(dai->dev);
struct msm_dai_q6_dai_data *dai_data =
(substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
- &mi2s_dai_data->rx_dai : &mi2s_dai_data->tx_dai);
+ &mi2s_dai_data->rx_dai.mi2s_dai_data :
+ &mi2s_dai_data->tx_dai.mi2s_dai_data);
int rc = 0;
if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
@@ -364,7 +425,8 @@
dev_get_drvdata(dai->dev);
struct msm_dai_q6_dai_data *dai_data =
(substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
- &mi2s_dai_data->rx_dai : &mi2s_dai_data->tx_dai);
+ &mi2s_dai_data->rx_dai.mi2s_dai_data :
+ &mi2s_dai_data->tx_dai.mi2s_dai_data);
u16 port_id = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
MI2S_RX : MI2S_TX);
int rc = 0;
@@ -406,7 +468,8 @@
dev_get_drvdata(dai->dev);
struct msm_dai_q6_dai_data *dai_data =
(substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
- &mi2s_dai_data->rx_dai : &mi2s_dai_data->tx_dai);
+ &mi2s_dai_data->rx_dai.mi2s_dai_data :
+ &mi2s_dai_data->tx_dai.mi2s_dai_data);
u16 port_id = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
MI2S_RX : MI2S_TX);
int rc = 0;
@@ -418,8 +481,10 @@
clear_bit(STATUS_PORT_STARTED, dai_data->status_mask);
}
- if (!test_bit(STATUS_PORT_STARTED, mi2s_dai_data->rx_dai.status_mask) &&
- !test_bit(STATUS_PORT_STARTED, mi2s_dai_data->rx_dai.status_mask)) {
+ if (!test_bit(STATUS_PORT_STARTED,
+ mi2s_dai_data->rx_dai.mi2s_dai_data.status_mask) &&
+ !test_bit(STATUS_PORT_STARTED,
+ mi2s_dai_data->tx_dai.mi2s_dai_data.status_mask)) {
mi2s_dai_data->rate_constraint.list = NULL;
mi2s_dai_data->bitwidth_constraint.list = NULL;
}
@@ -1268,9 +1333,9 @@
struct snd_kcontrol *kcontrol = NULL;
int rc = 0;
- if (mi2s_dai_data->rx_dai.port_config.mi2s.line) {
+ if (mi2s_dai_data->rx_dai.mi2s_dai_data.port_config.mi2s.line) {
kcontrol = snd_ctl_new1(&mi2s_config_controls[0],
- &mi2s_dai_data->rx_dai);
+ &mi2s_dai_data->rx_dai.mi2s_dai_data);
rc = snd_ctl_add(dai->card->snd_card, kcontrol);
if (IS_ERR_VALUE(rc)) {
@@ -1279,10 +1344,10 @@
}
}
- if (mi2s_dai_data->tx_dai.port_config.mi2s.line) {
+ if (mi2s_dai_data->tx_dai.mi2s_dai_data.port_config.mi2s.line) {
rc = snd_ctl_add(dai->card->snd_card,
- snd_ctl_new1(&mi2s_config_controls[2],
- &mi2s_dai_data->tx_dai));
+ snd_ctl_new1(&mi2s_config_controls[2],
+ &mi2s_dai_data->tx_dai.mi2s_dai_data));
if (IS_ERR_VALUE(rc)) {
if (kcontrol)
@@ -1302,19 +1367,21 @@
int rc;
/* If AFE port is still up, close it */
- if (test_bit(STATUS_PORT_STARTED, mi2s_dai_data->rx_dai.status_mask)) {
+ if (test_bit(STATUS_PORT_STARTED,
+ mi2s_dai_data->rx_dai.mi2s_dai_data.status_mask)) {
rc = afe_close(MI2S_RX); /* can block */
if (IS_ERR_VALUE(rc))
dev_err(dai->dev, "fail to close MI2S_RX port\n");
clear_bit(STATUS_PORT_STARTED,
- mi2s_dai_data->rx_dai.status_mask);
+ mi2s_dai_data->rx_dai.mi2s_dai_data.status_mask);
}
- if (test_bit(STATUS_PORT_STARTED, mi2s_dai_data->tx_dai.status_mask)) {
+ if (test_bit(STATUS_PORT_STARTED,
+ mi2s_dai_data->tx_dai.mi2s_dai_data.status_mask)) {
rc = afe_close(MI2S_TX); /* can block */
if (IS_ERR_VALUE(rc))
dev_err(dai->dev, "fail to close MI2S_TX port\n");
clear_bit(STATUS_PORT_STARTED,
- mi2s_dai_data->tx_dai.status_mask);
+ mi2s_dai_data->tx_dai.mi2s_dai_data.status_mask);
}
kfree(mi2s_dai_data);
snd_soc_unregister_dai(dai->dev);
@@ -1966,6 +2033,8 @@
return 0;
err_pdata:
+
+ dev_err(&pdev->dev, "fail to msm_dai_q6_mi2s_dev_probe\n");
kfree(dai_data);
rtn:
return rc;
diff --git a/sound/soc/msm/msm-multi-ch-pcm-q6.c b/sound/soc/msm/msm-multi-ch-pcm-q6.c
index 734d34f..ef58dd1 100644
--- a/sound/soc/msm/msm-multi-ch-pcm-q6.c
+++ b/sound/soc/msm/msm-multi-ch-pcm-q6.c
@@ -49,7 +49,8 @@
#define PLAYBACK_MAX_PERIOD_SIZE 4032
#define PLAYBACK_MIN_PERIOD_SIZE 256
#define CAPTURE_NUM_PERIODS 16
-#define CAPTURE_PERIOD_SIZE 320
+#define CAPTURE_MIN_PERIOD_SIZE 320
+#define CAPTURE_MAX_PERIOD_SIZE 5376
static struct snd_pcm_hardware msm_pcm_hardware_capture = {
.info = (SNDRV_PCM_INFO_MMAP |
@@ -62,10 +63,10 @@
.rate_min = 8000,
.rate_max = 48000,
.channels_min = 1,
- .channels_max = 2,
- .buffer_bytes_max = CAPTURE_NUM_PERIODS * CAPTURE_PERIOD_SIZE,
- .period_bytes_min = CAPTURE_PERIOD_SIZE,
- .period_bytes_max = CAPTURE_PERIOD_SIZE,
+ .channels_max = 8,
+ .buffer_bytes_max = CAPTURE_NUM_PERIODS * CAPTURE_MAX_PERIOD_SIZE,
+ .period_bytes_min = CAPTURE_MIN_PERIOD_SIZE,
+ .period_bytes_max = CAPTURE_MAX_PERIOD_SIZE,
.periods_min = CAPTURE_NUM_PERIODS,
.periods_max = CAPTURE_NUM_PERIODS,
.fifo_size = 0,
@@ -390,6 +391,17 @@
}
}
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+ ret = snd_pcm_hw_constraint_minmax(runtime,
+ SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
+ CAPTURE_NUM_PERIODS * CAPTURE_MIN_PERIOD_SIZE,
+ CAPTURE_NUM_PERIODS * CAPTURE_MAX_PERIOD_SIZE);
+ if (ret < 0) {
+ pr_err("constraint for buffer bytes min max ret = %d\n",
+ ret);
+ }
+ }
+
prtd->dsp_cnt = 0;
runtime->private_data = prtd;
pr_debug("substream->pcm->device = %d\n", substream->pcm->device);
@@ -695,21 +707,14 @@
else
dir = OUT;
- if (dir == OUT) {
- ret = q6asm_audio_client_buf_alloc_contiguous(dir,
- prtd->audio_client,
- runtime->hw.period_bytes_min,
- runtime->hw.periods_max);
- } else {
- /*
- *TODO : Need to Add Async IO changes. All period
- * size might not be supported.
- */
- ret = q6asm_audio_client_buf_alloc_contiguous(dir,
- prtd->audio_client,
- (params_buffer_bytes(params) / params_periods(params)),
- params_periods(params));
- }
+ /*
+ * TODO: Need to add async IO changes. All period
+ * sizes might not be supported.
+ */
+ ret = q6asm_audio_client_buf_alloc_contiguous(dir,
+ prtd->audio_client,
+ (params_buffer_bytes(params) / params_periods(params)),
+ params_periods(params));
if (ret < 0) {
pr_err("Audio Start: Buffer Allocation failed rc = %d\n", ret);
@@ -723,10 +728,7 @@
dma_buf->private_data = NULL;
dma_buf->area = buf[0].data;
dma_buf->addr = buf[0].phys;
- if (dir == OUT)
- dma_buf->bytes = runtime->hw.buffer_bytes_max;
- else
- dma_buf->bytes = params_buffer_bytes(params);
+ dma_buf->bytes = params_buffer_bytes(params);
if (!dma_buf->area)
return -ENOMEM;
diff --git a/sound/soc/msm/msm-pcm-q6.c b/sound/soc/msm/msm-pcm-q6.c
index 39ce436..168cf97 100644
--- a/sound/soc/msm/msm-pcm-q6.c
+++ b/sound/soc/msm/msm-pcm-q6.c
@@ -532,12 +532,15 @@
int dir = OUT;
pr_debug("%s\n", __func__);
- q6asm_cmd(prtd->audio_client, CMD_CLOSE);
- q6asm_audio_client_buf_free_contiguous(dir,
+ if (prtd->audio_client) {
+ q6asm_cmd(prtd->audio_client, CMD_CLOSE);
+ q6asm_audio_client_buf_free_contiguous(dir,
prtd->audio_client);
+ q6asm_audio_client_free(prtd->audio_client);
+ }
+
msm_pcm_routing_dereg_phy_stream(soc_prtd->dai_link->be_id,
- SNDRV_PCM_STREAM_CAPTURE);
- q6asm_audio_client_free(prtd->audio_client);
+ SNDRV_PCM_STREAM_CAPTURE);
kfree(prtd);
return 0;
@@ -638,7 +641,7 @@
if (ret < 0) {
pr_err("%s: q6asm_open_read failed\n", __func__);
q6asm_audio_client_free(prtd->audio_client);
- kfree(prtd);
+ prtd->audio_client = NULL;
return -ENOMEM;
}
}
diff --git a/sound/soc/msm/msm-pcm-routing.c b/sound/soc/msm/msm-pcm-routing.c
index cc51a0f6..afc14f5 100644
--- a/sound/soc/msm/msm-pcm-routing.c
+++ b/sound/soc/msm/msm-pcm-routing.c
@@ -69,7 +69,7 @@
static const DECLARE_TLV_DB_LINEAR(compressed_rx_vol_gain, 0,
INT_RX_VOL_MAX_STEPS);
-
+static int msm_route_ec_ref_rx;
/* Equal to Frontend after last of the MULTIMEDIA SESSIONS */
#define MAX_EQ_SESSIONS MSM_FRONTEND_DAI_CS_VOICE
@@ -1046,6 +1046,45 @@
return 0;
}
+static int msm_routing_ec_ref_rx_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: ec_ref_rx = %d", __func__, msm_route_ec_ref_rx);
+ ucontrol->value.integer.value[0] = msm_route_ec_ref_rx;
+ return 0;
+}
+
+static int msm_routing_ec_ref_rx_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 0:
+ msm_route_ec_ref_rx = SLIMBUS_0_RX;
+ break;
+ case 1:
+ msm_route_ec_ref_rx = PRIMARY_I2S_RX;
+ break;
+ default:
+ msm_route_ec_ref_rx = 0;
+ break;
+ }
+ adm_ec_ref_rx_id(msm_route_ec_ref_rx);
+ pr_debug("%s: msm_route_ec_ref_rx = %d\n",
+ __func__, msm_route_ec_ref_rx);
+ return 0;
+}
+
+static const char * const ec_ref_rx[] = {"SLIM_RX", "I2S_RX", "PROXY_RX",
+ "NONE"};
+static const struct soc_enum msm_route_ec_ref_rx_enum[] = {
+ SOC_ENUM_SINGLE_EXT(4, ec_ref_rx),
+};
+
+static const struct snd_kcontrol_new ec_ref_rx_mixer_controls[] = {
+ SOC_ENUM_EXT("EC_REF_RX", msm_route_ec_ref_rx_enum[0],
+ msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put),
+};
+
static const struct snd_kcontrol_new pri_i2s_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_PRI_I2S_RX ,
MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
@@ -1313,6 +1352,13 @@
msm_routing_put_audio_mixer),
};
+
+static const struct snd_kcontrol_new mmul4_mixer_controls[] = {
+ SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
static const struct snd_kcontrol_new pri_rx_voice_mixer_controls[] = {
SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_PRI_I2S_RX,
MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
@@ -1944,6 +1990,7 @@
SND_SOC_DAPM_AIF_IN("VOIP_DL", "VoIP Playback", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("MM_UL1", "MultiMedia1 Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("MM_UL2", "MultiMedia2 Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("MM_UL4", "MultiMedia4 Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_IN("CS-VOICE_DL1", "CS-VOICE Playback", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("CS-VOICE_UL1", "CS-VOICE Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_IN("VoLTE_DL", "VoLTE Playback", 0, 0, 0, 0),
@@ -2041,6 +2088,8 @@
mmul1_mixer_controls, ARRAY_SIZE(mmul1_mixer_controls)),
SND_SOC_DAPM_MIXER("MultiMedia2 Mixer", SND_SOC_NOPM, 0, 0,
mmul2_mixer_controls, ARRAY_SIZE(mmul2_mixer_controls)),
+ SND_SOC_DAPM_MIXER("MultiMedia4 Mixer", SND_SOC_NOPM, 0, 0,
+ mmul4_mixer_controls, ARRAY_SIZE(mmul4_mixer_controls)),
SND_SOC_DAPM_MIXER("AUX_PCM_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
auxpcm_rx_mixer_controls, ARRAY_SIZE(auxpcm_rx_mixer_controls)),
SND_SOC_DAPM_MIXER("SEC_AUX_PCM_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
@@ -2208,6 +2257,7 @@
{"MultiMedia1 Mixer", "PRI_TX", "PRI_I2S_TX"},
{"MultiMedia1 Mixer", "MI2S_TX", "MI2S_TX"},
{"MultiMedia2 Mixer", "MI2S_TX", "MI2S_TX"},
+ {"MultiMedia4 Mixer", "MI2S_TX", "MI2S_TX"},
{"MultiMedia1 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
{"MultiMedia1 Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
{"MultiMedia1 Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
@@ -2237,6 +2287,7 @@
{"MM_UL1", NULL, "MultiMedia1 Mixer"},
{"MultiMedia2 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
{"MM_UL2", NULL, "MultiMedia2 Mixer"},
+ {"MM_UL4", NULL, "MultiMedia4 Mixer"},
{"AUX_PCM_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
{"AUX_PCM_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
@@ -2409,6 +2460,10 @@
{"BE_OUT", NULL, "SLIMBUS_3_RX"},
{"BE_OUT", NULL, "STUB_RX"},
{"STUB_TX", NULL, "BE_IN"},
+ {"BE_OUT", NULL, "SEC_AUX_PCM_RX"},
+ {"SEC_AUX_PCM_TX", NULL, "BE_IN"},
+ {"BE_OUT", NULL, "AUX_PCM_RX"},
+ {"AUX_PCM_TX", NULL, "BE_IN"},
};
static int msm_pcm_routing_hw_params(struct snd_pcm_substream *substream,
@@ -2599,6 +2654,9 @@
lpa_SRS_trumedia_controls_I2S,
ARRAY_SIZE(lpa_SRS_trumedia_controls_I2S));
+ snd_soc_add_platform_controls(platform,
+ ec_ref_rx_mixer_controls,
+ ARRAY_SIZE(ec_ref_rx_mixer_controls));
return 0;
}
diff --git a/sound/soc/msm/msm8974.c b/sound/soc/msm/msm8974.c
index 4678ea4..2656eb7 100644
--- a/sound/soc/msm/msm8974.c
+++ b/sound/soc/msm/msm8974.c
@@ -648,6 +648,21 @@
.be_id = MSM_FRONTEND_DAI_MULTIMEDIA1
},
{
+ .name = "MSM VoIP",
+ .stream_name = "VoIP",
+ .cpu_dai_name = "VoIP",
+ .platform_name = "msm-voip-dsp",
+ .dynamic = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .ignore_suspend = 1,
+ /* this dai link has playback support */
+ .ignore_pmdown_time = 1,
+ .be_id = MSM_FRONTEND_DAI_VOIP,
+ },
+ {
.name = "MSM8974 LPA",
.stream_name = "LPA",
.cpu_dai_name = "MultiMedia3",
@@ -662,6 +677,21 @@
.ignore_pmdown_time = 1,
.be_id = MSM_FRONTEND_DAI_MULTIMEDIA3,
},
+ {
+ .name = "AUXPCM Hostless",
+ .stream_name = "AUXPCM Hostless",
+ .cpu_dai_name = "AUXPCM_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ /* this dai link has playback support */
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
/* AUX PCM Backend DAI Links */
{
diff --git a/sound/soc/msm/qdsp6/q6adm.c b/sound/soc/msm/qdsp6/q6adm.c
index bc57ef3..0327e4a 100644
--- a/sound/soc/msm/qdsp6/q6adm.c
+++ b/sound/soc/msm/qdsp6/q6adm.c
@@ -38,6 +38,7 @@
atomic_t copp_cnt[AFE_MAX_PORTS];
atomic_t copp_stat[AFE_MAX_PORTS];
wait_queue_head_t wait;
+ int ec_ref_rx;
};
static struct acdb_cal_block mem_addr_audproc[MAX_AUDPROC_TYPES];
@@ -45,6 +46,7 @@
static struct adm_ctl this_adm;
+
int srs_trumedia_open(int port_id, int srs_tech_id, void *srs_params)
{
struct asm_pp_params_command *open = NULL;
@@ -642,8 +644,16 @@
open.mode = path;
open.endpoint_id1 = port_id;
- open.endpoint_id2 = 0xFFFF;
+ open.endpoint_id2 = 0xFFFF;
+ if (this_adm.ec_ref_rx && (path != 1)) {
+ open.endpoint_id2 = this_adm.ec_ref_rx;
+ this_adm.ec_ref_rx = 0;
+ }
+
+ pr_debug("%s open.endpoint_id1:%d open.endpoint_id2:%d\n",
+ __func__, open.endpoint_id1, open.endpoint_id2);
/* convert path to acdb path */
if (path == ADM_PATH_PLAYBACK)
open.topology_id = get_adm_rx_topology();
@@ -755,6 +765,15 @@
open.dev_channel_mapping[3] = PCM_CHANNEL_FC;
open.dev_channel_mapping[4] = PCM_CHANNEL_LB;
open.dev_channel_mapping[5] = PCM_CHANNEL_RB;
+ } else if (channel_mode == 8) {
+ open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
+ open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
+ open.dev_channel_mapping[2] = PCM_CHANNEL_LFE;
+ open.dev_channel_mapping[3] = PCM_CHANNEL_FC;
+ open.dev_channel_mapping[4] = PCM_CHANNEL_LB;
+ open.dev_channel_mapping[5] = PCM_CHANNEL_RB;
+ open.dev_channel_mapping[6] = PCM_CHANNEL_FLC;
+ open.dev_channel_mapping[7] = PCM_CHANNEL_FRC;
} else {
pr_err("%s invalid num_chan %d\n", __func__,
channel_mode);
@@ -772,8 +791,16 @@
open.mode = path;
open.endpoint_id1 = port_id;
- open.endpoint_id2 = 0xFFFF;
+ open.endpoint_id2 = 0xFFFF;
+ if (this_adm.ec_ref_rx && (path != 1)) {
+ open.endpoint_id2 = this_adm.ec_ref_rx;
+ this_adm.ec_ref_rx = 0;
+ }
+
+ pr_debug("%s open.endpoint_id1:%d open.endpoint_id2:%d\n",
+ __func__, open.endpoint_id1, open.endpoint_id2);
/* convert path to acdb path */
if (path == ADM_PATH_PLAYBACK)
open.topology_id = get_adm_rx_topology();
@@ -1073,6 +1100,12 @@
return atomic_read(&this_adm.copp_id[port_index]);
}
+void adm_ec_ref_rx_id(int port_id)
+{
+ this_adm.ec_ref_rx = port_id;
+ pr_debug("%s ec_ref_rx:%d", __func__, this_adm.ec_ref_rx);
+}
+
int adm_close(int port_id)
{
struct apr_hdr close;
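The EC reference path added above is a one-shot latch: the "EC_REF_RX" mixer control stores a port id via adm_ec_ref_rx_id(), and the next non-playback COPP open consumes it as the second endpoint before clearing it, with 0xFFFF (no reference) as the default. Below is a small sketch of that hand-off only, under the assumption of the one-shot behaviour shown above; the port id values and function names are hypothetical, not the real AFE definitions.

    #include <stdio.h>

    /* Hypothetical port ids standing in for SLIMBUS_0_RX etc. */
    #define PORT_NONE         0
    #define PORT_SLIM_RX      0x4000
    #define ADM_PATH_PLAYBACK 1

    static int ec_ref_rx;            /* one-shot latch, like this_adm.ec_ref_rx */

    /* Called from the "EC_REF_RX" kcontrol put handler. */
    static void set_ec_ref_rx(int port_id)
    {
        ec_ref_rx = port_id;
    }

    /* Called when a COPP is opened; returns the second endpoint to program. */
    static int pick_endpoint2(int path)
    {
        int ep2 = 0xFFFF;                    /* default: no EC reference */

        if (ec_ref_rx && path != ADM_PATH_PLAYBACK) {
            ep2 = ec_ref_rx;                 /* consume the latched port */
            ec_ref_rx = PORT_NONE;
        }
        return ep2;
    }

    int main(void)
    {
        set_ec_ref_rx(PORT_SLIM_RX);
        printf("capture open: endpoint2=0x%x\n", pick_endpoint2(2));
        printf("next capture open: endpoint2=0x%x\n", pick_endpoint2(2));
        return 0;
    }

Running this prints 0x4000 for the first capture open and 0xFFFF for the second, which is the one-shot behaviour the routing and q6adm changes rely on.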
diff --git a/sound/soc/msm/qdsp6/q6asm.c b/sound/soc/msm/qdsp6/q6asm.c
index 09bfd94..9136f93 100644
--- a/sound/soc/msm/qdsp6/q6asm.c
+++ b/sound/soc/msm/qdsp6/q6asm.c
@@ -246,21 +246,20 @@
port->buf[cnt].handle);
ion_client_destroy(port->buf[cnt].client);
#else
- pr_debug("%s:data[%p]phys[%p][%p] cnt[%d]"
- "mem_buffer[%p]\n",
+ pr_debug("%s:data[%p]phys[%p][%p] cnt[%d] mem_buffer[%p]\n",
__func__, (void *)port->buf[cnt].data,
- (void *)port->buf[cnt].phys,
- (void *)&port->buf[cnt].phys, cnt,
- (void *)port->buf[cnt].mem_buffer);
+ (void *)port->buf[cnt].phys,
+ (void *)&port->buf[cnt].phys, cnt,
+ (void *)port->buf[cnt].mem_buffer);
if (IS_ERR((void *)port->buf[cnt].mem_buffer))
- pr_err("%s:mem buffer invalid, error ="
- "%ld\n", __func__,
+ pr_err("%s:mem buffer invalid, error = %ld\n",
+ __func__,
PTR_ERR((void *)port->buf[cnt].mem_buffer));
else {
if (iounmap(
port->buf[cnt].mem_buffer) < 0)
- pr_err("%s: unmap buffer"
- " failed\n", __func__);
+ pr_err("%s: unmap buffer failed\n",
+ __func__);
}
free_contiguous_memory_by_paddr(
port->buf[cnt].phys);
@@ -306,8 +305,7 @@
ion_unmap_kernel(port->buf[0].client, port->buf[0].handle);
ion_free(port->buf[0].client, port->buf[0].handle);
ion_client_destroy(port->buf[0].client);
- pr_debug("%s:data[%p]phys[%p][%p]"
- ", client[%p] handle[%p]\n",
+ pr_debug("%s:data[%p]phys[%p][%p], client[%p] handle[%p]\n",
__func__,
(void *)port->buf[0].data,
(void *)port->buf[0].phys,
@@ -315,22 +313,20 @@
(void *)port->buf[0].client,
(void *)port->buf[0].handle);
#else
- pr_debug("%s:data[%p]phys[%p][%p]"
- "mem_buffer[%p]\n",
+ pr_debug("%s:data[%p]phys[%p][%p] mem_buffer[%p]\n",
__func__,
(void *)port->buf[0].data,
(void *)port->buf[0].phys,
(void *)&port->buf[0].phys,
(void *)port->buf[0].mem_buffer);
if (IS_ERR((void *)port->buf[0].mem_buffer))
- pr_err("%s:mem buffer invalid, error ="
- "%ld\n", __func__,
+ pr_err("%s:mem buffer invalid, error = %ld\n",
+ __func__,
PTR_ERR((void *)port->buf[0].mem_buffer));
else {
if (iounmap(
port->buf[0].mem_buffer) < 0)
- pr_err("%s: unmap buffer"
- " failed\n", __func__);
+ pr_err("%s: unmap buffer failed\n", __func__);
}
free_contiguous_memory_by_paddr(port->buf[0].phys);
#endif
@@ -433,8 +429,8 @@
(apr_fn)q6asm_mmapcallback,\
0x0FFFFFFFF, &this_mmap);
if (this_mmap.apr == NULL) {
- pr_debug("%s Unable to register \
- APR ASM common port \n", __func__);
+ pr_debug("%s Unable to register APR ASM common port\n",
+ __func__);
goto fail;
}
}
@@ -523,8 +519,7 @@
(UINT_MAX, "audio_client");
if (IS_ERR_OR_NULL((void *)
buf[cnt].client)) {
- pr_err("%s: ION create client"
- " for AUDIO failed\n",
+ pr_err("%s: ION create client for AUDIO failed\n",
__func__);
mutex_unlock(&ac->cmd_lock);
goto fail;
@@ -534,8 +529,7 @@
(0x1 << ION_AUDIO_HEAP_ID));
if (IS_ERR_OR_NULL((void *)
buf[cnt].handle)) {
- pr_err("%s: ION memory"
- " allocation for AUDIO failed\n",
+ pr_err("%s: ION memory allocation for AUDIO failed\n",
__func__);
mutex_unlock(&ac->cmd_lock);
goto fail;
@@ -547,8 +541,7 @@
&buf[cnt].phys,
(size_t *)&len);
if (rc) {
- pr_err("%s: ION Get Physical"
- " for AUDIO failed, rc = %d\n",
+ pr_err("%s: ION Get Physical for AUDIO failed, rc = %d\n",
__func__, rc);
mutex_unlock(&ac->cmd_lock);
goto fail;
@@ -559,8 +552,8 @@
0);
if (IS_ERR_OR_NULL((void *)
buf[cnt].data)) {
- pr_err("%s: ION memory"
- " mapping for AUDIO failed\n", __func__);
+ pr_err("%s: ION memory mapping for AUDIO failed\n",
+ __func__);
mutex_unlock(&ac->cmd_lock);
goto fail;
}
@@ -571,8 +564,8 @@
allocate_contiguous_ebi_nomap(bufsz,
SZ_4K);
if (!buf[cnt].phys) {
- pr_err("%s:Buf alloc failed "
- " size=%d\n", __func__,
+ pr_err("%s:Buf alloc failed size=%d\n",
+ __func__,
bufsz);
mutex_unlock(&ac->cmd_lock);
goto fail;
@@ -581,17 +574,17 @@
ioremap(buf[cnt].phys, bufsz);
if (IS_ERR(
(void *)buf[cnt].mem_buffer)) {
- pr_err("%s:map_buffer failed,"
- "error = %ld\n",
- __func__, PTR_ERR((void *)buf[cnt].mem_buffer));
+ pr_err("%s:map_buffer failed, error = %ld\n",
+ __func__,
+ PTR_ERR((void *)buf[cnt].mem_buffer));
mutex_unlock(&ac->cmd_lock);
goto fail;
}
buf[cnt].data =
buf[cnt].mem_buffer;
if (!buf[cnt].data) {
- pr_err("%s:invalid vaddr,"
- " iomap failed\n", __func__);
+ pr_err("%s:invalid vaddr, iomap failed\n",
+ __func__);
mutex_unlock(&ac->cmd_lock);
goto fail;
}
@@ -697,17 +690,15 @@
buf[0].phys = allocate_contiguous_ebi_nomap(bufsz * bufcnt,
SZ_4K);
if (!buf[0].phys) {
- pr_err("%s:Buf alloc failed "
- " size=%d, bufcnt=%d\n", __func__,
- bufsz, bufcnt);
+ pr_err("%s:Buf alloc failed size=%d, bufcnt=%d\n",
+ __func__, bufsz, bufcnt);
mutex_unlock(&ac->cmd_lock);
goto fail;
}
buf[0].mem_buffer = ioremap(buf[0].phys, bufsz * bufcnt);
if (IS_ERR((void *)buf[cnt].mem_buffer)) {
- pr_err("%s:map_buffer failed,"
- "error = %ld\n",
+ pr_err("%s:map_buffer failed, error = %ld\n",
__func__, PTR_ERR((void *)buf[0].mem_buffer));
mutex_unlock(&ac->cmd_lock);
@@ -716,8 +707,7 @@
buf[0].data = buf[0].mem_buffer;
#endif
if (!buf[0].data) {
- pr_err("%s:invalid vaddr,"
- " iomap failed\n", __func__);
+ pr_err("%s:invalid vaddr, iomap failed\n", __func__);
mutex_unlock(&ac->cmd_lock);
goto fail;
}
@@ -747,6 +737,9 @@
cnt++;
}
ac->port[dir].max_buf_cnt = cnt;
+
+ pr_debug("%s ac->port[%d].max_buf_cnt[%d]\n", __func__, dir,
+ ac->port[dir].max_buf_cnt);
mutex_unlock(&ac->cmd_lock);
rc = q6asm_memory_map(ac, buf[0].phys, dir, bufsz, cnt);
if (rc < 0) {
@@ -776,9 +769,8 @@
return 0;
}
- pr_debug("%s:ptr0[0x%x]ptr1[0x%x]opcode[0x%x]"
- "token[0x%x]payload_s[%d] src[%d] dest[%d]\n", __func__,
- payload[0], payload[1], data->opcode, data->token,
+ pr_debug("%s:ptr0[0x%x]ptr1[0x%x]opcode[0x%x] token[0x%x]payload_s[%d] src[%d] dest[%d]\n",
+ __func__, payload[0], payload[1], data->opcode, data->token,
data->payload_size, data->src_port, data->dest_port);
if (data->opcode == APR_BASIC_RSP_RESULT) {
@@ -836,8 +828,8 @@
return 0;
}
- pr_debug("%s: session[%d]opcode[0x%x] \
- token[0x%x]payload_s[%d] src[%d] dest[%d]\n", __func__,
+ pr_debug("%s: session[%d]opcode[0x%x] token[0x%x]payload_s[%d] src[%d] dest[%d]\n",
+ __func__,
ac->session, data->opcode,
data->token, data->payload_size, data->src_port,
data->dest_port);
@@ -868,6 +860,7 @@
case ASM_DATA_CMD_MEDIA_FORMAT_UPDATE:
case ASM_STREAM_CMD_SET_ENCDEC_PARAM:
case ASM_STREAM_CMD_OPEN_WRITE_COMPRESSED:
+ case ASM_STREAM_CMD_OPEN_READ_COMPRESSED:
if (atomic_read(&ac->cmd_state)) {
atomic_set(&ac->cmd_state, 0);
wake_up(&ac->cmd_wait);
@@ -915,9 +908,8 @@
out_cold_index*/
if (out_cold_index != 1) {
do_gettimeofday(&out_cold_tv);
- pr_debug("COLD: apr_send_pkt at %ld \
- sec %ld microsec\n",\
- out_cold_tv.tv_sec,\
+ pr_debug("COLD: apr_send_pkt at %ld sec %ld microsec\n",
+ out_cold_tv.tv_sec,
out_cold_tv.tv_usec);
out_cold_index = 1;
}
@@ -953,8 +945,7 @@
*/
if (in_cont_index == 7) {
do_gettimeofday(&in_cont_tv);
- pr_err("In_CONT:previous read buffer done \
- at %ld sec %ld microsec\n",\
+ pr_err("In_CONT:previous read buffer done at %ld sec %ld microsec\n",
in_cont_tv.tv_sec, in_cont_tv.tv_usec);
}
}
@@ -971,9 +962,8 @@
payload[READDONE_IDX_ID],
payload[READDONE_IDX_NUMFRAMES]);
#ifdef CONFIG_DEBUG_FS
- if (in_enable_flag) {
+ if (in_enable_flag)
in_cont_index++;
- }
#endif
if (ac->io_mode == SYNC_IO_MODE) {
if (port->buf == NULL) {
@@ -1009,9 +999,8 @@
pr_err("ASM_SESSION_EVENT_TX_OVERFLOW\n");
break;
case ASM_SESSION_CMDRSP_GET_SESSION_TIME:
- pr_debug("%s: ASM_SESSION_CMDRSP_GET_SESSION_TIME, "
- "payload[0] = %d, payload[1] = %d, "
- "payload[2] = %d\n", __func__,
+ pr_debug("%s: ASM_SESSION_CMDRSP_GET_SESSION_TIME, payload[0] = %d, payload[1] = %d, payload[2] = %d\n",
+ __func__,
payload[0], payload[1], payload[2]);
ac->time_stamp = (uint64_t)(((uint64_t)payload[1] << 32) |
payload[2]);
@@ -1022,9 +1011,8 @@
break;
case ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY:
case ASM_DATA_EVENT_ENC_SR_CM_NOTIFY:
- pr_debug("%s: ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY, "
- "payload[0] = %d, payload[1] = %d, "
- "payload[2] = %d, payload[3] = %d\n", __func__,
+ pr_debug("%s: ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY, payload[0] = %d, payload[1] = %d, payload[2] = %d, payload[3] = %d\n",
+ __func__,
payload[0], payload[1], payload[2],
payload[3]);
break;
@@ -1061,8 +1049,8 @@
if (port->buf[idx].used == dir) {
/* To make it more robust, we could loop and get the
next avail buf, its risky though */
- pr_debug("%s:Next buf idx[0x%x] not available,\
- dir[%d]\n", __func__, idx, dir);
+ pr_debug("%s:Next buf idx[0x%x] not available,dir[%d]\n",
+ __func__, idx, dir);
mutex_unlock(&port->lock);
return NULL;
}
@@ -1111,8 +1099,8 @@
* To make it more robust, we could loop and get the
* next avail buf, its risky though
*/
- pr_debug("%s:Next buf idx[0x%x] not available,\
- dir[%d]\n", __func__, idx, dir);
+ pr_debug("%s:Next buf idx[0x%x] not available, dir[%d]\n",
+ __func__, idx, dir);
return NULL;
}
*size = port->buf[idx].actual_size;
@@ -1276,6 +1264,42 @@
return -EINVAL;
}
+int q6asm_open_read_compressed(struct audio_client *ac, uint32_t format)
+{
+ int rc = 0x00;
+ struct asm_stream_cmd_open_read_compressed open;
+#ifdef CONFIG_DEBUG_FS
+ in_cont_index = 0;
+#endif
+ if ((ac == NULL) || (ac->apr == NULL)) {
+ pr_err("%s: APR handle NULL\n", __func__);
+ return -EINVAL;
+ }
+ pr_debug("%s:session[%d]", __func__, ac->session);
+
+ q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE);
+ open.hdr.opcode = ASM_STREAM_CMD_OPEN_READ_COMPRESSED;
+ /* hardcoded as follows */
+ open.frame_per_buf = 1;
+ open.uMode = 0;
+
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &open);
+ if (rc < 0) {
+ pr_err("open failed op[0x%x]rc[%d]\n", open.hdr.opcode, rc);
+ goto fail_cmd;
+ }
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) == 0), 5*HZ);
+ if (!rc) {
+ pr_err("%s: timeout. waited for OPEN_READ_COMPRESSED rc[%d]\n",
+ __func__, rc);
+ goto fail_cmd;
+ }
+ return 0;
+fail_cmd:
+ return -EINVAL;
+}
+
int q6asm_open_write_compressed(struct audio_client *ac, uint32_t format)
{
int rc = 0x00;
@@ -1305,6 +1329,9 @@
case FORMAT_DTS:
open.format = DTS;
break;
+ case FORMAT_DTS_LBR:
+ open.format = DTS_LBR;
+ break;
case FORMAT_AAC:
open.format = MPEG4_AAC;
break;
@@ -1388,6 +1415,12 @@
case FORMAT_MP3:
open.format = MP3;
break;
+ case FORMAT_DTS:
+ open.format = DTS;
+ break;
+ case FORMAT_DTS_LBR:
+ open.format = DTS_LBR;
+ break;
default:
pr_err("%s: Invalid format[%d]\n", __func__, format);
goto fail_cmd;
@@ -1594,8 +1627,8 @@
struct asm_stream_cmd_encdec_cfg_blk enc_cfg;
int rc = 0;
- pr_debug("%s:session[%d]frames[%d]SR[%d]ch[%d]bitrate[%d]mode[%d]"
- "format[%d]", __func__, ac->session, frames_per_buf,
+ pr_debug("%s:session[%d]frames[%d]SR[%d]ch[%d]bitrate[%d]mode[%d] format[%d]",
+ __func__, ac->session, frames_per_buf,
sample_rate, channels, bit_rate, mode, format);
q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
@@ -1670,6 +1703,47 @@
return -EINVAL;
}
+int q6asm_enc_cfg_blk_pcm_native(struct audio_client *ac,
+ uint32_t rate, uint32_t channels)
+{
+ struct asm_stream_cmd_encdec_cfg_blk enc_cfg;
+
+ int rc = 0;
+
+ pr_debug("%s: Session %d, rate = %d, channels = %d, setting the rate and channels to 0 for native\n",
+ __func__, ac->session, rate, channels);
+
+ q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+
+ enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+ enc_cfg.param_id = ASM_ENCDEC_CFG_BLK_ID;
+ enc_cfg.param_size = sizeof(struct asm_encode_cfg_blk);
+ enc_cfg.enc_blk.frames_per_buf = 1;
+ enc_cfg.enc_blk.format_id = LINEAR_PCM;
+ enc_cfg.enc_blk.cfg_size = sizeof(struct asm_pcm_cfg);
+ enc_cfg.enc_blk.cfg.pcm.ch_cfg = 0;/*channels;*/
+ enc_cfg.enc_blk.cfg.pcm.bits_per_sample = 16;
+ enc_cfg.enc_blk.cfg.pcm.sample_rate = 0;/*rate;*/
+ enc_cfg.enc_blk.cfg.pcm.is_signed = 1;
+ enc_cfg.enc_blk.cfg.pcm.interleaved = 1;
+
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
+ if (rc < 0) {
+ pr_err("Comamnd open failed\n");
+ rc = -EINVAL;
+ goto fail_cmd;
+ }
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) == 0), 5*HZ);
+ if (!rc) {
+ pr_err("timeout opcode[0x%x] ", enc_cfg.hdr.opcode);
+ goto fail_cmd;
+ }
+ return 0;
+fail_cmd:
+ return -EINVAL;
+}
+
int q6asm_enc_cfg_blk_multi_ch_pcm(struct audio_client *ac,
uint32_t rate, uint32_t channels)
{
@@ -1694,14 +1768,43 @@
enc_cfg.enc_blk.cfg.mpcm.sample_rate = rate;
enc_cfg.enc_blk.cfg.mpcm.is_signed = 1;
enc_cfg.enc_blk.cfg.mpcm.is_interleaved = 1;
- enc_cfg.enc_blk.cfg.mpcm.channel_mapping[0] = PCM_CHANNEL_FL;
- enc_cfg.enc_blk.cfg.mpcm.channel_mapping[1] = PCM_CHANNEL_FR;
- enc_cfg.enc_blk.cfg.mpcm.channel_mapping[2] = PCM_CHANNEL_RB;
- enc_cfg.enc_blk.cfg.mpcm.channel_mapping[3] = PCM_CHANNEL_LB;
- enc_cfg.enc_blk.cfg.mpcm.channel_mapping[4] = 0;
- enc_cfg.enc_blk.cfg.mpcm.channel_mapping[5] = 0;
- enc_cfg.enc_blk.cfg.mpcm.channel_mapping[6] = 0;
- enc_cfg.enc_blk.cfg.mpcm.channel_mapping[7] = 0;
+ if (channels == 2) {
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[0] = PCM_CHANNEL_FL;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[1] = PCM_CHANNEL_FR;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[2] = 0;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[3] = 0;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[4] = 0;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[5] = 0;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[6] = 0;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[7] = 0;
+ } else if (channels == 4) {
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[0] = PCM_CHANNEL_FL;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[1] = PCM_CHANNEL_FR;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[2] = PCM_CHANNEL_RB;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[3] = PCM_CHANNEL_LB;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[4] = 0;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[5] = 0;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[6] = 0;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[7] = 0;
+ } else if (channels == 6) {
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[0] = PCM_CHANNEL_FL;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[1] = PCM_CHANNEL_FR;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[2] = PCM_CHANNEL_LFE;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[3] = PCM_CHANNEL_FC;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[4] = PCM_CHANNEL_LB;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[5] = PCM_CHANNEL_RB;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[6] = 0;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[7] = 0;
+ } else if (channels == 8) {
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[0] = PCM_CHANNEL_FL;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[1] = PCM_CHANNEL_FR;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[2] = PCM_CHANNEL_LFE;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[3] = PCM_CHANNEL_FC;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[4] = PCM_CHANNEL_LB;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[5] = PCM_CHANNEL_RB;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[6] = PCM_CHANNEL_FLC;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[7] = PCM_CHANNEL_FRC;
+ }
rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
if (rc < 0) {
@@ -1861,8 +1964,8 @@
struct asm_stream_cmd_encdec_cfg_blk enc_cfg;
int rc = 0;
- pr_debug("%s:session[%d]frames[%d]min_rate[0x%4x]max_rate[0x%4x] \
- reduced_rate_level[0x%4x]rate_modulation_cmd[0x%4x]", __func__,
+ pr_debug("%s:session[%d]frames[%d]min_rate[0x%4x]max_rate[0x%4x] reduced_rate_level[0x%4x]rate_modulation_cmd[0x%4x]",
+ __func__,
ac->session, frames_per_buf, min_rate, max_rate,
reduced_rate_level, rate_modulation_cmd);
@@ -1904,8 +2007,8 @@
struct asm_stream_cmd_encdec_cfg_blk enc_cfg;
int rc = 0;
- pr_debug("%s:session[%d]frames[%d]min_rate[0x%4x]max_rate[0x%4x] \
- rate_modulation_cmd[0x%4x]", __func__, ac->session,
+ pr_debug("%s:session[%d]frames[%d]min_rate[0x%4x]max_rate[0x%4x] rate_modulation_cmd[0x%4x]",
+ __func__, ac->session,
frames_per_buf, min_rate, max_rate, rate_modulation_cmd);
q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
@@ -2238,6 +2341,12 @@
case FORMAT_MP3:
fmt.format = MP3;
break;
+ case FORMAT_DTS:
+ fmt.format = DTS;
+ break;
+ case FORMAT_DTS_LBR:
+ fmt.format = DTS_LBR;
+ break;
default:
pr_err("Invalid format[%d]\n", format);
goto fail_cmd;
@@ -2267,8 +2376,7 @@
struct asm_wma_cfg *wma_cfg = (struct asm_wma_cfg *)cfg;
int rc = 0;
- pr_debug("session[%d]format_tag[0x%4x] rate[%d] ch[0x%4x] bps[%d],\
- balign[0x%4x], bit_sample[0x%4x], ch_msk[%d], enc_opt[0x%4x]\n",
+ pr_debug("session[%d]format_tag[0x%4x] rate[%d] ch[0x%4x] bps[%d], balign[0x%4x], bit_sample[0x%4x], ch_msk[%d], enc_opt[0x%4x]\n",
ac->session, wma_cfg->format_tag, wma_cfg->sample_rate,
wma_cfg->ch_cfg, wma_cfg->avg_bytes_per_sec,
wma_cfg->block_align, wma_cfg->valid_bits_per_sample,
@@ -2319,9 +2427,7 @@
struct asm_wmapro_cfg *wmapro_cfg = (struct asm_wmapro_cfg *)cfg;
int rc = 0;
- pr_debug("session[%d]format_tag[0x%4x] rate[%d] ch[0x%4x] bps[%d],"
- "balign[0x%4x], bit_sample[0x%4x], ch_msk[%d], enc_opt[0x%4x],\
- adv_enc_opt[0x%4x], adv_enc_opt2[0x%8x]\n",
+ pr_debug("session[%d]format_tag[0x%4x] rate[%d] ch[0x%4x] bps[%d], balign[0x%4x], bit_sample[0x%4x], ch_msk[%d], enc_opt[0x%4x], adv_enc_opt[0x%4x], adv_enc_opt2[0x%8x]\n",
ac->session, wmapro_cfg->format_tag, wmapro_cfg->sample_rate,
wmapro_cfg->ch_cfg, wmapro_cfg->avg_bytes_per_sec,
wmapro_cfg->block_align, wmapro_cfg->valid_bits_per_sample,
@@ -2778,8 +2884,8 @@
params->period = pause_param->period;
params->step = pause_param->step;
params->rampingcurve = pause_param->rampingcurve;
- pr_debug("%s: soft Pause Command: enable = %d, period = %d,"
- "step = %d, curve = %d\n", __func__, params->enable,
+ pr_debug("%s: soft Pause Command: enable = %d, period = %d, step = %d, curve = %d\n",
+ __func__, params->enable,
params->period, params->step, params->rampingcurve);
rc = apr_send_pkt(ac->apr, (uint32_t *) vol_cmd);
if (rc < 0) {
@@ -2791,8 +2897,8 @@
rc = wait_event_timeout(ac->cmd_wait,
(atomic_read(&ac->cmd_state) == 0), 5*HZ);
if (!rc) {
- pr_err("%s: timeout in sending volume command(soft_pause)"
- "to apr\n", __func__);
+ pr_err("%s: timeout in sending volume command(soft_pause) to apr\n",
+ __func__);
rc = -EINVAL;
goto fail_cmd;
}
@@ -2837,13 +2943,13 @@
params->period = softvol_param->period;
params->step = softvol_param->step;
params->rampingcurve = softvol_param->rampingcurve;
- pr_debug("%s: soft Volume:opcode = %d,payload_sz =%d,module_id =%d,"
- "param_id = %d, param_sz = %d\n", __func__,
+ pr_debug("%s: soft Volume:opcode = %d,payload_sz =%d,module_id =%d, param_id = %d, param_sz = %d\n",
+ __func__,
cmd->hdr.opcode, cmd->payload_size,
cmd->params.module_id, cmd->params.param_id,
cmd->params.param_size);
- pr_debug("%s: soft Volume Command: period = %d,"
- "step = %d, curve = %d\n", __func__, params->period,
+ pr_debug("%s: soft Volume Command: period = %d, step = %d, curve = %d\n",
+ __func__, params->period,
params->step, params->rampingcurve);
rc = apr_send_pkt(ac->apr, (uint32_t *) vol_cmd);
if (rc < 0) {
@@ -2855,8 +2961,8 @@
rc = wait_event_timeout(ac->cmd_wait,
(atomic_read(&ac->cmd_state) == 0), 5*HZ);
if (!rc) {
- pr_err("%s: timeout in sending volume command(soft_volume)"
- "to apr\n", __func__);
+ pr_err("%s: timeout in sending volume command(soft_volume) to apr\n",
+ __func__);
rc = -EINVAL;
goto fail_cmd;
}
@@ -3197,8 +3303,8 @@
if ((strncmp(((char *)ab->data), zero_pattern, 2)) &&
(!strncmp(((char *)ab->data + 2), zero_pattern, 2))) {
do_gettimeofday(&out_warm_tv);
- pr_debug("WARM:apr_send_pkt at \
- %ld sec %ld microsec\n", out_warm_tv.tv_sec,\
+ pr_debug("WARM:apr_send_pkt at %ld sec %ld microsec\n",
+ out_warm_tv.tv_sec,
out_warm_tv.tv_usec);
pr_debug("Warm Pattern Matched");
}
@@ -3207,8 +3313,8 @@
else if ((!strncmp(((char *)ab->data), zero_pattern, 2))
&& (strncmp(((char *)ab->data + 2), zero_pattern, 2))) {
do_gettimeofday(&out_cont_tv);
- pr_debug("CONT:apr_send_pkt at \
- %ld sec %ld microsec\n", out_cont_tv.tv_sec,\
+ pr_debug("CONT:apr_send_pkt at %ld sec %ld microsec\n",
+ out_cont_tv.tv_sec,
out_cont_tv.tv_usec);
pr_debug("Cont Pattern Matched");
}
diff --git a/sound/soc/msm/qdsp6v2/Makefile b/sound/soc/msm/qdsp6v2/Makefile
index 6f765d1..ff2cc8d 100644
--- a/sound/soc/msm/qdsp6v2/Makefile
+++ b/sound/soc/msm/qdsp6v2/Makefile
@@ -1,4 +1,4 @@
snd-soc-qdsp6v2-objs += msm-dai-q6-v2.o msm-pcm-q6-v2.o msm-pcm-routing-v2.o msm-compr-q6-v2.o msm-multi-ch-pcm-q6-v2.o
-snd-soc-qdsp6v2-objs += msm-pcm-lpa-v2.o msm-pcm-afe-v2.o
+snd-soc-qdsp6v2-objs += msm-pcm-lpa-v2.o msm-pcm-afe-v2.o msm-pcm-voip-v2.o msm-pcm-voice-v2.o
obj-$(CONFIG_SND_SOC_QDSP6V2) += snd-soc-qdsp6v2.o
-obj-y += q6adm.o q6afe.o q6asm.o q6audio-v2.o
+obj-y += q6adm.o q6afe.o q6asm.o q6audio-v2.o q6voice.o
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index 2eebae5..b7aaf01 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -28,15 +28,17 @@
#include <sound/q6asm-v2.h>
#include <sound/q6afe-v2.h>
#include <sound/tlv.h>
+
#include "msm-pcm-routing-v2.h"
-#include "../qdsp6/q6voice.h"
+#include "q6voice.h"
struct msm_pcm_routing_bdai_data {
u16 port_id; /* AFE port ID */
u8 active; /* track if this backend is enabled */
- struct snd_pcm_hw_params *hw_params; /* to get freq and channel mode */
unsigned long fe_sessions; /* Front-end sessions */
unsigned long port_sessions; /* track Tx BE ports -> Rx BE */
+ unsigned int sample_rate;
+ unsigned int channel;
};
#define INVALID_SESSION -1
@@ -46,28 +48,26 @@
static struct mutex routing_lock;
static int fm_switch_enable;
+static int fm_pcmrx_switch_enable;
-#define INT_FM_RX_VOL_MAX_STEPS 100
-#define INT_FM_RX_VOL_GAIN 2000
-
-static int msm_route_fm_vol_control;
-static const DECLARE_TLV_DB_SCALE(fm_rx_vol_gain, 0,
- INT_FM_RX_VOL_MAX_STEPS, 0);
-
-#define INT_RX_VOL_MAX_STEPS 100
+#define INT_RX_VOL_MAX_STEPS 0x2000
#define INT_RX_VOL_GAIN 0x2000
+static int msm_route_fm_vol_control;
+static const DECLARE_TLV_DB_LINEAR(fm_rx_vol_gain, 0,
+ INT_RX_VOL_MAX_STEPS);
+
static int msm_route_lpa_vol_control;
-static const DECLARE_TLV_DB_SCALE(lpa_rx_vol_gain, 0,
- INT_RX_VOL_MAX_STEPS, 0);
+static const DECLARE_TLV_DB_LINEAR(lpa_rx_vol_gain, 0,
+ INT_RX_VOL_MAX_STEPS);
static int msm_route_multimedia2_vol_control;
-static const DECLARE_TLV_DB_SCALE(multimedia2_rx_vol_gain, 0,
- INT_RX_VOL_MAX_STEPS, 0);
+static const DECLARE_TLV_DB_LINEAR(multimedia2_rx_vol_gain, 0,
+ INT_RX_VOL_MAX_STEPS);
static int msm_route_compressed_vol_control;
-static const DECLARE_TLV_DB_SCALE(compressed_rx_vol_gain, 0,
- INT_RX_VOL_MAX_STEPS, 0);
+static const DECLARE_TLV_DB_LINEAR(compressed_rx_vol_gain, 0,
+ INT_RX_VOL_MAX_STEPS);
@@ -111,28 +111,35 @@
/* This array is indexed by back-end DAI ID defined in msm-pcm-routing.h
* If new back-end is defined, add new back-end DAI ID at the end of enum
*/
+#define SLIMBUS_EXTPROC_RX AFE_PORT_INVALID
static struct msm_pcm_routing_bdai_data msm_bedais[MSM_BACKEND_DAI_MAX] = {
- { PRIMARY_I2S_RX, 0, NULL, 0, 0},
- { PRIMARY_I2S_TX, 0, NULL, 0, 0},
- { SLIMBUS_0_RX, 0, NULL, 0, 0},
- { SLIMBUS_0_TX, 0, NULL, 0, 0},
- { HDMI_RX, 0, NULL, 0, 0},
- { INT_BT_SCO_RX, 0, NULL, 0, 0},
- { INT_BT_SCO_TX, 0, NULL, 0, 0},
- { INT_FM_RX, 0, NULL, 0, 0},
- { INT_FM_TX, 0, NULL, 0, 0},
- { RT_PROXY_PORT_001_RX, 0, NULL, 0, 0},
- { RT_PROXY_PORT_001_TX, 0, NULL, 0, 0},
- { PCM_RX, 0, NULL, 0, 0},
- { PCM_TX, 0, NULL, 0, 0},
- { VOICE_PLAYBACK_TX, 0, NULL, 0, 0},
- { VOICE_RECORD_RX, 0, NULL, 0, 0},
- { VOICE_RECORD_TX, 0, NULL, 0, 0},
- { MI2S_RX, 0, NULL, 0, 0},
- { SECONDARY_I2S_RX, 0, NULL, 0, 0},
- { SLIMBUS_1_RX, 0, NULL, 0, 0},
- { SLIMBUS_1_TX, 0, NULL, 0, 0},
- { SLIMBUS_INVALID, 0, NULL, 0, 0},
+ { PRIMARY_I2S_RX, 0, 0, 0, 0, 0},
+ { PRIMARY_I2S_TX, 0, 0, 0, 0, 0},
+ { SLIMBUS_0_RX, 0, 0, 0, 0, 0},
+ { SLIMBUS_0_TX, 0, 0, 0, 0, 0},
+ { HDMI_RX, 0, 0, 0, 0, 0},
+ { INT_BT_SCO_RX, 0, 0, 0, 0, 0},
+ { INT_BT_SCO_TX, 0, 0, 0, 0, 0},
+ { INT_FM_RX, 0, 0, 0, 0, 0},
+ { INT_FM_TX, 0, 0, 0, 0, 0},
+ { RT_PROXY_PORT_001_RX, 0, 0, 0, 0, 0},
+ { RT_PROXY_PORT_001_TX, 0, 0, 0, 0, 0},
+ { PCM_RX, 0, 0, 0, 0, 0},
+ { PCM_TX, 0, 0, 0, 0, 0},
+ { VOICE_PLAYBACK_TX, 0, 0, 0, 0, 0},
+ { VOICE_RECORD_RX, 0, 0, 0, 0, 0},
+ { VOICE_RECORD_TX, 0, 0, 0, 0, 0},
+ { MI2S_RX, 0, 0, 0, 0, 0},
+	{ MI2S_TX, 0, 0, 0, 0, 0},
+ { SECONDARY_I2S_RX, 0, 0, 0, 0, 0},
+ { SLIMBUS_1_RX, 0, 0, 0, 0, 0},
+ { SLIMBUS_1_TX, 0, 0, 0, 0, 0},
+ { SLIMBUS_4_RX, 0, 0, 0, 0, 0},
+ { SLIMBUS_4_TX, 0, 0, 0, 0, 0},
+ { SLIMBUS_3_RX, 0, 0, 0, 0, 0},
+ { SLIMBUS_EXTPROC_RX, 0, 0, 0, 0, 0},
+ { SLIMBUS_EXTPROC_RX, 0, 0, 0, 0, 0},
+ { SLIMBUS_EXTPROC_RX, 0, 0, 0, 0, 0},
};
@@ -148,6 +155,16 @@
{INVALID_SESSION, INVALID_SESSION},
};
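+
+/* The EXTPROC back ends are placeholders (SLIMBUS_EXTPROC_RX is defined
+ * as AFE_PORT_INVALID), so the routing loops below must skip them.
+ */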
+static uint8_t is_be_dai_extproc(int be_dai)
+{
+ if (be_dai == MSM_BACKEND_DAI_EXTPROC_RX ||
+ be_dai == MSM_BACKEND_DAI_EXTPROC_TX ||
+ be_dai == MSM_BACKEND_DAI_EXTPROC_EC_TX)
+ return 1;
+ else
+ return 0;
+}
+
static void msm_pcm_routing_build_matrix(int fedai_id, int dspst_id,
int path_type)
{
@@ -159,10 +176,10 @@
MSM_AFE_PORT_TYPE_RX : MSM_AFE_PORT_TYPE_TX);
for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
- if ((afe_get_port_type(msm_bedais[i].port_id) ==
- port_type) &&
- msm_bedais[i].active && (test_bit(fedai_id,
- &msm_bedais[i].fe_sessions)))
+ if (!is_be_dai_extproc(i) &&
+ (afe_get_port_type(msm_bedais[i].port_id) == port_type) &&
+ (msm_bedais[i].active) &&
+ (test_bit(fedai_id, &msm_bedais[i].fe_sessions)))
payload.copp_ids[payload.num_copps++] =
msm_bedais[i].port_id;
}
@@ -172,6 +189,44 @@
payload.num_copps, payload.copp_ids, 0);
}
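+
+/* Record the DSP stream id for this front end and find the first active
+ * back end it is routed to.
+ */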
+void msm_pcm_routing_reg_psthr_stream(int fedai_id, int dspst_id,
+ int stream_type)
+{
+ int i, session_type, path_type, port_type;
+ u32 mode = 0;
+
+ if (fedai_id > MSM_FRONTEND_DAI_MM_MAX_ID) {
+ /* bad ID assigned in machine driver */
+ pr_err("%s: bad MM ID\n", __func__);
+ return;
+ }
+
+ if (stream_type == SNDRV_PCM_STREAM_PLAYBACK) {
+ session_type = SESSION_TYPE_RX;
+ path_type = ADM_PATH_PLAYBACK;
+ port_type = MSM_AFE_PORT_TYPE_RX;
+ } else {
+ session_type = SESSION_TYPE_TX;
+ path_type = ADM_PATH_LIVE_REC;
+ port_type = MSM_AFE_PORT_TYPE_TX;
+ }
+
+ mutex_lock(&routing_lock);
+
+ fe_dai_map[fedai_id][session_type] = dspst_id;
+ for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
+ if (!is_be_dai_extproc(i) &&
+ (afe_get_port_type(msm_bedais[i].port_id) == port_type) &&
+ (msm_bedais[i].active) &&
+ (test_bit(fedai_id, &msm_bedais[i].fe_sessions))) {
+ mode = afe_get_port_type(msm_bedais[i].port_id);
+ /*adm_connect_afe_port needs to be called*/
+ break;
+ }
+ }
+ mutex_unlock(&routing_lock);
+}
+
void msm_pcm_routing_reg_phy_stream(int fedai_id, int dspst_id, int stream_type)
{
int i, session_type, path_type, port_type;
@@ -202,25 +257,25 @@
if (eq_data[fedai_id].enable)
msm_send_eq_values(fedai_id);
for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
- if ((afe_get_port_type(msm_bedais[i].port_id) ==
- port_type) && msm_bedais[i].active &&
- (test_bit(fedai_id,
- &msm_bedais[i].fe_sessions))) {
+ if (!is_be_dai_extproc(i) &&
+ (afe_get_port_type(msm_bedais[i].port_id) == port_type) &&
+ (msm_bedais[i].active) &&
+ (test_bit(fedai_id, &msm_bedais[i].fe_sessions))) {
- channels = params_channels(msm_bedais[i].hw_params);
+ channels = msm_bedais[i].channel;
if ((stream_type == SNDRV_PCM_STREAM_PLAYBACK) &&
(channels > 2))
adm_multi_ch_copp_open(msm_bedais[i].port_id,
path_type,
- params_rate(msm_bedais[i].hw_params),
- channels,
+ msm_bedais[i].sample_rate,
+ msm_bedais[i].channel,
DEFAULT_COPP_TOPOLOGY);
else
adm_open(msm_bedais[i].port_id,
path_type,
- params_rate(msm_bedais[i].hw_params),
- params_channels(msm_bedais[i].hw_params),
+ msm_bedais[i].sample_rate,
+ msm_bedais[i].channel,
DEFAULT_COPP_TOPOLOGY);
payload.copp_ids[payload.num_copps++] =
@@ -255,10 +310,10 @@
mutex_lock(&routing_lock);
for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
- if ((afe_get_port_type(msm_bedais[i].port_id) ==
- port_type) && msm_bedais[i].active &&
- (test_bit(fedai_id,
- &msm_bedais[i].fe_sessions)))
+ if (!is_be_dai_extproc(i) &&
+ (afe_get_port_type(msm_bedais[i].port_id) == port_type) &&
+ (msm_bedais[i].active) &&
+ (test_bit(fedai_id, &msm_bedais[i].fe_sessions)))
adm_close(msm_bedais[i].port_id);
}
@@ -309,29 +364,35 @@
mutex_lock(&routing_lock);
if (set) {
+ if (!test_bit(val, &msm_bedais[reg].fe_sessions) &&
+ (msm_bedais[reg].port_id == VOICE_PLAYBACK_TX))
+ voc_start_playback(set);
+
set_bit(val, &msm_bedais[reg].fe_sessions);
if (msm_bedais[reg].active && fe_dai_map[val][session_type] !=
INVALID_SESSION) {
- channels = params_channels(msm_bedais[reg].hw_params);
+ channels = msm_bedais[reg].channel;
if ((session_type == SESSION_TYPE_RX) && (channels > 2))
adm_multi_ch_copp_open(msm_bedais[reg].port_id,
path_type,
- params_rate(msm_bedais[reg].hw_params),
+ msm_bedais[reg].sample_rate,
channels,
DEFAULT_COPP_TOPOLOGY);
else
adm_open(msm_bedais[reg].port_id,
path_type,
- params_rate(msm_bedais[reg].hw_params),
- params_channels(msm_bedais[reg].hw_params),
+ msm_bedais[reg].sample_rate, channels,
DEFAULT_COPP_TOPOLOGY);
msm_pcm_routing_build_matrix(val,
fe_dai_map[val][session_type], path_type);
}
} else {
+ if (test_bit(val, &msm_bedais[reg].fe_sessions) &&
+ (msm_bedais[reg].port_id == VOICE_PLAYBACK_TX))
+ voc_start_playback(set);
clear_bit(val, &msm_bedais[reg].fe_sessions);
if (msm_bedais[reg].active && fe_dai_map[val][session_type] !=
INVALID_SESSION) {
@@ -340,6 +401,10 @@
fe_dai_map[val][session_type], path_type);
}
}
+ if ((msm_bedais[reg].port_id == VOICE_RECORD_RX)
+ || (msm_bedais[reg].port_id == VOICE_RECORD_TX))
+ voc_start_record(msm_bedais[reg].port_id, set);
+
mutex_unlock(&routing_lock);
}
@@ -354,7 +419,7 @@
else
ucontrol->value.integer.value[0] = 0;
- pr_info("%s: reg %x shift %x val %ld\n", __func__, mc->reg, mc->shift,
+ pr_debug("%s: reg %x shift %x val %ld\n", __func__, mc->reg, mc->shift,
ucontrol->value.integer.value[0]);
return 0;
@@ -370,23 +435,68 @@
if (ucontrol->value.integer.value[0] &&
- msm_pcm_routing_route_is_set(mc->reg, mc->shift) == false) {
+ msm_pcm_routing_route_is_set(mc->reg, mc->shift) == false) {
msm_pcm_routing_process_audio(mc->reg, mc->shift, 1);
snd_soc_dapm_mixer_update_power(widget, kcontrol, 1);
} else if (!ucontrol->value.integer.value[0] &&
- msm_pcm_routing_route_is_set(mc->reg, mc->shift) == true) {
+ msm_pcm_routing_route_is_set(mc->reg, mc->shift) == true) {
msm_pcm_routing_process_audio(mc->reg, mc->shift, 0);
snd_soc_dapm_mixer_update_power(widget, kcontrol, 0);
}
- pr_info("%s: reg %x shift %x val %ld\n", __func__, mc->reg, mc->shift,
- ucontrol->value.integer.value[0]);
return 1;
}
static void msm_pcm_routing_process_voice(u16 reg, u16 val, int set)
{
- return;
+ u16 session_id = 0;
+
+ pr_debug("%s: reg %x val %x set %x\n", __func__, reg, val, set);
+
+ if (val == MSM_FRONTEND_DAI_CS_VOICE)
+ session_id = voc_get_session_id(VOICE_SESSION_NAME);
+ else if (val == MSM_FRONTEND_DAI_VOLTE)
+ session_id = voc_get_session_id(VOLTE_SESSION_NAME);
+ else
+ session_id = voc_get_session_id(VOIP_SESSION_NAME);
+
+ pr_debug("%s: FE DAI 0x%x session_id 0x%x\n",
+ __func__, val, session_id);
+
+ mutex_lock(&routing_lock);
+
+ if (set)
+ set_bit(val, &msm_bedais[reg].fe_sessions);
+ else
+ clear_bit(val, &msm_bedais[reg].fe_sessions);
+
+ mutex_unlock(&routing_lock);
+
+ if (afe_get_port_type(msm_bedais[reg].port_id) ==
+ MSM_AFE_PORT_TYPE_RX) {
+ voc_set_route_flag(session_id, RX_PATH, set);
+ if (set) {
+ voc_set_rxtx_port(session_id,
+ msm_bedais[reg].port_id, DEV_RX);
+
+ if (voc_get_route_flag(session_id, RX_PATH) &&
+ voc_get_route_flag(session_id, TX_PATH))
+ voc_enable_cvp(session_id);
+ } else {
+ voc_disable_cvp(session_id);
+ }
+ } else {
+ voc_set_route_flag(session_id, TX_PATH, set);
+ if (set) {
+ voc_set_rxtx_port(session_id,
+ msm_bedais[reg].port_id, DEV_TX);
+ if (voc_get_route_flag(session_id, RX_PATH) &&
+ voc_get_route_flag(session_id, TX_PATH))
+ voc_enable_cvp(session_id);
+ } else {
+ voc_disable_cvp(session_id);
+ }
+ }
}
static int msm_routing_get_voice_mixer(struct snd_kcontrol *kcontrol,
@@ -503,6 +613,31 @@
return 1;
}
+static int msm_routing_get_fm_pcmrx_switch_mixer(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] = fm_pcmrx_switch_enable;
+ pr_debug("%s: FM Switch enable %ld\n", __func__,
+ ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_routing_put_fm_pcmrx_switch_mixer(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+ struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+
+ pr_debug("%s: FM Switch enable %ld\n", __func__,
+ ucontrol->value.integer.value[0]);
+ if (ucontrol->value.integer.value[0])
+ snd_soc_dapm_mixer_update_power(widget, kcontrol, 1);
+ else
+ snd_soc_dapm_mixer_update_power(widget, kcontrol, 0);
+ fm_pcmrx_switch_enable = ucontrol->value.integer.value[0];
+ return 1;
+}
+
static int msm_routing_get_port_mixer(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -531,12 +666,12 @@
if (ucontrol->value.integer.value[0]) {
afe_loopback(1, msm_bedais[mc->reg].port_id,
- msm_bedais[mc->shift].port_id);
+ msm_bedais[mc->shift].port_id);
set_bit(mc->shift,
&msm_bedais[mc->reg].port_sessions);
} else {
afe_loopback(0, msm_bedais[mc->reg].port_id,
- msm_bedais[mc->shift].port_id);
+ msm_bedais[mc->shift].port_id);
clear_bit(mc->shift,
&msm_bedais[mc->reg].port_sessions);
}
@@ -576,7 +711,6 @@
ucontrol->value.integer.value[0];
return 0;
-
}
static int msm_routing_get_multimedia2_vol_mixer(struct snd_kcontrol *kcontrol,
@@ -590,9 +724,11 @@
static int msm_routing_set_multimedia2_vol_mixer(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
+
if (!multi_ch_pcm_set_volume(ucontrol->value.integer.value[0]))
msm_route_multimedia2_vol_control =
ucontrol->value.integer.value[0];
+
return 0;
}
@@ -610,6 +746,7 @@
if (!compressed_set_volume(ucontrol->value.integer.value[0]))
msm_route_compressed_vol_control =
ucontrol->value.integer.value[0];
+
return 0;
}
@@ -621,7 +758,7 @@
if (ac == NULL) {
pr_err("%s: Could not get audio client for session: %d\n",
- __func__, fe_dai_map[eq_idx][SESSION_TYPE_RX]);
+ __func__, fe_dai_map[eq_idx][SESSION_TYPE_RX]);
goto done;
}
@@ -629,7 +766,7 @@
if (result < 0)
pr_err("%s: Call to ASM equalizer failed, returned = %d\n",
- __func__, result);
+ __func__, result);
done:
return;
}
@@ -827,6 +964,15 @@
msm_routing_put_audio_mixer),
};
+static const struct snd_kcontrol_new slimbus_4_rx_mixer_controls[] = {
+ SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_SLIMBUS_4_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_SLIMBUS_4_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
static const struct snd_kcontrol_new int_bt_sco_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_INT_BT_SCO_RX,
MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
@@ -891,6 +1037,9 @@
SOC_SINGLE_EXT("PRI_TX", MSM_BACKEND_DAI_PRI_I2S_TX,
MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
@@ -912,12 +1061,18 @@
SOC_SINGLE_EXT("VOC_REC_UL", MSM_BACKEND_DAI_INCALL_RECORD_TX,
MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SLIM_4_TX", MSM_BACKEND_DAI_SLIMBUS_4_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new mmul2_mixer_controls[] = {
SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_INT_FM_TX,
MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new pri_rx_voice_mixer_controls[] = {
@@ -927,6 +1082,9 @@
SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_PRI_I2S_RX,
MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_PRI_I2S_RX,
+ MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
};
static const struct snd_kcontrol_new sec_i2s_rx_voice_mixer_controls[] = {
@@ -936,6 +1094,9 @@
SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_SEC_I2S_RX,
MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_SEC_I2S_RX,
+ MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
};
static const struct snd_kcontrol_new slimbus_rx_voice_mixer_controls[] = {
@@ -945,6 +1106,9 @@
SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_SLIMBUS_0_RX ,
MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_SLIMBUS_0_RX ,
+ MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
};
static const struct snd_kcontrol_new bt_sco_rx_voice_mixer_controls[] = {
@@ -957,6 +1121,21 @@
SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_INT_BT_SCO_RX,
MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
msm_routing_put_voice_stub_mixer),
+ SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_INT_BT_SCO_RX ,
+ MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
+};
+
+static const struct snd_kcontrol_new mi2s_rx_voice_mixer_controls[] = {
+ SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_MI2S_RX,
+ MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_MI2S_RX,
+ MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_MI2S_RX,
+ MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
};
static const struct snd_kcontrol_new afe_pcm_rx_voice_mixer_controls[] = {
@@ -969,6 +1148,9 @@
SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_AFE_PCM_RX,
MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
msm_routing_put_voice_stub_mixer),
+ SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_AFE_PCM_RX,
+ MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
};
static const struct snd_kcontrol_new aux_pcm_rx_voice_mixer_controls[] = {
@@ -981,6 +1163,9 @@
SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_AUXPCM_RX,
MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
msm_routing_put_voice_stub_mixer),
+ SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_AUXPCM_RX,
+ MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
};
static const struct snd_kcontrol_new hdmi_rx_voice_mixer_controls[] = {
@@ -990,10 +1175,16 @@
SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_HDMI_RX,
MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_HDMI_RX,
+ MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_HDMI_RX,
+ MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
};
static const struct snd_kcontrol_new stub_rx_mixer_controls[] = {
- SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_INVALID,
+ SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_EXTPROC_RX,
MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
msm_routing_put_voice_stub_mixer),
};
@@ -1004,10 +1195,19 @@
msm_routing_put_voice_stub_mixer),
};
+static const struct snd_kcontrol_new slimbus_3_rx_mixer_controls[] = {
+ SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_SLIMBUS_3_RX,
+ MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
+};
+
static const struct snd_kcontrol_new tx_voice_mixer_controls[] = {
SOC_SINGLE_EXT("PRI_TX_Voice", MSM_BACKEND_DAI_PRI_I2S_TX,
MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("MI2S_TX_Voice", MSM_BACKEND_DAI_MI2S_TX,
+ MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
SOC_SINGLE_EXT("SLIM_0_TX_Voice", MSM_BACKEND_DAI_SLIMBUS_0_TX,
MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
@@ -1022,10 +1222,31 @@
msm_routing_put_voice_mixer),
};
+static const struct snd_kcontrol_new tx_volte_mixer_controls[] = {
+ SOC_SINGLE_EXT("PRI_TX_VoLTE", MSM_BACKEND_DAI_PRI_I2S_TX,
+ MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("SLIM_0_TX_VoLTE", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+ MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX_VoLTE",
+ MSM_BACKEND_DAI_INT_BT_SCO_TX, MSM_FRONTEND_DAI_VOLTE, 1, 0,
+ msm_routing_get_voice_mixer, msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("AFE_PCM_TX_VoLTE", MSM_BACKEND_DAI_AFE_PCM_TX,
+ MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("AUX_PCM_TX_VoLTE", MSM_BACKEND_DAI_AUXPCM_TX,
+ MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
+};
+
static const struct snd_kcontrol_new tx_voip_mixer_controls[] = {
SOC_SINGLE_EXT("PRI_TX_Voip", MSM_BACKEND_DAI_PRI_I2S_TX,
MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("MI2S_TX_Voip", MSM_BACKEND_DAI_MI2S_TX,
+ MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
SOC_SINGLE_EXT("SLIM_0_TX_Voip", MSM_BACKEND_DAI_SLIMBUS_0_TX,
MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
@@ -1041,7 +1262,7 @@
};
static const struct snd_kcontrol_new tx_voice_stub_mixer_controls[] = {
- SOC_SINGLE_EXT("STUB_TX_HL", MSM_BACKEND_DAI_INVALID,
+ SOC_SINGLE_EXT("STUB_TX_HL", MSM_BACKEND_DAI_EXTPROC_TX,
MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
msm_routing_put_voice_stub_mixer),
SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_INT_BT_SCO_TX,
@@ -1050,6 +1271,12 @@
SOC_SINGLE_EXT("SLIM_1_TX", MSM_BACKEND_DAI_SLIMBUS_1_TX,
MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
msm_routing_put_voice_stub_mixer),
+ SOC_SINGLE_EXT("STUB_1_TX_HL", MSM_BACKEND_DAI_EXTPROC_EC_TX,
+ MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
+ SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_MI2S_TX,
+ MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
};
static const struct snd_kcontrol_new sbus_0_rx_port_mixer_controls[] = {
@@ -1062,6 +1289,9 @@
SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_SLIMBUS_0_RX,
MSM_BACKEND_DAI_AUXPCM_TX, 1, 0, msm_routing_get_port_mixer,
msm_routing_put_port_mixer),
+ SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+ MSM_BACKEND_DAI_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+ msm_routing_put_port_mixer),
};
static const struct snd_kcontrol_new auxpcm_rx_port_mixer_controls[] = {
@@ -1079,20 +1309,58 @@
msm_routing_put_port_mixer),
};
+static const struct snd_kcontrol_new sbus_3_rx_port_mixer_controls[] = {
+ SOC_SINGLE_EXT("INTERNAL_BT_SCO_RX", MSM_BACKEND_DAI_SLIMBUS_3_RX,
+ MSM_BACKEND_DAI_INT_BT_SCO_RX, 1, 0, msm_routing_get_port_mixer,
+ msm_routing_put_port_mixer),
+ SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_SLIMBUS_3_RX,
+ MSM_BACKEND_DAI_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+ msm_routing_put_port_mixer),
+};
static const struct snd_kcontrol_new bt_sco_rx_port_mixer_controls[] = {
SOC_SINGLE_EXT("SLIM_1_TX", MSM_BACKEND_DAI_INT_BT_SCO_RX,
MSM_BACKEND_DAI_SLIMBUS_1_TX, 1, 0, msm_routing_get_port_mixer,
msm_routing_put_port_mixer),
};
+static const struct snd_kcontrol_new afe_pcm_rx_port_mixer_controls[] = {
+ SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_AFE_PCM_RX,
+ MSM_BACKEND_DAI_INT_FM_TX, 1, 0, msm_routing_get_port_mixer,
+ msm_routing_put_port_mixer),
+};
+
+
+static const struct snd_kcontrol_new hdmi_rx_port_mixer_controls[] = {
+ SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_HDMI_RX,
+ MSM_BACKEND_DAI_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+ msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new sec_i2s_rx_port_mixer_controls[] = {
+ SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_SEC_I2S_RX,
+ MSM_BACKEND_DAI_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+ msm_routing_put_port_mixer),
+};
+
+static const struct snd_kcontrol_new mi2s_rx_port_mixer_controls[] = {
+ SOC_SINGLE_EXT("SLIM_1_TX", MSM_BACKEND_DAI_MI2S_RX,
+ MSM_BACKEND_DAI_SLIMBUS_1_TX, 1, 0, msm_routing_get_port_mixer,
+ msm_routing_put_port_mixer),
+};
+
static const struct snd_kcontrol_new fm_switch_mixer_controls =
SOC_SINGLE_EXT("Switch", SND_SOC_NOPM,
0, 1, 0, msm_routing_get_switch_mixer,
msm_routing_put_switch_mixer);
+static const struct snd_kcontrol_new pcm_rx_switch_mixer_controls =
+ SOC_SINGLE_EXT("Switch", SND_SOC_NOPM,
+ 0, 1, 0, msm_routing_get_fm_pcmrx_switch_mixer,
+ msm_routing_put_fm_pcmrx_switch_mixer);
+
static const struct snd_kcontrol_new int_fm_vol_mixer_controls[] = {
SOC_SINGLE_EXT_TLV("Internal FM RX Volume", SND_SOC_NOPM, 0,
- INT_FM_RX_VOL_GAIN, 0, msm_routing_get_fm_vol_mixer,
+ INT_RX_VOL_GAIN, 0, msm_routing_get_fm_vol_mixer,
msm_routing_set_fm_vol_mixer, fm_rx_vol_gain),
};
@@ -1113,7 +1381,6 @@
INT_RX_VOL_GAIN, 0, msm_routing_get_compressed_vol_mixer,
msm_routing_set_compressed_vol_mixer, compressed_rx_vol_gain),
};
-
static const struct snd_kcontrol_new eq_enable_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia1 EQ Enable", SND_SOC_NOPM,
MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_eq_enable_mixer,
@@ -1302,6 +1569,8 @@
SND_SOC_DAPM_AIF_OUT("MM_UL2", "MultiMedia2 Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_IN("CS-VOICE_DL1", "CS-VOICE Playback", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("CS-VOICE_UL1", "CS-VOICE Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("VoLTE_DL", "VoLTE Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("VoLTE_UL", "VoLTE Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("VOIP_UL", "VoIP Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_IN("SLIM0_DL_HL", "SLIMBUS0_HOSTLESS Playback",
0, 0, 0, 0),
@@ -1312,14 +1581,18 @@
SND_SOC_DAPM_AIF_OUT("INTFM_UL_HL", "INT_FM_HOSTLESS Capture",
0, 0, 0, 0),
SND_SOC_DAPM_AIF_IN("HDMI_DL_HL", "HDMI_HOSTLESS Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SEC_I2S_DL_HL", "SEC_I2S_RX_HOSTLESS Playback",
+ 0, 0, 0, 0),
SND_SOC_DAPM_AIF_IN("AUXPCM_DL_HL", "AUXPCM_HOSTLESS Playback",
0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("AUXPCM_UL_HL", "AUXPCM_HOSTLESS Capture",
0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("MI2S_UL_HL", "MI2S_TX_HOSTLESS Capture",
+ 0, 0, 0, 0),
/* Backend AIF */
/* Stream name equals to backend dai link stream name
- */
+ */
SND_SOC_DAPM_AIF_OUT("PRI_I2S_RX", "Primary I2S Playback", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("SEC_I2S_RX", "Secondary I2S Playback",
0, 0, 0 , 0),
@@ -1327,6 +1600,7 @@
SND_SOC_DAPM_AIF_OUT("HDMI", "HDMI Playback", 0, 0, 0 , 0),
SND_SOC_DAPM_AIF_OUT("MI2S_RX", "MI2S Playback", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_IN("PRI_I2S_TX", "Primary I2S Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("MI2S_TX", "MI2S Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_IN("SLIMBUS_0_TX", "Slimbus Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("INT_BT_SCO_RX", "Internal BT-SCO Playback",
0, 0, 0 , 0),
@@ -1343,10 +1617,15 @@
/* incall */
SND_SOC_DAPM_AIF_OUT("VOICE_PLAYBACK_TX", "Voice Farend Playback",
0, 0, 0 , 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_4_RX", "Slimbus4 Playback",
+ 0, 0, 0 , 0),
SND_SOC_DAPM_AIF_IN("INCALL_RECORD_TX", "Voice Uplink Capture",
0, 0, 0, 0),
SND_SOC_DAPM_AIF_IN("INCALL_RECORD_RX", "Voice Downlink Capture",
0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SLIMBUS_4_TX", "Slimbus4 Capture",
+ 0, 0, 0, 0),
+
SND_SOC_DAPM_AIF_OUT("AUX_PCM_RX", "AUX PCM Playback", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_IN("AUX_PCM_TX", "AUX PCM Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_IN("VOICE_STUB_DL", "VOICE_STUB Playback", 0, 0, 0, 0),
@@ -1355,10 +1634,14 @@
SND_SOC_DAPM_AIF_IN("STUB_TX", "Stub Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("SLIMBUS_1_RX", "Slimbus1 Playback", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_IN("SLIMBUS_1_TX", "Slimbus1 Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("STUB_1_TX", "Stub1 Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_3_RX", "Slimbus3 Playback", 0, 0, 0, 0),
/* Switch Definitions */
SND_SOC_DAPM_SWITCH("SLIMBUS_DL_HL", SND_SOC_NOPM, 0, 0,
&fm_switch_mixer_controls),
+ SND_SOC_DAPM_SWITCH("PCM_RX_DL_HL", SND_SOC_NOPM, 0, 0,
+ &pcm_rx_switch_mixer_controls),
/* Mixer definitions */
SND_SOC_DAPM_MIXER("PRI_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
pri_i2s_rx_mixer_controls, ARRAY_SIZE(pri_i2s_rx_mixer_controls)),
@@ -1380,6 +1663,9 @@
SND_SOC_DAPM_MIXER("Incall_Music Audio Mixer", SND_SOC_NOPM, 0, 0,
incall_music_delivery_mixer_controls,
ARRAY_SIZE(incall_music_delivery_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SLIMBUS_4_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+ slimbus_4_rx_mixer_controls,
+ ARRAY_SIZE(slimbus_4_rx_mixer_controls)),
/* Voice Mixer */
SND_SOC_DAPM_MIXER("PRI_RX_Voice Mixer",
SND_SOC_NOPM, 0, 0, pri_rx_voice_mixer_controls,
@@ -1408,12 +1694,19 @@
SND_SOC_NOPM, 0, 0,
hdmi_rx_voice_mixer_controls,
ARRAY_SIZE(hdmi_rx_voice_mixer_controls)),
+ SND_SOC_DAPM_MIXER("MI2S_RX_Voice Mixer",
+ SND_SOC_NOPM, 0, 0,
+ mi2s_rx_voice_mixer_controls,
+ ARRAY_SIZE(mi2s_rx_voice_mixer_controls)),
SND_SOC_DAPM_MIXER("Voice_Tx Mixer",
SND_SOC_NOPM, 0, 0, tx_voice_mixer_controls,
ARRAY_SIZE(tx_voice_mixer_controls)),
SND_SOC_DAPM_MIXER("Voip_Tx Mixer",
SND_SOC_NOPM, 0, 0, tx_voip_mixer_controls,
ARRAY_SIZE(tx_voip_mixer_controls)),
+ SND_SOC_DAPM_MIXER("VoLTE_Tx Mixer",
+ SND_SOC_NOPM, 0, 0, tx_volte_mixer_controls,
+ ARRAY_SIZE(tx_volte_mixer_controls)),
SND_SOC_DAPM_MIXER("INTERNAL_BT_SCO_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
int_bt_sco_rx_mixer_controls, ARRAY_SIZE(int_bt_sco_rx_mixer_controls)),
SND_SOC_DAPM_MIXER("INTERNAL_FM_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
@@ -1426,6 +1719,8 @@
stub_rx_mixer_controls, ARRAY_SIZE(stub_rx_mixer_controls)),
SND_SOC_DAPM_MIXER("SLIMBUS_1_RX Mixer", SND_SOC_NOPM, 0, 0,
slimbus_1_rx_mixer_controls, ARRAY_SIZE(slimbus_1_rx_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SLIMBUS_3_RX_Voice Mixer", SND_SOC_NOPM, 0, 0,
+ slimbus_3_rx_mixer_controls, ARRAY_SIZE(slimbus_3_rx_mixer_controls)),
SND_SOC_DAPM_MIXER("SLIMBUS_0_RX Port Mixer",
SND_SOC_NOPM, 0, 0, sbus_0_rx_port_mixer_controls,
ARRAY_SIZE(sbus_0_rx_port_mixer_controls)),
@@ -1438,6 +1733,25 @@
SND_SOC_DAPM_MIXER("INTERNAL_BT_SCO_RX Port Mixer", SND_SOC_NOPM, 0, 0,
bt_sco_rx_port_mixer_controls,
ARRAY_SIZE(bt_sco_rx_port_mixer_controls)),
+ SND_SOC_DAPM_MIXER("AFE_PCM_RX Port Mixer",
+ SND_SOC_NOPM, 0, 0, afe_pcm_rx_port_mixer_controls,
+ ARRAY_SIZE(afe_pcm_rx_port_mixer_controls)),
+ SND_SOC_DAPM_MIXER("HDMI_RX Port Mixer",
+ SND_SOC_NOPM, 0, 0, hdmi_rx_port_mixer_controls,
+ ARRAY_SIZE(hdmi_rx_port_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SEC_I2S_RX Port Mixer",
+ SND_SOC_NOPM, 0, 0, sec_i2s_rx_port_mixer_controls,
+ ARRAY_SIZE(sec_i2s_rx_port_mixer_controls)),
+ SND_SOC_DAPM_MIXER("SLIMBUS_3_RX Port Mixer",
+ SND_SOC_NOPM, 0, 0, sbus_3_rx_port_mixer_controls,
+ ARRAY_SIZE(sbus_3_rx_port_mixer_controls)),
+ SND_SOC_DAPM_MIXER("MI2S_RX Port Mixer", SND_SOC_NOPM, 0, 0,
+ mi2s_rx_port_mixer_controls, ARRAY_SIZE(mi2s_rx_port_mixer_controls)),
+
+ /* Virtual Pins to force backends ON atm */
+ SND_SOC_DAPM_OUTPUT("BE_OUT"),
+ SND_SOC_DAPM_INPUT("BE_IN"),
+
};
static const struct snd_soc_dapm_route intercon[] = {
@@ -1469,9 +1783,13 @@
{"Incall_Music Audio Mixer", "MultiMedia1", "MM_DL1"},
{"Incall_Music Audio Mixer", "MultiMedia2", "MM_DL2"},
{"VOICE_PLAYBACK_TX", NULL, "Incall_Music Audio Mixer"},
+ {"SLIMBUS_4_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
+ {"SLIMBUS_4_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
+ {"SLIMBUS_4_RX", NULL, "SLIMBUS_4_RX Audio Mixer"},
{"MultiMedia1 Mixer", "VOC_REC_UL", "INCALL_RECORD_TX"},
{"MultiMedia1 Mixer", "VOC_REC_DL", "INCALL_RECORD_RX"},
+ {"MultiMedia1 Mixer", "SLIM_4_TX", "SLIMBUS_4_TX"},
{"MI2S_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
{"MI2S_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
{"MI2S_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
@@ -1479,6 +1797,8 @@
{"MI2S_RX", NULL, "MI2S_RX Audio Mixer"},
{"MultiMedia1 Mixer", "PRI_TX", "PRI_I2S_TX"},
+ {"MultiMedia1 Mixer", "MI2S_TX", "MI2S_TX"},
+ {"MultiMedia2 Mixer", "MI2S_TX", "MI2S_TX"},
{"MultiMedia1 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
{"MultiMedia1 Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
@@ -1515,41 +1835,56 @@
{"AUX_PCM_RX", NULL, "AUX_PCM_RX Audio Mixer"},
{"PRI_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+ {"PRI_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
{"PRI_RX_Voice Mixer", "Voip", "VOIP_DL"},
{"PRI_I2S_RX", NULL, "PRI_RX_Voice Mixer"},
{"SEC_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+ {"SEC_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
{"SEC_RX_Voice Mixer", "Voip", "VOIP_DL"},
{"SEC_I2S_RX", NULL, "SEC_RX_Voice Mixer"},
{"SLIM_0_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+ {"SLIM_0_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
{"SLIM_0_RX_Voice Mixer", "Voip", "VOIP_DL"},
{"SLIMBUS_0_RX", NULL, "SLIM_0_RX_Voice Mixer"},
{"INTERNAL_BT_SCO_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+ {"INTERNAL_BT_SCO_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
{"INTERNAL_BT_SCO_RX_Voice Mixer", "Voip", "VOIP_DL"},
{"INT_BT_SCO_RX", NULL, "INTERNAL_BT_SCO_RX_Voice Mixer"},
{"AFE_PCM_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+ {"AFE_PCM_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
{"AFE_PCM_RX_Voice Mixer", "Voip", "VOIP_DL"},
{"PCM_RX", NULL, "AFE_PCM_RX_Voice Mixer"},
{"AUX_PCM_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+ {"AUX_PCM_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
{"AUX_PCM_RX_Voice Mixer", "Voip", "VOIP_DL"},
{"AUX_PCM_RX", NULL, "AUX_PCM_RX_Voice Mixer"},
{"HDMI_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
+ {"HDMI_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
{"HDMI_RX_Voice Mixer", "Voip", "VOIP_DL"},
{"HDMI", NULL, "HDMI_RX_Voice Mixer"},
{"HDMI", NULL, "HDMI_DL_HL"},
{"Voice_Tx Mixer", "PRI_TX_Voice", "PRI_I2S_TX"},
+ {"Voice_Tx Mixer", "MI2S_TX_Voice", "MI2S_TX"},
{"Voice_Tx Mixer", "SLIM_0_TX_Voice", "SLIMBUS_0_TX"},
{"Voice_Tx Mixer", "INTERNAL_BT_SCO_TX_Voice", "INT_BT_SCO_TX"},
{"Voice_Tx Mixer", "AFE_PCM_TX_Voice", "PCM_TX"},
{"Voice_Tx Mixer", "AUX_PCM_TX_Voice", "AUX_PCM_TX"},
{"CS-VOICE_UL1", NULL, "Voice_Tx Mixer"},
+ {"VoLTE_Tx Mixer", "PRI_TX_VoLTE", "PRI_I2S_TX"},
+ {"VoLTE_Tx Mixer", "SLIM_0_TX_VoLTE", "SLIMBUS_0_TX"},
+ {"VoLTE_Tx Mixer", "INTERNAL_BT_SCO_TX_VoLTE", "INT_BT_SCO_TX"},
+ {"VoLTE_Tx Mixer", "AFE_PCM_TX_VoLTE", "PCM_TX"},
+ {"VoLTE_Tx Mixer", "AUX_PCM_TX_VoLTE", "AUX_PCM_TX"},
+ {"VoLTE_UL", NULL, "VoLTE_Tx Mixer"},
{"Voip_Tx Mixer", "PRI_TX_Voip", "PRI_I2S_TX"},
+ {"Voip_Tx Mixer", "MI2S_TX_Voip", "MI2S_TX"},
{"Voip_Tx Mixer", "SLIM_0_TX_Voip", "SLIMBUS_0_TX"},
{"Voip_Tx Mixer", "INTERNAL_BT_SCO_TX_Voip", "INT_BT_SCO_TX"},
{"Voip_Tx Mixer", "AFE_PCM_TX_Voip", "PCM_TX"},
@@ -1563,10 +1898,17 @@
{"INTFM_UL_HL", NULL, "INT_FM_TX"},
{"AUX_PCM_RX", NULL, "AUXPCM_DL_HL"},
{"AUXPCM_UL_HL", NULL, "AUX_PCM_TX"},
+ {"PCM_RX_DL_HL", "Switch", "SLIM0_DL_HL"},
+ {"PCM_RX", NULL, "PCM_RX_DL_HL"},
+ {"MI2S_UL_HL", NULL, "MI2S_TX"},
+ {"SEC_I2S_RX", NULL, "SEC_I2S_DL_HL"},
{"SLIMBUS_0_RX Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
{"SLIMBUS_0_RX Port Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
{"SLIMBUS_0_RX Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+ {"SLIMBUS_0_RX Port Mixer", "MI2S_TX", "MI2S_TX"},
{"SLIMBUS_0_RX", NULL, "SLIMBUS_0_RX Port Mixer"},
+ {"AFE_PCM_RX Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+ {"PCM_RX", NULL, "AFE_PCM_RX Port Mixer"},
{"AUXPCM_RX Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
{"AUXPCM_RX Port Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
@@ -1575,6 +1917,8 @@
{"Voice Stub Tx Mixer", "STUB_TX_HL", "STUB_TX"},
{"Voice Stub Tx Mixer", "SLIM_1_TX", "SLIMBUS_1_TX"},
{"Voice Stub Tx Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+ {"Voice Stub Tx Mixer", "STUB_1_TX_HL", "STUB_1_TX"},
+ {"Voice Stub Tx Mixer", "MI2S_TX", "MI2S_TX"},
{"VOICE_STUB_UL", NULL, "Voice Stub Tx Mixer"},
{"STUB_RX Mixer", "Voice Stub", "VOICE_STUB_DL"},
@@ -1582,11 +1926,48 @@
{"SLIMBUS_1_RX Mixer", "Voice Stub", "VOICE_STUB_DL"},
{"SLIMBUS_1_RX", NULL, "SLIMBUS_1_RX Mixer"},
{"INTERNAL_BT_SCO_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
+ {"MI2S_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
+ {"MI2S_RX", NULL, "MI2S_RX_Voice Mixer"},
+
+ {"SLIMBUS_3_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
+ {"SLIMBUS_3_RX", NULL, "SLIMBUS_3_RX_Voice Mixer"},
{"SLIMBUS_1_RX Port Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
{"SLIMBUS_1_RX", NULL, "SLIMBUS_1_RX Port Mixer"},
{"INTERNAL_BT_SCO_RX Port Mixer", "SLIM_1_TX", "SLIMBUS_1_TX"},
{"INT_BT_SCO_RX", NULL, "INTERNAL_BT_SCO_RX Port Mixer"},
+ {"SLIMBUS_3_RX Port Mixer", "INTERNAL_BT_SCO_RX", "INT_BT_SCO_RX"},
+ {"SLIMBUS_3_RX Port Mixer", "MI2S_TX", "MI2S_TX"},
+ {"SLIMBUS_3_RX", NULL, "SLIMBUS_3_RX Port Mixer"},
+
+
+ {"HDMI_RX Port Mixer", "MI2S_TX", "MI2S_TX"},
+ {"HDMI", NULL, "HDMI_RX Port Mixer"},
+
+ {"SEC_I2S_RX Port Mixer", "MI2S_TX", "MI2S_TX"},
+ {"SEC_I2S_RX", NULL, "SEC_I2S_RX Port Mixer"},
+
+ {"MI2S_RX Port Mixer", "SLIM_1_TX", "SLIMBUS_1_TX"},
+ {"MI2S_RX", NULL, "MI2S_RX Port Mixer"},
+ /* Backend Enablement */
+
+ {"BE_OUT", NULL, "PRI_I2S_RX"},
+ {"BE_OUT", NULL, "SEC_I2S_RX"},
+ {"BE_OUT", NULL, "SLIMBUS_0_RX"},
+ {"BE_OUT", NULL, "HDMI"},
+ {"BE_OUT", NULL, "MI2S_RX"},
+ {"PRI_I2S_TX", NULL, "BE_IN"},
+ {"MI2S_TX", NULL, "BE_IN"},
+ {"SLIMBUS_0_TX", NULL, "BE_IN" },
+ {"BE_OUT", NULL, "INT_BT_SCO_RX"},
+ {"INT_BT_SCO_TX", NULL, "BE_IN"},
+ {"BE_OUT", NULL, "INT_FM_RX"},
+ {"INT_FM_TX", NULL, "BE_IN"},
+ {"BE_OUT", NULL, "PCM_RX"},
+ {"PCM_TX", NULL, "BE_IN"},
+ {"BE_OUT", NULL, "SLIMBUS_3_RX"},
+ {"BE_OUT", NULL, "AUX_PCM_RX"},
+ {"AUX_PCM_TX", NULL, "BE_IN"},
};
static int msm_pcm_routing_hw_params(struct snd_pcm_substream *substream,
@@ -1601,7 +1982,8 @@
}
mutex_lock(&routing_lock);
- msm_bedais[be_id].hw_params = params;
+ msm_bedais[be_id].sample_rate = params_rate(params);
+ msm_bedais[be_id].channel = params_channels(params);
mutex_unlock(&routing_lock);
return 0;
}
@@ -1619,7 +2001,6 @@
}
bedai = &msm_bedais[be_id];
-
session_type = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
0 : 1);
@@ -1631,8 +2012,8 @@
}
bedai->active = 0;
- bedai->hw_params = NULL;
-
+ bedai->sample_rate = 0;
+ bedai->channel = 0;
mutex_unlock(&routing_lock);
return 0;
@@ -1651,15 +2032,8 @@
return -EINVAL;
}
-
bedai = &msm_bedais[be_id];
- if (bedai->hw_params == NULL) {
- pr_err("%s: HW param is not configured", __func__);
- return -EINVAL;
- }
-
-
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
path_type = ADM_PATH_PLAYBACK;
session_type = SESSION_TYPE_RX;
@@ -1679,23 +2053,23 @@
* is started.
*/
bedai->active = 1;
-
for_each_set_bit(i, &bedai->fe_sessions, MSM_FRONTEND_DAI_MM_SIZE) {
if (fe_dai_map[i][session_type] != INVALID_SESSION) {
- channels = params_channels(bedai->hw_params);
- if ((substream->stream == SNDRV_PCM_STREAM_PLAYBACK) &&
- (channels > 2))
+ channels = bedai->channel;
+ if ((substream->stream == SNDRV_PCM_STREAM_PLAYBACK ||
+ substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ && (channels > 2))
adm_multi_ch_copp_open(bedai->port_id,
path_type,
- params_rate(bedai->hw_params),
+ bedai->sample_rate,
channels,
DEFAULT_COPP_TOPOLOGY);
else
adm_open(bedai->port_id,
path_type,
- params_rate(bedai->hw_params),
- params_channels(bedai->hw_params),
+ bedai->sample_rate,
+ channels,
DEFAULT_COPP_TOPOLOGY);
msm_pcm_routing_build_matrix(i,
@@ -1716,7 +2090,7 @@
};
static unsigned int msm_routing_read(struct snd_soc_platform *platform,
- unsigned int reg)
+ unsigned int reg)
{
dev_dbg(platform->dev, "reg %x\n", reg);
return 0;
@@ -1734,7 +2108,7 @@
static int msm_routing_probe(struct snd_soc_platform *platform)
{
snd_soc_dapm_new_controls(&platform->dapm, msm_qdsp6_widgets,
- ARRAY_SIZE(msm_qdsp6_widgets));
+ ARRAY_SIZE(msm_qdsp6_widgets));
snd_soc_dapm_add_routes(&platform->dapm, intercon,
ARRAY_SIZE(intercon));
@@ -1767,7 +2141,6 @@
snd_soc_add_platform_controls(platform,
compressed_vol_mixer_controls,
ARRAY_SIZE(compressed_vol_mixer_controls));
-
return 0;
}
@@ -1782,7 +2155,7 @@
{
dev_dbg(&pdev->dev, "dev name %s\n", dev_name(&pdev->dev));
return snd_soc_register_platform(&pdev->dev,
- &msm_soc_routing_platform);
+ &msm_soc_routing_platform);
}
static int msm_routing_pcm_remove(struct platform_device *pdev)
@@ -1809,10 +2182,8 @@
return 0;
}
for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
- if ((test_bit(fedai_id,
- &msm_bedais[i].fe_sessions))) {
+ if (test_bit(fedai_id, &msm_bedais[i].fe_sessions))
return msm_bedais[i].active;
- }
}
return 0;
}
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
index b971787..32e18d8 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
@@ -13,29 +13,34 @@
#define _MSM_PCM_ROUTING_H
#include <sound/apr_audio-v2.h>
-#define LPASS_BE_PRI_I2S_RX "(Backend) PRIMARY_I2S_RX"
-#define LPASS_BE_PRI_I2S_TX "(Backend) PRIMARY_I2S_TX"
-#define LPASS_BE_SLIMBUS_0_RX "(Backend) SLIMBUS_0_RX"
-#define LPASS_BE_SLIMBUS_0_TX "(Backend) SLIMBUS_0_TX"
-#define LPASS_BE_HDMI "(Backend) HDMI"
-#define LPASS_BE_INT_BT_SCO_RX "(Backend) INT_BT_SCO_RX"
-#define LPASS_BE_INT_BT_SCO_TX "(Backend) INT_BT_SCO_TX"
-#define LPASS_BE_INT_FM_RX "(Backend) INT_FM_RX"
-#define LPASS_BE_INT_FM_TX "(Backend) INT_FM_TX"
-#define LPASS_BE_AFE_PCM_RX "(Backend) RT_PROXY_DAI_001_RX"
-#define LPASS_BE_AFE_PCM_TX "(Backend) RT_PROXY_DAI_002_TX"
-#define LPASS_BE_AUXPCM_RX "(Backend) AUX_PCM_RX"
-#define LPASS_BE_AUXPCM_TX "(Backend) AUX_PCM_TX"
-#define LPASS_BE_VOICE_PLAYBACK_TX "(Backend) VOICE_PLAYBACK_TX"
-#define LPASS_BE_INCALL_RECORD_RX "(Backend) INCALL_RECORD_TX"
-#define LPASS_BE_INCALL_RECORD_TX "(Backend) INCALL_RECORD_RX"
-#define LPASS_BE_SEC_I2S_RX "(Backend) SECONDARY_I2S_RX"
+#define LPASS_BE_PRI_I2S_RX "PRIMARY_I2S_RX"
+#define LPASS_BE_PRI_I2S_TX "PRIMARY_I2S_TX"
+#define LPASS_BE_SLIMBUS_0_RX "SLIMBUS_0_RX"
+#define LPASS_BE_SLIMBUS_0_TX "SLIMBUS_0_TX"
+#define LPASS_BE_HDMI "HDMI"
+#define LPASS_BE_INT_BT_SCO_RX "INT_BT_SCO_RX"
+#define LPASS_BE_INT_BT_SCO_TX "INT_BT_SCO_TX"
+#define LPASS_BE_INT_FM_RX "INT_FM_RX"
+#define LPASS_BE_INT_FM_TX "INT_FM_TX"
+#define LPASS_BE_AFE_PCM_RX "RT_PROXY_DAI_001_RX"
+#define LPASS_BE_AFE_PCM_TX "RT_PROXY_DAI_002_TX"
+#define LPASS_BE_AUXPCM_RX "AUX_PCM_RX"
+#define LPASS_BE_AUXPCM_TX "AUX_PCM_TX"
+#define LPASS_BE_VOICE_PLAYBACK_TX "VOICE_PLAYBACK_TX"
+#define LPASS_BE_INCALL_RECORD_RX "INCALL_RECORD_TX"
+#define LPASS_BE_INCALL_RECORD_TX "INCALL_RECORD_RX"
+#define LPASS_BE_SEC_I2S_RX "SECONDARY_I2S_RX"
-#define LPASS_BE_MI2S_RX "(Backend) MI2S_RX"
-#define LPASS_BE_STUB_RX "(Backend) STUB_RX"
-#define LPASS_BE_STUB_TX "(Backend) STUB_TX"
-#define LPASS_BE_SLIMBUS_1_RX "(Backend) SLIMBUS_1_RX"
-#define LPASS_BE_SLIMBUS_1_TX "(Backend) SLIMBUS_1_TX"
+#define LPASS_BE_MI2S_RX "MI2S_RX"
+#define LPASS_BE_MI2S_TX "MI2S_TX"
+#define LPASS_BE_STUB_RX "STUB_RX"
+#define LPASS_BE_STUB_TX "STUB_TX"
+#define LPASS_BE_SLIMBUS_1_RX "SLIMBUS_1_RX"
+#define LPASS_BE_SLIMBUS_1_TX "SLIMBUS_1_TX"
+#define LPASS_BE_STUB_1_TX "STUB_1_TX"
+#define LPASS_BE_SLIMBUS_3_RX "SLIMBUS_3_RX"
+#define LPASS_BE_SLIMBUS_4_RX "SLIMBUS_4_RX"
+#define LPASS_BE_SLIMBUS_4_TX "SLIMBUS_4_TX"
/* For multimedia front-ends, asm session is allocated dynamically.
* Hence, asm session/multimedia front-end mapping has to be maintained.
@@ -53,6 +58,7 @@
MSM_FRONTEND_DAI_AFE_RX,
MSM_FRONTEND_DAI_AFE_TX,
MSM_FRONTEND_DAI_VOICE_STUB,
+ MSM_FRONTEND_DAI_VOLTE,
MSM_FRONTEND_DAI_MAX,
};
@@ -77,10 +83,16 @@
MSM_BACKEND_DAI_INCALL_RECORD_RX,
MSM_BACKEND_DAI_INCALL_RECORD_TX,
MSM_BACKEND_DAI_MI2S_RX,
+ MSM_BACKEND_DAI_MI2S_TX,
MSM_BACKEND_DAI_SEC_I2S_RX,
MSM_BACKEND_DAI_SLIMBUS_1_RX,
MSM_BACKEND_DAI_SLIMBUS_1_TX,
- MSM_BACKEND_DAI_INVALID,
+ MSM_BACKEND_DAI_SLIMBUS_4_RX,
+ MSM_BACKEND_DAI_SLIMBUS_4_TX,
+ MSM_BACKEND_DAI_SLIMBUS_3_RX,
+ MSM_BACKEND_DAI_EXTPROC_RX,
+ MSM_BACKEND_DAI_EXTPROC_TX,
+ MSM_BACKEND_DAI_EXTPROC_EC_TX,
MSM_BACKEND_DAI_MAX,
};
@@ -90,6 +102,9 @@
*/
void msm_pcm_routing_reg_phy_stream(int fedai_id, int dspst_id,
int stream_type);
+void msm_pcm_routing_reg_psthr_stream(int fedai_id, int dspst_id,
+ int stream_type);
+
void msm_pcm_routing_dereg_phy_stream(int fedai_id, int stream_type);
int lpa_set_volume(unsigned volume);
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c
new file mode 100644
index 0000000..206e881
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c
@@ -0,0 +1,506 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/wait.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+#include <sound/initval.h>
+#include <sound/control.h>
+#include <asm/dma.h>
+
+#include "msm-pcm-voice-v2.h"
+#include "q6voice.h"
+
+static struct msm_voice voice_info[VOICE_SESSION_INDEX_MAX];
+
+static struct snd_pcm_hardware msm_pcm_hardware = {
+
+ .info = SNDRV_PCM_INFO_INTERLEAVED,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+ .rate_min = 8000,
+ .rate_max = 16000,
+ .channels_min = 1,
+ .channels_max = 1,
+
+ .buffer_bytes_max = 4096 * 2,
+ .period_bytes_min = 4096,
+ .period_bytes_max = 4096,
+ .periods_min = 2,
+ .periods_max = 2,
+
+ .fifo_size = 0,
+};
+static int is_volte(struct msm_voice *pvolte)
+{
+ if (pvolte == &voice_info[VOLTE_SESSION_INDEX])
+ return true;
+ else
+ return false;
+}
+
+static int msm_pcm_playback_prepare(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct msm_voice *prtd = runtime->private_data;
+
+ pr_debug("%s\n", __func__);
+
+ if (!prtd->playback_start)
+ prtd->playback_start = 1;
+
+ return 0;
+}
+
+static int msm_pcm_capture_prepare(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct msm_voice *prtd = runtime->private_data;
+
+ pr_debug("%s\n", __func__);
+
+ if (!prtd->capture_start)
+ prtd->capture_start = 1;
+
+ return 0;
+}
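+
+/* The PCM device name picks the session context: a name beginning with
+ * "VoLTE" maps the substream to the VoLTE session, anything else to the
+ * CS voice session.
+ */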
+static int msm_pcm_open(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct msm_voice *voice;
+
+ if (!strncmp("VoLTE", substream->pcm->id, 5)) {
+ voice = &voice_info[VOLTE_SESSION_INDEX];
+ pr_debug("%s: Open VoLTE Substream Id=%s\n",
+ __func__, substream->pcm->id);
+ } else {
+ voice = &voice_info[VOICE_SESSION_INDEX];
+ pr_debug("%s: Open VOICE Substream Id=%s\n",
+ __func__, substream->pcm->id);
+ }
+ mutex_lock(&voice->lock);
+
+ runtime->hw = msm_pcm_hardware;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ voice->playback_substream = substream;
+ else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ voice->capture_substream = substream;
+
+ voice->instance++;
+ pr_debug("%s: Instance = %d, Stream ID = %s\n",
+ __func__ , voice->instance, substream->pcm->id);
+ runtime->private_data = voice;
+
+ mutex_unlock(&voice->lock);
+
+ return 0;
+}
+static int msm_pcm_playback_close(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct msm_voice *prtd = runtime->private_data;
+
+ pr_debug("%s\n", __func__);
+
+ if (prtd->playback_start)
+ prtd->playback_start = 0;
+
+ prtd->playback_substream = NULL;
+
+ return 0;
+}
+static int msm_pcm_capture_close(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct msm_voice *prtd = runtime->private_data;
+
+ pr_debug("%s\n", __func__);
+
+ if (prtd->capture_start)
+ prtd->capture_start = 0;
+ prtd->capture_substream = NULL;
+
+ return 0;
+}
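+
+/* The voice call on the DSP is ended only once both the playback and
+ * capture directions of the session have been closed.
+ */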
+static int msm_pcm_close(struct snd_pcm_substream *substream)
+{
+
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct msm_voice *prtd = runtime->private_data;
+ uint16_t session_id = 0;
+ int ret = 0;
+
+ mutex_lock(&prtd->lock);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ ret = msm_pcm_playback_close(substream);
+ else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ ret = msm_pcm_capture_close(substream);
+
+ prtd->instance--;
+ if (!prtd->playback_start && !prtd->capture_start) {
+ pr_debug("end voice call\n");
+ if (is_volte(prtd))
+ session_id = voc_get_session_id(VOLTE_SESSION_NAME);
+ else
+ session_id = voc_get_session_id(VOICE_SESSION_NAME);
+ voc_end_voice_call(session_id);
+ }
+ mutex_unlock(&prtd->lock);
+
+ return ret;
+}
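+
+/* The voice call is started only after both the playback and capture
+ * substreams of the session have been prepared.
+ */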
+static int msm_pcm_prepare(struct snd_pcm_substream *substream)
+{
+ int ret = 0;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct msm_voice *prtd = runtime->private_data;
+ uint16_t session_id = 0;
+
+ mutex_lock(&prtd->lock);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ ret = msm_pcm_playback_prepare(substream);
+ else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ ret = msm_pcm_capture_prepare(substream);
+
+ if (prtd->playback_start && prtd->capture_start) {
+ if (is_volte(prtd))
+ session_id = voc_get_session_id(VOLTE_SESSION_NAME);
+ else
+ session_id = voc_get_session_id(VOICE_SESSION_NAME);
+ voc_start_voice_call(session_id);
+ }
+ mutex_unlock(&prtd->lock);
+
+ return ret;
+}
+
+static int msm_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+
+ pr_debug("%s: Voice\n", __func__);
+
+ snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
+
+ return 0;
+}
+
+static int msm_voice_volume_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] = 0;
+ return 0;
+}
+
+static int msm_voice_volume_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int volume = ucontrol->value.integer.value[0];
+ pr_debug("%s: volume: %d\n", __func__, volume);
+ voc_set_rx_vol_index(voc_get_session_id(VOICE_SESSION_NAME),
+ RX_PATH, volume);
+ return 0;
+}
+
+static int msm_volte_volume_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] = 0;
+ return 0;
+}
+
+static int msm_volte_volume_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int volume = ucontrol->value.integer.value[0];
+ pr_debug("%s: volume: %d\n", __func__, volume);
+ voc_set_rx_vol_index(voc_get_session_id(VOLTE_SESSION_NAME),
+ RX_PATH, volume);
+ return 0;
+}
+
+static int msm_voice_mute_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] = 0;
+ return 0;
+}
+
+static int msm_voice_mute_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int mute = ucontrol->value.integer.value[0];
+
+ pr_debug("%s: mute=%d\n", __func__, mute);
+
+ voc_set_tx_mute(voc_get_session_id(VOICE_SESSION_NAME), TX_PATH, mute);
+
+ return 0;
+}
+
+static int msm_volte_mute_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] = 0;
+ return 0;
+}
+
+static int msm_volte_mute_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int mute = ucontrol->value.integer.value[0];
+
+ pr_debug("%s: mute=%d\n", __func__, mute);
+
+ voc_set_tx_mute(voc_get_session_id(VOLTE_SESSION_NAME), TX_PATH, mute);
+
+ return 0;
+}
+
+static int msm_voice_rx_device_mute_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] =
+ voc_get_rx_device_mute(voc_get_session_id(VOICE_SESSION_NAME));
+ return 0;
+}
+
+static int msm_voice_rx_device_mute_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int mute = ucontrol->value.integer.value[0];
+
+ pr_debug("%s: mute=%d\n", __func__, mute);
+
+ voc_set_rx_device_mute(voc_get_session_id(VOICE_SESSION_NAME), mute);
+
+ return 0;
+}
+
+static int msm_volte_rx_device_mute_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] =
+ voc_get_rx_device_mute(voc_get_session_id(VOLTE_SESSION_NAME));
+ return 0;
+}
+
+static int msm_volte_rx_device_mute_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int mute = ucontrol->value.integer.value[0];
+
+ pr_debug("%s: mute=%d\n", __func__, mute);
+
+ voc_set_rx_device_mute(voc_get_session_id(VOLTE_SESSION_NAME), mute);
+
+ return 0;
+}
+
+static const char * const tty_mode[] = {"OFF", "HCO", "VCO", "FULL"};
+static const struct soc_enum msm_tty_mode_enum[] = {
+ SOC_ENUM_SINGLE_EXT(4, tty_mode),
+};
+
+static int msm_voice_tty_mode_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] =
+ voc_get_tty_mode(voc_get_session_id(VOICE_SESSION_NAME));
+ return 0;
+}
+
+static int msm_voice_tty_mode_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int tty_mode = ucontrol->value.integer.value[0];
+
+ pr_debug("%s: tty_mode=%d\n", __func__, tty_mode);
+
+ voc_set_tty_mode(voc_get_session_id(VOICE_SESSION_NAME), tty_mode);
+
+ return 0;
+}
+static int msm_voice_widevoice_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int wv_enable = ucontrol->value.integer.value[0];
+
+ pr_debug("%s: wv enable=%d\n", __func__, wv_enable);
+
+ voc_set_widevoice_enable(voc_get_session_id(VOICE_SESSION_NAME),
+ wv_enable);
+
+ return 0;
+}
+
+static int msm_voice_widevoice_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] =
+ voc_get_widevoice_enable(voc_get_session_id(VOICE_SESSION_NAME));
+ return 0;
+}
+
+
+static int msm_voice_slowtalk_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int st_enable = ucontrol->value.integer.value[0];
+
+ pr_debug("%s: st enable=%d\n", __func__, st_enable);
+
+ voc_set_pp_enable(voc_get_session_id(VOICE_SESSION_NAME),
+ MODULE_ID_VOICE_MODULE_ST, st_enable);
+
+ return 0;
+}
+
+static int msm_voice_slowtalk_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] =
+ voc_get_pp_enable(voc_get_session_id(VOICE_SESSION_NAME),
+ MODULE_ID_VOICE_MODULE_ST);
+ return 0;
+}
+
+static int msm_voice_fens_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int fens_enable = ucontrol->value.integer.value[0];
+
+ pr_debug("%s: fens enable=%d\n", __func__, fens_enable);
+
+ voc_set_pp_enable(voc_get_session_id(VOICE_SESSION_NAME),
+ MODULE_ID_VOICE_MODULE_FENS, fens_enable);
+
+ return 0;
+}
+
+static int msm_voice_fens_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] =
+ voc_get_pp_enable(voc_get_session_id(VOICE_SESSION_NAME),
+ MODULE_ID_VOICE_MODULE_FENS);
+ return 0;
+}
+
+static struct snd_kcontrol_new msm_voice_controls[] = {
+ SOC_SINGLE_EXT("Voice Rx Device Mute", SND_SOC_NOPM, 0, 1, 0,
+ msm_voice_rx_device_mute_get,
+ msm_voice_rx_device_mute_put),
+ SOC_SINGLE_EXT("Voice Tx Mute", SND_SOC_NOPM, 0, 1, 0,
+ msm_voice_mute_get, msm_voice_mute_put),
+ SOC_SINGLE_EXT("Voice Rx Volume", SND_SOC_NOPM, 0, 5, 0,
+ msm_voice_volume_get, msm_voice_volume_put),
+ SOC_ENUM_EXT("TTY Mode", msm_tty_mode_enum[0], msm_voice_tty_mode_get,
+ msm_voice_tty_mode_put),
+ SOC_SINGLE_EXT("Widevoice Enable", SND_SOC_NOPM, 0, 1, 0,
+ msm_voice_widevoice_get, msm_voice_widevoice_put),
+ SOC_SINGLE_EXT("Slowtalk Enable", SND_SOC_NOPM, 0, 1, 0,
+ msm_voice_slowtalk_get, msm_voice_slowtalk_put),
+ SOC_SINGLE_EXT("FENS Enable", SND_SOC_NOPM, 0, 1, 0,
+ msm_voice_fens_get, msm_voice_fens_put),
+ SOC_SINGLE_EXT("VoLTE Rx Device Mute", SND_SOC_NOPM, 0, 1, 0,
+ msm_volte_rx_device_mute_get,
+ msm_volte_rx_device_mute_put),
+ SOC_SINGLE_EXT("VoLTE Tx Mute", SND_SOC_NOPM, 0, 1, 0,
+ msm_volte_mute_get, msm_volte_mute_put),
+ SOC_SINGLE_EXT("VoLTE Rx Volume", SND_SOC_NOPM, 0, 5, 0,
+ msm_volte_volume_get, msm_volte_volume_put),
+};
+
+static struct snd_pcm_ops msm_pcm_ops = {
+ .open = msm_pcm_open,
+ .hw_params = msm_pcm_hw_params,
+ .close = msm_pcm_close,
+ .prepare = msm_pcm_prepare,
+};
+
+
+static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_card *card = rtd->card->snd_card;
+ int ret = 0;
+
+ if (!card->dev->coherent_dma_mask)
+ card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ return ret;
+}
+
+static int msm_pcm_voice_probe(struct snd_soc_platform *platform)
+{
+ snd_soc_add_platform_controls(platform, msm_voice_controls,
+ ARRAY_SIZE(msm_voice_controls));
+
+ return 0;
+}
+
+static struct snd_soc_platform_driver msm_soc_platform = {
+ .ops = &msm_pcm_ops,
+ .pcm_new = msm_asoc_pcm_new,
+ .probe = msm_pcm_voice_probe,
+};
+
+static __devinit int msm_pcm_probe(struct platform_device *pdev)
+{
+ pr_debug("%s: dev name %s\n", __func__, dev_name(&pdev->dev));
+ return snd_soc_register_platform(&pdev->dev,
+ &msm_soc_platform);
+}
+
+static int msm_pcm_remove(struct platform_device *pdev)
+{
+ snd_soc_unregister_platform(&pdev->dev);
+ return 0;
+}
+
+static struct platform_driver msm_pcm_driver = {
+ .driver = {
+ .name = "msm-pcm-voice",
+ .owner = THIS_MODULE,
+ },
+ .probe = msm_pcm_probe,
+ .remove = __devexit_p(msm_pcm_remove),
+};
+
+static int __init msm_soc_platform_init(void)
+{
+ memset(&voice_info, 0, sizeof(voice_info));
+ mutex_init(&voice_info[VOICE_SESSION_INDEX].lock);
+ mutex_init(&voice_info[VOLTE_SESSION_INDEX].lock);
+
+ return platform_driver_register(&msm_pcm_driver);
+}
+module_init(msm_soc_platform_init);
+
+static void __exit msm_soc_platform_exit(void)
+{
+ platform_driver_unregister(&msm_pcm_driver);
+}
+module_exit(msm_soc_platform_exit);
+
+MODULE_DESCRIPTION("Voice PCM module platform driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.h b/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.h
new file mode 100644
index 0000000..64c0848
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.h
@@ -0,0 +1,37 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MSM_PCM_VOICE_H
+#define _MSM_PCM_VOICE_H
+#include <sound/apr_audio.h>
+
+enum {
+ VOICE_SESSION_INDEX,
+ VOLTE_SESSION_INDEX,
+ VOICE_SESSION_INDEX_MAX,
+};
+
+struct msm_voice {
+ struct snd_pcm_substream *playback_substream;
+ struct snd_pcm_substream *capture_substream;
+
+ int instance;
+
+ struct mutex lock;
+
+ uint32_t samp_rate;
+ uint32_t channel_mode;
+
+ int playback_start;
+ int capture_start;
+};
+
+#endif /*_MSM_PCM_VOICE_H*/
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c
new file mode 100644
index 0000000..630405a
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c
@@ -0,0 +1,1180 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/wait.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_device.h>
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+#include <sound/initval.h>
+#include <sound/control.h>
+#include <asm/dma.h>
+
+#include "msm-pcm-q6-v2.h"
+#include "msm-pcm-routing-v2.h"
+#include "q6voice.h"
+
+#define VOIP_MAX_Q_LEN 10
+#define VOIP_MAX_VOC_PKT_SIZE 640
+#define VOIP_MIN_VOC_PKT_SIZE 320
+
+/* Length of the DSP frame info header added to the voc packet. */
+#define DSP_FRAME_HDR_LEN 1
+
+#define MODE_IS127		0x2	/* EVRC-A */
+#define MODE_4GV_NB		0x3	/* EVRC-B */
+#define MODE_4GV_WB		0x4	/* EVRC-WB */
+#define MODE_AMR		0x5	/* AMR narrowband */
+#define MODE_AMR_WB		0xD	/* AMR wideband */
+#define MODE_PCM		0xC	/* Linear PCM (8 or 16 kHz) */
+
+enum format {
+ FORMAT_S16_LE = 2,
+ FORMAT_SPECIAL = 31,
+};
+
+
+enum amr_rate_type {
+ AMR_RATE_4750, /* AMR 4.75 kbps */
+ AMR_RATE_5150, /* AMR 5.15 kbps */
+ AMR_RATE_5900, /* AMR 5.90 kbps */
+ AMR_RATE_6700, /* AMR 6.70 kbps */
+ AMR_RATE_7400, /* AMR 7.40 kbps */
+ AMR_RATE_7950, /* AMR 7.95 kbps */
+ AMR_RATE_10200, /* AMR 10.20 kbps */
+ AMR_RATE_12200, /* AMR 12.20 kbps */
+ AMR_RATE_6600, /* AMR-WB 6.60 kbps */
+ AMR_RATE_8850, /* AMR-WB 8.85 kbps */
+ AMR_RATE_12650, /* AMR-WB 12.65 kbps */
+ AMR_RATE_14250, /* AMR-WB 14.25 kbps */
+ AMR_RATE_15850, /* AMR-WB 15.85 kbps */
+ AMR_RATE_18250, /* AMR-WB 18.25 kbps */
+ AMR_RATE_19850, /* AMR-WB 19.85 kbps */
+ AMR_RATE_23050, /* AMR-WB 23.05 kbps */
+ AMR_RATE_23850, /* AMR-WB 23.85 kbps */
+ AMR_RATE_UNDEF
+};
+
+enum voip_state {
+ VOIP_STOPPED,
+ VOIP_STARTED,
+};
+
+struct voip_frame {
+ union {
+ uint32_t frame_type;
+ uint32_t packet_rate;
+ } header;
+ uint32_t len;
+ uint8_t voc_pkt[VOIP_MAX_VOC_PKT_SIZE];
+};
+
+struct voip_buf_node {
+ struct list_head list;
+ struct voip_frame frame;
+};
+
+struct voip_drv_info {
+ enum voip_state state;
+
+ struct snd_pcm_substream *playback_substream;
+ struct snd_pcm_substream *capture_substream;
+
+ struct list_head in_queue;
+ struct list_head free_in_queue;
+
+ struct list_head out_queue;
+ struct list_head free_out_queue;
+
+ wait_queue_head_t out_wait;
+ wait_queue_head_t in_wait;
+
+ struct mutex lock;
+ struct mutex in_lock;
+ struct mutex out_lock;
+
+ spinlock_t dsp_lock;
+
+ uint32_t mode;
+ uint32_t rate_type;
+ uint32_t rate;
+ uint32_t dtx_mode;
+
+ uint8_t capture_start;
+ uint8_t playback_start;
+
+ uint8_t playback_instance;
+ uint8_t capture_instance;
+
+ unsigned int play_samp_rate;
+ unsigned int cap_samp_rate;
+
+ unsigned int pcm_size;
+ unsigned int pcm_count;
+ unsigned int pcm_playback_irq_pos; /* IRQ position */
+ unsigned int pcm_playback_buf_pos; /* position in buffer */
+
+ unsigned int pcm_capture_size;
+ unsigned int pcm_capture_count;
+ unsigned int pcm_capture_irq_pos; /* IRQ position */
+ unsigned int pcm_capture_buf_pos; /* position in buffer */
+};
+
+static int voip_get_media_type(uint32_t mode,
+ unsigned int samp_rate);
+static int voip_get_rate_type(uint32_t mode,
+ uint32_t rate,
+ uint32_t *rate_type);
+static int msm_voip_mode_rate_config_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+static int msm_voip_mode_rate_config_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+
+static struct voip_drv_info voip_info;
+
+static struct snd_pcm_hardware msm_pcm_hardware = {
+ .info = (SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED),
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_SPECIAL,
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+ .rate_min = 8000,
+ .rate_max = 16000,
+ .channels_min = 1,
+ .channels_max = 1,
+ .buffer_bytes_max = sizeof(struct voip_buf_node) * VOIP_MAX_Q_LEN,
+ .period_bytes_min = VOIP_MIN_VOC_PKT_SIZE,
+ .period_bytes_max = VOIP_MAX_VOC_PKT_SIZE,
+ .periods_min = VOIP_MAX_Q_LEN,
+ .periods_max = VOIP_MAX_Q_LEN,
+ .fifo_size = 0,
+};
+
+
+static int msm_voip_mute_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int mute = ucontrol->value.integer.value[0];
+
+ pr_debug("%s: mute=%d\n", __func__, mute);
+
+ voc_set_tx_mute(voc_get_session_id(VOIP_SESSION_NAME), TX_PATH, mute);
+
+ return 0;
+}
+
+static int msm_voip_mute_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] = 0;
+ return 0;
+}
+
+static int msm_voip_volume_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int volume = ucontrol->value.integer.value[0];
+
+ pr_debug("%s: volume: %d\n", __func__, volume);
+
+ voc_set_rx_vol_index(voc_get_session_id(VOIP_SESSION_NAME),
+ RX_PATH,
+ volume);
+ return 0;
+}
+static int msm_voip_volume_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] = 0;
+ return 0;
+}
+
+static int msm_voip_dtx_mode_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ mutex_lock(&voip_info.lock);
+
+ voip_info.dtx_mode = ucontrol->value.integer.value[0];
+
+ pr_debug("%s: dtx: %d\n", __func__, voip_info.dtx_mode);
+
+ mutex_unlock(&voip_info.lock);
+
+ return 0;
+}
+static int msm_voip_dtx_mode_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ mutex_lock(&voip_info.lock);
+
+ ucontrol->value.integer.value[0] = voip_info.dtx_mode;
+
+ mutex_unlock(&voip_info.lock);
+
+ return 0;
+}
+
+static struct snd_kcontrol_new msm_voip_controls[] = {
+ SOC_SINGLE_EXT("Voip Tx Mute", SND_SOC_NOPM, 0, 1, 0,
+ msm_voip_mute_get, msm_voip_mute_put),
+ SOC_SINGLE_EXT("Voip Rx Volume", SND_SOC_NOPM, 0, 5, 0,
+ msm_voip_volume_get, msm_voip_volume_put),
+ SOC_SINGLE_MULTI_EXT("Voip Mode Rate Config", SND_SOC_NOPM, 0, 23850,
+ 0, 2, msm_voip_mode_rate_config_get,
+ msm_voip_mode_rate_config_put),
+ SOC_SINGLE_EXT("Voip Dtx Mode", SND_SOC_NOPM, 0, 1, 0,
+ msm_voip_dtx_mode_get, msm_voip_dtx_mode_put),
+};
+
+static int msm_pcm_voip_probe(struct snd_soc_platform *platform)
+{
+ snd_soc_add_platform_controls(platform, msm_voip_controls,
+ ARRAY_SIZE(msm_voip_controls));
+
+ return 0;
+}
+
+/* sample rate supported */
+static unsigned int supported_sample_rates[] = {8000, 16000};
+
+/* capture path */
+static void voip_process_ul_pkt(uint8_t *voc_pkt,
+ uint32_t pkt_len,
+ void *private_data)
+{
+ struct voip_buf_node *buf_node = NULL;
+ struct voip_drv_info *prtd = private_data;
+ unsigned long dsp_flags;
+
+ if (prtd->capture_substream == NULL)
+ return;
+
+ /* Copy up-link packet into out_queue. */
+ spin_lock_irqsave(&prtd->dsp_lock, dsp_flags);
+
+ /* discarding UL packets till start is received */
+ if (!list_empty(&prtd->free_out_queue) && prtd->capture_start) {
+ buf_node = list_first_entry(&prtd->free_out_queue,
+ struct voip_buf_node, list);
+ list_del(&buf_node->list);
+ switch (prtd->mode) {
+ case MODE_AMR_WB:
+ case MODE_AMR: {
+ /* Remove the DSP frame info header. Header format:
+ * Bits 0-3: Frame rate
+ * Bits 4-7: Frame type
+ */
+ buf_node->frame.header.frame_type =
+ ((*voc_pkt) & 0xF0) >> 4;
+ voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
+ buf_node->frame.len = pkt_len - DSP_FRAME_HDR_LEN;
+ memcpy(&buf_node->frame.voc_pkt[0],
+ voc_pkt,
+ buf_node->frame.len);
+ list_add_tail(&buf_node->list, &prtd->out_queue);
+ break;
+ }
+ case MODE_IS127:
+ case MODE_4GV_NB:
+ case MODE_4GV_WB: {
+ /* Remove the DSP frame info header.
+ * Header format:
+ * Bits 0-3: frame rate
+ */
+ buf_node->frame.header.packet_rate = (*voc_pkt) & 0x0F;
+ voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
+ buf_node->frame.len = pkt_len - DSP_FRAME_HDR_LEN;
+
+ memcpy(&buf_node->frame.voc_pkt[0],
+ voc_pkt,
+ buf_node->frame.len);
+
+ list_add_tail(&buf_node->list, &prtd->out_queue);
+ break;
+ }
+ default: {
+ buf_node->frame.len = pkt_len;
+ memcpy(&buf_node->frame.voc_pkt[0],
+ voc_pkt,
+ buf_node->frame.len);
+ list_add_tail(&buf_node->list, &prtd->out_queue);
+ }
+ }
+ pr_debug("ul_pkt: pkt_len =%d, frame.len=%d\n", pkt_len,
+ buf_node->frame.len);
+ prtd->pcm_capture_irq_pos += prtd->pcm_capture_count;
+ spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
+ snd_pcm_period_elapsed(prtd->capture_substream);
+ } else {
+ spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
+ pr_err("UL data dropped\n");
+ }
+
+ wake_up(&prtd->out_wait);
+}
+
+/* playback path */
+static void voip_process_dl_pkt(uint8_t *voc_pkt,
+ uint32_t *pkt_len,
+ void *private_data)
+{
+ struct voip_buf_node *buf_node = NULL;
+ struct voip_drv_info *prtd = private_data;
+ unsigned long dsp_flags;
+
+
+ if (prtd->playback_substream == NULL)
+ return;
+
+ spin_lock_irqsave(&prtd->dsp_lock, dsp_flags);
+
+ if (!list_empty(&prtd->in_queue) && prtd->playback_start) {
+ buf_node = list_first_entry(&prtd->in_queue,
+ struct voip_buf_node, list);
+ list_del(&buf_node->list);
+ switch (prtd->mode) {
+ case MODE_AMR:
+ case MODE_AMR_WB: {
+ /* Add the DSP frame info header. Header format:
+ * Bits 0-3: Frame rate
+ * Bits 4-7: Frame type
+ */
+ *voc_pkt = ((buf_node->frame.header.frame_type &
+ 0x0F) << 4) | (prtd->rate_type & 0x0F);
+ voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
+ *pkt_len = buf_node->frame.len + DSP_FRAME_HDR_LEN;
+ memcpy(voc_pkt,
+ &buf_node->frame.voc_pkt[0],
+ buf_node->frame.len);
+ list_add_tail(&buf_node->list, &prtd->free_in_queue);
+ break;
+ }
+ case MODE_IS127:
+ case MODE_4GV_NB:
+ case MODE_4GV_WB: {
+ /* Add the DSP frame info header. Header format:
+ * Bits 0-3 : Frame rate
+ */
+ *voc_pkt = buf_node->frame.header.packet_rate & 0x0F;
+ voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
+ *pkt_len = buf_node->frame.len + DSP_FRAME_HDR_LEN;
+
+ memcpy(voc_pkt,
+ &buf_node->frame.voc_pkt[0],
+ buf_node->frame.len);
+
+ list_add_tail(&buf_node->list, &prtd->free_in_queue);
+ break;
+ }
+ default: {
+ *pkt_len = buf_node->frame.len;
+
+ memcpy(voc_pkt,
+ &buf_node->frame.voc_pkt[0],
+ buf_node->frame.len);
+
+ list_add_tail(&buf_node->list, &prtd->free_in_queue);
+ }
+ }
+ pr_debug("dl_pkt: pkt_len=%d, frame_len=%d\n", *pkt_len,
+ buf_node->frame.len);
+ prtd->pcm_playback_irq_pos += prtd->pcm_count;
+ spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
+ snd_pcm_period_elapsed(prtd->playback_substream);
+ } else {
+ *pkt_len = 0;
+ spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
+ pr_err("DL data not available\n");
+ }
+ wake_up(&prtd->in_wait);
+}
+
+static struct snd_pcm_hw_constraint_list constraints_sample_rates = {
+ .count = ARRAY_SIZE(supported_sample_rates),
+ .list = supported_sample_rates,
+ .mask = 0,
+};
+
+static int msm_pcm_playback_prepare(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct voip_drv_info *prtd = runtime->private_data;
+
+ prtd->play_samp_rate = runtime->rate;
+ prtd->pcm_size = snd_pcm_lib_buffer_bytes(substream);
+ prtd->pcm_count = snd_pcm_lib_period_bytes(substream);
+ prtd->pcm_playback_irq_pos = 0;
+ prtd->pcm_playback_buf_pos = 0;
+
+ return 0;
+}
+
+static int msm_pcm_capture_prepare(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct voip_drv_info *prtd = runtime->private_data;
+ int ret = 0;
+
+ prtd->cap_samp_rate = runtime->rate;
+ prtd->pcm_capture_size = snd_pcm_lib_buffer_bytes(substream);
+ prtd->pcm_capture_count = snd_pcm_lib_period_bytes(substream);
+ prtd->pcm_capture_irq_pos = 0;
+ prtd->pcm_capture_buf_pos = 0;
+ return ret;
+}
+
+static int msm_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ int ret = 0;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct voip_drv_info *prtd = runtime->private_data;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ pr_debug("%s: Trigger start\n", __func__);
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ prtd->capture_start = 1;
+ else
+ prtd->playback_start = 1;
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ pr_debug("SNDRV_PCM_TRIGGER_STOP\n");
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ prtd->playback_start = 0;
+ else
+ prtd->capture_start = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int msm_pcm_open(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct voip_drv_info *prtd = &voip_info;
+ int ret = 0;
+
+ pr_debug("%s, VoIP\n", __func__);
+ mutex_lock(&prtd->lock);
+
+ runtime->hw = msm_pcm_hardware;
+
+ ret = snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ &constraints_sample_rates);
+ if (ret < 0)
+ pr_debug("snd_pcm_hw_constraint_list failed\n");
+
+ ret = snd_pcm_hw_constraint_integer(runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ if (ret < 0) {
+ pr_debug("snd_pcm_hw_constraint_integer failed\n");
+ goto err;
+ }
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ prtd->playback_substream = substream;
+ prtd->playback_instance++;
+ } else {
+ prtd->capture_substream = substream;
+ prtd->capture_instance++;
+ }
+ runtime->private_data = prtd;
+err:
+ mutex_unlock(&prtd->lock);
+
+ return ret;
+}
+
+static int msm_pcm_playback_copy(struct snd_pcm_substream *substream, int a,
+ snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames)
+{
+ int ret = 0;
+ struct voip_buf_node *buf_node = NULL;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct voip_drv_info *prtd = runtime->private_data;
+
+ int count = frames_to_bytes(runtime, frames);
+ pr_debug("%s: count = %d, frames=%d\n", __func__, count, (int)frames);
+
+ ret = wait_event_interruptible_timeout(prtd->in_wait,
+ (!list_empty(&prtd->free_in_queue) ||
+ prtd->state == VOIP_STOPPED),
+ 1 * HZ);
+ if (ret > 0) {
+ mutex_lock(&prtd->in_lock);
+ if (count <= VOIP_MAX_VOC_PKT_SIZE) {
+ buf_node =
+ list_first_entry(&prtd->free_in_queue,
+ struct voip_buf_node, list);
+ list_del(&buf_node->list);
+ if (prtd->mode == MODE_PCM) {
+ ret = copy_from_user(&buf_node->frame.voc_pkt,
+ buf, count);
+ buf_node->frame.len = count;
+ } else
+ ret = copy_from_user(&buf_node->frame,
+ buf, count);
+ list_add_tail(&buf_node->list, &prtd->in_queue);
+ } else {
+ pr_err("%s: Write cnt %d is > VOIP_MAX_VOC_PKT_SIZE\n",
+ __func__, count);
+ ret = -ENOMEM;
+ }
+
+ mutex_unlock(&prtd->in_lock);
+ } else if (ret == 0) {
+ pr_err("%s: No free DL buffs\n", __func__);
+ ret = -ETIMEDOUT;
+ } else {
+ pr_err("%s: playback copy was interrupted\n", __func__);
+ }
+
+ return ret;
+}
+static int msm_pcm_capture_copy(struct snd_pcm_substream *substream,
+ int channel, snd_pcm_uframes_t hwoff, void __user *buf,
+ snd_pcm_uframes_t frames)
+{
+ int ret = 0;
+ int count = 0;
+ struct voip_buf_node *buf_node = NULL;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct voip_drv_info *prtd = runtime->private_data;
+
+ count = frames_to_bytes(runtime, frames);
+
+ pr_debug("%s: count = %d\n", __func__, count);
+
+ ret = wait_event_interruptible_timeout(prtd->out_wait,
+ (!list_empty(&prtd->out_queue) ||
+ prtd->state == VOIP_STOPPED),
+ 1 * HZ);
+
+ if (ret > 0) {
+ mutex_lock(&prtd->out_lock);
+
+ if (count <= VOIP_MAX_VOC_PKT_SIZE) {
+ buf_node = list_first_entry(&prtd->out_queue,
+ struct voip_buf_node, list);
+ list_del(&buf_node->list);
+ if (prtd->mode == MODE_PCM)
+ ret = copy_to_user(buf,
+ &buf_node->frame.voc_pkt,
+ count);
+ else
+ ret = copy_to_user(buf,
+ &buf_node->frame,
+ count);
+ if (ret) {
+				pr_err("%s: Copy to user returned %d\n",
+ __func__, ret);
+ ret = -EFAULT;
+ }
+ list_add_tail(&buf_node->list,
+ &prtd->free_out_queue);
+ } else {
+ pr_err("%s: Read count %d > VOIP_MAX_VOC_PKT_SIZE\n",
+ __func__, count);
+ ret = -ENOMEM;
+ }
+
+ mutex_unlock(&prtd->out_lock);
+
+ } else if (ret == 0) {
+ pr_err("%s: No UL data available\n", __func__);
+ ret = -ETIMEDOUT;
+ } else {
+ pr_err("%s: Read was interrupted\n", __func__);
+ ret = -ERESTARTSYS;
+ }
+ return ret;
+}
+static int msm_pcm_copy(struct snd_pcm_substream *substream, int a,
+ snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames)
+{
+ int ret = 0;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ ret = msm_pcm_playback_copy(substream, a, hwoff, buf, frames);
+ else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ ret = msm_pcm_capture_copy(substream, a, hwoff, buf, frames);
+
+ return ret;
+}
+
+static int msm_pcm_close(struct snd_pcm_substream *substream)
+{
+ int ret = 0;
+ struct list_head *ptr = NULL;
+ struct list_head *next = NULL;
+ struct voip_buf_node *buf_node = NULL;
+ struct snd_dma_buffer *p_dma_buf, *c_dma_buf;
+ struct snd_pcm_substream *p_substream, *c_substream;
+ struct snd_pcm_runtime *runtime;
+ struct voip_drv_info *prtd;
+
+ if (substream == NULL) {
+ pr_err("substream is NULL\n");
+ return -EINVAL;
+ }
+ runtime = substream->runtime;
+ prtd = runtime->private_data;
+
+ wake_up(&prtd->out_wait);
+
+ mutex_lock(&prtd->lock);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ prtd->playback_instance--;
+ else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ prtd->capture_instance--;
+
+ if (!prtd->playback_instance && !prtd->capture_instance) {
+ if (prtd->state == VOIP_STARTED) {
+ prtd->state = VOIP_STOPPED;
+ voc_end_voice_call(
+ voc_get_session_id(VOIP_SESSION_NAME));
+ voc_register_mvs_cb(NULL, NULL, prtd);
+ }
+		/* release all buffers */
+		/* release in_queue and free_in_queue */
+		pr_debug("release all buffers\n");
+ p_substream = prtd->playback_substream;
+ if (p_substream == NULL) {
+ pr_debug("p_substream is NULL\n");
+ goto capt;
+ }
+ p_dma_buf = &p_substream->dma_buffer;
+ if (p_dma_buf == NULL) {
+ pr_debug("p_dma_buf is NULL\n");
+ goto capt;
+ }
+ if (p_dma_buf->area != NULL) {
+ mutex_lock(&prtd->in_lock);
+ list_for_each_safe(ptr, next, &prtd->in_queue) {
+ buf_node = list_entry(ptr,
+ struct voip_buf_node, list);
+ list_del(&buf_node->list);
+ }
+ list_for_each_safe(ptr, next, &prtd->free_in_queue) {
+ buf_node = list_entry(ptr,
+ struct voip_buf_node, list);
+ list_del(&buf_node->list);
+ }
+ dma_free_coherent(p_substream->pcm->card->dev,
+ runtime->hw.buffer_bytes_max, p_dma_buf->area,
+ p_dma_buf->addr);
+ p_dma_buf->area = NULL;
+ mutex_unlock(&prtd->in_lock);
+ }
+ /* release out_queue and free_out_queue */
+capt: c_substream = prtd->capture_substream;
+ if (c_substream == NULL) {
+ pr_debug("c_substream is NULL\n");
+ goto done;
+ }
+ c_dma_buf = &c_substream->dma_buffer;
+		if (c_dma_buf == NULL) {
+ pr_debug("c_dma_buf is NULL.\n");
+ goto done;
+ }
+ if (c_dma_buf->area != NULL) {
+ mutex_lock(&prtd->out_lock);
+ list_for_each_safe(ptr, next, &prtd->out_queue) {
+ buf_node = list_entry(ptr,
+ struct voip_buf_node, list);
+ list_del(&buf_node->list);
+ }
+ list_for_each_safe(ptr, next, &prtd->free_out_queue) {
+ buf_node = list_entry(ptr,
+ struct voip_buf_node, list);
+ list_del(&buf_node->list);
+ }
+ dma_free_coherent(c_substream->pcm->card->dev,
+ runtime->hw.buffer_bytes_max, c_dma_buf->area,
+ c_dma_buf->addr);
+ c_dma_buf->area = NULL;
+ mutex_unlock(&prtd->out_lock);
+ }
+done:
+ prtd->capture_substream = NULL;
+ prtd->playback_substream = NULL;
+ }
+ mutex_unlock(&prtd->lock);
+
+ return ret;
+}
+static int msm_pcm_prepare(struct snd_pcm_substream *substream)
+{
+ int ret = 0;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct voip_drv_info *prtd = runtime->private_data;
+	int media_type = 0;
+ uint32_t rate_type = 0;
+
+ mutex_lock(&prtd->lock);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ ret = msm_pcm_playback_prepare(substream);
+ else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ ret = msm_pcm_capture_prepare(substream);
+
+ if ((runtime->format != FORMAT_SPECIAL) &&
+ ((prtd->mode == MODE_AMR) || (prtd->mode == MODE_AMR_WB) ||
+ (prtd->mode == MODE_IS127) || (prtd->mode == MODE_4GV_NB) ||
+ (prtd->mode == MODE_4GV_WB))) {
+		pr_err("mode:%d and format:%u are not matched\n",
+ prtd->mode, (uint32_t)runtime->format);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if ((runtime->format != FORMAT_S16_LE) &&
+ (prtd->mode == MODE_PCM)) {
+		pr_err("mode:%d and format:%u are not matched\n",
+ prtd->mode, (uint32_t)runtime->format);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if (prtd->playback_instance && prtd->capture_instance
+ && (prtd->state != VOIP_STARTED)) {
+
+ ret = voip_get_rate_type(prtd->mode,
+ prtd->rate,
+ &rate_type);
+ if (ret < 0) {
+ pr_err("fail at getting rate_type\n");
+ ret = -EINVAL;
+ goto done;
+ }
+ prtd->rate_type = rate_type;
+ media_type = voip_get_media_type(prtd->mode,
+ prtd->play_samp_rate);
+ if (media_type < 0) {
+ pr_err("fail at getting media_type\n");
+ ret = -EINVAL;
+ goto done;
+ }
+ pr_debug(" media_type=%d, rate_type=%d\n", media_type,
+ rate_type);
+ if ((prtd->play_samp_rate == 8000) &&
+ (prtd->cap_samp_rate == 8000))
+ voc_config_vocoder(media_type, rate_type,
+ VSS_NETWORK_ID_VOIP_NB,
+ voip_info.dtx_mode);
+ else if ((prtd->play_samp_rate == 16000) &&
+ (prtd->cap_samp_rate == 16000))
+ voc_config_vocoder(media_type, rate_type,
+ VSS_NETWORK_ID_VOIP_WB,
+ voip_info.dtx_mode);
+ else {
+ pr_debug("%s: Invalid rate playback %d, capture %d\n",
+ __func__, prtd->play_samp_rate,
+ prtd->cap_samp_rate);
+ goto done;
+ }
+ voc_register_mvs_cb(voip_process_ul_pkt,
+ voip_process_dl_pkt, prtd);
+ voc_start_voice_call(voc_get_session_id(VOIP_SESSION_NAME));
+
+ prtd->state = VOIP_STARTED;
+ }
+done:
+ mutex_unlock(&prtd->lock);
+
+ return ret;
+}
+
+static snd_pcm_uframes_t
+msm_pcm_playback_pointer(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct voip_drv_info *prtd = runtime->private_data;
+
+ pr_debug("%s\n", __func__);
+ if (prtd->pcm_playback_irq_pos >= prtd->pcm_size)
+ prtd->pcm_playback_irq_pos = 0;
+ return bytes_to_frames(runtime, (prtd->pcm_playback_irq_pos));
+}
+
+static snd_pcm_uframes_t
+msm_pcm_capture_pointer(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct voip_drv_info *prtd = runtime->private_data;
+
+ if (prtd->pcm_capture_irq_pos >= prtd->pcm_capture_size)
+ prtd->pcm_capture_irq_pos = 0;
+ return bytes_to_frames(runtime, (prtd->pcm_capture_irq_pos));
+}
+
+static snd_pcm_uframes_t msm_pcm_pointer(struct snd_pcm_substream *substream)
+{
+ snd_pcm_uframes_t ret = 0;
+ pr_debug("%s\n", __func__);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ ret = msm_pcm_playback_pointer(substream);
+ else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ ret = msm_pcm_capture_pointer(substream);
+ return ret;
+}
+
+static int msm_pcm_mmap(struct snd_pcm_substream *substream,
+ struct vm_area_struct *vma)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ pr_debug("%s\n", __func__);
+	/* Propagate any mapping failure back to the caller. */
+	return dma_mmap_coherent(substream->pcm->card->dev, vma,
+			runtime->dma_area,
+			runtime->dma_addr,
+			runtime->dma_bytes);
+}
+
+static int msm_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_dma_buffer *dma_buf = &substream->dma_buffer;
+ struct voip_buf_node *buf_node = NULL;
+ int i = 0, offset = 0;
+
+ pr_debug("%s: voip\n", __func__);
+
+ mutex_lock(&voip_info.lock);
+
+ dma_buf->dev.type = SNDRV_DMA_TYPE_DEV;
+ dma_buf->dev.dev = substream->pcm->card->dev;
+ dma_buf->private_data = NULL;
+
+ dma_buf->area = dma_alloc_coherent(substream->pcm->card->dev,
+ runtime->hw.buffer_bytes_max,
+ &dma_buf->addr, GFP_KERNEL);
+ if (!dma_buf->area) {
+ pr_err("%s:MSM VOIP dma_alloc failed\n", __func__);
+ mutex_unlock(&voip_info.lock);
+ return -ENOMEM;
+ }
+
+ dma_buf->bytes = runtime->hw.buffer_bytes_max;
+ memset(dma_buf->area, 0, runtime->hw.buffer_bytes_max);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ for (i = 0; i < VOIP_MAX_Q_LEN; i++) {
+ buf_node = (void *)dma_buf->area + offset;
+
+ mutex_lock(&voip_info.in_lock);
+ list_add_tail(&buf_node->list,
+ &voip_info.free_in_queue);
+ mutex_unlock(&voip_info.in_lock);
+ offset = offset + sizeof(struct voip_buf_node);
+ }
+ } else {
+ for (i = 0; i < VOIP_MAX_Q_LEN; i++) {
+ buf_node = (void *) dma_buf->area + offset;
+ mutex_lock(&voip_info.out_lock);
+ list_add_tail(&buf_node->list,
+ &voip_info.free_out_queue);
+ mutex_unlock(&voip_info.out_lock);
+ offset = offset + sizeof(struct voip_buf_node);
+ }
+ }
+
+ mutex_unlock(&voip_info.lock);
+
+ snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
+
+ return 0;
+}
+
+static int msm_voip_mode_rate_config_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ mutex_lock(&voip_info.lock);
+
+ ucontrol->value.integer.value[0] = voip_info.mode;
+ ucontrol->value.integer.value[1] = voip_info.rate;
+
+ mutex_unlock(&voip_info.lock);
+
+ return 0;
+}
+
+static int msm_voip_mode_rate_config_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ mutex_lock(&voip_info.lock);
+
+ voip_info.mode = ucontrol->value.integer.value[0];
+ voip_info.rate = ucontrol->value.integer.value[1];
+
+ pr_debug("%s: mode=%d,rate=%d\n", __func__, voip_info.mode,
+ voip_info.rate);
+
+ mutex_unlock(&voip_info.lock);
+
+ return 0;
+}
+
+static int voip_get_rate_type(uint32_t mode, uint32_t rate,
+ uint32_t *rate_type)
+{
+ int ret = 0;
+
+ switch (mode) {
+ case MODE_AMR: {
+ switch (rate) {
+ case 4750:
+ *rate_type = AMR_RATE_4750;
+ break;
+ case 5150:
+ *rate_type = AMR_RATE_5150;
+ break;
+ case 5900:
+ *rate_type = AMR_RATE_5900;
+ break;
+ case 6700:
+ *rate_type = AMR_RATE_6700;
+ break;
+ case 7400:
+ *rate_type = AMR_RATE_7400;
+ break;
+ case 7950:
+ *rate_type = AMR_RATE_7950;
+ break;
+ case 10200:
+ *rate_type = AMR_RATE_10200;
+ break;
+ case 12200:
+ *rate_type = AMR_RATE_12200;
+ break;
+ default:
+ pr_err("wrong rate for AMR NB.\n");
+ ret = -EINVAL;
+ break;
+ }
+ break;
+ }
+ case MODE_AMR_WB: {
+ switch (rate) {
+ case 6600:
+ *rate_type = AMR_RATE_6600 - AMR_RATE_6600;
+ break;
+ case 8850:
+ *rate_type = AMR_RATE_8850 - AMR_RATE_6600;
+ break;
+ case 12650:
+ *rate_type = AMR_RATE_12650 - AMR_RATE_6600;
+ break;
+ case 14250:
+ *rate_type = AMR_RATE_14250 - AMR_RATE_6600;
+ break;
+ case 15850:
+ *rate_type = AMR_RATE_15850 - AMR_RATE_6600;
+ break;
+ case 18250:
+ *rate_type = AMR_RATE_18250 - AMR_RATE_6600;
+ break;
+ case 19850:
+ *rate_type = AMR_RATE_19850 - AMR_RATE_6600;
+ break;
+ case 23050:
+ *rate_type = AMR_RATE_23050 - AMR_RATE_6600;
+ break;
+ case 23850:
+ *rate_type = AMR_RATE_23850 - AMR_RATE_6600;
+ break;
+ default:
+ pr_err("wrong rate for AMR_WB.\n");
+ ret = -EINVAL;
+ break;
+ }
+ break;
+ }
+ case MODE_PCM: {
+ *rate_type = 0;
+ break;
+ }
+ case MODE_IS127:
+ case MODE_4GV_NB:
+ case MODE_4GV_WB: {
+ switch (rate) {
+ case VOC_0_RATE:
+ case VOC_8_RATE:
+ case VOC_4_RATE:
+ case VOC_2_RATE:
+ case VOC_1_RATE:
+ *rate_type = rate;
+ break;
+ default:
+ pr_err("wrong rate for IS127/4GV_NB/WB.\n");
+ ret = -EINVAL;
+ break;
+ }
+ break;
+ }
+ default:
+ pr_err("wrong mode type.\n");
+ ret = -EINVAL;
+ }
+ pr_debug("%s, mode=%d, rate=%u, rate_type=%d\n",
+ __func__, mode, rate, *rate_type);
+ return ret;
+}
+
+static int voip_get_media_type(uint32_t mode,
+ unsigned int samp_rate)
+{
+	int media_type;
+
+ pr_debug("%s: mode=%d, samp_rate=%d\n", __func__,
+ mode, samp_rate);
+ switch (mode) {
+ case MODE_AMR:
+ media_type = VSS_MEDIA_ID_AMR_NB_MODEM;
+ break;
+ case MODE_AMR_WB:
+ media_type = VSS_MEDIA_ID_AMR_WB_MODEM;
+ break;
+ case MODE_PCM:
+ if (samp_rate == 8000)
+ media_type = VSS_MEDIA_ID_PCM_NB;
+ else
+ media_type = VSS_MEDIA_ID_PCM_WB;
+ break;
+ case MODE_IS127: /* EVRC-A */
+ media_type = VSS_MEDIA_ID_EVRC_MODEM;
+ break;
+ case MODE_4GV_NB: /* EVRC-B */
+ media_type = VSS_MEDIA_ID_4GV_NB_MODEM;
+ break;
+ case MODE_4GV_WB: /* EVRC-WB */
+ media_type = VSS_MEDIA_ID_4GV_WB_MODEM;
+ break;
+ default:
+ pr_debug(" input mode is not supported\n");
+ media_type = -EINVAL;
+ }
+
+ pr_debug("%s: media_type is 0x%x\n", __func__, media_type);
+
+ return media_type;
+}
+
+
+static struct snd_pcm_ops msm_pcm_ops = {
+ .open = msm_pcm_open,
+ .copy = msm_pcm_copy,
+ .hw_params = msm_pcm_hw_params,
+ .close = msm_pcm_close,
+ .prepare = msm_pcm_prepare,
+ .trigger = msm_pcm_trigger,
+ .pointer = msm_pcm_pointer,
+ .mmap = msm_pcm_mmap,
+};
+
+static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_card *card = rtd->card->snd_card;
+ int ret = 0;
+
+ pr_debug("msm_asoc_pcm_new\n");
+ if (!card->dev->coherent_dma_mask)
+ card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ return ret;
+}
+
+static struct snd_soc_platform_driver msm_soc_platform = {
+ .ops = &msm_pcm_ops,
+ .pcm_new = msm_asoc_pcm_new,
+ .probe = msm_pcm_voip_probe,
+};
+
+static __devinit int msm_pcm_probe(struct platform_device *pdev)
+{
+ if (pdev->dev.of_node)
+ dev_set_name(&pdev->dev, "%s", "msm-voip-dsp");
+
+ pr_debug("%s: dev name %s\n", __func__, dev_name(&pdev->dev));
+ return snd_soc_register_platform(&pdev->dev,
+ &msm_soc_platform);
+}
+
+static int msm_pcm_remove(struct platform_device *pdev)
+{
+ snd_soc_unregister_platform(&pdev->dev);
+ return 0;
+}
+
+static const struct of_device_id msm_voip_dt_match[] = {
+	{.compatible = "qcom,msm-voip-dsp"}, {} /* sentinel */
+};
+MODULE_DEVICE_TABLE(of, msm_voip_dt_match);
+
+static struct platform_driver msm_pcm_driver = {
+ .driver = {
+ .name = "msm-voip-dsp",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_voip_dt_match,
+ },
+ .probe = msm_pcm_probe,
+ .remove = __devexit_p(msm_pcm_remove),
+};
+
+static int __init msm_soc_platform_init(void)
+{
+ memset(&voip_info, 0, sizeof(voip_info));
+ voip_info.mode = MODE_PCM;
+ mutex_init(&voip_info.lock);
+ mutex_init(&voip_info.in_lock);
+ mutex_init(&voip_info.out_lock);
+
+ spin_lock_init(&voip_info.dsp_lock);
+
+ init_waitqueue_head(&voip_info.out_wait);
+ init_waitqueue_head(&voip_info.in_wait);
+
+ INIT_LIST_HEAD(&voip_info.in_queue);
+ INIT_LIST_HEAD(&voip_info.free_in_queue);
+ INIT_LIST_HEAD(&voip_info.out_queue);
+ INIT_LIST_HEAD(&voip_info.free_out_queue);
+
+ return platform_driver_register(&msm_pcm_driver);
+}
+module_init(msm_soc_platform_init);
+
+static void __exit msm_soc_platform_exit(void)
+{
+ platform_driver_unregister(&msm_pcm_driver);
+}
+module_exit(msm_soc_platform_exit);
+
+MODULE_DESCRIPTION("VoIP PCM module platform driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c
index f982134..0bb88e8 100644
--- a/sound/soc/msm/qdsp6v2/q6asm.c
+++ b/sound/soc/msm/qdsp6v2/q6asm.c
@@ -84,6 +84,8 @@
uint32_t bufsz, uint32_t bufcnt);
static void q6asm_reset_buf_state(struct audio_client *ac);
+static int q6asm_map_channels(u8 *channel_mapping, uint32_t channels);
+
#ifdef CONFIG_DEBUG_FS
#define OUT_BUFFER_SIZE 56
@@ -196,8 +198,7 @@
out_cold_index*/
if (out_cold_index != 1) {
do_gettimeofday(&out_cold_tv);
- pr_debug("COLD: apr_send_pkt at %ld"
- "sec %ld microsec\n",\
+ pr_debug("COLD: apr_send_pkt at %ld sec %ld microsec\n",
out_cold_tv.tv_sec,\
out_cold_tv.tv_usec);
out_cold_index = 1;
@@ -222,8 +223,7 @@
*/
if (in_cont_index == 7) {
do_gettimeofday(&in_cont_tv);
- pr_err("In_CONT:previous read buffer done"
- "at %ld sec %ld microsec\n",\
+ pr_err("In_CONT:previous read buffer done at %ld sec %ld microsec\n",
in_cont_tv.tv_sec, in_cont_tv.tv_usec);
}
in_cont_index++;
@@ -253,8 +253,8 @@
if ((strncmp(((char *)ab->data), zero_pattern, 2)) &&
(!strncmp(((char *)ab->data + 2), zero_pattern, 2))) {
do_gettimeofday(&out_warm_tv);
- pr_debug("WARM:apr_send_pkt at"
- "%ld sec %ld microsec\n", out_warm_tv.tv_sec,\
+ pr_debug("WARM:apr_send_pkt at %ld sec %ld microsec\n",
+ out_warm_tv.tv_sec,\
out_warm_tv.tv_usec);
pr_debug("Warm Pattern Matched");
}
@@ -263,8 +263,8 @@
else if ((!strncmp(((char *)ab->data), zero_pattern, 2))
&& (strncmp(((char *)ab->data + 2), zero_pattern, 2))) {
do_gettimeofday(&out_cont_tv);
- pr_debug("CONT:apr_send_pkt at"
- "%ld sec %ld microsec\n", out_cont_tv.tv_sec,\
+ pr_debug("CONT:apr_send_pkt at %ld sec %ld microsec\n",
+ out_cont_tv.tv_sec,\
out_cont_tv.tv_usec);
pr_debug("Cont Pattern Matched");
}
@@ -410,8 +410,7 @@
ion_unmap_kernel(port->buf[0].client, port->buf[0].handle);
ion_free(port->buf[0].client, port->buf[0].handle);
ion_client_destroy(port->buf[0].client);
- pr_debug("%s:data[%p]phys[%p][%p]"
- ", client[%p] handle[%p]\n",
+ pr_debug("%s:data[%p]phys[%p][%p] , client[%p] handle[%p]\n",
__func__,
(void *)port->buf[0].data,
(void *)port->buf[0].phys,
@@ -479,13 +478,16 @@
int q6asm_set_io_mode(struct audio_client *ac, uint32_t mode)
{
if (ac == NULL) {
pr_err("%s APR handle NULL\n", __func__);
return -EINVAL;
}
+	ac->io_mode &= 0xFF00;
+	pr_debug("%s: ac->io_mode after masking with 0xFF00: 0x%x\n",
+		__func__, ac->io_mode);
if ((mode == ASYNC_IO_MODE) || (mode == SYNC_IO_MODE)) {
- ac->io_mode = mode;
- pr_debug("%s:Set Mode to %d\n", __func__, ac->io_mode);
+ ac->io_mode |= mode;
+ pr_debug("%s:Set Mode to 0x[%x]\n", __func__, ac->io_mode);
return 0;
} else {
pr_err("%s:Not an valid IO Mode:%d\n", __func__, ac->io_mode);
@@ -500,8 +502,8 @@
(apr_fn)q6asm_mmapcallback,\
0x0FFFFFFFF, &this_mmap);
if (this_mmap.apr == NULL) {
- pr_debug("%s Unable to register"
- "APR ASM common port\n", __func__);
+ pr_debug("%s Unable to register APR ASM common port\n",
+ __func__);
goto fail;
}
}
@@ -624,8 +626,7 @@
(UINT_MAX, "audio_client");
if (IS_ERR_OR_NULL((void *)
buf[cnt].client)) {
- pr_err("%s: ION create client"
- " for AUDIO failed\n",
+ pr_err("%s: ION create client for AUDIO failed\n",
__func__);
goto fail;
}
@@ -634,8 +635,7 @@
(0x1 << ION_AUDIO_HEAP_ID));
if (IS_ERR_OR_NULL((void *)
buf[cnt].handle)) {
- pr_err("%s: ION memory"
- " allocation for AUDIO failed\n",
+ pr_err("%s: ION memory allocation for AUDIO failed\n",
__func__);
goto fail;
}
@@ -646,8 +646,7 @@
&buf[cnt].phys,
(size_t *)&len);
if (rc) {
- pr_err("%s: ION Get Physical"
- " for AUDIO failed, rc = %d\n",
+ pr_err("%s: ION Get Physical for AUDIO failed, rc = %d\n",
__func__, rc);
goto fail;
}
@@ -657,8 +656,8 @@
0);
if (IS_ERR_OR_NULL((void *)
buf[cnt].data)) {
- pr_err("%s: ION memory"
- " mapping for AUDIO failed\n", __func__);
+ pr_err("%s: ION memory mapping for AUDIO failed\n",
+ __func__);
goto fail;
}
memset((void *)buf[cnt].data, 0, bufsz);
@@ -752,8 +751,7 @@
}
memset((void *)buf[0].data, 0, (bufsz * bufcnt));
if (!buf[0].data) {
- pr_err("%s:invalid vaddr,"
- " iomap failed\n", __func__);
+ pr_err("%s:invalid vaddr, iomap failed\n", __func__);
mutex_unlock(&ac->cmd_lock);
goto fail;
}
@@ -822,8 +820,7 @@
}
sid = (data->token >> 8) & 0x0F;
ac = q6asm_get_audio_client(sid);
- pr_debug("%s:ptr0[0x%x]ptr1[0x%x]opcode[0x%x]"
- "token[0x%x]payload_s[%d] src[%d] dest[%d]sid[%d]dir[%d]\n",
+ pr_debug("%s:ptr0[0x%x]ptr1[0x%x]opcode[0x%x] token[0x%x]payload_s[%d] src[%d] dest[%d]sid[%d]dir[%d]\n",
__func__, payload[0], payload[1], data->opcode, data->token,
data->payload_size, data->src_port, data->dest_port, sid, dir);
pr_debug("%s:Payload = [0x%x] status[0x%x]\n",
@@ -918,8 +915,8 @@
return 0;
}
- pr_debug("%s: session[%d]opcode[0x%x]"
- "token[0x%x]payload_s[%d] src[%d] dest[%d]\n", __func__,
+ pr_debug("%s: session[%d]opcode[0x%x] token[0x%x]payload_s[%d] src[%d] dest[%d]\n",
+ __func__,
ac->session, data->opcode,
data->token, data->payload_size, data->src_port,
data->dest_port);
@@ -1060,9 +1057,8 @@
pr_err("ASM_SESSION_EVENTX_OVERFLOW\n");
break;
case ASM_SESSION_CMDRSP_GET_SESSIONTIME_V3:
- pr_debug("%s: ASM_SESSION_CMDRSP_GET_SESSIONTIME_V3, "
- "payload[0] = %d, payload[1] = %d, "
- "payload[2] = %d\n", __func__,
+ pr_debug("%s: ASM_SESSION_CMDRSP_GET_SESSIONTIME_V3, payload[0] = %d, payload[1] = %d, payload[2] = %d\n",
+ __func__,
payload[0], payload[1], payload[2]);
ac->time_stamp = (uint64_t)(((uint64_t)payload[1] << 32) |
payload[2]);
@@ -1073,9 +1069,8 @@
break;
case ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY:
case ASM_DATA_EVENT_ENC_SR_CM_CHANGE_NOTIFY:
- pr_debug("%s: ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY, "
- "payload[0] = %d, payload[1] = %d, "
- "payload[2] = %d, payload[3] = %d\n", __func__,
+ pr_debug("%s: ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY, payload[0] = %d, payload[1] = %d, payload[2] = %d, payload[3] = %d\n",
+ __func__,
payload[0], payload[1], payload[2],
payload[3]);
break;
@@ -1112,8 +1107,8 @@
if (port->buf[idx].used == dir) {
/* To make it more robust, we could loop and get the
next avail buf, its risky though */
- pr_debug("%s:Next buf idx[0x%x] not available,"
- "dir[%d]\n", __func__, idx, dir);
+ pr_debug("%s:Next buf idx[0x%x] not available, dir[%d]\n",
+ __func__, idx, dir);
mutex_unlock(&port->lock);
return NULL;
}
@@ -1162,8 +1157,8 @@
* To make it more robust, we could loop and get the
* next avail buf, its risky though
*/
- pr_debug("%s:Next buf idx[0x%x] not available,"
- "dir[%d]\n", __func__, idx, dir);
+ pr_debug("%s:Next buf idx[0x%x] not available, dir[%d]\n",
+ __func__, idx, dir);
return NULL;
}
*size = port->buf[idx].actual_size;
@@ -1427,6 +1422,7 @@
pr_debug("wr_format[0x%x]rd_format[0x%x]",
wr_format, rd_format);
+ ac->io_mode |= NT_MODE;
q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE);
open.hdr.opcode = ASM_STREAM_CMD_OPEN_READWRITE_V2;
@@ -1593,8 +1589,8 @@
struct asm_aac_enc_cfg_v2 enc_cfg;
int rc = 0;
- pr_debug("%s:session[%d]frames[%d]SR[%d]ch[%d]bitrate[%d]mode[%d]"
- "format[%d]", __func__, ac->session, frames_per_buf,
+ pr_debug("%s:session[%d]frames[%d]SR[%d]ch[%d]bitrate[%d]mode[%d] format[%d]",
+ __func__, ac->session, frames_per_buf,
sample_rate, channels, bit_rate, mode, format);
q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
@@ -1632,8 +1628,41 @@
int q6asm_set_encdec_chan_map(struct audio_client *ac,
uint32_t num_channels)
{
- /* Todo: */
+ struct asm_dec_out_chan_map_param chan_map;
+ u8 *channel_mapping;
+ int rc = 0;
+ pr_debug("%s: Session %d, num_channels = %d\n",
+ __func__, ac->session, num_channels);
+ q6asm_add_hdr(ac, &chan_map.hdr, sizeof(chan_map), TRUE);
+ chan_map.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+ chan_map.encdec.param_id = ASM_PARAM_ID_DEC_OUTPUT_CHAN_MAP;
+ chan_map.encdec.param_size = sizeof(struct asm_dec_out_chan_map_param) -
+ (sizeof(struct apr_hdr) +
+ sizeof(struct asm_stream_cmd_set_encdec_param));
+ chan_map.num_channels = num_channels;
+ channel_mapping = chan_map.channel_mapping;
+ memset(channel_mapping, PCM_CHANNEL_NULL, MAX_CHAN_MAP_CHANNELS);
+ if (q6asm_map_channels(channel_mapping, num_channels))
+ return -EINVAL;
+
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &chan_map);
+ if (rc < 0) {
+ pr_err("%s:Command opcode[0x%x]paramid[0x%x] failed\n",
+ __func__, ASM_STREAM_CMD_SET_ENCDEC_PARAM,
+ ASM_PARAM_ID_DEC_OUTPUT_CHAN_MAP);
+ goto fail_cmd;
+ }
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) == 0), 5*HZ);
+ if (!rc) {
+ pr_err("%s:timeout opcode[0x%x]\n", __func__,
+ chan_map.hdr.opcode);
+ rc = -ETIMEDOUT;
+ goto fail_cmd;
+ }
return 0;
+fail_cmd:
+ return rc;
}
int q6asm_enc_cfg_blk_pcm(struct audio_client *ac,
@@ -1665,23 +1694,8 @@
memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL);
- if (channels == 1) {
- channel_mapping[0] = PCM_CHANNEL_FL;
- } else if (channels == 2) {
- channel_mapping[0] = PCM_CHANNEL_FL;
- channel_mapping[1] = PCM_CHANNEL_FR;
- } else if (channels == 6) {
- channel_mapping[0] = PCM_CHANNEL_FC;
- channel_mapping[1] = PCM_CHANNEL_FL;
- channel_mapping[2] = PCM_CHANNEL_FR;
- channel_mapping[3] = PCM_CHANNEL_LB;
- channel_mapping[4] = PCM_CHANNEL_RB;
- channel_mapping[5] = PCM_CHANNEL_LFE;
- } else {
- pr_err("%s: ERROR.unsupported num_ch = %u\n", __func__,
- channels);
+ if (q6asm_map_channels(channel_mapping, channels))
return -EINVAL;
- }
rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
if (rc < 0) {
@@ -1700,6 +1714,96 @@
return -EINVAL;
}
+int q6asm_enc_cfg_blk_pcm_native(struct audio_client *ac,
+ uint32_t rate, uint32_t channels)
+{
+ struct asm_multi_channel_pcm_enc_cfg_v2 enc_cfg;
+ u8 *channel_mapping;
+ u32 frames_per_buf = 0;
+
+ int rc = 0;
+
+ pr_debug("%s: Session %d, rate = %d, channels = %d\n", __func__,
+ ac->session, rate, channels);
+
+ q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+
+ enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+ enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
+ enc_cfg.encdec.param_size = sizeof(enc_cfg) - sizeof(enc_cfg.hdr) -
+ sizeof(enc_cfg.encdec);
+ enc_cfg.encblk.frames_per_buf = frames_per_buf;
+ enc_cfg.encblk.enc_cfg_blk_size = enc_cfg.encdec.param_size -
+ sizeof(struct asm_enc_cfg_blk_param_v2);
+
+ enc_cfg.num_channels = 0;/*channels;*/
+ enc_cfg.bits_per_sample = 16;
+ enc_cfg.sample_rate = 0;/*rate;*/
+ enc_cfg.is_signed = 1;
+	channel_mapping = enc_cfg.channel_mapping;
+
+ memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL);
+
+ if (q6asm_map_channels(channel_mapping, channels))
+ return -EINVAL;
+
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
+ if (rc < 0) {
+		pr_err("Command open failed\n");
+ rc = -EINVAL;
+ goto fail_cmd;
+ }
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) == 0), 5*HZ);
+ if (!rc) {
+ pr_err("timeout opcode[0x%x] ", enc_cfg.hdr.opcode);
+ goto fail_cmd;
+ }
+ return 0;
+fail_cmd:
+ return -EINVAL;
+}
+
+static int q6asm_map_channels(u8 *channel_mapping, uint32_t channels)
+{
+ u8 *lchannel_mapping;
+ lchannel_mapping = channel_mapping;
+ pr_debug("%s channels passed: %d\n", __func__, channels);
+ if (channels == 1) {
+ lchannel_mapping[0] = PCM_CHANNEL_FC;
+ } else if (channels == 2) {
+ lchannel_mapping[0] = PCM_CHANNEL_FL;
+ lchannel_mapping[1] = PCM_CHANNEL_FR;
+ } else if (channels == 3) {
+ lchannel_mapping[0] = PCM_CHANNEL_FC;
+ lchannel_mapping[1] = PCM_CHANNEL_FL;
+ lchannel_mapping[2] = PCM_CHANNEL_FR;
+ } else if (channels == 4) {
+ lchannel_mapping[0] = PCM_CHANNEL_FC;
+ lchannel_mapping[1] = PCM_CHANNEL_FL;
+ lchannel_mapping[2] = PCM_CHANNEL_FR;
+ lchannel_mapping[3] = PCM_CHANNEL_LB;
+ } else if (channels == 5) {
+ lchannel_mapping[0] = PCM_CHANNEL_FC;
+ lchannel_mapping[1] = PCM_CHANNEL_FL;
+ lchannel_mapping[2] = PCM_CHANNEL_FR;
+ lchannel_mapping[3] = PCM_CHANNEL_LB;
+ lchannel_mapping[4] = PCM_CHANNEL_RB;
+ } else if (channels == 6) {
+ lchannel_mapping[0] = PCM_CHANNEL_FC;
+ lchannel_mapping[1] = PCM_CHANNEL_FL;
+ lchannel_mapping[2] = PCM_CHANNEL_FR;
+ lchannel_mapping[3] = PCM_CHANNEL_LB;
+ lchannel_mapping[4] = PCM_CHANNEL_RB;
+ lchannel_mapping[5] = PCM_CHANNEL_LFE;
+ } else {
+ pr_err("%s: ERROR.unsupported num_ch = %u\n",
+ __func__, channels);
+ return -EINVAL;
+ }
+ return 0;
+}
+
int q6asm_enable_sbrps(struct audio_client *ac,
uint32_t sbr_ps_enable)
{
@@ -1791,8 +1895,8 @@
struct asm_v13k_enc_cfg enc_cfg;
int rc = 0;
- pr_debug("%s:session[%d]frames[%d]min_rate[0x%4x]max_rate[0x%4x]"
- "reduced_rate_level[0x%4x]rate_modulation_cmd[0x%4x]", __func__,
+ pr_debug("%s:session[%d]frames[%d]min_rate[0x%4x]max_rate[0x%4x] reduced_rate_level[0x%4x]rate_modulation_cmd[0x%4x]",
+ __func__,
ac->session, frames_per_buf, min_rate, max_rate,
reduced_rate_level, rate_modulation_cmd);
@@ -1833,8 +1937,8 @@
struct asm_evrc_enc_cfg enc_cfg;
int rc = 0;
- pr_debug("%s:session[%d]frames[%d]min_rate[0x%4x]max_rate[0x%4x]"
- "rate_modulation_cmd[0x%4x]", __func__, ac->session,
+ pr_debug("%s:session[%d]frames[%d]min_rate[0x%4x]max_rate[0x%4x] rate_modulation_cmd[0x%4x]",
+ __func__, ac->session,
frames_per_buf, min_rate, max_rate, rate_modulation_cmd);
q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
@@ -1972,23 +2076,8 @@
memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL);
- if (channels == 1) {
- channel_mapping[0] = PCM_CHANNEL_FL;
- } else if (channels == 2) {
- channel_mapping[0] = PCM_CHANNEL_FL;
- channel_mapping[1] = PCM_CHANNEL_FR;
- } else if (channels == 6) {
- channel_mapping[0] = PCM_CHANNEL_FC;
- channel_mapping[1] = PCM_CHANNEL_FL;
- channel_mapping[2] = PCM_CHANNEL_FR;
- channel_mapping[3] = PCM_CHANNEL_LB;
- channel_mapping[4] = PCM_CHANNEL_RB;
- channel_mapping[5] = PCM_CHANNEL_LFE;
- } else {
- pr_err("%s: ERROR.unsupported num_ch = %u\n", __func__,
- channels);
+ if (q6asm_map_channels(channel_mapping, channels))
return -EINVAL;
- }
rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
if (rc < 0) {
@@ -2056,8 +2145,7 @@
struct asm_wma_cfg *wma_cfg = (struct asm_wma_cfg *)cfg;
int rc = 0;
- pr_debug("session[%d]format_tag[0x%4x] rate[%d] ch[0x%4x] bps[%d],"
- "balign[0x%4x], bit_sample[0x%4x], ch_msk[%d], enc_opt[0x%4x]\n",
+ pr_debug("session[%d]format_tag[0x%4x] rate[%d] ch[0x%4x] bps[%d], balign[0x%4x], bit_sample[0x%4x], ch_msk[%d], enc_opt[0x%4x]\n",
ac->session, wma_cfg->format_tag, wma_cfg->sample_rate,
wma_cfg->ch_cfg, wma_cfg->avg_bytes_per_sec,
wma_cfg->block_align, wma_cfg->valid_bits_per_sample,
@@ -2065,8 +2153,9 @@
q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
- fmt.hdr.opcode = ASM_MEDIA_FMT_WMA_V9_V2;
-
+ fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+ fmt.fmtblk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
+ sizeof(fmt.fmtblk);
fmt.fmtag = wma_cfg->format_tag;
fmt.num_channels = wma_cfg->ch_cfg;
fmt.sample_rate = wma_cfg->sample_rate;
@@ -2100,9 +2189,7 @@
struct asm_wmapro_cfg *wmapro_cfg = (struct asm_wmapro_cfg *)cfg;
int rc = 0;
- pr_debug("session[%d]format_tag[0x%4x] rate[%d] ch[0x%4x] bps[%d],"
- "balign[0x%4x], bit_sample[0x%4x], ch_msk[%d], enc_opt[0x%4x],"
- "adv_enc_opt[0x%4x], adv_enc_opt2[0x%8x]\n",
+ pr_debug("session[%d]format_tag[0x%4x] rate[%d] ch[0x%4x] bps[%d], balign[0x%4x], bit_sample[0x%4x], ch_msk[%d], enc_opt[0x%4x], adv_enc_opt[0x%4x], adv_enc_opt2[0x%8x]\n",
ac->session, wmapro_cfg->format_tag, wmapro_cfg->sample_rate,
wmapro_cfg->ch_cfg, wmapro_cfg->avg_bytes_per_sec,
wmapro_cfg->block_align, wmapro_cfg->valid_bits_per_sample,
@@ -2111,7 +2198,9 @@
q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
- fmt.hdr.opcode = ASM_MEDIA_FMT_WMA_V10PRO_V2;
+ fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+ fmt.fmtblk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
+ sizeof(fmt.fmtblk);
fmt.fmtag = wmapro_cfg->format_tag;
fmt.num_channels = wmapro_cfg->ch_cfg;
@@ -2147,12 +2236,10 @@
struct avs_cmd_shared_mem_map_regions *mmap_regions = NULL;
struct avs_shared_map_region_payload *mregions = NULL;
struct audio_port_data *port = NULL;
- struct audio_buffer *ab = NULL;
void *mmap_region_cmd = NULL;
void *payload = NULL;
struct asm_buffer_node *buffer_node = NULL;
int rc = 0;
- int i = 0;
int cmd_size = 0;
if (!ac || ac->apr == NULL || ac->mmap_apr == NULL) {
@@ -2181,21 +2268,18 @@
mmap_regions->mem_pool_id = ADSP_MEMORY_MAP_EBI_POOL;
mmap_regions->num_regions = bufcnt & 0x00ff;
mmap_regions->property_flag = 0x00;
- pr_debug("map_regions->nregions = %d\n", mmap_regions->num_regions);
payload = ((u8 *) mmap_region_cmd +
sizeof(struct avs_cmd_shared_mem_map_regions));
mregions = (struct avs_shared_map_region_payload *)payload;
ac->port[dir].tmp_hdl = 0;
port = &ac->port[dir];
- for (i = 0; i < bufcnt; i++) {
- ab = &port->buf[i];
- mregions->shm_addr_lsw = ab->phys;
- /* Using only 32 bit address */
- mregions->shm_addr_msw = 0;
- mregions->mem_size_bytes = ab->size;
- ++mregions;
- }
+ pr_debug("%s, buf_add 0x%x, bufsz: %d\n", __func__, buf_add, bufsz);
+ mregions->shm_addr_lsw = buf_add;
+ /* Using only 32 bit address */
+ mregions->shm_addr_msw = 0;
+ mregions->mem_size_bytes = bufsz;
+ ++mregions;
rc = apr_send_pkt(ac->mmap_apr, (uint32_t *) mmap_region_cmd);
if (rc < 0) {
@@ -2295,7 +2379,7 @@
void *payload = NULL;
struct asm_buffer_node *buffer_node = NULL;
int rc = 0;
- int i = 0;
+ int i = 0;
int cmd_size = 0;
if (!ac || ac->apr == NULL || ac->mmap_apr == NULL) {
@@ -2351,7 +2435,6 @@
rc = wait_event_timeout(ac->cmd_wait,
(atomic_read(&ac->cmd_state) == 0)
, 5*HZ);
- /*ac->port[dir].tmp_hdl), 5*HZ);*/
if (!rc) {
pr_err("timeout. waited for memory_map\n");
rc = -EINVAL;
@@ -2843,8 +2926,6 @@
read.buf_addr_lsw,
read.hdr.token,
read.seq_id);
- pr_debug("q6asm_read_nolock mem-map handle is %x",
- read.mem_map_handle);
rc = apr_send_pkt(ac->apr, (uint32_t *) &read);
if (rc < 0) {
pr_err("read op[0x%x]rc[%d]\n", read.hdr.opcode, rc);
@@ -2865,6 +2946,8 @@
struct list_head *ptr, *next;
struct audio_buffer *ab;
struct audio_port_data *port;
+ u32 lbuf_addr_lsw;
+ u32 liomode;
if (!ac || ac->apr == NULL) {
pr_err("%s: APR handle NULL\n", __func__);
@@ -2884,11 +2967,21 @@
write.buf_size = param->len;
write.timestamp_msw = param->msw_ts;
write.timestamp_lsw = param->lsw_ts;
- pr_debug("%s: token[0x%x], buf_addr_lsw[0x%x], buf_size[0x%x],"
- "ts_msw[0x%x], ts_lsw[0x%x]\n",
- __func__, write.hdr.token, write.buf_addr_lsw,
+ liomode = (ASYNC_IO_MODE | NT_MODE);
+
+ if (ac->io_mode == liomode) {
+ pr_info("%s: subtracting 32 for header\n", __func__);
+ lbuf_addr_lsw = (write.buf_addr_lsw - 32);
+	} else {
+ lbuf_addr_lsw = write.buf_addr_lsw;
+ }
+
+ pr_debug("%s: token[0x%x], buf_addr_lsw[0x%x], buf_size[0x%x], ts_msw[0x%x], ts_lsw[0x%x], lbuf_addr_lsw: 0x[%x]\n",
+ __func__,
+ write.hdr.token, write.buf_addr_lsw,
write.buf_size, write.timestamp_msw,
- write.timestamp_lsw);
+ write.timestamp_lsw, lbuf_addr_lsw);
+
/* Use 0xFF00 for disabling timestamps */
if (param->flags == 0xFF00)
write.flags = (0x00000000 | (param->flags & 0x800000FF));
@@ -2899,21 +2992,12 @@
list_for_each_safe(ptr, next, &ac->port[IN].mem_map_handle) {
buf_node = list_entry(ptr, struct asm_buffer_node,
list);
- if (buf_node->buf_addr_lsw == (uint32_t)write.buf_addr_lsw) {
+ if (buf_node->buf_addr_lsw == lbuf_addr_lsw) {
write.mem_map_handle = buf_node->mmap_hdl;
- pr_debug("%s:buf_node->mmap_hdl = 0x%x,"
- "write.mem_map_handle = 0x%x\n",
- __func__,
- buf_node->mmap_hdl,
- (uint32_t)write.mem_map_handle);
break;
}
}
- pr_debug("%s: session[%d] bufadd[0x%x]len[0x%x],"
- "mem_map_handle[0x%x]\n", __func__, ac->session,
- write.buf_addr_lsw, write.buf_size, write.mem_map_handle);
-
rc = apr_send_pkt(ac->apr, (uint32_t *) &write);
if (rc < 0) {
pr_debug("[%s] write op[0x%x]rc[%d]\n", __func__,
@@ -2932,6 +3016,8 @@
struct asm_data_cmd_read_v2 read;
struct asm_buffer_node *buf_node = NULL;
struct list_head *ptr, *next;
+ u32 lbuf_addr_lsw;
+ u32 liomode;
if (!ac || ac->apr == NULL) {
pr_err("%s: APR handle NULL\n", __func__);
@@ -2947,16 +3033,21 @@
read.buf_addr_msw = 0;
read.buf_size = param->len;
read.seq_id = param->uid;
-
- list_for_each_safe(ptr, next, &ac->port[IN].mem_map_handle) {
- buf_node = list_entry(ptr, struct asm_buffer_node,
- list);
- if (buf_node->buf_addr_lsw == param->paddr)
- read.mem_map_handle = buf_node->mmap_hdl;
+ liomode = (NT_MODE | ASYNC_IO_MODE);
+ if (ac->io_mode == liomode) {
+ pr_info("%s: subtracting 32 for header\n", __func__);
+ lbuf_addr_lsw = (read.buf_addr_lsw - 32);
+	} else {
+ lbuf_addr_lsw = read.buf_addr_lsw;
}
- pr_debug("%s: session[%d] bufadd[0x%x]len[0x%x]", __func__, ac->session,
- read.buf_addr_lsw, read.buf_size);
+ list_for_each_safe(ptr, next, &ac->port[IN].mem_map_handle) {
+ buf_node = list_entry(ptr, struct asm_buffer_node, list);
+ if (buf_node->buf_addr_lsw == lbuf_addr_lsw) {
+ read.mem_map_handle = buf_node->mmap_hdl;
+ break;
+ }
+ }
rc = apr_send_pkt(ac->apr, (uint32_t *) &read);
if (rc < 0) {
@@ -3013,8 +3104,7 @@
list);
write.mem_map_handle = buf_node->mmap_hdl;
- pr_debug("%s:ab->phys[0x%x]bufadd[0x%x]"
- "token[0x%x]buf_id[0x%x]buf_size[0x%x]mmaphdl[0x%x]"
+ pr_debug("%s:ab->phys[0x%x]bufadd[0x%x] token[0x%x]buf_id[0x%x]buf_size[0x%x]mmaphdl[0x%x]"
, __func__,
ab->phys,
write.buf_addr_lsw,
@@ -3081,8 +3171,7 @@
write.flags = (0x80000000 | flags);
port->dsp_buf = (port->dsp_buf + 1) & (port->max_buf_cnt - 1);
- pr_err("%s:ab->phys[0x%x]bufadd[0x%x]token[0x%x]"
- "buf_id[0x%x]buf_size[0x%x]mmaphdl[0x%x]"
+ pr_debug("%s:ab->phys[0x%x]bufadd[0x%x]token[0x%x] buf_id[0x%x]buf_size[0x%x]mmaphdl[0x%x]"
, __func__,
ab->phys,
write.buf_addr_lsw,
diff --git a/sound/soc/msm/qdsp6v2/q6voice.c b/sound/soc/msm/qdsp6v2/q6voice.c
new file mode 100644
index 0000000..833dbf2
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/q6voice.c
@@ -0,0 +1,3916 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+
+#include <asm/mach-types.h>
+#include <mach/qdsp6v2/audio_acdb.h>
+#include <mach/qdsp6v2/rtac.h>
+#include <mach/socinfo.h>
+
+#include "sound/apr_audio-v2.h"
+#include "sound/q6afe-v2.h"
+
+#include "q6voice.h"
+
+#define TIMEOUT_MS 200
+
+
+#define CMD_STATUS_SUCCESS 0
+#define CMD_STATUS_FAIL 1
+
+#define VOC_PATH_PASSIVE 0
+#define VOC_PATH_FULL 1
+#define VOC_PATH_VOLTE_PASSIVE 2
+
+/* CVP CAL Size: 245760 = 240 * 1024 */
+#define CVP_CAL_SIZE 245760
+/* CVS CAL Size: 49152 = 48 * 1024 */
+#define CVS_CAL_SIZE 49152
+
+static struct common_data common;
+
+static int voice_send_enable_vocproc_cmd(struct voice_data *v);
+static int voice_send_netid_timing_cmd(struct voice_data *v);
+static int voice_send_attach_vocproc_cmd(struct voice_data *v);
+static int voice_send_set_device_cmd(struct voice_data *v);
+static int voice_send_disable_vocproc_cmd(struct voice_data *v);
+static int voice_send_vol_index_cmd(struct voice_data *v);
+static int voice_send_cvp_map_memory_cmd(struct voice_data *v);
+static int voice_send_cvp_unmap_memory_cmd(struct voice_data *v);
+static int voice_send_cvs_map_memory_cmd(struct voice_data *v);
+static int voice_send_cvs_unmap_memory_cmd(struct voice_data *v);
+static int voice_send_cvs_register_cal_cmd(struct voice_data *v);
+static int voice_send_cvs_deregister_cal_cmd(struct voice_data *v);
+static int voice_send_cvp_register_cal_cmd(struct voice_data *v);
+static int voice_send_cvp_deregister_cal_cmd(struct voice_data *v);
+static int voice_send_cvp_register_vol_cal_table_cmd(struct voice_data *v);
+static int voice_send_cvp_deregister_vol_cal_table_cmd(struct voice_data *v);
+static int voice_send_set_widevoice_enable_cmd(struct voice_data *v);
+static int voice_send_set_pp_enable_cmd(struct voice_data *v,
+ uint32_t module_id, int enable);
+static int voice_cvs_stop_playback(struct voice_data *v);
+static int voice_cvs_start_playback(struct voice_data *v);
+static int voice_cvs_start_record(struct voice_data *v, uint32_t rec_mode);
+static int voice_cvs_stop_record(struct voice_data *v);
+
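+/*
+ * APR response handlers for the three ADSP voice services: MVM (multimode
+ * voice manager), CVS (core voice stream) and CVP (core voice processor).
+ */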
+static int32_t qdsp_mvm_callback(struct apr_client_data *data, void *priv);
+static int32_t qdsp_cvs_callback(struct apr_client_data *data, void *priv);
+static int32_t qdsp_cvp_callback(struct apr_client_data *data, void *priv);
+
+static u16 voice_get_mvm_handle(struct voice_data *v)
+{
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return 0;
+ }
+
+ pr_debug("%s: mvm_handle %d\n", __func__, v->mvm_handle);
+
+ return v->mvm_handle;
+}
+
+static void voice_set_mvm_handle(struct voice_data *v, u16 mvm_handle)
+{
+ pr_debug("%s: mvm_handle %d\n", __func__, mvm_handle);
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return;
+ }
+
+ v->mvm_handle = mvm_handle;
+}
+
+static u16 voice_get_cvs_handle(struct voice_data *v)
+{
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return 0;
+ }
+
+ pr_debug("%s: cvs_handle %d\n", __func__, v->cvs_handle);
+
+ return v->cvs_handle;
+}
+
+static void voice_set_cvs_handle(struct voice_data *v, u16 cvs_handle)
+{
+ pr_debug("%s: cvs_handle %d\n", __func__, cvs_handle);
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return;
+ }
+
+ v->cvs_handle = cvs_handle;
+}
+
+static u16 voice_get_cvp_handle(struct voice_data *v)
+{
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return 0;
+ }
+
+ pr_debug("%s: cvp_handle %d\n", __func__, v->cvp_handle);
+
+ return v->cvp_handle;
+}
+
+static void voice_set_cvp_handle(struct voice_data *v, u16 cvp_handle)
+{
+ pr_debug("%s: cvp_handle %d\n", __func__, cvp_handle);
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return;
+ }
+
+ v->cvp_handle = cvp_handle;
+}
+
+uint16_t voc_get_session_id(char *name)
+{
+ u16 session_id = 0;
+
+ if (name != NULL) {
+ if (!strncmp(name, "Voice session", 13))
+ session_id = common.voice[VOC_PATH_PASSIVE].session_id;
+ else if (!strncmp(name, "VoLTE session", 13))
+ session_id =
+ common.voice[VOC_PATH_VOLTE_PASSIVE].session_id;
+ else
+ session_id = common.voice[VOC_PATH_FULL].session_id;
+
+ pr_debug("%s: %s has session id 0x%x\n", __func__, name,
+ session_id);
+ }
+
+ return session_id;
+}
+
+static struct voice_data *voice_get_session(u16 session_id)
+{
+ struct voice_data *v = NULL;
+
+ if ((session_id >= SESSION_ID_BASE) &&
+ (session_id < SESSION_ID_BASE + MAX_VOC_SESSIONS)) {
+ v = &common.voice[session_id - SESSION_ID_BASE];
+ }
+
+ pr_debug("%s: session_id 0x%x session handle 0x%x\n",
+ __func__, session_id, (unsigned int)v);
+
+ return v;
+}
+
+static bool is_voice_session(u16 session_id)
+{
+ return (session_id == common.voice[VOC_PATH_PASSIVE].session_id);
+}
+
+static bool is_voip_session(u16 session_id)
+{
+ return (session_id == common.voice[VOC_PATH_FULL].session_id);
+}
+
+static bool is_volte_session(u16 session_id)
+{
+ return (session_id == common.voice[VOC_PATH_VOLTE_PASSIVE].session_id);
+}
+
+static int voice_apr_register(void)
+{
+ pr_debug("%s\n", __func__);
+
+ mutex_lock(&common.common_lock);
+
+ /* register callback to APR */
+ if (common.apr_q6_mvm == NULL) {
+ pr_debug("%s: Start to register MVM callback\n", __func__);
+
+ common.apr_q6_mvm = apr_register("ADSP", "MVM",
+ qdsp_mvm_callback,
+ 0xFFFFFFFF, &common);
+
+ if (common.apr_q6_mvm == NULL) {
+ pr_err("%s: Unable to register MVM\n", __func__);
+ goto err;
+ }
+ }
+
+ if (common.apr_q6_cvs == NULL) {
+ pr_debug("%s: Start to register CVS callback\n", __func__);
+
+ common.apr_q6_cvs = apr_register("ADSP", "CVS",
+ qdsp_cvs_callback,
+ 0xFFFFFFFF, &common);
+
+ if (common.apr_q6_cvs == NULL) {
+ pr_err("%s: Unable to register CVS\n", __func__);
+ goto err;
+ }
+
+ rtac_set_voice_handle(RTAC_CVS, common.apr_q6_cvs);
+ }
+
+ if (common.apr_q6_cvp == NULL) {
+ pr_debug("%s: Start to register CVP callback\n", __func__);
+
+ common.apr_q6_cvp = apr_register("ADSP", "CVP",
+ qdsp_cvp_callback,
+ 0xFFFFFFFF, &common);
+
+ if (common.apr_q6_cvp == NULL) {
+ pr_err("%s: Unable to register CVP\n", __func__);
+ goto err;
+ }
+
+ rtac_set_voice_handle(RTAC_CVP, common.apr_q6_cvp);
+ }
+
+ mutex_unlock(&common.common_lock);
+
+ return 0;
+
+err:
+ if (common.apr_q6_cvs != NULL) {
+ apr_deregister(common.apr_q6_cvs);
+ common.apr_q6_cvs = NULL;
+ rtac_set_voice_handle(RTAC_CVS, NULL);
+ }
+ if (common.apr_q6_mvm != NULL) {
+ apr_deregister(common.apr_q6_mvm);
+ common.apr_q6_mvm = NULL;
+ }
+
+ mutex_unlock(&common.common_lock);
+
+ return -ENODEV;
+}
+
+static int voice_send_dual_control_cmd(struct voice_data *v)
+{
+ int ret = 0;
+ struct mvm_modem_dual_control_session_cmd mvm_voice_ctl_cmd;
+ void *apr_mvm;
+ u16 mvm_handle;
+
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return -EINVAL;
+ }
+ apr_mvm = common.apr_q6_mvm;
+ if (!apr_mvm) {
+ pr_err("%s: apr_mvm is NULL.\n", __func__);
+ return -EINVAL;
+ }
+ pr_debug("%s: VoLTE command to MVM\n", __func__);
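+	/* The dual-control policy only needs to be set for VoLTE sessions. */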
+ if (is_volte_session(v->session_id)) {
+ mvm_handle = voice_get_mvm_handle(v);
+ mvm_voice_ctl_cmd.hdr.hdr_field = APR_HDR_FIELD(
+ APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ mvm_voice_ctl_cmd.hdr.pkt_size = APR_PKT_SIZE(
+ APR_HDR_SIZE,
+ sizeof(mvm_voice_ctl_cmd) -
+ APR_HDR_SIZE);
+ pr_debug("%s: send mvm Voice Ctl pkt size = %d\n",
+ __func__, mvm_voice_ctl_cmd.hdr.pkt_size);
+ mvm_voice_ctl_cmd.hdr.src_port = v->session_id;
+ mvm_voice_ctl_cmd.hdr.dest_port = mvm_handle;
+ mvm_voice_ctl_cmd.hdr.token = 0;
+ mvm_voice_ctl_cmd.hdr.opcode =
+ VSS_IMVM_CMD_SET_POLICY_DUAL_CONTROL;
+ mvm_voice_ctl_cmd.voice_ctl.enable_flag = true;
+ v->mvm_state = CMD_STATUS_FAIL;
+
+ ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_voice_ctl_cmd);
+ if (ret < 0) {
+ pr_err("%s: Error sending MVM Voice CTL CMD\n",
+ __func__);
+ ret = -EINVAL;
+ goto fail;
+ }
+ ret = wait_event_timeout(v->mvm_wait,
+ (v->mvm_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ ret = -EINVAL;
+ goto fail;
+ }
+ }
+ ret = 0;
+fail:
+ return ret;
+}
+
+
+static int voice_create_mvm_cvs_session(struct voice_data *v)
+{
+ int ret = 0;
+ struct mvm_create_ctl_session_cmd mvm_session_cmd;
+ struct cvs_create_passive_ctl_session_cmd cvs_session_cmd;
+ struct cvs_create_full_ctl_session_cmd cvs_full_ctl_cmd;
+ struct mvm_attach_stream_cmd attach_stream_cmd;
+ void *apr_mvm, *apr_cvs, *apr_cvp;
+ u16 mvm_handle, cvs_handle, cvp_handle;
+
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return -EINVAL;
+ }
+ apr_mvm = common.apr_q6_mvm;
+ apr_cvs = common.apr_q6_cvs;
+ apr_cvp = common.apr_q6_cvp;
+
+ if (!apr_mvm || !apr_cvs || !apr_cvp) {
+ pr_err("%s: apr_mvm or apr_cvs or apr_cvp is NULL\n", __func__);
+ return -EINVAL;
+ }
+ mvm_handle = voice_get_mvm_handle(v);
+ cvs_handle = voice_get_cvs_handle(v);
+ cvp_handle = voice_get_cvp_handle(v);
+
+ pr_debug("%s: mvm_hdl=%d, cvs_hdl=%d\n", __func__,
+ mvm_handle, cvs_handle);
+ /* send cmd to create mvm session and wait for response */
+
+ if (!mvm_handle) {
+ if (is_voice_session(v->session_id) ||
+ is_volte_session(v->session_id)) {
+ mvm_session_cmd.hdr.hdr_field = APR_HDR_FIELD(
+ APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ mvm_session_cmd.hdr.pkt_size = APR_PKT_SIZE(
+ APR_HDR_SIZE,
+ sizeof(mvm_session_cmd) -
+ APR_HDR_SIZE);
+ pr_debug("%s: send mvm create session pkt size = %d\n",
+ __func__, mvm_session_cmd.hdr.pkt_size);
+ mvm_session_cmd.hdr.src_port = v->session_id;
+ mvm_session_cmd.hdr.dest_port = 0;
+ mvm_session_cmd.hdr.token = 0;
+ mvm_session_cmd.hdr.opcode =
+ VSS_IMVM_CMD_CREATE_PASSIVE_CONTROL_SESSION;
+ if (is_volte_session(v->session_id)) {
+ strlcpy(mvm_session_cmd.mvm_session.name,
+ "default volte voice",
+ sizeof(mvm_session_cmd.mvm_session.name));
+ } else {
+ strlcpy(mvm_session_cmd.mvm_session.name,
+ "default modem voice",
+ sizeof(mvm_session_cmd.mvm_session.name));
+ }
+
+ v->mvm_state = CMD_STATUS_FAIL;
+
+ ret = apr_send_pkt(apr_mvm,
+ (uint32_t *) &mvm_session_cmd);
+ if (ret < 0) {
+ pr_err("%s: Error sending MVM_CONTROL_SESSION\n",
+ __func__);
+ goto fail;
+ }
+ ret = wait_event_timeout(v->mvm_wait,
+ (v->mvm_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ goto fail;
+ }
+ } else {
+ pr_debug("%s: creating MVM full ctrl\n", __func__);
+ mvm_session_cmd.hdr.hdr_field =
+ APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ mvm_session_cmd.hdr.pkt_size =
+ APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(mvm_session_cmd) -
+ APR_HDR_SIZE);
+ mvm_session_cmd.hdr.src_port = v->session_id;
+ mvm_session_cmd.hdr.dest_port = 0;
+ mvm_session_cmd.hdr.token = 0;
+ mvm_session_cmd.hdr.opcode =
+ VSS_IMVM_CMD_CREATE_FULL_CONTROL_SESSION;
+ strlcpy(mvm_session_cmd.mvm_session.name,
+ "default voip",
+ sizeof(mvm_session_cmd.mvm_session.name));
+
+ v->mvm_state = CMD_STATUS_FAIL;
+
+ ret = apr_send_pkt(apr_mvm,
+ (uint32_t *) &mvm_session_cmd);
+ if (ret < 0) {
+ pr_err("Fail in sending MVM_CONTROL_SESSION\n");
+ goto fail;
+ }
+ ret = wait_event_timeout(v->mvm_wait,
+ (v->mvm_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ goto fail;
+ }
+ }
+ /* Get the created MVM handle. */
+ mvm_handle = voice_get_mvm_handle(v);
+ }
+ /* send cmd to create cvs session */
+ if (!cvs_handle) {
+ if (is_voice_session(v->session_id) ||
+ is_volte_session(v->session_id)) {
+ pr_debug("%s: creating CVS passive session\n",
+ __func__);
+
+ cvs_session_cmd.hdr.hdr_field = APR_HDR_FIELD(
+ APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ cvs_session_cmd.hdr.pkt_size =
+ APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(cvs_session_cmd) -
+ APR_HDR_SIZE);
+ cvs_session_cmd.hdr.src_port = v->session_id;
+ cvs_session_cmd.hdr.dest_port = 0;
+ cvs_session_cmd.hdr.token = 0;
+ cvs_session_cmd.hdr.opcode =
+ VSS_ISTREAM_CMD_CREATE_PASSIVE_CONTROL_SESSION;
+ if (is_volte_session(v->session_id)) {
+				strlcpy(cvs_session_cmd.cvs_session.name,
+				"default volte voice",
+				sizeof(cvs_session_cmd.cvs_session.name));
+ } else {
+ strlcpy(cvs_session_cmd.cvs_session.name,
+ "default modem voice",
+ sizeof(cvs_session_cmd.cvs_session.name));
+ }
+ v->cvs_state = CMD_STATUS_FAIL;
+
+ ret = apr_send_pkt(apr_cvs,
+ (uint32_t *) &cvs_session_cmd);
+ if (ret < 0) {
+ pr_err("Fail in sending STREAM_CONTROL_SESSION\n");
+ goto fail;
+ }
+ ret = wait_event_timeout(v->cvs_wait,
+ (v->cvs_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ goto fail;
+ }
+ /* Get the created CVS handle. */
+ cvs_handle = voice_get_cvs_handle(v);
+
+ } else {
+ pr_debug("%s: creating CVS full session\n", __func__);
+
+ cvs_full_ctl_cmd.hdr.hdr_field =
+ APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+
+ cvs_full_ctl_cmd.hdr.pkt_size =
+ APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(cvs_full_ctl_cmd) -
+ APR_HDR_SIZE);
+
+ cvs_full_ctl_cmd.hdr.src_port = v->session_id;
+ cvs_full_ctl_cmd.hdr.dest_port = 0;
+ cvs_full_ctl_cmd.hdr.token = 0;
+ cvs_full_ctl_cmd.hdr.opcode =
+ VSS_ISTREAM_CMD_CREATE_FULL_CONTROL_SESSION;
+ cvs_full_ctl_cmd.cvs_session.direction = 2;
+ cvs_full_ctl_cmd.cvs_session.enc_media_type =
+ common.mvs_info.media_type;
+ cvs_full_ctl_cmd.cvs_session.dec_media_type =
+ common.mvs_info.media_type;
+ cvs_full_ctl_cmd.cvs_session.network_id =
+ common.mvs_info.network_type;
+ strlcpy(cvs_full_ctl_cmd.cvs_session.name,
+ "default q6 voice",
+ sizeof(cvs_full_ctl_cmd.cvs_session.name));
+
+ v->cvs_state = CMD_STATUS_FAIL;
+
+ ret = apr_send_pkt(apr_cvs,
+ (uint32_t *) &cvs_full_ctl_cmd);
+
+ if (ret < 0) {
+ pr_err("%s: Err %d sending CREATE_FULL_CTRL\n",
+ __func__, ret);
+ goto fail;
+ }
+ ret = wait_event_timeout(v->cvs_wait,
+ (v->cvs_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ goto fail;
+ }
+ /* Get the created CVS handle. */
+ cvs_handle = voice_get_cvs_handle(v);
+
+ /* Attach MVM to CVS. */
+ pr_debug("%s: Attach MVM to stream\n", __func__);
+
+ attach_stream_cmd.hdr.hdr_field =
+ APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ attach_stream_cmd.hdr.pkt_size =
+ APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(attach_stream_cmd) -
+ APR_HDR_SIZE);
+ attach_stream_cmd.hdr.src_port = v->session_id;
+ attach_stream_cmd.hdr.dest_port = mvm_handle;
+ attach_stream_cmd.hdr.token = 0;
+ attach_stream_cmd.hdr.opcode =
+ VSS_IMVM_CMD_ATTACH_STREAM;
+ attach_stream_cmd.attach_stream.handle = cvs_handle;
+
+ v->mvm_state = CMD_STATUS_FAIL;
+ ret = apr_send_pkt(apr_mvm,
+ (uint32_t *) &attach_stream_cmd);
+ if (ret < 0) {
+ pr_err("%s: Error %d sending ATTACH_STREAM\n",
+ __func__, ret);
+ goto fail;
+ }
+ ret = wait_event_timeout(v->mvm_wait,
+ (v->mvm_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ goto fail;
+ }
+ }
+ }
+ return 0;
+
+fail:
+ return -EINVAL;
+}
+
+static int voice_destroy_mvm_cvs_session(struct voice_data *v)
+{
+ int ret = 0;
+ struct mvm_detach_stream_cmd detach_stream;
+ struct apr_hdr mvm_destroy;
+ struct apr_hdr cvs_destroy;
+ void *apr_mvm, *apr_cvs;
+ u16 mvm_handle, cvs_handle;
+
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return -EINVAL;
+ }
+ apr_mvm = common.apr_q6_mvm;
+ apr_cvs = common.apr_q6_cvs;
+
+ if (!apr_mvm || !apr_cvs) {
+ pr_err("%s: apr_mvm or apr_cvs is NULL\n", __func__);
+ return -EINVAL;
+ }
+ mvm_handle = voice_get_mvm_handle(v);
+ cvs_handle = voice_get_cvs_handle(v);
+
+ /* MVM, CVS sessions are destroyed only for Full control sessions. */
+ if (is_voip_session(v->session_id)) {
+ pr_debug("%s: MVM detach stream\n", __func__);
+
+ /* Detach voice stream. */
+ detach_stream.hdr.hdr_field =
+ APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ detach_stream.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(detach_stream) - APR_HDR_SIZE);
+ detach_stream.hdr.src_port = v->session_id;
+ detach_stream.hdr.dest_port = mvm_handle;
+ detach_stream.hdr.token = 0;
+ detach_stream.hdr.opcode = VSS_IMVM_CMD_DETACH_STREAM;
+ detach_stream.detach_stream.handle = cvs_handle;
+
+ v->mvm_state = CMD_STATUS_FAIL;
+
+ ret = apr_send_pkt(apr_mvm, (uint32_t *) &detach_stream);
+ if (ret < 0) {
+ pr_err("%s: Error %d sending DETACH_STREAM\n",
+ __func__, ret);
+ goto fail;
+ }
+ ret = wait_event_timeout(v->mvm_wait,
+ (v->mvm_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait event timeout\n", __func__);
+ goto fail;
+ }
+ /* Destroy CVS. */
+ pr_debug("%s: CVS destroy session\n", __func__);
+
+ cvs_destroy.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ cvs_destroy.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(cvs_destroy) - APR_HDR_SIZE);
+ cvs_destroy.src_port = v->session_id;
+ cvs_destroy.dest_port = cvs_handle;
+ cvs_destroy.token = 0;
+ cvs_destroy.opcode = APRV2_IBASIC_CMD_DESTROY_SESSION;
+
+ v->cvs_state = CMD_STATUS_FAIL;
+
+ ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_destroy);
+ if (ret < 0) {
+ pr_err("%s: Error %d sending CVS DESTROY\n",
+ __func__, ret);
+ goto fail;
+ }
+ ret = wait_event_timeout(v->cvs_wait,
+ (v->cvs_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait event timeout\n", __func__);
+
+ goto fail;
+ }
+ cvs_handle = 0;
+ voice_set_cvs_handle(v, cvs_handle);
+
+ /* Destroy MVM. */
+ pr_debug("MVM destroy session\n");
+
+ mvm_destroy.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ mvm_destroy.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(mvm_destroy) - APR_HDR_SIZE);
+ mvm_destroy.src_port = v->session_id;
+ mvm_destroy.dest_port = mvm_handle;
+ mvm_destroy.token = 0;
+ mvm_destroy.opcode = APRV2_IBASIC_CMD_DESTROY_SESSION;
+
+ v->mvm_state = CMD_STATUS_FAIL;
+
+ ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_destroy);
+ if (ret < 0) {
+ pr_err("%s: Error %d sending MVM DESTROY\n",
+ __func__, ret);
+
+ goto fail;
+ }
+ ret = wait_event_timeout(v->mvm_wait,
+ (v->mvm_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait event timeout\n", __func__);
+
+ goto fail;
+ }
+ mvm_handle = 0;
+ voice_set_mvm_handle(v, mvm_handle);
+ }
+ return 0;
+fail:
+ return -EINVAL;
+}
+
+static int voice_send_tty_mode_cmd(struct voice_data *v)
+{
+ int ret = 0;
+ struct mvm_set_tty_mode_cmd mvm_tty_mode_cmd;
+ void *apr_mvm;
+ u16 mvm_handle;
+
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return -EINVAL;
+ }
+ apr_mvm = common.apr_q6_mvm;
+
+ if (!apr_mvm) {
+ pr_err("%s: apr_mvm is NULL.\n", __func__);
+ return -EINVAL;
+ }
+ mvm_handle = voice_get_mvm_handle(v);
+
+ if (v->tty_mode) {
+ /* send tty mode cmd to mvm */
+ mvm_tty_mode_cmd.hdr.hdr_field = APR_HDR_FIELD(
+ APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ mvm_tty_mode_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(mvm_tty_mode_cmd) -
+ APR_HDR_SIZE);
+ pr_debug("%s: pkt size = %d\n",
+ __func__, mvm_tty_mode_cmd.hdr.pkt_size);
+ mvm_tty_mode_cmd.hdr.src_port = v->session_id;
+ mvm_tty_mode_cmd.hdr.dest_port = mvm_handle;
+ mvm_tty_mode_cmd.hdr.token = 0;
+ mvm_tty_mode_cmd.hdr.opcode = VSS_ISTREAM_CMD_SET_TTY_MODE;
+ mvm_tty_mode_cmd.tty_mode.mode = v->tty_mode;
+ pr_debug("tty mode =%d\n", mvm_tty_mode_cmd.tty_mode.mode);
+
+ v->mvm_state = CMD_STATUS_FAIL;
+ ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_tty_mode_cmd);
+ if (ret < 0) {
+ pr_err("%s: Error %d sending SET_TTY_MODE\n",
+ __func__, ret);
+ goto fail;
+ }
+ ret = wait_event_timeout(v->mvm_wait,
+ (v->mvm_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ goto fail;
+ }
+ }
+ return 0;
+fail:
+ return -EINVAL;
+}
+
+static int voice_set_dtx(struct voice_data *v)
+{
+ int ret = 0;
+ void *apr_cvs;
+ u16 cvs_handle;
+ struct cvs_set_enc_dtx_mode_cmd cvs_set_dtx;
+
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return -EINVAL;
+ }
+ apr_cvs = common.apr_q6_cvs;
+
+ if (!apr_cvs) {
+ pr_err("%s: apr_cvs is NULL.\n", __func__);
+ return -EINVAL;
+ }
+
+ cvs_handle = voice_get_cvs_handle(v);
+
+ /* Set DTX */
+ cvs_set_dtx.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ cvs_set_dtx.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(cvs_set_dtx) - APR_HDR_SIZE);
+ cvs_set_dtx.hdr.src_port = v->session_id;
+ cvs_set_dtx.hdr.dest_port = cvs_handle;
+ cvs_set_dtx.hdr.token = 0;
+ cvs_set_dtx.hdr.opcode = VSS_ISTREAM_CMD_SET_ENC_DTX_MODE;
+ cvs_set_dtx.dtx_mode.enable = common.mvs_info.dtx_mode;
+
+ pr_debug("%s: Setting DTX %d\n", __func__, common.mvs_info.dtx_mode);
+
+ v->cvs_state = CMD_STATUS_FAIL;
+
+ ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_set_dtx);
+ if (ret < 0) {
+ pr_err("%s: Error %d sending SET_DTX\n", __func__, ret);
+ return -EINVAL;
+ }
+
+ ret = wait_event_timeout(v->cvs_wait,
+ (v->cvs_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int voice_config_cvs_vocoder(struct voice_data *v)
+{
+ int ret = 0;
+ void *apr_cvs;
+ u16 cvs_handle;
+ /* Set media type. */
+ struct cvs_set_media_type_cmd cvs_set_media_cmd;
+
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return -EINVAL;
+ }
+ apr_cvs = common.apr_q6_cvs;
+
+ if (!apr_cvs) {
+ pr_err("%s: apr_cvs is NULL.\n", __func__);
+ return -EINVAL;
+ }
+
+ cvs_handle = voice_get_cvs_handle(v);
+
+ cvs_set_media_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ cvs_set_media_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(cvs_set_media_cmd) - APR_HDR_SIZE);
+ cvs_set_media_cmd.hdr.src_port = v->session_id;
+ cvs_set_media_cmd.hdr.dest_port = cvs_handle;
+ cvs_set_media_cmd.hdr.token = 0;
+ cvs_set_media_cmd.hdr.opcode = VSS_ISTREAM_CMD_SET_MEDIA_TYPE;
+ cvs_set_media_cmd.media_type.tx_media_id = common.mvs_info.media_type;
+ cvs_set_media_cmd.media_type.rx_media_id = common.mvs_info.media_type;
+
+ v->cvs_state = CMD_STATUS_FAIL;
+
+ ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_set_media_cmd);
+ if (ret < 0) {
+ pr_err("%s: Error %d sending SET_MEDIA_TYPE\n",
+ __func__, ret);
+
+ goto fail;
+ }
+ ret = wait_event_timeout(v->cvs_wait,
+ (v->cvs_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+
+ goto fail;
+ }
+ /* Set encoder properties. */
+ switch (common.mvs_info.media_type) {
+ case VSS_MEDIA_ID_EVRC_MODEM: {
+ struct cvs_set_cdma_enc_minmax_rate_cmd cvs_set_cdma_rate;
+
+ pr_debug("Setting EVRC min-max rate\n");
+
+ cvs_set_cdma_rate.hdr.hdr_field =
+ APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ cvs_set_cdma_rate.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(cvs_set_cdma_rate) - APR_HDR_SIZE);
+ cvs_set_cdma_rate.hdr.src_port = v->session_id;
+ cvs_set_cdma_rate.hdr.dest_port = cvs_handle;
+ cvs_set_cdma_rate.hdr.token = 0;
+ cvs_set_cdma_rate.hdr.opcode =
+ VSS_ISTREAM_CMD_CDMA_SET_ENC_MINMAX_RATE;
+ cvs_set_cdma_rate.cdma_rate.min_rate = common.mvs_info.rate;
+ cvs_set_cdma_rate.cdma_rate.max_rate = common.mvs_info.rate;
+
+ v->cvs_state = CMD_STATUS_FAIL;
+
+ ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_set_cdma_rate);
+ if (ret < 0) {
+ pr_err("%s: Error %d sending SET_EVRC_MINMAX_RATE\n",
+ __func__, ret);
+ goto fail;
+ }
+ ret = wait_event_timeout(v->cvs_wait,
+ (v->cvs_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+
+ goto fail;
+ }
+ break;
+ }
+ case VSS_MEDIA_ID_AMR_NB_MODEM: {
+ struct cvs_set_amr_enc_rate_cmd cvs_set_amr_rate;
+
+ pr_debug("Setting AMR rate\n");
+
+ cvs_set_amr_rate.hdr.hdr_field =
+ APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ cvs_set_amr_rate.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(cvs_set_amr_rate) - APR_HDR_SIZE);
+ cvs_set_amr_rate.hdr.src_port = v->session_id;
+ cvs_set_amr_rate.hdr.dest_port = cvs_handle;
+ cvs_set_amr_rate.hdr.token = 0;
+ cvs_set_amr_rate.hdr.opcode =
+ VSS_ISTREAM_CMD_VOC_AMR_SET_ENC_RATE;
+ cvs_set_amr_rate.amr_rate.mode = common.mvs_info.rate;
+
+ v->cvs_state = CMD_STATUS_FAIL;
+
+ ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_set_amr_rate);
+ if (ret < 0) {
+ pr_err("%s: Error %d sending SET_AMR_RATE\n",
+ __func__, ret);
+ goto fail;
+ }
+ ret = wait_event_timeout(v->cvs_wait,
+ (v->cvs_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ goto fail;
+ }
+
+ ret = voice_set_dtx(v);
+ if (ret < 0)
+ goto fail;
+
+ break;
+ }
+ case VSS_MEDIA_ID_AMR_WB_MODEM: {
+ struct cvs_set_amrwb_enc_rate_cmd cvs_set_amrwb_rate;
+
+ pr_debug("Setting AMR WB rate\n");
+
+ cvs_set_amrwb_rate.hdr.hdr_field =
+ APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ cvs_set_amrwb_rate.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(cvs_set_amrwb_rate) -
+ APR_HDR_SIZE);
+ cvs_set_amrwb_rate.hdr.src_port = v->session_id;
+ cvs_set_amrwb_rate.hdr.dest_port = cvs_handle;
+ cvs_set_amrwb_rate.hdr.token = 0;
+ cvs_set_amrwb_rate.hdr.opcode =
+ VSS_ISTREAM_CMD_VOC_AMRWB_SET_ENC_RATE;
+ cvs_set_amrwb_rate.amrwb_rate.mode = common.mvs_info.rate;
+
+ v->cvs_state = CMD_STATUS_FAIL;
+
+ ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_set_amrwb_rate);
+ if (ret < 0) {
+ pr_err("%s: Error %d sending SET_AMRWB_RATE\n",
+ __func__, ret);
+ goto fail;
+ }
+ ret = wait_event_timeout(v->cvs_wait,
+ (v->cvs_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ goto fail;
+ }
+
+ ret = voice_set_dtx(v);
+ if (ret < 0)
+ goto fail;
+
+ break;
+ }
+ case VSS_MEDIA_ID_G729:
+ case VSS_MEDIA_ID_G711_ALAW:
+ case VSS_MEDIA_ID_G711_MULAW: {
+ ret = voice_set_dtx(v);
+
+ break;
+ }
+ default:
+ /* Do nothing. */
+ break;
+ }
+ return 0;
+
+fail:
+ return -EINVAL;
+}
+
+static int voice_send_start_voice_cmd(struct voice_data *v)
+{
+ struct apr_hdr mvm_start_voice_cmd;
+ int ret = 0;
+ void *apr_mvm;
+ u16 mvm_handle;
+
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return -EINVAL;
+ }
+ apr_mvm = common.apr_q6_mvm;
+
+ if (!apr_mvm) {
+ pr_err("%s: apr_mvm is NULL.\n", __func__);
+ return -EINVAL;
+ }
+ mvm_handle = voice_get_mvm_handle(v);
+
+ mvm_start_voice_cmd.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ mvm_start_voice_cmd.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(mvm_start_voice_cmd) - APR_HDR_SIZE);
+ pr_debug("send mvm_start_voice_cmd pkt size = %d\n",
+ mvm_start_voice_cmd.pkt_size);
+ mvm_start_voice_cmd.src_port = v->session_id;
+ mvm_start_voice_cmd.dest_port = mvm_handle;
+ mvm_start_voice_cmd.token = 0;
+ mvm_start_voice_cmd.opcode = VSS_IMVM_CMD_START_VOICE;
+
+ v->mvm_state = CMD_STATUS_FAIL;
+ ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_start_voice_cmd);
+ if (ret < 0) {
+ pr_err("Fail in sending VSS_IMVM_CMD_START_VOICE\n");
+ goto fail;
+ }
+ ret = wait_event_timeout(v->mvm_wait,
+ (v->mvm_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ goto fail;
+ }
+ return 0;
+fail:
+ return -EINVAL;
+}
+
+static int voice_send_disable_vocproc_cmd(struct voice_data *v)
+{
+ struct apr_hdr cvp_disable_cmd;
+ int ret = 0;
+ void *apr_cvp;
+ u16 cvp_handle;
+
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return -EINVAL;
+ }
+ apr_cvp = common.apr_q6_cvp;
+
+ if (!apr_cvp) {
+		pr_err("%s: apr_cvp is NULL.\n", __func__);
+ return -EINVAL;
+ }
+ cvp_handle = voice_get_cvp_handle(v);
+
+	/* disable vocproc and wait for response */
+ cvp_disable_cmd.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ cvp_disable_cmd.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(cvp_disable_cmd) - APR_HDR_SIZE);
+ pr_debug("cvp_disable_cmd pkt size = %d, cvp_handle=%d\n",
+ cvp_disable_cmd.pkt_size, cvp_handle);
+ cvp_disable_cmd.src_port = v->session_id;
+ cvp_disable_cmd.dest_port = cvp_handle;
+ cvp_disable_cmd.token = 0;
+ cvp_disable_cmd.opcode = VSS_IVOCPROC_CMD_DISABLE;
+
+ v->cvp_state = CMD_STATUS_FAIL;
+ ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_disable_cmd);
+ if (ret < 0) {
+ pr_err("Fail in sending VSS_IVOCPROC_CMD_DISABLE\n");
+ goto fail;
+ }
+ ret = wait_event_timeout(v->cvp_wait,
+ (v->cvp_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ goto fail;
+ }
+
+ return 0;
+fail:
+ return -EINVAL;
+}
+
+static int voice_send_set_device_cmd(struct voice_data *v)
+{
+ struct cvp_set_device_cmd cvp_setdev_cmd;
+ int ret = 0;
+ void *apr_cvp;
+ u16 cvp_handle;
+
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return -EINVAL;
+ }
+ apr_cvp = common.apr_q6_cvp;
+
+ if (!apr_cvp) {
+ pr_err("%s: apr_cvp is NULL.\n", __func__);
+ return -EINVAL;
+ }
+ cvp_handle = voice_get_cvp_handle(v);
+
+ /* set device and wait for response */
+ cvp_setdev_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ cvp_setdev_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(cvp_setdev_cmd) - APR_HDR_SIZE);
+ pr_debug(" send create cvp setdev, pkt size = %d\n",
+ cvp_setdev_cmd.hdr.pkt_size);
+ cvp_setdev_cmd.hdr.src_port = v->session_id;
+ cvp_setdev_cmd.hdr.dest_port = cvp_handle;
+ cvp_setdev_cmd.hdr.token = 0;
+ cvp_setdev_cmd.hdr.opcode = VSS_IVOCPROC_CMD_SET_DEVICE;
+
+ /* Use default topology if invalid value in ACDB */
+ cvp_setdev_cmd.cvp_set_device.tx_topology_id =
+ get_voice_tx_topology();
+ if (cvp_setdev_cmd.cvp_set_device.tx_topology_id == 0)
+ cvp_setdev_cmd.cvp_set_device.tx_topology_id =
+ VSS_IVOCPROC_TOPOLOGY_ID_TX_SM_ECNS;
+
+ cvp_setdev_cmd.cvp_set_device.rx_topology_id =
+ get_voice_rx_topology();
+ if (cvp_setdev_cmd.cvp_set_device.rx_topology_id == 0)
+ cvp_setdev_cmd.cvp_set_device.rx_topology_id =
+ VSS_IVOCPROC_TOPOLOGY_ID_RX_DEFAULT;
+ cvp_setdev_cmd.cvp_set_device.tx_port_id = v->dev_tx.port_id;
+ cvp_setdev_cmd.cvp_set_device.rx_port_id = v->dev_rx.port_id;
+ pr_debug("topology=%d , tx_port_id=%d, rx_port_id=%d\n",
+ cvp_setdev_cmd.cvp_set_device.tx_topology_id,
+ cvp_setdev_cmd.cvp_set_device.tx_port_id,
+ cvp_setdev_cmd.cvp_set_device.rx_port_id);
+
+ v->cvp_state = CMD_STATUS_FAIL;
+ ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_setdev_cmd);
+ if (ret < 0) {
+		pr_err("Fail in sending VSS_IVOCPROC_CMD_SET_DEVICE\n");
+ goto fail;
+ }
+	pr_debug("wait for cvp set device event\n");
+ ret = wait_event_timeout(v->cvp_wait,
+ (v->cvp_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ goto fail;
+ }
+
+ return 0;
+fail:
+ return -EINVAL;
+}
+
+static int voice_send_stop_voice_cmd(struct voice_data *v)
+{
+ struct apr_hdr mvm_stop_voice_cmd;
+ int ret = 0;
+ void *apr_mvm;
+ u16 mvm_handle;
+
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return -EINVAL;
+ }
+ apr_mvm = common.apr_q6_mvm;
+
+ if (!apr_mvm) {
+ pr_err("%s: apr_mvm is NULL.\n", __func__);
+ return -EINVAL;
+ }
+ mvm_handle = voice_get_mvm_handle(v);
+
+ mvm_stop_voice_cmd.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ mvm_stop_voice_cmd.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(mvm_stop_voice_cmd) - APR_HDR_SIZE);
+ pr_debug("send mvm_stop_voice_cmd pkt size = %d\n",
+ mvm_stop_voice_cmd.pkt_size);
+ mvm_stop_voice_cmd.src_port = v->session_id;
+ mvm_stop_voice_cmd.dest_port = mvm_handle;
+ mvm_stop_voice_cmd.token = 0;
+ mvm_stop_voice_cmd.opcode = VSS_IMVM_CMD_STOP_VOICE;
+
+ v->mvm_state = CMD_STATUS_FAIL;
+ ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_stop_voice_cmd);
+ if (ret < 0) {
+ pr_err("Fail in sending VSS_IMVM_CMD_STOP_VOICE\n");
+ goto fail;
+ }
+ ret = wait_event_timeout(v->mvm_wait,
+ (v->mvm_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ goto fail;
+ }
+
+ return 0;
+fail:
+ return -EINVAL;
+}
+
+static int voice_send_cvs_register_cal_cmd(struct voice_data *v)
+{
+ struct cvs_register_cal_data_cmd cvs_reg_cal_cmd;
+ struct acdb_cal_block cal_block;
+ int ret = 0;
+ void *apr_cvs;
+ u16 cvs_handle;
+ uint32_t cal_paddr;
+
+ /* get the cvs cal data */
+ get_all_vocstrm_cal(&cal_block);
+ if (cal_block.cal_size == 0)
+ goto fail;
+
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return -EINVAL;
+ }
+ apr_cvs = common.apr_q6_cvs;
+
+ if (!apr_cvs) {
+ pr_err("%s: apr_cvs is NULL.\n", __func__);
+ return -EINVAL;
+ }
+
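+	/*
+	 * Full control (VoIP) sessions register cal from the pre-allocated
+	 * shared buffer, so copy the ACDB stream cal into it; passive
+	 * sessions pass the ACDB physical address directly.
+	 */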
+ if (is_voip_session(v->session_id)) {
+ if (common.cvs_cal.buf) {
+ cal_paddr = common.cvs_cal.phy;
+
+ memcpy(common.cvs_cal.buf,
+ (void *) cal_block.cal_kvaddr,
+ cal_block.cal_size);
+ } else {
+ return -EINVAL;
+ }
+ } else {
+ cal_paddr = cal_block.cal_paddr;
+ }
+
+ cvs_handle = voice_get_cvs_handle(v);
+
+ /* fill in the header */
+ cvs_reg_cal_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ cvs_reg_cal_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(cvs_reg_cal_cmd) - APR_HDR_SIZE);
+ cvs_reg_cal_cmd.hdr.src_port = v->session_id;
+ cvs_reg_cal_cmd.hdr.dest_port = cvs_handle;
+ cvs_reg_cal_cmd.hdr.token = 0;
+ cvs_reg_cal_cmd.hdr.opcode = VSS_ISTREAM_CMD_REGISTER_CALIBRATION_DATA;
+
+ cvs_reg_cal_cmd.cvs_cal_data.phys_addr = cal_paddr;
+ cvs_reg_cal_cmd.cvs_cal_data.mem_size = cal_block.cal_size;
+
+ v->cvs_state = CMD_STATUS_FAIL;
+ ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_reg_cal_cmd);
+ if (ret < 0) {
+ pr_err("Fail: sending cvs cal,\n");
+ goto fail;
+ }
+ ret = wait_event_timeout(v->cvs_wait,
+ (v->cvs_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ goto fail;
+ }
+ return 0;
+fail:
+ return -EINVAL;
+
+}
+
+static int voice_send_cvs_deregister_cal_cmd(struct voice_data *v)
+{
+ struct cvs_deregister_cal_data_cmd cvs_dereg_cal_cmd;
+ struct acdb_cal_block cal_block;
+ int ret = 0;
+ void *apr_cvs;
+ u16 cvs_handle;
+
+ get_all_vocstrm_cal(&cal_block);
+ if (cal_block.cal_size == 0)
+ return 0;
+
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return -EINVAL;
+ }
+ apr_cvs = common.apr_q6_cvs;
+
+ if (!apr_cvs) {
+ pr_err("%s: apr_cvs is NULL.\n", __func__);
+ return -EINVAL;
+ }
+ cvs_handle = voice_get_cvs_handle(v);
+
+ /* fill in the header */
+ cvs_dereg_cal_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ cvs_dereg_cal_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(cvs_dereg_cal_cmd) - APR_HDR_SIZE);
+ cvs_dereg_cal_cmd.hdr.src_port = v->session_id;
+ cvs_dereg_cal_cmd.hdr.dest_port = cvs_handle;
+ cvs_dereg_cal_cmd.hdr.token = 0;
+ cvs_dereg_cal_cmd.hdr.opcode =
+ VSS_ISTREAM_CMD_DEREGISTER_CALIBRATION_DATA;
+
+ v->cvs_state = CMD_STATUS_FAIL;
+ ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_dereg_cal_cmd);
+ if (ret < 0) {
+ pr_err("Fail: sending cvs cal,\n");
+ goto fail;
+ }
+ ret = wait_event_timeout(v->cvs_wait,
+ (v->cvs_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ goto fail;
+ }
+ return 0;
+fail:
+ return -EINVAL;
+
+}
+
+static int voice_send_cvp_map_memory_cmd(struct voice_data *v)
+{
+ struct vss_map_memory_cmd cvp_map_mem_cmd;
+ struct acdb_cal_block cal_block;
+ int ret = 0;
+ void *apr_cvp;
+ u16 cvp_handle;
+ uint32_t cal_paddr;
+
+ /* get all cvp cal data */
+ get_all_cvp_cal(&cal_block);
+ if (cal_block.cal_size == 0)
+ goto fail;
+
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return -EINVAL;
+ }
+ apr_cvp = common.apr_q6_cvp;
+
+ if (!apr_cvp) {
+ pr_err("%s: apr_cvp is NULL.\n", __func__);
+ return -EINVAL;
+ }
+
+ if (is_voip_session(v->session_id)) {
+ if (common.cvp_cal.buf)
+ cal_paddr = common.cvp_cal.phy;
+ else
+ return -EINVAL;
+ } else {
+ cal_paddr = cal_block.cal_paddr;
+ }
+ cvp_handle = voice_get_cvp_handle(v);
+
+ /* fill in the header */
+ cvp_map_mem_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ cvp_map_mem_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(cvp_map_mem_cmd) - APR_HDR_SIZE);
+ cvp_map_mem_cmd.hdr.src_port = v->session_id;
+ cvp_map_mem_cmd.hdr.dest_port = cvp_handle;
+ cvp_map_mem_cmd.hdr.token = 0;
+ cvp_map_mem_cmd.hdr.opcode = VSS_ICOMMON_CMD_MAP_MEMORY;
+
+ pr_debug("%s, phy_addr:0x%x, mem_size:%d\n", __func__,
+ cal_paddr, cal_block.cal_size);
+ cvp_map_mem_cmd.vss_map_mem.phys_addr = cal_paddr;
+ cvp_map_mem_cmd.vss_map_mem.mem_size = cal_block.cal_size;
+ cvp_map_mem_cmd.vss_map_mem.mem_pool_id =
+ VSS_ICOMMON_MAP_MEMORY_SHMEM8_4K_POOL;
+
+ v->cvp_state = CMD_STATUS_FAIL;
+ ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_map_mem_cmd);
+ if (ret < 0) {
+ pr_err("Fail: sending cvp cal,\n");
+ goto fail;
+ }
+ ret = wait_event_timeout(v->cvp_wait,
+ (v->cvp_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ goto fail;
+ }
+ return 0;
+fail:
+ return -EINVAL;
+
+}
+
+static int voice_send_cvp_unmap_memory_cmd(struct voice_data *v)
+{
+ struct vss_unmap_memory_cmd cvp_unmap_mem_cmd;
+ struct acdb_cal_block cal_block;
+ int ret = 0;
+ void *apr_cvp;
+ u16 cvp_handle;
+ uint32_t cal_paddr;
+
+ get_all_cvp_cal(&cal_block);
+ if (cal_block.cal_size == 0)
+ return 0;
+
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return -EINVAL;
+ }
+ apr_cvp = common.apr_q6_cvp;
+
+ if (!apr_cvp) {
+ pr_err("%s: apr_cvp is NULL.\n", __func__);
+ return -EINVAL;
+ }
+
+ if (is_voip_session(v->session_id))
+ cal_paddr = common.cvp_cal.phy;
+ else
+ cal_paddr = cal_block.cal_paddr;
+
+ cvp_handle = voice_get_cvp_handle(v);
+
+ /* fill in the header */
+ cvp_unmap_mem_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ cvp_unmap_mem_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(cvp_unmap_mem_cmd) - APR_HDR_SIZE);
+ cvp_unmap_mem_cmd.hdr.src_port = v->session_id;
+ cvp_unmap_mem_cmd.hdr.dest_port = cvp_handle;
+ cvp_unmap_mem_cmd.hdr.token = 0;
+ cvp_unmap_mem_cmd.hdr.opcode = VSS_ICOMMON_CMD_UNMAP_MEMORY;
+
+ cvp_unmap_mem_cmd.vss_unmap_mem.phys_addr = cal_paddr;
+
+ v->cvp_state = CMD_STATUS_FAIL;
+ ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_unmap_mem_cmd);
+ if (ret < 0) {
+ pr_err("Fail: sending cvp cal,\n");
+ goto fail;
+ }
+ ret = wait_event_timeout(v->cvp_wait,
+ (v->cvp_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ goto fail;
+ }
+ return 0;
+fail:
+ return -EINVAL;
+
+}
+
+static int voice_send_cvs_map_memory_cmd(struct voice_data *v)
+{
+ struct vss_map_memory_cmd cvs_map_mem_cmd;
+ struct acdb_cal_block cal_block;
+ int ret = 0;
+ void *apr_cvs;
+ u16 cvs_handle;
+ uint32_t cal_paddr;
+
+ /* get all cvs cal data */
+ get_all_vocstrm_cal(&cal_block);
+ if (cal_block.cal_size == 0)
+ goto fail;
+
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return -EINVAL;
+ }
+ apr_cvs = common.apr_q6_cvs;
+
+ if (!apr_cvs) {
+ pr_err("%s: apr_cvs is NULL.\n", __func__);
+ return -EINVAL;
+ }
+
+ if (is_voip_session(v->session_id)) {
+ if (common.cvs_cal.buf)
+ cal_paddr = common.cvs_cal.phy;
+ else
+ return -EINVAL;
+ } else {
+ cal_paddr = cal_block.cal_paddr;
+ }
+
+ cvs_handle = voice_get_cvs_handle(v);
+
+ /* fill in the header */
+ cvs_map_mem_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ cvs_map_mem_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(cvs_map_mem_cmd) - APR_HDR_SIZE);
+ cvs_map_mem_cmd.hdr.src_port = v->session_id;
+ cvs_map_mem_cmd.hdr.dest_port = cvs_handle;
+ cvs_map_mem_cmd.hdr.token = 0;
+ cvs_map_mem_cmd.hdr.opcode = VSS_ICOMMON_CMD_MAP_MEMORY;
+
+ pr_debug("%s, phys_addr: 0x%x, mem_size: %d\n", __func__,
+ cal_paddr, cal_block.cal_size);
+ cvs_map_mem_cmd.vss_map_mem.phys_addr = cal_paddr;
+ cvs_map_mem_cmd.vss_map_mem.mem_size = cal_block.cal_size;
+ cvs_map_mem_cmd.vss_map_mem.mem_pool_id =
+ VSS_ICOMMON_MAP_MEMORY_SHMEM8_4K_POOL;
+
+ v->cvs_state = CMD_STATUS_FAIL;
+ ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_map_mem_cmd);
+ if (ret < 0) {
+ pr_err("Fail: sending cvs cal,\n");
+ goto fail;
+ }
+ ret = wait_event_timeout(v->cvs_wait,
+ (v->cvs_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ goto fail;
+ }
+ return 0;
+fail:
+ return -EINVAL;
+
+}
+
+static int voice_send_cvs_unmap_memory_cmd(struct voice_data *v)
+{
+ struct vss_unmap_memory_cmd cvs_unmap_mem_cmd;
+ struct acdb_cal_block cal_block;
+ int ret = 0;
+ void *apr_cvs;
+ u16 cvs_handle;
+ uint32_t cal_paddr;
+
+ get_all_vocstrm_cal(&cal_block);
+ if (cal_block.cal_size == 0)
+ return 0;
+
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return -EINVAL;
+ }
+ apr_cvs = common.apr_q6_cvs;
+
+ if (!apr_cvs) {
+ pr_err("%s: apr_cvs is NULL.\n", __func__);
+ return -EINVAL;
+ }
+
+ if (is_voip_session(v->session_id))
+ cal_paddr = common.cvs_cal.phy;
+ else
+ cal_paddr = cal_block.cal_paddr;
+
+ cvs_handle = voice_get_cvs_handle(v);
+
+ /* fill in the header */
+ cvs_unmap_mem_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ cvs_unmap_mem_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(cvs_unmap_mem_cmd) - APR_HDR_SIZE);
+ cvs_unmap_mem_cmd.hdr.src_port = v->session_id;
+ cvs_unmap_mem_cmd.hdr.dest_port = cvs_handle;
+ cvs_unmap_mem_cmd.hdr.token = 0;
+ cvs_unmap_mem_cmd.hdr.opcode = VSS_ICOMMON_CMD_UNMAP_MEMORY;
+
+ cvs_unmap_mem_cmd.vss_unmap_mem.phys_addr = cal_paddr;
+
+ v->cvs_state = CMD_STATUS_FAIL;
+ ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_unmap_mem_cmd);
+ if (ret < 0) {
+ pr_err("Fail: sending cvs cal,\n");
+ goto fail;
+ }
+ ret = wait_event_timeout(v->cvs_wait,
+ (v->cvs_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ goto fail;
+ }
+ return 0;
+fail:
+ return -EINVAL;
+
+}
+
+static int voice_send_cvp_register_cal_cmd(struct voice_data *v)
+{
+ struct cvp_register_cal_data_cmd cvp_reg_cal_cmd;
+ struct acdb_cal_block cal_block;
+ int ret = 0;
+ void *apr_cvp;
+ u16 cvp_handle;
+ uint32_t cal_paddr;
+
+ /* get the cvp cal data */
+ get_all_vocproc_cal(&cal_block);
+ if (cal_block.cal_size == 0)
+ goto fail;
+
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return -EINVAL;
+ }
+ apr_cvp = common.apr_q6_cvp;
+
+ if (!apr_cvp) {
+ pr_err("%s: apr_cvp is NULL.\n", __func__);
+ return -EINVAL;
+ }
+
+ if (is_voip_session(v->session_id)) {
+ if (common.cvp_cal.buf) {
+ cal_paddr = common.cvp_cal.phy;
+
+ memcpy(common.cvp_cal.buf,
+ (void *)cal_block.cal_kvaddr,
+ cal_block.cal_size);
+ } else {
+ return -EINVAL;
+ }
+ } else {
+ cal_paddr = cal_block.cal_paddr;
+ }
+
+ cvp_handle = voice_get_cvp_handle(v);
+
+ /* fill in the header */
+ cvp_reg_cal_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ cvp_reg_cal_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(cvp_reg_cal_cmd) - APR_HDR_SIZE);
+ cvp_reg_cal_cmd.hdr.src_port = v->session_id;
+ cvp_reg_cal_cmd.hdr.dest_port = cvp_handle;
+ cvp_reg_cal_cmd.hdr.token = 0;
+ cvp_reg_cal_cmd.hdr.opcode = VSS_IVOCPROC_CMD_REGISTER_CALIBRATION_DATA;
+
+ cvp_reg_cal_cmd.cvp_cal_data.phys_addr = cal_paddr;
+ cvp_reg_cal_cmd.cvp_cal_data.mem_size = cal_block.cal_size;
+
+ v->cvp_state = CMD_STATUS_FAIL;
+ ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_reg_cal_cmd);
+ if (ret < 0) {
+ pr_err("Fail: sending cvp cal,\n");
+ goto fail;
+ }
+ ret = wait_event_timeout(v->cvp_wait,
+ (v->cvp_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ goto fail;
+ }
+ return 0;
+fail:
+ return -EINVAL;
+
+}
+
+static int voice_send_cvp_deregister_cal_cmd(struct voice_data *v)
+{
+ struct cvp_deregister_cal_data_cmd cvp_dereg_cal_cmd;
+ struct acdb_cal_block cal_block;
+ int ret = 0;
+ void *apr_cvp;
+ u16 cvp_handle;
+
+ get_all_vocproc_cal(&cal_block);
+ if (cal_block.cal_size == 0)
+ return 0;
+
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return -EINVAL;
+ }
+ apr_cvp = common.apr_q6_cvp;
+
+ if (!apr_cvp) {
+ pr_err("%s: apr_cvp is NULL.\n", __func__);
+ return -EINVAL;
+ }
+ cvp_handle = voice_get_cvp_handle(v);
+
+ /* fill in the header */
+ cvp_dereg_cal_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ cvp_dereg_cal_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(cvp_dereg_cal_cmd) - APR_HDR_SIZE);
+ cvp_dereg_cal_cmd.hdr.src_port = v->session_id;
+ cvp_dereg_cal_cmd.hdr.dest_port = cvp_handle;
+ cvp_dereg_cal_cmd.hdr.token = 0;
+ cvp_dereg_cal_cmd.hdr.opcode =
+ VSS_IVOCPROC_CMD_DEREGISTER_CALIBRATION_DATA;
+
+ v->cvp_state = CMD_STATUS_FAIL;
+ ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_dereg_cal_cmd);
+ if (ret < 0) {
+ pr_err("Fail: sending cvp cal,\n");
+ goto fail;
+ }
+ ret = wait_event_timeout(v->cvp_wait,
+ (v->cvp_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ goto fail;
+ }
+ return 0;
+fail:
+ return -EINVAL;
+
+}
+
+static int voice_send_cvp_register_vol_cal_table_cmd(struct voice_data *v)
+{
+ struct cvp_register_vol_cal_table_cmd cvp_reg_cal_tbl_cmd;
+ struct acdb_cal_block vol_block;
+ struct acdb_cal_block voc_block;
+ int ret = 0;
+ void *apr_cvp;
+ u16 cvp_handle;
+ uint32_t cal_paddr;
+
+ /* get the cvp vol cal data */
+ get_all_vocvol_cal(&vol_block);
+ get_all_vocproc_cal(&voc_block);
+
+ if (vol_block.cal_size == 0)
+ goto fail;
+
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return -EINVAL;
+ }
+ apr_cvp = common.apr_q6_cvp;
+
+ if (!apr_cvp) {
+ pr_err("%s: apr_cvp is NULL.\n", __func__);
+ return -EINVAL;
+ }
+
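+	/*
+	 * For full control (VoIP) sessions the volume table is appended to
+	 * the vocproc cal already copied into the shared CVP buffer, hence
+	 * the voc_block.cal_size offset.
+	 */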
+ if (is_voip_session(v->session_id)) {
+ if (common.cvp_cal.buf) {
+ cal_paddr = common.cvp_cal.phy + voc_block.cal_size;
+
+ memcpy(common.cvp_cal.buf + voc_block.cal_size,
+ (void *) vol_block.cal_kvaddr,
+ vol_block.cal_size);
+ } else {
+ return -EINVAL;
+ }
+ } else {
+ cal_paddr = vol_block.cal_paddr;
+ }
+
+ cvp_handle = voice_get_cvp_handle(v);
+
+ /* fill in the header */
+ cvp_reg_cal_tbl_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ cvp_reg_cal_tbl_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(cvp_reg_cal_tbl_cmd) - APR_HDR_SIZE);
+ cvp_reg_cal_tbl_cmd.hdr.src_port = v->session_id;
+ cvp_reg_cal_tbl_cmd.hdr.dest_port = cvp_handle;
+ cvp_reg_cal_tbl_cmd.hdr.token = 0;
+ cvp_reg_cal_tbl_cmd.hdr.opcode =
+ VSS_IVOCPROC_CMD_REGISTER_VOLUME_CAL_TABLE;
+
+ cvp_reg_cal_tbl_cmd.cvp_vol_cal_tbl.phys_addr = cal_paddr;
+ cvp_reg_cal_tbl_cmd.cvp_vol_cal_tbl.mem_size = vol_block.cal_size;
+
+ v->cvp_state = CMD_STATUS_FAIL;
+ ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_reg_cal_tbl_cmd);
+ if (ret < 0) {
+ pr_err("Fail: sending cvp cal table,\n");
+ goto fail;
+ }
+ ret = wait_event_timeout(v->cvp_wait,
+ (v->cvp_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ goto fail;
+ }
+ return 0;
+fail:
+ return -EINVAL;
+
+}
+
+static int voice_send_cvp_deregister_vol_cal_table_cmd(struct voice_data *v)
+{
+ struct cvp_deregister_vol_cal_table_cmd cvp_dereg_cal_tbl_cmd;
+ struct acdb_cal_block cal_block;
+ int ret = 0;
+ void *apr_cvp;
+ u16 cvp_handle;
+
+ get_all_vocvol_cal(&cal_block);
+ if (cal_block.cal_size == 0)
+ return 0;
+
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return -EINVAL;
+ }
+ apr_cvp = common.apr_q6_cvp;
+
+ if (!apr_cvp) {
+ pr_err("%s: apr_cvp is NULL.\n", __func__);
+ return -EINVAL;
+ }
+ cvp_handle = voice_get_cvp_handle(v);
+
+ /* fill in the header */
+ cvp_dereg_cal_tbl_cmd.hdr.hdr_field = APR_HDR_FIELD(
+ APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ cvp_dereg_cal_tbl_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(cvp_dereg_cal_tbl_cmd) - APR_HDR_SIZE);
+ cvp_dereg_cal_tbl_cmd.hdr.src_port = v->session_id;
+ cvp_dereg_cal_tbl_cmd.hdr.dest_port = cvp_handle;
+ cvp_dereg_cal_tbl_cmd.hdr.token = 0;
+ cvp_dereg_cal_tbl_cmd.hdr.opcode =
+ VSS_IVOCPROC_CMD_DEREGISTER_VOLUME_CAL_TABLE;
+
+ v->cvp_state = CMD_STATUS_FAIL;
+ ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_dereg_cal_tbl_cmd);
+ if (ret < 0) {
+ pr_err("Fail: sending cvp cal table,\n");
+ goto fail;
+ }
+ ret = wait_event_timeout(v->cvp_wait,
+ (v->cvp_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ goto fail;
+ }
+ return 0;
+fail:
+ return -EINVAL;
+
+}
+
+static int voice_send_set_widevoice_enable_cmd(struct voice_data *v)
+{
+ struct mvm_set_widevoice_enable_cmd mvm_set_wv_cmd;
+ int ret = 0;
+ void *apr_mvm;
+ u16 mvm_handle;
+
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return -EINVAL;
+ }
+ apr_mvm = common.apr_q6_mvm;
+
+ if (!apr_mvm) {
+ pr_err("%s: apr_mvm is NULL.\n", __func__);
+ return -EINVAL;
+ }
+ mvm_handle = voice_get_mvm_handle(v);
+
+ /* fill in the header */
+ mvm_set_wv_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ mvm_set_wv_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(mvm_set_wv_cmd) - APR_HDR_SIZE);
+ mvm_set_wv_cmd.hdr.src_port = v->session_id;
+ mvm_set_wv_cmd.hdr.dest_port = mvm_handle;
+ mvm_set_wv_cmd.hdr.token = 0;
+ mvm_set_wv_cmd.hdr.opcode = VSS_IWIDEVOICE_CMD_SET_WIDEVOICE;
+
+ mvm_set_wv_cmd.vss_set_wv.enable = v->wv_enable;
+
+ v->mvm_state = CMD_STATUS_FAIL;
+ ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_set_wv_cmd);
+ if (ret < 0) {
+ pr_err("Fail: sending mvm set widevoice enable,\n");
+ goto fail;
+ }
+ ret = wait_event_timeout(v->mvm_wait,
+ (v->mvm_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ goto fail;
+ }
+ return 0;
+fail:
+ return -EINVAL;
+}
+
+static int voice_send_set_pp_enable_cmd(struct voice_data *v,
+ uint32_t module_id, int enable)
+{
+ struct cvs_set_pp_enable_cmd cvs_set_pp_cmd;
+ int ret = 0;
+ void *apr_cvs;
+ u16 cvs_handle;
+
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return -EINVAL;
+ }
+ apr_cvs = common.apr_q6_cvs;
+
+ if (!apr_cvs) {
+ pr_err("%s: apr_cvs is NULL.\n", __func__);
+ return -EINVAL;
+ }
+ cvs_handle = voice_get_cvs_handle(v);
+
+ /* fill in the header */
+ cvs_set_pp_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ cvs_set_pp_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(cvs_set_pp_cmd) - APR_HDR_SIZE);
+ cvs_set_pp_cmd.hdr.src_port = v->session_id;
+ cvs_set_pp_cmd.hdr.dest_port = cvs_handle;
+ cvs_set_pp_cmd.hdr.token = 0;
+ cvs_set_pp_cmd.hdr.opcode = VSS_ICOMMON_CMD_SET_UI_PROPERTY;
+
+ cvs_set_pp_cmd.vss_set_pp.module_id = module_id;
+ cvs_set_pp_cmd.vss_set_pp.param_id = VOICE_PARAM_MOD_ENABLE;
+ cvs_set_pp_cmd.vss_set_pp.param_size = MOD_ENABLE_PARAM_LEN;
+ cvs_set_pp_cmd.vss_set_pp.reserved = 0;
+ cvs_set_pp_cmd.vss_set_pp.enable = enable;
+ cvs_set_pp_cmd.vss_set_pp.reserved_field = 0;
+ pr_debug("voice_send_set_pp_enable_cmd, module_id=%d, enable=%d\n",
+ module_id, enable);
+
+ v->cvs_state = CMD_STATUS_FAIL;
+ ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_set_pp_cmd);
+ if (ret < 0) {
+		pr_err("Fail: sending cvs set pp enable\n");
+ goto fail;
+ }
+ ret = wait_event_timeout(v->cvs_wait,
+ (v->cvs_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ goto fail;
+ }
+ return 0;
+fail:
+ return -EINVAL;
+}
+
+static int voice_setup_vocproc(struct voice_data *v)
+{
+ struct cvp_create_full_ctl_session_cmd cvp_session_cmd;
+ int ret = 0;
+ void *apr_cvp;
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return -EINVAL;
+ }
+ apr_cvp = common.apr_q6_cvp;
+
+ if (!apr_cvp) {
+ pr_err("%s: apr_cvp is NULL.\n", __func__);
+ return -EINVAL;
+ }
+
+ /* create cvp session and wait for response */
+ cvp_session_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ cvp_session_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(cvp_session_cmd) - APR_HDR_SIZE);
+ pr_debug(" send create cvp session, pkt size = %d\n",
+ cvp_session_cmd.hdr.pkt_size);
+ cvp_session_cmd.hdr.src_port = v->session_id;
+ cvp_session_cmd.hdr.dest_port = 0;
+ cvp_session_cmd.hdr.token = 0;
+ cvp_session_cmd.hdr.opcode =
+ VSS_IVOCPROC_CMD_CREATE_FULL_CONTROL_SESSION;
+
+ /* Use default topology if invalid value in ACDB */
+ cvp_session_cmd.cvp_session.tx_topology_id =
+ get_voice_tx_topology();
+ if (cvp_session_cmd.cvp_session.tx_topology_id == 0)
+ cvp_session_cmd.cvp_session.tx_topology_id =
+ VSS_IVOCPROC_TOPOLOGY_ID_TX_SM_ECNS;
+
+ cvp_session_cmd.cvp_session.rx_topology_id =
+ get_voice_rx_topology();
+ if (cvp_session_cmd.cvp_session.rx_topology_id == 0)
+ cvp_session_cmd.cvp_session.rx_topology_id =
+ VSS_IVOCPROC_TOPOLOGY_ID_RX_DEFAULT;
+
+ cvp_session_cmd.cvp_session.direction = 2; /*tx and rx*/
+ cvp_session_cmd.cvp_session.network_id = VSS_NETWORK_ID_DEFAULT;
+ cvp_session_cmd.cvp_session.tx_port_id = v->dev_tx.port_id;
+ cvp_session_cmd.cvp_session.rx_port_id = v->dev_rx.port_id;
+
+ pr_debug("topology=%d net_id=%d, dir=%d tx_port_id=%d, rx_port_id=%d\n",
+ cvp_session_cmd.cvp_session.tx_topology_id,
+ cvp_session_cmd.cvp_session.network_id,
+ cvp_session_cmd.cvp_session.direction,
+ cvp_session_cmd.cvp_session.tx_port_id,
+ cvp_session_cmd.cvp_session.rx_port_id);
+
+ v->cvp_state = CMD_STATUS_FAIL;
+ ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_session_cmd);
+ if (ret < 0) {
+ pr_err("Fail in sending VOCPROC_FULL_CONTROL_SESSION\n");
+ goto fail;
+ }
+ ret = wait_event_timeout(v->cvp_wait,
+ (v->cvp_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ goto fail;
+ }
+
+ /* send cvs cal */
+ ret = voice_send_cvs_map_memory_cmd(v);
+ if (!ret)
+ voice_send_cvs_register_cal_cmd(v);
+
+ /* send cvp and vol cal */
+ ret = voice_send_cvp_map_memory_cmd(v);
+ if (!ret) {
+ voice_send_cvp_register_cal_cmd(v);
+ voice_send_cvp_register_vol_cal_table_cmd(v);
+ }
+
+ /* enable vocproc */
+ ret = voice_send_enable_vocproc_cmd(v);
+ if (ret < 0)
+ goto fail;
+
+ /* attach vocproc */
+ ret = voice_send_attach_vocproc_cmd(v);
+ if (ret < 0)
+ goto fail;
+
+ /* send tty mode if tty device is used */
+ voice_send_tty_mode_cmd(v);
+
+ /* enable widevoice if wv_enable is set */
+ if (v->wv_enable)
+ voice_send_set_widevoice_enable_cmd(v);
+
+ /* enable slowtalk if st_enable is set */
+ if (v->st_enable)
+ voice_send_set_pp_enable_cmd(v, MODULE_ID_VOICE_MODULE_ST,
+ v->st_enable);
+ voice_send_set_pp_enable_cmd(v, MODULE_ID_VOICE_MODULE_FENS,
+ v->fens_enable);
+
+ if (is_voip_session(v->session_id))
+ voice_send_netid_timing_cmd(v);
+
+ /* Start in-call music delivery if this feature is enabled */
+ if (v->music_info.play_enable)
+ voice_cvs_start_playback(v);
+
+ /* Start in-call recording if this feature is enabled */
+ if (v->rec_info.rec_enable)
+ voice_cvs_start_record(v, v->rec_info.rec_mode);
+
+ rtac_add_voice(voice_get_cvs_handle(v),
+ voice_get_cvp_handle(v),
+ v->dev_rx.port_id, v->dev_tx.port_id,
+ v->session_id);
+
+ return 0;
+
+fail:
+ return -EINVAL;
+}
+
+static int voice_send_enable_vocproc_cmd(struct voice_data *v)
+{
+ int ret = 0;
+ struct apr_hdr cvp_enable_cmd;
+ void *apr_cvp;
+ u16 cvp_handle;
+
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return -EINVAL;
+ }
+ apr_cvp = common.apr_q6_cvp;
+
+ if (!apr_cvp) {
+ pr_err("%s: apr_cvp is NULL.\n", __func__);
+ return -EINVAL;
+ }
+ cvp_handle = voice_get_cvp_handle(v);
+
+	/* enable vocproc and wait for response */
+ cvp_enable_cmd.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ cvp_enable_cmd.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(cvp_enable_cmd) - APR_HDR_SIZE);
+ pr_debug("cvp_enable_cmd pkt size = %d, cvp_handle=%d\n",
+ cvp_enable_cmd.pkt_size, cvp_handle);
+ cvp_enable_cmd.src_port = v->session_id;
+ cvp_enable_cmd.dest_port = cvp_handle;
+ cvp_enable_cmd.token = 0;
+ cvp_enable_cmd.opcode = VSS_IVOCPROC_CMD_ENABLE;
+
+ v->cvp_state = CMD_STATUS_FAIL;
+ ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_enable_cmd);
+ if (ret < 0) {
+ pr_err("Fail in sending VSS_IVOCPROC_CMD_ENABLE\n");
+ goto fail;
+ }
+ ret = wait_event_timeout(v->cvp_wait,
+ (v->cvp_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ goto fail;
+ }
+
+ return 0;
+fail:
+ return -EINVAL;
+}
+
+static int voice_send_netid_timing_cmd(struct voice_data *v)
+{
+ int ret = 0;
+ void *apr_mvm;
+ u16 mvm_handle;
+ struct mvm_set_network_cmd mvm_set_network;
+ struct mvm_set_voice_timing_cmd mvm_set_voice_timing;
+
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return -EINVAL;
+ }
+ apr_mvm = common.apr_q6_mvm;
+
+ if (!apr_mvm) {
+ pr_err("%s: apr_mvm is NULL.\n", __func__);
+ return -EINVAL;
+ }
+ mvm_handle = voice_get_mvm_handle(v);
+
+ ret = voice_config_cvs_vocoder(v);
+ if (ret < 0) {
+ pr_err("%s: Error %d configuring CVS voc",
+ __func__, ret);
+ goto fail;
+ }
+ /* Set network ID. */
+ pr_debug("Setting network ID\n");
+
+ mvm_set_network.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ mvm_set_network.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(mvm_set_network) - APR_HDR_SIZE);
+ mvm_set_network.hdr.src_port = v->session_id;
+ mvm_set_network.hdr.dest_port = mvm_handle;
+ mvm_set_network.hdr.token = 0;
+ mvm_set_network.hdr.opcode = VSS_ICOMMON_CMD_SET_NETWORK;
+ mvm_set_network.network.network_id = common.mvs_info.network_type;
+
+ v->mvm_state = CMD_STATUS_FAIL;
+ ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_set_network);
+ if (ret < 0) {
+ pr_err("%s: Error %d sending SET_NETWORK\n", __func__, ret);
+ goto fail;
+ }
+
+ ret = wait_event_timeout(v->mvm_wait,
+ (v->mvm_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ goto fail;
+ }
+
+ /* Set voice timing. */
+ pr_debug("Setting voice timing\n");
+
+ mvm_set_voice_timing.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ mvm_set_voice_timing.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(mvm_set_voice_timing) -
+ APR_HDR_SIZE);
+ mvm_set_voice_timing.hdr.src_port = v->session_id;
+ mvm_set_voice_timing.hdr.dest_port = mvm_handle;
+ mvm_set_voice_timing.hdr.token = 0;
+ mvm_set_voice_timing.hdr.opcode = VSS_ICOMMON_CMD_SET_VOICE_TIMING;
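+	/* timing offsets below are in microseconds relative to the VFR */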
+ mvm_set_voice_timing.timing.mode = 0;
+ mvm_set_voice_timing.timing.enc_offset = 8000;
+ if ((machine_is_apq8064_sim()) || (machine_is_copper_sim())) {
+ pr_debug("%s: Machine is copper sim\n", __func__);
+ mvm_set_voice_timing.timing.dec_req_offset = 0;
+ mvm_set_voice_timing.timing.dec_offset = 18000;
+ } else {
+ mvm_set_voice_timing.timing.dec_req_offset = 3300;
+ mvm_set_voice_timing.timing.dec_offset = 8300;
+ }
+
+ v->mvm_state = CMD_STATUS_FAIL;
+
+ ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_set_voice_timing);
+ if (ret < 0) {
+ pr_err("%s: Error %d sending SET_TIMING\n", __func__, ret);
+ goto fail;
+ }
+
+ ret = wait_event_timeout(v->mvm_wait,
+ (v->mvm_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ goto fail;
+ }
+
+ return 0;
+fail:
+ return -EINVAL;
+}
+
+static int voice_send_attach_vocproc_cmd(struct voice_data *v)
+{
+ int ret = 0;
+ struct mvm_attach_vocproc_cmd mvm_a_vocproc_cmd;
+ void *apr_mvm;
+ u16 mvm_handle, cvp_handle;
+
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return -EINVAL;
+ }
+ apr_mvm = common.apr_q6_mvm;
+
+ if (!apr_mvm) {
+ pr_err("%s: apr_mvm is NULL.\n", __func__);
+ return -EINVAL;
+ }
+ mvm_handle = voice_get_mvm_handle(v);
+ cvp_handle = voice_get_cvp_handle(v);
+
+ /* attach vocproc and wait for response */
+ mvm_a_vocproc_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ mvm_a_vocproc_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(mvm_a_vocproc_cmd) - APR_HDR_SIZE);
+ pr_debug("send mvm_a_vocproc_cmd pkt size = %d\n",
+ mvm_a_vocproc_cmd.hdr.pkt_size);
+ mvm_a_vocproc_cmd.hdr.src_port = v->session_id;
+ mvm_a_vocproc_cmd.hdr.dest_port = mvm_handle;
+ mvm_a_vocproc_cmd.hdr.token = 0;
+ mvm_a_vocproc_cmd.hdr.opcode = VSS_IMVM_CMD_ATTACH_VOCPROC;
+ mvm_a_vocproc_cmd.mvm_attach_cvp_handle.handle = cvp_handle;
+
+ v->mvm_state = CMD_STATUS_FAIL;
+ ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_a_vocproc_cmd);
+ if (ret < 0) {
+ pr_err("Fail in sending VSS_IMVM_CMD_ATTACH_VOCPROC\n");
+ goto fail;
+ }
+ ret = wait_event_timeout(v->mvm_wait,
+ (v->mvm_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ goto fail;
+ }
+
+ return 0;
+fail:
+ return -EINVAL;
+}
+
+static int voice_destroy_vocproc(struct voice_data *v)
+{
+ struct mvm_detach_vocproc_cmd mvm_d_vocproc_cmd;
+ struct apr_hdr cvp_destroy_session_cmd;
+ int ret = 0;
+ void *apr_mvm, *apr_cvp;
+ u16 mvm_handle, cvp_handle;
+
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return -EINVAL;
+ }
+ apr_mvm = common.apr_q6_mvm;
+ apr_cvp = common.apr_q6_cvp;
+
+ if (!apr_mvm || !apr_cvp) {
+ pr_err("%s: apr_mvm or apr_cvp is NULL.\n", __func__);
+ return -EINVAL;
+ }
+ mvm_handle = voice_get_mvm_handle(v);
+ cvp_handle = voice_get_cvp_handle(v);
+
+ /* stop playback or recording */
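+	/* force stops playback even if the playback count is non-zero */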
+ v->music_info.force = 1;
+ voice_cvs_stop_playback(v);
+ voice_cvs_stop_record(v);
+ /* send stop voice cmd */
+ voice_send_stop_voice_cmd(v);
+
+ /* detach VOCPROC and wait for response from mvm */
+ mvm_d_vocproc_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ mvm_d_vocproc_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(mvm_d_vocproc_cmd) - APR_HDR_SIZE);
+ pr_debug("mvm_d_vocproc_cmd pkt size = %d\n",
+ mvm_d_vocproc_cmd.hdr.pkt_size);
+ mvm_d_vocproc_cmd.hdr.src_port = v->session_id;
+ mvm_d_vocproc_cmd.hdr.dest_port = mvm_handle;
+ mvm_d_vocproc_cmd.hdr.token = 0;
+ mvm_d_vocproc_cmd.hdr.opcode = VSS_IMVM_CMD_DETACH_VOCPROC;
+ mvm_d_vocproc_cmd.mvm_detach_cvp_handle.handle = cvp_handle;
+
+ v->mvm_state = CMD_STATUS_FAIL;
+ ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_d_vocproc_cmd);
+ if (ret < 0) {
+ pr_err("Fail in sending VSS_IMVM_CMD_DETACH_VOCPROC\n");
+ goto fail;
+ }
+ ret = wait_event_timeout(v->mvm_wait,
+ (v->mvm_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ goto fail;
+ }
+
+ /* deregister cvp and vol cal */
+ voice_send_cvp_deregister_vol_cal_table_cmd(v);
+ voice_send_cvp_deregister_cal_cmd(v);
+ voice_send_cvp_unmap_memory_cmd(v);
+
+ /* deregister cvs cal */
+ voice_send_cvs_deregister_cal_cmd(v);
+ voice_send_cvs_unmap_memory_cmd(v);
+
+	/* destroy cvp session */
+ cvp_destroy_session_cmd.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ cvp_destroy_session_cmd.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(cvp_destroy_session_cmd) - APR_HDR_SIZE);
+ pr_debug("cvp_destroy_session_cmd pkt size = %d\n",
+ cvp_destroy_session_cmd.pkt_size);
+ cvp_destroy_session_cmd.src_port = v->session_id;
+ cvp_destroy_session_cmd.dest_port = cvp_handle;
+ cvp_destroy_session_cmd.token = 0;
+ cvp_destroy_session_cmd.opcode = APRV2_IBASIC_CMD_DESTROY_SESSION;
+
+ v->cvp_state = CMD_STATUS_FAIL;
+ ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_destroy_session_cmd);
+ if (ret < 0) {
+ pr_err("Fail in sending APRV2_IBASIC_CMD_DESTROY_SESSION\n");
+ goto fail;
+ }
+ ret = wait_event_timeout(v->cvp_wait,
+ (v->cvp_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ goto fail;
+ }
+
+ rtac_remove_voice(voice_get_cvs_handle(v));
+ cvp_handle = 0;
+ voice_set_cvp_handle(v, cvp_handle);
+
+ return 0;
+
+fail:
+ return -EINVAL;
+}
+
+static int voice_send_mute_cmd(struct voice_data *v)
+{
+ struct cvs_set_mute_cmd cvs_mute_cmd;
+ int ret = 0;
+ void *apr_cvs;
+ u16 cvs_handle;
+
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return -EINVAL;
+ }
+ apr_cvs = common.apr_q6_cvs;
+
+ if (!apr_cvs) {
+ pr_err("%s: apr_cvs is NULL.\n", __func__);
+ return -EINVAL;
+ }
+ cvs_handle = voice_get_cvs_handle(v);
+
+ /* send mute/unmute to cvs */
+ cvs_mute_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ cvs_mute_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(cvs_mute_cmd) - APR_HDR_SIZE);
+ cvs_mute_cmd.hdr.src_port = v->session_id;
+ cvs_mute_cmd.hdr.dest_port = cvs_handle;
+ cvs_mute_cmd.hdr.token = 0;
+ cvs_mute_cmd.hdr.opcode = VSS_ISTREAM_CMD_SET_MUTE;
+	cvs_mute_cmd.cvs_set_mute.direction = 0; /* tx */
+ cvs_mute_cmd.cvs_set_mute.mute_flag = v->dev_tx.mute;
+
+ v->cvs_state = CMD_STATUS_FAIL;
+ ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_mute_cmd);
+ if (ret < 0) {
+ pr_err("Fail: send STREAM SET MUTE\n");
+ goto fail;
+ }
+ ret = wait_event_timeout(v->cvs_wait,
+ (v->cvs_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret)
+ pr_err("%s: wait_event timeout\n", __func__);
+
+ return 0;
+fail:
+ return -EINVAL;
+}
+
+static int voice_send_rx_device_mute_cmd(struct voice_data *v)
+{
+ struct cvp_set_mute_cmd cvp_mute_cmd;
+ int ret = 0;
+ void *apr_cvp;
+ u16 cvp_handle;
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return -EINVAL;
+ }
+ apr_cvp = common.apr_q6_cvp;
+
+ if (!apr_cvp) {
+ pr_err("%s: apr_cvp is NULL.\n", __func__);
+ return -EINVAL;
+ }
+ cvp_handle = voice_get_cvp_handle(v);
+
+ cvp_mute_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ cvp_mute_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(cvp_mute_cmd) - APR_HDR_SIZE);
+ cvp_mute_cmd.hdr.src_port = v->session_id;
+ cvp_mute_cmd.hdr.dest_port = cvp_handle;
+ cvp_mute_cmd.hdr.token = 0;
+ cvp_mute_cmd.hdr.opcode = VSS_IVOCPROC_CMD_SET_MUTE;
+	cvp_mute_cmd.cvp_set_mute.direction = 1; /* rx */
+ cvp_mute_cmd.cvp_set_mute.mute_flag = v->dev_rx.mute;
+ v->cvp_state = CMD_STATUS_FAIL;
+ ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_mute_cmd);
+ if (ret < 0) {
+ pr_err("Fail in sending RX device mute cmd\n");
+ return -EINVAL;
+ }
+ ret = wait_event_timeout(v->cvp_wait,
+ (v->cvp_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int voice_send_vol_index_cmd(struct voice_data *v)
+{
+ struct cvp_set_rx_volume_index_cmd cvp_vol_cmd;
+ int ret = 0;
+ void *apr_cvp;
+ u16 cvp_handle;
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return -EINVAL;
+ }
+ apr_cvp = common.apr_q6_cvp;
+
+ if (!apr_cvp) {
+ pr_err("%s: apr_cvp is NULL.\n", __func__);
+ return -EINVAL;
+ }
+ cvp_handle = voice_get_cvp_handle(v);
+
+ /* send volume index to cvp */
+ cvp_vol_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ cvp_vol_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(cvp_vol_cmd) - APR_HDR_SIZE);
+ cvp_vol_cmd.hdr.src_port = v->session_id;
+ cvp_vol_cmd.hdr.dest_port = cvp_handle;
+ cvp_vol_cmd.hdr.token = 0;
+ cvp_vol_cmd.hdr.opcode = VSS_IVOCPROC_CMD_SET_RX_VOLUME_INDEX;
+ cvp_vol_cmd.cvp_set_vol_idx.vol_index = v->dev_rx.volume;
+ v->cvp_state = CMD_STATUS_FAIL;
+ ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_vol_cmd);
+ if (ret < 0) {
+ pr_err("Fail in sending RX VOL INDEX\n");
+ return -EINVAL;
+ }
+ ret = wait_event_timeout(v->cvp_wait,
+ (v->cvp_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int voice_cvs_start_record(struct voice_data *v, uint32_t rec_mode)
+{
+ int ret = 0;
+ void *apr_cvs;
+ u16 cvs_handle;
+
+ struct cvs_start_record_cmd cvs_start_record;
+
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return -EINVAL;
+ }
+ apr_cvs = common.apr_q6_cvs;
+
+ if (!apr_cvs) {
+ pr_err("%s: apr_cvs is NULL.\n", __func__);
+ return -EINVAL;
+ }
+
+ cvs_handle = voice_get_cvs_handle(v);
+
+ if (!v->rec_info.recording) {
+ cvs_start_record.hdr.hdr_field = APR_HDR_FIELD(
+ APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ cvs_start_record.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(cvs_start_record) - APR_HDR_SIZE);
+ cvs_start_record.hdr.src_port = v->session_id;
+ cvs_start_record.hdr.dest_port = cvs_handle;
+ cvs_start_record.hdr.token = 0;
+ cvs_start_record.hdr.opcode = VSS_ISTREAM_CMD_START_RECORD;
+
+ if (rec_mode == VOC_REC_UPLINK) {
+ cvs_start_record.rec_mode.rx_tap_point =
+ VSS_TAP_POINT_NONE;
+ cvs_start_record.rec_mode.tx_tap_point =
+ VSS_TAP_POINT_STREAM_END;
+ } else if (rec_mode == VOC_REC_DOWNLINK) {
+ cvs_start_record.rec_mode.rx_tap_point =
+ VSS_TAP_POINT_STREAM_END;
+ cvs_start_record.rec_mode.tx_tap_point =
+ VSS_TAP_POINT_NONE;
+ } else if (rec_mode == VOC_REC_BOTH) {
+ cvs_start_record.rec_mode.rx_tap_point =
+ VSS_TAP_POINT_STREAM_END;
+ cvs_start_record.rec_mode.tx_tap_point =
+ VSS_TAP_POINT_STREAM_END;
+ } else {
+ pr_err("%s: Invalid in-call rec_mode %d\n", __func__,
+ rec_mode);
+
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ v->cvs_state = CMD_STATUS_FAIL;
+
+ ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_start_record);
+ if (ret < 0) {
+ pr_err("%s: Error %d sending START_RECORD\n", __func__,
+ ret);
+
+ goto fail;
+ }
+
+ ret = wait_event_timeout(v->cvs_wait,
+ (v->cvs_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+
+ goto fail;
+ }
+ v->rec_info.recording = 1;
+ } else {
+ pr_debug("%s: Start record already sent\n", __func__);
+ }
+
+ return 0;
+
+fail:
+ return ret;
+}
+
+static int voice_cvs_stop_record(struct voice_data *v)
+{
+ int ret = 0;
+ void *apr_cvs;
+ u16 cvs_handle;
+ struct apr_hdr cvs_stop_record;
+
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return -EINVAL;
+ }
+ apr_cvs = common.apr_q6_cvs;
+
+ if (!apr_cvs) {
+ pr_err("%s: apr_cvs is NULL.\n", __func__);
+ return -EINVAL;
+ }
+
+ cvs_handle = voice_get_cvs_handle(v);
+
+ if (v->rec_info.recording) {
+ cvs_stop_record.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ cvs_stop_record.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(cvs_stop_record) - APR_HDR_SIZE);
+ cvs_stop_record.src_port = v->session_id;
+ cvs_stop_record.dest_port = cvs_handle;
+ cvs_stop_record.token = 0;
+ cvs_stop_record.opcode = VSS_ISTREAM_CMD_STOP_RECORD;
+
+ v->cvs_state = CMD_STATUS_FAIL;
+
+ ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_stop_record);
+ if (ret < 0) {
+ pr_err("%s: Error %d sending STOP_RECORD\n",
+ __func__, ret);
+
+ goto fail;
+ }
+
+ ret = wait_event_timeout(v->cvs_wait,
+ (v->cvs_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+
+ goto fail;
+ }
+ v->rec_info.recording = 0;
+ } else {
+ pr_debug("%s: Stop record already sent\n", __func__);
+ }
+
+ return 0;
+
+fail:
+
+ return ret;
+}
+
+int voc_start_record(uint32_t port_id, uint32_t set)
+{
+ int ret = 0;
+ int rec_mode = 0;
+ u16 cvs_handle;
+ int i, rec_set = 0;
+
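+	/*
+	 * Update the UL/DL record route flags for every session and restart
+	 * CVS recording whenever the combined record mode changes.
+	 */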
+ for (i = 0; i < MAX_VOC_SESSIONS; i++) {
+ struct voice_data *v = &common.voice[i];
+ pr_debug("%s: i:%d port_id: %d, set: %d\n",
+ __func__, i, port_id, set);
+
+ mutex_lock(&v->lock);
+ rec_mode = v->rec_info.rec_mode;
+ rec_set = set;
+ if (set) {
+ if ((v->rec_route_state.ul_flag != 0) &&
+ (v->rec_route_state.dl_flag != 0)) {
+ pr_debug("%s: i=%d, rec mode already set.\n",
+ __func__, i);
+ mutex_unlock(&v->lock);
+ if (i < MAX_VOC_SESSIONS)
+ continue;
+ else
+ return 0;
+ }
+
+ if (port_id == VOICE_RECORD_TX) {
+ if ((v->rec_route_state.ul_flag == 0)
+ && (v->rec_route_state.dl_flag == 0)) {
+ rec_mode = VOC_REC_UPLINK;
+ v->rec_route_state.ul_flag = 1;
+ } else if ((v->rec_route_state.ul_flag == 0)
+ && (v->rec_route_state.dl_flag != 0)) {
+ voice_cvs_stop_record(v);
+ rec_mode = VOC_REC_BOTH;
+ v->rec_route_state.ul_flag = 1;
+ }
+ } else if (port_id == VOICE_RECORD_RX) {
+ if ((v->rec_route_state.ul_flag == 0)
+ && (v->rec_route_state.dl_flag == 0)) {
+ rec_mode = VOC_REC_DOWNLINK;
+ v->rec_route_state.dl_flag = 1;
+ } else if ((v->rec_route_state.ul_flag != 0)
+ && (v->rec_route_state.dl_flag == 0)) {
+ voice_cvs_stop_record(v);
+ rec_mode = VOC_REC_BOTH;
+ v->rec_route_state.dl_flag = 1;
+ }
+ }
+ rec_set = 1;
+ } else {
+ if ((v->rec_route_state.ul_flag == 0) &&
+ (v->rec_route_state.dl_flag == 0)) {
+ pr_debug("%s: i=%d, rec already stops.\n",
+ __func__, i);
+ mutex_unlock(&v->lock);
+ if (i < MAX_VOC_SESSIONS)
+ continue;
+ else
+ return 0;
+ }
+
+ if (port_id == VOICE_RECORD_TX) {
+ if ((v->rec_route_state.ul_flag != 0)
+ && (v->rec_route_state.dl_flag == 0)) {
+ v->rec_route_state.ul_flag = 0;
+ rec_set = 0;
+ } else if ((v->rec_route_state.ul_flag != 0)
+ && (v->rec_route_state.dl_flag != 0)) {
+ voice_cvs_stop_record(v);
+ v->rec_route_state.ul_flag = 0;
+ rec_mode = VOC_REC_DOWNLINK;
+ rec_set = 1;
+ }
+ } else if (port_id == VOICE_RECORD_RX) {
+ if ((v->rec_route_state.ul_flag == 0)
+ && (v->rec_route_state.dl_flag != 0)) {
+ v->rec_route_state.dl_flag = 0;
+ rec_set = 0;
+ } else if ((v->rec_route_state.ul_flag != 0)
+ && (v->rec_route_state.dl_flag != 0)) {
+ voice_cvs_stop_record(v);
+ v->rec_route_state.dl_flag = 0;
+ rec_mode = VOC_REC_UPLINK;
+ rec_set = 1;
+ }
+ }
+ }
+ pr_debug("%s: i=%d, mode =%d, set =%d\n", __func__,
+ i, rec_mode, rec_set);
+ cvs_handle = voice_get_cvs_handle(v);
+
+ if (cvs_handle != 0) {
+ if (rec_set)
+ ret = voice_cvs_start_record(v, rec_mode);
+ else
+ ret = voice_cvs_stop_record(v);
+ }
+
+ /* Cache the value */
+ v->rec_info.rec_enable = rec_set;
+ v->rec_info.rec_mode = rec_mode;
+
+ mutex_unlock(&v->lock);
+ }
+
+ return ret;
+}
+
+static int voice_cvs_start_playback(struct voice_data *v)
+{
+ int ret = 0;
+ struct apr_hdr cvs_start_playback;
+ void *apr_cvs;
+ u16 cvs_handle;
+
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return -EINVAL;
+ }
+ apr_cvs = common.apr_q6_cvs;
+
+ if (!apr_cvs) {
+ pr_err("%s: apr_cvs is NULL.\n", __func__);
+ return -EINVAL;
+ }
+
+ cvs_handle = voice_get_cvs_handle(v);
+
+ if (!v->music_info.playing && v->music_info.count) {
+ cvs_start_playback.hdr_field = APR_HDR_FIELD(
+ APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ cvs_start_playback.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(cvs_start_playback) - APR_HDR_SIZE);
+ cvs_start_playback.src_port = v->session_id;
+ cvs_start_playback.dest_port = cvs_handle;
+ cvs_start_playback.token = 0;
+ cvs_start_playback.opcode = VSS_ISTREAM_CMD_START_PLAYBACK;
+
+ v->cvs_state = CMD_STATUS_FAIL;
+
+ ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_start_playback);
+
+ if (ret < 0) {
+ pr_err("%s: Error %d sending START_PLAYBACK\n",
+ __func__, ret);
+
+ goto fail;
+ }
+
+ ret = wait_event_timeout(v->cvs_wait,
+ (v->cvs_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+
+ goto fail;
+ }
+
+ v->music_info.playing = 1;
+ } else {
+ pr_debug("%s: Start playback already sent\n", __func__);
+ }
+
+ return 0;
+
+fail:
+ return ret;
+}
+
+static int voice_cvs_stop_playback(struct voice_data *v)
+{
+ int ret = 0;
+ struct apr_hdr cvs_stop_playback;
+ void *apr_cvs;
+ u16 cvs_handle;
+
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return -EINVAL;
+ }
+ apr_cvs = common.apr_q6_cvs;
+
+ if (!apr_cvs) {
+ pr_err("%s: apr_cvs is NULL.\n", __func__);
+ return -EINVAL;
+ }
+
+ cvs_handle = voice_get_cvs_handle(v);
+
+ if (v->music_info.playing && ((!v->music_info.count) ||
+ (v->music_info.force))) {
+ cvs_stop_playback.hdr_field =
+ APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ cvs_stop_playback.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(cvs_stop_playback) - APR_HDR_SIZE);
+ cvs_stop_playback.src_port = v->session_id;
+ cvs_stop_playback.dest_port = cvs_handle;
+ cvs_stop_playback.token = 0;
+
+ cvs_stop_playback.opcode = VSS_ISTREAM_CMD_STOP_PLAYBACK;
+
+ v->cvs_state = CMD_STATUS_FAIL;
+
+ ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_stop_playback);
+ if (ret < 0) {
+ pr_err("%s: Error %d sending STOP_PLAYBACK\n",
+ __func__, ret);
+
+ goto fail;
+ }
+
+ ret = wait_event_timeout(v->cvs_wait,
+ (v->cvs_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+
+ goto fail;
+ }
+
+ v->music_info.playing = 0;
+ v->music_info.force = 0;
+ } else {
+ pr_debug("%s: Stop playback already sent\n", __func__);
+ }
+
+ return 0;
+
+fail:
+ return ret;
+}
+
+int voc_start_playback(uint32_t set)
+{
+ int ret = 0;
+ u16 cvs_handle;
+ int i;
+
+ for (i = 0; i < MAX_VOC_SESSIONS; i++) {
+ struct voice_data *v = &common.voice[i];
+
+ mutex_lock(&v->lock);
+ v->music_info.play_enable = set;
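+		/* music_info.count is a reference count of playback requests */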
+ if (set)
+ v->music_info.count++;
+ else
+ v->music_info.count--;
+ pr_debug("%s: music_info count =%d\n", __func__,
+ v->music_info.count);
+
+ cvs_handle = voice_get_cvs_handle(v);
+ if (cvs_handle != 0) {
+ if (set)
+ ret = voice_cvs_start_playback(v);
+ else
+ ret = voice_cvs_stop_playback(v);
+ }
+
+ mutex_unlock(&v->lock);
+ }
+
+ return ret;
+}
+
+int voc_disable_cvp(uint16_t session_id)
+{
+ struct voice_data *v = voice_get_session(session_id);
+ int ret = 0;
+
+ if (v == NULL) {
+ pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+ return -EINVAL;
+ }
+
+ mutex_lock(&v->lock);
+
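+	/*
+	 * Tear down the vocproc path for a device change;
+	 * voc_enable_cvp() brings it back up.
+	 */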
+ if (v->voc_state == VOC_RUN) {
+ if (v->dev_tx.port_id != RT_PROXY_PORT_001_TX &&
+ v->dev_rx.port_id != RT_PROXY_PORT_001_RX)
+ afe_sidetone(v->dev_tx.port_id, v->dev_rx.port_id,
+ 0, 0);
+
+ rtac_remove_voice(voice_get_cvs_handle(v));
+ /* send cmd to dsp to disable vocproc */
+ ret = voice_send_disable_vocproc_cmd(v);
+ if (ret < 0) {
+ pr_err("%s: disable vocproc failed\n", __func__);
+ goto fail;
+ }
+
+ /* deregister cvp and vol cal */
+ voice_send_cvp_deregister_vol_cal_table_cmd(v);
+ voice_send_cvp_deregister_cal_cmd(v);
+ voice_send_cvp_unmap_memory_cmd(v);
+
+ v->voc_state = VOC_CHANGE;
+ }
+
+fail:
+	mutex_unlock(&v->lock);
+
+ return ret;
+}
+
+int voc_enable_cvp(uint16_t session_id)
+{
+ struct voice_data *v = voice_get_session(session_id);
+ struct sidetone_cal sidetone_cal_data;
+ int ret = 0;
+
+ if (v == NULL) {
+ pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+ return -EINVAL;
+ }
+
+ mutex_lock(&v->lock);
+
+ if (v->voc_state == VOC_CHANGE) {
+ ret = voice_send_set_device_cmd(v);
+ if (ret < 0) {
+ pr_err("%s: set device failed\n", __func__);
+ goto fail;
+ }
+ /* send cvp and vol cal */
+ ret = voice_send_cvp_map_memory_cmd(v);
+ if (!ret) {
+ voice_send_cvp_register_cal_cmd(v);
+ voice_send_cvp_register_vol_cal_table_cmd(v);
+ }
+ ret = voice_send_enable_vocproc_cmd(v);
+ if (ret < 0) {
+ pr_err("%s: enable vocproc failed\n", __func__);
+ goto fail;
+		}
+ /* send tty mode if tty device is used */
+ voice_send_tty_mode_cmd(v);
+
+ /* enable widevoice if wv_enable is set */
+ if (v->wv_enable)
+ voice_send_set_widevoice_enable_cmd(v);
+
+ /* enable slowtalk */
+ if (v->st_enable)
+ voice_send_set_pp_enable_cmd(v,
+ MODULE_ID_VOICE_MODULE_ST,
+ v->st_enable);
+ /* enable FENS */
+ if (v->fens_enable)
+ voice_send_set_pp_enable_cmd(v,
+ MODULE_ID_VOICE_MODULE_FENS,
+ v->fens_enable);
+
+ get_sidetone_cal(&sidetone_cal_data);
+ if (v->dev_tx.port_id != RT_PROXY_PORT_001_TX &&
+ v->dev_rx.port_id != RT_PROXY_PORT_001_RX) {
+ ret = afe_sidetone(v->dev_tx.port_id,
+ v->dev_rx.port_id,
+ sidetone_cal_data.enable,
+ sidetone_cal_data.gain);
+
+ if (ret < 0)
+ pr_err("%s: AFE command sidetone failed\n",
+ __func__);
+ }
+
+ rtac_add_voice(voice_get_cvs_handle(v),
+ voice_get_cvp_handle(v),
+ v->dev_rx.port_id, v->dev_tx.port_id,
+ v->session_id);
+ v->voc_state = VOC_RUN;
+ }
+
+fail:
+ mutex_unlock(&v->lock);
+
+ return ret;
+}
+
+int voc_set_tx_mute(uint16_t session_id, uint32_t dir, uint32_t mute)
+{
+ struct voice_data *v = voice_get_session(session_id);
+ int ret = 0;
+
+ if (v == NULL) {
+ pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+ return -EINVAL;
+ }
+
+ mutex_lock(&v->lock);
+
+ v->dev_tx.mute = mute;
+
+ if (v->voc_state == VOC_RUN)
+ ret = voice_send_mute_cmd(v);
+
+ mutex_unlock(&v->lock);
+
+ return ret;
+}
+
+int voc_set_rx_device_mute(uint16_t session_id, uint32_t mute)
+{
+ struct voice_data *v = voice_get_session(session_id);
+ int ret = 0;
+
+ if (v == NULL) {
+ pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+ return -EINVAL;
+ }
+
+ mutex_lock(&v->lock);
+
+ v->dev_rx.mute = mute;
+
+ if (v->voc_state == VOC_RUN)
+ ret = voice_send_rx_device_mute_cmd(v);
+
+ mutex_unlock(&v->lock);
+
+ return ret;
+}
+
+int voc_get_rx_device_mute(uint16_t session_id)
+{
+ struct voice_data *v = voice_get_session(session_id);
+ int ret = 0;
+
+ if (v == NULL) {
+ pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+ return -EINVAL;
+ }
+
+ mutex_lock(&v->lock);
+
+ ret = v->dev_rx.mute;
+
+ mutex_unlock(&v->lock);
+
+ return ret;
+}
+
+int voc_set_tty_mode(uint16_t session_id, uint8_t tty_mode)
+{
+ struct voice_data *v = voice_get_session(session_id);
+ int ret = 0;
+
+ if (v == NULL) {
+ pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+ return -EINVAL;
+ }
+
+ mutex_lock(&v->lock);
+
+ v->tty_mode = tty_mode;
+
+ mutex_unlock(&v->lock);
+
+ return ret;
+}
+
+uint8_t voc_get_tty_mode(uint16_t session_id)
+{
+ struct voice_data *v = voice_get_session(session_id);
+ int ret = 0;
+
+ if (v == NULL) {
+ pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+ return -EINVAL;
+ }
+
+ mutex_lock(&v->lock);
+
+ ret = v->tty_mode;
+
+ mutex_unlock(&v->lock);
+
+ return ret;
+}
+
+int voc_set_widevoice_enable(uint16_t session_id, uint32_t wv_enable)
+{
+ struct voice_data *v = voice_get_session(session_id);
+ u16 mvm_handle;
+ int ret = 0;
+
+ if (v == NULL) {
+ pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+ return -EINVAL;
+ }
+
+ mutex_lock(&v->lock);
+
+ v->wv_enable = wv_enable;
+
+ mvm_handle = voice_get_mvm_handle(v);
+
+ if (mvm_handle != 0)
+ voice_send_set_widevoice_enable_cmd(v);
+
+ mutex_unlock(&v->lock);
+
+ return ret;
+}
+
+uint32_t voc_get_widevoice_enable(uint16_t session_id)
+{
+ struct voice_data *v = voice_get_session(session_id);
+ int ret = 0;
+
+ if (v == NULL) {
+ pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+ return -EINVAL;
+ }
+
+ mutex_lock(&v->lock);
+
+ ret = v->wv_enable;
+
+ mutex_unlock(&v->lock);
+
+ return ret;
+}
+
+int voc_set_pp_enable(uint16_t session_id, uint32_t module_id, uint32_t enable)
+{
+ struct voice_data *v = voice_get_session(session_id);
+ int ret = 0;
+
+ if (v == NULL) {
+ pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+ return -EINVAL;
+ }
+
+ mutex_lock(&v->lock);
+ if (module_id == MODULE_ID_VOICE_MODULE_ST)
+ v->st_enable = enable;
+ else if (module_id == MODULE_ID_VOICE_MODULE_FENS)
+ v->fens_enable = enable;
+
+ if (v->voc_state == VOC_RUN) {
+ if (module_id == MODULE_ID_VOICE_MODULE_ST)
+ ret = voice_send_set_pp_enable_cmd(v,
+ MODULE_ID_VOICE_MODULE_ST,
+ enable);
+ else if (module_id == MODULE_ID_VOICE_MODULE_FENS)
+ ret = voice_send_set_pp_enable_cmd(v,
+ MODULE_ID_VOICE_MODULE_FENS,
+ enable);
+ }
+ mutex_unlock(&v->lock);
+
+ return ret;
+}
+
+int voc_get_pp_enable(uint16_t session_id, uint32_t module_id)
+{
+ struct voice_data *v = voice_get_session(session_id);
+ int ret = 0;
+
+ if (v == NULL) {
+ pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+ return -EINVAL;
+ }
+
+ mutex_lock(&v->lock);
+ if (module_id == MODULE_ID_VOICE_MODULE_ST)
+ ret = v->st_enable;
+ else if (module_id == MODULE_ID_VOICE_MODULE_FENS)
+ ret = v->fens_enable;
+
+ mutex_unlock(&v->lock);
+
+ return ret;
+}
+
+int voc_set_rx_vol_index(uint16_t session_id, uint32_t dir, uint32_t vol_idx)
+{
+ struct voice_data *v = voice_get_session(session_id);
+ int ret = 0;
+
+ if (v == NULL) {
+ pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+ return -EINVAL;
+ }
+
+ mutex_lock(&v->lock);
+
+ v->dev_rx.volume = vol_idx;
+
+ if (v->voc_state == VOC_RUN)
+ ret = voice_send_vol_index_cmd(v);
+
+ mutex_unlock(&v->lock);
+
+ return ret;
+}
+
+int voc_set_rxtx_port(uint16_t session_id, uint32_t port_id, uint32_t dev_type)
+{
+ struct voice_data *v = voice_get_session(session_id);
+
+ if (v == NULL) {
+ pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+ return -EINVAL;
+ }
+
+ pr_debug("%s: port_id=%d, type=%d\n", __func__, port_id, dev_type);
+
+ mutex_lock(&v->lock);
+
+ if (dev_type == DEV_RX)
+ v->dev_rx.port_id = q6audio_get_port_id(port_id);
+ else
+ v->dev_tx.port_id = q6audio_get_port_id(port_id);
+
+ mutex_unlock(&v->lock);
+
+ return 0;
+}
+
+int voc_set_route_flag(uint16_t session_id, uint8_t path_dir, uint8_t set)
+{
+ struct voice_data *v = voice_get_session(session_id);
+
+ if (v == NULL) {
+ pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+ return -EINVAL;
+ }
+
+ pr_debug("%s: path_dir=%d, set=%d\n", __func__, path_dir, set);
+
+ mutex_lock(&v->lock);
+
+ if (path_dir == RX_PATH)
+ v->voc_route_state.rx_route_flag = set;
+ else
+ v->voc_route_state.tx_route_flag = set;
+
+ mutex_unlock(&v->lock);
+
+ return 0;
+}
+
+uint8_t voc_get_route_flag(uint16_t session_id, uint8_t path_dir)
+{
+ struct voice_data *v = voice_get_session(session_id);
+ int ret = 0;
+
+ if (v == NULL) {
+ pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+ return 0;
+ }
+
+ mutex_lock(&v->lock);
+
+ if (path_dir == RX_PATH)
+ ret = v->voc_route_state.rx_route_flag;
+ else
+ ret = v->voc_route_state.tx_route_flag;
+
+ mutex_unlock(&v->lock);
+
+ return ret;
+}
+
+int voc_end_voice_call(uint16_t session_id)
+{
+ struct voice_data *v = voice_get_session(session_id);
+ int ret = 0;
+
+ if (v == NULL) {
+ pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+ return -EINVAL;
+ }
+
+ mutex_lock(&v->lock);
+
+ if (v->voc_state == VOC_RUN) {
+ if (v->dev_tx.port_id != RT_PROXY_PORT_001_TX &&
+ v->dev_rx.port_id != RT_PROXY_PORT_001_RX)
+ afe_sidetone(v->dev_tx.port_id, v->dev_rx.port_id,
+ 0, 0);
+ ret = voice_destroy_vocproc(v);
+ if (ret < 0)
+ pr_err("%s: destroy voice failed\n", __func__);
+ voice_destroy_mvm_cvs_session(v);
+
+ v->voc_state = VOC_RELEASE;
+ }
+ mutex_unlock(&v->lock);
+ return ret;
+}
+
+int voc_start_voice_call(uint16_t session_id)
+{
+ struct voice_data *v = voice_get_session(session_id);
+ struct sidetone_cal sidetone_cal_data;
+ int ret = 0;
+
+ if (v == NULL) {
+ pr_err("%s: invalid session_id 0x%x\n", __func__, session_id);
+
+ return -EINVAL;
+ }
+
+ mutex_lock(&v->lock);
+
+ if ((v->voc_state == VOC_INIT) ||
+ (v->voc_state == VOC_RELEASE)) {
+ ret = voice_apr_register();
+ if (ret < 0) {
+ pr_err("%s: apr register failed\n", __func__);
+ goto fail;
+ }
+ ret = voice_create_mvm_cvs_session(v);
+ if (ret < 0) {
+ pr_err("create mvm and cvs failed\n");
+ goto fail;
+ }
+ ret = voice_send_dual_control_cmd(v);
+ if (ret < 0) {
+ pr_err("Err Dual command failed\n");
+ goto fail;
+ }
+ ret = voice_setup_vocproc(v);
+ if (ret < 0) {
+ pr_err("setup voice failed\n");
+ goto fail;
+ }
+ ret = voice_send_start_voice_cmd(v);
+ if (ret < 0) {
+ pr_err("start voice failed\n");
+ goto fail;
+ }
+ get_sidetone_cal(&sidetone_cal_data);
+ if (v->dev_tx.port_id != RT_PROXY_PORT_001_TX &&
+ v->dev_rx.port_id != RT_PROXY_PORT_001_RX) {
+ ret = afe_sidetone(v->dev_tx.port_id,
+ v->dev_rx.port_id,
+ sidetone_cal_data.enable,
+ sidetone_cal_data.gain);
+ if (ret < 0)
+ pr_err("AFE command sidetone failed\n");
+ }
+
+ v->voc_state = VOC_RUN;
+ }
+fail:
+	mutex_unlock(&v->lock);
+ return ret;
+}
+
+void voc_register_mvs_cb(ul_cb_fn ul_cb,
+ dl_cb_fn dl_cb,
+ void *private_data)
+{
+ common.mvs_info.ul_cb = ul_cb;
+ common.mvs_info.dl_cb = dl_cb;
+ common.mvs_info.private_data = private_data;
+}
+
+void voc_config_vocoder(uint32_t media_type,
+ uint32_t rate,
+ uint32_t network_type,
+ uint32_t dtx_mode)
+{
+ common.mvs_info.media_type = media_type;
+ common.mvs_info.rate = rate;
+ common.mvs_info.network_type = network_type;
+ common.mvs_info.dtx_mode = dtx_mode;
+}
+
+static int32_t qdsp_mvm_callback(struct apr_client_data *data, void *priv)
+{
+ uint32_t *ptr = NULL;
+ struct common_data *c = NULL;
+ struct voice_data *v = NULL;
+ int i = 0;
+
+ if ((data == NULL) || (priv == NULL)) {
+ pr_err("%s: data or priv is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ c = priv;
+
+ pr_debug("%s: session_id 0x%x\n", __func__, data->dest_port);
+
+ v = voice_get_session(data->dest_port);
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+
+ return -EINVAL;
+ }
+
+ pr_debug("%s: Payload Length = %d, opcode=%x\n", __func__,
+ data->payload_size, data->opcode);
+
+ if (data->opcode == RESET_EVENTS) {
+ pr_debug("%s: Reset event received in Voice service\n",
+ __func__);
+
+ apr_reset(c->apr_q6_mvm);
+ c->apr_q6_mvm = NULL;
+
+ /* Sub-system restart is applicable to all sessions. */
+ for (i = 0; i < MAX_VOC_SESSIONS; i++)
+ c->voice[i].mvm_handle = 0;
+
+ return 0;
+ }
+
+ if (data->opcode == APR_BASIC_RSP_RESULT) {
+ if (data->payload_size) {
+ ptr = data->payload;
+
+ /* ping mvm service ACK */
+ switch (ptr[0]) {
+ case VSS_IMVM_CMD_CREATE_PASSIVE_CONTROL_SESSION:
+ case VSS_IMVM_CMD_CREATE_FULL_CONTROL_SESSION:
+				/*
+				 * Passive session is used for CS call.
+				 * Full session is used for VoIP call.
+				 */
+ pr_debug("%s: cmd = 0x%x\n", __func__, ptr[0]);
+ if (!ptr[1]) {
+ pr_debug("%s: MVM handle is %d\n",
+ __func__, data->src_port);
+ voice_set_mvm_handle(v, data->src_port);
+ } else
+ pr_err("got NACK for sending MVM create session\n");
+ v->mvm_state = CMD_STATUS_SUCCESS;
+ wake_up(&v->mvm_wait);
+ break;
+ case VSS_IMVM_CMD_START_VOICE:
+ case VSS_IMVM_CMD_ATTACH_VOCPROC:
+ case VSS_IMVM_CMD_STOP_VOICE:
+ case VSS_IMVM_CMD_DETACH_VOCPROC:
+ case VSS_ISTREAM_CMD_SET_TTY_MODE:
+ case APRV2_IBASIC_CMD_DESTROY_SESSION:
+ case VSS_IMVM_CMD_ATTACH_STREAM:
+ case VSS_IMVM_CMD_DETACH_STREAM:
+ case VSS_ICOMMON_CMD_SET_NETWORK:
+ case VSS_ICOMMON_CMD_SET_VOICE_TIMING:
+ case VSS_IWIDEVOICE_CMD_SET_WIDEVOICE:
+ case VSS_IMVM_CMD_SET_POLICY_DUAL_CONTROL:
+ pr_debug("%s: cmd = 0x%x\n", __func__, ptr[0]);
+ v->mvm_state = CMD_STATUS_SUCCESS;
+ wake_up(&v->mvm_wait);
+ break;
+ default:
+ pr_debug("%s: not match cmd = 0x%x\n",
+ __func__, ptr[0]);
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int32_t qdsp_cvs_callback(struct apr_client_data *data, void *priv)
+{
+ uint32_t *ptr = NULL;
+ struct common_data *c = NULL;
+ struct voice_data *v = NULL;
+ int i = 0;
+
+ if ((data == NULL) || (priv == NULL)) {
+ pr_err("%s: data or priv is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ c = priv;
+
+ pr_debug("%s: session_id 0x%x\n", __func__, data->dest_port);
+
+ v = voice_get_session(data->dest_port);
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+
+ return -EINVAL;
+ }
+
+ pr_debug("%s: Payload Length = %d, opcode=%x\n", __func__,
+ data->payload_size, data->opcode);
+
+ if (data->opcode == RESET_EVENTS) {
+ pr_debug("%s: Reset event received in Voice service\n",
+ __func__);
+
+ apr_reset(c->apr_q6_cvs);
+ c->apr_q6_cvs = NULL;
+
+ /* Sub-system restart is applicable to all sessions. */
+ for (i = 0; i < MAX_VOC_SESSIONS; i++)
+ c->voice[i].cvs_handle = 0;
+
+ return 0;
+ }
+
+ if (data->opcode == APR_BASIC_RSP_RESULT) {
+ if (data->payload_size) {
+ ptr = data->payload;
+
+			/* response from CVS */
+ switch (ptr[0]) {
+ case VSS_ISTREAM_CMD_CREATE_PASSIVE_CONTROL_SESSION:
+ case VSS_ISTREAM_CMD_CREATE_FULL_CONTROL_SESSION:
+ if (!ptr[1]) {
+ pr_debug("%s: CVS handle is %d\n",
+ __func__, data->src_port);
+ voice_set_cvs_handle(v, data->src_port);
+ } else
+ pr_err("got NACK for sending CVS create session\n");
+ v->cvs_state = CMD_STATUS_SUCCESS;
+ wake_up(&v->cvs_wait);
+ break;
+ case VSS_ISTREAM_CMD_SET_MUTE:
+ case VSS_ISTREAM_CMD_SET_MEDIA_TYPE:
+ case VSS_ISTREAM_CMD_VOC_AMR_SET_ENC_RATE:
+ case VSS_ISTREAM_CMD_VOC_AMRWB_SET_ENC_RATE:
+ case VSS_ISTREAM_CMD_SET_ENC_DTX_MODE:
+ case VSS_ISTREAM_CMD_CDMA_SET_ENC_MINMAX_RATE:
+ case APRV2_IBASIC_CMD_DESTROY_SESSION:
+ case VSS_ISTREAM_CMD_REGISTER_CALIBRATION_DATA:
+ case VSS_ISTREAM_CMD_DEREGISTER_CALIBRATION_DATA:
+ case VSS_ICOMMON_CMD_MAP_MEMORY:
+ case VSS_ICOMMON_CMD_UNMAP_MEMORY:
+ case VSS_ICOMMON_CMD_SET_UI_PROPERTY:
+ case VSS_ISTREAM_CMD_START_PLAYBACK:
+ case VSS_ISTREAM_CMD_STOP_PLAYBACK:
+ case VSS_ISTREAM_CMD_START_RECORD:
+ case VSS_ISTREAM_CMD_STOP_RECORD:
+ pr_debug("%s: cmd = 0x%x\n", __func__, ptr[0]);
+ v->cvs_state = CMD_STATUS_SUCCESS;
+ wake_up(&v->cvs_wait);
+ break;
+ case VOICE_CMD_SET_PARAM:
+ rtac_make_voice_callback(RTAC_CVS, ptr,
+ data->payload_size);
+ break;
+ default:
+ pr_debug("%s: cmd = 0x%x\n", __func__, ptr[0]);
+ break;
+ }
+ }
+ } else if (data->opcode == VSS_ISTREAM_EVT_SEND_ENC_BUFFER) {
+ uint32_t *voc_pkt = data->payload;
+ uint32_t pkt_len = data->payload_size;
+
+ if (voc_pkt != NULL && c->mvs_info.ul_cb != NULL) {
+ pr_debug("%s: Media type is 0x%x\n",
+ __func__, voc_pkt[0]);
+
+ /* Remove media ID from payload. */
+ voc_pkt++;
+ pkt_len = pkt_len - 4;
+
+ c->mvs_info.ul_cb((uint8_t *)voc_pkt,
+ pkt_len,
+ c->mvs_info.private_data);
+ } else
+ pr_err("%s: voc_pkt is 0x%x ul_cb is 0x%x\n",
+ __func__, (unsigned int)voc_pkt,
+ (unsigned int) c->mvs_info.ul_cb);
+ } else if (data->opcode == VSS_ISTREAM_EVT_REQUEST_DEC_BUFFER) {
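+		/*
+		 * The DSP is requesting an Rx packet; fetch it from the
+		 * registered dl_cb and return it as a SEND_DEC_BUFFER event.
+		 */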
+ struct cvs_send_dec_buf_cmd send_dec_buf;
+ int ret = 0;
+ uint32_t pkt_len = 0;
+
+ if (c->mvs_info.dl_cb != NULL) {
+ send_dec_buf.dec_buf.media_id = c->mvs_info.media_type;
+
+ c->mvs_info.dl_cb(
+ (uint8_t *)&send_dec_buf.dec_buf.packet_data,
+ &pkt_len,
+ c->mvs_info.private_data);
+
+ send_dec_buf.hdr.hdr_field =
+ APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ send_dec_buf.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(send_dec_buf.dec_buf.media_id) + pkt_len);
+ send_dec_buf.hdr.src_port = v->session_id;
+ send_dec_buf.hdr.dest_port = voice_get_cvs_handle(v);
+ send_dec_buf.hdr.token = 0;
+ send_dec_buf.hdr.opcode =
+ VSS_ISTREAM_EVT_SEND_DEC_BUFFER;
+
+ ret = apr_send_pkt(c->apr_q6_cvs,
+ (uint32_t *) &send_dec_buf);
+ if (ret < 0) {
+ pr_err("%s: Error %d sending DEC_BUF\n",
+ __func__, ret);
+ goto fail;
+ }
+ } else
+ pr_debug("%s: dl_cb is NULL\n", __func__);
+ } else if (data->opcode == VSS_ISTREAM_EVT_SEND_DEC_BUFFER) {
+ pr_debug("Send dec buf resp\n");
+ } else if (data->opcode == VOICE_EVT_GET_PARAM_ACK) {
+ rtac_make_voice_callback(RTAC_CVS, data->payload,
+ data->payload_size);
+ } else
+ pr_debug("Unknown opcode 0x%x\n", data->opcode);
+
+fail:
+ return 0;
+}
+
+static int32_t qdsp_cvp_callback(struct apr_client_data *data, void *priv)
+{
+ uint32_t *ptr = NULL;
+ struct common_data *c = NULL;
+ struct voice_data *v = NULL;
+ int i = 0;
+
+ if ((data == NULL) || (priv == NULL)) {
+ pr_err("%s: data or priv is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ c = priv;
+
+ v = voice_get_session(data->dest_port);
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+
+ return -EINVAL;
+ }
+
+ pr_debug("%s: Payload Length = %d, opcode=%x\n", __func__,
+ data->payload_size, data->opcode);
+
+ if (data->opcode == RESET_EVENTS) {
+ pr_debug("%s: Reset event received in Voice service\n",
+ __func__);
+
+ apr_reset(c->apr_q6_cvp);
+ c->apr_q6_cvp = NULL;
+
+ /* Sub-system restart is applicable to all sessions. */
+ for (i = 0; i < MAX_VOC_SESSIONS; i++)
+ c->voice[i].cvp_handle = 0;
+
+ return 0;
+ }
+
+ if (data->opcode == APR_BASIC_RSP_RESULT) {
+ if (data->payload_size) {
+ ptr = data->payload;
+
+ switch (ptr[0]) {
+ case VSS_IVOCPROC_CMD_CREATE_FULL_CONTROL_SESSION:
+				/* response from CVP */
+ pr_debug("%s: cmd = 0x%x\n", __func__, ptr[0]);
+ if (!ptr[1]) {
+ voice_set_cvp_handle(v, data->src_port);
+ pr_debug("cvphdl=%d\n", data->src_port);
+ } else
+ pr_err("got NACK from CVP create session response\n");
+ v->cvp_state = CMD_STATUS_SUCCESS;
+ wake_up(&v->cvp_wait);
+ break;
+ case VSS_IVOCPROC_CMD_SET_DEVICE:
+ case VSS_IVOCPROC_CMD_SET_RX_VOLUME_INDEX:
+ case VSS_IVOCPROC_CMD_ENABLE:
+ case VSS_IVOCPROC_CMD_DISABLE:
+ case APRV2_IBASIC_CMD_DESTROY_SESSION:
+ case VSS_IVOCPROC_CMD_REGISTER_VOLUME_CAL_TABLE:
+ case VSS_IVOCPROC_CMD_DEREGISTER_VOLUME_CAL_TABLE:
+ case VSS_IVOCPROC_CMD_REGISTER_CALIBRATION_DATA:
+ case VSS_IVOCPROC_CMD_DEREGISTER_CALIBRATION_DATA:
+ case VSS_ICOMMON_CMD_MAP_MEMORY:
+ case VSS_ICOMMON_CMD_UNMAP_MEMORY:
+ case VSS_IVOCPROC_CMD_SET_MUTE:
+ v->cvp_state = CMD_STATUS_SUCCESS;
+ wake_up(&v->cvp_wait);
+ break;
+ case VOICE_CMD_SET_PARAM:
+ rtac_make_voice_callback(RTAC_CVP, ptr,
+ data->payload_size);
+ break;
+ default:
+ pr_debug("%s: not match cmd = 0x%x\n",
+ __func__, ptr[0]);
+ break;
+ }
+ }
+ } else if (data->opcode == VOICE_EVT_GET_PARAM_ACK) {
+ rtac_make_voice_callback(RTAC_CVP, data->payload,
+ data->payload_size);
+ }
+ return 0;
+}
+
+static int __init voice_init(void)
+{
+ int rc = 0, i = 0;
+ int len;
+
+ memset(&common, 0, sizeof(struct common_data));
+
+ /* Allocate memory for VoIP calibration */
+ common.client = msm_ion_client_create(UINT_MAX, "voip_client");
+ if (IS_ERR_OR_NULL((void *)common.client)) {
+ pr_err("%s: ION create client for Voip failed\n", __func__);
+ goto cont;
+ }
+ common.cvp_cal.handle = ion_alloc(common.client, CVP_CAL_SIZE, SZ_4K,
+ ION_HEAP(ION_AUDIO_HEAP_ID));
+ if (IS_ERR_OR_NULL((void *) common.cvp_cal.handle)) {
+ pr_err("%s: ION memory allocation for CVP failed\n",
+ __func__);
+ ion_client_destroy(common.client);
+ goto cont;
+ }
+
+ rc = ion_phys(common.client, common.cvp_cal.handle,
+ (ion_phys_addr_t *)&common.cvp_cal.phy, (size_t *)&len);
+ if (rc) {
+ pr_err("%s: ION Get Physical for cvp failed, rc = %d\n",
+ __func__, rc);
+ ion_free(common.client, common.cvp_cal.handle);
+ ion_client_destroy(common.client);
+ goto cont;
+ }
+
+ common.cvp_cal.buf = ion_map_kernel(common.client,
+ common.cvp_cal.handle, 0);
+ if (IS_ERR_OR_NULL((void *) common.cvp_cal.buf)) {
+ pr_err("%s: ION memory mapping for cvp failed\n", __func__);
+ common.cvp_cal.buf = NULL;
+ ion_free(common.client, common.cvp_cal.handle);
+ ion_client_destroy(common.client);
+ goto cont;
+ }
+ memset((void *)common.cvp_cal.buf, 0, CVP_CAL_SIZE);
+
+ common.cvs_cal.handle = ion_alloc(common.client, CVS_CAL_SIZE, SZ_4K,
+ ION_HEAP(ION_AUDIO_HEAP_ID));
+ if (IS_ERR_OR_NULL((void *) common.cvs_cal.handle)) {
+ pr_err("%s: ION memory allocation for CVS failed\n",
+ __func__);
+ goto cont;
+ }
+
+ rc = ion_phys(common.client, common.cvs_cal.handle,
+ (ion_phys_addr_t *)&common.cvs_cal.phy, (size_t *)&len);
+ if (rc) {
+ pr_err("%s: ION Get Physical for cvs failed, rc = %d\n",
+ __func__, rc);
+ ion_free(common.client, common.cvs_cal.handle);
+ goto cont;
+ }
+
+ common.cvs_cal.buf = ion_map_kernel(common.client,
+ common.cvs_cal.handle, 0);
+ if (IS_ERR_OR_NULL((void *) common.cvs_cal.buf)) {
+ pr_err("%s: ION memory mapping for cvs failed\n", __func__);
+ common.cvs_cal.buf = NULL;
+ ion_free(common.client, common.cvs_cal.handle);
+ goto cont;
+ }
+ memset((void *)common.cvs_cal.buf, 0, CVS_CAL_SIZE);
+cont:
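+	/* reached on calibration memory setup failure; init continues with defaults */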
+ /* set default value */
+ common.default_mute_val = 1; /* default is mute */
+ common.default_vol_val = 0;
+ common.default_sample_val = 8000;
+
+ /* Initialize MVS info. */
+ common.mvs_info.network_type = VSS_NETWORK_ID_DEFAULT;
+
+ mutex_init(&common.common_lock);
+
+ for (i = 0; i < MAX_VOC_SESSIONS; i++) {
+ common.voice[i].session_id = SESSION_ID_BASE + i;
+
+ /* initialize dev_rx and dev_tx */
+ common.voice[i].dev_rx.volume = common.default_vol_val;
+ common.voice[i].dev_rx.mute = 0;
+ common.voice[i].dev_tx.mute = common.default_mute_val;
+
+ common.voice[i].dev_tx.port_id = 0x100B;
+ common.voice[i].dev_rx.port_id = 0x100A;
+ common.voice[i].sidetone_gain = 0x512;
+
+ common.voice[i].voc_state = VOC_INIT;
+
+ init_waitqueue_head(&common.voice[i].mvm_wait);
+ init_waitqueue_head(&common.voice[i].cvs_wait);
+ init_waitqueue_head(&common.voice[i].cvp_wait);
+
+ mutex_init(&common.voice[i].lock);
+ }
+
+ return rc;
+}
+
+device_initcall(voice_init);
diff --git a/sound/soc/msm/qdsp6v2/q6voice.h b/sound/soc/msm/qdsp6v2/q6voice.h
new file mode 100644
index 0000000..1bedb15
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/q6voice.h
@@ -0,0 +1,987 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __QDSP6VOICE_H__
+#define __QDSP6VOICE_H__
+
+#include <mach/qdsp6v2/apr.h>
+#include <linux/ion.h>
+
+#define MAX_VOC_PKT_SIZE 642
+#define SESSION_NAME_LEN 20
+
+#define VOC_REC_UPLINK 0x00
+#define VOC_REC_DOWNLINK 0x01
+#define VOC_REC_BOTH 0x02
+
+struct voice_header {
+ uint32_t id;
+ uint32_t data_len;
+};
+
+struct voice_init {
+ struct voice_header hdr;
+ void *cb_handle;
+};
+
+/* Device information payload structure */
+
+struct device_data {
+ uint32_t volume; /* in index */
+ uint32_t mute;
+ uint32_t sample;
+ uint32_t enabled;
+ uint32_t dev_id;
+ uint32_t port_id;
+};
+
+struct voice_dev_route_state {
+ u16 rx_route_flag;
+ u16 tx_route_flag;
+};
+
+struct voice_rec_route_state {
+ u16 ul_flag;
+ u16 dl_flag;
+};
+
+enum {
+ VOC_INIT = 0,
+ VOC_RUN,
+ VOC_CHANGE,
+ VOC_RELEASE,
+};
+
+/* Common */
+#define VSS_ICOMMON_CMD_SET_UI_PROPERTY 0x00011103
+/* Set a UI property */
+#define VSS_ICOMMON_CMD_MAP_MEMORY 0x00011025
+#define VSS_ICOMMON_CMD_UNMAP_MEMORY 0x00011026
+/* General shared memory; byte-accessible, 4 kB-aligned. */
+#define VSS_ICOMMON_MAP_MEMORY_SHMEM8_4K_POOL 3
+
+struct vss_icommon_cmd_map_memory_t {
+ uint32_t phys_addr;
+ /* Physical address of a memory region; must be at least
+ * 4 kB aligned.
+ */
+
+ uint32_t mem_size;
+ /* Number of bytes in the region; should be a multiple of 32. */
+
+ uint16_t mem_pool_id;
+ /* Type of memory being provided. The memory ID implicitly defines
+ * the characteristics of the memory. The characteristics might include
+ * alignment type, permissions, etc.
+ * Memory pool ID. Possible values:
+ * 3 -- VSS_ICOMMON_MEM_TYPE_SHMEM8_4K_POOL.
+ */
+} __packed;
+
+struct vss_icommon_cmd_unmap_memory_t {
+ uint32_t phys_addr;
+ /* Physical address of a memory region; must be at least
+ * 4 kB aligned.
+ */
+} __packed;
+
+struct vss_map_memory_cmd {
+ struct apr_hdr hdr;
+ struct vss_icommon_cmd_map_memory_t vss_map_mem;
+} __packed;
+
+struct vss_unmap_memory_cmd {
+ struct apr_hdr hdr;
+ struct vss_icommon_cmd_unmap_memory_t vss_unmap_mem;
+} __packed;
+
+/* TO MVM commands */
+#define VSS_IMVM_CMD_CREATE_PASSIVE_CONTROL_SESSION 0x000110FF
+/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+#define VSS_IMVM_CMD_SET_POLICY_DUAL_CONTROL 0x00011327
+/*
+ * VSS_IMVM_CMD_SET_POLICY_DUAL_CONTROL
+ * Description: This command is required to let MVM know
+ * who is in control of session.
+ * Payload: Defined by vss_imvm_cmd_set_policy_dual_control_t.
+ * Result: Wait for APRV2_IBASIC_RSP_RESULT response.
+ */
+
+#define VSS_IMVM_CMD_CREATE_FULL_CONTROL_SESSION 0x000110FE
+/* Create a new full control MVM session. */
+
+#define APRV2_IBASIC_CMD_DESTROY_SESSION 0x0001003C
+/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+#define VSS_IMVM_CMD_ATTACH_STREAM 0x0001123C
+/* Attach a stream to the MVM. */
+
+#define VSS_IMVM_CMD_DETACH_STREAM 0x0001123D
+/* Detach a stream from the MVM. */
+
+#define VSS_IMVM_CMD_ATTACH_VOCPROC 0x0001123E
+/* Attach a vocproc to the MVM. The MVM will symmetrically connect this vocproc
+ * to all the streams currently attached to it.
+ */
+
+#define VSS_IMVM_CMD_DETACH_VOCPROC 0x0001123F
+/* Detach a vocproc from the MVM. The MVM will symmetrically disconnect this
+ * vocproc from all the streams to which it is currently attached.
+ */
+
+#define VSS_IMVM_CMD_START_VOICE 0x00011190
+/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+#define VSS_IMVM_CMD_STOP_VOICE 0x00011192
+/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+#define VSS_ISTREAM_CMD_ATTACH_VOCPROC 0x000110F8
+/**< Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+#define VSS_ISTREAM_CMD_DETACH_VOCPROC 0x000110F9
+/**< Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+
+#define VSS_ISTREAM_CMD_SET_TTY_MODE 0x00011196
+/**< Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+#define VSS_ICOMMON_CMD_SET_NETWORK 0x0001119C
+/* Set the network type. */
+
+#define VSS_ICOMMON_CMD_SET_VOICE_TIMING 0x000111E0
+/* Set the voice timing parameters. */
+
+#define VSS_IWIDEVOICE_CMD_SET_WIDEVOICE 0x00011243
+/* Enable/disable WideVoice */
+
+enum msm_audio_voc_rate {
+ VOC_0_RATE, /* Blank frame */
+ VOC_8_RATE, /* 1/8 rate */
+ VOC_4_RATE, /* 1/4 rate */
+ VOC_2_RATE, /* 1/2 rate */
+ VOC_1_RATE /* Full rate */
+};
+
+struct vss_istream_cmd_set_tty_mode_t {
+ uint32_t mode;
+ /**<
+ * TTY mode.
+ *
+ * 0 : TTY disabled
+ * 1 : HCO
+ * 2 : VCO
+ * 3 : FULL
+ */
+} __packed;
+
+struct vss_istream_cmd_attach_vocproc_t {
+ uint16_t handle;
+ /**< Handle of vocproc being attached. */
+} __packed;
+
+struct vss_istream_cmd_detach_vocproc_t {
+ uint16_t handle;
+ /**< Handle of vocproc being detached. */
+} __packed;
+
+struct vss_imvm_cmd_attach_stream_t {
+ uint16_t handle;
+ /* The stream handle to attach. */
+} __packed;
+
+struct vss_imvm_cmd_detach_stream_t {
+ uint16_t handle;
+ /* The stream handle to detach. */
+} __packed;
+
+struct vss_icommon_cmd_set_network_t {
+ uint32_t network_id;
+ /* Network ID. (Refer to VSS_NETWORK_ID_XXX). */
+} __packed;
+
+struct vss_icommon_cmd_set_voice_timing_t {
+ uint16_t mode;
+ /*
+ * The vocoder frame synchronization mode.
+ *
+ * 0 : No frame sync.
+ * 1 : Hard VFR (20ms Vocoder Frame Reference interrupt).
+ */
+ uint16_t enc_offset;
+ /*
+ * The offset in microseconds from the VFR to deliver a Tx vocoder
+ * packet. The offset should be less than 20000us.
+ */
+ uint16_t dec_req_offset;
+ /*
+ * The offset in microseconds from the VFR to request for an Rx vocoder
+ * packet. The offset should be less than 20000us.
+ */
+ uint16_t dec_offset;
+ /*
+ * The offset in microseconds from the VFR to indicate the deadline to
+ * receive an Rx vocoder packet. The offset should be less than 20000us.
+ * Rx vocoder packets received after this deadline are not guaranteed to
+ * be processed.
+ */
+} __packed;
+
+struct vss_imvm_cmd_create_control_session_t {
+ char name[SESSION_NAME_LEN];
+ /*
+ * A variable-sized stream name.
+ *
+ * The stream name size is the payload size minus the size of the other
+ * fields.
+ */
+} __packed;
+
+
+struct vss_imvm_cmd_set_policy_dual_control_t {
+ bool enable_flag;
+ /* Set to TRUE to enable modem state machine control */
+} __packed;
+
+struct vss_iwidevoice_cmd_set_widevoice_t {
+ uint32_t enable;
+ /* WideVoice enable/disable; possible values:
+ * - 0 -- WideVoice disabled
+ * - 1 -- WideVoice enabled
+ */
+} __packed;
+
+struct mvm_attach_vocproc_cmd {
+ struct apr_hdr hdr;
+ struct vss_istream_cmd_attach_vocproc_t mvm_attach_cvp_handle;
+} __packed;
+
+struct mvm_detach_vocproc_cmd {
+ struct apr_hdr hdr;
+ struct vss_istream_cmd_detach_vocproc_t mvm_detach_cvp_handle;
+} __packed;
+
+struct mvm_create_ctl_session_cmd {
+ struct apr_hdr hdr;
+ struct vss_imvm_cmd_create_control_session_t mvm_session;
+} __packed;
+
+struct mvm_modem_dual_control_session_cmd {
+ struct apr_hdr hdr;
+ struct vss_imvm_cmd_set_policy_dual_control_t voice_ctl;
+} __packed;
+
+struct mvm_set_tty_mode_cmd {
+ struct apr_hdr hdr;
+ struct vss_istream_cmd_set_tty_mode_t tty_mode;
+} __packed;
+
+struct mvm_attach_stream_cmd {
+ struct apr_hdr hdr;
+ struct vss_imvm_cmd_attach_stream_t attach_stream;
+} __packed;
+
+struct mvm_detach_stream_cmd {
+ struct apr_hdr hdr;
+ struct vss_imvm_cmd_detach_stream_t detach_stream;
+} __packed;
+
+struct mvm_set_network_cmd {
+ struct apr_hdr hdr;
+ struct vss_icommon_cmd_set_network_t network;
+} __packed;
+
+struct mvm_set_voice_timing_cmd {
+ struct apr_hdr hdr;
+ struct vss_icommon_cmd_set_voice_timing_t timing;
+} __packed;
+
+struct mvm_set_widevoice_enable_cmd {
+ struct apr_hdr hdr;
+ struct vss_iwidevoice_cmd_set_widevoice_t vss_set_wv;
+} __packed;
+
+/* TO CVS commands */
+#define VSS_ISTREAM_CMD_CREATE_PASSIVE_CONTROL_SESSION 0x00011140
+/**< Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+#define VSS_ISTREAM_CMD_CREATE_FULL_CONTROL_SESSION 0x000110F7
+/* Create a new full control stream session. */
+
+#define APRV2_IBASIC_CMD_DESTROY_SESSION 0x0001003C
+
+#define VSS_ISTREAM_CMD_SET_MUTE 0x00011022
+
+#define VSS_ISTREAM_CMD_REGISTER_CALIBRATION_DATA 0x00011279
+
+#define VSS_ISTREAM_CMD_DEREGISTER_CALIBRATION_DATA 0x0001127A
+
+#define VSS_ISTREAM_CMD_SET_MEDIA_TYPE 0x00011186
+/* Set media type on the stream. */
+
+#define VSS_ISTREAM_EVT_SEND_ENC_BUFFER 0x00011015
+/* Event sent by the stream to its client to provide an encoded packet. */
+
+#define VSS_ISTREAM_EVT_REQUEST_DEC_BUFFER 0x00011017
+/* Event sent by the stream to its client requesting for a decoder packet.
+ * The client should respond with a VSS_ISTREAM_EVT_SEND_DEC_BUFFER event.
+ */
+
+#define VSS_ISTREAM_EVT_SEND_DEC_BUFFER 0x00011016
+/* Event sent by the client to the stream in response to a
+ * VSS_ISTREAM_EVT_REQUEST_DEC_BUFFER event, providing a decoder packet.
+ */
+
+#define VSS_ISTREAM_CMD_VOC_AMR_SET_ENC_RATE 0x0001113E
+/* Set AMR encoder rate. */
+
+#define VSS_ISTREAM_CMD_VOC_AMRWB_SET_ENC_RATE 0x0001113F
+/* Set AMR-WB encoder rate. */
+
+#define VSS_ISTREAM_CMD_CDMA_SET_ENC_MINMAX_RATE 0x00011019
+/* Set encoder minimum and maximum rate. */
+
+#define VSS_ISTREAM_CMD_SET_ENC_DTX_MODE 0x0001101D
+/* Set encoder DTX mode. */
+
+#define MODULE_ID_VOICE_MODULE_FENS 0x00010EEB
+#define MODULE_ID_VOICE_MODULE_ST 0x00010EE3
+#define VOICE_PARAM_MOD_ENABLE 0x00010E00
+#define MOD_ENABLE_PARAM_LEN 4
+
+#define VSS_ISTREAM_CMD_START_PLAYBACK 0x00011238
+/* Start in-call music delivery on the Tx voice path. */
+
+#define VSS_ISTREAM_CMD_STOP_PLAYBACK 0x00011239
+/* Stop the in-call music delivery on the Tx voice path. */
+
+#define VSS_ISTREAM_CMD_START_RECORD 0x00011236
+/* Start in-call conversation recording. */
+#define VSS_ISTREAM_CMD_STOP_RECORD 0x00011237
+/* Stop in-call conversation recording. */
+
+#define VSS_TAP_POINT_NONE 0x00010F78
+/* Indicates no tapping for specified path. */
+
+#define VSS_TAP_POINT_STREAM_END 0x00010F79
+/* Indicates that specified path should be tapped at the end of the stream. */
+
+struct vss_istream_cmd_start_record_t {
+ uint32_t rx_tap_point;
+ /* Tap point to use on the Rx path. Supported values are:
+ * VSS_TAP_POINT_NONE : Do not record Rx path.
+ * VSS_TAP_POINT_STREAM_END : Rx tap point is at the end of the stream.
+ */
+ uint32_t tx_tap_point;
+ /* Tap point to use on the Tx path. Supported values are:
+ * VSS_TAP_POINT_NONE : Do not record tx path.
+ * VSS_TAP_POINT_STREAM_END : Tx tap point is at the end of the stream.
+ */
+} __packed;
+
+struct vss_istream_cmd_create_passive_control_session_t {
+ char name[SESSION_NAME_LEN];
+ /**<
+ * A variable-sized stream name.
+ *
+ * The stream name size is the payload size minus the size of the other
+ * fields.
+ */
+} __packed;
+
+struct vss_istream_cmd_set_mute_t {
+ uint16_t direction;
+ /**<
+ * 0 : TX only
+ * 1 : RX only
+	 * 2 : TX and RX
+ */
+ uint16_t mute_flag;
+ /**<
+ * Mute, un-mute.
+ *
+ * 0 : Silence disable
+ * 1 : Silence enable
+	 * 2 : CNG enable. Applicable to TX only; if set on RX, the
+	 *     behavior is the same as 1.
+ */
+} __packed;
+
+struct vss_istream_cmd_create_full_control_session_t {
+ uint16_t direction;
+ /*
+ * Stream direction.
+ *
+ * 0 : TX only
+ * 1 : RX only
+ * 2 : TX and RX
+ * 3 : TX and RX loopback
+ */
+ uint32_t enc_media_type;
+ /* Tx vocoder type. (Refer to VSS_MEDIA_ID_XXX). */
+ uint32_t dec_media_type;
+ /* Rx vocoder type. (Refer to VSS_MEDIA_ID_XXX). */
+ uint32_t network_id;
+ /* Network ID. (Refer to VSS_NETWORK_ID_XXX). */
+ char name[SESSION_NAME_LEN];
+ /*
+ * A variable-sized stream name.
+ *
+ * The stream name size is the payload size minus the size of the other
+ * fields.
+ */
+} __packed;
+
+struct vss_istream_cmd_set_media_type_t {
+ uint32_t rx_media_id;
+ /* Set the Rx vocoder type. (Refer to VSS_MEDIA_ID_XXX). */
+ uint32_t tx_media_id;
+ /* Set the Tx vocoder type. (Refer to VSS_MEDIA_ID_XXX). */
+} __packed;
+
+struct vss_istream_evt_send_enc_buffer_t {
+ uint32_t media_id;
+ /* Media ID of the packet. */
+ uint8_t packet_data[MAX_VOC_PKT_SIZE];
+ /* Packet data buffer. */
+} __packed;
+
+struct vss_istream_evt_send_dec_buffer_t {
+ uint32_t media_id;
+ /* Media ID of the packet. */
+ uint8_t packet_data[MAX_VOC_PKT_SIZE];
+ /* Packet data. */
+} __packed;
+
+struct vss_istream_cmd_voc_amr_set_enc_rate_t {
+ uint32_t mode;
+ /* Set the AMR encoder rate.
+ *
+ * 0x00000000 : 4.75 kbps
+ * 0x00000001 : 5.15 kbps
+ * 0x00000002 : 5.90 kbps
+ * 0x00000003 : 6.70 kbps
+ * 0x00000004 : 7.40 kbps
+ * 0x00000005 : 7.95 kbps
+ * 0x00000006 : 10.2 kbps
+ * 0x00000007 : 12.2 kbps
+ */
+} __packed;
+
+struct vss_istream_cmd_voc_amrwb_set_enc_rate_t {
+ uint32_t mode;
+ /* Set the AMR-WB encoder rate.
+ *
+ * 0x00000000 : 6.60 kbps
+ * 0x00000001 : 8.85 kbps
+ * 0x00000002 : 12.65 kbps
+ * 0x00000003 : 14.25 kbps
+ * 0x00000004 : 15.85 kbps
+ * 0x00000005 : 18.25 kbps
+ * 0x00000006 : 19.85 kbps
+ * 0x00000007 : 23.05 kbps
+ * 0x00000008 : 23.85 kbps
+ */
+} __packed;
+
+struct vss_istream_cmd_cdma_set_enc_minmax_rate_t {
+ uint16_t min_rate;
+ /* Set the lower bound encoder rate.
+ *
+ * 0x0000 : Blank frame
+ * 0x0001 : Eighth rate
+ * 0x0002 : Quarter rate
+ * 0x0003 : Half rate
+ * 0x0004 : Full rate
+ */
+ uint16_t max_rate;
+ /* Set the upper bound encoder rate.
+ *
+ * 0x0000 : Blank frame
+ * 0x0001 : Eighth rate
+ * 0x0002 : Quarter rate
+ * 0x0003 : Half rate
+ * 0x0004 : Full rate
+ */
+} __packed;
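+
+/*
+ * Usage sketch (illustrative only): forcing full-rate-only encoding sets
+ * both bounds to 0x0004 (full rate), e.g.
+ *
+ *	struct vss_istream_cmd_cdma_set_enc_minmax_rate_t rate = {
+ *		.min_rate = 0x0004,
+ *		.max_rate = 0x0004,
+ *	};
+ */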
+
+struct vss_istream_cmd_set_enc_dtx_mode_t {
+ uint32_t enable;
+ /* Toggle DTX on or off.
+ *
+ * 0 : Disables DTX
+ * 1 : Enables DTX
+ */
+} __packed;
+
+struct vss_istream_cmd_register_calibration_data_t {
+ uint32_t phys_addr;
+ /* Physical address to be registered with the stream. The calibration data
+ * is stored at this address.
+ */
+ uint32_t mem_size;
+ /* Size of the calibration data in bytes. */
+};
+
+struct vss_icommon_cmd_set_ui_property_enable_t {
+ uint32_t module_id;
+ /* Unique ID of the module. */
+ uint32_t param_id;
+ /* Unique ID of the parameter. */
+ uint16_t param_size;
+ /* Size of the parameter in bytes: MOD_ENABLE_PARAM_LEN */
+ uint16_t reserved;
+ /* Reserved; set to 0. */
+ uint16_t enable;
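+ /* Enable flag for the module. */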
+ uint16_t reserved_field;
+ /* Reserved, set to 0. */
+};
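+
+/*
+ * Usage sketch (illustrative only): enabling a post-processing module,
+ * with param_size covering the 4-byte enable/reserved pair
+ * (MOD_ENABLE_PARAM_LEN). EXAMPLE_MODULE_ID and EXAMPLE_PARAM_ID are
+ * placeholders, not real IDs.
+ *
+ *	struct vss_icommon_cmd_set_ui_property_enable_t pp = {
+ *		.module_id = EXAMPLE_MODULE_ID,
+ *		.param_id = EXAMPLE_PARAM_ID,
+ *		.param_size = MOD_ENABLE_PARAM_LEN,
+ *		.reserved = 0,
+ *		.enable = 1,
+ *		.reserved_field = 0,
+ *	};
+ */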
+
+struct cvs_create_passive_ctl_session_cmd {
+ struct apr_hdr hdr;
+ struct vss_istream_cmd_create_passive_control_session_t cvs_session;
+} __packed;
+
+struct cvs_create_full_ctl_session_cmd {
+ struct apr_hdr hdr;
+ struct vss_istream_cmd_create_full_control_session_t cvs_session;
+} __packed;
+
+struct cvs_destroy_session_cmd {
+ struct apr_hdr hdr;
+} __packed;
+
+struct cvs_set_mute_cmd {
+ struct apr_hdr hdr;
+ struct vss_istream_cmd_set_mute_t cvs_set_mute;
+} __packed;
+
+struct cvs_set_media_type_cmd {
+ struct apr_hdr hdr;
+ struct vss_istream_cmd_set_media_type_t media_type;
+} __packed;
+
+struct cvs_send_dec_buf_cmd {
+ struct apr_hdr hdr;
+ struct vss_istream_evt_send_dec_buffer_t dec_buf;
+} __packed;
+
+struct cvs_set_amr_enc_rate_cmd {
+ struct apr_hdr hdr;
+ struct vss_istream_cmd_voc_amr_set_enc_rate_t amr_rate;
+} __packed;
+
+struct cvs_set_amrwb_enc_rate_cmd {
+ struct apr_hdr hdr;
+ struct vss_istream_cmd_voc_amrwb_set_enc_rate_t amrwb_rate;
+} __packed;
+
+struct cvs_set_cdma_enc_minmax_rate_cmd {
+ struct apr_hdr hdr;
+ struct vss_istream_cmd_cdma_set_enc_minmax_rate_t cdma_rate;
+} __packed;
+
+struct cvs_set_enc_dtx_mode_cmd {
+ struct apr_hdr hdr;
+ struct vss_istream_cmd_set_enc_dtx_mode_t dtx_mode;
+} __packed;
+
+struct cvs_register_cal_data_cmd {
+ struct apr_hdr hdr;
+ struct vss_istream_cmd_register_calibration_data_t cvs_cal_data;
+} __packed;
+
+struct cvs_deregister_cal_data_cmd {
+ struct apr_hdr hdr;
+} __packed;
+
+struct cvs_set_pp_enable_cmd {
+ struct apr_hdr hdr;
+ struct vss_icommon_cmd_set_ui_property_enable_t vss_set_pp;
+} __packed;
+
+struct cvs_start_record_cmd {
+ struct apr_hdr hdr;
+ struct vss_istream_cmd_start_record_t rec_mode;
+} __packed;
+
+/* TO CVP commands */
+
+#define VSS_IVOCPROC_CMD_CREATE_FULL_CONTROL_SESSION 0x000100C3
+/**< Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+#define APRV2_IBASIC_CMD_DESTROY_SESSION 0x0001003C
+
+#define VSS_IVOCPROC_CMD_SET_DEVICE 0x000100C4
+
+#define VSS_IVOCPROC_CMD_SET_VP3_DATA 0x000110EB
+
+#define VSS_IVOCPROC_CMD_SET_RX_VOLUME_INDEX 0x000110EE
+
+#define VSS_IVOCPROC_CMD_ENABLE 0x000100C6
+/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+#define VSS_IVOCPROC_CMD_DISABLE 0x000110E1
+/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+#define VSS_IVOCPROC_CMD_REGISTER_CALIBRATION_DATA 0x00011275
+#define VSS_IVOCPROC_CMD_DEREGISTER_CALIBRATION_DATA 0x00011276
+
+#define VSS_IVOCPROC_CMD_REGISTER_VOLUME_CAL_TABLE 0x00011277
+#define VSS_IVOCPROC_CMD_DEREGISTER_VOLUME_CAL_TABLE 0x00011278
+
+#define VSS_IVOCPROC_TOPOLOGY_ID_NONE 0x00010F70
+#define VSS_IVOCPROC_TOPOLOGY_ID_TX_SM_ECNS 0x00010F71
+#define VSS_IVOCPROC_TOPOLOGY_ID_TX_DM_FLUENCE 0x00010F72
+
+#define VSS_IVOCPROC_TOPOLOGY_ID_RX_DEFAULT 0x00010F77
+
+/* Network IDs */
+#define VSS_NETWORK_ID_DEFAULT 0x00010037
+#define VSS_NETWORK_ID_VOIP_NB 0x00011240
+#define VSS_NETWORK_ID_VOIP_WB 0x00011241
+#define VSS_NETWORK_ID_VOIP_WV 0x00011242
+
+/* Media types */
+#define VSS_MEDIA_ID_EVRC_MODEM 0x00010FC2
+/* 80-VF690-47 CDMA enhanced variable rate vocoder modem format. */
+#define VSS_MEDIA_ID_AMR_NB_MODEM 0x00010FC6
+/* 80-VF690-47 UMTS AMR-NB vocoder modem format. */
+#define VSS_MEDIA_ID_AMR_WB_MODEM 0x00010FC7
+/* 80-VF690-47 UMTS AMR-WB vocoder modem format. */
+#define VSS_MEDIA_ID_PCM_NB 0x00010FCB
+#define VSS_MEDIA_ID_PCM_WB 0x00010FCC
+/* Linear PCM (16-bit, little-endian). */
+#define VSS_MEDIA_ID_G711_ALAW 0x00010FCD
+/* G.711 a-law (contains two 10ms vocoder frames). */
+#define VSS_MEDIA_ID_G711_MULAW 0x00010FCE
+/* G.711 mu-law (contains two 10ms vocoder frames). */
+#define VSS_MEDIA_ID_G729 0x00010FD0
+/* G.729AB (contains two 10ms vocoder frames). */
+#define VSS_MEDIA_ID_4GV_NB_MODEM 0x00010FC3
+/* CDMA EVRC-B vocoder modem format. */
+#define VSS_MEDIA_ID_4GV_WB_MODEM 0x00010FC4
+/* CDMA EVRC-WB vocoder modem format. */
+
+#define VSS_IVOCPROC_CMD_SET_MUTE 0x000110EF
+
+#define VOICE_CMD_SET_PARAM 0x00011006
+#define VOICE_CMD_GET_PARAM 0x00011007
+#define VOICE_EVT_GET_PARAM_ACK 0x00011008
+
+struct vss_ivocproc_cmd_create_full_control_session_t {
+ uint16_t direction;
+ /*
+ * Stream direction.
+ * 0 : TX only
+ * 1 : RX only
+ * 2 : TX and RX
+ */
+ uint32_t tx_port_id;
+ /*
+ * TX device port ID which vocproc will connect to. If not supplying a
+ * port ID set to VSS_IVOCPROC_PORT_ID_NONE.
+ */
+ uint32_t tx_topology_id;
+ /*
+ * Tx leg topology ID. If not supplying a topology ID set to
+ * VSS_IVOCPROC_TOPOLOGY_ID_NONE.
+ */
+ uint32_t rx_port_id;
+ /*
+ * RX device port ID which vocproc will connect to. If not supplying a
+ * port ID set to VSS_IVOCPROC_PORT_ID_NONE.
+ */
+ uint32_t rx_topology_id;
+ /*
+ * Rx leg topology ID. If not supplying a topology ID set to
+ * VSS_IVOCPROC_TOPOLOGY_ID_NONE.
+ */
+ int32_t network_id;
+ /*
+ * Network ID. (Refer to VSS_NETWORK_ID_XXX). If not supplying a network
+ * ID set to VSS_NETWORK_ID_DEFAULT.
+ */
+} __packed;
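+
+/*
+ * Usage sketch (illustrative only): creating a TX and RX vocproc session
+ * (direction 2) with the default Rx topology and network, leaving the Tx
+ * leg as pass-through. VSS_IVOCPROC_PORT_ID_NONE is assumed to be defined
+ * elsewhere in this driver.
+ *
+ *	struct vss_ivocproc_cmd_create_full_control_session_t sess = {
+ *		.direction = 2,
+ *		.tx_port_id = VSS_IVOCPROC_PORT_ID_NONE,
+ *		.tx_topology_id = VSS_IVOCPROC_TOPOLOGY_ID_NONE,
+ *		.rx_port_id = VSS_IVOCPROC_PORT_ID_NONE,
+ *		.rx_topology_id = VSS_IVOCPROC_TOPOLOGY_ID_RX_DEFAULT,
+ *		.network_id = VSS_NETWORK_ID_DEFAULT,
+ *	};
+ */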
+
+struct vss_ivocproc_cmd_set_volume_index_t {
+ uint16_t vol_index;
+ /**<
+ * Volume index utilized by the vocproc to index into the volume table
+ * provided in VSS_IVOCPROC_CMD_CACHE_VOLUME_CALIBRATION_TABLE and set
+ * volume on the VDSP.
+ */
+} __packed;
+
+struct vss_ivocproc_cmd_set_device_t {
+ uint32_t tx_port_id;
+ /**<
+ * TX device port ID which vocproc will connect to.
+ * VSS_IVOCPROC_PORT_ID_NONE means vocproc will not connect to any port.
+ */
+ uint32_t tx_topology_id;
+ /**<
+ * TX leg topology ID.
+ * VSS_IVOCPROC_TOPOLOGY_ID_NONE means vocproc does not contain any
+ * pre/post-processing blocks and is pass-through.
+ */
+ int32_t rx_port_id;
+ /**<
+ * RX device port ID which vocproc will connect to.
+ * VSS_IVOCPROC_PORT_ID_NONE means vocproc will not connect to any port.
+ */
+ uint32_t rx_topology_id;
+ /**<
+ * RX leg topology ID.
+ * VSS_IVOCPROC_TOPOLOGY_ID_NONE means vocproc does not contain any
+ * pre/post-processing blocks and is pass-through.
+ */
+} __packed;
+
+struct vss_ivocproc_cmd_register_calibration_data_t {
+ uint32_t phys_addr;
+ /* Physical address to be registered with the vocproc. Calibration data
+ * is stored at this address.
+ */
+ uint32_t mem_size;
+ /* Size of the calibration data in bytes. */
+} __packed;
+
+struct vss_ivocproc_cmd_register_volume_cal_table_t {
+ uint32_t phys_addr;
+ /* Physical address to be registered with the vocproc. The volume
+ * calibration table is stored at this location.
+ */
+
+ uint32_t mem_size;
+ /* Size of the volume calibration table in bytes. */
+} __packed;
+
+struct vss_ivocproc_cmd_set_mute_t {
+ uint16_t direction;
+ /*
+ * 0 : TX only.
+ * 1 : RX only.
+ * 2 : TX and RX.
+ */
+ uint16_t mute_flag;
+ /*
+ * Mute, un-mute.
+ *
+ * 0 : Disable.
+ * 1 : Enable.
+ */
+} __packed;
+
+struct cvp_create_full_ctl_session_cmd {
+ struct apr_hdr hdr;
+ struct vss_ivocproc_cmd_create_full_control_session_t cvp_session;
+} __packed;
+
+struct cvp_command {
+ struct apr_hdr hdr;
+} __packed;
+
+struct cvp_set_device_cmd {
+ struct apr_hdr hdr;
+ struct vss_ivocproc_cmd_set_device_t cvp_set_device;
+} __packed;
+
+struct cvp_set_vp3_data_cmd {
+ struct apr_hdr hdr;
+} __packed;
+
+struct cvp_set_rx_volume_index_cmd {
+ struct apr_hdr hdr;
+ struct vss_ivocproc_cmd_set_volume_index_t cvp_set_vol_idx;
+} __packed;
+
+struct cvp_register_cal_data_cmd {
+ struct apr_hdr hdr;
+ struct vss_ivocproc_cmd_register_calibration_data_t cvp_cal_data;
+} __packed;
+
+struct cvp_deregister_cal_data_cmd {
+ struct apr_hdr hdr;
+} __packed;
+
+struct cvp_register_vol_cal_table_cmd {
+ struct apr_hdr hdr;
+ struct vss_ivocproc_cmd_register_volume_cal_table_t cvp_vol_cal_tbl;
+} __packed;
+
+struct cvp_deregister_vol_cal_table_cmd {
+ struct apr_hdr hdr;
+} __packed;
+
+struct cvp_set_mute_cmd {
+ struct apr_hdr hdr;
+ struct vss_ivocproc_cmd_set_mute_t cvp_set_mute;
+} __packed;
+
+/* CB for up-link packets. */
+typedef void (*ul_cb_fn)(uint8_t *voc_pkt,
+ uint32_t pkt_len,
+ void *private_data);
+
+/* CB for down-link packets. */
+typedef void (*dl_cb_fn)(uint8_t *voc_pkt,
+ uint32_t *pkt_len,
+ void *private_data);
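+
+/*
+ * Usage sketch (illustrative only): a voice-packet client defines handlers
+ * matching these typedefs and registers them with voc_register_mvs_cb()
+ * (declared below). All names here are placeholders.
+ *
+ *	static void example_ul_cb(uint8_t *voc_pkt, uint32_t pkt_len,
+ *				  void *private_data)
+ *	{
+ *		consume the up-link (encoder) packet of pkt_len bytes
+ *	}
+ *
+ *	static void example_dl_cb(uint8_t *voc_pkt, uint32_t *pkt_len,
+ *				  void *private_data)
+ *	{
+ *		fill voc_pkt with a down-link packet and set *pkt_len
+ *	}
+ *
+ *	voc_register_mvs_cb(example_ul_cb, example_dl_cb, client_data);
+ */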
+
+
+struct mvs_driver_info {
+ uint32_t media_type;
+ uint32_t rate;
+ uint32_t network_type;
+ uint32_t dtx_mode;
+ ul_cb_fn ul_cb;
+ dl_cb_fn dl_cb;
+ void *private_data;
+};
+
+struct incall_rec_info {
+ uint32_t rec_enable;
+ uint32_t rec_mode;
+ uint32_t recording;
+};
+
+struct incall_music_info {
+ uint32_t play_enable;
+ uint32_t playing;
+ int count;
+ int force;
+};
+
+struct voice_data {
+ int voc_state; /* INIT, CHANGE, RELEASE, RUN */
+
+ wait_queue_head_t mvm_wait;
+ wait_queue_head_t cvs_wait;
+ wait_queue_head_t cvp_wait;
+
+ /* cache the values related to Rx and Tx */
+ struct device_data dev_rx;
+ struct device_data dev_tx;
+
+ u32 mvm_state;
+ u32 cvs_state;
+ u32 cvp_state;
+
+ /* Handle to MVM in the Q6 */
+ u16 mvm_handle;
+ /* Handle to CVS in the Q6 */
+ u16 cvs_handle;
+ /* Handle to CVP in the Q6 */
+ u16 cvp_handle;
+
+ struct mutex lock;
+
+ uint16_t sidetone_gain;
+ uint8_t tty_mode;
+ /* widevoice enable value */
+ uint8_t wv_enable;
+ /* slowtalk enable value */
+ uint32_t st_enable;
+ /* FENS enable value */
+ uint32_t fens_enable;
+
+ struct voice_dev_route_state voc_route_state;
+
+ u16 session_id;
+
+ struct incall_rec_info rec_info;
+
+ struct incall_music_info music_info;
+
+ struct voice_rec_route_state rec_route_state;
+};
+
+struct cal_mem {
+ struct ion_handle *handle;
+ uint32_t phy;
+ void *buf;
+};
+
+#define MAX_VOC_SESSIONS 3
+#define SESSION_ID_BASE 0xFFF0
+
+struct common_data {
+ /* these default values are for all devices */
+ uint32_t default_mute_val;
+ uint32_t default_vol_val;
+ uint32_t default_sample_val;
+
+ /* APR to MVM in the Q6 */
+ void *apr_q6_mvm;
+ /* APR to CVS in the Q6 */
+ void *apr_q6_cvs;
+ /* APR to CVP in the Q6 */
+ void *apr_q6_cvp;
+
+ struct ion_client *client;
+ struct cal_mem cvp_cal;
+ struct cal_mem cvs_cal;
+
+ struct mutex common_lock;
+
+ struct mvs_driver_info mvs_info;
+
+ struct voice_data voice[MAX_VOC_SESSIONS];
+};
+
+void voc_register_mvs_cb(ul_cb_fn ul_cb,
+ dl_cb_fn dl_cb,
+ void *private_data);
+
+void voc_config_vocoder(uint32_t media_type,
+ uint32_t rate,
+ uint32_t network_type,
+ uint32_t dtx_mode);
+
+enum {
+ DEV_RX = 0,
+ DEV_TX,
+};
+
+enum {
+ RX_PATH = 0,
+ TX_PATH,
+};
+
+/* Called by the ALSA driver. */
+int voc_set_pp_enable(uint16_t session_id, uint32_t module_id, uint32_t enable);
+int voc_get_pp_enable(uint16_t session_id, uint32_t module_id);
+int voc_set_widevoice_enable(uint16_t session_id, uint32_t wv_enable);
+uint32_t voc_get_widevoice_enable(uint16_t session_id);
+uint8_t voc_get_tty_mode(uint16_t session_id);
+int voc_set_tty_mode(uint16_t session_id, uint8_t tty_mode);
+int voc_start_voice_call(uint16_t session_id);
+int voc_end_voice_call(uint16_t session_id);
+int voc_set_rxtx_port(uint16_t session_id,
+ uint32_t dev_port_id,
+ uint32_t dev_type);
+int voc_set_rx_vol_index(uint16_t session_id, uint32_t dir, uint32_t voc_idx);
+int voc_set_tx_mute(uint16_t session_id, uint32_t dir, uint32_t mute);
+int voc_set_rx_device_mute(uint16_t session_id, uint32_t mute);
+int voc_get_rx_device_mute(uint16_t session_id);
+int voc_disable_cvp(uint16_t session_id);
+int voc_enable_cvp(uint16_t session_id);
+int voc_set_route_flag(uint16_t session_id, uint8_t path_dir, uint8_t set);
+uint8_t voc_get_route_flag(uint16_t session_id, uint8_t path_dir);
+
+#define VOICE_SESSION_NAME "Voice session"
+#define VOIP_SESSION_NAME "VoIP session"
+#define VOLTE_SESSION_NAME "VoLTE session"
+uint16_t voc_get_session_id(char *name);
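+
+/*
+ * Usage sketch (illustrative only): the ALSA machine/platform code is
+ * expected to look up a session by name and then drive the call, e.g.
+ *
+ *	uint16_t session_id = voc_get_session_id(VOICE_SESSION_NAME);
+ *
+ *	voc_set_rxtx_port(session_id, rx_port, DEV_RX);
+ *	voc_set_rxtx_port(session_id, tx_port, DEV_TX);
+ *	voc_start_voice_call(session_id);
+ *	...
+ *	voc_end_voice_call(session_id);
+ *
+ * rx_port and tx_port are placeholder AFE port IDs chosen by the caller;
+ * using DEV_RX/DEV_TX as the dev_type argument is an assumption based on
+ * the enum above.
+ */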
+
+int voc_start_playback(uint32_t set);
+int voc_start_record(uint32_t port_id, uint32_t set);
+#endif