Merge "ARM: dts: msm: make cmd mode as default display for SDM845" into msm-4.9
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt b/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt
index 2347477..8a3e704 100644
--- a/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt
@@ -25,8 +25,9 @@
Value type: <stringlist>
Definition: Address names. Must be "osm_l3_base", "osm_pwrcl_base",
"osm_perfcl_base", "l3_pll", "pwrcl_pll", "perfcl_pll",
- "l3_sequencer", "pwrcl_sequencer", "perfcl_sequencer".
- Optionally, "l3_efuse", "pwrcl_efuse", "perfcl_efuse".
+ "l3_sequencer", "pwrcl_sequencer", or "perfcl_sequencer".
+ Optionally, "l3_efuse", "pwrcl_efuse", "perfcl_efuse",
+ "pwrcl_acd", "perfcl_acd", "l3_acd".
Must be specified in the same order as the corresponding
addresses are specified in the reg property.
@@ -328,6 +329,77 @@
Definition: Contains the addresses of the RAILx_CLKDOMy_PLL_MIN_FREQ
registers for the three clock domains.
+- qcom,acdtd-val
+ Usage: required if pwrcl_acd, perfcl_acd or l3_acd registers are
+ specified
+ Value type: <prop-encoded-array>
+ Definition: Array which defines the values to program to the ACD
+ Tunable-Length Delay register for the L3, power and
+ performance clusters.
+
+- qcom,acdcr-val
+ Usage: required if pwrcl_acd, perfcl_acd or l3_acd registers are
+ specified
+ Value type: <prop-encoded-array>
+ Definition: Array which defines the values for the ACD control register
+ for the L3, power and performance clusters.
+
+- qcom,acdsscr-val
+ Usage: required if pwrcl_acd, perfcl_acd or l3_acd registers are
+ specified
+ Value type: <prop-encoded-array>
+ Definition: Array which defines the values for the ACD Soft Start Control
+ register for the L3, power and performance clusters.
+
+- qcom,acdextint0-val
+ Usage: required if pwrcl_acd, perfcl_acd or l3_acd registers are
+ specified
+ Value type: <prop-encoded-array>
+ Definition: Array which defines the initial values for the ACD
+ external interface configuration register for the L3, power
+ and performance clusters.
+
+- qcom,acdextint1-val
+ Usage: required if pwrcl_acd, perfcl_acd or l3_acd registers are
+ specified
+ Value type: <prop-encoded-array>
+ Definition: Array which defines the final values for the ACD
+ external interface configuration register for the L3, power
+ and performance clusters.
+
+- qcom,acdautoxfer-val
+ Usage: required if pwrcl_acd, perfcl_acd or l3_acd registers are
+ specified
+ Value type: <prop-encoded-array>
+ Definition: Array which defines the values for the ACD auto transfer
+ control register for the L3, power and performance clusters.
+
+- qcom,acdavg-init
+ Usage: optional if pwrcl_acd, perfcl_acd or l3_acd registers are
+ specified
+ Value type: <prop-encoded-array>
+ Definition: Array which defines if the AVG feature for ACD should be
+ initialized for the L3, power and performance clusters.
+ Valid values are 0 or 1.
+
+- qcom,acdavgcfg0-val
+ Usage: required if qcom,acdavg-init is true for an ACD clock domain
+ Value type: <prop-encoded-array>
+ Definition: Array which defines the values for the ACD AVG CFG0
+ registers for the L3, power and performance clusters.
+
+- qcom,acdavgcfg1-val
+ Usage: required if qcom,acdavg-init is true for an ACD clock domain
+ Value type: <prop-encoded-array>
+ Definition: Array which defines the values for the ACD AVG CFG1
+ registers for the L3, power and performance clusters.
+
+- qcom,acdavgcfg2-val
+ Usage: required if qcom,acdavg-init is true for an ACD clock domain
+ Value type: <prop-encoded-array>
+ Definition: Array which defines the values for the ACD AVG CFG2
+ registers for the L3, power and performance clusters.
+
- clock-names
Usage: required
Value type: <string>
@@ -349,11 +421,27 @@
<0x178b0000 0x1000>,
<0x17d42400 0x0c00>,
<0x17d44400 0x0c00>,
- <0x17d46c00 0x0c00>;
+ <0x17d46c00 0x0c00>,
+ <0x17930000 0x10000>,
+ <0x17920000 0x10000>,
+ <0x17910000 0x10000>;
reg-names = "osm_l3_base", "osm_pwrcl_base", "osm_perfcl_base",
"l3_pll", "pwrcl_pll", "perfcl_pll",
"l3_sequencer", "pwrcl_sequencer",
- "perfcl_sequencer";
+ "perfcl_sequencer", "l3_acd", "pwrcl_acd",
+ "perfcl_acd";
+
+ /* ACD configurations for L3, Silver, and Gold clusters */
+ qcom,acdtd-val = <0x0000b411 0x0000b411 0x0000b411>;
+ qcom,acdcr-val = <0x002c5ffd 0x002c5ffd 0x002c5ffd>;
+ qcom,acdsscr-val = <0x00000901 0x00000901 0x00000901>;
+ qcom,acdextint0-val = <0x2cf9ae8 0x2cf9ae8 0x2cf9ae8>;
+ qcom,acdextint1-val = <0x2cf9afe 0x2cf9afe 0x2cf9afe>;
+ qcom,acdautoxfer-val = <0x00000015 0x00000015 0x00000015>;
+ qcom,acdavgcfg2-val = <0x0 0x56a38822 0x56a38822>;
+ qcom,acdavgcfg1-val = <0x0 0x27104e20 0x27104e20>;
+ qcom,acdavgcfg0-val = <0x0 0xa08007a1 0xa08007a1>;
+ qcom,acdavg-init = <0 1 1>;
vdd-l3-supply = <&apc0_l3_vreg>;
vdd-pwrcl-supply = <&apc0_pwrcl_vreg>;
diff --git a/Documentation/devicetree/bindings/dma/qcom_gpi.txt b/Documentation/devicetree/bindings/dma/qcom_gpi.txt
new file mode 100644
index 0000000..3b3b713
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/qcom_gpi.txt
@@ -0,0 +1,84 @@
+Qualcomm Technologies, Inc. GPI DMA controller
+
+The QCOM GPI DMA controller provides DMA capabilities for
+peripheral buses such as I2C, UART, and SPI.
+
+==============
+Node Structure
+==============
+
+Main node properties:
+
+- #dma-cells
+ Usage: required
+ Value type: <u32>
+ Definition: Number of parameters the client will provide. Must be set to 6.
+ 1st parameter: gpii index
+ 2nd parameter: channel index
+ 3rd parameter: serial engine index
+ 4th parameter: bus protocol, 1 for SPI, 2 for UART, 3 for I2C
+ 5th parameter: channel ring length in transfer ring elements
+ 6th parameter: event processing priority, set to 0 for lowest latency
+
+- compatible
+ Usage: required
+ Value type: <string>
+ Definition: "qcom,gpi-dma"
+
+- reg
+ Usage: required
+ Value type: Array of <u32>
+ Definition: register address space location and size
+
+- reg-names
+ Usage: required
+ Value type: <string>
+ Definition: register space name, must be "gpi-top"
+
+- interrupts
+ Usage: required
+ Value type: Array of <u32>
+ Definition: Array of tuples, one describing the interrupt line for
+ each GPII instance.
+
+- qcom,max-num-gpii
+ Usage: required
+ Value type: <u32>
+ Definition: Total number of GPII instances available for this controller.
+
+- qcom,gpii-mask
+ Usage: required
+ Value type: <u32>
+ Definition: Bitmap of the GPII instances available to the HLOS.
+
+- qcom,ev-factor
+ Usage: required
+ Value type: <u32>
+ Definition: Event ring size relative to the channel transfer ring:
+ event ring length = ev-factor * transfer ring length.
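+ For example, ev-factor = 2 with a 64-element transfer ring
+ yields a 128-element event ring.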
+
+- iommus
+ Usage: required
+ Value type: <phandle u32 u32>
+ Definition: phandle of the apps SMMU controller, followed by the SID
+ and SID mask for this controller. For details, see the
+ binding documentation in arm,smmu.txt.
+
+========
+Example:
+========
+gpi_dma0: qcom,gpi-dma@0x800000 {
+ #dma-cells = <6>;
+ compatible = "qcom,gpi-dma";
+ reg = <0x800000 0x60000>;
+ reg-names = "gpi-top";
+ interrupts = <0 244 0>, <0 245 0>, <0 246 0>, <0 247 0>,
+ <0 248 0>, <0 249 0>, <0 250 0>, <0 251 0>,
+ <0 252 0>, <0 253 0>, <0 254 0>, <0 255 0>,
+ <0 256 0>;
+ qcom,max-num-gpii = <13>;
+ qcom,gpii-mask = <0xfa>;
+ qcom,ev-factor = <2>;
+ iommus = <&apps_smmu 0x0016 0x0>;
+ status = "ok";
+};
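+
+A client then requests a channel with a 6-cell specifier matching
+#dma-cells. The node below is an illustrative sketch only; the client
+node, cell values, and dma-names are assumptions, not defined by this
+binding:
+
+i2c@a94000 {
+ ...
+ /* gpii 0, channel 0/1, SE 4, protocol 3 (I2C),
+ * 64-element transfer ring, lowest-latency events
+ */
+ dmas = <&gpi_dma0 0 0 4 3 64 0>,
+ <&gpi_dma0 0 1 4 3 64 0>;
+ dma-names = "tx", "rx";
+};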
diff --git a/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt b/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt
index a244d6c..51abe56 100644
--- a/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt
+++ b/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt
@@ -12,6 +12,7 @@
or when entering sleep state.
- #address-cells: Should be <1> Address cells for i2c device address
- #size-cells: Should be <0> as i2c addresses have no size component
+ - qcom,wrapper-core: Wrapper QUPv3 core containing this I2C controller.
Child nodes should conform to i2c bus binding.
@@ -30,4 +31,5 @@
pinctrl-1 = <&qup_1_i2c_5_sleep>;
#address-cells = <1>;
#size-cells = <0>;
+ qcom,wrapper-core = <&qupv3_0>;
};
diff --git a/Documentation/devicetree/bindings/platform/msm/qcom-geni-se.txt b/Documentation/devicetree/bindings/platform/msm/qcom-geni-se.txt
new file mode 100644
index 0000000..7da95f8
--- /dev/null
+++ b/Documentation/devicetree/bindings/platform/msm/qcom-geni-se.txt
@@ -0,0 +1,37 @@
+Qualcomm Technologies, Inc. GENI Serial Engine Driver
+
+GENI Serial Engine Driver is used to configure and read the configuration
+from the Serial Engines on Qualcomm Technologies, Inc. Universal Peripheral
+(QUPv3) core. It is also used to enable the stage 1 IOMMU translation and
+manage resources associated with the QUPv3 core.
+
+Required properties:
+- compatible: Must be "qcom,qupv3-geni-se".
+- reg: Must contain QUPv3 register address and length.
+- qcom,bus-mas-id: Master Endpoint ID for bus driver.
+- qcom,bus-slv-id: Slave Endpoint ID for bus driver.
+
+Optional properties:
+- qcom,iommu-s1-bypass: Boolean flag to bypass IOMMU stage 1 translation.
+
+Optional subnodes:
+qcom,iommu_qupv3_geni_se_cb: Child node representing the QUPv3 context
+ bank.
+
+Subnode Required properties:
+- compatible: Must be "qcom,qupv3-geni-se-cb".
+- iommus: A list of phandle and IOMMU specifier pairs that
+ describe the IOMMU master interfaces of the device.
+
+Example:
+ qupv3_0: qcom,qupv3_0_geni_se@8c0000 {
+ compatible = "qcom,qupv3-geni-se";
+ reg = <0x8c0000 0x6000>;
+ qcom,bus-mas-id = <100>;
+ qcom,bus-slv-id = <300>;
+
+ iommu_qupv3_0_geni_se_cb: qcom,iommu_qupv3_0_geni_se_cb {
+ compatible = "qcom,qupv3-geni-se-cb";
+ iommus = <&apps_smmu 0x1 0x0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/serial/qcom,msm-geni-uart.txt b/Documentation/devicetree/bindings/serial/qcom,msm-geni-uart.txt
index 0173a3d..b616bf3 100644
--- a/Documentation/devicetree/bindings/serial/qcom,msm-geni-uart.txt
+++ b/Documentation/devicetree/bindings/serial/qcom,msm-geni-uart.txt
@@ -15,10 +15,9 @@
- pinctrl-names/pinctrl-0/1: The GPIOs assigned to this core. The names
Should be "active" and "sleep" for the pin confuguration when core is active
or when entering sleep state.
+- qcom,wrapper-core: Wrapper QUPv3 core containing this UART controller.
Optional properties:
-- qcom,bus-mas: contains the bus master id needed to put in bus bandwidth votes
- for inter-connect buses.
- qcom,wakeup-byte: Byte to be injected in the tty layer during wakeup isr.
Example:
@@ -34,6 +33,6 @@
pinctrl-0 = <&qup_1_uart_3_active>;
pinctrl-1 = <&qup_1_uart_3_sleep>;
interrupts = <0 355 0>;
- qcom,bus-mas = <MASTER_BLSP_2>;
+ qcom,wrapper-core = <&qupv3_0>;
qcom,wakeup-byte = <0xFF>;
};
diff --git a/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt b/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt
index 868a5f0..cd2d2ea 100644
--- a/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt
+++ b/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt
@@ -23,6 +23,7 @@
- spi-max-frequency: Specifies maximum SPI clock frequency,
Units - Hz. Definition as per
Documentation/devicetree/bindings/spi/spi-bus.txt
+- qcom,wrapper-core: Wrapper QUPv3 core containing this SPI controller.
SPI slave nodes must be children of the SPI master node and can contain
properties described in Documentation/devicetree/bindings/spi/spi-bus.txt
@@ -44,6 +45,7 @@
pinctrl-1 = <&qup_1_spi_2_sleep>;
interrupts = <GIC_SPI 354 0>;
spi-max-frequency = <19200000>;
+ qcom,wrapper-core = <&qupv3_0>;
dev@0 {
compatible = "dummy,slave";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
index 58e536a..4c642e3 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
@@ -232,12 +232,10 @@
msm_cam_smmu_ife {
compatible = "qcom,msm-cam-smmu-cb";
- iommus = <&apps_smmu 0x808>,
- <&apps_smmu 0x810>,
- <&apps_smmu 0x818>,
- <&apps_smmu 0xc08>,
- <&apps_smmu 0xc10>,
- <&apps_smmu 0xc18>;
+ iommus = <&apps_smmu 0x808 0x0>,
+ <&apps_smmu 0x810 0x8>,
+ <&apps_smmu 0xc08 0x0>,
+ <&apps_smmu 0xc10 0x8>;
label = "ife";
ife_iova_mem_map: iova-mem-map {
/* IO region is approximately 3.4 GB */
@@ -259,13 +257,11 @@
msm_cam_smmu_icp {
compatible = "qcom,msm-cam-smmu-cb";
- iommus = <&apps_smmu 0x1078>,
- <&apps_smmu 0x1020>,
- <&apps_smmu 0x1028>,
- <&apps_smmu 0x1040>,
- <&apps_smmu 0x1048>,
- <&apps_smmu 0x1030>,
- <&apps_smmu 0x1050>;
+ iommus = <&apps_smmu 0x1078 0x2>,
+ <&apps_smmu 0x1020 0x8>,
+ <&apps_smmu 0x1040 0x8>,
+ <&apps_smmu 0x1030 0x0>,
+ <&apps_smmu 0x1050 0x0>;
label = "icp";
icp_iova_mem_map: iova-mem-map {
iova-mem-region-firmware {
@@ -299,7 +295,7 @@
msm_cam_smmu_cpas_cdm {
compatible = "qcom,msm-cam-smmu-cb";
- iommus = <&apps_smmu 0x1000>;
+ iommus = <&apps_smmu 0x1000 0x0>;
label = "cpas-cdm0";
cpas_cdm_iova_mem_map: iova-mem-map {
iova-mem-region-io {
@@ -315,7 +311,7 @@
msm_cam_smmu_secure {
compatible = "qcom,msm-cam-smmu-cb";
- iommus = <&apps_smmu 0x1001>;
+ iommus = <&apps_smmu 0x1001 0x0>;
label = "cam-secure";
cam_secure_iova_mem_map: iova-mem-map {
/* Secure IO region is approximately 3.4 GB */
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi
index e5d1a74..0fb455f 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi
@@ -14,6 +14,18 @@
&soc {
/* QUPv3 South instances */
+ qupv3_0: qcom,qupv3_0_geni_se@8c0000 {
+ compatible = "qcom,qupv3-geni-se";
+ reg = <0x8c0000 0x6000>;
+ qcom,bus-mas-id = <MSM_BUS_MASTER_BLSP_1>;
+ qcom,bus-slv-id = <MSM_BUS_SLAVE_EBI_CH0>;
+ qcom,iommu-s1-bypass;
+
+ iommu_qupv3_0_geni_se_cb: qcom,iommu_qupv3_0_geni_se_cb {
+ compatible = "qcom,qupv3-geni-se-cb";
+ iommus = <&apps_smmu 0x003 0x0>;
+ };
+ };
/*
* HS UART instances. HS UART usecases can be supported on these
@@ -33,8 +45,8 @@
interrupts-extended = <&intc GIC_SPI 607 0>,
<&tlmm 48 0>;
status = "disabled";
- qcom,bus-mas = <MSM_BUS_MASTER_BLSP_1>;
qcom,wakeup-byte = <0xFD>;
+ qcom,wrapper-core = <&qupv3_0>;
};
qupv3_se7_4uart: qcom,qup_uart@0x89c000 {
@@ -51,8 +63,8 @@
interrupts-extended = <&intc GIC_SPI 608 0>,
<&tlmm 96 0>;
status = "disabled";
- qcom,bus-mas = <MSM_BUS_MASTER_BLSP_1>;
qcom,wakeup-byte = <0xFD>;
+ qcom,wrapper-core = <&qupv3_0>;
};
/* I2C */
@@ -69,6 +81,7 @@
pinctrl-names = "default", "sleep";
pinctrl-0 = <&qupv3_se0_i2c_active>;
pinctrl-1 = <&qupv3_se0_i2c_sleep>;
+ qcom,wrapper-core = <&qupv3_0>;
status = "disabled";
};
@@ -85,6 +98,7 @@
pinctrl-names = "default", "sleep";
pinctrl-0 = <&qupv3_se1_i2c_active>;
pinctrl-1 = <&qupv3_se1_i2c_sleep>;
+ qcom,wrapper-core = <&qupv3_0>;
status = "disabled";
};
@@ -101,6 +115,7 @@
pinctrl-names = "default", "sleep";
pinctrl-0 = <&qupv3_se2_i2c_active>;
pinctrl-1 = <&qupv3_se2_i2c_sleep>;
+ qcom,wrapper-core = <&qupv3_0>;
status = "disabled";
};
@@ -117,6 +132,7 @@
pinctrl-names = "default", "sleep";
pinctrl-0 = <&qupv3_se3_i2c_active>;
pinctrl-1 = <&qupv3_se3_i2c_sleep>;
+ qcom,wrapper-core = <&qupv3_0>;
status = "disabled";
};
@@ -133,6 +149,7 @@
pinctrl-names = "default", "sleep";
pinctrl-0 = <&qupv3_se4_i2c_active>;
pinctrl-1 = <&qupv3_se4_i2c_sleep>;
+ qcom,wrapper-core = <&qupv3_0>;
status = "disabled";
};
@@ -149,6 +166,7 @@
pinctrl-names = "default", "sleep";
pinctrl-0 = <&qupv3_se5_i2c_active>;
pinctrl-1 = <&qupv3_se5_i2c_sleep>;
+ qcom,wrapper-core = <&qupv3_0>;
status = "disabled";
};
@@ -165,6 +183,7 @@
pinctrl-names = "default", "sleep";
pinctrl-0 = <&qupv3_se6_i2c_active>;
pinctrl-1 = <&qupv3_se6_i2c_sleep>;
+ qcom,wrapper-core = <&qupv3_0>;
status = "disabled";
};
@@ -181,6 +200,7 @@
pinctrl-names = "default", "sleep";
pinctrl-0 = <&qupv3_se7_i2c_active>;
pinctrl-1 = <&qupv3_se7_i2c_sleep>;
+ qcom,wrapper-core = <&qupv3_0>;
status = "disabled";
};
@@ -200,6 +220,7 @@
pinctrl-1 = <&qupv3_se0_spi_sleep>;
interrupts = <GIC_SPI 601 0>;
spi-max-frequency = <50000000>;
+ qcom,wrapper-core = <&qupv3_0>;
status = "disabled";
};
@@ -218,6 +239,7 @@
pinctrl-1 = <&qupv3_se1_spi_sleep>;
interrupts = <GIC_SPI 602 0>;
spi-max-frequency = <50000000>;
+ qcom,wrapper-core = <&qupv3_0>;
status = "disabled";
};
@@ -236,6 +258,7 @@
pinctrl-1 = <&qupv3_se2_spi_sleep>;
interrupts = <GIC_SPI 603 0>;
spi-max-frequency = <50000000>;
+ qcom,wrapper-core = <&qupv3_0>;
status = "disabled";
};
@@ -254,6 +277,7 @@
pinctrl-1 = <&qupv3_se3_spi_sleep>;
interrupts = <GIC_SPI 604 0>;
spi-max-frequency = <50000000>;
+ qcom,wrapper-core = <&qupv3_0>;
status = "disabled";
};
@@ -272,6 +296,7 @@
pinctrl-1 = <&qupv3_se4_spi_sleep>;
interrupts = <GIC_SPI 605 0>;
spi-max-frequency = <50000000>;
+ qcom,wrapper-core = <&qupv3_0>;
status = "disabled";
};
@@ -290,6 +315,7 @@
pinctrl-1 = <&qupv3_se5_spi_sleep>;
interrupts = <GIC_SPI 606 0>;
spi-max-frequency = <50000000>;
+ qcom,wrapper-core = <&qupv3_0>;
status = "disabled";
};
@@ -308,6 +334,7 @@
pinctrl-1 = <&qupv3_se6_spi_sleep>;
interrupts = <GIC_SPI 607 0>;
spi-max-frequency = <50000000>;
+ qcom,wrapper-core = <&qupv3_0>;
status = "disabled";
};
@@ -326,10 +353,24 @@
pinctrl-1 = <&qupv3_se7_spi_sleep>;
interrupts = <GIC_SPI 608 0>;
spi-max-frequency = <50000000>;
+ qcom,wrapper-core = <&qupv3_0>;
status = "disabled";
};
/* QUPv3 North Instances */
+ qupv3_1: qcom,qupv3_1_geni_se@ac0000 {
+ compatible = "qcom,qupv3-geni-se";
+ reg = <0xac0000 0x6000>;
+ qcom,bus-mas-id = <MSM_BUS_MASTER_BLSP_2>;
+ qcom,bus-slv-id = <MSM_BUS_SLAVE_EBI_CH0>;
+ qcom,iommu-s1-bypass;
+
+ iommu_qupv3_1_geni_se_cb: qcom,iommu_qupv3_1_geni_se_cb {
+ compatible = "qcom,qupv3-geni-se-cb";
+ iommus = <&apps_smmu 0x6c3 0x0>;
+ };
+ };
+
/* 2-wire UART */
/* Debug UART Instance for CDP/MTP platform */
@@ -344,8 +385,8 @@
pinctrl-names = "default", "sleep";
pinctrl-0 = <&qupv3_se9_2uart_active>;
pinctrl-1 = <&qupv3_se9_2uart_sleep>;
- qcom,bus-mas = <MSM_BUS_MASTER_BLSP_2>;
interrupts = <GIC_SPI 354 0>;
+ qcom,wrapper-core = <&qupv3_1>;
status = "disabled";
};
@@ -362,7 +403,7 @@
pinctrl-0 = <&qupv3_se10_2uart_active>;
pinctrl-1 = <&qupv3_se10_2uart_sleep>;
interrupts = <GIC_SPI 355 0>;
- qcom,bus-mas = <MSM_BUS_MASTER_BLSP_2>;
+ qcom,wrapper-core = <&qupv3_1>;
status = "disabled";
};
@@ -380,6 +421,7 @@
pinctrl-names = "default", "sleep";
pinctrl-0 = <&qupv3_se8_i2c_active>;
pinctrl-1 = <&qupv3_se8_i2c_sleep>;
+ qcom,wrapper-core = <&qupv3_1>;
status = "disabled";
};
@@ -396,6 +438,7 @@
pinctrl-names = "default", "sleep";
pinctrl-0 = <&qupv3_se9_i2c_active>;
pinctrl-1 = <&qupv3_se9_i2c_sleep>;
+ qcom,wrapper-core = <&qupv3_1>;
status = "disabled";
};
@@ -412,6 +455,7 @@
pinctrl-names = "default", "sleep";
pinctrl-0 = <&qupv3_se10_i2c_active>;
pinctrl-1 = <&qupv3_se10_i2c_sleep>;
+ qcom,wrapper-core = <&qupv3_1>;
status = "disabled";
};
@@ -428,6 +472,7 @@
pinctrl-names = "default", "sleep";
pinctrl-0 = <&qupv3_se11_i2c_active>;
pinctrl-1 = <&qupv3_se11_i2c_sleep>;
+ qcom,wrapper-core = <&qupv3_1>;
status = "disabled";
};
@@ -444,6 +489,7 @@
pinctrl-names = "default", "sleep";
pinctrl-0 = <&qupv3_se12_i2c_active>;
pinctrl-1 = <&qupv3_se12_i2c_sleep>;
+ qcom,wrapper-core = <&qupv3_1>;
status = "disabled";
};
@@ -460,6 +506,7 @@
pinctrl-names = "default", "sleep";
pinctrl-0 = <&qupv3_se13_i2c_active>;
pinctrl-1 = <&qupv3_se13_i2c_sleep>;
+ qcom,wrapper-core = <&qupv3_1>;
status = "disabled";
};
@@ -476,6 +523,7 @@
pinctrl-names = "default", "sleep";
pinctrl-0 = <&qupv3_se14_i2c_active>;
pinctrl-1 = <&qupv3_se14_i2c_sleep>;
+ qcom,wrapper-core = <&qupv3_1>;
status = "disabled";
};
@@ -492,6 +540,7 @@
pinctrl-names = "default", "sleep";
pinctrl-0 = <&qupv3_se15_i2c_active>;
pinctrl-1 = <&qupv3_se15_i2c_sleep>;
+ qcom,wrapper-core = <&qupv3_1>;
status = "disabled";
};
@@ -511,6 +560,7 @@
pinctrl-1 = <&qupv3_se8_spi_sleep>;
interrupts = <GIC_SPI 353 0>;
spi-max-frequency = <50000000>;
+ qcom,wrapper-core = <&qupv3_1>;
status = "disabled";
};
@@ -529,6 +579,7 @@
pinctrl-1 = <&qupv3_se9_spi_sleep>;
interrupts = <GIC_SPI 354 0>;
spi-max-frequency = <50000000>;
+ qcom,wrapper-core = <&qupv3_1>;
status = "disabled";
};
@@ -547,6 +598,7 @@
pinctrl-1 = <&qupv3_se10_spi_sleep>;
interrupts = <GIC_SPI 355 0>;
spi-max-frequency = <50000000>;
+ qcom,wrapper-core = <&qupv3_1>;
status = "disabled";
};
@@ -565,6 +617,7 @@
pinctrl-1 = <&qupv3_se11_spi_sleep>;
interrupts = <GIC_SPI 356 0>;
spi-max-frequency = <50000000>;
+ qcom,wrapper-core = <&qupv3_1>;
status = "disabled";
};
@@ -583,6 +636,7 @@
pinctrl-1 = <&qupv3_se12_spi_sleep>;
interrupts = <GIC_SPI 357 0>;
spi-max-frequency = <50000000>;
+ qcom,wrapper-core = <&qupv3_1>;
status = "disabled";
};
@@ -601,6 +655,7 @@
pinctrl-1 = <&qupv3_se13_spi_sleep>;
interrupts = <GIC_SPI 358 0>;
spi-max-frequency = <50000000>;
+ qcom,wrapper-core = <&qupv3_1>;
status = "disabled";
};
@@ -619,6 +674,7 @@
pinctrl-1 = <&qupv3_se14_spi_sleep>;
interrupts = <GIC_SPI 359 0>;
spi-max-frequency = <50000000>;
+ qcom,wrapper-core = <&qupv3_1>;
status = "disabled";
};
@@ -637,6 +693,7 @@
pinctrl-1 = <&qupv3_se15_spi_sleep>;
interrupts = <GIC_SPI 360 0>;
spi-max-frequency = <50000000>;
+ qcom,wrapper-core = <&qupv3_1>;
status = "disabled";
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 557a9b4..dddd241 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -24,6 +24,7 @@
#include <dt-bindings/soc/qcom,tcs-mbox.h>
#include <dt-bindings/spmi/spmi.h>
#include <dt-bindings/thermal/thermal.h>
+#include <dt-bindings/msm/msm-bus-ids.h>
/ {
model = "Qualcomm Technologies, Inc. SDM845";
@@ -800,6 +801,37 @@
qcom,target-dev = <&cpubw>;
};
+ llccbw: qcom,llccbw {
+ compatible = "qcom,devbw";
+ governor = "powersave";
+ qcom,src-dst-ports =
+ <MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_EBI_CH0>;
+ qcom,active-only;
+ qcom,bw-tbl =
+ < 762 /* 200 MHz */ >,
+ < 1144 /* 300 MHz */ >,
+ < 1720 /* 451 MHz */ >,
+ < 2086 /* 547 MHz */ >,
+ < 2597 /* 681 MHz */ >,
+ < 2929 /* 768 MHz */ >,
+ < 3879 /* 1017 MHz */ >,
+ < 4943 /* 1296 MHz */ >,
+ < 5931 /* 1555 MHz */ >,
+ < 6881 /* 1804 MHz */ >;
+ };
+
+ llcc_bwmon: qcom,llcc-bwmon {
+ compatible = "qcom,bimc-bwmon5";
+ reg = <0x0114A000 0x1000>;
+ reg-names = "base";
+ interrupts = <GIC_SPI 580 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,hw-timer-hz = <19200000>;
+ qcom,target-dev = <&llccbw>;
+ qcom,count-unit = <0x400000>;
+ qcom,byte-mid-mask = <0xe000>;
+ qcom,byte-mid-match = <0xe000>;
+ };
+
memlat_cpu0: qcom,memlat-cpu0 {
compatible = "qcom,devbw";
governor = "powersave";
@@ -2466,6 +2498,7 @@
<&clock_gcc GCC_CE1_AHB_CLK>,
<&clock_gcc GCC_CE1_AXI_CLK>;
qcom,ce-opp-freq = <171430000>;
+ qcom,request-bw-before-clk;
};
qcom_crypto: qcrypto@1de0000 {
@@ -2493,6 +2526,7 @@
<&clock_gcc GCC_CE1_AHB_CLK>,
<&clock_gcc GCC_CE1_AXI_CLK>;
qcom,ce-opp-freq = <171430000>;
+ qcom,request-bw-before-clk;
qcom,use-sw-aes-cbc-ecb-ctr-algo;
qcom,use-sw-aes-xts-algo;
qcom,use-sw-aes-ccm-algo;
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index 6e5a353..5a73a8e 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -430,6 +430,7 @@
CONFIG_QPNP_REVID=y
CONFIG_USB_BAM=y
CONFIG_SEEMP_CORE=y
+CONFIG_QCOM_GENI_SE=y
CONFIG_MSM_GCC_SDM845=y
CONFIG_MSM_VIDEOCC_SDM845=y
CONFIG_MSM_CAMCC_SDM845=y
@@ -481,6 +482,7 @@
CONFIG_ICNSS=y
CONFIG_QCOM_COMMAND_DB=y
CONFIG_MSM_ADSP_LOADER=y
+CONFIG_MSM_PERFORMANCE=y
CONFIG_MSM_CDSP_LOADER=y
CONFIG_MSM_AVTIMER=y
CONFIG_MSM_EVENT_TIMER=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index 9fc6d4e..07436c5 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -254,6 +254,7 @@
CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
CONFIG_SCSI_UFS_QCOM_ICE=y
+CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
@@ -446,6 +447,7 @@
CONFIG_QPNP_REVID=y
CONFIG_USB_BAM=y
CONFIG_SEEMP_CORE=y
+CONFIG_QCOM_GENI_SE=y
CONFIG_MSM_GCC_SDM845=y
CONFIG_MSM_VIDEOCC_SDM845=y
CONFIG_MSM_CAMCC_SDM845=y
@@ -462,6 +464,7 @@
CONFIG_ARM_SMMU=y
CONFIG_QCOM_LAZY_MAPPING=y
CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_DEBUG_TRACKING=y
CONFIG_IOMMU_TESTS=y
CONFIG_QCOM_CPUSS_DUMP=y
CONFIG_QCOM_RUN_QUEUE_STATS=y
@@ -501,6 +504,7 @@
CONFIG_ICNSS_DEBUG=y
CONFIG_QCOM_COMMAND_DB=y
CONFIG_MSM_ADSP_LOADER=y
+CONFIG_MSM_PERFORMANCE=y
CONFIG_MSM_CDSP_LOADER=y
CONFIG_MSM_AVTIMER=y
CONFIG_MSM_EVENT_TIMER=y
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 0363fe8..dc06a33 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -30,14 +30,20 @@
#include <asm/pgtable.h>
#include <asm/sysreg.h>
#include <asm/tlbflush.h>
+#include <linux/msm_rtb.h>
static inline void contextidr_thread_switch(struct task_struct *next)
{
+ pid_t pid = task_pid_nr(next);
+
if (!IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR))
return;
- write_sysreg(task_pid_nr(next), contextidr_el1);
+ write_sysreg(pid, contextidr_el1);
isb();
+
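+ /* Log the incoming context ID in the uncached RTB for post-mortem debug */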
+ uncached_logk(LOGK_CTXID, (void *)(u64)pid);
}
/*
diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c
index 5fb870b..4efecef 100644
--- a/drivers/clk/qcom/clk-cpu-osm.c
+++ b/drivers/clk/qcom/clk-cpu-osm.c
@@ -154,6 +154,42 @@
#define OSM_CYCLE_COUNTER_STATUS_REG(n) (OSM_CYCLE_COUNTER_STATUS_REG_0 + \
(4 * n))
+/* ACD registers */
+#define ACD_HW_VERSION 0x0
+#define ACDCR 0x4
+#define ACDTD 0x8
+#define ACDSSCR 0x28
+#define ACD_EXTINT_CFG 0x30
+#define ACD_DCVS_SW 0x34
+#define ACD_GFMUX_CFG 0x3c
+#define ACD_READOUT_CFG 0x48
+#define ACD_AVG_CFG_0 0x4c
+#define ACD_AVG_CFG_1 0x50
+#define ACD_AVG_CFG_2 0x54
+#define ACD_AUTOXFER_CFG 0x80
+#define ACD_AUTOXFER 0x84
+#define ACD_AUTOXFER_CTL 0x88
+#define ACD_AUTOXFER_STATUS 0x8c
+#define ACD_WRITE_CTL 0x90
+#define ACD_WRITE_STATUS 0x94
+#define ACD_READOUT 0x98
+
+#define ACD_MASTER_ONLY_REG_ADDR 0x80
+#define ACD_1P1_MAX_REG_OFFSET 0x100
+#define ACD_WRITE_CTL_UPDATE_EN BIT(0)
+#define ACD_WRITE_CTL_SELECT_SHIFT 1
+#define ACD_GFMUX_CFG_SELECT BIT(0)
+#define ACD_AUTOXFER_START_CLEAR 0
+#define ACD_AUTOXFER_START_SET 1
+#define AUTO_XFER_DONE_MASK BIT(0)
+#define ACD_DCVS_SW_DCVS_IN_PRGR_SET BIT(0)
+#define ACD_DCVS_SW_DCVS_IN_PRGR_CLEAR 0
+#define ACD_LOCAL_TRANSFER_TIMEOUT_NS 500
+
+#define ACD_REG_RELATIVE_ADDR(addr) ((addr) / 4)
+#define ACD_REG_RELATIVE_ADDR_BITMASK(addr) \
+ (1 << ACD_REG_RELATIVE_ADDR(addr))
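+
+/*
+ * Example of how the helpers above expand: ACDCR is at byte offset
+ * 0x4, so ACD_REG_RELATIVE_ADDR(ACDCR) == 1 and
+ * ACD_REG_RELATIVE_ADDR_BITMASK(ACDCR) == BIT(1).
+ */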
+
static const struct regmap_config osm_qcom_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -166,6 +202,7 @@
PLL_BASE,
EFUSE_BASE,
SEQ_BASE,
+ ACD_BASE,
NUM_BASES,
};
@@ -187,6 +224,8 @@
long frequency;
};
+static struct dentry *osm_debugfs_base;
+
struct clk_osm {
struct clk_hw hw;
struct osm_entry osm_table[OSM_TABLE_SIZE];
@@ -236,12 +275,173 @@
u32 trace_periodic_timer;
bool trace_en;
bool wdog_trace_en;
+
+ bool acd_init;
+ u32 acd_td;
+ u32 acd_cr;
+ u32 acd_sscr;
+ u32 acd_extint0_cfg;
+ u32 acd_extint1_cfg;
+ u32 acd_autoxfer_ctl;
+ u32 acd_debugfs_addr;
+ bool acd_avg_init;
+ u32 acd_avg_cfg0;
+ u32 acd_avg_cfg1;
+ u32 acd_avg_cfg2;
};
static struct regulator *vdd_l3;
static struct regulator *vdd_pwrcl;
static struct regulator *vdd_perfcl;
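+
+/*
+ * A read-back of the ACD HW version register; acts as a barrier
+ * guaranteeing that prior writes to the ACD block have been posted
+ * before the caller proceeds.
+ */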
+static inline int clk_osm_acd_mb(struct clk_osm *c)
+{
+ return readl_relaxed_no_log((char *)c->vbases[ACD_BASE] +
+ ACD_HW_VERSION);
+}
+
+static int clk_osm_acd_local_read_reg(struct clk_osm *c, u32 offset)
+{
+ u32 reg = 0;
+ int timeout;
+
+ if (offset >= ACD_MASTER_ONLY_REG_ADDR) {
+ pr_err("ACD register at offset=0x%x not locally readable\n",
+ offset);
+ return -EINVAL;
+ }
+
+ /* Set select field in read control register */
+ writel_relaxed(ACD_REG_RELATIVE_ADDR(offset),
+ (char *)c->vbases[ACD_BASE] + ACD_READOUT_CFG);
+
+ /* Clear write control register */
+ writel_relaxed(reg, (char *)c->vbases[ACD_BASE] + ACD_WRITE_CTL);
+
+ /* Set select and update_en fields in write control register */
+ reg = (ACD_REG_RELATIVE_ADDR(ACD_READOUT_CFG)
+ << ACD_WRITE_CTL_SELECT_SHIFT)
+ | ACD_WRITE_CTL_UPDATE_EN;
+ writel_relaxed(reg, (char *)c->vbases[ACD_BASE] + ACD_WRITE_CTL);
+
+ /* Ensure writes complete before polling */
+ clk_osm_acd_mb(c);
+
+ /* Poll write status register */
+ for (timeout = ACD_LOCAL_TRANSFER_TIMEOUT_NS; timeout > 0;
+ timeout -= 100) {
+ reg = readl_relaxed((char *)c->vbases[ACD_BASE]
+ + ACD_WRITE_STATUS);
+ if ((reg & (ACD_REG_RELATIVE_ADDR_BITMASK(ACD_READOUT_CFG))))
+ break;
+ ndelay(100);
+ }
+
+ if (!timeout) {
+ pr_err("local read timed out, offset=0x%x status=0x%x\n",
+ offset, reg);
+ return -ETIMEDOUT;
+ }
+
+ reg = readl_relaxed((char *)c->vbases[ACD_BASE] + ACD_READOUT);
+ return reg;
+}
+
+static int clk_osm_acd_local_write_reg(struct clk_osm *c, u32 val, u32 offset)
+{
+ u32 reg = 0;
+ int timeout;
+
+ if (offset >= ACD_MASTER_ONLY_REG_ADDR) {
+ pr_err("ACD register at offset=0x%x not transferrable\n",
+ offset);
+ return -EINVAL;
+ }
+
+ /* Clear write control register */
+ writel_relaxed(reg, (char *)c->vbases[ACD_BASE] + ACD_WRITE_CTL);
+
+ /* Set select and update_en fields in write control register */
+ reg = (ACD_REG_RELATIVE_ADDR(offset) << ACD_WRITE_CTL_SELECT_SHIFT)
+ | ACD_WRITE_CTL_UPDATE_EN;
+ writel_relaxed(reg, (char *)c->vbases[ACD_BASE] + ACD_WRITE_CTL);
+
+ /* Ensure writes complete before polling */
+ clk_osm_acd_mb(c);
+
+ /* Poll write status register */
+ for (timeout = ACD_LOCAL_TRANSFER_TIMEOUT_NS; timeout > 0;
+ timeout -= 100) {
+ reg = readl_relaxed((char *)c->vbases[ACD_BASE]
+ + ACD_WRITE_STATUS);
+ if ((reg & (ACD_REG_RELATIVE_ADDR_BITMASK(offset))))
+ break;
+ ndelay(100);
+ }
+
+ if (!timeout) {
+ pr_err("local write timed out, offset=0x%x val=0x%x status=0x%x\n",
+ offset, val, reg);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
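+/*
+ * ACD registers below ACD_MASTER_ONLY_REG_ADDR exist as a master copy
+ * plus a per-cluster local copy. A write-through first updates the
+ * master copy over MMIO and then issues a local-copy transfer so both
+ * stay in sync.
+ */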
+static int clk_osm_acd_master_write_through_reg(struct clk_osm *c,
+ u32 val, u32 offset)
+{
+ writel_relaxed(val, (char *)c->vbases[ACD_BASE] + offset);
+
+ /* Ensure writes complete before transfer to local copy */
+ clk_osm_acd_mb(c);
+
+ return clk_osm_acd_local_write_reg(c, val, offset);
+}
+
+static int clk_osm_acd_auto_local_write_reg(struct clk_osm *c, u32 mask)
+{
+ u32 numregs, bitmask = mask;
+ u32 reg = 0;
+ int timeout;
+
+ /* count number of bits set in register mask */
+ for (numregs = 0; bitmask; numregs++)
+ bitmask &= bitmask - 1;
+
+ /* Program auto-transfer mask */
+ writel_relaxed(mask, (char *)c->vbases[ACD_BASE] + ACD_AUTOXFER_CFG);
+
+ /* Clear start field in auto-transfer register */
+ writel_relaxed(ACD_AUTOXFER_START_CLEAR,
+ (char *)c->vbases[ACD_BASE] + ACD_AUTOXFER);
+
+ /* Set start field in auto-transfer register */
+ writel_relaxed(ACD_AUTOXFER_START_SET,
+ (char *)c->vbases[ACD_BASE] + ACD_AUTOXFER);
+
+ /* Ensure writes complete before polling */
+ clk_osm_acd_mb(c);
+
+ /* Poll auto-transfer status register */
+ for (timeout = ACD_LOCAL_TRANSFER_TIMEOUT_NS * numregs;
+ timeout > 0; timeout -= 100) {
+ reg = readl_relaxed((char *)c->vbases[ACD_BASE]
+ + ACD_AUTOXFER_STATUS);
+ if (reg & AUTO_XFER_DONE_MASK)
+ break;
+ ndelay(100);
+ }
+
+ if (!timeout) {
+ pr_err("local register auto-transfer timed out, mask=0x%x registers=%d status=0x%x\n",
+ mask, numregs, reg);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
static inline struct clk_osm *to_clk_osm(struct clk_hw *_hw)
{
return container_of(_hw, struct clk_osm, hw);
@@ -265,9 +465,10 @@
writel_relaxed(val, (char *)c->vbases[SEQ_BASE] + offset);
}
-static inline void clk_osm_write_reg(struct clk_osm *c, u32 val, u32 offset)
+static inline void clk_osm_write_reg(struct clk_osm *c, u32 val, u32 offset,
+ int base)
{
- writel_relaxed(val, (char *)c->vbases[OSM_BASE] + offset);
+ writel_relaxed(val, (char *)c->vbases[base] + offset);
}
static inline int clk_osm_read_reg(struct clk_osm *c, u32 offset)
@@ -356,7 +557,7 @@
{
struct clk_osm *cpuclk = to_clk_osm(hw);
- clk_osm_write_reg(cpuclk, 1, ENABLE_REG);
+ clk_osm_write_reg(cpuclk, 1, ENABLE_REG, OSM_BASE);
/* Make sure the write goes through before proceeding */
clk_osm_mb(cpuclk, OSM_BASE);
@@ -410,7 +611,8 @@
* TODO: Program INACTIVE_OS_REQUEST if needed.
*/
clk_osm_write_reg(parent, index,
- DCVS_PERF_STATE_DESIRED_REG(cpuclk->core_num));
+ DCVS_PERF_STATE_DESIRED_REG(cpuclk->core_num),
+ OSM_BASE);
/* Make sure the write goes through before proceeding */
clk_osm_mb(parent, OSM_BASE);
@@ -444,7 +646,8 @@
}
pr_debug("rate: %lu --> index %d\n", rate, index);
- clk_osm_write_reg(cpuclk, index, DCVS_PERF_STATE_DESIRED_REG_0);
+ clk_osm_write_reg(cpuclk, index, DCVS_PERF_STATE_DESIRED_REG_0,
+ OSM_BASE);
/* Make sure the write goes through before proceeding */
clk_osm_mb(cpuclk, OSM_BASE);
@@ -916,50 +1119,51 @@
if (c->red_fsm_en) {
val = clk_osm_read_reg(c, VMIN_REDUCTION_ENABLE_REG) | BIT(0);
val |= BVAL(6, 1, c->min_cpr_vc);
- clk_osm_write_reg(c, val, VMIN_REDUCTION_ENABLE_REG);
+ clk_osm_write_reg(c, val, VMIN_REDUCTION_ENABLE_REG,
+ OSM_BASE);
clk_osm_write_reg(c, clk_osm_count_ns(c, 10000),
- VMIN_REDUCTION_TIMER_REG);
+ VMIN_REDUCTION_TIMER_REG, OSM_BASE);
}
/* Boost FSM */
if (c->boost_fsm_en) {
val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
val |= DELTA_DEX_VAL | CC_BOOST_FSM_EN | IGNORE_PLL_LOCK;
- clk_osm_write_reg(c, val, PDN_FSM_CTRL_REG);
+ clk_osm_write_reg(c, val, PDN_FSM_CTRL_REG, OSM_BASE);
val = clk_osm_read_reg(c, CC_BOOST_FSM_TIMERS_REG0);
val |= BVAL(15, 0, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS));
val |= BVAL(31, 16, clk_osm_count_ns(c, SAFE_FREQ_WAIT_NS));
- clk_osm_write_reg(c, val, CC_BOOST_FSM_TIMERS_REG0);
+ clk_osm_write_reg(c, val, CC_BOOST_FSM_TIMERS_REG0, OSM_BASE);
val = clk_osm_read_reg(c, CC_BOOST_FSM_TIMERS_REG1);
val |= BVAL(15, 0, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS));
val |= BVAL(31, 16, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS));
- clk_osm_write_reg(c, val, CC_BOOST_FSM_TIMERS_REG1);
+ clk_osm_write_reg(c, val, CC_BOOST_FSM_TIMERS_REG1, OSM_BASE);
val = clk_osm_read_reg(c, CC_BOOST_FSM_TIMERS_REG2);
val |= BVAL(15, 0, clk_osm_count_ns(c, DEXT_DECREMENT_WAIT_NS));
- clk_osm_write_reg(c, val, CC_BOOST_FSM_TIMERS_REG2);
+ clk_osm_write_reg(c, val, CC_BOOST_FSM_TIMERS_REG2, OSM_BASE);
}
/* Safe Freq FSM */
if (c->safe_fsm_en) {
val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
clk_osm_write_reg(c, val | DCVS_BOOST_FSM_EN_MASK,
- PDN_FSM_CTRL_REG);
+ PDN_FSM_CTRL_REG, OSM_BASE);
val = clk_osm_read_reg(c, DCVS_BOOST_FSM_TIMERS_REG0);
val |= BVAL(31, 16, clk_osm_count_ns(c, 1000));
- clk_osm_write_reg(c, val, DCVS_BOOST_FSM_TIMERS_REG0);
+ clk_osm_write_reg(c, val, DCVS_BOOST_FSM_TIMERS_REG0, OSM_BASE);
val = clk_osm_read_reg(c, DCVS_BOOST_FSM_TIMERS_REG1);
val |= BVAL(15, 0, clk_osm_count_ns(c, SAFE_FREQ_WAIT_NS));
- clk_osm_write_reg(c, val, DCVS_BOOST_FSM_TIMERS_REG1);
+ clk_osm_write_reg(c, val, DCVS_BOOST_FSM_TIMERS_REG1, OSM_BASE);
val = clk_osm_read_reg(c, DCVS_BOOST_FSM_TIMERS_REG2);
val |= BVAL(15, 0, clk_osm_count_ns(c, DEXT_DECREMENT_WAIT_NS));
- clk_osm_write_reg(c, val, DCVS_BOOST_FSM_TIMERS_REG2);
+ clk_osm_write_reg(c, val, DCVS_BOOST_FSM_TIMERS_REG2, OSM_BASE);
}
@@ -967,46 +1171,46 @@
if (c->ps_fsm_en) {
val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
clk_osm_write_reg(c, val | PS_BOOST_FSM_EN_MASK,
- PDN_FSM_CTRL_REG);
+ PDN_FSM_CTRL_REG, OSM_BASE);
val = clk_osm_read_reg(c, PS_BOOST_FSM_TIMERS_REG0);
val |= BVAL(15, 0, clk_osm_count_ns(c, SAFE_FREQ_WAIT_NS));
val |= BVAL(31, 16, clk_osm_count_ns(c, 1000));
- clk_osm_write_reg(c, val, PS_BOOST_FSM_TIMERS_REG0);
+ clk_osm_write_reg(c, val, PS_BOOST_FSM_TIMERS_REG0, OSM_BASE);
val = clk_osm_read_reg(c, PS_BOOST_FSM_TIMERS_REG1);
val |= BVAL(15, 0, clk_osm_count_ns(c, SAFE_FREQ_WAIT_NS));
val |= BVAL(31, 16, clk_osm_count_ns(c, 1000));
- clk_osm_write_reg(c, val, PS_BOOST_FSM_TIMERS_REG1);
+ clk_osm_write_reg(c, val, PS_BOOST_FSM_TIMERS_REG1, OSM_BASE);
val = clk_osm_read_reg(c, PS_BOOST_FSM_TIMERS_REG2);
val |= BVAL(15, 0, clk_osm_count_ns(c, DEXT_DECREMENT_WAIT_NS));
- clk_osm_write_reg(c, val, PS_BOOST_FSM_TIMERS_REG2);
+ clk_osm_write_reg(c, val, PS_BOOST_FSM_TIMERS_REG2, OSM_BASE);
}
/* PLL signal timing control */
if (c->boost_fsm_en || c->safe_fsm_en || c->ps_fsm_en)
- clk_osm_write_reg(c, 0x2, BOOST_PROG_SYNC_DELAY_REG);
+ clk_osm_write_reg(c, 0x2, BOOST_PROG_SYNC_DELAY_REG, OSM_BASE);
/* DCVS droop FSM - only if RCGwRC is not used for di/dt control */
if (c->droop_fsm_en) {
val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
clk_osm_write_reg(c, val | DCVS_DROOP_FSM_EN_MASK,
- PDN_FSM_CTRL_REG);
+ PDN_FSM_CTRL_REG, OSM_BASE);
}
if (c->ps_fsm_en || c->droop_fsm_en) {
- clk_osm_write_reg(c, 0x1, DROOP_PROG_SYNC_DELAY_REG);
+ clk_osm_write_reg(c, 0x1, DROOP_PROG_SYNC_DELAY_REG, OSM_BASE);
clk_osm_write_reg(c, clk_osm_count_ns(c, 100),
- DROOP_RELEASE_TIMER_CTRL);
+ DROOP_RELEASE_TIMER_CTRL, OSM_BASE);
clk_osm_write_reg(c, clk_osm_count_ns(c, 150),
- DCVS_DROOP_TIMER_CTRL);
+ DCVS_DROOP_TIMER_CTRL, OSM_BASE);
/*
* TODO: Check if DCVS_DROOP_CODE used is correct. Also check
* if RESYNC_CTRL should be set for L3.
*/
val = BIT(31) | BVAL(22, 16, 0x2) | BVAL(6, 0, 0x8);
- clk_osm_write_reg(c, val, DROOP_CTRL_REG);
+ clk_osm_write_reg(c, val, DROOP_CTRL_REG, OSM_BASE);
}
}
@@ -1034,17 +1238,20 @@
} else {
val = clk_osm_count_ns(&l3_clk, array[l3_clk.cluster_num]);
clk_osm_write_reg(&l3_clk, val,
- LLM_VOLTAGE_VOTE_INC_HYSTERESIS);
+ LLM_VOLTAGE_VOTE_INC_HYSTERESIS,
+ OSM_BASE);
val = clk_osm_count_ns(&pwrcl_clk,
array[pwrcl_clk.cluster_num]);
clk_osm_write_reg(&pwrcl_clk, val,
- LLM_VOLTAGE_VOTE_INC_HYSTERESIS);
+ LLM_VOLTAGE_VOTE_INC_HYSTERESIS,
+ OSM_BASE);
val = clk_osm_count_ns(&perfcl_clk,
array[perfcl_clk.cluster_num]);
clk_osm_write_reg(&perfcl_clk, val,
- LLM_VOLTAGE_VOTE_INC_HYSTERESIS);
+ LLM_VOLTAGE_VOTE_INC_HYSTERESIS,
+ OSM_BASE);
}
/*
@@ -1060,17 +1267,20 @@
} else {
val = clk_osm_count_ns(&l3_clk, array[l3_clk.cluster_num]);
clk_osm_write_reg(&l3_clk, val,
- LLM_VOLTAGE_VOTE_DEC_HYSTERESIS);
+ LLM_VOLTAGE_VOTE_DEC_HYSTERESIS,
+ OSM_BASE);
val = clk_osm_count_ns(&pwrcl_clk,
array[pwrcl_clk.cluster_num]);
clk_osm_write_reg(&pwrcl_clk, val,
- LLM_VOLTAGE_VOTE_DEC_HYSTERESIS);
+ LLM_VOLTAGE_VOTE_DEC_HYSTERESIS,
+ OSM_BASE);
val = clk_osm_count_ns(&perfcl_clk,
- array[perfcl_clk.cluster_num]);
+ array[perfcl_clk.cluster_num]);
clk_osm_write_reg(&perfcl_clk, val,
- LLM_VOLTAGE_VOTE_DEC_HYSTERESIS);
+ LLM_VOLTAGE_VOTE_DEC_HYSTERESIS,
+ OSM_BASE);
}
/* Enable or disable honoring of LLM Voltage requests */
@@ -1084,11 +1294,11 @@
/* Enable or disable LLM VOLT DVCS */
regval = val | clk_osm_read_reg(&l3_clk, LLM_INTF_DCVS_DISABLE);
- clk_osm_write_reg(&l3_clk, regval, LLM_INTF_DCVS_DISABLE);
+ clk_osm_write_reg(&l3_clk, regval, LLM_INTF_DCVS_DISABLE, OSM_BASE);
regval = val | clk_osm_read_reg(&pwrcl_clk, LLM_INTF_DCVS_DISABLE);
- clk_osm_write_reg(&pwrcl_clk, regval, LLM_INTF_DCVS_DISABLE);
+ clk_osm_write_reg(&pwrcl_clk, regval, LLM_INTF_DCVS_DISABLE, OSM_BASE);
regval = val | clk_osm_read_reg(&perfcl_clk, LLM_INTF_DCVS_DISABLE);
- clk_osm_write_reg(&perfcl_clk, regval, LLM_INTF_DCVS_DISABLE);
+ clk_osm_write_reg(&perfcl_clk, regval, LLM_INTF_DCVS_DISABLE, OSM_BASE);
/* Wait for the writes to complete */
clk_osm_mb(&perfcl_clk, OSM_BASE);
@@ -1120,17 +1330,20 @@
rc);
} else {
val = clk_osm_count_ns(&l3_clk, array[l3_clk.cluster_num]);
- clk_osm_write_reg(&l3_clk, val, LLM_FREQ_VOTE_INC_HYSTERESIS);
+ clk_osm_write_reg(&l3_clk, val, LLM_FREQ_VOTE_INC_HYSTERESIS,
+ OSM_BASE);
val = clk_osm_count_ns(&pwrcl_clk,
array[pwrcl_clk.cluster_num]);
clk_osm_write_reg(&pwrcl_clk, val,
- LLM_FREQ_VOTE_INC_HYSTERESIS);
+ LLM_FREQ_VOTE_INC_HYSTERESIS,
+ OSM_BASE);
val = clk_osm_count_ns(&perfcl_clk,
array[perfcl_clk.cluster_num]);
clk_osm_write_reg(&perfcl_clk, val,
- LLM_FREQ_VOTE_INC_HYSTERESIS);
+ LLM_FREQ_VOTE_INC_HYSTERESIS,
+ OSM_BASE);
}
/*
@@ -1145,17 +1358,18 @@
rc);
} else {
val = clk_osm_count_ns(&l3_clk, array[l3_clk.cluster_num]);
- clk_osm_write_reg(&l3_clk, val, LLM_FREQ_VOTE_DEC_HYSTERESIS);
+ clk_osm_write_reg(&l3_clk, val, LLM_FREQ_VOTE_DEC_HYSTERESIS,
+ OSM_BASE);
val = clk_osm_count_ns(&pwrcl_clk,
array[pwrcl_clk.cluster_num]);
clk_osm_write_reg(&pwrcl_clk, val,
- LLM_FREQ_VOTE_DEC_HYSTERESIS);
+ LLM_FREQ_VOTE_DEC_HYSTERESIS, OSM_BASE);
val = clk_osm_count_ns(&perfcl_clk,
array[perfcl_clk.cluster_num]);
clk_osm_write_reg(&perfcl_clk, val,
- LLM_FREQ_VOTE_DEC_HYSTERESIS);
+ LLM_FREQ_VOTE_DEC_HYSTERESIS, OSM_BASE);
}
/* Enable or disable honoring of LLM frequency requests */
@@ -1169,11 +1383,11 @@
/* Enable or disable LLM FREQ DVCS */
regval = val | clk_osm_read_reg(&l3_clk, LLM_INTF_DCVS_DISABLE);
- clk_osm_write_reg(&l3_clk, regval, LLM_INTF_DCVS_DISABLE);
+ clk_osm_write_reg(&l3_clk, regval, LLM_INTF_DCVS_DISABLE, OSM_BASE);
regval = val | clk_osm_read_reg(&pwrcl_clk, LLM_INTF_DCVS_DISABLE);
- clk_osm_write_reg(&pwrcl_clk, regval, LLM_INTF_DCVS_DISABLE);
+ clk_osm_write_reg(&pwrcl_clk, regval, LLM_INTF_DCVS_DISABLE, OSM_BASE);
regval = val | clk_osm_read_reg(&perfcl_clk, LLM_INTF_DCVS_DISABLE);
- clk_osm_write_reg(&perfcl_clk, regval, LLM_INTF_DCVS_DISABLE);
+ clk_osm_write_reg(&perfcl_clk, regval, LLM_INTF_DCVS_DISABLE, OSM_BASE);
/* Wait for the write to complete */
clk_osm_mb(&perfcl_clk, OSM_BASE);
@@ -1201,15 +1415,18 @@
} else {
val = clk_osm_count_ns(&l3_clk,
array[l3_clk.cluster_num]);
- clk_osm_write_reg(&l3_clk, val, SPM_CC_INC_HYSTERESIS);
+ clk_osm_write_reg(&l3_clk, val, SPM_CC_INC_HYSTERESIS,
+ OSM_BASE);
val = clk_osm_count_ns(&pwrcl_clk,
array[pwrcl_clk.cluster_num]);
- clk_osm_write_reg(&pwrcl_clk, val, SPM_CC_INC_HYSTERESIS);
+ clk_osm_write_reg(&pwrcl_clk, val, SPM_CC_INC_HYSTERESIS,
+ OSM_BASE);
val = clk_osm_count_ns(&perfcl_clk,
array[perfcl_clk.cluster_num]);
- clk_osm_write_reg(&perfcl_clk, val, SPM_CC_INC_HYSTERESIS);
+ clk_osm_write_reg(&perfcl_clk, val, SPM_CC_INC_HYSTERESIS,
+ OSM_BASE);
}
rc = of_property_read_u32_array(of, "qcom,down-timer",
@@ -1219,15 +1436,18 @@
} else {
val = clk_osm_count_ns(&l3_clk,
array[l3_clk.cluster_num]);
- clk_osm_write_reg(&l3_clk, val, SPM_CC_DEC_HYSTERESIS);
+ clk_osm_write_reg(&l3_clk, val, SPM_CC_DEC_HYSTERESIS,
+ OSM_BASE);
val = clk_osm_count_ns(&pwrcl_clk,
array[pwrcl_clk.cluster_num]);
- clk_osm_write_reg(&pwrcl_clk, val, SPM_CC_DEC_HYSTERESIS);
+ clk_osm_write_reg(&pwrcl_clk, val, SPM_CC_DEC_HYSTERESIS,
+ OSM_BASE);
clk_osm_count_ns(&perfcl_clk,
array[perfcl_clk.cluster_num]);
- clk_osm_write_reg(&perfcl_clk, val, SPM_CC_DEC_HYSTERESIS);
+ clk_osm_write_reg(&perfcl_clk, val, SPM_CC_DEC_HYSTERESIS,
+ OSM_BASE);
}
/* OSM index override for cluster PC */
@@ -1236,15 +1456,18 @@
if (rc) {
dev_dbg(&pdev->dev, "No PC override index value, rc=%d\n",
rc);
- clk_osm_write_reg(&pwrcl_clk, 0, CC_ZERO_BEHAV_CTRL);
- clk_osm_write_reg(&perfcl_clk, 0, CC_ZERO_BEHAV_CTRL);
+ clk_osm_write_reg(&pwrcl_clk, 0, CC_ZERO_BEHAV_CTRL, OSM_BASE);
+ clk_osm_write_reg(&perfcl_clk, 0, CC_ZERO_BEHAV_CTRL,
+ OSM_BASE);
} else {
val = BVAL(6, 1, array[pwrcl_clk.cluster_num])
| ENABLE_OVERRIDE;
- clk_osm_write_reg(&pwrcl_clk, val, CC_ZERO_BEHAV_CTRL);
+ clk_osm_write_reg(&pwrcl_clk, val, CC_ZERO_BEHAV_CTRL,
+ OSM_BASE);
val = BVAL(6, 1, array[perfcl_clk.cluster_num])
| ENABLE_OVERRIDE;
- clk_osm_write_reg(&perfcl_clk, val, CC_ZERO_BEHAV_CTRL);
+ clk_osm_write_reg(&perfcl_clk, val, CC_ZERO_BEHAV_CTRL,
+ OSM_BASE);
}
/* Wait for the writes to complete */
@@ -1256,15 +1479,18 @@
val = clk_osm_read_reg(&l3_clk, SPM_CORE_INACTIVE_MAPPING);
val &= ~BIT(2);
- clk_osm_write_reg(&l3_clk, val, SPM_CORE_INACTIVE_MAPPING);
+ clk_osm_write_reg(&l3_clk, val, SPM_CORE_INACTIVE_MAPPING,
+ OSM_BASE);
val = clk_osm_read_reg(&pwrcl_clk, SPM_CORE_INACTIVE_MAPPING);
val &= ~BIT(2);
- clk_osm_write_reg(&pwrcl_clk, val, SPM_CORE_INACTIVE_MAPPING);
+ clk_osm_write_reg(&pwrcl_clk, val, SPM_CORE_INACTIVE_MAPPING,
+ OSM_BASE);
val = clk_osm_read_reg(&perfcl_clk, SPM_CORE_INACTIVE_MAPPING);
val &= ~BIT(2);
- clk_osm_write_reg(&perfcl_clk, val, SPM_CORE_INACTIVE_MAPPING);
+ clk_osm_write_reg(&perfcl_clk, val, SPM_CORE_INACTIVE_MAPPING,
+ OSM_BASE);
}
rc = of_property_read_bool(pdev->dev.of_node, "qcom,set-c2-active");
@@ -1273,15 +1499,18 @@
val = clk_osm_read_reg(&l3_clk, SPM_CORE_INACTIVE_MAPPING);
val &= ~BIT(1);
- clk_osm_write_reg(&l3_clk, val, SPM_CORE_INACTIVE_MAPPING);
+ clk_osm_write_reg(&l3_clk, val, SPM_CORE_INACTIVE_MAPPING,
+ OSM_BASE);
val = clk_osm_read_reg(&pwrcl_clk, SPM_CORE_INACTIVE_MAPPING);
val &= ~BIT(1);
- clk_osm_write_reg(&pwrcl_clk, val, SPM_CORE_INACTIVE_MAPPING);
+ clk_osm_write_reg(&pwrcl_clk, val, SPM_CORE_INACTIVE_MAPPING,
+ OSM_BASE);
val = clk_osm_read_reg(&perfcl_clk, SPM_CORE_INACTIVE_MAPPING);
val &= ~BIT(1);
- clk_osm_write_reg(&perfcl_clk, val, SPM_CORE_INACTIVE_MAPPING);
+ clk_osm_write_reg(&perfcl_clk, val, SPM_CORE_INACTIVE_MAPPING,
+ OSM_BASE);
}
rc = of_property_read_bool(pdev->dev.of_node, "qcom,disable-cc-dvcs");
@@ -1291,9 +1520,9 @@
} else
val = 0;
- clk_osm_write_reg(&l3_clk, val, SPM_CC_DCVS_DISABLE);
- clk_osm_write_reg(&pwrcl_clk, val, SPM_CC_DCVS_DISABLE);
- clk_osm_write_reg(&perfcl_clk, val, SPM_CC_DCVS_DISABLE);
+ clk_osm_write_reg(&l3_clk, val, SPM_CC_DCVS_DISABLE, OSM_BASE);
+ clk_osm_write_reg(&pwrcl_clk, val, SPM_CC_DCVS_DISABLE, OSM_BASE);
+ clk_osm_write_reg(&perfcl_clk, val, SPM_CC_DCVS_DISABLE, OSM_BASE);
/* Wait for the writes to complete */
clk_osm_mb(&perfcl_clk, OSM_BASE);
@@ -1335,8 +1564,9 @@
u32 lval = 0xFF, val;
int i;
- clk_osm_write_reg(c, BVAL(23, 16, 0xF), SPM_CORE_COUNT_CTRL);
- clk_osm_write_reg(c, PLL_MIN_LVAL, PLL_MIN_FREQ_REG);
+ clk_osm_write_reg(c, BVAL(23, 16, 0xF), SPM_CORE_COUNT_CTRL,
+ OSM_BASE);
+ clk_osm_write_reg(c, PLL_MIN_LVAL, PLL_MIN_FREQ_REG, OSM_BASE);
/* Pattern to set/clear PLL lock in PDN_FSM_CTRL_REG */
val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
@@ -1396,10 +1626,12 @@
}
table_entry_offset = i * OSM_REG_SIZE;
- clk_osm_write_reg(c, freq_val, FREQ_REG + table_entry_offset);
- clk_osm_write_reg(c, volt_val, VOLT_REG + table_entry_offset);
+ clk_osm_write_reg(c, freq_val, FREQ_REG + table_entry_offset,
+ OSM_BASE);
+ clk_osm_write_reg(c, volt_val, VOLT_REG + table_entry_offset,
+ OSM_BASE);
clk_osm_write_reg(c, override_val, OVERRIDE_REG +
- table_entry_offset);
+ table_entry_offset, OSM_BASE);
}
/* Make sure all writes go through */
@@ -1575,7 +1807,7 @@
do_div(ratio, c->xo_clk_rate);
val |= BVAL(5, 1, ratio - 1) | OSM_CYCLE_COUNTER_USE_XO_EDGE_EN;
- clk_osm_write_reg(c, val, OSM_CYCLE_COUNTER_CTRL_REG);
+ clk_osm_write_reg(c, val, OSM_CYCLE_COUNTER_CTRL_REG, OSM_BASE);
pr_debug("OSM to XO clock ratio: %d\n", ratio);
}
@@ -1750,6 +1982,149 @@
return rc;
}
+static int clk_osm_parse_acd_dt_configs(struct platform_device *pdev)
+{
+ struct device_node *of = pdev->dev.of_node;
+ u32 *array;
+ int rc = 0;
+
+ array = devm_kzalloc(&pdev->dev, MAX_CLUSTER_CNT * sizeof(u32),
+ GFP_KERNEL);
+ if (!array)
+ return -ENOMEM;
+
+ l3_clk.acd_init = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "l3_acd") != NULL;
+ pwrcl_clk.acd_init = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "pwrcl_acd") != NULL;
+ perfcl_clk.acd_init = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "perfcl_acd") != NULL;
+
+ if (pwrcl_clk.acd_init || perfcl_clk.acd_init || l3_clk.acd_init) {
+ rc = of_property_read_u32_array(of, "qcom,acdtd-val",
+ array, MAX_CLUSTER_CNT);
+ if (rc) {
+ dev_err(&pdev->dev, "unable to find qcom,acdtd-val property, rc=%d\n",
+ rc);
+ return -EINVAL;
+ }
+
+ pwrcl_clk.acd_td = array[pwrcl_clk.cluster_num];
+ perfcl_clk.acd_td = array[perfcl_clk.cluster_num];
+ l3_clk.acd_td = array[l3_clk.cluster_num];
+
+ rc = of_property_read_u32_array(of, "qcom,acdcr-val",
+ array, MAX_CLUSTER_CNT);
+ if (rc) {
+ dev_err(&pdev->dev, "unable to find qcom,acdcr-val property, rc=%d\n",
+ rc);
+ return -EINVAL;
+ }
+
+ pwrcl_clk.acd_cr = array[pwrcl_clk.cluster_num];
+ perfcl_clk.acd_cr = array[perfcl_clk.cluster_num];
+ l3_clk.acd_cr = array[l3_clk.cluster_num];
+
+ rc = of_property_read_u32_array(of, "qcom,acdsscr-val",
+ array, MAX_CLUSTER_CNT);
+ if (rc) {
+ dev_err(&pdev->dev, "unable to find qcom,acdsscr-val property, rc=%d\n",
+ rc);
+ return -EINVAL;
+ }
+
+ pwrcl_clk.acd_sscr = array[pwrcl_clk.cluster_num];
+ perfcl_clk.acd_sscr = array[perfcl_clk.cluster_num];
+ l3_clk.acd_sscr = array[l3_clk.cluster_num];
+
+ rc = of_property_read_u32_array(of, "qcom,acdextint0-val",
+ array, MAX_CLUSTER_CNT);
+ if (rc) {
+ dev_err(&pdev->dev, "unable to find qcom,acdextint0-val property, rc=%d\n",
+ rc);
+ return -EINVAL;
+ }
+
+ pwrcl_clk.acd_extint0_cfg = array[pwrcl_clk.cluster_num];
+ perfcl_clk.acd_extint0_cfg = array[perfcl_clk.cluster_num];
+ l3_clk.acd_extint0_cfg = array[l3_clk.cluster_num];
+
+ rc = of_property_read_u32_array(of, "qcom,acdextint1-val",
+ array, MAX_CLUSTER_CNT);
+ if (rc) {
+ dev_err(&pdev->dev, "unable to find qcom,acdextint1-val property, rc=%d\n",
+ rc);
+ return -EINVAL;
+ }
+
+ pwrcl_clk.acd_extint1_cfg = array[pwrcl_clk.cluster_num];
+ perfcl_clk.acd_extint1_cfg = array[perfcl_clk.cluster_num];
+ l3_clk.acd_extint1_cfg = array[l3_clk.cluster_num];
+
+ rc = of_property_read_u32_array(of, "qcom,acdautoxfer-val",
+ array, MAX_CLUSTER_CNT);
+ if (rc) {
+ dev_err(&pdev->dev, "unable to find qcom,acdautoxfer-val property, rc=%d\n",
+ rc);
+ return -EINVAL;
+ }
+
+ pwrcl_clk.acd_autoxfer_ctl = array[pwrcl_clk.cluster_num];
+ perfcl_clk.acd_autoxfer_ctl = array[perfcl_clk.cluster_num];
+ l3_clk.acd_autoxfer_ctl = array[l3_clk.cluster_num];
+
+ rc = of_property_read_u32_array(of, "qcom,acdavg-init",
+ array, MAX_CLUSTER_CNT);
+ if (rc) {
+ dev_err(&pdev->dev, "unable to find qcom,acdavg-init property, rc=%d\n",
+ rc);
+ return -EINVAL;
+ }
+ pwrcl_clk.acd_avg_init = array[pwrcl_clk.cluster_num];
+ perfcl_clk.acd_avg_init = array[perfcl_clk.cluster_num];
+ l3_clk.acd_avg_init = array[l3_clk.cluster_num];
+ }
+
+ if (pwrcl_clk.acd_avg_init || perfcl_clk.acd_avg_init ||
+ l3_clk.acd_avg_init) {
+ rc = of_property_read_u32_array(of, "qcom,acdavgcfg0-val",
+ array, MAX_CLUSTER_CNT);
+ if (rc) {
+ dev_err(&pdev->dev, "unable to find qcom,acdavgcfg0-val property, rc=%d\n",
+ rc);
+ return -EINVAL;
+ }
+ pwrcl_clk.acd_avg_cfg0 = array[pwrcl_clk.cluster_num];
+ perfcl_clk.acd_avg_cfg0 = array[perfcl_clk.cluster_num];
+ l3_clk.acd_avg_cfg0 = array[l3_clk.cluster_num];
+
+ rc = of_property_read_u32_array(of, "qcom,acdavgcfg1-val",
+ array, MAX_CLUSTER_CNT);
+ if (rc) {
+ dev_err(&pdev->dev, "unable to find qcom,acdavgcfg1-val property, rc=%d\n",
+ rc);
+ return -EINVAL;
+ }
+ pwrcl_clk.acd_avg_cfg1 = array[pwrcl_clk.cluster_num];
+ perfcl_clk.acd_avg_cfg1 = array[perfcl_clk.cluster_num];
+ l3_clk.acd_avg_cfg1 = array[l3_clk.cluster_num];
+
+ rc = of_property_read_u32_array(of, "qcom,acdavgcfg2-val",
+ array, MAX_CLUSTER_CNT);
+ if (rc) {
+ dev_err(&pdev->dev, "unable to find qcom,acdavgcfg2-val property, rc=%d\n",
+ rc);
+ return -EINVAL;
+ }
+ pwrcl_clk.acd_avg_cfg2 = array[pwrcl_clk.cluster_num];
+ perfcl_clk.acd_avg_cfg2 = array[perfcl_clk.cluster_num];
+ l3_clk.acd_avg_cfg2 = array[l3_clk.cluster_num];
+ }
+
+ devm_kfree(&pdev->dev, array);
+ return rc;
+}
+
static int clk_osm_parse_dt_configs(struct platform_device *pdev)
{
struct device_node *of = pdev->dev.of_node;
@@ -1938,7 +2313,7 @@
resource_size(res));
if (!l3_clk.vbases[SEQ_BASE]) {
- dev_err(&pdev->dev, "Unable to map in l3_sequencer base\n");
+ dev_err(&pdev->dev, "Unable to map l3_sequencer base\n");
return -ENOMEM;
}
@@ -1955,7 +2330,7 @@
resource_size(res));
if (!pwrcl_clk.vbases[SEQ_BASE]) {
- dev_err(&pdev->dev, "Unable to map in pwrcl_sequencer base\n");
+ dev_err(&pdev->dev, "Unable to map pwrcl_sequencer base\n");
return -ENOMEM;
}
@@ -1972,7 +2347,7 @@
resource_size(res));
if (!perfcl_clk.vbases[SEQ_BASE]) {
- dev_err(&pdev->dev, "Unable to map in perfcl_sequencer base\n");
+ dev_err(&pdev->dev, "Unable to map perfcl_sequencer base\n");
return -ENOMEM;
}
@@ -2038,6 +2413,57 @@
return rc;
}
+static int clk_osm_acd_resources_init(struct platform_device *pdev)
+{
+ struct resource *res;
+ unsigned long pbase;
+ void *vbase;
+ int rc = 0;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "pwrcl_acd");
+ if (res) {
+ pbase = (unsigned long)res->start;
+ vbase = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!vbase) {
+ dev_err(&pdev->dev, "Unable to map pwrcl_acd base\n");
+ return -ENOMEM;
+ }
+ pwrcl_clk.pbases[ACD_BASE] = pbase;
+ pwrcl_clk.vbases[ACD_BASE] = vbase;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "perfcl_acd");
+ if (res) {
+ pbase = (unsigned long)res->start;
+ vbase = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!vbase) {
+ dev_err(&pdev->dev, "Unable to map perfcl_acd base\n");
+ return -ENOMEM;
+ }
+ perfcl_clk.pbases[ACD_BASE] = pbase;
+ perfcl_clk.vbases[ACD_BASE] = vbase;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "l3_acd");
+ if (res) {
+ pbase = (unsigned long)res->start;
+ vbase = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!vbase) {
+ dev_err(&pdev->dev, "Unable to map l3_acd base\n");
+ return -ENOMEM;
+ }
+ l3_clk.pbases[ACD_BASE] = pbase;
+ l3_clk.vbases[ACD_BASE] = vbase;
+ }
+ return rc;
+}
+
static int clk_osm_resources_init(struct platform_device *pdev)
{
struct device_node *node;
@@ -2059,7 +2485,7 @@
resource_size(res));
if (!l3_clk.vbases[OSM_BASE]) {
- dev_err(&pdev->dev, "Unable to map in osm_l3_base base\n");
+ dev_err(&pdev->dev, "Unable to map osm_l3_base base\n");
return -ENOMEM;
}
@@ -2075,7 +2501,7 @@
pwrcl_clk.vbases[OSM_BASE] = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!pwrcl_clk.vbases[OSM_BASE]) {
- dev_err(&pdev->dev, "Unable to map in osm_pwrcl_base base\n");
+ dev_err(&pdev->dev, "Unable to map osm_pwrcl_base base\n");
return -ENOMEM;
}
@@ -2092,7 +2518,7 @@
resource_size(res));
if (!perfcl_clk.vbases[OSM_BASE]) {
- dev_err(&pdev->dev, "Unable to map in osm_perfcl_base base\n");
+ dev_err(&pdev->dev, "Unable to map osm_perfcl_base base\n");
return -ENOMEM;
}
@@ -2169,7 +2595,7 @@
vbase = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!vbase) {
- dev_err(&pdev->dev, "Unable to map in pwrcl_efuse base\n");
+ dev_err(&pdev->dev, "Unable to map pwrcl_efuse base\n");
return -ENOMEM;
}
pwrcl_clk.pbases[EFUSE_BASE] = pbase;
@@ -2183,7 +2609,7 @@
vbase = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!vbase) {
- dev_err(&pdev->dev, "Unable to map in perfcl_efuse base\n");
+ dev_err(&pdev->dev, "Unable to map perfcl_efuse base\n");
return -ENOMEM;
}
perfcl_clk.pbases[EFUSE_BASE] = pbase;
@@ -2259,6 +2685,207 @@
return 0;
}
+static int debugfs_get_debug_reg(void *data, u64 *val)
+{
+ struct clk_osm *c = data;
+
+ if (c->acd_debugfs_addr >= ACD_MASTER_ONLY_REG_ADDR)
+ *val = readl_relaxed((char *)c->vbases[ACD_BASE] +
+ c->acd_debugfs_addr);
+ else
+ *val = clk_osm_acd_local_read_reg(c, c->acd_debugfs_addr);
+ return 0;
+}
+
+static int debugfs_set_debug_reg(void *data, u64 val)
+{
+ struct clk_osm *c = data;
+
+ if (c->acd_debugfs_addr >= ACD_MASTER_ONLY_REG_ADDR)
+ clk_osm_write_reg(c, val, c->acd_debugfs_addr, ACD_BASE);
+ else
+ clk_osm_acd_master_write_through_reg(c, val,
+ c->acd_debugfs_addr);
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(debugfs_acd_debug_reg_fops,
+ debugfs_get_debug_reg,
+ debugfs_set_debug_reg,
+ "0x%llx\n");
+
+static int debugfs_get_debug_reg_addr(void *data, u64 *val)
+{
+ struct clk_osm *c = data;
+
+ *val = c->acd_debugfs_addr;
+ return 0;
+}
+
+static int debugfs_set_debug_reg_addr(void *data, u64 val)
+{
+ struct clk_osm *c = data;
+
+ if (val > ACD_1P1_MAX_REG_OFFSET) {
+ pr_err("invalid ACD register address offset, must be between 0-0x%x\n",
+ ACD_1P1_MAX_REG_OFFSET);
+ return 0;
+ }
+
+ c->acd_debugfs_addr = val;
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(debugfs_acd_debug_reg_addr_fops,
+ debugfs_get_debug_reg_addr,
+ debugfs_set_debug_reg_addr,
+ "%llu\n");
+
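+/*
+ * Sketch of the intended debugfs usage (assuming debugfs is mounted at
+ * the conventional /sys/kernel/debug):
+ *   echo 0x4 > /sys/kernel/debug/osm/<clk-name>/acd_debug_reg_addr
+ *   cat /sys/kernel/debug/osm/<clk-name>/acd_debug_reg
+ * selects the ACD register at offset 0x4 (ACDCR) and reads it back,
+ * using the local-read protocol for offsets below
+ * ACD_MASTER_ONLY_REG_ADDR and a direct MMIO read otherwise.
+ */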
+static void populate_debugfs_dir(struct clk_osm *c)
+{
+ struct dentry *temp;
+
+ if (osm_debugfs_base == NULL) {
+ osm_debugfs_base = debugfs_create_dir("osm", NULL);
+ if (IS_ERR_OR_NULL(osm_debugfs_base)) {
+ pr_err("osm debugfs base directory creation failed\n");
+ osm_debugfs_base = NULL;
+ return;
+ }
+ }
+
+ c->debugfs = debugfs_create_dir(clk_hw_get_name(&c->hw),
+ osm_debugfs_base);
+ if (IS_ERR_OR_NULL(c->debugfs)) {
+ pr_err("osm debugfs directory creation failed\n");
+ return;
+ }
+
+ temp = debugfs_create_file("acd_debug_reg",
+ 0644,
+ c->debugfs, c,
+ &debugfs_acd_debug_reg_fops);
+ if (IS_ERR_OR_NULL(temp)) {
+ pr_err("debugfs_acd_debug_reg_fops debugfs file creation failed\n");
+ goto exit;
+ }
+
+ temp = debugfs_create_file("acd_debug_reg_addr",
+ 0644,
+ c->debugfs, c,
+ &debugfs_acd_debug_reg_addr_fops);
+ if (IS_ERR_OR_NULL(temp)) {
+ pr_err("debugfs_acd_debug_reg_addr_fops debugfs file creation failed\n");
+ goto exit;
+ }
+
+exit:
+ if (IS_ERR_OR_NULL(temp))
+ debugfs_remove_recursive(c->debugfs);
+}
+
+static int clk_osm_acd_init(struct clk_osm *c)
+{
+ int rc = 0;
+ u32 auto_xfer_mask = 0;
+
+ if (c->secure_init) {
+ clk_osm_write_reg(c, c->pbases[ACD_BASE] + ACDCR,
+ DATA_MEM(115), OSM_BASE);
+ clk_osm_write_reg(c, c->pbases[ACD_BASE] + ACD_WRITE_CTL,
+ DATA_MEM(116), OSM_BASE);
+ }
+
+ if (!c->acd_init)
+ return 0;
+
+ c->acd_debugfs_addr = ACD_HW_VERSION;
+
+ /* Program ACD tunable-length delay register */
+ clk_osm_write_reg(c, c->acd_td, ACDTD, ACD_BASE);
+ auto_xfer_mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACDTD);
+
+ /* Program ACD control register */
+ clk_osm_write_reg(c, c->acd_cr, ACDCR, ACD_BASE);
+ auto_xfer_mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACDCR);
+
+ /* Program ACD soft start control register */
+ clk_osm_write_reg(c, c->acd_sscr, ACDSSCR, ACD_BASE);
+ auto_xfer_mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACDSSCR);
+
+ /* Program initial ACD external interface configuration register */
+ clk_osm_write_reg(c, c->acd_extint0_cfg, ACD_EXTINT_CFG, ACD_BASE);
+ auto_xfer_mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACD_EXTINT_CFG);
+
+ /* Program ACD auto-register transfer control register */
+ clk_osm_write_reg(c, c->acd_autoxfer_ctl, ACD_AUTOXFER_CTL, ACD_BASE);
+
+ /* Ensure writes complete before transfers to local copy */
+ clk_osm_acd_mb(c);
+
+ /* Transfer master copies */
+ rc = clk_osm_acd_auto_local_write_reg(c, auto_xfer_mask);
+ if (rc)
+ return rc;
+
+ /* Switch CPUSS clock source to ACD clock */
+ auto_xfer_mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACD_GFMUX_CFG);
+ rc = clk_osm_acd_master_write_through_reg(c, ACD_GFMUX_CFG_SELECT,
+ ACD_GFMUX_CFG);
+ if (rc)
+ return rc;
+
+ /* Program ACD_DCVS_SW */
+ rc = clk_osm_acd_master_write_through_reg(c,
+ ACD_DCVS_SW_DCVS_IN_PRGR_SET,
+ ACD_DCVS_SW);
+ if (rc)
+ return rc;
+
+ rc = clk_osm_acd_master_write_through_reg(c,
+ ACD_DCVS_SW_DCVS_IN_PRGR_CLEAR,
+ ACD_DCVS_SW);
+ if (rc)
+ return rc;
+
+ udelay(1);
+
+ /* Program final ACD external interface configuration register */
+ rc = clk_osm_acd_master_write_through_reg(c, c->acd_extint1_cfg,
+ ACD_EXTINT_CFG);
+ if (rc)
+ return rc;
+
+ if (c->acd_avg_init) {
+ auto_xfer_mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACD_AVG_CFG_2);
+ rc = clk_osm_acd_master_write_through_reg(c, c->acd_avg_cfg2,
+ ACD_AVG_CFG_2);
+ if (rc)
+ return rc;
+
+ auto_xfer_mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACD_AVG_CFG_1);
+ rc = clk_osm_acd_master_write_through_reg(c, c->acd_avg_cfg1,
+ ACD_AVG_CFG_1);
+ if (rc)
+ return rc;
+
+ auto_xfer_mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACD_AVG_CFG_0);
+ rc = clk_osm_acd_master_write_through_reg(c, c->acd_avg_cfg0,
+ ACD_AVG_CFG_0);
+ if (rc)
+ return rc;
+ }
+
+ /*
+ * ACDCR, ACDTD, ACDSSCR, ACD_EXTINT_CFG, ACD_GFMUX_CFG
+ * must be copied from master to local copy on PC exit.
+ * Also, ACD_AVG_CFG0, ACD_AVG_CFG1, and ACD_AVG_CFG2 when
+ * AVG is enabled.
+ */
+ clk_osm_write_reg(c, auto_xfer_mask, ACD_AUTOXFER_CFG, ACD_BASE);
+ return 0;
+}
+
static unsigned long init_rate = 300000000;
static int clk_cpu_osm_driver_probe(struct platform_device *pdev)
@@ -2303,15 +2930,28 @@
rc = clk_osm_parse_dt_configs(pdev);
if (rc) {
- dev_err(&pdev->dev, "Unable to parse device tree configurations\n");
+ dev_err(&pdev->dev, "Unable to parse OSM device tree configurations\n");
+ return rc;
+ }
+
+ rc = clk_osm_parse_acd_dt_configs(pdev);
+ if (rc) {
+ dev_err(&pdev->dev, "Unable to parse ACD device tree configurations\n");
return rc;
}
rc = clk_osm_resources_init(pdev);
if (rc) {
if (rc != -EPROBE_DEFER)
- dev_err(&pdev->dev, "resources init failed, rc=%d\n",
- rc);
+ dev_err(&pdev->dev, "OSM resources init failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = clk_osm_acd_resources_init(pdev);
+ if (rc) {
+ dev_err(&pdev->dev, "ACD resources init failed, rc=%d\n",
+ rc);
return rc;
}
@@ -2466,7 +3106,7 @@
/* Program VC at which the array power supply needs to be switched */
clk_osm_write_reg(&perfcl_clk, perfcl_clk.apm_threshold_vc,
- APM_CROSSOVER_VC);
+ APM_CROSSOVER_VC, OSM_BASE);
if (perfcl_clk.secure_init) {
clk_osm_write_seq_reg(&perfcl_clk, perfcl_clk.apm_crossover_vc,
DATA_MEM(77));
@@ -2510,11 +3150,11 @@
if (pwrcl_clk.per_core_dcvs) {
val = clk_osm_read_reg(&pwrcl_clk, CORE_DCVS_CTRL);
val |= BIT(0);
- clk_osm_write_reg(&pwrcl_clk, val, CORE_DCVS_CTRL);
+ clk_osm_write_reg(&pwrcl_clk, val, CORE_DCVS_CTRL, OSM_BASE);
val = clk_osm_read_reg(&perfcl_clk, CORE_DCVS_CTRL);
val |= BIT(0);
- clk_osm_write_reg(&perfcl_clk, val, CORE_DCVS_CTRL);
+ clk_osm_write_reg(&perfcl_clk, val, CORE_DCVS_CTRL, OSM_BASE);
}
clk_ops_core = clk_dummy_ops;
@@ -2522,6 +3162,22 @@
clk_ops_core.round_rate = cpu_clk_round_rate;
clk_ops_core.recalc_rate = cpu_clk_recalc_rate;
+ rc = clk_osm_acd_init(&l3_clk);
+ if (rc) {
+ pr_err("failed to initialize ACD for L3, rc=%d\n", rc);
+ goto exit;
+ }
+ rc = clk_osm_acd_init(&pwrcl_clk);
+ if (rc) {
+ pr_err("failed to initialize ACD for pwrcl, rc=%d\n", rc);
+ goto exit;
+ }
+ rc = clk_osm_acd_init(&perfcl_clk);
+ if (rc) {
+ pr_err("failed to initialize ACD for perfcl, rc=%d\n", rc);
+ goto exit;
+ }
+
spin_lock_init(&l3_clk.lock);
spin_lock_init(&pwrcl_clk.lock);
spin_lock_init(&perfcl_clk.lock);
@@ -2595,6 +3251,9 @@
clk_prepare_enable(perfcl_clk.hw.clk);
populate_opp_table(pdev);
+ populate_debugfs_dir(&l3_clk);
+ populate_debugfs_dir(&pwrcl_clk);
+ populate_debugfs_dir(&perfcl_clk);
of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
register_cpu_cycle_counter_cb(&cb);
diff --git a/drivers/crypto/msm/compat_qcedev.h b/drivers/crypto/msm/compat_qcedev.h
index 4cc3933..6c041cb 100644
--- a/drivers/crypto/msm/compat_qcedev.h
+++ b/drivers/crypto/msm/compat_qcedev.h
@@ -1,3 +1,16 @@
+/*
+ * Copyright (c) 2014, 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
#ifndef _UAPI_COMPAT_QCEDEV__H
#define _UAPI_COMPAT_QCEDEV__H
diff --git a/drivers/crypto/msm/qce.h b/drivers/crypto/msm/qce.h
index 2215dc1..c3b96f0 100644
--- a/drivers/crypto/msm/qce.h
+++ b/drivers/crypto/msm/qce.h
@@ -56,6 +56,12 @@
/* Maximum Nonce bytes */
#define MAX_NONCE 16
+/* Crypto clock control flags */
+#define QCE_CLK_ENABLE_FIRST 1
+#define QCE_BW_REQUEST_FIRST 2
+#define QCE_CLK_DISABLE_FIRST 3
+#define QCE_BW_REQUEST_RESET_FIRST 4
+
typedef void (*qce_comp_func_ptr_t)(void *areq,
unsigned char *icv, unsigned char *iv, int ret);
diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c
index 0860e59..5d6e0c2 100644
--- a/drivers/crypto/msm/qcedev.c
+++ b/drivers/crypto/msm/qcedev.c
@@ -58,6 +58,97 @@
static DEFINE_MUTEX(send_cmd_lock);
static DEFINE_MUTEX(qcedev_sent_bw_req);
+static int qcedev_control_clocks(struct qcedev_control *podev, bool enable)
+{
+ unsigned int control_flag;
+ int ret = 0;
+
+ if (podev->ce_support.req_bw_before_clk) {
+ if (enable)
+ control_flag = QCE_BW_REQUEST_FIRST;
+ else
+ control_flag = QCE_CLK_DISABLE_FIRST;
+ } else {
+ if (enable)
+ control_flag = QCE_CLK_ENABLE_FIRST;
+ else
+ control_flag = QCE_BW_REQUEST_RESET_FIRST;
+ }
+
+ switch (control_flag) {
+ case QCE_CLK_ENABLE_FIRST:
+ ret = qce_enable_clk(podev->qce);
+ if (ret) {
+ pr_err("%s Unable enable clk\n", __func__);
+ return ret;
+ }
+ ret = msm_bus_scale_client_update_request(
+ podev->bus_scale_handle, 1);
+ if (ret) {
+ pr_err("%s Unable to set high bw\n", __func__);
+ if (qce_disable_clk(podev->qce))
+ pr_err("%s Unable to disable clk\n", __func__);
+ return ret;
+ }
+ break;
+ case QCE_BW_REQUEST_FIRST:
+ ret = msm_bus_scale_client_update_request(
+ podev->bus_scale_handle, 1);
+ if (ret) {
+ pr_err("%s Unable to set high bw\n", __func__);
+ return ret;
+ }
+ ret = qce_enable_clk(podev->qce);
+ if (ret) {
+ pr_err("%s Unable enable clk\n", __func__);
+ ret = msm_bus_scale_client_update_request(
+ podev->bus_scale_handle, 0);
+ if (ret)
+ pr_err("%s Unable to set low bw\n", __func__);
+ return ret;
+ }
+ break;
+ case QCE_CLK_DISABLE_FIRST:
+ ret = qce_disable_clk(podev->qce);
+ if (ret) {
+ pr_err("%s Unable to disable clk\n", __func__);
+ return ret;
+ }
+ ret = msm_bus_scale_client_update_request(
+ podev->bus_scale_handle, 0);
+ if (ret) {
+ pr_err("%s Unable to set low bw\n", __func__);
+ if (qce_enable_clk(podev->qce))
+ pr_err("%s Unable to enable clk\n", __func__);
+ return ret;
+ }
+ break;
+ case QCE_BW_REQUEST_RESET_FIRST:
+ ret = msm_bus_scale_client_update_request(
+ podev->bus_scale_handle, 0);
+ if (ret) {
+ pr_err("%s Unable to set low bw\n", __func__);
+ return ret;
+ }
+ ret = qce_disable_clk(podev->qce);
+ if (ret) {
+ pr_err("%s Unable to disable clk\n", __func__);
+ if (msm_bus_scale_client_update_request(
+ podev->bus_scale_handle, 1))
+ pr_err("%s Unable to set high bw\n", __func__);
+ return ret;
+ }
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ return 0;
+}
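
For reference, the ordering the four flags encode, derived from the switch above (a summary sketch, not part of the patch):

    /*
     * enable  && !req_bw_before_clk -> QCE_CLK_ENABLE_FIRST:        clk on,  then bw high
     * enable  &&  req_bw_before_clk -> QCE_BW_REQUEST_FIRST:        bw high, then clk on
     * !enable &&  req_bw_before_clk -> QCE_CLK_DISABLE_FIRST:       clk off, then bw low
     * !enable && !req_bw_before_clk -> QCE_BW_REQUEST_RESET_FIRST:  bw low,  then clk off
     */

Each case undoes its first step if the second fails, so a caller observes either both resources transitioned or neither.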
+
static void qcedev_ce_high_bw_req(struct qcedev_control *podev,
bool high_bw_req)
{
@@ -66,47 +157,21 @@
mutex_lock(&qcedev_sent_bw_req);
if (high_bw_req) {
if (podev->high_bw_req_count == 0) {
- ret = qce_enable_clk(podev->qce);
- if (ret) {
- pr_err("%s Unable enable clk\n", __func__);
- mutex_unlock(&qcedev_sent_bw_req);
- return;
- }
- ret = msm_bus_scale_client_update_request(
- podev->bus_scale_handle, 1);
- if (ret) {
- pr_err("%s Unable to set to high bandwidth\n",
- __func__);
- ret = qce_disable_clk(podev->qce);
- mutex_unlock(&qcedev_sent_bw_req);
- return;
- }
+ ret = qcedev_control_clocks(podev, true);
+ if (ret)
+ goto exit_unlock_mutex;
}
podev->high_bw_req_count++;
} else {
if (podev->high_bw_req_count == 1) {
- ret = msm_bus_scale_client_update_request(
- podev->bus_scale_handle, 0);
- if (ret) {
- pr_err("%s Unable to set to low bandwidth\n",
- __func__);
- mutex_unlock(&qcedev_sent_bw_req);
- return;
- }
- ret = qce_disable_clk(podev->qce);
- if (ret) {
- pr_err("%s Unable disable clk\n", __func__);
- ret = msm_bus_scale_client_update_request(
- podev->bus_scale_handle, 1);
- if (ret)
- pr_err("%s Unable to set to high bandwidth\n",
- __func__);
- mutex_unlock(&qcedev_sent_bw_req);
- return;
- }
+ ret = qcedev_control_clocks(podev, false);
+ if (ret)
+ goto exit_unlock_mutex;
}
podev->high_bw_req_count--;
}
+
+exit_unlock_mutex:
mutex_unlock(&qcedev_sent_bw_req);
}
@@ -1767,32 +1832,47 @@
tasklet_init(&podev->done_tasklet, req_done, (unsigned long)podev);
- /* open qce */
+ podev->platform_support.bus_scale_table = (struct msm_bus_scale_pdata *)
+ msm_bus_cl_get_pdata(pdev);
+ if (!podev->platform_support.bus_scale_table) {
+ pr_err("bus_scale_table is NULL\n");
+ return -ENODATA;
+ }
+ podev->bus_scale_handle = msm_bus_scale_register_client(
+ (struct msm_bus_scale_pdata *)
+ podev->platform_support.bus_scale_table);
+ if (!podev->bus_scale_handle) {
+ pr_err("%s not able to get bus scale\n", __func__);
+ return -ENOMEM;
+ }
+
+ rc = msm_bus_scale_client_update_request(podev->bus_scale_handle, 1);
+ if (rc) {
+ pr_err("%s Unable to set to high bandwidth\n", __func__);
+ goto exit_unregister_bus_scale;
+ }
handle = qce_open(pdev, &rc);
if (handle == NULL) {
- platform_set_drvdata(pdev, NULL);
- return rc;
+ rc = -ENODEV;
+ goto exit_scale_busbandwidth;
+ }
+ rc = msm_bus_scale_client_update_request(podev->bus_scale_handle, 0);
+ if (rc) {
+ pr_err("%s Unable to set to low bandwidth\n", __func__);
+ goto exit_qce_close;
}
podev->qce = handle;
podev->pdev = pdev;
platform_set_drvdata(pdev, podev);
- rc = misc_register(&podev->miscdevice);
qce_hw_support(podev->qce, &podev->ce_support);
if (podev->ce_support.bam) {
podev->platform_support.ce_shared = 0;
podev->platform_support.shared_ce_resource = 0;
podev->platform_support.hw_key_support =
podev->ce_support.hw_key;
- podev->platform_support.bus_scale_table = NULL;
podev->platform_support.sha_hmac = 1;
-
- podev->platform_support.bus_scale_table =
- (struct msm_bus_scale_pdata *)
- msm_bus_cl_get_pdata(pdev);
- if (!podev->platform_support.bus_scale_table)
- pr_err("bus_scale_table is NULL\n");
} else {
platform_support =
(struct msm_ce_hw_support *)pdev->dev.platform_data;
@@ -1801,35 +1881,27 @@
platform_support->shared_ce_resource;
podev->platform_support.hw_key_support =
platform_support->hw_key_support;
- podev->platform_support.bus_scale_table =
- platform_support->bus_scale_table;
podev->platform_support.sha_hmac = platform_support->sha_hmac;
}
- if (podev->platform_support.bus_scale_table != NULL) {
- podev->bus_scale_handle =
- msm_bus_scale_register_client(
- (struct msm_bus_scale_pdata *)
- podev->platform_support.bus_scale_table);
- if (!podev->bus_scale_handle) {
- pr_err("%s not able to get bus scale\n",
- __func__);
- rc = -ENOMEM;
- goto err;
- }
- }
+ rc = misc_register(&podev->miscdevice);
if (rc >= 0)
return 0;
- if (podev->platform_support.bus_scale_table != NULL)
- msm_bus_scale_unregister_client(podev->bus_scale_handle);
-err:
+ misc_deregister(&podev->miscdevice);
+exit_qce_close:
if (handle)
qce_close(handle);
+exit_scale_busbandwidth:
+ msm_bus_scale_client_update_request(podev->bus_scale_handle, 0);
+exit_unregister_bus_scale:
+ if (podev->platform_support.bus_scale_table != NULL)
+ msm_bus_scale_unregister_client(podev->bus_scale_handle);
platform_set_drvdata(pdev, NULL);
- podev->qce = NULL;
podev->pdev = NULL;
+ podev->qce = NULL;
+
return rc;
};
@@ -1864,23 +1936,9 @@
mutex_lock(&qcedev_sent_bw_req);
if (podev->high_bw_req_count) {
- ret = msm_bus_scale_client_update_request(
- podev->bus_scale_handle, 0);
- if (ret) {
- pr_err("%s Unable to set to low bandwidth\n",
- __func__);
+ ret = qcedev_control_clocks(podev, false);
+ if (ret)
goto suspend_exit;
- }
- ret = qce_disable_clk(podev->qce);
- if (ret) {
- pr_err("%s Unable disable clk\n", __func__);
- ret = msm_bus_scale_client_update_request(
- podev->bus_scale_handle, 1);
- if (ret)
- pr_err("%s Unable to set to high bandwidth\n",
- __func__);
- goto suspend_exit;
- }
}
suspend_exit:
@@ -1900,22 +1958,9 @@
mutex_lock(&qcedev_sent_bw_req);
if (podev->high_bw_req_count) {
- ret = qce_enable_clk(podev->qce);
- if (ret) {
- pr_err("%s Unable enable clk\n", __func__);
+ ret = qcedev_control_clocks(podev, true);
+ if (ret)
goto resume_exit;
- }
- ret = msm_bus_scale_client_update_request(
- podev->bus_scale_handle, 1);
- if (ret) {
- pr_err("%s Unable to set to high bandwidth\n",
- __func__);
- ret = qce_disable_clk(podev->qce);
- if (ret)
- pr_err("%s Unable enable clk\n",
- __func__);
- goto resume_exit;
- }
}
resume_exit:
diff --git a/drivers/dma/qcom/Kconfig b/drivers/dma/qcom/Kconfig
index a7761c4..2afceb1 100644
--- a/drivers/dma/qcom/Kconfig
+++ b/drivers/dma/qcom/Kconfig
@@ -27,3 +27,22 @@
(user to kernel, kernel to kernel, etc.). It only supports
memcpy interface. The core is not intended for general
purpose slave DMA.
+
+config QCOM_GPI_DMA
+ tristate "Qualcomm Technologies Inc GPI DMA support"
+ depends on ARCH_QCOM
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for the QCOM GPI DMA controller. This controller
+ provides DMA capabilities for a variety of peripheral buses such
+ as I2C, UART, and SPI. By using the GPI dmaengine driver, bus
+ drivers can use a standardized, protocol-independent interface
+ to transfer data between DDR and peripherals.
+
+config QCOM_GPI_DMA_DEBUG
+ bool "Qualcomm Technologies Inc GPI debug support"
+ depends on QCOM_GPI_DMA
+ help
+ Enable detailed logging for QCOM GPI driver. Extra logging will be
+ helpful when debugging critical issues.
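
A minimal defconfig fragment enabling both new options; whether to build the driver as y or m is the integrator's choice, so the fragment below is only an illustration:

    CONFIG_QCOM_GPI_DMA=y
    CONFIG_QCOM_GPI_DMA_DEBUG=y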
diff --git a/drivers/dma/qcom/Makefile b/drivers/dma/qcom/Makefile
index 4bfc38b..6476ac5 100644
--- a/drivers/dma/qcom/Makefile
+++ b/drivers/dma/qcom/Makefile
@@ -3,3 +3,4 @@
hdma_mgmt-objs := hidma_mgmt.o hidma_mgmt_sys.o
obj-$(CONFIG_QCOM_HIDMA) += hdma.o
hdma-objs := hidma_ll.o hidma.o hidma_dbg.o
+obj-$(CONFIG_QCOM_GPI_DMA) += gpi.o
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
new file mode 100644
index 0000000..6e6f28f
--- /dev/null
+++ b/drivers/dma/qcom/gpi.c
@@ -0,0 +1,2816 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/dma-iommu.h>
+#include <linux/atomic.h>
+#include <linux/completion.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ipc_logging.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_dma.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/sched_clock.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <asm/cacheflush.h>
+#include <linux/msm_gpi.h>
+#include "../dmaengine.h"
+#include "../virt-dma.h"
+#include "msm_gpi_mmio.h"
+
+/* global logging macros */
+#define GPI_LOG(gpi_dev, fmt, ...) do { \
+ if (gpi_dev->klog_lvl != LOG_LVL_MASK_ALL) \
+ dev_dbg(gpi_dev->dev, "%s: " fmt, __func__, ##__VA_ARGS__); \
+ if (gpi_dev->ilctxt && gpi_dev->ipc_log_lvl != LOG_LVL_MASK_ALL) \
+ ipc_log_string(gpi_dev->ilctxt, \
+ "%s: " fmt, __func__, ##__VA_ARGS__); \
+ } while (0)
+#define GPI_ERR(gpi_dev, fmt, ...) do { \
+ if (gpi_dev->klog_lvl >= LOG_LVL_ERROR) \
+ dev_err(gpi_dev->dev, "%s: " fmt, __func__, ##__VA_ARGS__); \
+ if (gpi_dev->ilctxt && gpi_dev->ipc_log_lvl >= LOG_LVL_ERROR) \
+ ipc_log_string(gpi_dev->ilctxt, \
+ "%s: " fmt, __func__, ##__VA_ARGS__); \
+ } while (0)
+
+/* gpii specific logging macros */
+#define GPII_REG(gpii, ch, fmt, ...) do { \
+ if (gpii->klog_lvl >= LOG_LVL_REG_ACCESS) \
+ pr_info("%s:%u:%s: " fmt, gpii->label, \
+ ch, __func__, ##__VA_ARGS__); \
+ if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_REG_ACCESS) \
+ ipc_log_string(gpii->ilctxt, \
+ "ch:%u %s: " fmt, ch, \
+ __func__, ##__VA_ARGS__); \
+ } while (0)
+#define GPII_VERB(gpii, ch, fmt, ...) do { \
+ if (gpii->klog_lvl >= LOG_LVL_VERBOSE) \
+ pr_info("%s:%u:%s: " fmt, gpii->label, \
+ ch, __func__, ##__VA_ARGS__); \
+ if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_VERBOSE) \
+ ipc_log_string(gpii->ilctxt, \
+ "ch:%u %s: " fmt, ch, \
+ __func__, ##__VA_ARGS__); \
+ } while (0)
+#define GPII_INFO(gpii, ch, fmt, ...) do { \
+ if (gpii->klog_lvl >= LOG_LVL_INFO) \
+ pr_info("%s:%u:%s: " fmt, gpii->label, ch, \
+ __func__, ##__VA_ARGS__); \
+ if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_INFO) \
+ ipc_log_string(gpii->ilctxt, \
+ "ch:%u %s: " fmt, ch, \
+ __func__, ##__VA_ARGS__); \
+ } while (0)
+#define GPII_ERR(gpii, ch, fmt, ...) do { \
+ if (gpii->klog_lvl >= LOG_LVL_ERROR) \
+ pr_err("%s:%u:%s: " fmt, gpii->label, ch, \
+ __func__, ##__VA_ARGS__); \
+ if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_ERROR) \
+ ipc_log_string(gpii->ilctxt, \
+ "ch:%u %s: " fmt, ch, \
+ __func__, ##__VA_ARGS__); \
+ } while (0)
+#define GPII_CRITIC(gpii, ch, fmt, ...) do { \
+ if (gpii->klog_lvl >= LOG_LVL_CRITICAL) \
+ pr_err("%s:%u:%s: " fmt, gpii->label, ch, \
+ __func__, ##__VA_ARGS__); \
+ if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_CRITICAL) \
+ ipc_log_string(gpii->ilctxt, \
+ "ch:%u %s: " fmt, ch, \
+ __func__, ##__VA_ARGS__); \
+ } while (0)
+
+enum DEBUG_LOG_LVL {
+ LOG_LVL_MASK_ALL,
+ LOG_LVL_CRITICAL,
+ LOG_LVL_ERROR,
+ LOG_LVL_INFO,
+ LOG_LVL_VERBOSE,
+ LOG_LVL_REG_ACCESS,
+};
+
+enum EV_PRIORITY {
+ EV_PRIORITY_ISR,
+ EV_PRIORITY_TASKLET,
+};
+
+#define GPI_DMA_DRV_NAME "gpi_dma"
+#define DEFAULT_KLOG_LVL (LOG_LVL_CRITICAL)
+#ifdef CONFIG_QCOM_GPI_DMA_DEBUG
+#define DEFAULT_IPC_LOG_LVL (LOG_LVL_VERBOSE)
+#define IPC_LOG_PAGES (40)
+#define GPI_DBG_LOG_SIZE (SZ_1K) /* size must be power of 2 */
+#else
+#define IPC_LOG_PAGES (2)
+#define GPI_DBG_LOG_SIZE (0) /* size must be power of 2 */
+#define DEFAULT_IPC_LOG_LVL (LOG_LVL_ERROR)
+#endif
+
+#define GPI_LABEL_SIZE (256)
+#define GPI_DBG_COMMON (99)
+#define MAX_CHANNELS_PER_GPII (2)
+#define CMD_TIMEOUT_MS (50)
+#define STATE_IGNORE (U32_MAX)
+#define REQ_OF_DMA_ARGS (6) /* # of arguments required from client */
+
+struct __packed gpi_error_log_entry {
+ u32 routine : 4;
+ u32 type : 4;
+ u32 reserved0 : 4;
+ u32 code : 4;
+ u32 reserved1 : 3;
+ u32 chid : 5;
+ u32 reserved2 : 1;
+ u32 chtype : 1;
+ u32 ee : 1;
+};
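
The bitfield above mirrors the layout of the 32-bit GPI_GPII_n_ERROR_LOG word. A minimal sketch of decoding a raw value through it, using the same cast gpi_process_glob_err_irq() performs later in this file (the helper name here is hypothetical):

    static void gpi_decode_error_log(u32 error_log)
    {
            struct gpi_error_log_entry *log_entry =
                    (struct gpi_error_log_entry *)&error_log;

            pr_info("ee:%u chtype:%u chid:%u routine:%u type:%u code:%u\n",
                    log_entry->ee, log_entry->chtype, log_entry->chid,
                    log_entry->routine, log_entry->type, log_entry->code);
    }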
+
+struct __packed xfer_compl_event {
+ u64 ptr;
+ u32 length : 24;
+ u8 code;
+ u16 status;
+ u8 type;
+ u8 chid;
+};
+
+struct __packed immediate_data_event {
+ u8 data_bytes[8];
+ u8 length : 4;
+ u8 resvd : 4;
+ u16 tre_index;
+ u8 code;
+ u16 status;
+ u8 type;
+ u8 chid;
+};
+
+struct __packed qup_notif_event {
+ u32 status;
+ u32 time;
+ u32 count : 24;
+ u8 resvd;
+ u16 resvd1;
+ u8 type;
+ u8 chid;
+};
+
+struct __packed gpi_ere {
+ u32 dword[4];
+};
+
+enum GPI_EV_TYPE {
+ XFER_COMPLETE_EV_TYPE = 0x22,
+ IMMEDIATE_DATA_EV_TYPE = 0x30,
+ QUP_NOTIF_EV_TYPE = 0x31,
+ STALE_EV_TYPE = 0xFF,
+};
+
+union __packed gpi_event {
+ struct __packed xfer_compl_event xfer_compl_event;
+ struct __packed immediate_data_event immediate_data_event;
+ struct __packed qup_notif_event qup_notif_event;
+ struct __packed gpi_ere gpi_ere;
+};
+
+enum gpii_irq_settings {
+ DEFAULT_IRQ_SETTINGS,
+ MASK_IEOB_SETTINGS,
+};
+
+enum gpi_ev_state {
+ DEFAULT_EV_CH_STATE = 0,
+ EV_STATE_NOT_ALLOCATED = DEFAULT_EV_CH_STATE,
+ EV_STATE_ALLOCATED,
+ MAX_EV_STATES
+};
+
+static const char *const gpi_ev_state_str[MAX_EV_STATES] = {
+ [EV_STATE_NOT_ALLOCATED] = "NOT ALLOCATED",
+ [EV_STATE_ALLOCATED] = "ALLOCATED",
+};
+
+#define TO_GPI_EV_STATE_STR(state) ((state >= MAX_EV_STATES) ? \
+ "INVALID" : gpi_ev_state_str[state])
+
+enum gpi_ch_state {
+ DEFAULT_CH_STATE = 0x0,
+ CH_STATE_NOT_ALLOCATED = DEFAULT_CH_STATE,
+ CH_STATE_ALLOCATED = 0x1,
+ CH_STATE_STARTED = 0x2,
+ CH_STATE_STOPPED = 0x3,
+ CH_STATE_STOP_IN_PROC = 0x4,
+ CH_STATE_ERROR = 0xf,
+ MAX_CH_STATES
+};
+
+static const char *const gpi_ch_state_str[MAX_CH_STATES] = {
+ [CH_STATE_NOT_ALLOCATED] = "NOT ALLOCATED",
+ [CH_STATE_ALLOCATED] = "ALLOCATED",
+ [CH_STATE_STARTED] = "STARTED",
+ [CH_STATE_STOPPED] = "STOPPED",
+ [CH_STATE_STOP_IN_PROC] = "STOP IN PROCESS",
+ [CH_STATE_ERROR] = "ERROR",
+};
+
+#define TO_GPI_CH_STATE_STR(state) ((state >= MAX_CH_STATES) ? \
+ "INVALID" : gpi_ch_state_str[state])
+
+enum gpi_cmd {
+ GPI_CH_CMD_BEGIN,
+ GPI_CH_CMD_ALLOCATE = GPI_CH_CMD_BEGIN,
+ GPI_CH_CMD_START,
+ GPI_CH_CMD_STOP,
+ GPI_CH_CMD_RESET,
+ GPI_CH_CMD_DE_ALLOC,
+ GPI_CH_CMD_UART_SW_STALE,
+ GPI_CH_CMD_UART_RFR_READY,
+ GPI_CH_CMD_UART_RFR_NOT_READY,
+ GPI_CH_CMD_END = GPI_CH_CMD_UART_RFR_NOT_READY,
+ GPI_EV_CMD_BEGIN,
+ GPI_EV_CMD_ALLOCATE = GPI_EV_CMD_BEGIN,
+ GPI_EV_CMD_RESET,
+ GPI_EV_CMD_DEALLOC,
+ GPI_EV_CMD_END = GPI_EV_CMD_DEALLOC,
+ GPI_MAX_CMD,
+};
+
+#define IS_CHAN_CMD(cmd) (cmd <= GPI_CH_CMD_END)
+
+static const char *const gpi_cmd_str[GPI_MAX_CMD] = {
+ [GPI_CH_CMD_ALLOCATE] = "CH ALLOCATE",
+ [GPI_CH_CMD_START] = "CH START",
+ [GPI_CH_CMD_STOP] = "CH STOP",
+ [GPI_CH_CMD_RESET] = "CH RESET",
+ [GPI_CH_CMD_DE_ALLOC] = "DE ALLOC",
+ [GPI_CH_CMD_UART_SW_STALE] = "UART SW STALE",
+ [GPI_CH_CMD_UART_RFR_READY] = "UART RFR READY",
+ [GPI_CH_CMD_UART_RFR_NOT_READY] = "UART RFR NOT READY",
+ [GPI_EV_CMD_ALLOCATE] = "EV ALLOCATE",
+ [GPI_EV_CMD_RESET] = "EV RESET",
+ [GPI_EV_CMD_DEALLOC] = "EV DEALLOC",
+};
+
+#define TO_GPI_CMD_STR(cmd) ((cmd >= GPI_MAX_CMD) ? "INVALID" : \
+ gpi_cmd_str[cmd])
+
+static const char *const gpi_cb_event_str[MSM_GPI_QUP_MAX_EVENT] = {
+ [MSM_GPI_QUP_NOTIFY] = "NOTIFY",
+ [MSM_GPI_QUP_ERROR] = "GLOBAL ERROR",
+ [MSM_GPI_QUP_CH_ERROR] = "CHAN ERROR",
+ [MSM_GPI_QUP_PENDING_EVENT] = "PENDING EVENT",
+ [MSM_GPI_QUP_EOT_DESC_MISMATCH] = "EOT/DESC MISMATCH",
+ [MSM_GPI_QUP_SW_ERROR] = "SW ERROR",
+};
+
+#define TO_GPI_CB_EVENT_STR(event) ((event >= MSM_GPI_QUP_MAX_EVENT) ? \
+ "INVALID" : gpi_cb_event_str[event])
+
+enum se_protocol {
+ SE_PROTOCOL_SPI = 1,
+ SE_PROTOCOL_UART = 2,
+ SE_PROTOCOL_I2C = 3,
+ SE_MAX_PROTOCOL
+};
+
+/*
+ * @DISABLE_STATE: no register access allowed
+ * @CONFIG_STATE: client has configured the channel
+ * @PREPARE_HARDWARE: register access is allowed,
+ * but events are not processed
+ * @ACTIVE_STATE: channels are fully operational
+ * @PREPARE_TERMINATE: graceful termination of channels;
+ * register access is allowed
+ * @PAUSE_STATE: channels are active, but not processing any events
+ */
+enum gpi_pm_state {
+ DISABLE_STATE,
+ CONFIG_STATE,
+ PREPARE_HARDWARE,
+ ACTIVE_STATE,
+ PREPARE_TERMINATE,
+ PAUSE_STATE,
+ MAX_PM_STATE
+};
+
+#define REG_ACCESS_VALID(pm_state) (pm_state >= PREPARE_HARDWARE)
+
+static const char *const gpi_pm_state_str[MAX_PM_STATE] = {
+ [DISABLE_STATE] = "DISABLE",
+ [CONFIG_STATE] = "CONFIG",
+ [PREPARE_HARDWARE] = "PREPARE HARDWARE",
+ [ACTIVE_STATE] = "ACTIVE",
+ [PREPARE_TERMINATE] = "PREPARE TERMINATE",
+ [PAUSE_STATE] = "PAUSE",
+};
+
+#define TO_GPI_PM_STR(state) ((state >= MAX_PM_STATE) ? \
+ "INVALID" : gpi_pm_state_str[state])
+
+static const struct {
+ enum gpi_cmd gpi_cmd;
+ u32 opcode;
+ u32 state;
+ u32 timeout_ms;
+} gpi_cmd_info[GPI_MAX_CMD] = {
+ {
+ GPI_CH_CMD_ALLOCATE,
+ GPI_GPII_n_CH_CMD_ALLOCATE,
+ CH_STATE_ALLOCATED,
+ CMD_TIMEOUT_MS,
+ },
+ {
+ GPI_CH_CMD_START,
+ GPI_GPII_n_CH_CMD_START,
+ CH_STATE_STARTED,
+ CMD_TIMEOUT_MS,
+ },
+ {
+ GPI_CH_CMD_STOP,
+ GPI_GPII_n_CH_CMD_STOP,
+ CH_STATE_STOPPED,
+ CMD_TIMEOUT_MS,
+ },
+ {
+ GPI_CH_CMD_RESET,
+ GPI_GPII_n_CH_CMD_RESET,
+ CH_STATE_ALLOCATED,
+ CMD_TIMEOUT_MS,
+ },
+ {
+ GPI_CH_CMD_DE_ALLOC,
+ GPI_GPII_n_CH_CMD_DE_ALLOC,
+ CH_STATE_NOT_ALLOCATED,
+ CMD_TIMEOUT_MS,
+ },
+ {
+ GPI_CH_CMD_UART_SW_STALE,
+ GPI_GPII_n_CH_CMD_UART_SW_STALE,
+ STATE_IGNORE,
+ CMD_TIMEOUT_MS,
+ },
+ {
+ GPI_CH_CMD_UART_RFR_READY,
+ GPI_GPII_n_CH_CMD_UART_RFR_READY,
+ STATE_IGNORE,
+ CMD_TIMEOUT_MS,
+ },
+ {
+ GPI_CH_CMD_UART_RFR_NOT_READY,
+ GPI_GPII_n_CH_CMD_UART_RFR_NOT_READY,
+ STATE_IGNORE,
+ CMD_TIMEOUT_MS,
+ },
+ {
+ GPI_EV_CMD_ALLOCATE,
+ GPI_GPII_n_EV_CH_CMD_ALLOCATE,
+ EV_STATE_ALLOCATED,
+ CMD_TIMEOUT_MS,
+ },
+ {
+ GPI_EV_CMD_RESET,
+ GPI_GPII_n_EV_CH_CMD_RESET,
+ EV_STATE_ALLOCATED,
+ CMD_TIMEOUT_MS,
+ },
+ {
+ GPI_EV_CMD_DEALLOC,
+ GPI_GPII_n_EV_CH_CMD_DE_ALLOC,
+ EV_STATE_NOT_ALLOCATED,
+ CMD_TIMEOUT_MS,
+ },
+};
+
+struct gpi_ring {
+ void *pre_aligned;
+ size_t alloc_size;
+ phys_addr_t phys_addr;
+ dma_addr_t dma_handle;
+ void *base;
+ void *wp;
+ void *rp;
+ u32 len;
+ u32 el_size;
+ u32 elements;
+ bool configured;
+};
+
+struct sg_tre {
+ void *ptr;
+ void *wp; /* store chan wp for debugging */
+};
+
+struct gpi_dbg_log {
+ void *addr;
+ u64 time;
+ u32 val;
+ bool read;
+};
+
+struct gpi_dev {
+ struct dma_device dma_device;
+ struct device *dev;
+ struct resource *res;
+ void __iomem *regs;
+ u32 max_gpii; /* maximum # of gpii instances available per gpi block */
+ u32 gpii_mask; /* gpii instances available for apps */
+ u32 ev_factor; /* ev ring length factor */
+ struct gpii *gpiis;
+ void *ilctxt;
+ u32 ipc_log_lvl;
+ u32 klog_lvl;
+ struct dentry *dentry;
+};
+
+struct gpii_chan {
+ struct virt_dma_chan vc;
+ u32 chid;
+ u32 seid;
+ enum se_protocol protocol;
+ enum EV_PRIORITY priority; /* comes from clients DT node */
+ struct gpii *gpii;
+ enum gpi_ch_state ch_state;
+ enum gpi_pm_state pm_state;
+ void __iomem *ch_cntxt_base_reg;
+ void __iomem *ch_cntxt_db_reg;
+ void __iomem *ch_ring_base_lsb_reg,
+ *ch_ring_rp_lsb_reg,
+ *ch_ring_wp_lsb_reg;
+ void __iomem *ch_cmd_reg;
+ u32 req_tres; /* # of tre's client requested */
+ u32 dir;
+ struct gpi_ring ch_ring;
+ struct gpi_ring sg_ring; /* points to client scatterlist */
+ struct gpi_client_info client_info;
+};
+
+struct gpii {
+ u32 gpii_id;
+ struct gpii_chan gpii_chan[MAX_CHANNELS_PER_GPII];
+ struct gpi_dev *gpi_dev;
+ enum EV_PRIORITY ev_priority;
+ enum se_protocol protocol;
+ int irq;
+ void __iomem *regs; /* points to gpi top */
+ void __iomem *ev_cntxt_base_reg;
+ void __iomem *ev_cntxt_db_reg;
+ void __iomem *ev_ring_base_lsb_reg,
+ *ev_ring_rp_lsb_reg,
+ *ev_ring_wp_lsb_reg;
+ void __iomem *ev_cmd_reg;
+ void __iomem *ieob_src_reg;
+ void __iomem *ieob_clr_reg;
+ struct mutex ctrl_lock;
+ enum gpi_ev_state ev_state;
+ bool configured_irq;
+ enum gpi_pm_state pm_state;
+ rwlock_t pm_lock;
+ struct gpi_ring ev_ring;
+ struct tasklet_struct ev_task; /* event processing tasklet */
+ struct completion cmd_completion;
+ enum gpi_cmd gpi_cmd;
+ u32 cntxt_type_irq_msk;
+ void *ilctxt;
+ u32 ipc_log_lvl;
+ u32 klog_lvl;
+ struct gpi_dbg_log dbg_log[GPI_DBG_LOG_SIZE];
+ atomic_t dbg_index;
+ char label[GPI_LABEL_SIZE];
+ struct dentry *dentry;
+};
+
+struct gpi_desc {
+ struct virt_dma_desc vd;
+ void *wp; /* points to TRE last queued during issue_pending */
+ struct sg_tre *sg_tre; /* points to last scatterlist */
+ void *db; /* DB register to program */
+ struct gpii_chan *gpii_chan;
+};
+
+const u32 GPII_CHAN_DIR[MAX_CHANNELS_PER_GPII] = {
+ GPI_CHTYPE_DIR_OUT, GPI_CHTYPE_DIR_IN
+};
+
+struct dentry *pdentry;
+static irqreturn_t gpi_handle_irq(int irq, void *data);
+static void gpi_ring_recycle_ev_element(struct gpi_ring *ring);
+static int gpi_ring_add_element(struct gpi_ring *ring, void **wp);
+static void gpi_process_events(struct gpii *gpii);
+
+static inline struct gpii_chan *to_gpii_chan(struct dma_chan *dma_chan)
+{
+ return container_of(dma_chan, struct gpii_chan, vc.chan);
+}
+
+static inline struct gpi_desc *to_gpi_desc(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct gpi_desc, vd);
+}
+
+static inline phys_addr_t to_physical(const struct gpi_ring *const ring,
+ void *addr)
+{
+ return ring->phys_addr + (addr - ring->base);
+}
+
+static inline void *to_virtual(const struct gpi_ring *const ring,
+ phys_addr_t addr)
+{
+ return ring->base + (addr - ring->phys_addr);
+}
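
Both helpers assume each ring occupies one physically contiguous allocation. The event and transfer handlers below pair them with a common advance-and-wrap step when moving rp/wp; a hedged sketch of that step as a helper (no such function exists in the driver, which open-codes the pattern):

    static inline void *gpi_ring_next(const struct gpi_ring *ring, void *ptr)
    {
            /* advance one element, wrapping back to base at base + len */
            ptr += ring->el_size;
            if (ptr >= (ring->base + ring->len))
                    ptr = ring->base;
            return ptr;
    }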
+
+#ifdef CONFIG_QCOM_GPI_DMA_DEBUG
+static inline u32 gpi_read_reg(struct gpii *gpii, void __iomem *addr)
+{
+ u64 time = sched_clock();
+ unsigned int index = atomic_inc_return(&gpii->dbg_index) - 1;
+ u32 val;
+
+ val = readl_relaxed(addr);
+ index &= (GPI_DBG_LOG_SIZE - 1);
+ (gpii->dbg_log + index)->addr = addr;
+ (gpii->dbg_log + index)->time = time;
+ (gpii->dbg_log + index)->val = val;
+ (gpii->dbg_log + index)->read = true;
+ GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx val:0x%x\n",
+ addr - gpii->regs, val);
+ return val;
+}
+static inline void gpi_write_reg(struct gpii *gpii, void __iomem *addr, u32 val)
+{
+ u64 time = sched_clock();
+ unsigned int index = atomic_inc_return(&gpii->dbg_index) - 1;
+
+ index &= (GPI_DBG_LOG_SIZE - 1);
+ (gpii->dbg_log + index)->addr = addr;
+ (gpii->dbg_log + index)->time = time;
+ (gpii->dbg_log + index)->val = val;
+ (gpii->dbg_log + index)->read = false;
+
+ GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx val:0x%x\n",
+ addr - gpii->regs, val);
+ writel_relaxed(val, addr);
+}
+#else
+static inline u32 gpi_read_reg(struct gpii *gpii, void __iomem *addr)
+{
+ u32 val = readl_relaxed(addr);
+
+ GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx val:0x%x\n",
+ addr - gpii->regs, val);
+ return val;
+}
+static inline void gpi_write_reg(struct gpii *gpii, void __iomem *addr, u32 val)
+{
+ GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx val:0x%x\n",
+ addr - gpii->regs, val);
+ writel_relaxed(val, addr);
+}
+#endif
+
+/* gpi_write_reg_field - write to specific bit field */
+static inline void gpi_write_reg_field(struct gpii *gpii,
+ void __iomem *addr,
+ u32 mask,
+ u32 shift,
+ u32 val)
+{
+ u32 tmp = gpi_read_reg(gpii, addr);
+
+ tmp &= ~mask;
+ val = tmp | ((val << shift) & mask);
+ gpi_write_reg(gpii, addr, val);
+}
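
A worked example for the read-modify-write above, with made-up numbers:

    /*
     * current = 0xAB, mask = 0xF0, shift = 4, val = 0x3
     * tmp = 0xAB & ~0xF0               = 0x0B
     * new = 0x0B | ((0x3 << 4) & 0xF0) = 0x3B
     */

Only the bits under mask change; the rest of the register is preserved.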
+
+static void gpi_disable_interrupts(struct gpii *gpii)
+{
+ struct {
+ u32 offset;
+ u32 mask;
+ u32 shift;
+ u32 val;
+ } default_reg[] = {
+ {
+ GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS
+ (gpii->gpii_id),
+ GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK,
+ GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_SHFT,
+ 0,
+ },
+ {
+ GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS
+ (gpii->gpii_id),
+ GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK,
+ GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_SHFT,
+ 0,
+ },
+ {
+ GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS
+ (gpii->gpii_id),
+ GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK,
+ GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_SHFT,
+ 0,
+ },
+ {
+ GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS
+ (gpii->gpii_id),
+ GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK,
+ GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_SHFT,
+ 0,
+ },
+ {
+ GPI_GPII_n_CNTXT_GLOB_IRQ_EN_OFFS
+ (gpii->gpii_id),
+ GPI_GPII_n_CNTXT_GLOB_IRQ_EN_BMSK,
+ GPI_GPII_n_CNTXT_GLOB_IRQ_EN_SHFT,
+ 0,
+ },
+ {
+ GPI_GPII_n_CNTXT_GPII_IRQ_EN_OFFS
+ (gpii->gpii_id),
+ GPI_GPII_n_CNTXT_GPII_IRQ_EN_BMSK,
+ GPI_GPII_n_CNTXT_GPII_IRQ_EN_SHFT,
+ 0,
+ },
+ {
+ GPI_GPII_n_CNTXT_INTSET_OFFS
+ (gpii->gpii_id),
+ GPI_GPII_n_CNTXT_INTSET_BMSK,
+ GPI_GPII_n_CNTXT_INTSET_SHFT,
+ 0,
+ },
+ { 0 },
+ };
+ int i;
+
+ for (i = 0; default_reg[i].offset; i++)
+ gpi_write_reg_field(gpii, gpii->regs +
+ default_reg[i].offset,
+ default_reg[i].mask,
+ default_reg[i].shift,
+ default_reg[i].val);
+ gpii->cntxt_type_irq_msk = 0;
+ devm_free_irq(gpii->gpi_dev->dev, gpii->irq, gpii);
+ gpii->configured_irq = false;
+}
+
+/* configure and enable interrupts */
+static int gpi_config_interrupts(struct gpii *gpii,
+ enum gpii_irq_settings settings,
+ bool mask)
+{
+ int ret;
+ int i;
+ const u32 def_type = (GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GENERAL |
+ GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB |
+ GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB |
+ GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL |
+ GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL);
+ struct {
+ u32 offset;
+ u32 mask;
+ u32 shift;
+ u32 val;
+ } default_reg[] = {
+ {
+ GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS
+ (gpii->gpii_id),
+ GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK,
+ GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_SHFT,
+ def_type,
+ },
+ {
+ GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS
+ (gpii->gpii_id),
+ GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK,
+ GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_SHFT,
+ GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK,
+ },
+ {
+ GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS
+ (gpii->gpii_id),
+ GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK,
+ GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_SHFT,
+ GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK,
+ },
+ {
+ GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS
+ (gpii->gpii_id),
+ GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK,
+ GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_SHFT,
+ GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK,
+ },
+ {
+ GPI_GPII_n_CNTXT_GLOB_IRQ_EN_OFFS
+ (gpii->gpii_id),
+ GPI_GPII_n_CNTXT_GLOB_IRQ_EN_BMSK,
+ GPI_GPII_n_CNTXT_GLOB_IRQ_EN_SHFT,
+ GPI_GPII_n_CNTXT_GLOB_IRQ_EN_ERROR_INT,
+ },
+ {
+ GPI_GPII_n_CNTXT_GPII_IRQ_EN_OFFS
+ (gpii->gpii_id),
+ GPI_GPII_n_CNTXT_GPII_IRQ_EN_BMSK,
+ GPI_GPII_n_CNTXT_GPII_IRQ_EN_SHFT,
+ GPI_GPII_n_CNTXT_GPII_IRQ_EN_BMSK,
+ },
+ {
+ GPI_GPII_n_CNTXT_MSI_BASE_LSB_OFFS
+ (gpii->gpii_id),
+ U32_MAX,
+ 0,
+ 0x0,
+ },
+ {
+ GPI_GPII_n_CNTXT_MSI_BASE_MSB_OFFS
+ (gpii->gpii_id),
+ U32_MAX,
+ 0,
+ 0x0,
+ },
+ {
+ GPI_GPII_n_CNTXT_SCRATCH_0_OFFS
+ (gpii->gpii_id),
+ U32_MAX,
+ 0,
+ 0x0,
+ },
+ {
+ GPI_GPII_n_CNTXT_SCRATCH_1_OFFS
+ (gpii->gpii_id),
+ U32_MAX,
+ 0,
+ 0x0,
+ },
+ {
+ GPI_GPII_n_CNTXT_INTSET_OFFS
+ (gpii->gpii_id),
+ GPI_GPII_n_CNTXT_INTSET_BMSK,
+ GPI_GPII_n_CNTXT_INTSET_SHFT,
+ 0x01,
+ },
+ {
+ GPI_GPII_n_ERROR_LOG_OFFS
+ (gpii->gpii_id),
+ U32_MAX,
+ 0,
+ 0x00,
+ },
+ { 0 },
+ };
+
+ GPII_VERB(gpii, GPI_DBG_COMMON, "configured:%c setting:%s mask:%c\n",
+ (gpii->configured_irq) ? 'F' : 'T',
+ (settings == DEFAULT_IRQ_SETTINGS) ? "default" : "user_spec",
+ (mask) ? 'T' : 'F');
+
+ if (gpii->configured_irq == false) {
+ ret = devm_request_irq(gpii->gpi_dev->dev, gpii->irq,
+ gpi_handle_irq, IRQF_TRIGGER_HIGH,
+ gpii->label, gpii);
+ if (ret < 0) {
+ GPII_CRITIC(gpii, GPI_DBG_COMMON,
+ "error request irq:%d ret:%d\n",
+ gpii->irq, ret);
+ return ret;
+ }
+ }
+
+ if (settings == MASK_IEOB_SETTINGS) {
+ /*
+ * GPII only uses one EV ring per gpii so we can globally
+ * enable/disable IEOB interrupt
+ */
+ if (mask)
+ gpii->cntxt_type_irq_msk |=
+ GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB;
+ else
+ gpii->cntxt_type_irq_msk &=
+ ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB);
+ gpi_write_reg_field(gpii, gpii->regs +
+ GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(gpii->gpii_id),
+ GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK,
+ GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_SHFT,
+ gpii->cntxt_type_irq_msk);
+ } else {
+ for (i = 0; default_reg[i].offset; i++)
+ gpi_write_reg_field(gpii, gpii->regs +
+ default_reg[i].offset,
+ default_reg[i].mask,
+ default_reg[i].shift,
+ default_reg[i].val);
+ gpii->cntxt_type_irq_msk = def_type;
+ }
+
+ gpii->configured_irq = true;
+
+ return 0;
+}
+
+/* Sends gpii event or channel command */
+static int gpi_send_cmd(struct gpii *gpii,
+ struct gpii_chan *gpii_chan,
+ enum gpi_cmd gpi_cmd)
+{
+ u32 chid = MAX_CHANNELS_PER_GPII;
+ u32 cmd;
+ unsigned long timeout;
+ void __iomem *cmd_reg;
+
+ if (gpi_cmd >= GPI_MAX_CMD)
+ return -EINVAL;
+ if (IS_CHAN_CMD(gpi_cmd))
+ chid = gpii_chan->chid;
+
+ GPII_INFO(gpii, chid,
+ "sending cmd: %s\n", TO_GPI_CMD_STR(gpi_cmd));
+
+ /* send opcode and wait for completion */
+ reinit_completion(&gpii->cmd_completion);
+ gpii->gpi_cmd = gpi_cmd;
+
+ cmd_reg = IS_CHAN_CMD(gpi_cmd) ? gpii_chan->ch_cmd_reg :
+ gpii->ev_cmd_reg;
+ cmd = IS_CHAN_CMD(gpi_cmd) ?
+ GPI_GPII_n_CH_CMD(gpi_cmd_info[gpi_cmd].opcode, chid) :
+ GPI_GPII_n_EV_CH_CMD(gpi_cmd_info[gpi_cmd].opcode, 0);
+ gpi_write_reg(gpii, cmd_reg, cmd);
+ timeout = wait_for_completion_timeout(&gpii->cmd_completion,
+ msecs_to_jiffies(gpi_cmd_info[gpi_cmd].timeout_ms));
+
+ if (!timeout) {
+ GPII_ERR(gpii, chid, "cmd: %s completion timeout\n",
+ TO_GPI_CMD_STR(gpi_cmd));
+ return -EIO;
+ }
+
+ /* if the cmd changes state, confirm the new ch state is correct */
+ if (gpi_cmd_info[gpi_cmd].state == STATE_IGNORE)
+ return 0;
+ if (IS_CHAN_CMD(gpi_cmd) &&
+ gpii_chan->ch_state == gpi_cmd_info[gpi_cmd].state)
+ return 0;
+ if (!IS_CHAN_CMD(gpi_cmd) &&
+ gpii->ev_state == gpi_cmd_info[gpi_cmd].state)
+ return 0;
+
+ return -EIO;
+}
+
+/* program transfer ring DB register */
+static inline void gpi_write_ch_db(struct gpii_chan *gpii_chan,
+ struct gpi_ring *ring,
+ void *wp)
+{
+ struct gpii *gpii = gpii_chan->gpii;
+ phys_addr_t p_wp;
+
+ p_wp = to_physical(ring, wp);
+ gpi_write_reg(gpii, gpii_chan->ch_cntxt_db_reg, (u32)p_wp);
+}
+
+/* program event ring DB register */
+static inline void gpi_write_ev_db(struct gpii *gpii,
+ struct gpi_ring *ring,
+ void *wp)
+{
+ phys_addr_t p_wp;
+
+ p_wp = ring->phys_addr + (wp - ring->base);
+ gpi_write_reg(gpii, gpii->ev_cntxt_db_reg, (u32)p_wp);
+}
+
+/* notify client with generic event */
+static void gpi_generate_cb_event(struct gpii_chan *gpii_chan,
+ enum msm_gpi_cb_event event,
+ u64 status)
+{
+ struct gpii *gpii = gpii_chan->gpii;
+ struct gpi_client_info *client_info = &gpii_chan->client_info;
+ struct msm_gpi_cb msm_gpi_cb = {0};
+
+ GPII_ERR(gpii, gpii_chan->chid,
+ "notifying event:%s with status:%llu\n",
+ TO_GPI_CB_EVENT_STR(event), status);
+
+ msm_gpi_cb.cb_event = event;
+ msm_gpi_cb.status = status;
+ msm_gpi_cb.timestamp = sched_clock();
+ client_info->callback(&gpii_chan->vc.chan, &msm_gpi_cb,
+ client_info->cb_param);
+}
+
+/* process transfer completion interrupt */
+static void gpi_process_ieob(struct gpii *gpii)
+{
+ u32 ieob_irq;
+
+ ieob_irq = gpi_read_reg(gpii, gpii->ieob_src_reg);
+ gpi_write_reg(gpii, gpii->ieob_clr_reg, ieob_irq);
+ GPII_VERB(gpii, GPI_DBG_COMMON, "IEOB_IRQ:0x%x\n", ieob_irq);
+
+ /* process events based on priority */
+ if (likely(gpii->ev_priority >= EV_PRIORITY_TASKLET)) {
+ GPII_VERB(gpii, GPI_DBG_COMMON, "scheduling tasklet\n");
+ gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, 0);
+ tasklet_schedule(&gpii->ev_task);
+ } else {
+ GPII_VERB(gpii, GPI_DBG_COMMON, "processing events from isr\n");
+ gpi_process_events(gpii);
+ }
+}
+
+/* process channel control interrupt */
+static void gpi_process_ch_ctrl_irq(struct gpii *gpii)
+{
+ u32 gpii_id = gpii->gpii_id;
+ u32 offset = GPI_GPII_n_CNTXT_SRC_GPII_CH_IRQ_OFFS(gpii_id);
+ u32 ch_irq = gpi_read_reg(gpii, gpii->regs + offset);
+ u32 chid;
+ struct gpii_chan *gpii_chan;
+ u32 state;
+
+ /* clear the status */
+ offset = GPI_GPII_n_CNTXT_SRC_CH_IRQ_CLR_OFFS(gpii_id);
+ gpi_write_reg(gpii, gpii->regs + offset, (u32)ch_irq);
+
+ for (chid = 0; chid < MAX_CHANNELS_PER_GPII; chid++) {
+ if (!(BIT(chid) & ch_irq))
+ continue;
+
+ gpii_chan = &gpii->gpii_chan[chid];
+ GPII_VERB(gpii, chid, "processing channel ctrl irq\n");
+ state = gpi_read_reg(gpii, gpii_chan->ch_cntxt_base_reg +
+ CNTXT_0_CONFIG);
+ state = (state & GPI_GPII_n_CH_k_CNTXT_0_CHSTATE_BMSK) >>
+ GPI_GPII_n_CH_k_CNTXT_0_CHSTATE_SHFT;
+
+ /*
+ * GPI_CH_CMD_DE_ALLOC always succeeds, but it does not
+ * change the hardware state, so overwrite the software
+ * state with the default.
+ */
+ if (gpii->gpi_cmd == GPI_CH_CMD_DE_ALLOC)
+ state = DEFAULT_CH_STATE;
+ gpii_chan->ch_state = state;
+ GPII_VERB(gpii, chid, "setting channel to state:%s\n",
+ TO_GPI_CH_STATE_STR(gpii_chan->ch_state));
+
+ /*
+ * Triggering complete all if ch_state is not a stop in process.
+ * Stop in process is a transition state and we will wait for
+ * stop interrupt before notifying.
+ */
+ if (gpii_chan->ch_state != CH_STATE_STOP_IN_PROC)
+ complete_all(&gpii->cmd_completion);
+
+ /* notifying clients if in error state */
+ if (gpii_chan->ch_state == CH_STATE_ERROR)
+ gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_CH_ERROR,
+ __LINE__);
+ }
+}
+
+/* processing gpi level error interrupts */
+static void gpi_process_glob_err_irq(struct gpii *gpii)
+{
+ u32 gpii_id = gpii->gpii_id;
+ u32 offset = GPI_GPII_n_CNTXT_GLOB_IRQ_STTS_OFFS(gpii_id);
+ u32 irq_stts = gpi_read_reg(gpii, gpii->regs + offset);
+ u32 error_log;
+ u32 chid;
+ struct gpii_chan *gpii_chan;
+ struct gpi_client_info *client_info;
+ struct msm_gpi_cb msm_gpi_cb;
+ struct gpi_error_log_entry *log_entry =
+ (struct gpi_error_log_entry *)&error_log;
+
+ offset = GPI_GPII_n_CNTXT_GLOB_IRQ_CLR_OFFS(gpii_id);
+ gpi_write_reg(gpii, gpii->regs + offset, irq_stts);
+
+ /* only error interrupt should be set */
+ if (irq_stts & ~GPI_GLOB_IRQ_ERROR_INT_MSK) {
+ GPII_ERR(gpii, GPI_DBG_COMMON, "invalid error status:0x%x\n",
+ irq_stts);
+ goto error_irq;
+ }
+
+ offset = GPI_GPII_n_ERROR_LOG_OFFS(gpii_id);
+ error_log = gpi_read_reg(gpii, gpii->regs + offset);
+ gpi_write_reg(gpii, gpii->regs + offset, 0);
+
+ /* get channel info */
+ chid = ((struct gpi_error_log_entry *)&error_log)->chid;
+ if (unlikely(chid >= MAX_CHANNELS_PER_GPII)) {
+ GPII_ERR(gpii, GPI_DBG_COMMON, "invalid chid reported:%u\n",
+ chid);
+ goto error_irq;
+ }
+
+ gpii_chan = &gpii->gpii_chan[chid];
+ client_info = &gpii_chan->client_info;
+
+ /* notify client with error log */
+ msm_gpi_cb.cb_event = MSM_GPI_QUP_ERROR;
+ msm_gpi_cb.error_log.routine = log_entry->routine;
+ msm_gpi_cb.error_log.type = log_entry->type;
+ msm_gpi_cb.error_log.error_code = log_entry->code;
+ GPII_INFO(gpii, gpii_chan->chid, "sending CB event:%s\n",
+ TO_GPI_CB_EVENT_STR(msm_gpi_cb.cb_event));
+ GPII_ERR(gpii, gpii_chan->chid,
+ "ee:%u chtype:%u routine:%u type:%u error_code:%u\n",
+ log_entry->ee, log_entry->chtype,
+ msm_gpi_cb.error_log.routine,
+ msm_gpi_cb.error_log.type,
+ msm_gpi_cb.error_log.error_code);
+ client_info->callback(&gpii_chan->vc.chan, &msm_gpi_cb,
+ client_info->cb_param);
+
+ return;
+
+error_irq:
+ for (chid = 0, gpii_chan = gpii->gpii_chan;
+ chid < MAX_CHANNELS_PER_GPII; chid++, gpii_chan++)
+ gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_FW_ERROR,
+ irq_stts);
+}
+
+/* gpii interrupt handler */
+static irqreturn_t gpi_handle_irq(int irq, void *data)
+{
+ struct gpii *gpii = data;
+ u32 type;
+ unsigned long flags;
+ u32 offset;
+ u32 gpii_id = gpii->gpii_id;
+
+ GPII_VERB(gpii, GPI_DBG_COMMON, "enter\n");
+
+ read_lock_irqsave(&gpii->pm_lock, flags);
+
+ /*
+ * Receiving an interrupt while the software state is
+ * DISABLE means the states are out of sync; bail out.
+ */
+ if (!REG_ACCESS_VALID(gpii->pm_state)) {
+ GPII_CRITIC(gpii, GPI_DBG_COMMON,
+ "receive interrupt while in %s state\n",
+ TO_GPI_PM_STR(gpii->pm_state));
+ goto exit_irq;
+ }
+
+ offset = GPI_GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii->gpii_id);
+ type = gpi_read_reg(gpii, gpii->regs + offset);
+
+ do {
+ GPII_VERB(gpii, GPI_DBG_COMMON, "CNTXT_TYPE_IRQ:0x%08x\n",
+ type);
+ /* global gpii error */
+ if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB) {
+ GPII_ERR(gpii, GPI_DBG_COMMON,
+ "processing global error irq\n");
+ gpi_process_glob_err_irq(gpii);
+ type &= ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB);
+ }
+
+ /* event control irq */
+ if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL) {
+ u32 ev_state;
+ u32 ev_ch_irq;
+
+ GPII_INFO(gpii, GPI_DBG_COMMON,
+ "processing EV CTRL interrupt\n");
+ offset = GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_OFFS(gpii_id);
+ ev_ch_irq = gpi_read_reg(gpii, gpii->regs + offset);
+
+ offset = GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS
+ (gpii_id);
+ gpi_write_reg(gpii, gpii->regs + offset, ev_ch_irq);
+ ev_state = gpi_read_reg(gpii, gpii->ev_cntxt_base_reg +
+ CNTXT_0_CONFIG);
+ ev_state &= GPI_GPII_n_EV_CH_k_CNTXT_0_CHSTATE_BMSK;
+ ev_state >>= GPI_GPII_n_EV_CH_k_CNTXT_0_CHSTATE_SHFT;
+
+ /*
+ * GPI_EV_CMD_DEALLOC always succeeds, but it does not
+ * change the hardware state, so overwrite the software
+ * state with the default.
+ */
+ if (gpii->gpi_cmd == GPI_EV_CMD_DEALLOC)
+ ev_state = DEFAULT_EV_CH_STATE;
+
+ gpii->ev_state = ev_state;
+ GPII_INFO(gpii, GPI_DBG_COMMON,
+ "setting EV state to %s\n",
+ TO_GPI_EV_STATE_STR(gpii->ev_state));
+ complete_all(&gpii->cmd_completion);
+ type &= ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL);
+ }
+
+ /* channel control irq */
+ if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL) {
+ GPII_INFO(gpii, GPI_DBG_COMMON,
+ "process CH CTRL interrupts\n");
+ gpi_process_ch_ctrl_irq(gpii);
+ type &= ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL);
+ }
+
+ /* transfer complete interrupt */
+ if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB) {
+ GPII_VERB(gpii, GPI_DBG_COMMON,
+ "process IEOB interrupts\n");
+ gpi_process_ieob(gpii);
+ type &= ~GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB;
+ }
+
+ if (type) {
+ GPII_CRITIC(gpii, GPI_DBG_COMMON,
+ "Unhandled interrupt status:0x%x\n", type);
+ goto exit_irq;
+ }
+ offset = GPI_GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii->gpii_id);
+ type = gpi_read_reg(gpii, gpii->regs + offset);
+ } while (type);
+
+exit_irq:
+ read_unlock_irqrestore(&gpii->pm_lock, flags);
+ GPII_VERB(gpii, GPI_DBG_COMMON, "exit\n");
+
+ return IRQ_HANDLED;
+}
+
+/* process qup notification events */
+static void gpi_process_qup_notif_event(struct gpii_chan *gpii_chan,
+ struct qup_notif_event *notif_event)
+{
+ struct gpii *gpii = gpii_chan->gpii;
+ struct gpi_client_info *client_info = &gpii_chan->client_info;
+ struct msm_gpi_cb msm_gpi_cb;
+
+ GPII_VERB(gpii, gpii_chan->chid,
+ "status:0x%x time:0x%x count:0x%x\n",
+ notif_event->status, notif_event->time, notif_event->count);
+
+ msm_gpi_cb.cb_event = MSM_GPI_QUP_NOTIFY;
+ msm_gpi_cb.status = notif_event->status;
+ msm_gpi_cb.timestamp = notif_event->time;
+ msm_gpi_cb.count = notif_event->count;
+ GPII_VERB(gpii, gpii_chan->chid, "sending CB event:%s\n",
+ TO_GPI_CB_EVENT_STR(msm_gpi_cb.cb_event));
+ client_info->callback(&gpii_chan->vc.chan, &msm_gpi_cb,
+ client_info->cb_param);
+}
+
+/* process DMA Immediate completion data events */
+static void gpi_process_imed_data_event(struct gpii_chan *gpii_chan,
+ struct immediate_data_event *imed_event)
+{
+ struct gpii *gpii = gpii_chan->gpii;
+ struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
+ struct gpi_ring *sg_ring = &gpii_chan->sg_ring;
+ struct virt_dma_desc *vd;
+ struct gpi_desc *gpi_desc;
+ struct msm_gpi_tre *client_tre;
+ void *sg_tre;
+ void *tre = ch_ring->base +
+ (ch_ring->el_size * imed_event->tre_index);
+ struct msm_gpi_dma_async_tx_cb_param *tx_cb_param;
+
+ /*
+ * If channel not active don't process event but let
+ * client know pending event is available
+ */
+ if (gpii_chan->pm_state != ACTIVE_STATE) {
+ GPII_ERR(gpii, gpii_chan->chid,
+ "skipping processing event because ch @ %s state\n",
+ TO_GPI_PM_STR(gpii_chan->pm_state));
+ gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_PENDING_EVENT,
+ __LINE__);
+ return;
+ }
+
+ spin_lock_irq(&gpii_chan->vc.lock);
+ vd = vchan_next_desc(&gpii_chan->vc);
+ if (!vd) {
+ struct gpi_ere *gpi_ere;
+ struct msm_gpi_tre *gpi_tre;
+
+ spin_unlock_irq(&gpii_chan->vc.lock);
+ GPII_ERR(gpii, gpii_chan->chid,
+ "event without a pending descriptor!\n");
+ gpi_ere = (struct gpi_ere *)imed_event;
+ GPII_ERR(gpii, gpii_chan->chid, "Event: %08x %08x %08x %08x\n",
+ gpi_ere->dword[0], gpi_ere->dword[1],
+ gpi_ere->dword[2], gpi_ere->dword[3]);
+ gpi_tre = tre;
+ GPII_ERR(gpii, gpii_chan->chid,
+ "Pending TRE: %08x %08x %08x %08x\n",
+ gpi_tre->dword[0], gpi_tre->dword[1],
+ gpi_tre->dword[2], gpi_tre->dword[3]);
+ gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
+ __LINE__);
+ return;
+ }
+ gpi_desc = to_gpi_desc(vd);
+
+ /* the TRE the event points at doesn't match the descriptor's TRE */
+ if (gpi_desc->wp != tre) {
+ spin_unlock_irq(&gpii_chan->vc.lock);
+ GPII_ERR(gpii, gpii_chan->chid,
+ "EOT/EOB received for wrong TRE 0x%0llx != 0x%0llx\n",
+ to_physical(ch_ring, gpi_desc->wp),
+ to_physical(ch_ring, tre));
+ gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
+ __LINE__);
+ return;
+ }
+
+ list_del(&vd->node);
+ spin_unlock_irq(&gpii_chan->vc.lock);
+
+ sg_tre = gpi_desc->sg_tre;
+ client_tre = ((struct sg_tre *)sg_tre)->ptr;
+
+ /*
+ * The event's RP points to the last TRE processed, so
+ * advance the ring rp to tre + 1
+ */
+ tre += ch_ring->el_size;
+ if (tre >= (ch_ring->base + ch_ring->len))
+ tre = ch_ring->base;
+ ch_ring->rp = tre;
+ sg_tre += sg_ring->el_size;
+ if (sg_tre >= (sg_ring->base + sg_ring->len))
+ sg_tre = sg_ring->base;
+ sg_ring->rp = sg_tre;
+
+ /* make sure rp updates are immediately visible to all cores */
+ smp_wmb();
+
+ /* update Immediate data from Event back in to TRE if it's RX channel */
+ if (gpii_chan->dir == GPI_CHTYPE_DIR_IN) {
+ client_tre->dword[0] =
+ ((struct msm_gpi_tre *)imed_event)->dword[0];
+ client_tre->dword[1] =
+ ((struct msm_gpi_tre *)imed_event)->dword[1];
+ client_tre->dword[2] = MSM_GPI_DMA_IMMEDIATE_TRE_DWORD2(
+ imed_event->length);
+ }
+
+ tx_cb_param = vd->tx.callback_param;
+ if (tx_cb_param) {
+ GPII_VERB(gpii, gpii_chan->chid,
+ "cb_length:%u compl_code:0x%x status:0x%x\n",
+ imed_event->length, imed_event->code,
+ imed_event->status);
+ tx_cb_param->length = imed_event->length;
+ tx_cb_param->completion_code = imed_event->code;
+ tx_cb_param->status = imed_event->status;
+ }
+
+ spin_lock_irq(&gpii_chan->vc.lock);
+ vchan_cookie_complete(vd);
+ spin_unlock_irq(&gpii_chan->vc.lock);
+}
+
+/* processing transfer completion events */
+static void gpi_process_xfer_compl_event(struct gpii_chan *gpii_chan,
+ struct xfer_compl_event *compl_event)
+{
+ struct gpii *gpii = gpii_chan->gpii;
+ struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
+ struct gpi_ring *sg_ring = &gpii_chan->sg_ring;
+ void *ev_rp = to_virtual(ch_ring, compl_event->ptr);
+ struct msm_gpi_tre *client_tre;
+ struct virt_dma_desc *vd;
+ struct msm_gpi_dma_async_tx_cb_param *tx_cb_param;
+ struct gpi_desc *gpi_desc;
+ void *sg_tre = NULL;
+
+ /* only process events on active channel */
+ if (unlikely(gpii_chan->pm_state != ACTIVE_STATE)) {
+ GPII_ERR(gpii, gpii_chan->chid,
+ "skipping processing event because ch @ %s state\n",
+ TO_GPI_PM_STR(gpii_chan->pm_state));
+ gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_PENDING_EVENT,
+ __LINE__);
+ return;
+ }
+
+ spin_lock_irq(&gpii_chan->vc.lock);
+ vd = vchan_next_desc(&gpii_chan->vc);
+ if (!vd) {
+ struct gpi_ere *gpi_ere;
+
+ spin_unlock_irq(&gpii_chan->vc.lock);
+ GPII_ERR(gpii, gpii_chan->chid,
+ "Event without a pending descriptor!\n");
+ gpi_ere = (struct gpi_ere *)compl_event;
+ GPII_ERR(gpii, gpii_chan->chid, "Event: %08x %08x %08x %08x\n",
+ gpi_ere->dword[0], gpi_ere->dword[1],
+ gpi_ere->dword[2], gpi_ere->dword[3]);
+ gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
+ __LINE__);
+ return;
+ }
+
+ gpi_desc = to_gpi_desc(vd);
+
+ /* the TRE the event reports doesn't match the descriptor's TRE */
+ if (gpi_desc->wp != ev_rp) {
+ spin_unlock_irq(&gpii_chan->vc.lock);
+ GPII_ERR(gpii, gpii_chan->chid,
+ "EOT\EOB received for wrong TRE 0x%0llx != 0x%0llx\n",
+ to_physical(ch_ring, gpi_desc->wp),
+ to_physical(ch_ring, ev_rp));
+ gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
+ __LINE__);
+ return;
+ }
+
+ list_del(&vd->node);
+ spin_unlock_irq(&gpii_chan->vc.lock);
+
+ sg_tre = gpi_desc->sg_tre;
+ client_tre = ((struct sg_tre *)sg_tre)->ptr;
+
+ /*
+ * The event's RP points to the last TRE processed, so
+ * advance the ring rp to ev_rp + 1
+ */
+ ev_rp += ch_ring->el_size;
+ if (ev_rp >= (ch_ring->base + ch_ring->len))
+ ev_rp = ch_ring->base;
+ ch_ring->rp = ev_rp;
+ sg_tre += sg_ring->el_size;
+ if (sg_tre >= (sg_ring->base + sg_ring->len))
+ sg_tre = sg_ring->base;
+ sg_ring->rp = sg_tre;
+
+ /* update must be visible to other cores */
+ smp_wmb();
+
+ tx_cb_param = vd->tx.callback_param;
+ if (tx_cb_param) {
+ GPII_VERB(gpii, gpii_chan->chid,
+ "cb_length:%u compl_code:0x%x status:0x%x\n",
+ compl_event->length, compl_event->code,
+ compl_event->status);
+ tx_cb_param->length = compl_event->length;
+ tx_cb_param->completion_code = compl_event->code;
+ tx_cb_param->status = compl_event->status;
+ }
+
+ spin_lock_irq(&gpii_chan->vc.lock);
+ vchan_cookie_complete(vd);
+ spin_unlock_irq(&gpii_chan->vc.lock);
+}
+
+/* process all events */
+static void gpi_process_events(struct gpii *gpii)
+{
+ struct gpi_ring *ev_ring = &gpii->ev_ring;
+ u32 cntxt_rp, local_rp;
+ union gpi_event *gpi_event;
+ struct gpii_chan *gpii_chan;
+ u32 chid, type;
+ u32 ieob_irq;
+
+ cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
+ local_rp = (u32)to_physical(ev_ring, (void *)ev_ring->rp);
+
+ GPII_VERB(gpii, GPI_DBG_COMMON, "cntxt_rp: 0x08%x local_rp:0x08%x\n",
+ cntxt_rp, local_rp);
+
+ do {
+ while (local_rp != cntxt_rp) {
+ gpi_event = ev_ring->rp;
+ chid = gpi_event->xfer_compl_event.chid;
+ type = gpi_event->xfer_compl_event.type;
+ GPII_VERB(gpii, GPI_DBG_COMMON,
+ "rp:0x%08x chid:%u type:0x%x %08x %08x %08x %08x\n",
+ local_rp, chid, type,
+ gpi_event->gpi_ere.dword[0],
+ gpi_event->gpi_ere.dword[1],
+ gpi_event->gpi_ere.dword[2],
+ gpi_event->gpi_ere.dword[3]);
+
+ switch (type) {
+ case XFER_COMPLETE_EV_TYPE:
+ gpii_chan = &gpii->gpii_chan[chid];
+ gpi_process_xfer_compl_event(gpii_chan,
+ &gpi_event->xfer_compl_event);
+ break;
+ case STALE_EV_TYPE:
+ GPII_VERB(gpii, GPI_DBG_COMMON,
+ "stale event, not processing\n");
+ break;
+ case IMMEDIATE_DATA_EV_TYPE:
+ gpii_chan = &gpii->gpii_chan[chid];
+ gpi_process_imed_data_event(gpii_chan,
+ &gpi_event->immediate_data_event);
+ break;
+ case QUP_NOTIF_EV_TYPE:
+ gpii_chan = &gpii->gpii_chan[chid];
+ gpi_process_qup_notif_event(gpii_chan,
+ &gpi_event->qup_notif_event);
+ break;
+ default:
+ GPII_VERB(gpii, GPI_DBG_COMMON,
+ "not supported event type:0x%x\n",
+ type);
+ }
+ gpi_ring_recycle_ev_element(ev_ring);
+ local_rp = (u32)to_physical(ev_ring,
+ (void *)ev_ring->rp);
+ }
+ gpi_write_ev_db(gpii, ev_ring, ev_ring->wp);
+
+ /* clear pending IEOB events */
+ ieob_irq = gpi_read_reg(gpii, gpii->ieob_src_reg);
+ gpi_write_reg(gpii, gpii->ieob_clr_reg, ieob_irq);
+
+ cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
+ local_rp = (u32)to_physical(ev_ring, (void *)ev_ring->rp);
+
+ } while (cntxt_rp != local_rp);
+
+ GPII_VERB(gpii, GPI_DBG_COMMON, "exit: c_rp:0x%x l_rp:0x%x\n", cntxt_rp,
+ local_rp);
+}
+
+/* processing events using tasklet */
+static void gpi_ev_tasklet(unsigned long data)
+{
+ struct gpii *gpii = (struct gpii *)data;
+
+ GPII_VERB(gpii, GPI_DBG_COMMON, "enter\n");
+
+ read_lock_bh(&gpii->pm_lock);
+ if (!REG_ACCESS_VALID(gpii->pm_state)) {
+ read_unlock_bh(&gpii->pm_lock);
+ GPII_ERR(gpii, GPI_DBG_COMMON,
+ "not processing any events, pm_state:%s\n",
+ TO_GPI_PM_STR(gpii->pm_state));
+ return;
+ }
+
+ /* process the events */
+ gpi_process_events(gpii);
+
+ /* enable IEOB, switching back to interrupts */
+ gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, 1);
+ read_unlock_bh(&gpii->pm_lock);
+
+ GPII_VERB(gpii, GPI_DBG_COMMON, "exit\n");
+}
+
+/* marks all pending events for the channel as stale */
+static void gpi_mark_stale_events(struct gpii_chan *gpii_chan)
+{
+ struct gpii *gpii = gpii_chan->gpii;
+ struct gpi_ring *ev_ring = &gpii->ev_ring;
+ void *ev_rp;
+ u32 cntxt_rp, local_rp;
+
+ GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
+ cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
+
+ ev_rp = ev_ring->rp;
+ local_rp = (u32)to_physical(ev_ring, ev_rp);
+ while (local_rp != cntxt_rp) {
+ union gpi_event *gpi_event = ev_rp;
+ u32 chid = gpi_event->xfer_compl_event.chid;
+
+ if (chid == gpii_chan->chid)
+ gpi_event->xfer_compl_event.type = STALE_EV_TYPE;
+ ev_rp += ev_ring->el_size;
+ if (ev_rp >= (ev_ring->base + ev_ring->len))
+ ev_rp = ev_ring->base;
+ cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
+ local_rp = (u32)to_physical(ev_ring, ev_rp);
+ }
+}
+
+/* reset sw state and issue channel reset or de-alloc */
+static int gpi_reset_chan(struct gpii_chan *gpii_chan, enum gpi_cmd gpi_cmd)
+{
+ struct gpii *gpii = gpii_chan->gpii;
+ struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
+ struct gpi_ring *sg_ring = &gpii_chan->sg_ring;
+ unsigned long flags;
+ LIST_HEAD(list);
+ int ret;
+
+ GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
+ ret = gpi_send_cmd(gpii, gpii_chan, gpi_cmd);
+ if (ret) {
+ GPII_ERR(gpii, gpii_chan->chid,
+ "Error with cmd:%s ret:%d\n",
+ TO_GPI_CMD_STR(gpi_cmd), ret);
+ return ret;
+ }
+
+ /* initialize the local ring ptrs */
+ ch_ring->rp = ch_ring->base;
+ ch_ring->wp = ch_ring->base;
+ sg_ring->rp = sg_ring->base;
+ sg_ring->wp = sg_ring->base;
+
+ /* visible to other cores */
+ smp_wmb();
+
+ /* check event ring for any stale events */
+ write_lock_irq(&gpii->pm_lock);
+ gpi_mark_stale_events(gpii_chan);
+
+ /* remove all async descriptors */
+ spin_lock_irqsave(&gpii_chan->vc.lock, flags);
+ vchan_get_all_descriptors(&gpii_chan->vc, &list);
+ spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
+ write_unlock_irq(&gpii->pm_lock);
+ vchan_dma_desc_free_list(&gpii_chan->vc, &list);
+
+ return 0;
+}
+
+static int gpi_start_chan(struct gpii_chan *gpii_chan)
+{
+ struct gpii *gpii = gpii_chan->gpii;
+ int ret;
+
+ GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
+
+ ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_START);
+ if (ret) {
+ GPII_ERR(gpii, gpii_chan->chid,
+ "Error with cmd:%s ret:%d\n",
+ TO_GPI_CMD_STR(GPI_CH_CMD_START), ret);
+ return ret;
+ }
+
+ /* gpii CH is active now */
+ write_lock_irq(&gpii->pm_lock);
+ gpii_chan->pm_state = ACTIVE_STATE;
+ write_unlock_irq(&gpii->pm_lock);
+
+ return 0;
+}
+
+/* allocate and configure the transfer channel */
+static int gpi_alloc_chan(struct gpii_chan *gpii_chan, bool send_alloc_cmd)
+{
+ struct gpii *gpii = gpii_chan->gpii;
+ struct gpi_ring *ring = &gpii_chan->ch_ring;
+ int i;
+ int ret;
+ struct {
+ void *base;
+ int offset;
+ u32 val;
+ } ch_reg[] = {
+ {
+ gpii_chan->ch_cntxt_base_reg,
+ CNTXT_0_CONFIG,
+ GPI_GPII_n_CH_k_CNTXT_0(ring->el_size, 0,
+ gpii_chan->dir,
+ GPI_CHTYPE_PROTO_GPI),
+ },
+ {
+ gpii_chan->ch_cntxt_base_reg,
+ CNTXT_1_R_LENGTH,
+ ring->len,
+ },
+ {
+ gpii_chan->ch_cntxt_base_reg,
+ CNTXT_2_RING_BASE_LSB,
+ (u32)ring->phys_addr,
+ },
+ {
+ gpii_chan->ch_cntxt_base_reg,
+ CNTXT_3_RING_BASE_MSB,
+ (u32)(ring->phys_addr >> 32),
+ },
+ { /* program MSB of DB register with ring base */
+ gpii_chan->ch_cntxt_db_reg,
+ CNTXT_5_RING_RP_MSB - CNTXT_4_RING_RP_LSB,
+ (u32)(ring->phys_addr >> 32),
+ },
+ {
+ gpii->regs,
+ GPI_GPII_n_CH_k_SCRATCH_0_OFFS(gpii->gpii_id,
+ gpii_chan->chid),
+ GPI_GPII_n_CH_K_SCRATCH_0(!gpii_chan->chid,
+ gpii_chan->protocol,
+ gpii_chan->seid),
+ },
+ {
+ gpii->regs,
+ GPI_GPII_n_CH_k_SCRATCH_1_OFFS(gpii->gpii_id,
+ gpii_chan->chid),
+ 0,
+ },
+ {
+ gpii->regs,
+ GPI_GPII_n_CH_k_SCRATCH_2_OFFS(gpii->gpii_id,
+ gpii_chan->chid),
+ 0,
+ },
+ {
+ gpii->regs,
+ GPI_GPII_n_CH_k_SCRATCH_3_OFFS(gpii->gpii_id,
+ gpii_chan->chid),
+ 0,
+ },
+ {
+ gpii->regs,
+ GPI_GPII_n_CH_k_QOS_OFFS(gpii->gpii_id,
+ gpii_chan->chid),
+ 1,
+ },
+ { NULL },
+ };
+
+ GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
+
+ if (send_alloc_cmd) {
+ ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_ALLOCATE);
+ if (ret) {
+ GPII_ERR(gpii, gpii_chan->chid,
+ "Error with cmd:%s ret:%d\n",
+ TO_GPI_CMD_STR(GPI_CH_CMD_ALLOCATE), ret);
+ return ret;
+ }
+ }
+
+ /* program channel cntxt registers */
+ for (i = 0; ch_reg[i].base; i++)
+ gpi_write_reg(gpii, ch_reg[i].base + ch_reg[i].offset,
+ ch_reg[i].val);
+ /* flush all the writes */
+ wmb();
+ return 0;
+}
+
+/* allocate and configure event ring */
+static int gpi_alloc_ev_chan(struct gpii *gpii)
+{
+ struct gpi_ring *ring = &gpii->ev_ring;
+ int i;
+ int ret;
+ struct {
+ void *base;
+ int offset;
+ u32 val;
+ } ev_reg[] = {
+ {
+ gpii->ev_cntxt_base_reg,
+ CNTXT_0_CONFIG,
+ GPI_GPII_n_EV_CH_k_CNTXT_0(ring->el_size,
+ GPI_INTTYPE_IRQ,
+ GPI_CHTYPE_GPI_EV),
+ },
+ {
+ gpii->ev_cntxt_base_reg,
+ CNTXT_1_R_LENGTH,
+ ring->len,
+ },
+ {
+ gpii->ev_cntxt_base_reg,
+ CNTXT_2_RING_BASE_LSB,
+ (u32)ring->phys_addr,
+ },
+ {
+ gpii->ev_cntxt_base_reg,
+ CNTXT_3_RING_BASE_MSB,
+ (u32)(ring->phys_addr >> 32),
+ },
+ {
+ /* program db msg with ring base msb */
+ gpii->ev_cntxt_db_reg,
+ CNTXT_5_RING_RP_MSB - CNTXT_4_RING_RP_LSB,
+ (u32)(ring->phys_addr >> 32),
+ },
+ {
+ gpii->ev_cntxt_base_reg,
+ CNTXT_8_RING_INT_MOD,
+ 0,
+ },
+ {
+ gpii->ev_cntxt_base_reg,
+ CNTXT_10_RING_MSI_LSB,
+ 0,
+ },
+ {
+ gpii->ev_cntxt_base_reg,
+ CNTXT_11_RING_MSI_MSB,
+ 0,
+ },
+ {
+ gpii->ev_cntxt_base_reg,
+ CNTXT_8_RING_INT_MOD,
+ 0,
+ },
+ {
+ gpii->ev_cntxt_base_reg,
+ CNTXT_12_RING_RP_UPDATE_LSB,
+ 0,
+ },
+ {
+ gpii->ev_cntxt_base_reg,
+ CNTXT_13_RING_RP_UPDATE_MSB,
+ 0,
+ },
+ { NULL },
+ };
+
+ GPII_INFO(gpii, GPI_DBG_COMMON, "enter\n");
+
+ ret = gpi_send_cmd(gpii, NULL, GPI_EV_CMD_ALLOCATE);
+ if (ret) {
+ GPII_ERR(gpii, GPI_DBG_COMMON, "error with cmd:%s ret:%d\n",
+ TO_GPI_CMD_STR(GPI_EV_CMD_ALLOCATE), ret);
+ return ret;
+ }
+
+ /* program event context */
+ for (i = 0; ev_reg[i].base; i++)
+ gpi_write_reg(gpii, ev_reg[i].base + ev_reg[i].offset,
+ ev_reg[i].val);
+
+ /* add events to ring */
+ ring->wp = (ring->base + ring->len - ring->el_size);
+
+ /* flush all the writes */
+ wmb();
+
+ /* gpii is active now */
+ write_lock_irq(&gpii->pm_lock);
+ gpii->pm_state = ACTIVE_STATE;
+ write_unlock_irq(&gpii->pm_lock);
+ gpi_write_ev_db(gpii, ring, ring->wp);
+
+ return 0;
+}
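+/*
+ * Illustrative note (hypothetical values): with 4 elements of 16 bytes
+ * at base 0x1000, the initial wp programmed above is
+ * 0x1000 + 64 - 16 = 0x1030, i.e. the last element, which hands every
+ * slot in the ring to the hardware for incoming events.
+ */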
+
+/* calculate # of ERE/TRE available to queue */
+static int gpi_ring_num_elements_avail(const struct gpi_ring * const ring)
+{
+ int elements = 0;
+
+ if (ring->wp < ring->rp)
+ elements = ((ring->rp - ring->wp) / ring->el_size) - 1;
+ else {
+ elements = (ring->rp - ring->base) / ring->el_size;
+ elements += ((ring->base + ring->len - ring->wp) /
+ ring->el_size) - 1;
+ }
+
+ return elements;
+}
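+/*
+ * Worked example (illustrative, hypothetical values): with base = 0x1000,
+ * len = 64 and el_size = 16 the ring has 4 elements, one of which is
+ * always kept empty so a full ring can be told apart from an empty one.
+ * With rp = 0x1000 and wp = 0x1020 (two elements queued), the else
+ * branch gives (0x1000 - 0x1000)/16 + (0x1040 - 0x1020)/16 - 1 = 1
+ * free element.
+ */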
+
+static int gpi_ring_add_element(struct gpi_ring *ring, void **wp)
+{
+ if (gpi_ring_num_elements_avail(ring) <= 0)
+ return -ENOMEM;
+
+ *wp = ring->wp;
+ ring->wp += ring->el_size;
+ if (ring->wp >= (ring->base + ring->len))
+ ring->wp = ring->base;
+
+ /* visible to other cores */
+ smp_wmb();
+
+ return 0;
+}
+
+static void gpi_ring_recycle_ev_element(struct gpi_ring *ring)
+{
+ /* Update the WP */
+ ring->wp += ring->el_size;
+ if (ring->wp >= (ring->base + ring->len))
+ ring->wp = ring->base;
+
+ /* Update the RP */
+ ring->rp += ring->el_size;
+ if (ring->rp >= (ring->base + ring->len))
+ ring->rp = ring->base;
+
+ /* visible to other cores */
+ smp_wmb();
+}
+
+static void gpi_free_ring(struct gpi_ring *ring,
+ struct gpii *gpii)
+{
+ if (ring->dma_handle)
+ dma_free_coherent(gpii->gpi_dev->dev, ring->alloc_size,
+ ring->pre_aligned, ring->dma_handle);
+ else
+ vfree(ring->pre_aligned);
+ memset(ring, 0, sizeof(*ring));
+}
+
+/* allocate memory for transfer and event rings */
+static int gpi_alloc_ring(struct gpi_ring *ring,
+ u32 elements,
+ u32 el_size,
+ struct gpii *gpii,
+ bool alloc_coherent)
+{
+ u64 len = elements * el_size;
+ int bit;
+
+ if (alloc_coherent) {
+ /* ring len must be power of 2 */
+ bit = find_last_bit((unsigned long *)&len, 32);
+ if (((1 << bit) - 1) & len)
+ bit++;
+ len = 1 << bit;
+ ring->alloc_size = (len + (len - 1));
+ GPII_INFO(gpii, GPI_DBG_COMMON,
+ "#el:%u el_size:%u len:%u actual_len:%llu alloc_size:%lu\n",
+ elements, el_size, (elements * el_size), len,
+ ring->alloc_size);
+ ring->pre_aligned = dma_alloc_coherent(gpii->gpi_dev->dev,
+ ring->alloc_size,
+ &ring->dma_handle,
+ GFP_KERNEL);
+ if (!ring->pre_aligned) {
+ GPII_CRITIC(gpii, GPI_DBG_COMMON,
+ "could not alloc size:%lu mem for ring\n",
+ ring->alloc_size);
+ return -ENOMEM;
+ }
+
+ /* align the physical mem */
+ ring->phys_addr = (ring->dma_handle + (len - 1)) & ~(len - 1);
+ ring->base = ring->pre_aligned +
+ (ring->phys_addr - ring->dma_handle);
+ } else {
+ ring->pre_aligned = vmalloc(len);
+ if (!ring->pre_aligned) {
+ GPII_CRITIC(gpii, GPI_DBG_COMMON,
+ "could not allocsize:%llu mem for ring\n",
+ len);
+ return -ENOMEM;
+ }
+ ring->phys_addr = 0;
+ ring->dma_handle = 0;
+ ring->base = ring->pre_aligned;
+ }
+
+ ring->rp = ring->base;
+ ring->wp = ring->base;
+ ring->len = len;
+ ring->el_size = el_size;
+ ring->elements = ring->len / ring->el_size;
+ memset(ring->base, 0, ring->len);
+ ring->configured = true;
+
+ /* update to other cores */
+ smp_wmb();
+
+ GPII_INFO(gpii, GPI_DBG_COMMON,
+ "phy_pre:0x%0llx phy_alig:0x%0llx len:%u el_size:%u elements:%u\n",
+ ring->dma_handle, ring->phys_addr, ring->len, ring->el_size,
+ ring->elements);
+
+ return 0;
+}
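+/*
+ * Worked example (illustrative, hypothetical values): a coherent request
+ * for 3000 bytes is rounded up to the next power of two, len = 4096, and
+ * alloc_size = 4096 + 4095 = 8191 bytes are allocated so that an aligned
+ * window always fits inside the buffer. If the allocation lands at
+ * dma_handle = 0x10000b30, then
+ *   phys_addr = (0x10000b30 + 0xfff) & ~0xfff = 0x10001000
+ * and base points 0x4d0 bytes into pre_aligned.
+ */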
+
+/* copy tre into transfer ring */
+static void gpi_queue_xfer(struct gpii *gpii,
+ struct gpii_chan *gpii_chan,
+ struct msm_gpi_tre *gpi_tre,
+ void **wp,
+ struct sg_tre **sg_tre)
+{
+ struct msm_gpi_tre *ch_tre;
+ int ret;
+
+ /* get next tre location we can copy */
+ ret = gpi_ring_add_element(&gpii_chan->ch_ring, (void **)&ch_tre);
+ if (unlikely(ret)) {
+ GPII_CRITIC(gpii, gpii_chan->chid,
+ "Error adding ring element to xfer ring\n");
+ return;
+ }
+ /* get next sg tre location we can use */
+ ret = gpi_ring_add_element(&gpii_chan->sg_ring, (void **)sg_tre);
+ if (unlikely(ret)) {
+ GPII_CRITIC(gpii, gpii_chan->chid,
+ "Error adding ring element to sg ring\n");
+ return;
+ }
+
+ /* copy the tre info */
+ memcpy(ch_tre, gpi_tre, sizeof(*ch_tre));
+ (*sg_tre)->ptr = gpi_tre;
+ (*sg_tre)->wp = ch_tre;
+ *wp = ch_tre;
+}
+
+/* reset and restart transfer channel */
+int gpi_terminate_all(struct dma_chan *chan)
+{
+ struct gpii_chan *gpii_chan = to_gpii_chan(chan);
+ struct gpii *gpii = gpii_chan->gpii;
+ int schid, echid, i;
+ int ret = 0;
+
+ GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
+ mutex_lock(&gpii->ctrl_lock);
+
+ /*
+ * treat both channels as a group if the protocol is not UART;
+ * STOP, RESET, or START must be issued in lockstep
+ */
+ schid = (gpii->protocol == SE_PROTOCOL_UART) ? gpii_chan->chid : 0;
+ echid = (gpii->protocol == SE_PROTOCOL_UART) ? schid + 1 :
+ MAX_CHANNELS_PER_GPII;
+
+ /* stop the channel */
+ for (i = schid; i < echid; i++) {
+ gpii_chan = &gpii->gpii_chan[i];
+
+ /* disable ch state so no more TRE processing */
+ write_lock_irq(&gpii->pm_lock);
+ gpii_chan->pm_state = PREPARE_TERMINATE;
+ write_unlock_irq(&gpii->pm_lock);
+
+ /* send command to Stop the channel */
+ ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_STOP);
+ if (ret)
+ GPII_ERR(gpii, gpii_chan->chid,
+ "Error Stopping Channel:%d resetting anyway\n",
+ ret);
+ }
+
+ /* reset the channels (clears any pending tre) */
+ for (i = schid; i < echid; i++) {
+ gpii_chan = &gpii->gpii_chan[i];
+
+ ret = gpi_reset_chan(gpii_chan, GPI_CH_CMD_RESET);
+ if (ret) {
+ GPII_ERR(gpii, gpii_chan->chid,
+ "Error resetting channel ret:%d\n", ret);
+ goto terminate_exit;
+ }
+
+ /* reprogram channel CNTXT */
+ ret = gpi_alloc_chan(gpii_chan, false);
+ if (ret) {
+ GPII_ERR(gpii, gpii_chan->chid,
+ "Error alloc_channel ret:%d\n", ret);
+ goto terminate_exit;
+ }
+ }
+
+ /* restart the channels */
+ for (i = schid; i < echid; i++) {
+ gpii_chan = &gpii->gpii_chan[i];
+
+ ret = gpi_start_chan(gpii_chan);
+ if (ret) {
+ GPII_ERR(gpii, gpii_chan->chid,
+ "Error Starting Channel ret:%d\n", ret);
+ goto terminate_exit;
+ }
+ }
+
+terminate_exit:
+ mutex_unlock(&gpii->ctrl_lock);
+ return ret;
+}
+
+/* pause dma transfer for all channels */
+static int gpi_pause(struct dma_chan *chan)
+{
+ struct gpii_chan *gpii_chan = to_gpii_chan(chan);
+ struct gpii *gpii = gpii_chan->gpii;
+ int i, ret;
+
+ GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
+ mutex_lock(&gpii->ctrl_lock);
+
+ /*
+ * pause/resume are per gpii, not per channel, so the
+ * client needs to call pause only once
+ */
+ if (gpii->pm_state == PAUSE_STATE) {
+ GPII_INFO(gpii, gpii_chan->chid,
+ "channel is already paused\n");
+ mutex_unlock(&gpii->ctrl_lock);
+ return 0;
+ }
+
+ /* send stop command to stop the channels */
+ for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
+ ret = gpi_send_cmd(gpii, &gpii->gpii_chan[i], GPI_CH_CMD_STOP);
+ if (ret) {
+ GPII_ERR(gpii, gpii->gpii_chan[i].chid,
+ "Error stopping chan, ret:%d\n", ret);
+ mutex_unlock(&gpii->ctrl_lock);
+ return ret;
+ }
+ }
+
+ disable_irq(gpii->irq);
+
+ /* Wait for threads to complete out */
+ tasklet_kill(&gpii->ev_task);
+
+ write_lock_irq(&gpii->pm_lock);
+ gpii->pm_state = PAUSE_STATE;
+ write_unlock_irq(&gpii->pm_lock);
+ mutex_unlock(&gpii->ctrl_lock);
+
+ return 0;
+}
+
+/* resume dma transfer */
+static int gpi_resume(struct dma_chan *chan)
+{
+ struct gpii_chan *gpii_chan = to_gpii_chan(chan);
+ struct gpii *gpii = gpii_chan->gpii;
+ int i;
+ int ret;
+
+ GPII_INFO(gpii, gpii_chan->chid, "enter\n");
+
+ mutex_lock(&gpii->ctrl_lock);
+ if (gpii->pm_state == ACTIVE_STATE) {
+ GPII_INFO(gpii, gpii_chan->chid,
+ "channel is already active\n");
+ mutex_unlock(&gpii->ctrl_lock);
+ return 0;
+ }
+
+ enable_irq(gpii->irq);
+
+ /* send start command to start the channels */
+ for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
+ ret = gpi_send_cmd(gpii, &gpii->gpii_chan[i], GPI_CH_CMD_START);
+ if (ret) {
+ GPII_ERR(gpii, gpii->gpii_chan[i].chid,
+ "Erro starting chan, ret:%d\n", ret);
+ mutex_unlock(&gpii->ctrl_lock);
+ return ret;
+ }
+ }
+
+ write_lock_irq(&gpii->pm_lock);
+ gpii->pm_state = ACTIVE_STATE;
+ write_unlock_irq(&gpii->pm_lock);
+ mutex_unlock(&gpii->ctrl_lock);
+
+ return 0;
+}
+
+void gpi_desc_free(struct virt_dma_desc *vd)
+{
+ struct gpi_desc *gpi_desc = to_gpi_desc(vd);
+
+ kfree(gpi_desc);
+}
+
+/* copy tre into transfer ring */
+struct dma_async_tx_descriptor *gpi_prep_slave_sg(struct dma_chan *chan,
+ struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_transfer_direction direction,
+ unsigned long flags,
+ void *context)
+{
+ struct gpii_chan *gpii_chan = to_gpii_chan(chan);
+ struct gpii *gpii = gpii_chan->gpii;
+ u32 nr, sg_nr;
+ u32 nr_req = 0;
+ int i, j;
+ struct scatterlist *sg;
+ struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
+ struct gpi_ring *sg_ring = &gpii_chan->sg_ring;
+ void *tre, *wp = NULL;
+ struct sg_tre *sg_tre = NULL;
+ const gfp_t gfp = GFP_ATOMIC;
+ struct gpi_desc *gpi_desc;
+
+ GPII_VERB(gpii, gpii_chan->chid, "enter\n");
+
+ if (!is_slave_direction(direction)) {
+ GPII_ERR(gpii, gpii_chan->chid,
+ "invalid dma direction: %d\n", direction);
+ return NULL;
+ }
+
+ /* calculate # of elements required & available */
+ nr = gpi_ring_num_elements_avail(ch_ring);
+ sg_nr = gpi_ring_num_elements_avail(sg_ring);
+ for_each_sg(sgl, sg, sg_len, i) {
+ GPII_VERB(gpii, gpii_chan->chid,
+ "%d of %u len:%u\n", i, sg_len, sg->length);
+ nr_req += (sg->length / ch_ring->el_size);
+ }
+ GPII_VERB(gpii, gpii_chan->chid,
+ "nr_elements_avail:%u sg_avail:%u required:%u\n",
+ nr, sg_nr, nr_req);
+
+ if (nr < nr_req || sg_nr < nr_req) {
+ GPII_ERR(gpii, gpii_chan->chid,
+ "not enough space in ring, avail:%u,%u required:%u\n",
+ nr, sg_nr, nr_req);
+ return NULL;
+ }
+
+ gpi_desc = kzalloc(sizeof(*gpi_desc), gfp);
+ if (!gpi_desc) {
+ GPII_ERR(gpii, gpii_chan->chid,
+ "out of memory for descriptor\n");
+ return NULL;
+ }
+
+ /* copy each tre into transfer ring */
+ for_each_sg(sgl, sg, sg_len, i)
+ for (j = 0, tre = sg_virt(sg); j < sg->length;
+ j += ch_ring->el_size, tre += ch_ring->el_size)
+ gpi_queue_xfer(gpii, gpii_chan, tre, &wp, &sg_tre);
+
+ /* set up the descriptor */
+ gpi_desc->db = ch_ring->wp;
+ gpi_desc->wp = wp;
+ gpi_desc->sg_tre = sg_tre;
+ gpi_desc->gpii_chan = gpii_chan;
+ GPII_VERB(gpii, gpii_chan->chid, "exit wp:0x%0llx rp:0x%0llx\n",
+ to_physical(ch_ring, ch_ring->wp),
+ to_physical(ch_ring, ch_ring->rp));
+
+ return vchan_tx_prep(&gpii_chan->vc, &gpi_desc->vd, flags);
+}
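+/*
+ * Illustrative example (hypothetical values): assuming 16-byte TREs,
+ * a two-entry scatterlist with lengths 32 and 48 packs
+ * 32/16 + 48/16 = 5 TREs, so nr_req = 5 and the prep call fails unless
+ * both the transfer ring and the sg ring have at least 5 free elements.
+ */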
+
+/* rings the transfer ring doorbell to begin the transfer */
+static void gpi_issue_pending(struct dma_chan *chan)
+{
+ struct gpii_chan *gpii_chan = to_gpii_chan(chan);
+ struct gpii *gpii = gpii_chan->gpii;
+ unsigned long flags, pm_lock_flags;
+ struct virt_dma_desc *vd = NULL;
+ struct gpi_desc *gpi_desc;
+
+ GPII_VERB(gpii, gpii_chan->chid, "Enter\n");
+
+ read_lock_irqsave(&gpii->pm_lock, pm_lock_flags);
+
+ /* move all submitted descriptors to the issued list */
+ spin_lock_irqsave(&gpii_chan->vc.lock, flags);
+ if (vchan_issue_pending(&gpii_chan->vc))
+ vd = list_last_entry(&gpii_chan->vc.desc_issued,
+ struct virt_dma_desc, node);
+ spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
+
+ /* nothing to do, list is empty */
+ if (!vd) {
+ read_unlock_irqrestore(&gpii->pm_lock, pm_lock_flags);
+ GPII_VERB(gpii, gpii_chan->chid, "no descriptors submitted\n");
+ return;
+ }
+
+ gpi_desc = to_gpi_desc(vd);
+ gpi_write_ch_db(gpii_chan, &gpii_chan->ch_ring, gpi_desc->db);
+ read_unlock_irqrestore(&gpii->pm_lock, pm_lock_flags);
+}
+
+/* configure or issue async command */
+static int gpi_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
+{
+ struct gpii_chan *gpii_chan = to_gpii_chan(chan);
+ struct gpii *gpii = gpii_chan->gpii;
+ struct msm_gpi_ctrl *gpi_ctrl = chan->private;
+ const int ev_factor = gpii->gpi_dev->ev_factor;
+ u32 elements;
+ int i = 0;
+ int ret = 0;
+
+ GPII_INFO(gpii, gpii_chan->chid, "enter\n");
+ if (!gpi_ctrl) {
+ GPII_ERR(gpii, gpii_chan->chid,
+ "no config ctrl data provided");
+ return -EINVAL;
+ }
+
+ mutex_lock(&gpii->ctrl_lock);
+
+ switch (gpi_ctrl->cmd) {
+ case MSM_GPI_INIT:
+ GPII_INFO(gpii, gpii_chan->chid, "cmd: msm_gpi_init\n");
+
+ gpii_chan->client_info.callback = gpi_ctrl->init.callback;
+ gpii_chan->client_info.cb_param = gpi_ctrl->init.cb_param;
+ gpii_chan->pm_state = CONFIG_STATE;
+
+ /* check if both channels are configured before continue */
+ for (i = 0; i < MAX_CHANNELS_PER_GPII; i++)
+ if (gpii->gpii_chan[i].pm_state != CONFIG_STATE)
+ goto exit_gpi_init;
+
+ /* configure to highest priority from two channels */
+ gpii->ev_priority = min(gpii->gpii_chan[0].priority,
+ gpii->gpii_chan[1].priority);
+
+ /* protocol must be same for both channels */
+ if (gpii->gpii_chan[0].protocol !=
+ gpii->gpii_chan[1].protocol) {
+ GPII_ERR(gpii, gpii_chan->chid,
+ "protocol did not match protocol %u != %u\n",
+ gpii->gpii_chan[0].protocol,
+ gpii->gpii_chan[1].protocol);
+ ret = -EINVAL;
+ goto exit_gpi_init;
+ }
+ gpii->protocol = gpii_chan->protocol;
+
+ /* allocate memory for event ring */
+ elements = max(gpii->gpii_chan[0].req_tres,
+ gpii->gpii_chan[1].req_tres);
+ ret = gpi_alloc_ring(&gpii->ev_ring, elements << ev_factor,
+ sizeof(union gpi_event), gpii, true);
+ if (ret) {
+ GPII_ERR(gpii, gpii_chan->chid,
+ "error allocating mem for ev ring\n");
+ goto exit_gpi_init;
+ }
+
+ /* configure interrupts */
+ write_lock_irq(&gpii->pm_lock);
+ gpii->pm_state = PREPARE_HARDWARE;
+ write_unlock_irq(&gpii->pm_lock);
+ ret = gpi_config_interrupts(gpii, DEFAULT_IRQ_SETTINGS, 0);
+ if (ret) {
+ GPII_ERR(gpii, gpii_chan->chid,
+ "error config. interrupts, ret:%d\n", ret);
+ goto error_config_int;
+ }
+
+ /* allocate event rings */
+ ret = gpi_alloc_ev_chan(gpii);
+ if (ret) {
+ GPII_ERR(gpii, gpii_chan->chid,
+ "error alloc_ev_chan:%d\n", ret);
+ goto error_alloc_ev_ring;
+ }
+
+ /* Allocate all channels */
+ for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
+ ret = gpi_alloc_chan(&gpii->gpii_chan[i], true);
+ if (ret) {
+ GPII_ERR(gpii, gpii->gpii_chan[i].chid,
+ "Error allocating chan:%d\n", ret);
+ goto error_alloc_chan;
+ }
+ }
+
+ /* start channels */
+ for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
+ ret = gpi_start_chan(&gpii->gpii_chan[i]);
+ if (ret) {
+ GPII_ERR(gpii, gpii->gpii_chan[i].chid,
+ "Error start chan:%d\n", ret);
+ goto error_start_chan;
+ }
+ }
+
+ break;
+ case MSM_GPI_CMD_UART_SW_STALE:
+ GPII_INFO(gpii, gpii_chan->chid, "sending UART SW STALE cmd\n");
+ ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_UART_SW_STALE);
+ break;
+ case MSM_GPI_CMD_UART_RFR_READY:
+ GPII_INFO(gpii, gpii_chan->chid,
+ "sending UART RFR READY cmd\n");
+ ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_UART_RFR_READY);
+ break;
+ case MSM_GPI_CMD_UART_RFR_NOT_READY:
+ GPII_INFO(gpii, gpii_chan->chid,
+ "sending UART RFR READY NOT READY cmd\n");
+ ret = gpi_send_cmd(gpii, gpii_chan,
+ GPI_CH_CMD_UART_RFR_NOT_READY);
+ break;
+ default:
+ GPII_ERR(gpii, gpii_chan->chid,
+ "unsupported ctrl cmd:%d\n", gpi_ctrl->cmd);
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&gpii->ctrl_lock);
+ return ret;
+
+error_start_chan:
+ for (i = i - 1; i >= 0; i++) {
+ gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_STOP);
+ gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_RESET);
+ }
+ i = 2;
+error_alloc_chan:
+ for (i = i - 1; i >= 0; i--)
+ gpi_reset_chan(gpii_chan, GPI_CH_CMD_DE_ALLOC);
+error_alloc_ev_ring:
+ gpi_disable_interrupts(gpii);
+error_config_int:
+ gpi_free_ring(&gpii->ev_ring, gpii);
+exit_gpi_init:
+ mutex_unlock(&gpii->ctrl_lock);
+ return ret;
+}
+
+/* release all channel resources */
+static void gpi_free_chan_resources(struct dma_chan *chan)
+{
+ struct gpii_chan *gpii_chan = to_gpii_chan(chan);
+ struct gpii *gpii = gpii_chan->gpii;
+ enum gpi_pm_state cur_state;
+ int ret, i;
+
+ GPII_INFO(gpii, gpii_chan->chid, "enter\n");
+
+ mutex_lock(&gpii->ctrl_lock);
+
+ cur_state = gpii_chan->pm_state;
+
+ /* disable ch state so no more TRE processing for this channel */
+ write_lock_irq(&gpii->pm_lock);
+ gpii_chan->pm_state = PREPARE_TERMINATE;
+ write_unlock_irq(&gpii->pm_lock);
+
+ /* attempt a graceful hardware shutdown */
+ if (cur_state == ACTIVE_STATE) {
+ ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_STOP);
+ if (ret)
+ GPII_ERR(gpii, gpii_chan->chid,
+ "error stopping channel:%d\n", ret);
+
+ ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_RESET);
+ if (ret)
+ GPII_ERR(gpii, gpii_chan->chid,
+ "error resetting channel:%d\n", ret);
+
+ gpi_reset_chan(gpii_chan, GPI_CH_CMD_DE_ALLOC);
+ }
+
+ /* free all allocated memory */
+ gpi_free_ring(&gpii_chan->ch_ring, gpii);
+ gpi_free_ring(&gpii_chan->sg_ring, gpii);
+ vchan_free_chan_resources(&gpii_chan->vc);
+
+ write_lock_irq(&gpii->pm_lock);
+ gpii_chan->pm_state = DISABLE_STATE;
+ write_unlock_irq(&gpii->pm_lock);
+
+ /* if other rings are still active exit */
+ for (i = 0; i < MAX_CHANNELS_PER_GPII; i++)
+ if (gpii->gpii_chan[i].ch_ring.configured)
+ goto exit_free;
+
+ GPII_INFO(gpii, gpii_chan->chid, "disabling gpii\n");
+
+ /* deallocate EV Ring */
+ cur_state = gpii->pm_state;
+ write_lock_irq(&gpii->pm_lock);
+ gpii->pm_state = PREPARE_TERMINATE;
+ write_unlock_irq(&gpii->pm_lock);
+
+ /* wait for threads to complete out */
+ tasklet_kill(&gpii->ev_task);
+
+ /* send command to de allocate event ring */
+ if (cur_state == ACTIVE_STATE)
+ gpi_send_cmd(gpii, NULL, GPI_EV_CMD_DEALLOC);
+
+ gpi_free_ring(&gpii->ev_ring, gpii);
+
+ /* disable interrupts */
+ if (cur_state == ACTIVE_STATE)
+ gpi_disable_interrupts(gpii);
+
+ /* set final state to disable */
+ write_lock_irq(&gpii->pm_lock);
+ gpii->pm_state = DISABLE_STATE;
+ write_unlock_irq(&gpii->pm_lock);
+
+exit_free:
+ mutex_unlock(&gpii->ctrl_lock);
+}
+
+/* allocate channel resources */
+static int gpi_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct gpii_chan *gpii_chan = to_gpii_chan(chan);
+ struct gpii *gpii = gpii_chan->gpii;
+ int ret;
+
+ GPII_INFO(gpii, gpii_chan->chid, "enter\n");
+
+ mutex_lock(&gpii->ctrl_lock);
+
+ /* allocate memory for transfer ring */
+ ret = gpi_alloc_ring(&gpii_chan->ch_ring, gpii_chan->req_tres,
+ sizeof(struct msm_gpi_tre), gpii, true);
+ if (ret) {
+ GPII_ERR(gpii, gpii_chan->chid,
+ "error allocating xfer ring, ret:%d\n", ret);
+ goto xfer_alloc_err;
+ }
+
+ ret = gpi_alloc_ring(&gpii_chan->sg_ring, gpii_chan->ch_ring.elements,
+ sizeof(struct sg_tre), gpii, false);
+ if (ret) {
+ GPII_ERR(gpii, gpii_chan->chid,
+ "error allocating sg ring, ret:%d\n", ret);
+ goto sg_alloc_error;
+ }
+ mutex_unlock(&gpii->ctrl_lock);
+
+ return 0;
+
+sg_alloc_error:
+ gpi_free_ring(&gpii_chan->ch_ring, gpii);
+xfer_alloc_err:
+ mutex_unlock(&gpii->ctrl_lock);
+
+ return ret;
+}
+
+/* gpi_of_dma_xlate: open client requested channel */
+static struct dma_chan *gpi_of_dma_xlate(struct of_phandle_args *args,
+ struct of_dma *of_dma)
+{
+ struct gpi_dev *gpi_dev = (struct gpi_dev *)of_dma->of_dma_data;
+ u32 gpii, chid;
+ struct gpii_chan *gpii_chan;
+
+ if (args->args_count < REQ_OF_DMA_ARGS) {
+ GPI_ERR(gpi_dev,
+ "gpii require minimum 6 args, client passed:%d args\n",
+ args->args_count);
+ return NULL;
+ }
+
+ /* Check if valid gpii instance */
+ gpii = args->args[0];
+ if (!((1 << gpii) & gpi_dev->gpii_mask)) {
+ GPI_ERR(gpi_dev, "gpii instance:%d is not supported\n", gpii);
+ return NULL;
+ }
+
+ chid = args->args[1];
+ if (chid >= MAX_CHANNELS_PER_GPII) {
+ GPI_ERR(gpi_dev, "gpii channel:%d not valid\n", chid);
+ return NULL;
+ }
+
+ /* get ring size, protocol, se_id, and priority */
+ gpii_chan = &gpi_dev->gpiis[gpii].gpii_chan[chid];
+ gpii_chan->seid = args->args[2];
+ gpii_chan->protocol = args->args[3];
+ gpii_chan->req_tres = args->args[4];
+ gpii_chan->priority = args->args[5];
+
+ GPI_LOG(gpi_dev,
+ "client req. gpii:%u chid:%u #_tre:%u priority:%u protocol:%u\n",
+ gpii, chid, gpii_chan->req_tres, gpii_chan->priority,
+ gpii_chan->protocol);
+
+ return dma_get_slave_channel(&gpii_chan->vc.chan);
+}
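+/*
+ * Illustrative DT usage (node name and values are hypothetical): a
+ * client passes six cells matching the parsing above -- gpii index,
+ * channel id, serial-engine id, protocol, number of TREs and priority:
+ *
+ *   dmas = <&gpi_dma0 0 0 1 2 64 0>,
+ *          <&gpi_dma0 0 1 1 2 64 0>;
+ *   dma-names = "tx", "rx";
+ */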
+
+/* gpi_setup_debug - setup debug capabilities */
+static void gpi_setup_debug(struct gpi_dev *gpi_dev)
+{
+ char node_name[GPI_LABEL_SIZE];
+ const umode_t mode = 0600;
+ int i;
+
+ snprintf(node_name, sizeof(node_name), "%s%llx", GPI_DMA_DRV_NAME,
+ (u64)gpi_dev->res->start);
+
+ gpi_dev->ilctxt = ipc_log_context_create(IPC_LOG_PAGES,
+ node_name, 0);
+ gpi_dev->ipc_log_lvl = DEFAULT_IPC_LOG_LVL;
+ if (!IS_ERR_OR_NULL(pdentry)) {
+ snprintf(node_name, sizeof(node_name), "%llx",
+ (u64)gpi_dev->res->start);
+ gpi_dev->dentry = debugfs_create_dir(node_name, pdentry);
+ if (!IS_ERR_OR_NULL(gpi_dev->dentry)) {
+ debugfs_create_u32("ipc_log_lvl", mode, gpi_dev->dentry,
+ &gpi_dev->ipc_log_lvl);
+ debugfs_create_u32("klog_lvl", mode,
+ gpi_dev->dentry, &gpi_dev->klog_lvl);
+ }
+ }
+
+ for (i = 0; i < gpi_dev->max_gpii; i++) {
+ struct gpii *gpii;
+
+ if (!((1 << i) & gpi_dev->gpii_mask))
+ continue;
+
+ gpii = &gpi_dev->gpiis[i];
+ snprintf(gpii->label, sizeof(gpii->label),
+ "%s%llx_gpii%d",
+ GPI_DMA_DRV_NAME, (u64)gpi_dev->res->start, i);
+ gpii->ilctxt = ipc_log_context_create(IPC_LOG_PAGES,
+ gpii->label, 0);
+ gpii->ipc_log_lvl = DEFAULT_IPC_LOG_LVL;
+ gpii->klog_lvl = DEFAULT_KLOG_LVL;
+
+ if (IS_ERR_OR_NULL(gpi_dev->dentry))
+ continue;
+
+ snprintf(node_name, sizeof(node_name), "gpii%d", i);
+ gpii->dentry = debugfs_create_dir(node_name, gpi_dev->dentry);
+ if (IS_ERR_OR_NULL(gpii->dentry))
+ continue;
+
+ debugfs_create_u32("ipc_log_lvl", mode, gpii->dentry,
+ &gpii->ipc_log_lvl);
+ debugfs_create_u32("klog_lvl", mode, gpii->dentry,
+ &gpii->klog_lvl);
+ }
+}
+
+static int gpi_smmu_init(struct gpi_dev *gpi_dev)
+{
+ u64 size = U64_MAX;
+ dma_addr_t base = 0x0;
+ struct dma_iommu_mapping *map;
+ int attr, ret;
+
+ map = arm_iommu_create_mapping(&platform_bus_type, base, size);
+ if (IS_ERR_OR_NULL(map)) {
+ ret = PTR_ERR(map) ? : -EIO;
+ GPI_ERR(gpi_dev, "error create_mapping, ret:%d\n", ret);
+ return ret;
+ }
+
+ attr = 1;
+ ret = iommu_domain_set_attr(map->domain, DOMAIN_ATTR_ATOMIC, &attr);
+ if (ret) {
+ GPI_ERR(gpi_dev, "error setting ATTTR_ATOMIC, ret:%d\n", ret);
+ goto error_smmu;
+ }
+
+ attr = 1;
+ ret = iommu_domain_set_attr(map->domain, DOMAIN_ATTR_S1_BYPASS, &attr);
+ if (ret) {
+ GPI_ERR(gpi_dev, "error setting S1_BYPASS, ret:%d\n", ret);
+ goto error_smmu;
+ }
+
+ ret = arm_iommu_attach_device(gpi_dev->dev, map);
+ if (ret) {
+ GPI_ERR(gpi_dev, "error iommu_attach, ret:%d\n", ret);
+ goto error_smmu;
+ }
+
+ ret = dma_set_mask(gpi_dev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ GPI_ERR(gpi_dev, "error setting dma_mask, ret:%d\n", ret);
+ goto error_set_mask;
+ }
+
+ return ret;
+
+error_set_mask:
+ arm_iommu_detach_device(gpi_dev->dev);
+error_smmu:
+ arm_iommu_release_mapping(map);
+ return ret;
+}
+
+static int gpi_probe(struct platform_device *pdev)
+{
+ struct gpi_dev *gpi_dev;
+ int ret, i;
+
+ gpi_dev = devm_kzalloc(&pdev->dev, sizeof(*gpi_dev), GFP_KERNEL);
+ if (!gpi_dev)
+ return -ENOMEM;
+
+ gpi_dev->dev = &pdev->dev;
+ gpi_dev->klog_lvl = DEFAULT_KLOG_LVL;
+ gpi_dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "gpi-top");
+ if (!gpi_dev->res) {
+ GPI_ERR(gpi_dev, "missing 'reg' DT node\n");
+ return -EINVAL;
+ }
+ gpi_dev->regs = devm_ioremap_nocache(gpi_dev->dev, gpi_dev->res->start,
+ resource_size(gpi_dev->res));
+ if (!gpi_dev->regs) {
+ GPI_ERR(gpi_dev, "IO remap failed\n");
+ return -EFAULT;
+ }
+
+ ret = of_property_read_u32(gpi_dev->dev->of_node, "qcom,max-num-gpii",
+ &gpi_dev->max_gpii);
+ if (ret) {
+ GPI_ERR(gpi_dev, "missing 'max-no-gpii' DT node\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(gpi_dev->dev->of_node, "qcom,gpii-mask",
+ &gpi_dev->gpii_mask);
+ if (ret) {
+ GPI_ERR(gpi_dev, "missing 'gpii-mask' DT node\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(gpi_dev->dev->of_node, "qcom,ev-factor",
+ &gpi_dev->ev_factor);
+ if (ret) {
+ GPI_ERR(gpi_dev, "missing 'qcom,ev-factor' DT node\n");
+ return ret;
+ }
+
+ ret = gpi_smmu_init(gpi_dev);
+ if (ret) {
+ GPI_ERR(gpi_dev, "error configuring smmu, ret:%d\n", ret);
+ return ret;
+ }
+
+ gpi_dev->gpiis = devm_kzalloc(gpi_dev->dev,
+ sizeof(*gpi_dev->gpiis) * gpi_dev->max_gpii,
+ GFP_KERNEL);
+ if (!gpi_dev->gpiis)
+ return -ENOMEM;
+
+
+ /* setup all the supported gpii */
+ INIT_LIST_HEAD(&gpi_dev->dma_device.channels);
+ for (i = 0; i < gpi_dev->max_gpii; i++) {
+ struct gpii *gpii = &gpi_dev->gpiis[i];
+ int chan;
+
+ if (!((1 << i) & gpi_dev->gpii_mask))
+ continue;
+
+ /* set up ev cntxt register map */
+ gpii->ev_cntxt_base_reg = gpi_dev->regs +
+ GPI_GPII_n_EV_CH_k_CNTXT_0_OFFS(i, 0);
+ gpii->ev_cntxt_db_reg = gpi_dev->regs +
+ GPI_GPII_n_EV_CH_k_DOORBELL_0_OFFS(i, 0);
+ gpii->ev_ring_base_lsb_reg = gpii->ev_cntxt_base_reg +
+ CNTXT_2_RING_BASE_LSB;
+ gpii->ev_ring_rp_lsb_reg = gpii->ev_cntxt_base_reg +
+ CNTXT_4_RING_RP_LSB;
+ gpii->ev_ring_wp_lsb_reg = gpii->ev_cntxt_base_reg +
+ CNTXT_6_RING_WP_LSB;
+ gpii->ev_cmd_reg = gpi_dev->regs +
+ GPI_GPII_n_EV_CH_CMD_OFFS(i);
+ gpii->ieob_src_reg = gpi_dev->regs +
+ GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_OFFS(i);
+ gpii->ieob_clr_reg = gpi_dev->regs +
+ GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(i);
+
+ /* set up irq */
+ ret = platform_get_irq(pdev, i);
+ if (ret < 0) {
+ GPI_ERR(gpi_dev, "could not req. irq for gpii%d ret:%d",
+ i, ret);
+ return ret;
+ }
+ gpii->irq = ret;
+
+ /* set up channel specific register info */
+ for (chan = 0; chan < MAX_CHANNELS_PER_GPII; chan++) {
+ struct gpii_chan *gpii_chan = &gpii->gpii_chan[chan];
+
+ /* set up ch cntxt register map */
+ gpii_chan->ch_cntxt_base_reg = gpi_dev->regs +
+ GPI_GPII_n_CH_k_CNTXT_0_OFFS(i, chan);
+ gpii_chan->ch_cntxt_db_reg = gpi_dev->regs +
+ GPI_GPII_n_CH_k_DOORBELL_0_OFFS(i, chan);
+ gpii_chan->ch_ring_base_lsb_reg =
+ gpii_chan->ch_cntxt_base_reg +
+ CNTXT_2_RING_BASE_LSB;
+ gpii_chan->ch_ring_rp_lsb_reg =
+ gpii_chan->ch_cntxt_base_reg +
+ CNTXT_4_RING_RP_LSB;
+ gpii_chan->ch_ring_wp_lsb_reg =
+ gpii_chan->ch_cntxt_base_reg +
+ CNTXT_6_RING_WP_LSB;
+ gpii_chan->ch_cmd_reg = gpi_dev->regs +
+ GPI_GPII_n_CH_CMD_OFFS(i);
+
+ /* vchan setup */
+ vchan_init(&gpii_chan->vc, &gpi_dev->dma_device);
+ gpii_chan->vc.desc_free = gpi_desc_free;
+ gpii_chan->chid = chan;
+ gpii_chan->gpii = gpii;
+ gpii_chan->dir = GPII_CHAN_DIR[chan];
+ }
+ mutex_init(&gpii->ctrl_lock);
+ rwlock_init(&gpii->pm_lock);
+ tasklet_init(&gpii->ev_task, gpi_ev_tasklet,
+ (unsigned long)gpii);
+ init_completion(&gpii->cmd_completion);
+ gpii->gpii_id = i;
+ gpii->regs = gpi_dev->regs;
+ gpii->gpi_dev = gpi_dev;
+ atomic_set(&gpii->dbg_index, 0);
+ }
+
+ platform_set_drvdata(pdev, gpi_dev);
+
+ /* clear and Set capabilities */
+ dma_cap_zero(gpi_dev->dma_device.cap_mask);
+ dma_cap_set(DMA_SLAVE, gpi_dev->dma_device.cap_mask);
+
+ /* configure dmaengine apis */
+ gpi_dev->dma_device.directions =
+ BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ gpi_dev->dma_device.residue_granularity =
+ DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+ gpi_dev->dma_device.src_addr_widths = DMA_SLAVE_BUSWIDTH_8_BYTES;
+ gpi_dev->dma_device.dst_addr_widths = DMA_SLAVE_BUSWIDTH_8_BYTES;
+ gpi_dev->dma_device.device_alloc_chan_resources =
+ gpi_alloc_chan_resources;
+ gpi_dev->dma_device.device_free_chan_resources =
+ gpi_free_chan_resources;
+ gpi_dev->dma_device.device_tx_status = dma_cookie_status;
+ gpi_dev->dma_device.device_issue_pending = gpi_issue_pending;
+ gpi_dev->dma_device.device_prep_slave_sg = gpi_prep_slave_sg;
+ gpi_dev->dma_device.device_config = gpi_config;
+ gpi_dev->dma_device.device_terminate_all = gpi_terminate_all;
+ gpi_dev->dma_device.dev = gpi_dev->dev;
+ gpi_dev->dma_device.device_pause = gpi_pause;
+ gpi_dev->dma_device.device_resume = gpi_resume;
+
+ /* register with dmaengine framework */
+ ret = dma_async_device_register(&gpi_dev->dma_device);
+ if (ret) {
+ GPI_ERR(gpi_dev, "async_device_register failed ret:%d", ret);
+ return ret;
+ }
+
+ ret = of_dma_controller_register(gpi_dev->dev->of_node,
+ gpi_of_dma_xlate, gpi_dev);
+ if (ret) {
+ GPI_ERR(gpi_dev, "of_dma_controller_reg failed ret:%d", ret);
+ return ret;
+ }
+
+ /* setup debug capabilities */
+ gpi_setup_debug(gpi_dev);
+ GPI_LOG(gpi_dev, "probe success\n");
+
+ return ret;
+}
+
+static const struct of_device_id gpi_of_match[] = {
+ { .compatible = "qcom,gpi-dma" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, gpi_of_match);
+
+static struct platform_driver gpi_driver = {
+ .probe = gpi_probe,
+ .driver = {
+ .name = GPI_DMA_DRV_NAME,
+ .of_match_table = gpi_of_match,
+ },
+};
+
+static int __init gpi_init(void)
+{
+ pdentry = debugfs_create_dir(GPI_DMA_DRV_NAME, NULL);
+ return platform_driver_register(&gpi_driver);
+}
+module_init(gpi_init)
+
+MODULE_DESCRIPTION("QCOM GPI DMA engine driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/qcom/msm_gpi_mmio.h b/drivers/dma/qcom/msm_gpi_mmio.h
new file mode 100644
index 0000000..3fcff9e
--- /dev/null
+++ b/drivers/dma/qcom/msm_gpi_mmio.h
@@ -0,0 +1,224 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* Register offsets from gpi-top */
+#define GPI_GPII_n_CH_k_CNTXT_0_OFFS(n, k) \
+ (0x20000 + (0x4000 * (n)) + (0x80 * (k)))
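+/*
+ * Worked example (illustrative): the channel context block for
+ * gpii n = 1, channel k = 1 starts at
+ * 0x20000 + 0x4000 * 1 + 0x80 * 1 = 0x24080.
+ */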
+#define GPI_GPII_n_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK (0xFF000000)
+#define GPI_GPII_n_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT (24)
+#define GPI_GPII_n_CH_k_CNTXT_0_CHSTATE_BMSK (0xF00000)
+#define GPI_GPII_n_CH_k_CNTXT_0_CHSTATE_SHFT (20)
+#define GPI_GPII_n_CH_k_CNTXT_0_ERINDEX_BMSK (0x7C000)
+#define GPI_GPII_n_CH_k_CNTXT_0_ERINDEX_SHFT (14)
+#define GPI_GPII_n_CH_k_CNTXT_0_CHID_BMSK (0x1F00)
+#define GPI_GPII_n_CH_k_CNTXT_0_CHID_SHFT (8)
+#define GPI_GPII_n_CH_k_CNTXT_0_EE_BMSK (0xF0)
+#define GPI_GPII_n_CH_k_CNTXT_0_EE_SHFT (4)
+#define GPI_GPII_n_CH_k_CNTXT_0_CHTYPE_DIR_BMSK (0x8)
+#define GPI_GPII_n_CH_k_CNTXT_0_CHTYPE_DIR_SHFT (3)
+#define GPI_GPII_n_CH_k_CNTXT_0_CHTYPE_PROTO_BMSK (0x7)
+#define GPI_GPII_n_CH_k_CNTXT_0_CHTYPE_PROTO_SHFT (0)
+#define GPI_GPII_n_CH_k_CNTXT_0(el_size, erindex, chtype_dir, chtype_proto) \
+ (((el_size) << 24) | ((erindex) << 14) | ((chtype_dir) << 3) | \
+ (chtype_proto))
+#define GPI_CHTYPE_DIR_IN (0)
+#define GPI_CHTYPE_DIR_OUT (1)
+#define GPI_CHTYPE_PROTO_GPI (0x2)
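+/*
+ * Worked example (illustrative): an outbound GPI channel with 16-byte
+ * elements on event-ring index 0 packs to
+ *   GPI_GPII_n_CH_k_CNTXT_0(0x10, 0, GPI_CHTYPE_DIR_OUT,
+ *                           GPI_CHTYPE_PROTO_GPI)
+ *   = (0x10 << 24) | (0 << 14) | (1 << 3) | 0x2 = 0x1000000a
+ */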
+#define GPI_GPII_n_CH_k_CNTXT_1_R_LENGTH_BMSK (0xFFFF)
+#define GPI_GPII_n_CH_k_CNTXT_1_R_LENGTH_SHFT (0)
+#define GPI_GPII_n_CH_k_DOORBELL_0_OFFS(n, k) (0x22000 + (0x4000 * (n)) \
+ + (0x8 * (k)))
+#define GPI_GPII_n_CH_CMD_OFFS(n) (0x23008 + (0x4000 * (n)))
+#define GPI_GPII_n_CH_CMD_OPCODE_BMSK (0xFF000000)
+#define GPI_GPII_n_CH_CMD_OPCODE_SHFT (24)
+#define GPI_GPII_n_CH_CMD_CHID_BMSK (0xFF)
+#define GPI_GPII_n_CH_CMD_CHID_SHFT (0)
+#define GPI_GPII_n_CH_CMD(opcode, chid) (((opcode) << 24) | (chid))
+#define GPI_GPII_n_CH_CMD_ALLOCATE (0)
+#define GPI_GPII_n_CH_CMD_START (1)
+#define GPI_GPII_n_CH_CMD_STOP (2)
+#define GPI_GPII_n_CH_CMD_RESET (9)
+#define GPI_GPII_n_CH_CMD_DE_ALLOC (10)
+#define GPI_GPII_n_CH_CMD_UART_SW_STALE (32)
+#define GPI_GPII_n_CH_CMD_UART_RFR_READY (33)
+#define GPI_GPII_n_CH_CMD_UART_RFR_NOT_READY (34)
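+/*
+ * Worked example (illustrative): starting channel 1 writes
+ *   GPI_GPII_n_CH_CMD(GPI_GPII_n_CH_CMD_START, 1)
+ *   = (1 << 24) | 1 = 0x01000001
+ * to the GPI_GPII_n_CH_CMD register for that gpii.
+ */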
+
+/* EV Context Array */
+#define GPI_GPII_n_EV_CH_k_CNTXT_0_OFFS(n, k) \
+ (0x21000 + (0x4000 * (n)) + (0x80 * (k)))
+#define GPI_GPII_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK (0xFF000000)
+#define GPI_GPII_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT (24)
+#define GPI_GPII_n_EV_CH_k_CNTXT_0_CHSTATE_BMSK (0xF00000)
+#define GPI_GPII_n_EV_CH_k_CNTXT_0_CHSTATE_SHFT (20)
+#define GPI_GPII_n_EV_CH_k_CNTXT_0_INTYPE_BMSK (0x10000)
+#define GPI_GPII_n_EV_CH_k_CNTXT_0_INTYPE_SHFT (16)
+#define GPI_GPII_n_EV_CH_k_CNTXT_0_EVCHID_BMSK (0xFF00)
+#define GPI_GPII_n_EV_CH_k_CNTXT_0_EVCHID_SHFT (8)
+#define GPI_GPII_n_EV_CH_k_CNTXT_0_EE_BMSK (0xF0)
+#define GPI_GPII_n_EV_CH_k_CNTXT_0_EE_SHFT (4)
+#define GPI_GPII_n_EV_CH_k_CNTXT_0_CHTYPE_BMSK (0xF)
+#define GPI_GPII_n_EV_CH_k_CNTXT_0_CHTYPE_SHFT (0)
+#define GPI_GPII_n_EV_CH_k_CNTXT_0(el_size, intype, chtype) \
+ (((el_size) << 24) | ((intype) << 16) | (chtype))
+#define GPI_INTTYPE_IRQ (1)
+#define GPI_CHTYPE_GPI_EV (0x2)
+#define GPI_GPII_n_EV_CH_k_CNTXT_1_R_LENGTH_BMSK (0xFFFF)
+#define GPI_GPII_n_EV_CH_k_CNTXT_1_R_LENGTH_SHFT (0)
+
+enum CNTXT_OFFS {
+ CNTXT_0_CONFIG = 0x0,
+ CNTXT_1_R_LENGTH = 0x4,
+ CNTXT_2_RING_BASE_LSB = 0x8,
+ CNTXT_3_RING_BASE_MSB = 0xC,
+ CNTXT_4_RING_RP_LSB = 0x10,
+ CNTXT_5_RING_RP_MSB = 0x14,
+ CNTXT_6_RING_WP_LSB = 0x18,
+ CNTXT_7_RING_WP_MSB = 0x1C,
+ CNTXT_8_RING_INT_MOD = 0x20,
+ CNTXT_9_RING_INTVEC = 0x24,
+ CNTXT_10_RING_MSI_LSB = 0x28,
+ CNTXT_11_RING_MSI_MSB = 0x2C,
+ CNTXT_12_RING_RP_UPDATE_LSB = 0x30,
+ CNTXT_13_RING_RP_UPDATE_MSB = 0x34,
+};
+
+#define GPI_GPII_n_EV_CH_k_DOORBELL_0_OFFS(n, k) \
+ (0x22100 + (0x4000 * (n)) + (0x8 * (k)))
+#define GPI_GPII_n_EV_CH_CMD_OFFS(n) \
+ (0x23010 + (0x4000 * (n)))
+#define GPI_GPII_n_EV_CH_CMD_OPCODE_BMSK (0xFF000000)
+#define GPI_GPII_n_EV_CH_CMD_OPCODE_SHFT (24)
+#define GPI_GPII_n_EV_CH_CMD_CHID_BMSK (0xFF)
+#define GPI_GPII_n_EV_CH_CMD_CHID_SHFT (0)
+#define GPI_GPII_n_EV_CH_CMD(opcode, chid) \
+ (((opcode) << 24) | (chid))
+#define GPI_GPII_n_EV_CH_CMD_ALLOCATE (0x00)
+#define GPI_GPII_n_EV_CH_CMD_RESET (0x09)
+#define GPI_GPII_n_EV_CH_CMD_DE_ALLOC (0x0A)
+
+#define GPI_GPII_n_CNTXT_TYPE_IRQ_OFFS(n) \
+ (0x23080 + (0x4000 * (n)))
+
+/* mask type register */
+#define GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(n) \
+ (0x23088 + (0x4000 * (n)))
+#define GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK (0x7F)
+#define GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_SHFT (0)
+#define GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GENERAL (0x40)
+#define GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_INTER_GPII_EV_CTRL (0x20)
+#define GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_INTER_GPII_CH_CTRL (0x10)
+#define GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB (0x08)
+#define GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB (0x04)
+#define GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL (0x02)
+#define GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL (0x01)
+
+#define GPI_GPII_n_CNTXT_SRC_GPII_CH_IRQ_OFFS(n) \
+ (0x23090 + (0x4000 * (n)))
+#define GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_OFFS(n) \
+ (0x23094 + (0x4000 * (n)))
+
+/* Mask channel control interrupt register */
+#define GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS(n) \
+ (0x23098 + (0x4000 * (n)))
+#define GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK (0x3)
+#define GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_SHFT (0)
+
+/* Mask event control interrupt register */
+#define GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(n) \
+ (0x2309C + (0x4000 * (n)))
+#define GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK (0x1)
+#define GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_SHFT (0)
+
+#define GPI_GPII_n_CNTXT_SRC_CH_IRQ_CLR_OFFS(n) \
+ (0x230A0 + (0x4000 * (n)))
+#define GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS(n) \
+ (0x230A4 + (0x4000 * (n)))
+#define GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_OFFS(n) \
+ (0x230B0 + (0x4000 * (n)))
+
+/* Mask event interrupt register */
+#define GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(n) \
+ (0x230B8 + (0x4000 * (n)))
+#define GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK (0x1)
+#define GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_SHFT (0)
+
+#define GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(n) \
+ (0x230C0 + (0x4000 * (n)))
+#define GPI_GPII_n_CNTXT_GLOB_IRQ_STTS_OFFS(n) \
+ (0x23100 + (0x4000 * (n)))
+#define GPI_GLOB_IRQ_ERROR_INT_MSK (0x1)
+#define GPI_GLOB_IRQ_GP_INT1_MSK (0x2)
+#define GPI_GLOB_IRQ_GP_INT2_MSK (0x4)
+#define GPI_GLOB_IRQ_GP_INT3_MSK (0x8)
+
+/* GPII specific Global - Enable bit register */
+#define GPI_GPII_n_CNTXT_GLOB_IRQ_EN_OFFS(n) \
+ (0x23108 + (0x4000 * (n)))
+#define GPI_GPII_n_CNTXT_GLOB_IRQ_EN_BMSK (0xF)
+#define GPI_GPII_n_CNTXT_GLOB_IRQ_EN_SHFT (0)
+#define GPI_GPII_n_CNTXT_GLOB_IRQ_EN_GP_INT3 (0x8)
+#define GPI_GPII_n_CNTXT_GLOB_IRQ_EN_GP_INT2 (0x4)
+#define GPI_GPII_n_CNTXT_GLOB_IRQ_EN_GP_INT1 (0x2)
+#define GPI_GPII_n_CNTXT_GLOB_IRQ_EN_ERROR_INT (0x1)
+
+#define GPI_GPII_n_CNTXT_GLOB_IRQ_CLR_OFFS(n) \
+ (0x23110 + (0x4000 * (n)))
+#define GPI_GPII_n_CNTXT_GPII_IRQ_STTS_OFFS(n) \
+ (0x23118 + (0x4000 * (n)))
+
+/* GPII general interrupt - Enable bit register */
+#define GPI_GPII_n_CNTXT_GPII_IRQ_EN_OFFS(n) \
+ (0x23120 + (0x4000 * (n)))
+#define GPI_GPII_n_CNTXT_GPII_IRQ_EN_BMSK (0xF)
+#define GPI_GPII_n_CNTXT_GPII_IRQ_EN_SHFT (0)
+#define GPI_GPII_n_CNTXT_GPII_IRQ_EN_STACK_OVRFLOW (0x8)
+#define GPI_GPII_n_CNTXT_GLOB_IRQ_EN_CMD_FIFO_OVRFLOW (0x4)
+#define GPI_GPII_n_CNTXT_GLOB_IRQ_EN_BUS_ERROR (0x2)
+#define GPI_GPII_n_CNTXT_GLOB_IRQ_EN_BREAK_POINT (0x1)
+
+#define GPI_GPII_n_CNTXT_GPII_IRQ_CLR_OFFS(n) \
+ (0x23128 + (0x4000 * (n)))
+
+/* GPII Interrupt Type register */
+#define GPI_GPII_n_CNTXT_INTSET_OFFS(n) \
+ (0x23180 + (0x4000 * (n)))
+#define GPI_GPII_n_CNTXT_INTSET_BMSK (0x1)
+#define GPI_GPII_n_CNTXT_INTSET_SHFT (0)
+
+#define GPI_GPII_n_CNTXT_MSI_BASE_LSB_OFFS(n) \
+ (0x23188 + (0x4000 * (n)))
+#define GPI_GPII_n_CNTXT_MSI_BASE_MSB_OFFS(n) \
+ (0x2318C + (0x4000 * (n)))
+#define GPI_GPII_n_CNTXT_SCRATCH_0_OFFS(n) \
+ (0x23400 + (0x4000 * (n)))
+#define GPI_GPII_n_CNTXT_SCRATCH_1_OFFS(n) \
+ (0x23404 + (0x4000 * (n)))
+
+#define GPI_GPII_n_ERROR_LOG_OFFS(n) \
+ (0x23200 + (0x4000 * (n)))
+#define GPI_GPII_n_ERROR_LOG_CLR_OFFS(n) \
+ (0x23210 + (0x4000 * (n)))
+
+/* QOS Registers */
+#define GPI_GPII_n_CH_k_QOS_OFFS(n, k) \
+ (0x2005C + (0x4000 * (n)) + (0x80 * (k)))
+
+/* Scratch registers */
+#define GPI_GPII_n_CH_k_SCRATCH_0_OFFS(n, k) \
+ (0x20060 + (0x4000 * (n)) + (0x80 * (k)))
+#define GPI_GPII_n_CH_K_SCRATCH_0(pair, proto, seid) \
+ (((pair) << 16) | ((proto) << 4) | (seid))
+#define GPI_GPII_n_CH_k_SCRATCH_1_OFFS(n, k) \
+ (0x20064 + (0x4000 * (n)) + (0x80 * (k)))
+#define GPI_GPII_n_CH_k_SCRATCH_2_OFFS(n, k) \
+ (0x20068 + (0x4000 * (n)) + (0x80 * (k)))
+#define GPI_GPII_n_CH_k_SCRATCH_3_OFFS(n, k) \
+ (0x2006C + (0x4000 * (n)) + (0x80 * (k)))
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 475ea75..9bdde0b 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -940,8 +940,6 @@
int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
{
unsigned long flags;
- dma_addr_t paddr;
- void __iomem *vaddr = NULL;
/* config types are set a boot time and never change */
if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
@@ -959,21 +957,12 @@
*/
tmc_etr_enable_hw(drvdata);
} else {
- /*
- * The ETR is not tracing and the buffer was just read.
- * As such prepare to free the trace buffer.
- */
- vaddr = drvdata->vaddr;
- paddr = drvdata->paddr;
- drvdata->buf = drvdata->vaddr = NULL;
+ tmc_etr_free_mem(drvdata);
+ drvdata->buf = NULL;
}
drvdata->reading = false;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- /* Free allocated memory out side of the spinlock */
- if (vaddr)
- dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);
-
return 0;
}
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 58e8850..622ccbc 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -20,6 +20,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/qcom-geni-se.h>
@@ -50,9 +51,12 @@
#define SLV_ADDR_MSK (GENMASK(15, 9))
#define SLV_ADDR_SHFT (9)
+#define I2C_CORE2X_VOTE (10000)
+
struct geni_i2c_dev {
struct device *dev;
void __iomem *base;
+ unsigned int tx_wm;
int irq;
int err;
struct i2c_adapter adap;
@@ -61,6 +65,7 @@
struct se_geni_rsc i2c_rsc;
int cur_wr;
int cur_rd;
+ struct device *wrapper_dev;
};
static inline void qcom_geni_i2c_conf(void __iomem *base, int dfs, int div)
@@ -114,7 +119,7 @@
}
} else if ((m_stat & M_TX_FIFO_WATERMARK_EN) &&
!(cur->flags & I2C_M_RD)) {
- for (j = 0; j < 0x1f; j++) {
+ for (j = 0; j < gi2c->tx_wm; j++) {
u32 temp = 0;
int p;
@@ -163,9 +168,7 @@
pm_runtime_set_suspended(gi2c->dev);
return ret;
}
- geni_se_init(gi2c->base, FIFO_MODE, 0xF, 0x10);
qcom_geni_i2c_conf(gi2c->base, 0, 2);
- se_config_packing(gi2c->base, 8, 4, true);
dev_dbg(gi2c->dev, "i2c xfer:num:%d, msgs:len:%d,flg:%d\n",
num, msgs[0].len, msgs[0].flags);
for (i = 0; i < num; i++) {
@@ -237,6 +240,8 @@
{
struct geni_i2c_dev *gi2c;
struct resource *res;
+ struct platform_device *wrapper_pdev;
+ struct device_node *wrapper_ph_node;
int ret;
gi2c = devm_kzalloc(&pdev->dev, sizeof(*gi2c), GFP_KERNEL);
@@ -249,6 +254,29 @@
if (!res)
return -EINVAL;
+ wrapper_ph_node = of_parse_phandle(pdev->dev.of_node,
+ "qcom,wrapper-core", 0);
+ if (IS_ERR_OR_NULL(wrapper_ph_node)) {
+ ret = PTR_ERR(wrapper_ph_node) ? : -ENODEV;
+ dev_err(&pdev->dev, "No wrapper core defined\n");
+ return ret;
+ }
+ wrapper_pdev = of_find_device_by_node(wrapper_ph_node);
+ of_node_put(wrapper_ph_node);
+ if (IS_ERR_OR_NULL(wrapper_pdev)) {
+ ret = PTR_ERR(wrapper_pdev) ? : -ENODEV;
+ dev_err(&pdev->dev, "Cannot retrieve wrapper device\n");
+ return ret;
+ }
+ gi2c->wrapper_dev = &wrapper_pdev->dev;
+ gi2c->i2c_rsc.wrapper_dev = &wrapper_pdev->dev;
+ ret = geni_se_resources_init(&gi2c->i2c_rsc, I2C_CORE2X_VOTE,
+ (DEFAULT_SE_CLK * DEFAULT_BUS_WIDTH));
+ if (ret) {
+ dev_err(gi2c->dev, "geni_se_resources_init\n");
+ return ret;
+ }
+
gi2c->i2c_rsc.se_clk = devm_clk_get(&pdev->dev, "se-clk");
if (IS_ERR(gi2c->i2c_rsc.se_clk)) {
ret = PTR_ERR(gi2c->i2c_rsc.se_clk);
@@ -360,6 +388,14 @@
if (ret)
return ret;
+ if (unlikely(!gi2c->tx_wm)) {
+ int gi2c_tx_depth = get_tx_fifo_depth(gi2c->base);
+
+ gi2c->tx_wm = gi2c_tx_depth - 1;
+ geni_se_init(gi2c->base, gi2c->tx_wm, gi2c_tx_depth);
+ geni_se_select_mode(gi2c->base, FIFO_MODE);
+ se_config_packing(gi2c->base, 8, 4, true);
+ }
enable_irq(gi2c->irq);
return 0;
}
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index fbab1f1..d52b534 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -421,7 +421,6 @@
config IOMMU_DEBUG_TRACKING
bool "Track key IOMMU events"
- depends on BROKEN
select IOMMU_API
help
Enables additional debug tracking in the IOMMU framework code.
diff --git a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
index 236e7f1..ca0dfac 100644
--- a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
+++ b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
@@ -94,11 +94,6 @@
dma_addr_t base;
};
-struct cam_smmu_region_info {
- dma_addr_t iova_start;
- size_t iova_len;
-};
-
struct cam_context_bank_info {
struct device *dev;
struct dma_iommu_mapping *mapping;
@@ -994,6 +989,87 @@
}
EXPORT_SYMBOL(cam_smmu_dealloc_firmware);
+int cam_smmu_get_region_info(int32_t smmu_hdl,
+ enum cam_smmu_region_id region_id,
+ struct cam_smmu_region_info *region_info)
+{
+ int32_t idx;
+ struct cam_context_bank_info *cb = NULL;
+
+ if (!region_info) {
+ pr_err("Invalid region_info pointer\n");
+ return -EINVAL;
+ }
+
+ if (smmu_hdl == HANDLE_INIT) {
+ pr_err("Invalid handle\n");
+ return -EINVAL;
+ }
+
+ idx = GET_SMMU_TABLE_IDX(smmu_hdl);
+ if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+ pr_err("Handle or index invalid. idx = %d hdl = %x\n",
+ idx, smmu_hdl);
+ return -EINVAL;
+ }
+
+ mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+ cb = &iommu_cb_set.cb_info[idx];
+ if (!cb) {
+ pr_err("SMMU context bank pointer invalid\n");
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return -EINVAL;
+ }
+
+ switch (region_id) {
+ case CAM_SMMU_REGION_FIRMWARE:
+ if (!cb->firmware_support) {
+ pr_err("Firmware not supported\n");
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return -ENODEV;
+ }
+ region_info->iova_start = cb->firmware_info.iova_start;
+ region_info->iova_len = cb->firmware_info.iova_len;
+ break;
+ case CAM_SMMU_REGION_SHARED:
+ if (!cb->shared_support) {
+ pr_err("Shared mem not supported\n");
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return -ENODEV;
+ }
+ region_info->iova_start = cb->shared_info.iova_start;
+ region_info->iova_len = cb->shared_info.iova_len;
+ break;
+ case CAM_SMMU_REGION_SCRATCH:
+ if (!cb->scratch_buf_support) {
+ pr_err("Scratch memory not supported\n");
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return -ENODEV;
+ }
+ region_info->iova_start = cb->scratch_info.iova_start;
+ region_info->iova_len = cb->scratch_info.iova_len;
+ break;
+ case CAM_SMMU_REGION_IO:
+ if (!cb->io_support) {
+ pr_err("IO memory not supported\n");
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return -ENODEV;
+ }
+ region_info->iova_start = cb->io_info.iova_start;
+ region_info->iova_len = cb->io_info.iova_len;
+ break;
+ default:
+ pr_err("Invalid region id: %d for smmu hdl: %X\n",
+ region_id, smmu_hdl);
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return -EINVAL;
+ }
+
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return 0;
+}
+EXPORT_SYMBOL(cam_smmu_get_region_info);
+
static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
enum dma_data_direction dma_dir, dma_addr_t *paddr_ptr,
size_t *len_ptr,
diff --git a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h
index 76e9135..20445f3 100644
--- a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h
+++ b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h
@@ -49,6 +49,17 @@
};
/**
+ * @brief : Structure to store region information
+ *
+ * @param iova_start : Start address of region
+ * @param iova_len : length of region
+ */
+struct cam_smmu_region_info {
+ dma_addr_t iova_start;
+ size_t iova_len;
+};
+
+/**
* @brief : Gets an smmu handle
*
* @param identifier: Unique identifier to be used by clients which they
@@ -252,4 +263,17 @@
* @return Status of operation. Negative in case of error. Zero otherwise.
*/
int cam_smmu_dealloc_firmware(int32_t smmu_hdl);
+
+/**
+ * @brief Gets region information specified by smmu handle and region id
+ *
+ * @param smmu_hdl: SMMU handle identifying the context bank
+ * @param region_id: Region id for which information is desired
+ * @param region_info: Struct populated with region information
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_get_region_info(int32_t smmu_hdl,
+ enum cam_smmu_region_id region_id,
+ struct cam_smmu_region_info *region_info);
#endif /* _CAM_SMMU_API_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
index a736148..901632a 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
@@ -246,7 +246,6 @@
sync_cb->status = list_info->status;
queue_work(sync_dev->work_queue,
&sync_cb->cb_dispatch_work);
- list_del_init(&sync_cb->list);
}
/* Dispatch user payloads if any were registered earlier */
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
index ecc62c8..3b3cbff 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
@@ -223,6 +223,7 @@
cb_info->status,
cb_info->cb_data);
+ list_del_init(&cb_info->list);
kfree(cb_info);
}
diff --git a/drivers/media/platform/msm/camera/cam_utils/Makefile b/drivers/media/platform/msm/camera/cam_utils/Makefile
index 6f9525e..f22115c 100644
--- a/drivers/media/platform/msm/camera/cam_utils/Makefile
+++ b/drivers/media/platform/msm/camera/cam_utils/Makefile
@@ -1 +1,3 @@
-obj-$(CONFIG_SPECTRA_CAMERA) += cam_soc_util.o cam_io_util.o
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_soc_util.o cam_io_util.o cam_packet_util.o
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
new file mode 100644
index 0000000..6d90c1e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
@@ -0,0 +1,78 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include "cam_mem_mgr.h"
+#include "cam_packet_util.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+int cam_packet_util_process_patches(struct cam_packet *packet,
+ int32_t iommu_hdl)
+{
+ struct cam_patch_desc *patch_desc = NULL;
+ uint64_t iova_addr;
+ uint64_t cpu_addr;
+ uint32_t temp;
+ uint32_t *dst_cpu_addr;
+ uint32_t *src_buf_iova_addr;
+ size_t dst_buf_len;
+ size_t src_buf_size;
+ int i;
+ int rc = 0;
+
+ /* process patch descriptor */
+ patch_desc = (struct cam_patch_desc *)
+ ((uint32_t *) &packet->payload +
+ packet->patch_offset/4);
+ CDBG("packet = %pK patch_desc = %pK size = %lu\n",
+ (void *)packet, (void *)patch_desc,
+ sizeof(struct cam_patch_desc));
+
+ for (i = 0; i < packet->num_patches; i++) {
+ rc = cam_mem_get_io_buf(patch_desc[i].src_buf_hdl,
+ iommu_hdl, &iova_addr, &src_buf_size);
+ if (rc < 0) {
+ pr_err("unable to get src buf address\n");
+ return rc;
+ }
+ src_buf_iova_addr = (uint32_t *)iova_addr;
+ temp = iova_addr;
+
+ rc = cam_mem_get_cpu_buf(patch_desc[i].dst_buf_hdl,
+ &cpu_addr, &dst_buf_len);
+ if (rc < 0) {
+ pr_err("unable to get dst buf address\n");
+ return rc;
+ }
+ dst_cpu_addr = (uint32_t *)cpu_addr;
+
+ CDBG("i = %d patch info = %x %x %x %x\n", i,
+ patch_desc[i].dst_buf_hdl, patch_desc[i].dst_offset,
+ patch_desc[i].src_buf_hdl, patch_desc[i].src_offset);
+
+ dst_cpu_addr = (uint32_t *)((uint8_t *)dst_cpu_addr +
+ patch_desc[i].dst_offset);
+ temp += patch_desc[i].src_offset;
+
+ *dst_cpu_addr = temp;
+
+ CDBG("patch is done for dst %pK with src %pK value %llx\n",
+ dst_cpu_addr, src_buf_iova_addr,
+ *((uint64_t *)dst_cpu_addr));
+ }
+
+ return rc;
+}
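+/*
+ * Worked example (illustrative, hypothetical values): if the source
+ * buffer handle maps to iova_addr = 0x80001000 and a patch carries
+ * src_offset = 0x40 and dst_offset = 0x10, then 0x80001040 is written
+ * at byte offset 0x10 into the destination command buffer, replacing
+ * the placeholder the handle occupied there.
+ */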
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.h
new file mode 100644
index 0000000..614e868
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.h
@@ -0,0 +1,33 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_PACKET_UTIL_H_
+#define _CAM_PACKET_UTIL_H_
+
+#include <uapi/media/cam_defs.h>
+
+/**
+ * cam_packet_util_process_patches()
+ *
+ * @brief:          Replace the buffer handles in the packet with device
+ *                  addresses using the patch information.
+ *
+ * @packet: Input packet containing Command Buffers and Patches
+ * @iommu_hdl: IOMMU handle of the HW Device that received the packet
+ *
+ * @return: 0: Success
+ * Negative: Failure
+ */
+int cam_packet_util_process_patches(struct cam_packet *packet,
+ int32_t iommu_hdl);
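+
+/*
+ * Illustrative call site (a sketch only, assuming a hw_mgr context that
+ * owns the device IOMMU handle; mirrors the ICP hw_mgr usage elsewhere
+ * in this change):
+ *
+ *	rc = cam_packet_util_process_patches(packet, hw_mgr->iommu_hdl);
+ *	if (rc) {
+ *		pr_err("Patch processing failed\n");
+ *		return rc;
+ *	}
+ */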
+
+#endif /* _CAM_PACKET_UTIL_H_ */
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
index 489ded1..2fa39c8 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -27,6 +27,7 @@
#include <media/cam_defs.h>
#include <media/cam_icp.h>
#include "cam_sync_api.h"
+#include "cam_packet_util.h"
#include "cam_hw.h"
#include "cam_hw_mgr_intf.h"
#include "cam_icp_hw_mgr_intf.h"
@@ -1175,14 +1176,9 @@
int ctx_id = 0;
uint32_t fw_handle;
int32_t idx;
- uint64_t iova_addr, cpu_addr;
+ uint64_t iova_addr;
uint32_t fw_cmd_buf_iova_addr;
- uint32_t temp;
- uint32_t *dst_cpu_addr;
- uint32_t *src_buf_iova_addr;
size_t fw_cmd_buf_len;
- size_t dst_buf_len;
- size_t src_buf_size;
int32_t sync_in_obj[CAM_ICP_IPE_IMAGE_MAX];
int32_t merged_sync_in_obj;
@@ -1194,7 +1190,6 @@
struct cam_packet *packet = NULL;
struct cam_cmd_buf_desc *cmd_desc = NULL;
struct cam_buf_io_cfg *io_cfg_ptr = NULL;
- struct cam_patch_desc *patch_desc = NULL;
struct hfi_cmd_ipebps_async *hfi_cmd = NULL;
if ((!prepare_args) || (!hw_mgr)) {
@@ -1279,45 +1274,11 @@
fw_cmd_buf_iova_addr = iova_addr;
fw_cmd_buf_iova_addr = (fw_cmd_buf_iova_addr + cmd_desc->offset);
- /* process patch descriptor */
- patch_desc = (struct cam_patch_desc *)
- ((uint32_t *) &packet->payload +
- packet->patch_offset/4);
- ICP_DBG("packet = %pK patch_desc = %pK size = %lu\n",
- (void *)packet, (void *)patch_desc,
- sizeof(struct cam_patch_desc));
-
- for (i = 0; i < packet->num_patches; i++) {
- rc = cam_mem_get_io_buf(patch_desc[i].src_buf_hdl,
- hw_mgr->iommu_hdl, &iova_addr, &src_buf_size);
- if (rc < 0) {
- pr_err("unable to get src buf address\n");
- return rc;
- }
- src_buf_iova_addr = (uint32_t *)iova_addr;
- temp = iova_addr;
-
- rc = cam_mem_get_cpu_buf(patch_desc[i].dst_buf_hdl,
- &cpu_addr, &dst_buf_len);
- if (rc < 0) {
- pr_err("unable to get dst buf address\n");
- return rc;
- }
- dst_cpu_addr = (uint32_t *)cpu_addr;
-
- ICP_DBG("i = %d patch info = %x %x %x %x\n", i,
- patch_desc[i].dst_buf_hdl, patch_desc[i].dst_offset,
- patch_desc[i].src_buf_hdl, patch_desc[i].src_offset);
-
- dst_cpu_addr = (uint32_t *)((uint8_t *)dst_cpu_addr +
- patch_desc[i].dst_offset);
- temp += patch_desc[i].src_offset;
-
- *dst_cpu_addr = temp;
-
- ICP_DBG("patch is done for dst %pK with src %pK value %llx\n",
- dst_cpu_addr, src_buf_iova_addr,
- *((uint64_t *)dst_cpu_addr));
+	/* Update buffer addresses using the handles and patch information */
+ rc = cam_packet_util_process_patches(packet, hw_mgr->iommu_hdl);
+ if (rc) {
+ pr_err("Patch processing failed\n");
+ return rc;
}
/* process io config out descriptors */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index c147b0b..980df9f 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -48,7 +48,7 @@
#define XIN_WRITEBACK 1
/* wait for at most 2 vsync for lowest refresh rate (24hz) */
-#define KOFF_TIMEOUT (84)
+#define KOFF_TIMEOUT (42 * 32)
/* default stream buffer headroom in lines */
#define DEFAULT_SBUF_HEADROOM 20
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index c42d7aa..053d748 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -639,6 +639,7 @@
int msm_vdec_inst_init(struct msm_vidc_inst *inst)
{
int rc = 0;
+ struct msm_vidc_format *fmt = NULL;
if (!inst) {
dprintk(VIDC_ERR, "Invalid input = %pK\n", inst);
@@ -661,10 +662,31 @@
inst->bufq[CAPTURE_PORT].num_planes = 1;
inst->prop.fps = DEFAULT_FPS;
inst->clk_data.operating_rate = 0;
- memcpy(&inst->fmts[OUTPUT_PORT], &vdec_formats[2],
+
+ /* By default, initialize CAPTURE port to UBWC YUV format */
+ fmt = msm_comm_get_pixel_fmt_fourcc(vdec_formats,
+ ARRAY_SIZE(vdec_formats), V4L2_PIX_FMT_NV12_UBWC,
+ CAPTURE_PORT);
+ if (!fmt || fmt->type != CAPTURE_PORT) {
+ dprintk(VIDC_ERR,
+ "vdec_formats corrupted\n");
+ return -EINVAL;
+ }
+ memcpy(&inst->fmts[fmt->type], fmt,
sizeof(struct msm_vidc_format));
- memcpy(&inst->fmts[CAPTURE_PORT], &vdec_formats[0],
+
+ /* By default, initialize OUTPUT port to H264 decoder */
+ fmt = msm_comm_get_pixel_fmt_fourcc(vdec_formats,
+ ARRAY_SIZE(vdec_formats), V4L2_PIX_FMT_H264,
+ OUTPUT_PORT);
+ if (!fmt || fmt->type != OUTPUT_PORT) {
+ dprintk(VIDC_ERR,
+ "vdec_formats corrupted\n");
+ return -EINVAL;
+ }
+ memcpy(&inst->fmts[fmt->type], fmt,
sizeof(struct msm_vidc_format));
+
return rc;
}
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index e3d52bf..8906027 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -2096,6 +2096,7 @@
int msm_venc_inst_init(struct msm_vidc_inst *inst)
{
int rc = 0;
+ struct msm_vidc_format *fmt = NULL;
if (!inst) {
dprintk(VIDC_ERR, "Invalid input = %pK\n", inst);
@@ -2120,10 +2121,30 @@
inst->bufq[CAPTURE_PORT].num_planes = 1;
inst->clk_data.operating_rate = 0;
- memcpy(&inst->fmts[CAPTURE_PORT], &venc_formats[4],
+ /* By default, initialize OUTPUT port to UBWC YUV format */
+ fmt = msm_comm_get_pixel_fmt_fourcc(venc_formats,
+ ARRAY_SIZE(venc_formats), V4L2_PIX_FMT_NV12_UBWC,
+ OUTPUT_PORT);
+ if (!fmt || fmt->type != OUTPUT_PORT) {
+ dprintk(VIDC_ERR,
+ "venc_formats corrupted\n");
+ return -EINVAL;
+ }
+ memcpy(&inst->fmts[fmt->type], fmt,
sizeof(struct msm_vidc_format));
- memcpy(&inst->fmts[OUTPUT_PORT], &venc_formats[0],
+
+ /* By default, initialize CAPTURE port to H264 encoder */
+ fmt = msm_comm_get_pixel_fmt_fourcc(venc_formats,
+ ARRAY_SIZE(venc_formats), V4L2_PIX_FMT_H264,
+ CAPTURE_PORT);
+ if (!fmt || fmt->type != CAPTURE_PORT) {
+ dprintk(VIDC_ERR,
+ "venc_formats corrupted\n");
+ return -EINVAL;
+ }
+ memcpy(&inst->fmts[fmt->type], fmt,
sizeof(struct msm_vidc_format));
+
return rc;
}
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 1cab039..89da0a1 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -909,7 +909,7 @@
switch (found_buf) {
case 0:
- dprintk(VIDC_WARN,
+ dprintk(VIDC_DBG,
"%s: No buffer(type: %d) found for index %d\n",
__func__, buffer_type, buffer_index);
break;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
index b80aa08..25cc1e4 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
@@ -309,10 +309,10 @@
return freq;
}
- dprintk(VIDC_PROF, "%s Inst %pK : Freq = %lu\n", __func__, inst, freq);
-
freq = max(vpp_cycles, vsp_cycles);
+ dprintk(VIDC_PROF, "%s Inst %pK : Freq = %lu\n", __func__, inst, freq);
+
return freq;
}
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 9dda0d2..7b75d70 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -5497,6 +5497,14 @@
return -EINVAL;
hdev = inst->core->device;
mutex_lock(&inst->lock);
+ if (inst->state >= MSM_VIDC_RELEASE_RESOURCES_DONE ||
+ inst->state < MSM_VIDC_START_DONE ||
+ inst->core->state == VIDC_CORE_INVALID) {
+ dprintk(VIDC_DBG,
+ "Inst %pK : Not in valid state to call %s\n",
+ inst, __func__);
+ goto sess_continue_fail;
+ }
if (inst->session_type == MSM_VIDC_DECODER && inst->in_reconfig) {
dprintk(VIDC_DBG, "send session_continue\n");
rc = call_hfi_op(hdev, session_continue,
@@ -5515,6 +5523,7 @@
dprintk(VIDC_ERR,
"session_continue called in wrong state for decoder");
}
+
sess_continue_fail:
mutex_unlock(&inst->lock);
return rc;
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index 2dd25f3..eaba920 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -456,7 +456,7 @@
#define HFI_INTRA_REFRESH_NONE (HFI_COMMON_BASE + 0x1)
#define HFI_INTRA_REFRESH_CYCLIC (HFI_COMMON_BASE + 0x2)
-#define HFI_INTRA_REFRESH_RANDOM (HFI_COMMON_BASE + 0x3)
+#define HFI_INTRA_REFRESH_RANDOM (HFI_COMMON_BASE + 0x5)
struct hfi_intra_refresh {
u32 mode;
diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig
index 3a6e214..1946204 100644
--- a/drivers/platform/msm/Kconfig
+++ b/drivers/platform/msm/Kconfig
@@ -152,4 +152,12 @@
a log and rates the actions according to whether a typical user would
use the tools.
+config QCOM_GENI_SE
+ tristate "QCOM GENI Serial Engine Driver"
+ help
+	  This module is used to interact with GENI-based serial engines on
+	  the Qualcomm Technologies, Inc. Universal Peripheral (QUPv3). It
+	  is also used to configure and read back the configuration of the
+	  serial engines.
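+
+	  To compile this driver as a module, choose M here: the module
+	  will be called qcom-geni-se.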
+
endmenu
diff --git a/drivers/platform/msm/Makefile b/drivers/platform/msm/Makefile
index cf24d7a..ff1d0e2 100644
--- a/drivers/platform/msm/Makefile
+++ b/drivers/platform/msm/Makefile
@@ -11,3 +11,4 @@
obj-$(CONFIG_USB_BAM) += usb_bam.o
obj-$(CONFIG_MSM_11AD) += msm_11ad/
obj-$(CONFIG_SEEMP_CORE) += seemp_core/
+obj-$(CONFIG_QCOM_GENI_SE) += qcom-geni-se.o
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index a37947b..6c597f0 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -2600,6 +2600,9 @@
case IPA_HW_v3_5_1:
str = "3.5.1";
break;
+ case IPA_HW_v4_0:
+ str = "4.0";
+ break;
default:
str = "Invalid version";
break;
@@ -2660,6 +2663,7 @@
case IPA_HW_v3_1:
case IPA_HW_v3_5:
case IPA_HW_v3_5_1:
+ case IPA_HW_v4_0:
result = ipa3_plat_drv_probe(pdev_p, ipa_api_ctrl,
ipa_plat_drv_match);
break;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index e7b16b3..31e530e 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -1735,7 +1735,7 @@
IPAERR("failed to construct dma_shared_mem imm cmd\n");
return -ENOMEM;
}
- desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc.opcode = cmd_pyld->opcode;
desc.pyld = cmd_pyld->data;
desc.len = cmd_pyld->len;
desc.type = IPA_IMM_CMD_DESC;
@@ -2000,8 +2000,7 @@
retval = -ENOMEM;
goto free_empty_img;
}
- desc[num_cmds].opcode = ipahal_imm_cmd_get_opcode(
- IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc[num_cmds].opcode = cmd_pyld[num_cmds]->opcode;
desc[num_cmds].pyld = cmd_pyld[num_cmds]->data;
desc[num_cmds].len = cmd_pyld[num_cmds]->len;
desc[num_cmds].type = IPA_IMM_CMD_DESC;
@@ -2100,8 +2099,7 @@
retval = -ENOMEM;
goto free_desc;
}
- desc->opcode =
- ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc->opcode = cmd_pyld->opcode;
desc->pyld = cmd_pyld->data;
desc->len = cmd_pyld->len;
desc->type = IPA_IMM_CMD_DESC;
@@ -2191,8 +2189,7 @@
retval = -EFAULT;
goto bail_desc;
}
- desc->opcode =
- ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+ desc->opcode = cmd_pyld->opcode;
desc->pyld = cmd_pyld->data;
desc->len = cmd_pyld->len;
desc->type = IPA_IMM_CMD_DESC;
@@ -2259,8 +2256,7 @@
BUG();
}
- desc[num_descs].opcode = ipahal_imm_cmd_get_opcode(
- IPA_IMM_CMD_REGISTER_WRITE);
+ desc[num_descs].opcode = cmd_pyld->opcode;
desc[num_descs].type = IPA_IMM_CMD_DESC;
desc[num_descs].callback = ipa3_destroy_imm;
desc[num_descs].user1 = cmd_pyld;
@@ -2289,8 +2285,7 @@
return -EFAULT;
}
- desc[num_descs].opcode = ipahal_imm_cmd_get_opcode(
- IPA_IMM_CMD_REGISTER_WRITE);
+ desc[num_descs].opcode = cmd_pyld->opcode;
desc[num_descs].type = IPA_IMM_CMD_DESC;
desc[num_descs].callback = ipa3_destroy_imm;
desc[num_descs].user1 = cmd_pyld;
@@ -2494,7 +2489,7 @@
mem.phys_base);
return -EFAULT;
}
- desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_HDR_INIT_LOCAL);
+ desc.opcode = cmd_pyld->opcode;
desc.type = IPA_IMM_CMD_DESC;
desc.pyld = cmd_pyld->data;
desc.len = cmd_pyld->len;
@@ -2539,7 +2534,7 @@
mem.phys_base);
return -EFAULT;
}
- desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc.opcode = cmd_pyld->opcode;
desc.pyld = cmd_pyld->data;
desc.len = cmd_pyld->len;
desc.type = IPA_IMM_CMD_DESC;
@@ -2611,8 +2606,7 @@
goto free_mem;
}
- desc.opcode =
- ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V4_ROUTING_INIT);
+ desc.opcode = cmd_pyld->opcode;
desc.type = IPA_IMM_CMD_DESC;
desc.pyld = cmd_pyld->data;
desc.len = cmd_pyld->len;
@@ -2678,8 +2672,7 @@
goto free_mem;
}
- desc.opcode =
- ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V6_ROUTING_INIT);
+ desc.opcode = cmd_pyld->opcode;
desc.type = IPA_IMM_CMD_DESC;
desc.pyld = cmd_pyld->data;
desc.len = cmd_pyld->len;
@@ -2739,7 +2732,7 @@
goto free_mem;
}
- desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V4_FILTER_INIT);
+ desc.opcode = cmd_pyld->opcode;
desc.type = IPA_IMM_CMD_DESC;
desc.pyld = cmd_pyld->data;
desc.len = cmd_pyld->len;
@@ -2800,7 +2793,7 @@
goto free_mem;
}
- desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V6_FILTER_INIT);
+ desc.opcode = cmd_pyld->opcode;
desc.type = IPA_IMM_CMD_DESC;
desc.pyld = cmd_pyld->data;
desc.len = cmd_pyld->len;
@@ -3939,6 +3932,9 @@
case IPA_HW_v3_5_1:
gsi_ver = GSI_VER_1_3;
break;
+ case IPA_HW_v4_0:
+ gsi_ver = GSI_VER_2_0;
+ break;
default:
IPAERR("No GSI version for ipa type %d\n", ipa_hw_type);
WARN_ON(1);
@@ -4319,6 +4315,7 @@
IPAERR("failed to construct IMM cmd\n");
return -ENOMEM;
}
+ ipa3_ctx->pkt_init_imm_opcode = cmd_pyld->opcode;
mem.size = cmd_pyld->len * ipa3_ctx->ipa_num_pipes;
mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index ca77be9..1ee8ec8 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -244,6 +244,38 @@
pipe, ipahal_read_reg_n(IPA_ENDP_INIT_CFG_n, pipe));
}
+/**
+ * _ipa_read_ep_reg_v4_0() - Reads and prints endpoint configuration registers
+ *
+ * Returns the number of characters printed.
+ * Compared to the v3 variant, IPA_ENDP_INIT_ROUTE_n is removed in v4.0.
+ */
+int _ipa_read_ep_reg_v4_0(char *buf, int max_len, int pipe)
+{
+ return scnprintf(
+ dbg_buff, IPA_MAX_MSG_LEN,
+ "IPA_ENDP_INIT_NAT_%u=0x%x\n"
+ "IPA_ENDP_INIT_HDR_%u=0x%x\n"
+ "IPA_ENDP_INIT_HDR_EXT_%u=0x%x\n"
+ "IPA_ENDP_INIT_MODE_%u=0x%x\n"
+ "IPA_ENDP_INIT_AGGR_%u=0x%x\n"
+ "IPA_ENDP_INIT_CTRL_%u=0x%x\n"
+ "IPA_ENDP_INIT_HOL_EN_%u=0x%x\n"
+ "IPA_ENDP_INIT_HOL_TIMER_%u=0x%x\n"
+ "IPA_ENDP_INIT_DEAGGR_%u=0x%x\n"
+ "IPA_ENDP_INIT_CFG_%u=0x%x\n",
+ pipe, ipahal_read_reg_n(IPA_ENDP_INIT_NAT_n, pipe),
+ pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HDR_n, pipe),
+ pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HDR_EXT_n, pipe),
+ pipe, ipahal_read_reg_n(IPA_ENDP_INIT_MODE_n, pipe),
+ pipe, ipahal_read_reg_n(IPA_ENDP_INIT_AGGR_n, pipe),
+ pipe, ipahal_read_reg_n(IPA_ENDP_INIT_CTRL_n, pipe),
+ pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HOL_BLOCK_EN_n, pipe),
+ pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HOL_BLOCK_TIMER_n, pipe),
+ pipe, ipahal_read_reg_n(IPA_ENDP_INIT_DEAGGR_n, pipe),
+ pipe, ipahal_read_reg_n(IPA_ENDP_INIT_CFG_n, pipe));
+}
+
static ssize_t ipa3_read_ep_reg(struct file *file, char __user *ubuf,
size_t count, loff_t *ppos)
{
@@ -1381,6 +1413,11 @@
u32 option = 0;
struct ipahal_reg_debug_cnt_ctrl dbg_cnt_ctrl;
+ if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+ IPAERR("IPA_DEBUG_CNT_CTRL is not supported in IPA 4.0\n");
+ return -EPERM;
+ }
+
if (sizeof(dbg_buff) < count + 1)
return -EFAULT;
@@ -1416,6 +1453,11 @@
int nbytes;
u32 regval;
+ if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+ IPAERR("IPA_DEBUG_CNT_REG is not supported in IPA 4.0\n");
+ return -EPERM;
+ }
+
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
regval =
ipahal_read_reg_n(IPA_DEBUG_CNT_REG_n, 0);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index faa47d8..bf13ac5 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -315,9 +315,7 @@
}
/* populate tag field */
- if (desc[i].opcode ==
- ipahal_imm_cmd_get_opcode(
- IPA_IMM_CMD_IP_PACKET_TAG_STATUS)) {
+ if (desc[i].is_tag_status) {
if (ipa_populate_tag_field(&desc[i], tx_pkt,
&tag_pyld_ret)) {
IPAERR("Failed to populate tag field\n");
@@ -1279,15 +1277,10 @@
* notification. IPA will generate a status packet with
* tag info as a result of the TAG STATUS command.
*/
- desc[data_idx].opcode =
- ipahal_imm_cmd_get_opcode(
- IPA_IMM_CMD_IP_PACKET_TAG_STATUS);
- desc[data_idx].type = IPA_IMM_CMD_DESC;
- desc[data_idx].callback = ipa3_tag_destroy_imm;
+ desc[data_idx].is_tag_status = true;
data_idx++;
}
- desc[data_idx].opcode =
- ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_PACKET_INIT);
+ desc[data_idx].opcode = ipa3_ctx->pkt_init_imm_opcode;
desc[data_idx].dma_address_valid = true;
desc[data_idx].dma_address = ipa3_ctx->pkt_init_imm[dst_ep_idx];
desc[data_idx].type = IPA_IMM_CMD_DESC;
@@ -1338,11 +1331,7 @@
* notification. IPA will generate a status packet with
* tag info as a result of the TAG STATUS command.
*/
- desc[data_idx].opcode =
- ipahal_imm_cmd_get_opcode(
- IPA_IMM_CMD_IP_PACKET_TAG_STATUS);
- desc[data_idx].type = IPA_IMM_CMD_DESC;
- desc[data_idx].callback = ipa3_tag_destroy_imm;
+ desc[data_idx].is_tag_status = true;
data_idx++;
}
desc[data_idx].pyld = skb->data;
@@ -2979,11 +2968,7 @@
(u8)sys->ep->cfg.meta.qmap_id;
/* the tag field will be populated in ipa3_send() function */
- desc[0].opcode =
- ipahal_imm_cmd_get_opcode(
- IPA_IMM_CMD_IP_PACKET_TAG_STATUS);
- desc[0].type = IPA_IMM_CMD_DESC;
- desc[0].callback = ipa3_tag_destroy_imm;
+ desc[0].is_tag_status = true;
desc[1].pyld = entry->pyld_buffer;
desc[1].len = entry->pyld_len;
desc[1].type = IPA_DATA_DESC_SKB;
@@ -3615,8 +3600,11 @@
*/
IPADBG_LOW("tx_pkt sent in tag: 0x%p\n", tx_pkt);
desc->pyld = tag_pyld->data;
+ desc->opcode = tag_pyld->opcode;
desc->len = tag_pyld->len;
desc->user1 = tag_pyld;
+ desc->type = IPA_IMM_CMD_DESC;
+ desc->callback = ipa3_tag_destroy_imm;
*tag_pyld_ret = tag_pyld;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
index ff763c4..d0ed782 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
@@ -573,7 +573,7 @@
rc = -EFAULT;
goto fail_reg_write_construct;
}
- desc[0].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+ desc[0].opcode = cmd_pyld[0]->opcode;
desc[0].pyld = cmd_pyld[0]->data;
desc[0].len = cmd_pyld[0]->len;
desc[0].type = IPA_IMM_CMD_DESC;
@@ -609,8 +609,7 @@
ip);
goto fail_imm_cmd_construct;
}
- desc[num_cmd].opcode =
- ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode;
desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
desc[num_cmd].len = cmd_pyld[num_cmd]->len;
desc[num_cmd++].type = IPA_IMM_CMD_DESC;
@@ -630,8 +629,7 @@
ip);
goto fail_imm_cmd_construct;
}
- desc[num_cmd].opcode =
- ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode;
desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
desc[num_cmd].len = cmd_pyld[num_cmd]->len;
desc[num_cmd++].type = IPA_IMM_CMD_DESC;
@@ -653,8 +651,7 @@
ip);
goto fail_imm_cmd_construct;
}
- desc[num_cmd].opcode =
- ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode;
desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
desc[num_cmd].len = cmd_pyld[num_cmd]->len;
desc[num_cmd++].type = IPA_IMM_CMD_DESC;
@@ -673,8 +670,7 @@
ip);
goto fail_imm_cmd_construct;
}
- desc[num_cmd].opcode =
- ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode;
desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
desc[num_cmd].len = cmd_pyld[num_cmd]->len;
desc[num_cmd++].type = IPA_IMM_CMD_DESC;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
index 69db99a..410b96a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
@@ -181,8 +181,7 @@
IPAERR("fail construct dma_shared_mem cmd\n");
goto end;
}
- desc[0].opcode = ipahal_imm_cmd_get_opcode(
- IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc[0].opcode = hdr_cmd_pyld->opcode;
desc[0].pyld = hdr_cmd_pyld->data;
desc[0].len = hdr_cmd_pyld->len;
}
@@ -200,8 +199,7 @@
IPAERR("fail construct hdr_init_system cmd\n");
goto end;
}
- desc[0].opcode = ipahal_imm_cmd_get_opcode(
- IPA_IMM_CMD_HDR_INIT_SYSTEM);
+ desc[0].opcode = hdr_cmd_pyld->opcode;
desc[0].pyld = hdr_cmd_pyld->data;
desc[0].len = hdr_cmd_pyld->len;
}
@@ -233,8 +231,7 @@
IPAERR("fail construct dma_shared_mem cmd\n");
goto end;
}
- desc[1].opcode = ipahal_imm_cmd_get_opcode(
- IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc[1].opcode = ctx_cmd_pyld->opcode;
desc[1].pyld = ctx_cmd_pyld->data;
desc[1].len = ctx_cmd_pyld->len;
}
@@ -262,8 +259,7 @@
IPAERR("fail construct register_write cmd\n");
goto end;
}
- desc[1].opcode = ipahal_imm_cmd_get_opcode(
- IPA_IMM_CMD_REGISTER_WRITE);
+ desc[1].opcode = ctx_cmd_pyld->opcode;
desc[1].pyld = ctx_cmd_pyld->data;
desc[1].len = ctx_cmd_pyld->len;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 3af4486..73a405f 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -701,6 +701,7 @@
* or kmalloc'ed immediate command parameters/plain old data
* @dma_address: dma mapped address of pyld
* @dma_address_valid: valid field for dma_address
+ * @is_tag_status: flag for the IP_PACKET_TAG_STATUS imm cmd
* @len: length of the pyld
* @opcode: for immediate commands
* @callback: IPA client provided completion callback
@@ -715,6 +716,7 @@
skb_frag_t *frag;
dma_addr_t dma_address;
bool dma_address_valid;
+ bool is_tag_status;
u16 len;
u16 opcode;
void (*callback)(void *user1, int user2);
@@ -1069,6 +1071,7 @@
* @ipa_bus_hdl: msm driver handle for the data path bus
* @ctrl: holds the core specific operations based on
* core version (vtable like)
+ * @pkt_init_imm_opcode: opcode for IP_PACKET_INIT imm cmd
* @enable_clock_scaling: clock scaling is enabled ?
* @curr_ipa_clk_rate: IPA current clock rate
* @wcstats: wlan common buffer stats
@@ -1180,6 +1183,7 @@
bool q6_proxy_clk_vote_valid;
u32 ipa_num_pipes;
dma_addr_t pkt_init_imm[IPA3_MAX_NUM_PIPES];
+ u32 pkt_init_imm_opcode;
struct ipa3_wlan_comm_memb wc_memb;
@@ -1318,6 +1322,12 @@
* +-------------------------+
* | CANARY |
* +-------------------------+
+ * | PDN CONFIG |
+ * +-------------------------+
+ * | CANARY |
+ * +-------------------------+
+ * | CANARY |
+ * +-------------------------+
* | MODEM MEM |
* +-------------------------+
* | CANARY |
@@ -1398,6 +1408,8 @@
u32 apps_v6_rt_nhash_size;
u32 uc_event_ring_ofst;
u32 uc_event_ring_size;
+ u32 pdn_config_ofst;
+ u32 pdn_config_size;
};
struct ipa3_controller {
@@ -1827,6 +1839,7 @@
int __ipa3_release_hdr(u32 hdr_hdl);
int __ipa3_release_hdr_proc_ctx(u32 proc_ctx_hdl);
int _ipa_read_ep_reg_v3_0(char *buf, int max_len, int pipe);
+int _ipa_read_ep_reg_v4_0(char *buf, int max_len, int pipe);
void _ipa_enable_clks_v3_0(void);
void _ipa_disable_clks_v3_0(void);
struct device *ipa3_get_dma_dev(void);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
index d98e6b4..e1177ca 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -420,7 +420,7 @@
goto bail;
}
- desc[0].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+ desc[0].opcode = nop_cmd_pyld->opcode;
desc[0].type = IPA_IMM_CMD_DESC;
desc[0].callback = NULL;
desc[0].user1 = NULL;
@@ -505,7 +505,7 @@
goto free_nop;
}
- desc[1].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V4_NAT_INIT);
+ desc[1].opcode = cmd_pyld->opcode;
desc[1].type = IPA_IMM_CMD_DESC;
desc[1].callback = NULL;
desc[1].user1 = NULL;
@@ -668,7 +668,7 @@
goto bail;
}
desc[0].type = IPA_IMM_CMD_DESC;
- desc[0].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+ desc[0].opcode = nop_cmd_pyld->opcode;
desc[0].callback = NULL;
desc[0].user1 = NULL;
desc[0].user2 = 0;
@@ -687,7 +687,7 @@
continue;
}
desc[1].type = IPA_IMM_CMD_DESC;
- desc[1].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_NAT_DMA);
+ desc[1].opcode = cmd_pyld->opcode;
desc[1].callback = NULL;
desc[1].user1 = NULL;
desc[1].user2 = 0;
@@ -777,7 +777,7 @@
result = -ENOMEM;
goto bail;
}
- desc[0].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+ desc[0].opcode = nop_cmd_pyld->opcode;
desc[0].type = IPA_IMM_CMD_DESC;
desc[0].callback = NULL;
desc[0].user1 = NULL;
@@ -804,7 +804,7 @@
result = -EPERM;
goto destroy_regwrt_imm_cmd;
}
- desc[1].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V4_NAT_INIT);
+ desc[1].opcode = cmd_pyld->opcode;
desc[1].type = IPA_IMM_CMD_DESC;
desc[1].callback = NULL;
desc[1].user1 = NULL;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
index 273877c..cf28986 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -550,8 +550,7 @@
IPAERR("fail construct register_write imm cmd. IP %d\n", ip);
goto fail_size_valid;
}
- desc[num_cmd].opcode =
- ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+ desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode;
desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
desc[num_cmd].len = cmd_pyld[num_cmd]->len;
desc[num_cmd].type = IPA_IMM_CMD_DESC;
@@ -569,8 +568,7 @@
IPAERR("fail construct dma_shared_mem imm cmd. IP %d\n", ip);
goto fail_imm_cmd_construct;
}
- desc[num_cmd].opcode =
- ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode;
desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
desc[num_cmd].len = cmd_pyld[num_cmd]->len;
desc[num_cmd].type = IPA_IMM_CMD_DESC;
@@ -588,8 +586,7 @@
IPAERR("fail construct dma_shared_mem imm cmd. IP %d\n", ip);
goto fail_imm_cmd_construct;
}
- desc[num_cmd].opcode =
- ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode;
desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
desc[num_cmd].len = cmd_pyld[num_cmd]->len;
desc[num_cmd].type = IPA_IMM_CMD_DESC;
@@ -609,8 +606,7 @@
ip);
goto fail_imm_cmd_construct;
}
- desc[num_cmd].opcode =
- ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode;
desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
desc[num_cmd].len = cmd_pyld[num_cmd]->len;
desc[num_cmd].type = IPA_IMM_CMD_DESC;
@@ -630,8 +626,7 @@
ip);
goto fail_imm_cmd_construct;
}
- desc[num_cmd].opcode =
- ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode;
desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
desc[num_cmd].len = cmd_pyld[num_cmd]->len;
desc[num_cmd].type = IPA_IMM_CMD_DESC;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 6321ca9..f8b4d7d 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -42,6 +42,7 @@
#define IPA_FORCE_CLOSE_TAG_PROCESS_TIMEOUT (10 * HZ)
#define IPA_BCR_REG_VAL_v3_0 (0x00000001)
#define IPA_BCR_REG_VAL_v3_5 (0x0000003B)
+#define IPA_BCR_REG_VAL_v4_0 (0x00000039)
#define IPA_AGGR_GRAN_MIN (1)
#define IPA_AGGR_GRAN_MAX (32)
#define IPA_EOT_COAL_GRAN_MIN (1)
@@ -62,8 +63,6 @@
/* configure IPA spare register 1 in order to have correct IPA version
* set bits 0,2,3 and 4. see SpareBits documentation.xlsx
*/
-#define IPA_SPARE_REG_1_VAL (0x0000081D)
-
/* HPS, DPS sequencers Types*/
#define IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY 0x00000000
@@ -121,6 +120,16 @@
#define IPA_v3_5_SRC_GROUP_MAX (4)
#define IPA_v3_5_DST_GROUP_MAX (3)
+#define IPA_v4_0_GROUP_LWA_DL (0)
+#define IPA_v4_0_MHI_GROUP_PCIE (0)
+#define IPA_v4_0_ETHERNET (0)
+#define IPA_v4_0_GROUP_UL_DL (1)
+#define IPA_v4_0_MHI_GROUP_DDR (1)
+#define IPA_v4_0_MHI_GROUP_DMA (2)
+#define IPA_v4_0_GROUP_UC_RX_Q (3)
+#define IPA_v4_0_SRC_GROUP_MAX (4)
+#define IPA_v4_0_DST_GROUP_MAX (4)
+
#define IPA_GROUP_MAX IPA_v3_0_GROUP_MAX
enum ipa_rsrc_grp_type_src {
@@ -139,7 +148,14 @@
IPA_v3_5_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF,
IPA_v3_5_RSRC_GRP_TYPE_SRC_HPS_DMARS,
IPA_v3_5_RSRC_GRP_TYPE_SRC_ACK_ENTRIES,
- IPA_v3_5_RSRC_GRP_TYPE_SRC_MAX
+ IPA_v3_5_RSRC_GRP_TYPE_SRC_MAX,
+
+ IPA_v4_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS = 0,
+ IPA_v4_0_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS,
+ IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF,
+ IPA_v4_0_RSRC_GRP_TYPE_SRC_HPS_DMARS,
+ IPA_v4_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES,
+ IPA_v4_0_RSRC_GRP_TYPE_SRC_MAX
};
#define IPA_RSRC_GRP_TYPE_SRC_MAX IPA_v3_0_RSRC_GRP_TYPE_SRC_MAX
@@ -153,6 +169,10 @@
IPA_v3_5_RSRC_GRP_TYPE_DST_DATA_SECTORS = 0,
IPA_v3_5_RSRC_GRP_TYPE_DST_DPS_DMARS,
IPA_v3_5_RSRC_GRP_TYPE_DST_MAX,
+
+ IPA_v4_0_RSRC_GRP_TYPE_DST_DATA_SECTORS = 0,
+ IPA_v4_0_RSRC_GRP_TYPE_DST_DPS_DMARS,
+ IPA_v4_0_RSRC_GRP_TYPE_DST_MAX,
};
#define IPA_RSRC_GRP_TYPE_DST_MAX IPA_v3_0_RSRC_GRP_TYPE_DST_MAX
@@ -160,6 +180,12 @@
IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ,
IPA_RSRC_GRP_TYPE_RX_MAX
};
+
+enum ipa_rsrc_grp_rx_hps_weight_config {
+ IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG,
+ IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_MAX
+};
+
struct rsrc_min_max {
u32 min;
u32 max;
@@ -170,6 +196,8 @@
IPA_3_5,
IPA_3_5_MHI,
IPA_3_5_1,
+ IPA_4_0,
+ IPA_4_0_MHI,
IPA_VER_MAX,
};
@@ -233,6 +261,32 @@
[IPA_v3_5_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = {
{14, 14}, {20, 20}, {0, 0}, {14, 14}, {0, 0}, {0, 0} },
},
+ [IPA_4_0] = {
+	/* LWA_DL UL_DL not used UC_RX_Q, others are invalid */
+ [IPA_v4_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
+ {1, 255}, {1, 255}, {0, 0}, {1, 255}, {0, 0}, {0, 0} },
+ [IPA_v4_0_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS] = {
+ {10, 10}, {10, 10}, {0, 0}, {8, 8}, {0, 0}, {0, 0} },
+ [IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = {
+ {12, 12}, {14, 14}, {0, 0}, {8, 8}, {0, 0}, {0, 0} },
+ [IPA_v4_0_RSRC_GRP_TYPE_SRC_HPS_DMARS] = {
+ {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 0}, {0, 0} },
+ [IPA_v4_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = {
+ {14, 14}, {20, 20}, {0, 0}, {14, 14}, {0, 0}, {0, 0} },
+ },
+ [IPA_4_0_MHI] = {
+	/* PCIE DDR DMA not used, others are invalid */
+ [IPA_v4_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
+ {4, 4}, {5, 5}, {1, 1}, {0, 0}, {0, 0}, {0, 0} },
+ [IPA_v4_0_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS] = {
+ {10, 10}, {10, 10}, {8, 8}, {0, 0}, {0, 0}, {0, 0} },
+ [IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = {
+ {12, 12}, {12, 12}, {8, 8}, {0, 0}, {0, 0}, {0, 0} },
+ [IPA_v4_0_RSRC_GRP_TYPE_SRC_HPS_DMARS] = {
+ {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 0}, {0, 0} },
+ [IPA_v4_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = {
+ {14, 14}, {14, 14}, {14, 14}, {0, 0}, {0, 0}, {0, 0} },
+ },
};
static const struct rsrc_min_max ipa3_rsrc_dst_grp_config
@@ -267,6 +321,20 @@
[IPA_v3_5_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
{2, 255}, {1, 255}, {1, 2}, {0, 0}, {0, 0}, {0, 0} },
},
+ [IPA_4_0] = {
+	/* LWA_DL UL/DL/DPL uC, others are invalid */
+ [IPA_v4_0_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
+ {4, 4}, {4, 4}, {3, 3}, {2, 2}, {0, 0}, {0, 0} },
+ [IPA_v4_0_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
+ {2, 255}, {1, 255}, {1, 2}, {0, 2}, {0, 0}, {0, 0} },
+ },
+ [IPA_4_0_MHI] = {
+	/* LWA_DL UL/DL/DPL uC, others are invalid */
+ [IPA_v4_0_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
+ {4, 4}, {4, 4}, {3, 3}, {2, 2}, {0, 0}, {0, 0} },
+ [IPA_v4_0_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
+ {2, 255}, {1, 255}, {1, 2}, {0, 2}, {0, 0}, {0, 0} },
+ },
};
static const struct rsrc_min_max ipa3_rsrc_rx_grp_config
@@ -285,12 +353,50 @@
/* PCIE DDR DMA unused N/A N/A */
[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = {
{ 3, 3 }, { 7, 7 }, { 2, 2 }, { 0, 0 }, { 0, 0 }, { 0, 0 } },
-},
+ },
[IPA_3_5_1] = {
/* LWA_DL UL_DL unused UC_RX_Q N/A N/A */
[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = {
{3, 3}, {7, 7}, {0, 0}, {2, 2}, {0, 0}, {0, 0} },
},
+ [IPA_4_0] = {
+	/* LWA_DL UL_DL not used UC_RX_Q, others are invalid */
+ [IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = {
+ {3, 3}, {7, 7}, {0, 0}, {2, 2}, {0, 0}, {0, 0} },
+ },
+ [IPA_4_0_MHI] = {
+ /* PCIE DDR DMA unused N/A N/A */
+ [IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = {
+ { 3, 3 }, { 7, 7 }, { 2, 2 }, { 0, 0 }, { 0, 0 }, { 0, 0 } },
+ },
+};
+
+static const u32 ipa3_rsrc_rx_grp_hps_weight_config
+ [IPA_VER_MAX][IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_MAX][IPA_GROUP_MAX] = {
+ [IPA_3_0] = {
+ /* UL DL DIAG DMA Unused uC Rx */
+ [IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] = { 0, 0, 0, 0, 0, 0 },
+ },
+ [IPA_3_5] = {
+ /* unused UL_DL unused UC_RX_Q N/A N/A */
+ [IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] = { 1, 1, 1, 1, 0, 0 },
+ },
+ [IPA_3_5_MHI] = {
+ /* PCIE DDR DMA unused N/A N/A */
+ [IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] = { 3, 5, 1, 1, 0, 0 },
+ },
+ [IPA_3_5_1] = {
+ /* LWA_DL UL_DL unused UC_RX_Q N/A N/A */
+ [IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] = { 1, 1, 1, 1, 0, 0 },
+ },
+ [IPA_4_0] = {
+	/* LWA_DL UL_DL not used UC_RX_Q, others are invalid */
+ [IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] = { 1, 1, 1, 1, 0, 0 },
+ },
+ [IPA_4_0_MHI] = {
+ /* PCIE DDR DMA unused N/A N/A */
+ [IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] = { 3, 5, 1, 1, 0, 0 },
+ },
};
enum ipa_ees {
@@ -1115,6 +1221,399 @@
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
{ 11, 2, 4, 6, IPA_EE_AP } },
+
+
+ /* IPA_4_0 */
+ [IPA_4_0][IPA_CLIENT_HSIC1_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0][IPA_CLIENT_WLAN1_PROD] = {
+ 7, IPA_v4_0_GROUP_UL_DL, true,
+ IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR,
+ { 7, 9, 8, 16, IPA_EE_AP } },
+ [IPA_4_0][IPA_CLIENT_HSIC2_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0][IPA_CLIENT_USB2_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0][IPA_CLIENT_HSIC3_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0][IPA_CLIENT_USB3_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0][IPA_CLIENT_HSIC4_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0][IPA_CLIENT_USB4_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0][IPA_CLIENT_HSIC5_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0][IPA_CLIENT_USB_PROD] = {
+ 0, IPA_v4_0_GROUP_UL_DL, true,
+ IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR,
+ { 0, 8, 8, 16, IPA_EE_AP } },
+ [IPA_4_0][IPA_CLIENT_UC_USB_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0][IPA_CLIENT_A5_WLAN_AMPDU_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0][IPA_CLIENT_A2_EMBEDDED_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0][IPA_CLIENT_A2_TETHERED_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0][IPA_CLIENT_APPS_LAN_PROD] = {
+ 8, IPA_v4_0_GROUP_UL_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR,
+ { 8, 10, 8, 16, IPA_EE_AP } },
+ [IPA_4_0][IPA_CLIENT_APPS_WAN_PROD] = {
+ 2, IPA_v4_0_GROUP_UL_DL, true,
+ IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR,
+ { 2, 3, 16, 32, IPA_EE_AP } },
+ [IPA_4_0][IPA_CLIENT_APPS_CMD_PROD] = {
+ 5, IPA_v4_0_GROUP_UL_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+ QMB_MASTER_SELECT_DDR,
+ { 5, 4, 20, 24, IPA_EE_AP } },
+ [IPA_4_0][IPA_CLIENT_ODU_PROD] = {
+ 0, IPA_v4_0_GROUP_UL_DL, true,
+ IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+ QMB_MASTER_SELECT_DDR,
+ { 0, 1, 8, 16, IPA_EE_AP } },
+ [IPA_4_0][IPA_CLIENT_ETHERNET_PROD] = {
+ 9, IPA_v4_0_GROUP_UL_DL, true,
+ IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR,
+ { 9, 0, 8, 16, IPA_EE_UC } },
+ [IPA_4_0][IPA_CLIENT_MHI_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0][IPA_CLIENT_Q6_LAN_PROD] = {
+ 6, IPA_v4_0_GROUP_UL_DL, true,
+ IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR,
+ { 6, 2, 12, 24, IPA_EE_Q6 } },
+ [IPA_4_0][IPA_CLIENT_Q6_WAN_PROD] = {
+ 3, IPA_v4_0_GROUP_UL_DL, true,
+ IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR,
+ { 3, 0, 16, 32, IPA_EE_Q6 } },
+ [IPA_4_0][IPA_CLIENT_Q6_CMD_PROD] = {
+ 4, IPA_v4_0_GROUP_UL_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR,
+ { 4, 1, 20, 24, IPA_EE_Q6 } },
+ [IPA_4_0][IPA_CLIENT_Q6_DECOMP_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0][IPA_CLIENT_Q6_DECOMP2_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD] = IPA_CLIENT_NOT_USED,
+	/* Only for test purposes */
+ [IPA_4_0][IPA_CLIENT_TEST_PROD] = {
+ 0, IPA_v4_0_GROUP_UL_DL, true,
+ IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR,
+ {0, 8, 8, 16, IPA_EE_AP } },
+ [IPA_4_0][IPA_CLIENT_TEST1_PROD] = {
+ 0, IPA_v4_0_GROUP_UL_DL, true,
+ IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR,
+ {0, 8, 8, 16, IPA_EE_AP } },
+ [IPA_4_0][IPA_CLIENT_TEST2_PROD] = {
+ 1, IPA_v4_0_GROUP_UL_DL, true,
+ IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR,
+ { 1, 0, 8, 16, IPA_EE_AP } },
+ [IPA_4_0][IPA_CLIENT_TEST3_PROD] = {
+ 7, IPA_v4_0_GROUP_UL_DL, true,
+ IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR,
+ {7, 9, 8, 16, IPA_EE_AP } },
+ [IPA_4_0][IPA_CLIENT_TEST4_PROD] = {
+ 8, IPA_v4_0_GROUP_UL_DL, true,
+ IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR,
+ { 8, 10, 8, 16, IPA_EE_AP } },
+
+
+ [IPA_4_0][IPA_CLIENT_HSIC1_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0][IPA_CLIENT_WLAN1_CONS] = {
+ 18, IPA_v4_0_GROUP_UL_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR,
+ { 18, 12, 6, 9, IPA_EE_AP } },
+ [IPA_4_0][IPA_CLIENT_HSIC2_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0][IPA_CLIENT_USB2_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0][IPA_CLIENT_WLAN2_CONS] = {
+ 20, IPA_v4_0_GROUP_UL_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR,
+ { 20, 14, 9, 9, IPA_EE_AP } },
+ [IPA_4_0][IPA_CLIENT_HSIC3_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0][IPA_CLIENT_USB3_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0][IPA_CLIENT_WLAN3_CONS] = {
+ 21, IPA_v4_0_GROUP_UL_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR,
+ { 21, 15, 9, 9, IPA_EE_AP } },
+ [IPA_4_0][IPA_CLIENT_HSIC4_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0][IPA_CLIENT_USB4_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0][IPA_CLIENT_WLAN4_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0][IPA_CLIENT_HSIC5_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0][IPA_CLIENT_USB_CONS] = {
+ 19, IPA_v4_0_GROUP_UL_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_PCIE,
+ { 19, 13, 9, 9, IPA_EE_AP } },
+ [IPA_4_0][IPA_CLIENT_USB_DPL_CONS] = {
+ 15, IPA_v4_0_GROUP_UL_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR,
+ { 15, 7, 5, 5, IPA_EE_AP } },
+ [IPA_4_0][IPA_CLIENT_A2_EMBEDDED_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0][IPA_CLIENT_A2_TETHERED_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0][IPA_CLIENT_A5_LAN_WAN_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0][IPA_CLIENT_APPS_LAN_CONS] = {
+ 10, IPA_v4_0_GROUP_UL_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR,
+ { 10, 5, 9, 9, IPA_EE_AP } },
+ [IPA_4_0][IPA_CLIENT_APPS_WAN_CONS] = {
+ 11, IPA_v4_0_GROUP_UL_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR,
+ { 11, 6, 9, 9, IPA_EE_AP } },
+ [IPA_4_0][IPA_CLIENT_ODU_EMB_CONS] = {
+ 17, IPA_v4_0_GROUP_UL_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR,
+ { 17, 1, 17, 17, IPA_EE_AP } },
+ [IPA_4_0][IPA_CLIENT_ETHERNET_CONS] = {
+ 22, IPA_v4_0_GROUP_UL_DL, true,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR,
+ { 22, 1, 17, 17, IPA_EE_UC } },
+ [IPA_4_0][IPA_CLIENT_ODU_TETH_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0][IPA_CLIENT_MHI_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0][IPA_CLIENT_Q6_LAN_CONS] = {
+ 14, IPA_v4_0_GROUP_UL_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR,
+ { 14, 4, 9, 9, IPA_EE_Q6 } },
+ [IPA_4_0][IPA_CLIENT_Q6_WAN_CONS] = {
+ 13, IPA_v4_0_GROUP_UL_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR,
+ { 13, 3, 9, 9, IPA_EE_Q6 } },
+ [IPA_4_0][IPA_CLIENT_Q6_DUN_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0][IPA_CLIENT_Q6_DECOMP_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0][IPA_CLIENT_Q6_DECOMP2_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS] = {
+ 16, IPA_v4_0_GROUP_UL_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR,
+ { 16, 5, 9, 9, IPA_EE_Q6 } },
+	/* Only for test purposes */
+ /* MBIM aggregation test pipes should have the same QMB as USB_CONS */
+ [IPA_4_0][IPA_CLIENT_TEST_CONS] = {
+ 12, IPA_v4_0_GROUP_UL_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_PCIE,
+ { 12, 2, 5, 5, IPA_EE_AP } },
+ [IPA_4_0][IPA_CLIENT_TEST1_CONS] = {
+ 12, IPA_v4_0_GROUP_UL_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR,
+ { 12, 2, 5, 5, IPA_EE_AP } },
+ [IPA_4_0][IPA_CLIENT_TEST2_CONS] = {
+ 18, IPA_v4_0_GROUP_UL_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_PCIE,
+ { 18, 12, 6, 9, IPA_EE_AP } },
+ [IPA_4_0][IPA_CLIENT_TEST3_CONS] = {
+ 20, IPA_v4_0_GROUP_UL_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR,
+ { 20, 14, 9, 9, IPA_EE_AP } },
+ [IPA_4_0][IPA_CLIENT_TEST4_CONS] = {
+ 21, IPA_v4_0_GROUP_UL_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_PCIE,
+ { 21, 15, 9, 9, IPA_EE_AP } },
+
+ /* IPA_4_0_MHI */
+ [IPA_4_0_MHI][IPA_CLIENT_HSIC1_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0_MHI][IPA_CLIENT_WLAN1_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0_MHI][IPA_CLIENT_HSIC2_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0_MHI][IPA_CLIENT_USB2_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0_MHI][IPA_CLIENT_HSIC3_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0_MHI][IPA_CLIENT_USB3_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0_MHI][IPA_CLIENT_HSIC4_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0_MHI][IPA_CLIENT_USB4_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0_MHI][IPA_CLIENT_HSIC5_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0_MHI][IPA_CLIENT_USB_PROD] = {
+ 0, IPA_v4_0_MHI_GROUP_DDR, true,
+ IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR,
+ { 0, 8, 8, 16, IPA_EE_AP } },
+ [IPA_4_0_MHI][IPA_CLIENT_UC_USB_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0_MHI][IPA_CLIENT_A5_WLAN_AMPDU_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0_MHI][IPA_CLIENT_A2_EMBEDDED_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0_MHI][IPA_CLIENT_A2_TETHERED_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0_MHI][IPA_CLIENT_APPS_LAN_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0_MHI][IPA_CLIENT_APPS_WAN_PROD] = {
+ 2, IPA_v4_0_MHI_GROUP_DDR, true,
+ IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR,
+ { 2, 3, 16, 32, IPA_EE_AP } },
+ [IPA_4_0_MHI][IPA_CLIENT_APPS_CMD_PROD] = {
+ 5, IPA_v4_0_MHI_GROUP_DDR, false,
+ IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+ QMB_MASTER_SELECT_DDR,
+ { 5, 4, 20, 24, IPA_EE_AP } },
+ [IPA_4_0_MHI][IPA_CLIENT_ODU_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0_MHI][IPA_CLIENT_MHI_PROD] = {
+ 1, IPA_v4_0_MHI_GROUP_PCIE, true,
+ IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_PCIE,
+ { 1, 0, 8, 16, IPA_EE_AP } },
+ [IPA_4_0_MHI][IPA_CLIENT_Q6_LAN_PROD] = {
+ 3, IPA_v4_0_MHI_GROUP_DDR, true,
+ IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR,
+ { 3, 0, 16, 32, IPA_EE_Q6 } },
+ [IPA_4_0_MHI][IPA_CLIENT_Q6_WAN_PROD] = {
+ 6, IPA_v4_0_GROUP_UL_DL, true,
+ IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR,
+ { 6, 2, 12, 24, IPA_EE_Q6 } },
+ [IPA_4_0_MHI][IPA_CLIENT_Q6_CMD_PROD] = {
+ 4, IPA_v4_0_MHI_GROUP_PCIE, false,
+ IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR,
+ { 4, 1, 20, 24, IPA_EE_Q6 } },
+ [IPA_4_0_MHI][IPA_CLIENT_Q6_DECOMP_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0_MHI][IPA_CLIENT_Q6_DECOMP2_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0_MHI][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD] = {
+ 7, IPA_v4_0_MHI_GROUP_DMA, false,
+ IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+ QMB_MASTER_SELECT_DDR,
+ { 7, 9, 8, 16, IPA_EE_AP } },
+ [IPA_4_0_MHI][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD] = {
+ 8, IPA_v4_0_MHI_GROUP_DMA, false,
+ IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+ QMB_MASTER_SELECT_DDR,
+ { 8, 10, 8, 16, IPA_EE_AP } },
+	/* Only for test purposes */
+ [IPA_4_0_MHI][IPA_CLIENT_TEST_PROD] = {
+ 0, IPA_v4_0_GROUP_UL_DL, true,
+ IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR,
+ {0, 8, 8, 16, IPA_EE_AP } },
+	[IPA_4_0_MHI][IPA_CLIENT_TEST1_PROD] = {
+ 0, IPA_v4_0_GROUP_UL_DL, true,
+ IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR,
+ {0, 8, 8, 16, IPA_EE_AP } },
+ [IPA_4_0_MHI][IPA_CLIENT_TEST2_PROD] = {
+ 1, IPA_v4_0_GROUP_UL_DL, true,
+ IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR,
+ { 1, 0, 8, 16, IPA_EE_AP } },
+ [IPA_4_0_MHI][IPA_CLIENT_TEST3_PROD] = {
+ 7, IPA_v4_0_GROUP_UL_DL, true,
+ IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR,
+ {7, 9, 8, 16, IPA_EE_AP } },
+ [IPA_4_0_MHI][IPA_CLIENT_TEST4_PROD] = {
+ 8, IPA_v4_0_GROUP_UL_DL, true,
+ IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR,
+ { 8, 10, 8, 16, IPA_EE_AP } },
+
+ [IPA_4_0_MHI][IPA_CLIENT_HSIC1_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0_MHI][IPA_CLIENT_WLAN1_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0_MHI][IPA_CLIENT_HSIC2_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0_MHI][IPA_CLIENT_USB2_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0_MHI][IPA_CLIENT_WLAN2_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0_MHI][IPA_CLIENT_HSIC3_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0_MHI][IPA_CLIENT_USB3_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0_MHI][IPA_CLIENT_WLAN3_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0_MHI][IPA_CLIENT_HSIC4_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0_MHI][IPA_CLIENT_USB4_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0_MHI][IPA_CLIENT_WLAN4_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0_MHI][IPA_CLIENT_HSIC5_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0_MHI][IPA_CLIENT_USB_CONS] = {
+ 19, IPA_v4_0_MHI_GROUP_DDR, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR,
+ { 19, 13, 9, 9, IPA_EE_AP } },
+ [IPA_4_0_MHI][IPA_CLIENT_USB_DPL_CONS] = {
+ 15, IPA_v4_0_MHI_GROUP_DDR, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR,
+ { 15, 7, 5, 5, IPA_EE_AP } },
+ [IPA_4_0_MHI][IPA_CLIENT_A2_EMBEDDED_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0_MHI][IPA_CLIENT_A2_TETHERED_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0_MHI][IPA_CLIENT_A5_LAN_WAN_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0_MHI][IPA_CLIENT_APPS_LAN_CONS] = {
+ 10, IPA_v4_0_MHI_GROUP_DDR, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR,
+ { 10, 5, 9, 9, IPA_EE_AP } },
+ [IPA_4_0_MHI][IPA_CLIENT_APPS_WAN_CONS] = {
+ 11, IPA_v4_0_MHI_GROUP_DDR, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR,
+ { 11, 6, 9, 9, IPA_EE_AP } },
+ [IPA_4_0_MHI][IPA_CLIENT_ODU_EMB_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0_MHI][IPA_CLIENT_ODU_TETH_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0_MHI][IPA_CLIENT_MHI_CONS] = {
+ 17, IPA_v4_0_MHI_GROUP_PCIE, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_PCIE,
+ { 17, 1, 17, 17, IPA_EE_AP } },
+ [IPA_4_0_MHI][IPA_CLIENT_Q6_LAN_CONS] = {
+ 14, IPA_v4_0_MHI_GROUP_DDR, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR,
+ { 14, 4, 9, 9, IPA_EE_Q6 } },
+ [IPA_4_0_MHI][IPA_CLIENT_Q6_WAN_CONS] = {
+ 13, IPA_v4_0_MHI_GROUP_DDR, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR,
+ { 13, 3, 9, 9, IPA_EE_Q6 } },
+ [IPA_4_0_MHI][IPA_CLIENT_Q6_DUN_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0_MHI][IPA_CLIENT_Q6_DECOMP_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0_MHI][IPA_CLIENT_Q6_DECOMP2_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_4_0_MHI][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS] = {
+ 20, IPA_v4_0_MHI_GROUP_DMA, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_PCIE,
+ { 20, 14, 9, 9, IPA_EE_AP } },
+ [IPA_4_0_MHI][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS] = {
+ 21, IPA_v4_0_MHI_GROUP_DMA, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_PCIE,
+ { 21, 15, 9, 9, IPA_EE_AP } },
+ [IPA_4_0_MHI][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS] = {
+ 16, IPA_v4_0_GROUP_UL_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR,
+ { 16, 5, 9, 9, IPA_EE_Q6 } },
+	/* Only for test purposes */
+ [IPA_4_0_MHI][IPA_CLIENT_TEST_CONS] = {
+ 12, IPA_v4_0_GROUP_UL_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_PCIE,
+ { 12, 2, 5, 5, IPA_EE_AP } },
+ [IPA_4_0_MHI][IPA_CLIENT_TEST1_CONS] = {
+ 12, IPA_v4_0_GROUP_UL_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR,
+ { 12, 2, 5, 5, IPA_EE_AP } },
+ [IPA_4_0_MHI][IPA_CLIENT_TEST2_CONS] = {
+ 18, IPA_v4_0_GROUP_UL_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_PCIE,
+ { 18, 12, 6, 9, IPA_EE_AP } },
+ [IPA_4_0_MHI][IPA_CLIENT_TEST3_CONS] = {
+ 20, IPA_v4_0_GROUP_UL_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR,
+ { 20, 14, 9, 9, IPA_EE_AP } },
+ [IPA_4_0_MHI][IPA_CLIENT_TEST4_CONS] = {
+ 21, IPA_v4_0_GROUP_UL_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_PCIE,
+ { 21, 15, 9, 9, IPA_EE_AP } },
+
+
};
static struct msm_bus_vectors ipa_init_vectors_v3_0[] = {
@@ -1587,16 +2086,22 @@
*/
void ipa3_cfg_qsb(void)
{
- int qsb_max_writes[2] = { 8, 2 };
- int qsb_max_reads[2] = { 8, 8 };
+ struct ipahal_reg_qsb_max_reads max_reads = { 0 };
+ struct ipahal_reg_qsb_max_writes max_writes = { 0 };
+
+	max_reads.qmb_0_max_reads = 8;
+	max_reads.qmb_1_max_reads = 8;
+
+ max_writes.qmb_0_max_writes = 8;
+ max_writes.qmb_1_max_writes = 2;
if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5) {
- qsb_max_writes[1] = 4;
- qsb_max_reads[1] = 12;
+ max_writes.qmb_1_max_writes = 4;
+ max_reads.qmb_1_max_reads = 12;
}
- ipahal_write_reg_fields(IPA_QSB_MAX_WRITES, qsb_max_writes);
- ipahal_write_reg_fields(IPA_QSB_MAX_READS, qsb_max_reads);
+ ipahal_write_reg_fields(IPA_QSB_MAX_WRITES, &max_writes);
+ ipahal_write_reg_fields(IPA_QSB_MAX_READS, &max_reads);
}
/**
@@ -1624,6 +2129,9 @@
case IPA_HW_v3_5_1:
val = IPA_BCR_REG_VAL_v3_5;
break;
+ case IPA_HW_v4_0:
+ val = IPA_BCR_REG_VAL_v4_0;
+ break;
default:
IPAERR("unknown HW type in dts\n");
return -EFAULT;
@@ -1663,6 +2171,15 @@
case IPA_HW_v3_5_1:
hw_type_index = IPA_3_5_1;
break;
+ case IPA_HW_v4_0:
+ hw_type_index = IPA_4_0;
+ /*
+		 * This flag is initialized only after the FW load trigger
+		 * from user space (ipa3_write).
+ */
+ if (ipa3_ctx->ipa_config_is_mhi)
+ hw_type_index = IPA_4_0_MHI;
+ break;
default:
IPAERR("Incorrect IPA version %d\n", ipa3_ctx->ipa_hw_type);
hw_type_index = IPA_3_0;
@@ -2573,12 +3090,15 @@
ipa3_ctx->ep[clnt_hdl].rt_tbl_idx =
IPA_MEM_PART(v4_apps_rt_index_lo);
- IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+ if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
- init_rt.route_table_index = ipa3_ctx->ep[clnt_hdl].rt_tbl_idx;
- ipahal_write_reg_n_fields(IPA_ENDP_INIT_ROUTE_n, clnt_hdl, &init_rt);
+ init_rt.route_table_index = ipa3_ctx->ep[clnt_hdl].rt_tbl_idx;
+ ipahal_write_reg_n_fields(IPA_ENDP_INIT_ROUTE_n,
+ clnt_hdl, &init_rt);
- IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+ }
return 0;
}
@@ -2815,11 +3335,18 @@
{
struct ipahal_reg_qcncm qcncm;
- IPA_ACTIVE_CLIENTS_INC_SIMPLE();
- ipahal_read_reg_fields(IPA_QCNCM, &qcncm);
- qcncm.mode_en = mode;
- ipahal_write_reg_fields(IPA_QCNCM, &qcncm);
- IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+ if (mode != IPA_MBIM_AGGR) {
+			IPAERR("Only MBIM mode is supported starting IPA 4.0\n");
+ return -EPERM;
+ }
+ } else {
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ ipahal_read_reg_fields(IPA_QCNCM, &qcncm);
+ qcncm.mode_en = mode;
+ ipahal_write_reg_fields(IPA_QCNCM, &qcncm);
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ }
return 0;
}
@@ -2839,6 +3366,11 @@
{
struct ipahal_reg_qcncm qcncm;
+ if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+		IPAERR("QCNCM mode is not supported starting IPA 4.0\n");
+ return -EPERM;
+ }
+
if (sig == NULL) {
IPAERR("bad argument for ipa3_set_qcncm_ndp_sig/n");
return -EINVAL;
@@ -2863,6 +3395,11 @@
{
struct ipahal_reg_single_ndp_mode mode;
+ if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+		IPAERR("QCNCM mode is not supported starting IPA 4.0\n");
+ return -EPERM;
+ }
+
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
ipahal_read_reg_fields(IPA_SINGLE_NDP_MODE, &mode);
mode.single_ndp_en = enable;
@@ -2910,13 +3447,13 @@
*/
int ipa3_init_mem_partition(struct device_node *node)
{
- const size_t ram_mmap_v3_0_size = 70;
- const size_t ram_mmap_v3_5_size = 72;
const size_t ram_mmap_current_version_size =
sizeof(ipa3_ctx->ctrl->mem_partition) / sizeof(u32);
- const size_t version = ipa_get_hw_type();
int result;
+ memset(&ipa3_ctx->ctrl->mem_partition, 0,
+ sizeof(ipa3_ctx->ctrl->mem_partition));
+
IPADBG("Reading from DTS as u32 array\n");
/*
@@ -2925,39 +3462,21 @@
* mismatch. The size of the array monotonically increasing because the
* obsolete entries are set to zero rather than deleted, so the
* possible sizes are in range
- * [ram_mmap_v3_0_size, ram_mmap_current_version_size]
+ * [1, ram_mmap_current_version_size]
*/
result = of_property_read_variable_u32_array(node, "qcom,ipa-ram-mmap",
(u32 *)&ipa3_ctx->ctrl->mem_partition,
- ram_mmap_v3_0_size, ram_mmap_current_version_size);
+ 1, ram_mmap_current_version_size);
- if (result <= 0) {
- IPAERR("Read operation failed\n");
+ if (IPA_MEM_PART(uc_event_ring_ofst) & 1023) {
+ IPAERR("UC EVENT RING OFST 0x%x is unaligned\n",
+ IPA_MEM_PART(uc_event_ring_ofst));
return -ENODEV;
}
- if (version < IPA_HW_v3_0)
- ipa_assert();
- if (version < IPA_HW_v3_5) {
- if (result != ram_mmap_v3_0_size) {
- IPAERR("Mismatch at IPA RAM MMAP DTS entry\n");
- return -ENODEV;
- }
- } else {
- if (result != ram_mmap_v3_5_size) {
- IPAERR("Mismatch at IPA RAM MMAP DTS entry\n");
- return -ENODEV;
- }
- if (IPA_MEM_PART(uc_event_ring_ofst) & 1023) {
- IPAERR("UC EVENT RING OFST 0x%x is unaligned\n",
- IPA_MEM_PART(uc_event_ring_ofst));
- return -ENODEV;
- }
-
- IPADBG("UC EVENT RING OFST 0x%x SIZE 0x%x\n",
- IPA_MEM_PART(uc_event_ring_ofst),
- IPA_MEM_PART(uc_event_ring_size));
- }
+ IPADBG("UC EVENT RING OFST 0x%x SIZE 0x%x\n",
+ IPA_MEM_PART(uc_event_ring_ofst),
+ IPA_MEM_PART(uc_event_ring_size));
IPADBG("NAT OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(nat_ofst),
IPA_MEM_PART(nat_size));
@@ -3126,6 +3645,16 @@
IPA_MEM_PART(apps_hdr_proc_ctx_size),
IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr));
+ if (IPA_MEM_PART(pdn_config_ofst) & 7) {
+ IPAERR("PDN CONFIG OFST 0x%x is unaligned\n",
+ IPA_MEM_PART(pdn_config_ofst));
+ return -ENODEV;
+ }
+
+ IPADBG("PDN CONFIG OFST 0x%x SIZE 0x%x\n",
+ IPA_MEM_PART(pdn_config_ofst),
+ IPA_MEM_PART(pdn_config_size));
+
if (IPA_MEM_PART(modem_ofst) & 7) {
IPAERR("MODEM OFST 0x%x is unaligned\n",
IPA_MEM_PART(modem_ofst));
@@ -3207,9 +3736,11 @@
ctrl->ipa_reg_base_ofst = ipahal_get_reg_base();
ctrl->ipa_init_sram = _ipa_init_sram_v3;
ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v3_0;
-
ctrl->ipa_init_hdr = _ipa_init_hdr_v3_0;
+ if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
+ ctrl->ipa3_read_ep_reg = _ipa_read_ep_reg_v4_0;
+
return 0;
}
@@ -3343,8 +3874,7 @@
res = -ENOMEM;
goto fail_free_tag_desc;
}
- tag_desc[desc_idx].opcode =
- ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+ tag_desc[desc_idx].opcode = cmd_pyld->opcode;
tag_desc[desc_idx].pyld = cmd_pyld->data;
tag_desc[desc_idx].len = cmd_pyld->len;
tag_desc[desc_idx].type = IPA_IMM_CMD_DESC;
@@ -3362,8 +3892,7 @@
res = -ENOMEM;
goto fail_free_desc;
}
- tag_desc[desc_idx].opcode =
- ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_PACKET_INIT);
+ tag_desc[desc_idx].opcode = cmd_pyld->opcode;
tag_desc[desc_idx].pyld = cmd_pyld->data;
tag_desc[desc_idx].len = cmd_pyld->len;
tag_desc[desc_idx].type = IPA_IMM_CMD_DESC;
@@ -3380,8 +3909,7 @@
res = -ENOMEM;
goto fail_free_desc;
}
- tag_desc[desc_idx].opcode =
- ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_PACKET_TAG_STATUS);
+ tag_desc[desc_idx].opcode = cmd_pyld->opcode;
tag_desc[desc_idx].pyld = cmd_pyld->data;
tag_desc[desc_idx].len = cmd_pyld->len;
tag_desc[desc_idx].type = IPA_IMM_CMD_DESC;
@@ -3520,8 +4048,7 @@
goto fail_alloc_reg_write_agg_close;
}
- desc[desc_idx].opcode =
- ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+ desc[desc_idx].opcode = cmd_pyld->opcode;
desc[desc_idx].pyld = cmd_pyld->data;
desc[desc_idx].len = cmd_pyld->len;
desc[desc_idx].type = IPA_IMM_CMD_DESC;
@@ -4059,6 +4586,49 @@
}
}
break;
+ case IPA_4_0:
+ case IPA_4_0_MHI:
+ if (src) {
+ switch (group_index) {
+ case IPA_v4_0_GROUP_LWA_DL:
+ case IPA_v4_0_GROUP_UL_DL:
+ ipahal_write_reg_n_fields(
+ IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n,
+ n, val);
+ break;
+ case IPA_v4_0_MHI_GROUP_DMA:
+ case IPA_v4_0_GROUP_UC_RX_Q:
+ ipahal_write_reg_n_fields(
+ IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n,
+ n, val);
+ break;
+ default:
+ IPAERR(
+ " Invalid source resource group,index #%d\n",
+ group_index);
+ break;
+ }
+ } else {
+ switch (group_index) {
+ case IPA_v4_0_GROUP_LWA_DL:
+ case IPA_v4_0_GROUP_UL_DL:
+ ipahal_write_reg_n_fields(
+ IPA_DST_RSRC_GRP_01_RSRC_TYPE_n,
+ n, val);
+ break;
+ case IPA_v4_0_MHI_GROUP_DMA:
+ ipahal_write_reg_n_fields(
+ IPA_DST_RSRC_GRP_23_RSRC_TYPE_n,
+ n, val);
+ break;
+ default:
+ IPAERR(
+ " Invalid destination resource group,index #%d\n",
+ group_index);
+ break;
+ }
+ }
+ break;
default:
IPAERR("invalid hw type\n");
WARN_ON(1);
@@ -4103,6 +4673,33 @@
}
}
+static void ipa3_configure_rx_hps_weight(void)
+{
+ struct ipahal_reg_rx_hps_weights val;
+ u8 hw_type_idx;
+
+ hw_type_idx = ipa3_get_hw_type_index();
+
+ val.hps_queue_weight_0 =
+ ipa3_rsrc_rx_grp_hps_weight_config
+ [hw_type_idx][IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG]
+ [0];
+ val.hps_queue_weight_1 =
+ ipa3_rsrc_rx_grp_hps_weight_config
+ [hw_type_idx][IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG]
+ [1];
+ val.hps_queue_weight_2 =
+ ipa3_rsrc_rx_grp_hps_weight_config
+ [hw_type_idx][IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG]
+ [2];
+ val.hps_queue_weight_3 =
+ ipa3_rsrc_rx_grp_hps_weight_config
+ [hw_type_idx][IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG]
+ [3];
+
+ ipahal_write_reg_fields(IPA_HPS_FTCH_ARB_QUEUE_WEIGHT, &val);
+}
+
void ipa3_set_resorce_groups_min_max_limits(void)
{
int i;
@@ -4133,6 +4730,13 @@
src_grp_idx_max = IPA_v3_5_SRC_GROUP_MAX;
dst_grp_idx_max = IPA_v3_5_DST_GROUP_MAX;
break;
+ case IPA_4_0:
+ case IPA_4_0_MHI:
+ src_rsrc_type_max = IPA_v4_0_RSRC_GRP_TYPE_SRC_MAX;
+ dst_rsrc_type_max = IPA_v4_0_RSRC_GRP_TYPE_DST_MAX;
+ src_grp_idx_max = IPA_v4_0_SRC_GROUP_MAX;
+ dst_grp_idx_max = IPA_v4_0_DST_GROUP_MAX;
+ break;
default:
IPAERR("invalid hw type index\n");
WARN_ON(1);
@@ -4186,6 +4790,9 @@
ipa3_configure_rx_hps_clients(1, false);
}
+ if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5)
+ ipa3_configure_rx_hps_weight();
+
IPADBG("EXIT\n");
}
@@ -4309,8 +4916,7 @@
{
struct ipa3_desc desc = {0};
- desc.opcode = ipahal_imm_cmd_get_opcode_param(
- IPA_IMM_CMD_DMA_TASK_32B_ADDR, 1);
+ desc.opcode = ipa3_ctx->dma_task_info.cmd_pyld->opcode;
desc.pyld = ipa3_ctx->dma_task_info.cmd_pyld->data;
desc.len = ipa3_ctx->dma_task_info.cmd_pyld->len;
desc.type = IPA_IMM_CMD_DESC;
@@ -4565,6 +5171,7 @@
switch (ipa3_ctx->ipa_hw_type) {
case IPA_HW_v3_0:
case IPA_HW_v3_5:
+ case IPA_HW_v4_0:
return false;
case IPA_HW_v3_1:
case IPA_HW_v3_5_1:
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
index fa9c6c8..d35b8a7 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
@@ -49,6 +49,8 @@
#define IPAHAL_MEM_ALLOC(__size, __is_atomic_ctx) \
(kzalloc((__size), ((__is_atomic_ctx)?GFP_ATOMIC:GFP_KERNEL)))
+static u16 ipahal_imm_cmd_get_opcode(enum ipahal_imm_cmd_name cmd);
+
static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_dma_task_32b_addr(
enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
@@ -63,6 +65,8 @@
IPAHAL_ERR("kzalloc err\n");
return pyld;
}
+ /* Currently supports only one packet */
+ pyld->opcode = ipahal_imm_cmd_get_opcode(cmd) + (1 << 8);
pyld->len = sizeof(*data);
data = (struct ipa_imm_cmd_hw_dma_task_32b_addr *)pyld->data;
@@ -101,6 +105,7 @@
IPAHAL_ERR("kzalloc err\n");
return pyld;
}
+ pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
pyld->len = sizeof(*data);
data = (struct ipa_imm_cmd_hw_ip_packet_tag_status *)pyld->data;
@@ -127,6 +132,7 @@
IPAHAL_ERR("kzalloc err\n");
return pyld;
}
+ pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
pyld->len = sizeof(*data);
data = (struct ipa_imm_cmd_hw_dma_shared_mem *)pyld->data;
@@ -164,6 +170,61 @@
return pyld;
}
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_dma_shared_mem_v_4_0(
+ enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+ struct ipahal_imm_cmd_pyld *pyld;
+ struct ipa_imm_cmd_hw_dma_shared_mem_v_4_0 *data;
+ struct ipahal_imm_cmd_dma_shared_mem *mem_params =
+ (struct ipahal_imm_cmd_dma_shared_mem *)params;
+
+ if (unlikely(mem_params->size & ~0xFFFF)) {
+ IPAHAL_ERR("Size is bigger than 16bit width 0x%x\n",
+ mem_params->size);
+ WARN_ON(1);
+ return NULL;
+ }
+ if (unlikely(mem_params->local_addr & ~0xFFFF)) {
+ IPAHAL_ERR("Local addr is bigger than 16bit width 0x%x\n",
+ mem_params->local_addr);
+ WARN_ON(1);
+ return NULL;
+ }
+
+ pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+ if (unlikely(!pyld)) {
+ WARN_ON(1);
+ return pyld;
+ }
+
+ pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
+ pyld->len = sizeof(*data);
+ data = (struct ipa_imm_cmd_hw_dma_shared_mem_v_4_0 *)pyld->data;
+
+ data->direction = mem_params->is_read ? 1 : 0;
+ data->clear_after_read = mem_params->clear_after_read;
+ data->size = mem_params->size;
+ data->local_addr = mem_params->local_addr;
+ data->system_addr = mem_params->system_addr;
+ pyld->opcode |= (mem_params->skip_pipeline_clear ? 1 : 0) << 8;
+ switch (mem_params->pipeline_clear_options) {
+ case IPAHAL_HPS_CLEAR:
+ break;
+ case IPAHAL_SRC_GRP_CLEAR:
+ pyld->opcode |= (1 << 9);
+ break;
+ case IPAHAL_FULL_PIPELINE_CLEAR:
+ pyld->opcode |= (2 << 9);
+ break;
+ default:
+ IPAHAL_ERR("unsupported pipline clear option %d\n",
+ mem_params->pipeline_clear_options);
+ WARN_ON(1);
+ };
+
+ return pyld;
+}
+
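/*
 * From IPAv4.0 the fully composed opcode travels with the payload: bits
 * [7:0] carry the fixed command opcode, bit 8 carries skip_pipeline_clear
 * (or the packet count for DMA_TASK_32B_ADDR), and bits [10:9] select the
 * pipeline-clear granularity. A standalone illustration of the encoding
 * (helper name hypothetical):
 */
#include <stdint.h>

static uint16_t compose_v4_opcode(uint16_t fixed_op, int skip_clear,
				  int clear_opt)
{
	uint16_t op = fixed_op;		/* e.g. 19 for DMA_SHARED_MEM */

	op |= (uint16_t)(skip_clear ? 1 : 0) << 8;	/* skip wait bit */
	op |= (uint16_t)clear_opt << 9;	/* 0 = HPS, 1 = SRC_GRP, 2 = FULL */
	return op;
}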
static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_register_write(
enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
{
@@ -177,6 +238,7 @@
IPAHAL_ERR("kzalloc err\n");
return pyld;
}
+ pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
pyld->len = sizeof(*data);
data = (struct ipa_imm_cmd_hw_register_write *)pyld->data;
@@ -209,6 +271,54 @@
return pyld;
}
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_register_write_v_4_0(
+ enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+ struct ipahal_imm_cmd_pyld *pyld;
+ struct ipa_imm_cmd_hw_register_write_v_4_0 *data;
+ struct ipahal_imm_cmd_register_write *regwrt_params =
+ (struct ipahal_imm_cmd_register_write *)params;
+
+ if (unlikely(regwrt_params->offset & ~0xFFFF)) {
+ IPAHAL_ERR("Offset is bigger than 16bit width 0x%x\n",
+ regwrt_params->offset);
+ WARN_ON(1);
+ return NULL;
+ }
+
+ pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+ if (unlikely(!pyld)) {
+ WARN_ON(1);
+ return pyld;
+ }
+ pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
+ pyld->len = sizeof(*data);
+ data = (struct ipa_imm_cmd_hw_register_write_v_4_0 *)pyld->data;
+
+ data->offset = regwrt_params->offset;
+ data->offset_high = regwrt_params->offset >> 16;
+ data->value = regwrt_params->value;
+ data->value_mask = regwrt_params->value_mask;
+
+ pyld->opcode |= (regwrt_params->skip_pipeline_clear ? 1 : 0) << 8;
+ switch (regwrt_params->pipeline_clear_options) {
+ case IPAHAL_HPS_CLEAR:
+ break;
+ case IPAHAL_SRC_GRP_CLEAR:
+ pyld->opcode |= (1 << 9);
+ break;
+ case IPAHAL_FULL_PIPELINE_CLEAR:
+ pyld->opcode |= (2 << 9);
+ break;
+ default:
+ IPAHAL_ERR("unsupported pipline clear option %d\n",
+ regwrt_params->pipeline_clear_options);
+ WARN_ON(1);
+ };
+
+ return pyld;
+}
+
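/*
 * The v4.0 REGISTER_WRITE layout widens the offset to 20 bits: the low 16
 * bits go into @offset and bits [19:16] into @offset_high (note that the
 * constructor above still rejects offsets wider than 16 bits). A quick
 * standalone check of the split:
 */
#include <assert.h>
#include <stdint.h>

static void check_offset_split(void)
{
	uint32_t offset = 0x12345;		/* 20-bit register offset */
	uint16_t lo = offset & 0xFFFF;		/* -> data->offset */
	uint8_t hi = (offset >> 16) & 0xF;	/* -> data->offset_high */

	assert(lo == 0x2345 && hi == 0x1);
	assert((((uint32_t)hi << 16) | lo) == offset);	/* lossless */
}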
static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_packet_init(
enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
{
@@ -222,6 +332,7 @@
IPAHAL_ERR("kzalloc err\n");
return pyld;
}
+ pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
pyld->len = sizeof(*data);
data = (struct ipa_imm_cmd_hw_ip_packet_init *)pyld->data;
@@ -248,6 +359,7 @@
IPAHAL_ERR("kzalloc err\n");
return pyld;
}
+ pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
pyld->len = sizeof(*data);
data = (struct ipa_imm_cmd_hw_nat_dma *)pyld->data;
@@ -272,6 +384,7 @@
IPAHAL_ERR("kzalloc err\n");
return pyld;
}
+ pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
pyld->len = sizeof(*data);
data = (struct ipa_imm_cmd_hw_hdr_init_system *)pyld->data;
@@ -293,6 +406,7 @@
IPAHAL_ERR("kzalloc err\n");
return pyld;
}
+ pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
pyld->len = sizeof(*data);
data = (struct ipa_imm_cmd_hw_hdr_init_local *)pyld->data;
@@ -321,6 +435,7 @@
IPAHAL_ERR("kzalloc err\n");
return pyld;
}
+ pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
pyld->len = sizeof(*data);
data = (struct ipa_imm_cmd_hw_ip_v6_routing_init *)pyld->data;
@@ -347,6 +462,7 @@
IPAHAL_ERR("kzalloc err\n");
return pyld;
}
+ pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
pyld->len = sizeof(*data);
data = (struct ipa_imm_cmd_hw_ip_v4_routing_init *)pyld->data;
@@ -373,6 +489,7 @@
IPAHAL_ERR("kzalloc err\n");
return pyld;
}
+ pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
pyld->len = sizeof(*data);
data = (struct ipa_imm_cmd_hw_ip_v4_nat_init *)pyld->data;
@@ -411,6 +528,7 @@
IPAHAL_ERR("kzalloc err\n");
return pyld;
}
+ pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
pyld->len = sizeof(*data);
data = (struct ipa_imm_cmd_hw_ip_v6_filter_init *)pyld->data;
@@ -437,6 +555,7 @@
IPAHAL_ERR("kzalloc err\n");
return pyld;
}
+ pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
pyld->len = sizeof(*data);
data = (struct ipa_imm_cmd_hw_ip_v4_filter_init *)pyld->data;
@@ -455,16 +574,11 @@
* specific IPA version
* @construct - CB to construct imm command payload from abstracted structure
* @opcode - Immediate command OpCode
- * @dyn_op - Does this command supports Dynamic opcode?
- * Some commands opcode are dynamic where the part of the opcode is
- * supplied as param. This flag indicates if the specific command supports it
- * or not.
*/
struct ipahal_imm_cmd_obj {
struct ipahal_imm_cmd_pyld *(*construct)(enum ipahal_imm_cmd_name cmd,
const void *params, bool is_atomic_ctx);
u16 opcode;
- bool dyn_op;
};
/*
@@ -484,43 +598,51 @@
/* IPAv3 */
[IPA_HW_v3_0][IPA_IMM_CMD_IP_V4_FILTER_INIT] = {
ipa_imm_cmd_construct_ip_v4_filter_init,
- 3, false},
+ 3},
[IPA_HW_v3_0][IPA_IMM_CMD_IP_V6_FILTER_INIT] = {
ipa_imm_cmd_construct_ip_v6_filter_init,
- 4, false},
+ 4},
[IPA_HW_v3_0][IPA_IMM_CMD_IP_V4_NAT_INIT] = {
ipa_imm_cmd_construct_ip_v4_nat_init,
- 5, false},
+ 5},
[IPA_HW_v3_0][IPA_IMM_CMD_IP_V4_ROUTING_INIT] = {
ipa_imm_cmd_construct_ip_v4_routing_init,
- 7, false},
+ 7},
[IPA_HW_v3_0][IPA_IMM_CMD_IP_V6_ROUTING_INIT] = {
ipa_imm_cmd_construct_ip_v6_routing_init,
- 8, false},
+ 8},
[IPA_HW_v3_0][IPA_IMM_CMD_HDR_INIT_LOCAL] = {
ipa_imm_cmd_construct_hdr_init_local,
- 9, false},
+ 9},
[IPA_HW_v3_0][IPA_IMM_CMD_HDR_INIT_SYSTEM] = {
ipa_imm_cmd_construct_hdr_init_system,
- 10, false},
+ 10},
[IPA_HW_v3_0][IPA_IMM_CMD_REGISTER_WRITE] = {
ipa_imm_cmd_construct_register_write,
- 12, false},
+ 12},
[IPA_HW_v3_0][IPA_IMM_CMD_NAT_DMA] = {
ipa_imm_cmd_construct_nat_dma,
- 14, false},
+ 14},
[IPA_HW_v3_0][IPA_IMM_CMD_IP_PACKET_INIT] = {
ipa_imm_cmd_construct_ip_packet_init,
- 16, false},
+ 16},
[IPA_HW_v3_0][IPA_IMM_CMD_DMA_TASK_32B_ADDR] = {
ipa_imm_cmd_construct_dma_task_32b_addr,
- 17, true},
+ 17},
[IPA_HW_v3_0][IPA_IMM_CMD_DMA_SHARED_MEM] = {
ipa_imm_cmd_construct_dma_shared_mem,
- 19, false},
+ 19},
[IPA_HW_v3_0][IPA_IMM_CMD_IP_PACKET_TAG_STATUS] = {
ipa_imm_cmd_construct_ip_packet_tag_status,
- 20, false},
+ 20},
+
+ /* IPAv4 */
+ [IPA_HW_v4_0][IPA_IMM_CMD_REGISTER_WRITE] = {
+ ipa_imm_cmd_construct_register_write_v_4_0,
+ 12},
+ [IPA_HW_v4_0][IPA_IMM_CMD_DMA_SHARED_MEM] = {
+ ipa_imm_cmd_construct_dma_shared_mem_v_4_0,
+ 19},
};
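/*
 * Only the two commands whose H/W layout changed (REGISTER_WRITE and
 * DMA_SHARED_MEM) get explicit IPAv4.0 entries; all other commands are
 * inherited from the newest older version. A hedged sketch of such a
 * fallback lookup (the real init code may instead copy entries forward
 * once at boot):
 */
static const struct ipahal_imm_cmd_obj *imm_cmd_lookup(int hw_type, int cmd)
{
	int v;

	for (v = hw_type; v >= IPA_HW_v3_0; v--)
		if (ipahal_imm_cmd_objs[v][cmd].construct)
			return &ipahal_imm_cmd_objs[v][cmd];
	return NULL;	/* command not supported by any version */
}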
/*
@@ -589,7 +711,7 @@
/*
* ipahal_imm_cmd_get_opcode() - Get the fixed opcode of the immediate command
*/
-u16 ipahal_imm_cmd_get_opcode(enum ipahal_imm_cmd_name cmd)
+static u16 ipahal_imm_cmd_get_opcode(enum ipahal_imm_cmd_name cmd)
{
u32 opcode;
@@ -613,63 +735,6 @@
}
/*
- * ipahal_imm_cmd_get_opcode_param() - Get the opcode of an immediate command
- * that supports dynamic opcode
- * Some commands opcode are not totaly fixed, but part of it is
- * a supplied parameter. E.g. Low-Byte is fixed and Hi-Byte
- * is a given parameter.
- * This API will return the composed opcode of the command given
- * the parameter
- * Note: Use this API only for immediate comamnds that support Dynamic Opcode
- */
-u16 ipahal_imm_cmd_get_opcode_param(enum ipahal_imm_cmd_name cmd, int param)
-{
- u32 opcode;
-
- if (cmd >= IPA_IMM_CMD_MAX) {
- IPAHAL_ERR("Invalid immediate command IMM_CMD=%u\n", cmd);
- ipa_assert();
- return -EFAULT;
- }
-
- IPAHAL_DBG_LOW("Get opcode of IMM_CMD=%s\n",
- ipahal_imm_cmd_name_str(cmd));
-
- if (!ipahal_imm_cmd_objs[ipahal_ctx->hw_type][cmd].dyn_op) {
- IPAHAL_ERR("IMM_CMD=%s does not support dynamic opcode\n",
- ipahal_imm_cmd_name_str(cmd));
- ipa_assert();
- return -EFAULT;
- }
-
- /* Currently, dynamic opcode commands uses params to be set
- * on the Opcode hi-byte (lo-byte is fixed).
- * If this to be changed in the future, make the opcode calculation
- * a CB per command
- */
- if (param & ~0xFFFF) {
- IPAHAL_ERR("IMM_CMD=%s opcode param is invalid\n",
- ipahal_imm_cmd_name_str(cmd));
- ipa_assert();
- return -EFAULT;
- }
- opcode = ipahal_imm_cmd_objs[ipahal_ctx->hw_type][cmd].opcode;
- if (opcode == -1) {
- IPAHAL_ERR("Try to get opcode of obsolete IMM_CMD=%s\n",
- ipahal_imm_cmd_name_str(cmd));
- ipa_assert();
- return -EFAULT;
- }
- if (opcode & ~0xFFFF) {
- IPAHAL_ERR("IMM_CMD=%s opcode will be overridden\n",
- ipahal_imm_cmd_name_str(cmd));
- ipa_assert();
- return -EFAULT;
- }
- return (opcode + (param<<8));
-}
-
-/*
* ipahal_construct_imm_cmd() - Construct immdiate command
* This function builds imm cmd bulk that can be be sent to IPA
* The command will be allocated dynamically.
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
index 8f85d4e..e71a48b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
@@ -259,6 +259,8 @@
* Perform mem copy into or out of the SW area of IPA local mem
* @size: Size in bytes of data to copy. Expected size is up to 2K bytes
* @local_addr: Address in IPA local memory
+ * @clear_after_read: Clear local memory at the end of a read operation;
+ * allows an atomic read-and-clear if HPS is clear. Ignored for writes.
* @is_read: Read operation from local memory? If not, then write.
* @skip_pipeline_clear: if to skip pipeline clear waiting (don't wait)
* @pipeline_clear_option: options for pipeline clear waiting
@@ -267,6 +269,7 @@
struct ipahal_imm_cmd_dma_shared_mem {
u32 size;
u32 local_addr;
+ bool clear_after_read;
bool is_read;
bool skip_pipeline_clear;
enum ipahal_pipeline_clear_option pipeline_clear_options;
@@ -322,13 +325,13 @@
/*
* struct ipahal_imm_cmd_pyld - Immediate cmd payload information
* @len: length of the buffer
- * @reserved: padding bytes to make data buffer aligned
+ * @opcode: opcode of the immediate command
* @data: buffer contains the immediate command payload. Buffer goes
* back to back with this structure
*/
struct ipahal_imm_cmd_pyld {
u16 len;
- u16 reserved;
+ u16 opcode;
u8 data[0];
};
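/*
 * With the opcode stored in the payload, callers no longer query ipahal
 * separately; construct-and-send collapses to the pattern below (sketch
 * mirroring the tag_desc changes earlier in this patch; error handling
 * trimmed):
 */
static int send_reg_write_example(struct ipahal_imm_cmd_register_write *params)
{
	struct ipa3_desc desc = { 0 };
	struct ipahal_imm_cmd_pyld *cmd_pyld;

	cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
					    params, false);
	if (!cmd_pyld)
		return -ENOMEM;
	desc.opcode = cmd_pyld->opcode;	/* composed at construction time */
	desc.pyld = cmd_pyld->data;
	desc.len = cmd_pyld->len;
	desc.type = IPA_IMM_CMD_DESC;
	/* ... send desc, then ipahal_destroy_imm_cmd(cmd_pyld); */
	return 0;
}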
@@ -342,23 +345,6 @@
const char *ipahal_imm_cmd_name_str(enum ipahal_imm_cmd_name cmd_name);
/*
- * ipahal_imm_cmd_get_opcode() - Get the fixed opcode of the immediate command
- */
-u16 ipahal_imm_cmd_get_opcode(enum ipahal_imm_cmd_name cmd);
-
-/*
- * ipahal_imm_cmd_get_opcode_param() - Get the opcode of an immediate command
- * that supports dynamic opcode
- * Some commands opcode are not totaly fixed, but part of it is
- * a supplied parameter. E.g. Low-Byte is fixed and Hi-Byte
- * is a given parameter.
- * This API will return the composed opcode of the command given
- * the parameter
- * Note: Use this API only for immediate comamnds that support Dynamic Opcode
- */
-u16 ipahal_imm_cmd_get_opcode_param(enum ipahal_imm_cmd_name cmd, int param);
-
-/*
* ipahal_construct_imm_cmd() - Construct immdiate command
* This function builds imm cmd bulk that can be be sent to IPA
* The command will be allocated dynamically.
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
index d6a496e..804c554 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
@@ -278,7 +278,7 @@
* in H/W format.
* Write value to register. Allows reg changes to be synced with data packet
* and other immediate command. Can be used to access the sram
- * @sw_rsvd: Ignored by H/W. My be used by S/W
+ * @sw_rsvd: Ignored by H/W. May be used by S/W
* @skip_pipeline_clear: 0 to wait until IPA pipeline is clear. 1 don't wait
* @offset: offset from IPA base address - Lower 16bit of the IPA reg addr
* @value: value to write to register
@@ -301,6 +301,29 @@
};
/*
+ * struct ipa_imm_cmd_hw_register_write_v_4_0 - REGISTER_WRITE command payload
+ * in H/W format.
+ * Write value to register. Allows reg changes to be synced with data packet
+ * and other immediate command. Can be used to access the sram
+ * @sw_rsvd: Ignored by H/W. May be used by S/W
+ * @offset_high: high bits of the Offset field - bits 17-20
+ * @rsvd: reserved - should be set to zero
+ * @offset: offset from IPA base address - Lower 16bit of the IPA reg addr
+ * @value: value to write to register
+ * @value_mask: mask specifying which value bits to write to the register
+ * @rsvd2: reserved - should be set to zero
+ */
+struct ipa_imm_cmd_hw_register_write_v_4_0 {
+ u64 sw_rsvd:11;
+ u64 offset_high:4;
+ u64 rsvd:1;
+ u64 offset:16;
+ u64 value:32;
+ u64 value_mask:32;
+ u64 rsvd2:32;
+};
+
+/*
* struct ipa_imm_cmd_hw_dma_shared_mem - DMA_SHARED_MEM command payload
* in H/W format.
* Perform mem copy into or out of the SW area of IPA local mem
@@ -331,6 +354,31 @@
};
/*
+ * struct ipa_imm_cmd_hw_dma_shared_mem_v_4_0 - DMA_SHARED_MEM command payload
+ * in H/W format.
+ * Perform mem copy into or out of the SW area of IPA local mem
+ * @sw_rsvd: Ignored by H/W. May be used by S/W
+ * @size: Size in bytes of data to copy. Expected size is up to 2K bytes
+ * @clear_after_read: Clear local memory at the end of a read operation;
+ * allows an atomic read-and-clear if HPS is clear. Ignored for writes.
+ * @local_addr: Address in IPA local memory
+ * @direction: Read or write?
+ * 0: IPA write, Write to local address from system address
+ * 1: IPA read, Read from local address to system address
+ * @rsvd: reserved - should be set to zero
+ * @system_addr: Address in system memory
+ */
+struct ipa_imm_cmd_hw_dma_shared_mem_v_4_0 {
+ u64 sw_rsvd:15;
+ u64 clear_after_read:1;
+ u64 size:16;
+ u64 local_addr:16;
+ u64 direction:1;
+ u64 rsvd:15;
+ u64 system_addr:64;
+};
+
+/*
* struct ipa_imm_cmd_hw_ip_packet_tag_status -
* IP_PACKET_TAG_STATUS command payload in H/W format.
* This cmd is used for to allow SW to track HW processing by setting a TAG
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
index d369e82..1a119b9 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -78,6 +78,7 @@
__stringify(IPA_RX_HPS_CLIENTS_MIN_DEPTH_1),
__stringify(IPA_RX_HPS_CLIENTS_MAX_DEPTH_0),
__stringify(IPA_RX_HPS_CLIENTS_MAX_DEPTH_1),
+ __stringify(IPA_HPS_FTCH_ARB_QUEUE_WEIGHT),
__stringify(IPA_QSB_MAX_WRITES),
__stringify(IPA_QSB_MAX_READS),
__stringify(IPA_TX_CFG),
@@ -355,6 +356,29 @@
IPA_ENDP_STATUS_n_STATUS_LOCATION_BMSK);
}
+static void ipareg_construct_endp_status_n_v4_0(
+ enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+ struct ipahal_reg_ep_cfg_status *ep_status =
+ (struct ipahal_reg_ep_cfg_status *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, ep_status->status_en,
+ IPA_ENDP_STATUS_n_STATUS_EN_SHFT,
+ IPA_ENDP_STATUS_n_STATUS_EN_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, ep_status->status_ep,
+ IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT,
+ IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, ep_status->status_location,
+ IPA_ENDP_STATUS_n_STATUS_LOCATION_SHFT,
+ IPA_ENDP_STATUS_n_STATUS_LOCATION_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, ep_status->status_pkt_suppress,
+ IPA_ENDP_STATUS_n_STATUS_PKT_SUPPRESS_SHFT,
+ IPA_ENDP_STATUS_n_STATUS_PKT_SUPPRESS_BMSK);
+}
+
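/*
 * IPA_SETFIELD_IN_REG ORs a shifted, masked field into the register image.
 * A standalone illustration with the ENDP_STATUS_n fields used above (the
 * local SETFIELD macro and the EN bit position are assumptions; the real
 * definitions live in the ipahal headers):
 */
#include <stdint.h>

#define SETFIELD(reg, val, shft, bmsk)	((reg) |= (((val) << (shft)) & (bmsk)))

static uint32_t build_endp_status(int en, int ep, int loc, int suppress)
{
	uint32_t val = 0;

	SETFIELD(val, en, 0, 0x1);		/* STATUS_EN (assumed bit 0) */
	SETFIELD(val, ep, 1, 0x3e);		/* STATUS_ENDP */
	SETFIELD(val, loc, 8, 0x100);		/* STATUS_LOCATION */
	SETFIELD(val, suppress, 9, 0x200);	/* STATUS_PKT_SUPPRESS (v4.0) */
	return val;
}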
static void ipareg_construct_qcncm(
enum ipahal_reg_name reg, const void *fields, u32 *val)
{
@@ -896,12 +920,14 @@
static void ipareg_construct_qsb_max_writes(enum ipahal_reg_name reg,
const void *fields, u32 *val)
{
- int *qsb_max_writes = (int *)fields;
+ struct ipahal_reg_qsb_max_writes *max_writes;
- IPA_SETFIELD_IN_REG(*val, qsb_max_writes[0],
+ max_writes = (struct ipahal_reg_qsb_max_writes *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, max_writes->qmb_0_max_writes,
IPA_QSB_MAX_WRITES_GEN_QMB_0_MAX_WRITES_SHFT,
IPA_QSB_MAX_WRITES_GEN_QMB_0_MAX_WRITES_BMSK);
- IPA_SETFIELD_IN_REG(*val, qsb_max_writes[1],
+ IPA_SETFIELD_IN_REG(*val, max_writes->qmb_1_max_writes,
IPA_QSB_MAX_WRITES_GEN_QMB_1_MAX_WRITES_SHFT,
IPA_QSB_MAX_WRITES_GEN_QMB_1_MAX_WRITES_BMSK);
}
@@ -909,16 +935,39 @@
static void ipareg_construct_qsb_max_reads(enum ipahal_reg_name reg,
const void *fields, u32 *val)
{
- int *qsb_max_reads = (int *)fields;
+ struct ipahal_reg_qsb_max_reads *max_reads;
- IPA_SETFIELD_IN_REG(*val, qsb_max_reads[0],
+ max_reads = (struct ipahal_reg_qsb_max_reads *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, max_reads->qmb_0_max_reads,
IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_SHFT,
IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BMSK);
- IPA_SETFIELD_IN_REG(*val, qsb_max_reads[1],
+ IPA_SETFIELD_IN_REG(*val, max_reads->qmb_1_max_reads,
IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_SHFT,
IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BMSK);
}
+static void ipareg_construct_qsb_max_reads_v4_0(enum ipahal_reg_name reg,
+ const void *fields, u32 *val)
+{
+ struct ipahal_reg_qsb_max_reads *max_reads;
+
+ max_reads = (struct ipahal_reg_qsb_max_reads *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, max_reads->qmb_0_max_reads,
+ IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_SHFT,
+ IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BMSK);
+ IPA_SETFIELD_IN_REG(*val, max_reads->qmb_1_max_reads,
+ IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_SHFT,
+ IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BMSK);
+ IPA_SETFIELD_IN_REG(*val, max_reads->qmb_0_max_read_beats,
+ IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BEATS_SHFT_V4_0,
+ IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BEATS_BMSK_V4_0);
+ IPA_SETFIELD_IN_REG(*val, max_reads->qmb_1_max_read_beats,
+ IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BEATS_SHFT_V4_0,
+ IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BEATS_BMSK_V4_0);
+}
+
static void ipareg_parse_tx_cfg(enum ipahal_reg_name reg,
void *fields, u32 val)
{
@@ -934,9 +983,44 @@
IPA_TX_CFG_TX1_PREFETCH_DISABLE_SHFT_V3_5,
IPA_TX_CFG_TX1_PREFETCH_DISABLE_BMSK_V3_5);
- tx_cfg->prefetch_almost_empty_size = IPA_GETFIELD_FROM_REG(val,
+ tx_cfg->tx0_prefetch_almost_empty_size = IPA_GETFIELD_FROM_REG(val,
IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_SHFT_V3_5,
IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_BMSK_V3_5);
+
+ tx_cfg->tx1_prefetch_almost_empty_size =
+ tx_cfg->tx0_prefetch_almost_empty_size;
+}
+
+static void ipareg_parse_tx_cfg_v4_0(enum ipahal_reg_name reg,
+ void *fields, u32 val)
+{
+ struct ipahal_reg_tx_cfg *tx_cfg;
+
+ tx_cfg = (struct ipahal_reg_tx_cfg *)fields;
+
+ tx_cfg->tx0_prefetch_almost_empty_size = IPA_GETFIELD_FROM_REG(val,
+ IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX0_SHFT_V4_0,
+ IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX0_BMSK_V4_0);
+
+ tx_cfg->tx1_prefetch_almost_empty_size = IPA_GETFIELD_FROM_REG(val,
+ IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX1_SHFT_V4_0,
+ IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX1_BMSK_V4_0);
+
+ tx_cfg->dmaw_scnd_outsd_pred_en = IPA_GETFIELD_FROM_REG(val,
+ IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_EN_SHFT_V4_0,
+ IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_EN_BMSK_V4_0);
+
+ tx_cfg->dmaw_scnd_outsd_pred_threshold = IPA_GETFIELD_FROM_REG(val,
+ IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_THRESHOLD_SHFT_V4_0,
+ IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_THRESHOLD_BMSK_V4_0);
+
+ tx_cfg->dmaw_max_beats_256_dis = IPA_GETFIELD_FROM_REG(val,
+ IPA_TX_CFG_DMAW_MAX_BEATS_256_DIS_SHFT_V4_0,
+ IPA_TX_CFG_DMAW_MAX_BEATS_256_DIS_BMSK_V4_0);
+
+ tx_cfg->pa_mask_en = IPA_GETFIELD_FROM_REG(val,
+ IPA_TX_CFG_PA_MASK_EN_SHFT_V4_0,
+ IPA_TX_CFG_PA_MASK_EN_BMSK_V4_0);
}
static void ipareg_construct_tx_cfg(enum ipahal_reg_name reg,
@@ -946,6 +1030,10 @@
tx_cfg = (struct ipahal_reg_tx_cfg *)fields;
+ if (tx_cfg->tx0_prefetch_almost_empty_size !=
+ tx_cfg->tx1_prefetch_almost_empty_size)
+ ipa_assert();
+
IPA_SETFIELD_IN_REG(*val, tx_cfg->tx0_prefetch_disable,
IPA_TX_CFG_TX0_PREFETCH_DISABLE_SHFT_V3_5,
IPA_TX_CFG_TX0_PREFETCH_DISABLE_BMSK_V3_5);
@@ -954,11 +1042,43 @@
IPA_TX_CFG_TX1_PREFETCH_DISABLE_SHFT_V3_5,
IPA_TX_CFG_TX1_PREFETCH_DISABLE_BMSK_V3_5);
- IPA_SETFIELD_IN_REG(*val, tx_cfg->prefetch_almost_empty_size,
+ IPA_SETFIELD_IN_REG(*val, tx_cfg->tx0_prefetch_almost_empty_size,
IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_SHFT_V3_5,
IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_BMSK_V3_5);
}
+static void ipareg_construct_tx_cfg_v4_0(enum ipahal_reg_name reg,
+ const void *fields, u32 *val)
+{
+ struct ipahal_reg_tx_cfg *tx_cfg;
+
+ tx_cfg = (struct ipahal_reg_tx_cfg *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, tx_cfg->tx0_prefetch_almost_empty_size,
+ IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX0_SHFT_V4_0,
+ IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX0_BMSK_V4_0);
+
+ IPA_SETFIELD_IN_REG(*val, tx_cfg->tx1_prefetch_almost_empty_size,
+ IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX1_SHFT_V4_0,
+ IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX1_BMSK_V4_0);
+
+ IPA_SETFIELD_IN_REG(*val, tx_cfg->dmaw_scnd_outsd_pred_threshold,
+ IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_THRESHOLD_SHFT_V4_0,
+ IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_THRESHOLD_BMSK_V4_0);
+
+ IPA_SETFIELD_IN_REG(*val, tx_cfg->dmaw_max_beats_256_dis,
+ IPA_TX_CFG_DMAW_MAX_BEATS_256_DIS_SHFT_V4_0,
+ IPA_TX_CFG_DMAW_MAX_BEATS_256_DIS_BMSK_V4_0);
+
+ IPA_SETFIELD_IN_REG(*val, tx_cfg->dmaw_scnd_outsd_pred_en,
+ IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_EN_SHFT_V4_0,
+ IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_EN_BMSK_V4_0);
+
+ IPA_SETFIELD_IN_REG(*val, tx_cfg->pa_mask_en,
+ IPA_TX_CFG_PA_MASK_EN_SHFT_V4_0,
+ IPA_TX_CFG_PA_MASK_EN_BMSK_V4_0);
+}
+
static void ipareg_construct_idle_indication_cfg(enum ipahal_reg_name reg,
const void *fields, u32 *val)
{
@@ -977,6 +1097,59 @@
IPA_IDLE_INDICATION_CFG_CONST_NON_IDLE_ENABLE_BMSK_V3_5);
}
+static void ipareg_construct_hps_queue_weights(enum ipahal_reg_name reg,
+ const void *fields, u32 *val)
+{
+ struct ipahal_reg_rx_hps_weights *hps_weights;
+
+ hps_weights = (struct ipahal_reg_rx_hps_weights *)fields;
+
+ IPA_SETFIELD_IN_REG(*val,
+ hps_weights->hps_queue_weight_0,
+ IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_0_SHFT,
+ IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_0_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val,
+ hps_weights->hps_queue_weight_1,
+ IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_1_SHFT,
+ IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_1_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val,
+ hps_weights->hps_queue_weight_2,
+ IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_2_SHFT,
+ IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_2_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val,
+ hps_weights->hps_queue_weight_3,
+ IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_3_SHFT,
+ IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_3_BMSK);
+}
+
+static void ipareg_parse_hps_queue_weights(
+ enum ipahal_reg_name reg, void *fields, u32 val)
+{
+ struct ipahal_reg_rx_hps_weights *hps_weights =
+ (struct ipahal_reg_rx_hps_weights *)fields;
+
+ memset(hps_weights, 0, sizeof(struct ipahal_reg_rx_hps_weights));
+
+ hps_weights->hps_queue_weight_0 = IPA_GETFIELD_FROM_REG(val,
+ IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_0_SHFT,
+ IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_0_BMSK);
+
+ hps_weights->hps_queue_weight_1 = IPA_GETFIELD_FROM_REG(val,
+ IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_1_SHFT,
+ IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_1_BMSK);
+
+ hps_weights->hps_queue_weight_2 = IPA_GETFIELD_FROM_REG(val,
+ IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_2_SHFT,
+ IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_2_BMSK);
+
+ hps_weights->hps_queue_weight_3 = IPA_GETFIELD_FROM_REG(val,
+ IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_3_SHFT,
+ IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_3_BMSK);
+}
+
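/*
 * The four RX HPS queue weights are 4-bit nibbles of a single register:
 * weight_0 in bits [3:0] through weight_3 in bits [15:12]. Standalone
 * round-trip of the construct/parse pair above:
 */
#include <assert.h>
#include <stdint.h>

static void hps_weights_roundtrip(void)
{
	uint32_t w[4] = { 3, 5, 7, 9 }, val = 0;
	int i;

	for (i = 0; i < 4; i++)
		val |= (w[i] & 0xF) << (4 * i);			/* construct */
	assert(val == 0x9753);
	for (i = 0; i < 4; i++)
		assert(((val >> (4 * i)) & 0xF) == w[i]);	/* parse */
}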
/*
* struct ipahal_reg_obj - Register H/W information for specific IPA version
* @construct - CB to construct register value from abstracted structure
@@ -1266,6 +1439,41 @@
[IPA_HW_v3_5][IPA_IDLE_INDICATION_CFG] = {
ipareg_construct_idle_indication_cfg, ipareg_parse_dummy,
0x00000220, 0},
+ [IPA_HW_v3_5][IPA_HPS_FTCH_ARB_QUEUE_WEIGHT] = {
+ ipareg_construct_hps_queue_weights,
+ ipareg_parse_hps_queue_weights, 0x000005a4, 0},
+
+ /* IPAv4.0 */
+ [IPA_HW_v4_0][IPA_TX_CFG] = {
+ ipareg_construct_tx_cfg_v4_0, ipareg_parse_tx_cfg_v4_0,
+ 0x000001FC, 0},
+ [IPA_HW_v4_0][IPA_DEBUG_CNT_REG_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ -1, 0},
+ [IPA_HW_v4_0][IPA_DEBUG_CNT_CTRL_n] = {
+ ipareg_construct_debug_cnt_ctrl_n, ipareg_parse_dummy,
+ -1, 0},
+ [IPA_HW_v4_0][IPA_QCNCM] = {
+ ipareg_construct_qcncm, ipareg_parse_qcncm,
+ -1, 0},
+ [IPA_HW_v4_0][IPA_SINGLE_NDP_MODE] = {
+ ipareg_construct_single_ndp_mode, ipareg_parse_single_ndp_mode,
+ -1, 0},
+ [IPA_HW_v4_0][IPA_QSB_MAX_READS] = {
+ ipareg_construct_qsb_max_reads_v4_0, ipareg_parse_dummy,
+ 0x00000078, 0},
+ [IPA_HW_v4_0][IPA_FILT_ROUT_HASH_FLUSH] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x0000014c, 0},
+ [IPA_HW_v4_0][IPA_STATE_AGGR_ACTIVE] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x000000b4, 0},
+ [IPA_HW_v4_0][IPA_ENDP_INIT_ROUTE_n] = {
+ ipareg_construct_endp_init_route_n, ipareg_parse_dummy,
+ -1, 0},
+ [IPA_HW_v4_0][IPA_ENDP_STATUS_n] = {
+ ipareg_construct_endp_status_n_v4_0, ipareg_parse_dummy,
+ 0x00000840, 0x70},
};
/*
@@ -1597,11 +1805,16 @@
if (ipahal_ctx->hw_type <= IPA_HW_v3_1) {
shft = IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT;
bmsk = IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK;
- } else {
+ } else if (ipahal_ctx->hw_type <= IPA_HW_v3_5_1) {
shft =
IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V3_5;
bmsk =
IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V3_5;
+ } else {
+ shft =
+ IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V4_0;
+ bmsk =
+ IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V4_0;
}
IPA_SETFIELD_IN_REG(valmask->val, 1 << ep_idx, shft, bmsk);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
index 4490103..c9293b8 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
@@ -81,6 +81,7 @@
IPA_RX_HPS_CLIENTS_MIN_DEPTH_1,
IPA_RX_HPS_CLIENTS_MAX_DEPTH_0,
IPA_RX_HPS_CLIENTS_MAX_DEPTH_1,
+ IPA_HPS_FTCH_ARB_QUEUE_WEIGHT,
IPA_QSB_MAX_WRITES,
IPA_QSB_MAX_READS,
IPA_TX_CFG,
@@ -168,11 +169,13 @@
* If set to 0 (default), PKT-STATUS will be appended before the packet
* for this endpoint. If set to 1, PKT-STATUS will be appended after the
* packet for this endpoint. Valid only for Output Pipes (IPA Producer)
+ * @status_pkt_suppress: If set, suppress generation of the status
+ * packet for this endpoint (IPAv4.0 onwards)
*/
struct ipahal_reg_ep_cfg_status {
bool status_en;
u8 status_ep;
bool status_location;
+ u8 status_pkt_suppress;
};
/*
@@ -272,6 +275,20 @@
};
/*
+* struct ipahal_reg_rx_hps_weights - weight values for RX HPS clients
+* @hps_queue_weight_0 - 4 bit Weight for RX_HPS_CMDQ #0 (3:0)
+* @hps_queue_weight_1 - 4 bit Weight for RX_HPS_CMDQ #1 (7:4)
+* @hps_queue_weight_2 - 4 bit Weight for RX_HPS_CMDQ #2 (11:8)
+* @hps_queue_weight_3 - 4 bit Weight for RX_HPS_CMDQ #3 (15:12)
+*/
+struct ipahal_reg_rx_hps_weights {
+ u32 hps_queue_weight_0;
+ u32 hps_queue_weight_1;
+ u32 hps_queue_weight_2;
+ u32 hps_queue_weight_3;
+};
+
+/*
* struct ipahal_reg_valmask - holding values and masking for registers
* HAL application may require only value and mask of it for some
* register fields.
@@ -322,15 +339,50 @@
};
/*
+ * struct ipahal_reg_qsb_max_writes - IPA QSB Max Writes register
+ * @qmb_0_max_writes: Max number of outstanding writes for GEN_QMB_0
+ * @qmb_1_max_writes: Max number of outstanding writes for GEN_QMB_1
+ */
+struct ipahal_reg_qsb_max_writes {
+ u32 qmb_0_max_writes;
+ u32 qmb_1_max_writes;
+};
+
+/*
+ * struct ipahal_reg_qsb_max_reads - IPA QSB Max Reads register
+ * @qmb_0_max_reads: Max number of outstanding reads for GEN_QMB_0
+ * @qmb_1_max_reads: Max number of outstanding reads for GEN_QMB_1
+ * @qmb_0_max_read_beats: Max number of outstanding read beats for GEN_QMB_0
+ * @qmb_1_max_read_beats: Max number of outstanding read beats for GEN_QMB_1
+ */
+struct ipahal_reg_qsb_max_reads {
+ u32 qmb_0_max_reads;
+ u32 qmb_1_max_reads;
+ u32 qmb_0_max_read_beats;
+ u32 qmb_1_max_read_beats;
+};
+
+/*
* struct ipahal_reg_tx_cfg - IPA TX_CFG register
* @tx0_prefetch_disable: Disable prefetch on TX0
* @tx1_prefetch_disable: Disable prefetch on TX1
- * @prefetch_almost_empty_size: Prefetch almost empty size
+ * @tx0_prefetch_almost_empty_size: Prefetch almost empty size on TX0
+ * @tx1_prefetch_almost_empty_size: Prefetch almost empty size on TX1
+ * @dmaw_scnd_outsd_pred_threshold:
+ * @dmaw_max_beats_256_dis:
+ * @dmaw_scnd_outsd_pred_en:
+ * @pa_mask_en:
*/
struct ipahal_reg_tx_cfg {
bool tx0_prefetch_disable;
bool tx1_prefetch_disable;
- u16 prefetch_almost_empty_size;
+ u32 tx0_prefetch_almost_empty_size;
+ u32 tx1_prefetch_almost_empty_size;
+ u32 dmaw_scnd_outsd_pred_threshold;
+ u32 dmaw_max_beats_256_dis;
+ u32 dmaw_scnd_outsd_pred_en;
+ u32 pa_mask_en;
};
/*
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
index 6d69b15..17bad03 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
@@ -93,6 +93,8 @@
#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT 0
#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V3_5 0xfffff
#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V3_5 0
+#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V4_0 0x7fffff
+#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V4_0 0
/* IPA_ENDP_INIT_ROUTE_n register */
#define IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_BMSK 0x1f
@@ -129,6 +131,7 @@
/* IPA_ENDP_INIT_HOL_BLOCK_EN_n register */
#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_RMSK 0x1
#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_MAX 19
+#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_MAX_V_4_0 22
#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_BMSK 0x1
#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_SHFT 0x0
@@ -230,6 +233,8 @@
#define IPA_QCNCM_MODE_EN_SHFT 0
/* IPA_ENDP_STATUS_n register */
+#define IPA_ENDP_STATUS_n_STATUS_PKT_SUPPRESS_BMSK 0x200
+#define IPA_ENDP_STATUS_n_STATUS_PKT_SUPPRESS_SHFT 0x9
#define IPA_ENDP_STATUS_n_STATUS_LOCATION_BMSK 0x100
#define IPA_ENDP_STATUS_n_STATUS_LOCATION_SHFT 0x8
#define IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK 0x3e
@@ -289,7 +294,6 @@
#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK_V3_5 0x3F
#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT_V3_5 0
-
/* IPA_IPA_IPA_RX_HPS_CLIENTS_MIN/MAX_DEPTH_0/1 registers */
#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(n) (0x7F << (8 * (n)))
#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK_V3_5(n) \
@@ -308,6 +312,12 @@
#define IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BMSK (0xf0)
#define IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_SHFT (4)
+/* IPA_QSB_MAX_READS_BEATS register */
+#define IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BEATS_BMSK_V4_0 (0xff0000)
+#define IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BEATS_SHFT_V4_0 (0x10)
+#define IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BEATS_BMSK_V4_0 (0xff000000)
+#define IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BEATS_SHFT_V4_0 (0x18)
+
/* IPA_TX_CFG register */
#define IPA_TX_CFG_TX0_PREFETCH_DISABLE_BMSK_V3_5 (0x1)
#define IPA_TX_CFG_TX0_PREFETCH_DISABLE_SHFT_V3_5 (0)
@@ -316,10 +326,34 @@
#define IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_BMSK_V3_5 (0x1C)
#define IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_SHFT_V3_5 (2)
+/* IPA_TX_CFG register v4.0 */
+#define IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX1_BMSK_V4_0 (0x1e000)
+#define IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX1_SHFT_V4_0 (0xd)
+#define IPA_TX_CFG_PA_MASK_EN_BMSK_V4_0 (0x1000)
+#define IPA_TX_CFG_PA_MASK_EN_SHFT_V4_0 (0xc)
+#define IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_EN_BMSK_V4_0 (0x800)
+#define IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_EN_SHFT_V4_0 (0xb)
+#define IPA_TX_CFG_DMAW_MAX_BEATS_256_DIS_BMSK_V4_0 (0x400)
+#define IPA_TX_CFG_DMAW_MAX_BEATS_256_DIS_SHFT_V4_0 (0xa)
+#define IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_THRESHOLD_BMSK_V4_0 (0x3c0)
+#define IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_THRESHOLD_SHFT_V4_0 (0x6)
+#define IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX0_BMSK_V4_0 (0x3c)
+#define IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX0_SHFT_V4_0 (0x2)
+
/* IPA_IDLE_INDICATION_CFG regiser */
#define IPA_IDLE_INDICATION_CFG_ENTER_IDLE_DEBOUNCE_THRESH_BMSK_V3_5 (0xffff)
#define IPA_IDLE_INDICATION_CFG_ENTER_IDLE_DEBOUNCE_THRESH_SHFT_V3_5 (0)
#define IPA_IDLE_INDICATION_CFG_CONST_NON_IDLE_ENABLE_BMSK_V3_5 (0x10000)
#define IPA_IDLE_INDICATION_CFG_CONST_NON_IDLE_ENABLE_SHFT_V3_5 (16)
+/* IPA_HPS_FTCH_ARB_QUEUE_WEIGHT register */
+#define IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_0_BMSK (0xf)
+#define IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_0_SHFT (0x0)
+#define IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_1_BMSK (0xf0)
+#define IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_1_SHFT (0x4)
+#define IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_2_BMSK (0xf00)
+#define IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_2_SHFT (0x8)
+#define IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_3_BMSK (0xf000)
+#define IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_3_SHFT (0xc)
+
#endif /* _IPAHAL_REG_I_H_ */
diff --git a/drivers/platform/msm/qcom-geni-se.c b/drivers/platform/msm/qcom-geni-se.c
new file mode 100644
index 0000000..1fffa7c
--- /dev/null
+++ b/drivers/platform/msm/qcom-geni-se.c
@@ -0,0 +1,1269 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/dma-iommu.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/ipc_logging.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+#include <linux/qcom-geni-se.h>
+#include <linux/spinlock.h>
+
+#define GENI_SE_IOMMU_VA_START (0x40000000)
+#define GENI_SE_IOMMU_VA_SIZE (0xC0000000)
+
+#define NUM_LOG_PAGES 2
+
+static unsigned long default_bus_bw_set[] = {0, 19200000, 50000000, 100000000};
+
+/**
+ * struct geni_se_device - Data structure to represent the QUPv3 Core
+ * @dev: Device pointer of the QUPv3 core.
+ * @cb_dev: Device pointer of the context bank in the IOMMU.
+ * @iommu_lock: Lock to protect IOMMU Mapping & attachment.
+ * @iommu_map: IOMMU map of the memory space supported by this core.
+ * @iommu_s1_bypass: Bypass IOMMU stage 1 translation.
+ * @base: Base address of this instance of QUPv3 core.
+ * @bus_bw: Client handle to the bus bandwidth request.
+ * @bus_mas_id: Master Endpoint ID for bus BW request.
+ * @bus_slv_id: Slave Endpoint ID for bus BW request.
+ * @ab_ib_lock: Lock to protect the bus ab & ib values, list.
+ * @ab_list_head: Sorted resource list based on average bus BW.
+ * @ib_list_head: Sorted resource list based on instantaneous bus BW.
+ * @cur_ab: Current Bus Average BW request value.
+ * @cur_ib: Current Bus Instantaneous BW request value.
+ * @bus_bw_set_size: Number of entries in the bus clock plan.
+ * @bus_bw_set: Clock plan for the bus driver.
+ * @cur_bus_bw_idx: Current index within the bus clock plan.
+ * @log_ctx: Logging context to hold the debug information.
+ */
+struct geni_se_device {
+ struct device *dev;
+ struct device *cb_dev;
+ struct mutex iommu_lock;
+ struct dma_iommu_mapping *iommu_map;
+ bool iommu_s1_bypass;
+ void __iomem *base;
+ struct msm_bus_client_handle *bus_bw;
+ u32 bus_mas_id;
+ u32 bus_slv_id;
+ spinlock_t ab_ib_lock;
+ struct list_head ab_list_head;
+ struct list_head ib_list_head;
+ unsigned long cur_ab;
+ unsigned long cur_ib;
+ int bus_bw_set_size;
+ unsigned long *bus_bw_set;
+ int cur_bus_bw_idx;
+ void *log_ctx;
+};
+
+/* Offset of QUPV3 Hardware Version Register */
+#define QUPV3_HW_VER (0x4)
+
+#define HW_VER_MAJOR_MASK GENMASK(31, 28)
+#define HW_VER_MAJOR_SHFT 28
+#define HW_VER_MINOR_MASK GENMASK(27, 16)
+#define HW_VER_MINOR_SHFT 16
+#define HW_VER_STEP_MASK GENMASK(15, 0)
+
+static int geni_se_iommu_map_and_attach(struct geni_se_device *geni_se_dev);
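/*
 * The QUPV3_HW_VER fields decode by mask-then-shift; e.g. a raw value of
 * 0x10020000 is major 1, minor 2, step 0. Minimal sketch (helper name
 * hypothetical):
 */
static void geni_se_decode_ver(unsigned int ver, unsigned int *major,
			       unsigned int *minor, unsigned int *step)
{
	*major = (ver & HW_VER_MAJOR_MASK) >> HW_VER_MAJOR_SHFT;
	*minor = (ver & HW_VER_MINOR_MASK) >> HW_VER_MINOR_SHFT;
	*step = ver & HW_VER_STEP_MASK;
}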
+
+/**
+ * geni_read_reg_nolog() - Helper function to read from a GENI register
+ * @base: Base address of the serial engine's register block.
+ * @offset: Offset within the serial engine's register block.
+ *
+ * Return: Contents of the register.
+ */
+unsigned int geni_read_reg_nolog(void __iomem *base, int offset)
+{
+ return readl_relaxed_no_log(base + offset);
+}
+EXPORT_SYMBOL(geni_read_reg_nolog);
+
+/**
+ * geni_write_reg_nolog() - Helper function to write into a GENI register
+ * @value: Value to be written into the register.
+ * @base: Base address of the serial engine's register block.
+ * @offset: Offset within the serial engine's register block.
+ */
+void geni_write_reg_nolog(unsigned int value, void __iomem *base, int offset)
+{
+ return writel_relaxed_no_log(value, (base + offset));
+}
+EXPORT_SYMBOL(geni_write_reg_nolog);
+
+/**
+ * geni_read_reg() - Helper function to read from a GENI register
+ * @base: Base address of the serial engine's register block.
+ * @offset: Offset within the serial engine's register block.
+ *
+ * Return: Contents of the register.
+ */
+unsigned int geni_read_reg(void __iomem *base, int offset)
+{
+ return readl_relaxed(base + offset);
+}
+EXPORT_SYMBOL(geni_read_reg);
+
+/**
+ * geni_write_reg() - Helper function to write into a GENI register
+ * @value: Value to be written into the register.
+ * @base: Base address of the serial engine's register block.
+ * @offset: Offset within the serial engine's register block.
+ */
+void geni_write_reg(unsigned int value, void __iomem *base, int offset)
+{
+ return writel_relaxed(value, (base + offset));
+}
+EXPORT_SYMBOL(geni_write_reg);
+
+/**
+ * get_se_proto() - Read the protocol configured for a serial engine
+ * @base: Base address of the serial engine's register block.
+ *
+ * Return: Protocol value as configured in the serial engine.
+ */
+int get_se_proto(void __iomem *base)
+{
+ int proto;
+
+ proto = ((geni_read_reg(base, GENI_FW_REVISION_RO)
+ & FW_REV_PROTOCOL_MSK) >> FW_REV_PROTOCOL_SHFT);
+ return proto;
+}
+EXPORT_SYMBOL(get_se_proto);
+
+static int se_geni_irq_en(void __iomem *base)
+{
+ unsigned int common_geni_m_irq_en;
+ unsigned int common_geni_s_irq_en;
+
+ common_geni_m_irq_en = geni_read_reg(base, SE_GENI_M_IRQ_EN);
+ common_geni_s_irq_en = geni_read_reg(base, SE_GENI_S_IRQ_EN);
+ /* Common to all modes */
+ common_geni_m_irq_en |= M_COMMON_GENI_M_IRQ_EN;
+ common_geni_s_irq_en |= S_COMMON_GENI_S_IRQ_EN;
+
+ geni_write_reg(common_geni_m_irq_en, base, SE_GENI_M_IRQ_EN);
+ geni_write_reg(common_geni_s_irq_en, base, SE_GENI_S_IRQ_EN);
+ return 0;
+}
+
+static void se_set_rx_rfr_wm(void __iomem *base, unsigned int rx_wm,
+ unsigned int rx_rfr)
+{
+ geni_write_reg(rx_wm, base, SE_GENI_RX_WATERMARK_REG);
+ geni_write_reg(rx_rfr, base, SE_GENI_RX_RFR_WATERMARK_REG);
+}
+
+static int se_io_set_mode(void __iomem *base)
+{
+ unsigned int io_mode;
+ unsigned int geni_dma_mode;
+
+ io_mode = geni_read_reg(base, SE_IRQ_EN);
+ geni_dma_mode = geni_read_reg(base, SE_GENI_DMA_MODE_EN);
+
+ io_mode |= (GENI_M_IRQ_EN | GENI_S_IRQ_EN);
+ io_mode |= (DMA_TX_IRQ_EN | DMA_RX_IRQ_EN);
+ geni_dma_mode &= ~GENI_DMA_MODE_EN;
+
+ geni_write_reg(io_mode, base, SE_IRQ_EN);
+ geni_write_reg(geni_dma_mode, base, SE_GENI_DMA_MODE_EN);
+ geni_write_reg(0, base, SE_GSI_EVENT_EN);
+ return 0;
+}
+
+static void se_io_init(void __iomem *base)
+{
+ unsigned int io_op_ctrl;
+ unsigned int geni_cgc_ctrl;
+ unsigned int dma_general_cfg;
+
+ geni_cgc_ctrl = geni_read_reg(base, GENI_CGC_CTRL);
+ dma_general_cfg = geni_read_reg(base, SE_DMA_GENERAL_CFG);
+ geni_cgc_ctrl |= DEFAULT_CGC_EN;
+ dma_general_cfg |= (AHB_SEC_SLV_CLK_CGC_ON | DMA_AHB_SLV_CFG_ON |
+ DMA_TX_CLK_CGC_ON | DMA_RX_CLK_CGC_ON);
+ io_op_ctrl = DEFAULT_IO_OUTPUT_CTRL_MSK;
+ geni_write_reg(geni_cgc_ctrl, base, GENI_CGC_CTRL);
+ geni_write_reg(dma_general_cfg, base, SE_DMA_GENERAL_CFG);
+
+ geni_write_reg(io_op_ctrl, base, GENI_OUTPUT_CTRL);
+ geni_write_reg(FORCE_DEFAULT, base, GENI_FORCE_DEFAULT_REG);
+}
+
+/**
+ * geni_se_init() - Initialize the GENI Serial Engine
+ * @base: Base address of the serial engine's register block.
+ * @rx_wm: Receive watermark to be configured.
+ * @rx_rfr: Ready-for-receive watermark to be configured.
+ *
+ * This function is used to initialize the GENI serial engine and configure
+ * the receive and ready-for-receive watermarks.
+ *
+ * Return: 0 on success, standard Linux error codes on failure/error.
+ */
+int geni_se_init(void __iomem *base, unsigned int rx_wm, unsigned int rx_rfr)
+{
+ int ret;
+
+ se_io_init(base);
+ ret = se_io_set_mode(base);
+ if (ret)
+ return ret;
+
+ se_set_rx_rfr_wm(base, rx_wm, rx_rfr);
+ ret = se_geni_irq_en(base);
+ return ret;
+}
+EXPORT_SYMBOL(geni_se_init);
+
+static int geni_se_select_fifo_mode(void __iomem *base)
+{
+ int proto = get_se_proto(base);
+ unsigned int common_geni_m_irq_en;
+ unsigned int common_geni_s_irq_en;
+ unsigned int geni_dma_mode;
+
+ geni_write_reg(0, base, SE_GSI_EVENT_EN);
+ geni_write_reg(0xFFFFFFFF, base, SE_GENI_M_IRQ_CLEAR);
+ geni_write_reg(0xFFFFFFFF, base, SE_GENI_S_IRQ_CLEAR);
+ geni_write_reg(0xFFFFFFFF, base, SE_DMA_TX_IRQ_CLR);
+ geni_write_reg(0xFFFFFFFF, base, SE_DMA_RX_IRQ_CLR);
+ geni_write_reg(0xFFFFFFFF, base, SE_IRQ_EN);
+
+ common_geni_m_irq_en = geni_read_reg(base, SE_GENI_M_IRQ_EN);
+ common_geni_s_irq_en = geni_read_reg(base, SE_GENI_S_IRQ_EN);
+ geni_dma_mode = geni_read_reg(base, SE_GENI_DMA_MODE_EN);
+ if (proto != UART) {
+ common_geni_m_irq_en |=
+ (M_CMD_DONE_EN | M_TX_FIFO_WATERMARK_EN |
+ M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN);
+ common_geni_s_irq_en |= S_CMD_DONE_EN;
+ }
+ geni_dma_mode &= ~GENI_DMA_MODE_EN;
+
+ geni_write_reg(common_geni_m_irq_en, base, SE_GENI_M_IRQ_EN);
+ geni_write_reg(common_geni_s_irq_en, base, SE_GENI_S_IRQ_EN);
+ geni_write_reg(geni_dma_mode, base, SE_GENI_DMA_MODE_EN);
+ return 0;
+}
+
+static int geni_se_select_dma_mode(void __iomem *base)
+{
+ unsigned int geni_dma_mode = 0;
+
+ geni_write_reg(0, base, SE_GSI_EVENT_EN);
+ geni_write_reg(0xFFFFFFFF, base, SE_GENI_M_IRQ_CLEAR);
+ geni_write_reg(0xFFFFFFFF, base, SE_GENI_S_IRQ_CLEAR);
+ geni_write_reg(0xFFFFFFFF, base, SE_DMA_TX_IRQ_CLR);
+ geni_write_reg(0xFFFFFFFF, base, SE_DMA_RX_IRQ_CLR);
+ geni_write_reg(0xFFFFFFFF, base, SE_IRQ_EN);
+
+ geni_dma_mode = geni_read_reg(base, SE_GENI_DMA_MODE_EN);
+ geni_dma_mode |= GENI_DMA_MODE_EN;
+ geni_write_reg(geni_dma_mode, base, SE_GENI_DMA_MODE_EN);
+ return 0;
+}
+
+static int geni_se_select_gsi_mode(void __iomem *base)
+{
+ unsigned int io_mode = 0;
+ unsigned int geni_dma_mode = 0;
+ unsigned int gsi_event_en = 0;
+
+ geni_dma_mode = geni_read_reg(base, SE_GENI_DMA_MODE_EN);
+ gsi_event_en = geni_read_reg(base, SE_GSI_EVENT_EN);
+ io_mode = geni_read_reg(base, SE_IRQ_EN);
+
+ geni_dma_mode |= GENI_DMA_MODE_EN;
+ io_mode &= ~(DMA_TX_IRQ_EN | DMA_RX_IRQ_EN);
+ gsi_event_en |= (DMA_RX_EVENT_EN | DMA_TX_EVENT_EN |
+ GENI_M_EVENT_EN | GENI_S_EVENT_EN);
+
+ geni_write_reg(io_mode, base, SE_IRQ_EN);
+ geni_write_reg(geni_dma_mode, base, SE_GENI_DMA_MODE_EN);
+ geni_write_reg(gsi_event_en, base, SE_GSI_EVENT_EN);
+ return 0;
+}
+
+/**
+ * geni_se_select_mode() - Select the serial engine transfer mode
+ * @base: Base address of the serial engine's register block.
+ * @mode: Transfer mode to be selected.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int geni_se_select_mode(void __iomem *base, int mode)
+{
+ int ret = 0;
+
+ switch (mode) {
+ case FIFO_MODE:
+ geni_se_select_fifo_mode(base);
+ break;
+ case SE_DMA:
+ geni_se_select_dma_mode(base);
+ break;
+ case GSI_DMA:
+ geni_se_select_gsi_mode(base);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(geni_se_select_mode);
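/*
 * Typical bring-up order for a protocol driver sitting on a serial engine
 * (sketch; the watermark values are placeholders, not recommendations):
 */
static int example_se_bringup(void __iomem *base)
{
	int ret;

	ret = geni_se_init(base, 1 /* rx_wm */, 2 /* rx_rfr */);
	if (ret)
		return ret;
	return geni_se_select_mode(base, FIFO_MODE);
}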
+
+/**
+ * geni_setup_m_cmd() - Setup the primary sequencer
+ * @base: Base address of the serial engine's register block.
+ * @cmd: Command/Operation to setup in the primary sequencer.
+ * @params: Parameter for the sequencer command.
+ *
+ * This function is used to configure the primary sequencer with the
+ * command and its associated parameters.
+ */
+void geni_setup_m_cmd(void __iomem *base, u32 cmd, u32 params)
+{
+ u32 m_cmd = (cmd << M_OPCODE_SHFT);
+
+ m_cmd |= (params & M_PARAMS_MSK);
+ geni_write_reg(m_cmd, base, SE_GENI_M_CMD0);
+}
+EXPORT_SYMBOL(geni_setup_m_cmd);
+
+/**
+ * geni_setup_s_cmd() - Setup the secondary sequencer
+ * @base: Base address of the serial engine's register block.
+ * @cmd: Command/Operation to setup in the secondary sequencer.
+ * @params: Parameter for the sequencer command.
+ *
+ * This function is used to configure the secondary sequencer with the
+ * command and its associated parameters.
+ */
+void geni_setup_s_cmd(void __iomem *base, u32 cmd, u32 params)
+{
+ u32 s_cmd = geni_read_reg(base, SE_GENI_S_CMD0);
+
+ s_cmd &= ~(S_OPCODE_MSK | S_PARAMS_MSK);
+ s_cmd |= (cmd << S_OPCODE_SHFT);
+ s_cmd |= (params & S_PARAMS_MSK);
+ geni_write_reg(s_cmd, base, SE_GENI_S_CMD0);
+}
+EXPORT_SYMBOL(geni_setup_s_cmd);
+
+/**
+ * geni_cancel_m_cmd() - Cancel the command configured in the primary sequencer
+ * @base: Base address of the serial engine's register block.
+ *
+ * This function is used to cancel the currently configured command in the
+ * primary sequencer.
+ */
+void geni_cancel_m_cmd(void __iomem *base)
+{
+ geni_write_reg(M_GENI_CMD_CANCEL, base, SE_GENI_M_CMD_CTRL_REG);
+}
+EXPORT_SYMBOL(geni_cancel_m_cmd);
+
+/**
+ * geni_cancel_s_cmd() - Cancel the command configured in the secondary
+ * sequencer
+ * @base: Base address of the serial engine's register block.
+ *
+ * This function is used to cancel the currently configured command in the
+ * secondary sequencer.
+ */
+void geni_cancel_s_cmd(void __iomem *base)
+{
+ geni_write_reg(S_GENI_CMD_CANCEL, base, SE_GENI_S_CMD_CTRL_REG);
+}
+EXPORT_SYMBOL(geni_cancel_s_cmd);
+
+/**
+ * geni_abort_m_cmd() - Abort the command configured in the primary sequencer
+ * @base: Base address of the serial engine's register block.
+ *
+ * This function is used to force abort the currently configured command in the
+ * primary sequencer.
+ */
+void geni_abort_m_cmd(void __iomem *base)
+{
+ geni_write_reg(M_GENI_CMD_ABORT, base, SE_GENI_M_CMD_CTRL_REG);
+}
+EXPORT_SYMBOL(geni_abort_m_cmd);
+
+/**
+ * geni_abort_s_cmd() - Abort the command configured in the secondary
+ * sequencer
+ * @base: Base address of the serial engine's register block.
+ *
+ * This function is used to force abort the currently configured command in the
+ * secondary sequencer.
+ */
+void geni_abort_s_cmd(void __iomem *base)
+{
+ geni_write_reg(S_GENI_CMD_ABORT, base, SE_GENI_S_CMD_CTRL_REG);
+}
+EXPORT_SYMBOL(geni_abort_s_cmd);
+
+/**
+ * get_tx_fifo_depth() - Get the TX fifo depth of the serial engine
+ * @base: Base address of the serial engine's register block.
+ *
+ * This function is used to get the depth i.e. number of elements in the
+ * TX fifo of the serial engine.
+ *
+ * Return: TX fifo depth in units of FIFO words.
+ */
+int get_tx_fifo_depth(void __iomem *base)
+{
+ int tx_fifo_depth;
+
+ tx_fifo_depth = ((geni_read_reg(base, SE_HW_PARAM_0)
+ & TX_FIFO_DEPTH_MSK) >> TX_FIFO_DEPTH_SHFT);
+ return tx_fifo_depth;
+}
+EXPORT_SYMBOL(get_tx_fifo_depth);
+
+/**
+ * get_tx_fifo_width() - Get the TX fifo width of the serial engine
+ * @base: Base address of the serial engine's register block.
+ *
+ * This function is used to get the width i.e. word size per element in the
+ * TX fifo of the serial engine.
+ *
+ * Return: TX fifo width in bits
+ */
+int get_tx_fifo_width(void __iomem *base)
+{
+ int tx_fifo_width;
+
+ tx_fifo_width = ((geni_read_reg(base, SE_HW_PARAM_0)
+ & TX_FIFO_WIDTH_MSK) >> TX_FIFO_WIDTH_SHFT);
+ return tx_fifo_width;
+}
+EXPORT_SYMBOL(get_tx_fifo_width);
+
+/**
+ * get_rx_fifo_depth() - Get the RX fifo depth of the serial engine
+ * @base: Base address of the serial engine's register block.
+ *
+ * This function is used to get the depth i.e. number of elements in the
+ * RX fifo of the serial engine.
+ *
+ * Return: RX fifo depth in units of FIFO words
+ */
+int get_rx_fifo_depth(void __iomem *base)
+{
+ int rx_fifo_depth;
+
+ rx_fifo_depth = ((geni_read_reg(base, SE_HW_PARAM_1)
+ & RX_FIFO_DEPTH_MSK) >> RX_FIFO_DEPTH_SHFT);
+ return rx_fifo_depth;
+}
+EXPORT_SYMBOL(get_rx_fifo_depth);
+
+/**
+ * se_get_packing_config() - Get the packing configuration based on input
+ * @bpw: Bits of data per transfer word.
+ * @pack_words: Number of words per fifo element.
+ * @msb_to_lsb: Transfer from MSB to LSB or vice-versa.
+ * @cfg0: Output buffer to hold the first half of configuration.
+ * @cfg1: Output buffer to hold the second half of configuration.
+ *
+ * This function is used to calculate the packing configuration based on
+ * the input packing requirement and the configuration logic.
+ */
+void se_get_packing_config(int bpw, int pack_words, bool msb_to_lsb,
+ unsigned long *cfg0, unsigned long *cfg1)
+{
+ u32 cfg[4] = {0};
+ int len;
+ int temp_bpw = bpw;
+ int idx_start = (msb_to_lsb ? (bpw - 1) : 0);
+ int idx = idx_start;
+ int idx_delta = (msb_to_lsb ? -BITS_PER_BYTE : BITS_PER_BYTE);
+ int ceil_bpw = ((bpw & (BITS_PER_BYTE - 1)) ?
+ ((bpw & ~(BITS_PER_BYTE - 1)) + BITS_PER_BYTE) : bpw);
+ int iter = (ceil_bpw * pack_words) >> 3;
+ int i;
+
+ if (unlikely(iter <= 0 || iter > 4)) {
+ *cfg0 = 0;
+ *cfg1 = 0;
+ return;
+ }
+
+ for (i = 0; i < iter; i++) {
+ len = (temp_bpw < BITS_PER_BYTE) ?
+ (temp_bpw - 1) : BITS_PER_BYTE - 1;
+ cfg[i] = ((idx << 5) | (msb_to_lsb << 4) | (len << 1));
+ idx = ((temp_bpw - BITS_PER_BYTE) <= 0) ?
+ ((i + 1) * BITS_PER_BYTE) + idx_start :
+ idx + idx_delta;
+ temp_bpw = ((temp_bpw - BITS_PER_BYTE) <= 0) ?
+ bpw : (temp_bpw - BITS_PER_BYTE);
+ }
+ cfg[iter - 1] |= 1;
+ *cfg0 = cfg[0] | (cfg[1] << 10);
+ *cfg1 = cfg[2] | (cfg[3] << 10);
+}
+EXPORT_SYMBOL(se_get_packing_config);
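/*
 * Worked example: bpw = 8, pack_words = 4, msb_to_lsb = true packs four
 * MSB-first bytes per 32-bit FIFO word. The loop produces start indices
 * 7, 15, 23, 31 with len = 7 each, so cfg[] = {0xFE, 0x1FE, 0x2FE, 0x3FF}
 * (the final |= 1 sets the stop bit), giving
 * cfg0 = 0xFE | (0x1FE << 10) = 0x7F8FE and
 * cfg1 = 0x2FE | (0x3FF << 10) = 0xFFEFE:
 */
static void packing_example(void)
{
	unsigned long cfg0, cfg1;

	se_get_packing_config(8, 4, true, &cfg0, &cfg1);
	/* cfg0 == 0x7F8FE, cfg1 == 0xFFEFE per the trace above */
}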
+
+/**
+ * se_config_packing() - Packing configuration of the serial engine
+ * @base: Base address of the serial engine's register block.
+ * @bpw: Bits of data per transfer word.
+ * @pack_words: Number of words per fifo element.
+ * @msb_to_lsb: Transfer from MSB to LSB or vice-versa.
+ *
+ * This function is used to configure the packing rules for the current
+ * transfer.
+ */
+void se_config_packing(void __iomem *base, int bpw,
+ int pack_words, bool msb_to_lsb)
+{
+ unsigned long cfg0, cfg1;
+
+ se_get_packing_config(bpw, pack_words, msb_to_lsb, &cfg0, &cfg1);
+ geni_write_reg(cfg0, base, SE_GENI_TX_PACKING_CFG0);
+ geni_write_reg(cfg1, base, SE_GENI_TX_PACKING_CFG1);
+ geni_write_reg(cfg0, base, SE_GENI_RX_PACKING_CFG0);
+ geni_write_reg(cfg1, base, SE_GENI_RX_PACKING_CFG1);
+ if (pack_words || bpw == 32)
+ geni_write_reg((bpw >> 4), base, SE_GENI_BYTE_GRAN);
+}
+EXPORT_SYMBOL(se_config_packing);
+
+static void se_geni_clks_off(struct se_geni_rsc *rsc)
+{
+ clk_disable_unprepare(rsc->se_clk);
+ clk_disable_unprepare(rsc->s_ahb_clk);
+ clk_disable_unprepare(rsc->m_ahb_clk);
+}
+
+static bool geni_se_check_bus_bw(struct geni_se_device *geni_se_dev)
+{
+ int i;
+ int new_bus_bw_idx = geni_se_dev->bus_bw_set_size - 1;
+ unsigned long new_bus_bw;
+ bool bus_bw_update = false;
+
+ new_bus_bw = max(geni_se_dev->cur_ib, geni_se_dev->cur_ab) /
+ DEFAULT_BUS_WIDTH;
+ for (i = 0; i < geni_se_dev->bus_bw_set_size; i++) {
+ if (geni_se_dev->bus_bw_set[i] >= new_bus_bw) {
+ new_bus_bw_idx = i;
+ break;
+ }
+ }
+
+ if (geni_se_dev->cur_bus_bw_idx != new_bus_bw_idx) {
+ geni_se_dev->cur_bus_bw_idx = new_bus_bw_idx;
+ bus_bw_update = true;
+ }
+ return bus_bw_update;
+}
+
+static int geni_se_rmv_ab_ib(struct geni_se_device *geni_se_dev,
+ struct se_geni_rsc *rsc)
+{
+ unsigned long flags;
+ struct se_geni_rsc *tmp;
+ bool bus_bw_update = false;
+ int ret = 0;
+
+ if (unlikely(list_empty(&rsc->ab_list) || list_empty(&rsc->ib_list)))
+ return -EINVAL;
+
+ spin_lock_irqsave(&geni_se_dev->ab_ib_lock, flags);
+ list_del_init(&rsc->ab_list);
+ geni_se_dev->cur_ab -= rsc->ab;
+
+ list_del_init(&rsc->ib_list);
+ tmp = list_first_entry_or_null(&geni_se_dev->ib_list_head,
+ struct se_geni_rsc, ib_list);
+ if (tmp && tmp->ib != geni_se_dev->cur_ib)
+ geni_se_dev->cur_ib = tmp->ib;
+ else if (!tmp && geni_se_dev->cur_ib)
+ geni_se_dev->cur_ib = 0;
+
+ bus_bw_update = geni_se_check_bus_bw(geni_se_dev);
+ spin_unlock_irqrestore(&geni_se_dev->ab_ib_lock, flags);
+
+ if (bus_bw_update)
+ ret = msm_bus_scale_update_bw(geni_se_dev->bus_bw,
+ geni_se_dev->cur_ab,
+ geni_se_dev->cur_ib);
+ GENI_SE_DBG(geni_se_dev->log_ctx, false, NULL,
+ "%s: %lu:%lu (%lu:%lu) %d\n", __func__,
+ geni_se_dev->cur_ab, geni_se_dev->cur_ib,
+ rsc->ab, rsc->ib, bus_bw_update);
+ return ret;
+}
+
+/**
+ * se_geni_resources_off() - Turn off resources associated with the serial
+ * engine
+ * @rsc: Handle to resources associated with the serial engine.
+ *
+ * Return: 0 on success, standard Linux error codes on failure/error.
+ */
+int se_geni_resources_off(struct se_geni_rsc *rsc)
+{
+ int ret = 0;
+ struct geni_se_device *geni_se_dev;
+
+ if (unlikely(!rsc || !rsc->wrapper_dev))
+ return -EINVAL;
+
+ geni_se_dev = dev_get_drvdata(rsc->wrapper_dev);
+ if (unlikely(!geni_se_dev || !geni_se_dev->bus_bw))
+ return -ENODEV;
+
+ ret = pinctrl_select_state(rsc->geni_pinctrl, rsc->geni_gpio_sleep);
+ if (ret) {
+ GENI_SE_ERR(geni_se_dev->log_ctx, false, NULL,
+ "%s: Error %d pinctrl_select_state\n", __func__, ret);
+ return ret;
+ }
+ se_geni_clks_off(rsc);
+ ret = geni_se_rmv_ab_ib(geni_se_dev, rsc);
+ if (ret)
+ GENI_SE_ERR(geni_se_dev->log_ctx, false, NULL,
+ "%s: Error %d during bus_bw_update\n", __func__, ret);
+ return ret;
+}
+EXPORT_SYMBOL(se_geni_resources_off);
+
+static int se_geni_clks_on(struct se_geni_rsc *rsc)
+{
+ int ret;
+
+ ret = clk_prepare_enable(rsc->m_ahb_clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(rsc->s_ahb_clk);
+ if (ret) {
+ clk_disable_unprepare(rsc->m_ahb_clk);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(rsc->se_clk);
+ if (ret) {
+ clk_disable_unprepare(rsc->s_ahb_clk);
+ clk_disable_unprepare(rsc->m_ahb_clk);
+ }
+ return ret;
+}
+
+static int geni_se_add_ab_ib(struct geni_se_device *geni_se_dev,
+ struct se_geni_rsc *rsc)
+{
+ unsigned long flags;
+ struct se_geni_rsc *tmp;
+ struct list_head *ins_list_head;
+ bool bus_bw_update = false;
+ int ret = 0;
+
+ spin_lock_irqsave(&geni_se_dev->ab_ib_lock, flags);
+ list_add(&rsc->ab_list, &geni_se_dev->ab_list_head);
+ geni_se_dev->cur_ab += rsc->ab;
+
+ ins_list_head = &geni_se_dev->ib_list_head;
+ list_for_each_entry(tmp, &geni_se_dev->ib_list_head, ib_list) {
+ if (tmp->ib < rsc->ib)
+ break;
+ ins_list_head = &tmp->ib_list;
+ }
+ list_add(&rsc->ib_list, ins_list_head);
+ /* The node just inserted at the head has the highest instantaneous BW */
+ if (ins_list_head == &geni_se_dev->ib_list_head)
+ geni_se_dev->cur_ib = rsc->ib;
+
+ bus_bw_update = geni_se_check_bus_bw(geni_se_dev);
+ spin_unlock_irqrestore(&geni_se_dev->ab_ib_lock, flags);
+
+ if (bus_bw_update)
+ ret = msm_bus_scale_update_bw(geni_se_dev->bus_bw,
+ geni_se_dev->cur_ab,
+ geni_se_dev->cur_ib);
+ GENI_SE_DBG(geni_se_dev->log_ctx, false, NULL,
+ "%s: %lu:%lu (%lu:%lu) %d\n", __func__,
+ geni_se_dev->cur_ab, geni_se_dev->cur_ib,
+ rsc->ab, rsc->ib, bus_bw_update);
+ return ret;
+}
+
+/**
+ * se_geni_resources_on() - Turn on resources associated with the serial
+ * engine
+ * @rsc: Handle to resources associated with the serial engine.
+ *
+ * Return: 0 on success, standard Linux error codes on failure/error.
+ */
+int se_geni_resources_on(struct se_geni_rsc *rsc)
+{
+ int ret = 0;
+ struct geni_se_device *geni_se_dev;
+
+ if (unlikely(!rsc || !rsc->wrapper_dev))
+ return -EINVAL;
+
+ geni_se_dev = dev_get_drvdata(rsc->wrapper_dev);
+ if (unlikely(!geni_se_dev))
+ return -EPROBE_DEFER;
+
+ ret = geni_se_add_ab_ib(geni_se_dev, rsc);
+ if (ret) {
+ GENI_SE_ERR(geni_se_dev->log_ctx, false, NULL,
+ "%s: Error %d during bus_bw_update\n", __func__, ret);
+ return ret;
+ }
+
+ ret = se_geni_clks_on(rsc);
+ if (ret) {
+ GENI_SE_ERR(geni_se_dev->log_ctx, false, NULL,
+ "%s: Error %d during clks_on\n", __func__, ret);
+ geni_se_rmv_ab_ib(geni_se_dev, rsc);
+ return ret;
+ }
+
+ ret = pinctrl_select_state(rsc->geni_pinctrl, rsc->geni_gpio_active);
+ if (ret) {
+ GENI_SE_ERR(geni_se_dev->log_ctx, false, NULL,
+ "%s: Error %d pinctrl_select_state\n", __func__, ret);
+ se_geni_clks_off(rsc);
+ geni_se_rmv_ab_ib(geni_se_dev, rsc);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(se_geni_resources_on);
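
As a usage sketch (the port structure and its serial_rsc member are assumed names, not part of this patch), a client brackets each active period with the on/off pair so that bus votes, clocks and pinctrl state stay balanced:

	ret = se_geni_resources_on(&port->serial_rsc);
	if (ret)
		return ret;

	/* ... issue transfers against the serial engine ... */

	ret = se_geni_resources_off(&port->serial_rsc);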
+
+/**
+ * geni_se_resources_init() - Init the SE resource structure
+ * @rsc: SE resource structure to be initialized.
+ * @ab: Initial Average bus bandwidth request value.
+ * @ib: Initial Instantaneous bus bandwidth request value.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int geni_se_resources_init(struct se_geni_rsc *rsc,
+ unsigned long ab, unsigned long ib)
+{
+ struct geni_se_device *geni_se_dev;
+
+ if (unlikely(!rsc || !rsc->wrapper_dev))
+ return -EINVAL;
+
+ geni_se_dev = dev_get_drvdata(rsc->wrapper_dev);
+ if (unlikely(!geni_se_dev))
+ return -EPROBE_DEFER;
+
+ if (unlikely(IS_ERR_OR_NULL(geni_se_dev->bus_bw))) {
+ geni_se_dev->bus_bw = msm_bus_scale_register(
+ geni_se_dev->bus_mas_id,
+ geni_se_dev->bus_slv_id,
+ (char *)dev_name(geni_se_dev->dev),
+ false);
+ if (IS_ERR_OR_NULL(geni_se_dev->bus_bw)) {
+ GENI_SE_ERR(geni_se_dev->log_ctx, false, NULL,
+ "%s: Error creating bus client\n", __func__);
+ return (int)PTR_ERR(geni_se_dev->bus_bw);
+ }
+ }
+
+ rsc->ab = ab;
+ rsc->ib = ib;
+ INIT_LIST_HEAD(&rsc->ab_list);
+ INIT_LIST_HEAD(&rsc->ib_list);
+ geni_se_iommu_map_and_attach(geni_se_dev);
+ return 0;
+}
+EXPORT_SYMBOL(geni_se_resources_init);
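
For illustration, a client seeds the resource structure once at probe time with its worst-case bandwidth votes; the figures below are placeholders, not values from this patch:

	ret = geni_se_resources_init(&port->serial_rsc,
				     1000000UL,  /* ab: average bytes/s */
				     2000000UL); /* ib: instantaneous bytes/s */
	if (ret)
		dev_err(dev, "SE resource init failed: %d\n", ret);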
+
+/**
+ * geni_se_tx_dma_prep() - Prepare the Serial Engine for TX DMA transfer
+ * @wrapper_dev: QUPv3 Wrapper Device to which the TX buffer is mapped.
+ * @base: Base address of the SE register block.
+ * @tx_buf: Pointer to the TX buffer.
+ * @tx_len: Length of the TX buffer.
+ * @tx_dma: Pointer to store the mapped DMA address.
+ *
+ * This function is used to prepare the buffers for DMA TX.
+ *
+ * Return: 0 on success, standard Linux error codes on error/failure.
+ */
+int geni_se_tx_dma_prep(struct device *wrapper_dev, void __iomem *base,
+ void *tx_buf, int tx_len, dma_addr_t *tx_dma)
+{
+ int ret;
+
+ if (unlikely(!wrapper_dev || !base || !tx_buf || !tx_len || !tx_dma))
+ return -EINVAL;
+
+ ret = geni_se_iommu_map_buf(wrapper_dev, tx_dma, tx_buf, tx_len,
+ DMA_TO_DEVICE);
+ if (ret)
+ return ret;
+
+ geni_write_reg(7, base, SE_DMA_TX_IRQ_EN_SET);
+ geni_write_reg((u32)(*tx_dma), base, SE_DMA_TX_PTR_L);
+ geni_write_reg((u32)((*tx_dma) >> 32), base, SE_DMA_TX_PTR_H);
+ geni_write_reg(1, base, SE_DMA_TX_ATTR);
+ geni_write_reg(tx_len, base, SE_DMA_TX_LEN);
+ return 0;
+}
+EXPORT_SYMBOL(geni_se_tx_dma_prep);
+
+/**
+ * geni_se_rx_dma_prep() - Prepare the Serial Engine for RX DMA transfer
+ * @wrapper_dev: QUPv3 Wrapper Device to which the RX buffer is mapped.
+ * @base: Base address of the SE register block.
+ * @rx_buf: Pointer to the RX buffer.
+ * @rx_len: Length of the RX buffer.
+ * @rx_dma: Pointer to store the mapped DMA address.
+ *
+ * This function is used to prepare the buffers for DMA RX.
+ *
+ * Return: 0 on success, standard Linux error codes on error/failure.
+ */
+int geni_se_rx_dma_prep(struct device *wrapper_dev, void __iomem *base,
+ void *rx_buf, int rx_len, dma_addr_t *rx_dma)
+{
+ int ret;
+
+ if (unlikely(!wrapper_dev || !base || !rx_buf || !rx_len || !rx_dma))
+ return -EINVAL;
+
+ ret = geni_se_iommu_map_buf(wrapper_dev, rx_dma, rx_buf, rx_len,
+ DMA_FROM_DEVICE);
+ if (ret)
+ return ret;
+
+ geni_write_reg(7, base, SE_DMA_RX_IRQ_EN_SET);
+ geni_write_reg((u32)(*rx_dma), base, SE_DMA_RX_PTR_L);
+ geni_write_reg((u32)((*rx_dma) >> 32), base, SE_DMA_RX_PTR_H);
+ /* RX does not have EOT bit */
+ geni_write_reg(0, base, SE_DMA_RX_ATTR);
+ geni_write_reg(rx_len, base, SE_DMA_RX_LEN);
+ return 0;
+}
+EXPORT_SYMBOL(geni_se_rx_dma_prep);
+
+/**
+ * geni_se_tx_dma_unprep() - Unprepare the Serial Engine after TX DMA transfer
+ * @wrapper_dev: QUPv3 Wrapper Device to which the TX buffer is mapped.
+ * @tx_dma: DMA address of the TX buffer.
+ * @tx_len: Length of the TX buffer.
+ *
+ * This function is used to unprepare the DMA buffers after DMA TX.
+ */
+void geni_se_tx_dma_unprep(struct device *wrapper_dev,
+ dma_addr_t tx_dma, int tx_len)
+{
+ if (tx_dma)
+ geni_se_iommu_unmap_buf(wrapper_dev, &tx_dma, tx_len,
+ DMA_TO_DEVICE);
+}
+EXPORT_SYMBOL(geni_se_tx_dma_unprep);
+
+/**
+ * geni_se_rx_dma_unprep() - Unprepare the Serial Engine after RX DMA transfer
+ * @wrapper_dev: QUPv3 Wrapper Device to which the RX buffer is mapped.
+ * @rx_dma: DMA address of the RX buffer.
+ * @rx_len: Length of the RX buffer.
+ *
+ * This function is used to unprepare the DMA buffers after DMA RX.
+ */
+void geni_se_rx_dma_unprep(struct device *wrapper_dev,
+ dma_addr_t rx_dma, int rx_len)
+{
+ if (rx_dma)
+ geni_se_iommu_unmap_buf(wrapper_dev, &rx_dma, rx_len,
+ DMA_FROM_DEVICE);
+}
+EXPORT_SYMBOL(geni_se_rx_dma_unprep);
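
A minimal TX DMA lifecycle sketch, assuming a caller-owned buf/len pair and with interrupt handling elided:

	dma_addr_t tx_dma;
	int ret;

	ret = geni_se_tx_dma_prep(wrapper_dev, se_base, buf, len, &tx_dma);
	if (ret)
		return ret;

	/* ... start the transfer and wait for the TX DMA done interrupt ... */

	geni_se_tx_dma_unprep(wrapper_dev, tx_dma, len);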
+
+/**
+ * geni_se_qupv3_hw_version() - Read the QUPv3 Hardware version
+ * @wrapper_dev: Pointer to the corresponding QUPv3 wrapper core.
+ * @major: Buffer for Major Version field.
+ * @minor: Buffer for Minor Version field.
+ * @step: Buffer for Step Version field.
+ *
+ * Return: 0 on success, standard Linux error codes on failure/error.
+ */
+int geni_se_qupv3_hw_version(struct device *wrapper_dev, unsigned int *major,
+ unsigned int *minor, unsigned int *step)
+{
+ unsigned int version;
+ struct geni_se_device *geni_se_dev;
+
+ if (!wrapper_dev || !major || !minor || !step)
+ return -EINVAL;
+
+ geni_se_dev = dev_get_drvdata(wrapper_dev);
+ if (unlikely(!geni_se_dev))
+ return -ENODEV;
+
+ version = geni_read_reg(geni_se_dev->base, QUPV3_HW_VER);
+ *major = (version & HW_VER_MAJOR_MASK) >> HW_VER_MAJOR_SHFT;
+ *minor = (version & HW_VER_MINOR_MASK) >> HW_VER_MINOR_SHFT;
+ *step = version & HW_VER_STEP_MASK;
+ return 0;
+}
+EXPORT_SYMBOL(geni_se_qupv3_hw_version);
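
For example, a protocol driver can gate version-specific quirks on the reported version; a sketch, with dev assumed to be the client's struct device:

	unsigned int major, minor, step;

	if (!geni_se_qupv3_hw_version(wrapper_dev, &major, &minor, &step))
		dev_info(dev, "QUPv3 HW version %u.%u.%u\n",
			 major, minor, step);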
+
+static int geni_se_iommu_map_and_attach(struct geni_se_device *geni_se_dev)
+{
+ dma_addr_t va_start = GENI_SE_IOMMU_VA_START;
+ size_t va_size = GENI_SE_IOMMU_VA_SIZE;
+ int bypass = 1;
+ struct device *cb_dev = geni_se_dev->cb_dev;
+
+ mutex_lock(&geni_se_dev->iommu_lock);
+ if (likely(geni_se_dev->iommu_map)) {
+ mutex_unlock(&geni_se_dev->iommu_lock);
+ return 0;
+ }
+
+ geni_se_dev->iommu_map = arm_iommu_create_mapping(&platform_bus_type,
+ va_start, va_size);
+ if (IS_ERR(geni_se_dev->iommu_map)) {
+ GENI_SE_ERR(geni_se_dev->log_ctx, false, NULL,
+ "%s:%s iommu_create_mapping failure\n",
+ __func__, dev_name(cb_dev));
+ mutex_unlock(&geni_se_dev->iommu_lock);
+ return PTR_ERR(geni_se_dev->iommu_map);
+ }
+
+ if (geni_se_dev->iommu_s1_bypass) {
+ if (iommu_domain_set_attr(geni_se_dev->iommu_map->domain,
+ DOMAIN_ATTR_S1_BYPASS, &bypass)) {
+ GENI_SE_ERR(geni_se_dev->log_ctx, false, NULL,
+ "%s:%s Couldn't bypass s1 translation\n",
+ __func__, dev_name(cb_dev));
+ arm_iommu_release_mapping(geni_se_dev->iommu_map);
+ geni_se_dev->iommu_map = NULL;
+ mutex_unlock(&geni_se_dev->iommu_lock);
+ return -EIO;
+ }
+ }
+
+ if (arm_iommu_attach_device(cb_dev, geni_se_dev->iommu_map)) {
+ GENI_SE_ERR(geni_se_dev->log_ctx, false, NULL,
+ "%s:%s couldn't arm_iommu_attach_device\n",
+ __func__, dev_name(cb_dev));
+ arm_iommu_release_mapping(geni_se_dev->iommu_map);
+ geni_se_dev->iommu_map = NULL;
+ mutex_unlock(&geni_se_dev->iommu_lock);
+ return -EIO;
+ }
+ mutex_unlock(&geni_se_dev->iommu_lock);
+ GENI_SE_DBG(geni_se_dev->log_ctx, false, NULL, "%s:%s successful\n",
+ __func__, dev_name(cb_dev));
+ return 0;
+}
+
+/**
+ * geni_se_iommu_map_buf() - Map a single buffer into QUPv3 context bank
+ * @wrapper_dev: Pointer to the corresponding QUPv3 wrapper core.
+ * @iova: Pointer in which the mapped virtual address is stored.
+ * @buf: Address of the buffer that needs to be mapped.
+ * @size: Size of the buffer.
+ * @dir: Direction of the DMA transfer.
+ *
+ * This function is used to map an already allocated buffer into the
+ * QUPv3 context bank device space.
+ *
+ * Return: 0 on success, standard Linux error codes on failure/error.
+ */
+int geni_se_iommu_map_buf(struct device *wrapper_dev, dma_addr_t *iova,
+ void *buf, size_t size, enum dma_data_direction dir)
+{
+ struct device *cb_dev;
+ struct geni_se_device *geni_se_dev;
+
+ if (!wrapper_dev || !iova || !buf || !size)
+ return -EINVAL;
+
+ *iova = DMA_ERROR_CODE;
+ geni_se_dev = dev_get_drvdata(wrapper_dev);
+ if (!geni_se_dev || !geni_se_dev->cb_dev)
+ return -ENODEV;
+
+ cb_dev = geni_se_dev->cb_dev;
+
+ *iova = dma_map_single(cb_dev, buf, size, dir);
+ if (dma_mapping_error(cb_dev, *iova))
+ return -EIO;
+ return 0;
+}
+EXPORT_SYMBOL(geni_se_iommu_map_buf);
+
+/**
+ * geni_se_iommu_alloc_buf() - Allocate & map a single buffer into QUPv3
+ * context bank
+ * @wrapper_dev: Pointer to the corresponding QUPv3 wrapper core.
+ * @iova: Pointer in which the mapped virtual address is stored.
+ * @size: Size of the buffer.
+ *
+ * This function is used to allocate a buffer and map it into the
+ * QUPv3 context bank device space.
+ *
+ * Return: address of the buffer on success, NULL or ERR_PTR on
+ * failure/error.
+ */
+void *geni_se_iommu_alloc_buf(struct device *wrapper_dev, dma_addr_t *iova,
+ size_t size)
+{
+ struct device *cb_dev;
+ struct geni_se_device *geni_se_dev;
+ void *buf = NULL;
+
+ if (!wrapper_dev || !iova || !size)
+ return ERR_PTR(-EINVAL);
+
+ *iova = DMA_ERROR_CODE;
+ geni_se_dev = dev_get_drvdata(wrapper_dev);
+ if (!geni_se_dev || !geni_se_dev->cb_dev)
+ return ERR_PTR(-ENODEV);
+
+ cb_dev = geni_se_dev->cb_dev;
+
+ buf = dma_alloc_coherent(cb_dev, size, iova, GFP_KERNEL);
+ if (!buf)
+ GENI_SE_ERR(geni_se_dev->log_ctx, false, NULL,
+ "%s: Failed dma_alloc_coherent\n", __func__);
+ return buf;
+}
+EXPORT_SYMBOL(geni_se_iommu_alloc_buf);
+
+/**
+ * geni_se_iommu_unmap_buf() - Unmap a single buffer from QUPv3 context bank
+ * @wrapper_dev: Pointer to the corresponding QUPv3 wrapper core.
+ * @iova: Pointer in which the mapped virtual address is stored.
+ * @size: Size of the buffer.
+ * @dir: Direction of the DMA transfer.
+ *
+ * This function is used to unmap an already mapped buffer from the
+ * QUPv3 context bank device space.
+ *
+ * Return: 0 on success, standard Linux error codes on failure/error.
+ */
+int geni_se_iommu_unmap_buf(struct device *wrapper_dev, dma_addr_t *iova,
+ size_t size, enum dma_data_direction dir)
+{
+ struct device *cb_dev;
+ struct geni_se_device *geni_se_dev;
+
+ if (!wrapper_dev || !iova || !size)
+ return -EINVAL;
+
+ geni_se_dev = dev_get_drvdata(wrapper_dev);
+ if (!geni_se_dev || !geni_se_dev->cb_dev)
+ return -ENODEV;
+
+ cb_dev = geni_se_dev->cb_dev;
+
+ dma_unmap_single(cb_dev, *iova, size, dir);
+ return 0;
+}
+EXPORT_SYMBOL(geni_se_iommu_unmap_buf);
+
+/**
+ * geni_se_iommu_free_buf() - Unmap & free a single buffer from QUPv3
+ * context bank
+ * @wrapper_dev: Pointer to the corresponding QUPv3 wrapper core.
+ * @iova: Pointer in which the mapped virtual address is stored.
+ * @buf: Address of the buffer.
+ * @size: Size of the buffer.
+ *
+ * This function is used to unmap and free a buffer from the
+ * QUPv3 context bank device space.
+ *
+ * Return: 0 on success, standard Linux error codes on failure/error.
+ */
+int geni_se_iommu_free_buf(struct device *wrapper_dev, dma_addr_t *iova,
+ void *buf, size_t size)
+{
+ struct device *cb_dev;
+ struct geni_se_device *geni_se_dev;
+
+ if (!wrapper_dev || !iova || !buf || !size)
+ return -EINVAL;
+
+ geni_se_dev = dev_get_drvdata(wrapper_dev);
+ if (!geni_se_dev || !geni_se_dev->cb_dev)
+ return -ENODEV;
+
+ cb_dev = geni_se_dev->cb_dev;
+
+ dma_free_coherent(cb_dev, size, buf, *iova);
+ return 0;
+}
+EXPORT_SYMBOL(geni_se_iommu_free_buf);
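
The coherent-buffer helpers pair the same way as the map/unmap helpers; a sketch assuming a 4 KB scratch buffer:

	dma_addr_t iova;
	void *buf;

	buf = geni_se_iommu_alloc_buf(wrapper_dev, &iova, SZ_4K);
	if (IS_ERR_OR_NULL(buf))
		return buf ? PTR_ERR(buf) : -ENOMEM;

	/* ... hand iova to the serial engine DMA ... */

	geni_se_iommu_free_buf(wrapper_dev, &iova, buf, SZ_4K);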
+
+static const struct of_device_id geni_se_dt_match[] = {
+ { .compatible = "qcom,qupv3-geni-se", },
+ { .compatible = "qcom,qupv3-geni-se-cb", },
+ {}
+};
+
+static int geni_se_iommu_probe(struct device *dev)
+{
+ struct geni_se_device *geni_se_dev;
+
+ if (unlikely(!dev->parent)) {
+ dev_err(dev, "%s no parent for this device\n", __func__);
+ return -EINVAL;
+ }
+
+ geni_se_dev = dev_get_drvdata(dev->parent);
+ if (unlikely(!geni_se_dev)) {
+ dev_err(dev, "%s geni_se_dev not found\n", __func__);
+ return -EINVAL;
+ }
+ geni_se_dev->cb_dev = dev;
+
+ GENI_SE_DBG(geni_se_dev->log_ctx, false, NULL,
+ "%s: Probe successful\n", __func__);
+ return 0;
+}
+
+static int geni_se_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct geni_se_device *geni_se_dev;
+
+ if (of_device_is_compatible(dev->of_node, "qcom,qupv3-geni-se-cb"))
+ return geni_se_iommu_probe(dev);
+
+ geni_se_dev = devm_kzalloc(dev, sizeof(*geni_se_dev), GFP_KERNEL);
+ if (!geni_se_dev)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "%s: Mandatory resource info not found\n",
+ __func__);
+ devm_kfree(dev, geni_se_dev);
+ return -EINVAL;
+ }
+
+ geni_se_dev->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR_OR_NULL(geni_se_dev->base)) {
+ dev_err(dev, "%s: Error mapping the resource\n", __func__);
+ devm_kfree(dev, geni_se_dev);
+ return -EFAULT;
+ }
+
+ geni_se_dev->dev = dev;
+ ret = of_property_read_u32(dev->of_node, "qcom,bus-mas-id",
+ &geni_se_dev->bus_mas_id);
+ if (ret) {
+ dev_err(dev, "%s: Error missing bus master id\n", __func__);
+ devm_iounmap(dev, geni_se_dev->base);
+ devm_kfree(dev, geni_se_dev);
+ return ret;
+ }
+ ret = of_property_read_u32(dev->of_node, "qcom,bus-slv-id",
+ &geni_se_dev->bus_slv_id);
+ if (ret) {
+ dev_err(dev, "%s: Error missing bus slave id\n", __func__);
+ devm_iounmap(dev, geni_se_dev->base);
+ devm_kfree(dev, geni_se_dev);
+ return ret;
+ }
+
+ geni_se_dev->iommu_s1_bypass = of_property_read_bool(dev->of_node,
+ "qcom,iommu-s1-bypass");
+ geni_se_dev->bus_bw_set = default_bus_bw_set;
+ geni_se_dev->bus_bw_set_size = ARRAY_SIZE(default_bus_bw_set);
+ mutex_init(&geni_se_dev->iommu_lock);
+ INIT_LIST_HEAD(&geni_se_dev->ab_list_head);
+ INIT_LIST_HEAD(&geni_se_dev->ib_list_head);
+ spin_lock_init(&geni_se_dev->ab_ib_lock);
+ geni_se_dev->log_ctx = ipc_log_context_create(NUM_LOG_PAGES,
+ dev_name(geni_se_dev->dev), 0);
+ if (!geni_se_dev->log_ctx)
+ dev_err(dev, "%s Failed to allocate log context\n", __func__);
+ dev_set_drvdata(dev, geni_se_dev);
+
+ ret = of_platform_populate(dev->of_node, geni_se_dt_match, NULL, dev);
+ if (ret) {
+ dev_err(dev, "%s: Error populating children\n", __func__);
+ devm_iounmap(dev, geni_se_dev->base);
+ devm_kfree(dev, geni_se_dev);
+ return ret;
+ }
+
+ GENI_SE_DBG(geni_se_dev->log_ctx, false, NULL,
+ "%s: Probe successful\n", __func__);
+ return ret;
+}
+
+static int geni_se_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct geni_se_device *geni_se_dev = dev_get_drvdata(dev);
+
+ if (likely(!IS_ERR_OR_NULL(geni_se_dev->iommu_map))) {
+ arm_iommu_detach_device(geni_se_dev->cb_dev);
+ arm_iommu_release_mapping(geni_se_dev->iommu_map);
+ }
+ ipc_log_context_destroy(geni_se_dev->log_ctx);
+ devm_iounmap(dev, geni_se_dev->base);
+ devm_kfree(dev, geni_se_dev);
+ return 0;
+}
+
+static struct platform_driver geni_se_driver = {
+ .driver = {
+ .name = "qupv3_geni_se",
+ .of_match_table = geni_se_dt_match,
+ },
+ .probe = geni_se_probe,
+ .remove = geni_se_remove,
+};
+
+static int __init geni_se_driver_init(void)
+{
+ return platform_driver_register(&geni_se_driver);
+}
+arch_initcall(geni_se_driver_init);
+
+static void __exit geni_se_driver_exit(void)
+{
+ platform_driver_unregister(&geni_se_driver);
+}
+module_exit(geni_se_driver_exit);
+
+MODULE_DESCRIPTION("GENI Serial Engine Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 6be274f..1b283b2 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -114,3 +114,15 @@
The UFS unit-tests register as a block device test utility to
the test-iosched and will be initiated when the test-iosched will
be chosen to be the active I/O scheduler.
+
+config SCSI_UFSHCD_CMD_LOGGING
+ bool "Universal Flash Storage host controller driver layer command logging support"
+ depends on SCSI_UFSHCD
+ help
+ This selects the UFS host controller driver layer command logging.
+ UFS host controller driver layer command logging records all the
+ command information sent from the UFS host controller for debugging
+ purposes.
+
+ Select this if you want the above-mentioned debug information
+ captured.
+ If unsure, say N.
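
For reference, capturing this log only needs the new option enabled in the build configuration; a sketch, since the surrounding fragment depends on the target defconfig:

	CONFIG_SCSI_UFSHCD=y
	CONFIG_SCSI_UFSHCD_CMD_LOGGING=y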
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 602c359..dc74484 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -532,14 +532,145 @@
*val = ' ';
}
+#define UFSHCD_MAX_CMD_LOGGING 100
+
#ifdef CONFIG_TRACEPOINTS
-static void ufshcd_add_command_trace(struct ufs_hba *hba,
- unsigned int tag, const char *str)
+static inline void ufshcd_add_command_trace(struct ufs_hba *hba,
+ struct ufshcd_cmd_log_entry *entry, u8 opcode)
{
- sector_t lba = -1;
- u8 opcode = 0;
- u32 intr, doorbell;
+ if (trace_ufshcd_command_enabled()) {
+ u32 intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+
+ trace_ufshcd_command(dev_name(hba->dev), entry->str, entry->tag,
+ entry->doorbell, entry->transfer_len, intr,
+ entry->lba, opcode);
+ }
+}
+#else
+static inline void ufshcd_add_command_trace(struct ufs_hba *hba,
+ struct ufshcd_cmd_log_entry *entry, u8 opcode)
+{
+}
+#endif
+
+#ifdef CONFIG_SCSI_UFSHCD_CMD_LOGGING
+static void ufshcd_cmd_log_init(struct ufs_hba *hba)
+{
+ /* Allocate log entries */
+ if (!hba->cmd_log.entries) {
+ hba->cmd_log.entries = kcalloc(UFSHCD_MAX_CMD_LOGGING,
+ sizeof(struct ufshcd_cmd_log_entry), GFP_KERNEL);
+ if (!hba->cmd_log.entries)
+ return;
+ dev_dbg(hba->dev, "%s: cmd_log.entries initialized\n",
+ __func__);
+ }
+}
+
+static void __ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
+ unsigned int tag, u8 cmd_id, u8 idn, u8 lun,
+ sector_t lba, int transfer_len, u8 opcode)
+{
+ struct ufshcd_cmd_log_entry *entry;
+
+ if (!hba->cmd_log.entries)
+ return;
+
+ entry = &hba->cmd_log.entries[hba->cmd_log.pos];
+ entry->lun = lun;
+ entry->str = str;
+ entry->cmd_type = cmd_type;
+ entry->cmd_id = cmd_id;
+ entry->lba = lba;
+ entry->transfer_len = transfer_len;
+ entry->idn = idn;
+ entry->doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ entry->tag = tag;
+ entry->tstamp = ktime_get();
+ entry->outstanding_reqs = hba->outstanding_reqs;
+ entry->seq_num = hba->cmd_log.seq_num;
+ hba->cmd_log.seq_num++;
+ hba->cmd_log.pos =
+ (hba->cmd_log.pos + 1) % UFSHCD_MAX_CMD_LOGGING;
+
+ ufshcd_add_command_trace(hba, entry, opcode);
+}
+
+static void ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
+ unsigned int tag, u8 cmd_id, u8 idn)
+{
+ __ufshcd_cmd_log(hba, str, cmd_type, tag, cmd_id, idn,
+ 0xff, (sector_t)-1, -1, -1);
+}
+
+static void ufshcd_dme_cmd_log(struct ufs_hba *hba, char *str, u8 cmd_id)
+{
+ ufshcd_cmd_log(hba, str, "dme", 0xff, cmd_id, 0xff);
+}
+
+static void ufshcd_cmd_log_print(struct ufs_hba *hba)
+{
+ int i;
+ int pos;
+ struct ufshcd_cmd_log_entry *p;
+
+ if (!hba->cmd_log.entries)
+ return;
+
+ pos = hba->cmd_log.pos;
+ for (i = 0; i < UFSHCD_MAX_CMD_LOGGING; i++) {
+ p = &hba->cmd_log.entries[pos];
+ pos = (pos + 1) % UFSHCD_MAX_CMD_LOGGING;
+
+ if (ktime_to_us(p->tstamp)) {
+ pr_err("%s: %s: seq_no=%u lun=0x%x cmd_id=0x%02x lba=0x%llx txfer_len=%d tag=%u, doorbell=0x%x outstanding=0x%x idn=%d time=%lld us\n",
+ p->cmd_type, p->str, p->seq_num,
+ p->lun, p->cmd_id, (unsigned long long)p->lba,
+ p->transfer_len, p->tag, p->doorbell,
+ p->outstanding_reqs, p->idn,
+ ktime_to_us(p->tstamp));
+ usleep_range(1000, 1100);
+ }
+ }
+}
+#else
+static void ufshcd_cmd_log_init(struct ufs_hba *hba)
+{
+}
+
+static void __ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
+ unsigned int tag, u8 cmd_id, u8 idn, u8 lun,
+ sector_t lba, int transfer_len, u8 opcode)
+{
+ struct ufshcd_cmd_log_entry entry;
+
+ entry.str = str;
+ entry.lba = lba;
+ entry.transfer_len = transfer_len;
+ entry.doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ entry.tag = tag;
+
+ ufshcd_add_command_trace(hba, &entry, opcode);
+}
+
+static void ufshcd_dme_cmd_log(struct ufs_hba *hba, char *str, u8 cmd_id)
+{
+}
+
+static void ufshcd_cmd_log_print(struct ufs_hba *hba)
+{
+}
+#endif
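
A hypothetical call site, mirroring how ufshcd_cond_add_cmd_trace() below feeds the ring; the tag, LBA and transfer-length values are illustrative only:

	/* Log a SCSI READ(10) being issued on tag 5. */
	__ufshcd_cmd_log(hba, "send", "scsi", 5, READ_10, 0,
			 lrbp->lun, lba, transfer_len, READ_10);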
+
+#ifdef CONFIG_TRACEPOINTS
+static inline void ufshcd_cond_add_cmd_trace(struct ufs_hba *hba,
+ unsigned int tag, const char *str)
+{
struct ufshcd_lrb *lrbp;
+ char *cmd_type = NULL;
+ u8 opcode = 0;
+ u8 cmd_id = 0, idn = 0;
+ sector_t lba = -1;
int transfer_len = -1;
lrbp = &hba->lrb[tag];
@@ -553,23 +684,28 @@
*/
if (lrbp->cmd->request && lrbp->cmd->request->bio)
lba =
- lrbp->cmd->request->bio->bi_iter.bi_sector;
+ lrbp->cmd->request->bio->bi_iter.bi_sector;
transfer_len = be32_to_cpu(
lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
}
}
- intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
- doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- trace_ufshcd_command(dev_name(hba->dev), str, tag,
- doorbell, transfer_len, intr, lba, opcode);
-}
+ if (lrbp->cmd && (lrbp->command_type == UTP_CMD_TYPE_SCSI)) {
+ cmd_type = "scsi";
+ cmd_id = (u8)(*lrbp->cmd->cmnd);
+ } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
+ if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP) {
+ cmd_type = "nop";
+ cmd_id = 0;
+ } else if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY) {
+ cmd_type = "query";
+ cmd_id = hba->dev_cmd.query.request.upiu_req.opcode;
+ idn = hba->dev_cmd.query.request.upiu_req.idn;
+ }
+ }
-static inline void ufshcd_cond_add_cmd_trace(struct ufs_hba *hba,
- unsigned int tag, const char *str)
-{
- if (trace_ufshcd_command_enabled())
- ufshcd_add_command_trace(hba, tag, str);
+ __ufshcd_cmd_log(hba, (char *) str, cmd_type, tag, cmd_id, idn,
+ lrbp->lun, lba, transfer_len, opcode);
}
#else
static inline void ufshcd_cond_add_cmd_trace(struct ufs_hba *hba,
@@ -2280,6 +2416,7 @@
hba->active_uic_cmd = uic_cmd;
+ ufshcd_dme_cmd_log(hba, "send", hba->active_uic_cmd->command);
/* Write Args */
ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
@@ -2313,6 +2450,8 @@
if (ret)
ufsdbg_set_err_state(hba);
+ ufshcd_dme_cmd_log(hba, "cmp1", hba->active_uic_cmd->command);
+
spin_lock_irqsave(hba->host->host_lock, flags);
hba->active_uic_cmd = NULL;
spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -4148,6 +4287,8 @@
cmd->command, status);
ret = (status != PWR_OK) ? status : -1;
}
+ ufshcd_dme_cmd_log(hba, "cmp2", hba->active_uic_cmd->command);
+
out:
if (ret) {
ufsdbg_set_err_state(hba);
@@ -5474,7 +5615,7 @@
} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
if (hba->dev_cmd.complete) {
ufshcd_cond_add_cmd_trace(hba, index,
- "dev_complete");
+ "dcmp");
complete(hba->dev_cmd.complete);
}
}
@@ -5997,6 +6138,7 @@
ufshcd_print_host_state(hba);
ufshcd_print_pwr_info(hba);
ufshcd_print_tmrs(hba, hba->outstanding_tasks);
+ ufshcd_cmd_log_print(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
}
}
@@ -6503,6 +6645,7 @@
hba = shost_priv(host);
tag = cmd->request->tag;
+ ufshcd_cmd_log_print(hba);
lrbp = &hba->lrb[tag];
err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
@@ -9986,6 +10129,8 @@
*/
ufshcd_set_ufs_dev_active(hba);
+ ufshcd_cmd_log_init(hba);
+
async_schedule(ufshcd_async_scan, hba);
ufsdbg_add_debugfs(hba);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 11916ac..6966aac 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -515,7 +515,7 @@
bool is_scaled_up;
};
-#define UIC_ERR_REG_HIST_LENGTH 8
+#define UIC_ERR_REG_HIST_LENGTH 20
/**
* struct ufs_uic_err_reg_hist - keeps history of uic errors
* @pos: index to indicate cyclic buffer position
@@ -637,6 +637,27 @@
UFSHCD_DBG_PRINT_TMRS_EN | UFSHCD_DBG_PRINT_PWR_EN | \
UFSHCD_DBG_PRINT_HOST_STATE_EN)
+struct ufshcd_cmd_log_entry {
+ char *str; /* context like "send", "complete" */
+ char *cmd_type; /* "scsi", "query", "nop", "dme" */
+ u8 lun;
+ u8 cmd_id;
+ sector_t lba;
+ int transfer_len;
+ u8 idn; /* used only for query idn */
+ u32 doorbell;
+ u32 outstanding_reqs;
+ u32 seq_num;
+ unsigned int tag;
+ ktime_t tstamp;
+};
+
+struct ufshcd_cmd_log {
+ struct ufshcd_cmd_log_entry *entries;
+ int pos;
+ u32 seq_num;
+};
+
/**
* struct ufs_hba - per adapter private structure
* @mmio_base: UFSHCI base register address
@@ -860,6 +881,7 @@
struct ufs_clk_gating clk_gating;
struct ufs_hibern8_on_idle hibern8_on_idle;
+ struct ufshcd_cmd_log cmd_log;
/* Control to enable/disable host capabilities */
u32 caps;
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 3311380..0bdcc99 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -545,6 +545,25 @@
for the platforms that use APRv2.
Say M if you want to enable this module.
+config MSM_PERFORMANCE
+ tristate "MSM performance driver to support userspace hotplug requests"
+ default n
+ help
+ This driver is used to provide CPU hotplug support to userspace.
+ It ensures that no more than a user-specified number of CPUs stay
+ online at any given point in time. It can also restrict the max
+ or min frequency of a CPU cluster.
+
+config MSM_PERFORMANCE_HOTPLUG_ON
+ bool "Hotplug functionality through msm_performance turned on"
+ depends on MSM_PERFORMANCE
+ default y
+ help
+ If another core-control driver is present, turn off the core-control
+ capability of the msm_performance driver. Setting this flag to 'n'
+ compiles out the nodes needed for core-control functionality through
+ msm_performance.
+
config MSM_CDSP_LOADER
tristate "CDSP loader support"
depends on MSM_GLINK
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index ba00ef10..9d175cd 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -57,6 +57,8 @@
obj-$(CONFIG_MSM_PIL) += peripheral-loader.o
obj-$(CONFIG_MSM_AVTIMER) += avtimer.o
+obj-$(CONFIG_MSM_PERFORMANCE) += msm_performance.o
+
ifdef CONFIG_MSM_SUBSYSTEM_RESTART
obj-y += subsystem_notif.o
obj-y += subsystem_restart.o
diff --git a/drivers/soc/qcom/msm_performance.c b/drivers/soc/qcom/msm_performance.c
new file mode 100644
index 0000000..25e6a9d
--- /dev/null
+++ b/drivers/soc/qcom/msm_performance.c
@@ -0,0 +1,2771 @@
+/*
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/moduleparam.h>
+#include <linux/cpumask.h>
+#include <linux/cpufreq.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/tick.h>
+#include <trace/events/power.h>
+#include <linux/sysfs.h>
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/kthread.h>
+
+static struct mutex managed_cpus_lock;
+
+/* Maximum number of clusters that this module will manage */
+static unsigned int num_clusters;
+struct cluster {
+ cpumask_var_t cpus;
+ /* Number of CPUs to maintain online */
+ int max_cpu_request;
+ /* To track CPUs that the module decides to offline */
+ cpumask_var_t offlined_cpus;
+ /* stats for load detection */
+ /* IO */
+ u64 last_io_check_ts;
+ unsigned int iowait_enter_cycle_cnt;
+ unsigned int iowait_exit_cycle_cnt;
+ spinlock_t iowait_lock;
+ unsigned int cur_io_busy;
+ bool io_change;
+ /* CPU */
+ unsigned int mode;
+ bool mode_change;
+ u64 last_mode_check_ts;
+ unsigned int single_enter_cycle_cnt;
+ unsigned int single_exit_cycle_cnt;
+ unsigned int multi_enter_cycle_cnt;
+ unsigned int multi_exit_cycle_cnt;
+ spinlock_t mode_lock;
+ /* Perf Cluster Peak Loads */
+ unsigned int perf_cl_peak;
+ u64 last_perf_cl_check_ts;
+ bool perf_cl_detect_state_change;
+ unsigned int perf_cl_peak_enter_cycle_cnt;
+ unsigned int perf_cl_peak_exit_cycle_cnt;
+ spinlock_t perf_cl_peak_lock;
+ /* Tunables */
+ unsigned int single_enter_load;
+ unsigned int pcpu_multi_enter_load;
+ unsigned int perf_cl_peak_enter_load;
+ unsigned int single_exit_load;
+ unsigned int pcpu_multi_exit_load;
+ unsigned int perf_cl_peak_exit_load;
+ unsigned int single_enter_cycles;
+ unsigned int single_exit_cycles;
+ unsigned int multi_enter_cycles;
+ unsigned int multi_exit_cycles;
+ unsigned int perf_cl_peak_enter_cycles;
+ unsigned int perf_cl_peak_exit_cycles;
+ unsigned int current_freq;
+ spinlock_t timer_lock;
+ unsigned int timer_rate;
+ struct timer_list mode_exit_timer;
+ struct timer_list perf_cl_peak_mode_exit_timer;
+};
+
+static struct cluster **managed_clusters;
+static bool clusters_inited;
+
+/* Work to evaluate the onlining/offlining CPUs */
+static struct delayed_work evaluate_hotplug_work;
+
+/* To handle cpufreq min/max request */
+struct cpu_status {
+ unsigned int min;
+ unsigned int max;
+};
+static DEFINE_PER_CPU(struct cpu_status, cpu_stats);
+
+static unsigned int num_online_managed(struct cpumask *mask);
+static int init_cluster_control(void);
+static int rm_high_pwr_cost_cpus(struct cluster *cl);
+static int init_events_group(void);
+static DEFINE_PER_CPU(unsigned int, cpu_power_cost);
+struct events {
+ spinlock_t cpu_hotplug_lock;
+ bool cpu_hotplug;
+ bool init_success;
+};
+static struct events events_group;
+static struct task_struct *events_notify_thread;
+
+#define LAST_UPDATE_TOL USEC_PER_MSEC
+
+struct input_events {
+ unsigned int evt_x_cnt;
+ unsigned int evt_y_cnt;
+ unsigned int evt_pres_cnt;
+ unsigned int evt_dist_cnt;
+};
+struct trig_thr {
+ unsigned int pwr_cl_trigger_threshold;
+ unsigned int perf_cl_trigger_threshold;
+ unsigned int ip_evt_threshold;
+};
+struct load_stats {
+ u64 last_wallclock;
+ /* IO wait related */
+ u64 last_iowait;
+ unsigned int last_iopercent;
+ /* CPU load related */
+ unsigned int cpu_load;
+ /* CPU Freq */
+ unsigned int freq;
+};
+static bool input_events_handler_registered;
+static struct input_events *ip_evts;
+static struct trig_thr thr;
+static unsigned int use_input_evts_with_hi_slvt_detect;
+static int register_input_handler(void);
+static void unregister_input_handler(void);
+static DEFINE_PER_CPU(struct load_stats, cpu_load_stats);
+
+/* Bitmask to keep track of the workloads being detected */
+static unsigned int workload_detect;
+#define IO_DETECT 1
+#define MODE_DETECT 2
+#define PERF_CL_PEAK_DETECT 4
+
+/* IOwait related tunables */
+static unsigned int io_enter_cycles = 4;
+static unsigned int io_exit_cycles = 4;
+static u64 iowait_ceiling_pct = 25;
+static u64 iowait_floor_pct = 8;
+#define LAST_IO_CHECK_TOL (3 * USEC_PER_MSEC)
+
+static unsigned int aggr_iobusy;
+static unsigned int aggr_mode;
+
+static struct task_struct *notify_thread;
+
+static struct input_handler *handler;
+
+/* CPU workload detection related */
+#define NO_MODE (0)
+#define SINGLE (1)
+#define MULTI (2)
+#define MIXED (3)
+#define PERF_CL_PEAK (4)
+#define DEF_SINGLE_ENT 90
+#define DEF_PCPU_MULTI_ENT 85
+#define DEF_PERF_CL_PEAK_ENT 80
+#define DEF_SINGLE_EX 60
+#define DEF_PCPU_MULTI_EX 50
+#define DEF_PERF_CL_PEAK_EX 70
+#define DEF_SINGLE_ENTER_CYCLE 4
+#define DEF_SINGLE_EXIT_CYCLE 4
+#define DEF_MULTI_ENTER_CYCLE 4
+#define DEF_MULTI_EXIT_CYCLE 4
+#define DEF_PERF_CL_PEAK_ENTER_CYCLE 100
+#define DEF_PERF_CL_PEAK_EXIT_CYCLE 20
+#define LAST_LD_CHECK_TOL (2 * USEC_PER_MSEC)
+#define CLUSTER_0_THRESHOLD_FREQ 147000
+#define CLUSTER_1_THRESHOLD_FREQ 190000
+#define INPUT_EVENT_CNT_THRESHOLD 15
+#define MAX_LENGTH_CPU_STRING 256
+
+/**************************sysfs start********************************/
+
+static int set_num_clusters(const char *buf, const struct kernel_param *kp)
+{
+ unsigned int val;
+
+ if (sscanf(buf, "%u\n", &val) != 1)
+ return -EINVAL;
+ if (num_clusters)
+ return -EINVAL;
+
+ num_clusters = val;
+
+ if (init_cluster_control()) {
+ num_clusters = 0;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int get_num_clusters(char *buf, const struct kernel_param *kp)
+{
+ return snprintf(buf, PAGE_SIZE, "%u", num_clusters);
+}
+
+static const struct kernel_param_ops param_ops_num_clusters = {
+ .set = set_num_clusters,
+ .get = get_num_clusters,
+};
+device_param_cb(num_clusters, &param_ops_num_clusters, NULL, 0644);
+
+static int set_max_cpus(const char *buf, const struct kernel_param *kp)
+{
+ unsigned int i, ntokens = 0;
+ const char *cp = buf;
+ int val;
+
+ if (!clusters_inited)
+ return -EINVAL;
+
+ while ((cp = strpbrk(cp + 1, ":")))
+ ntokens++;
+
+ if (ntokens != (num_clusters - 1))
+ return -EINVAL;
+
+ cp = buf;
+ for (i = 0; i < num_clusters; i++) {
+
+ if (sscanf(cp, "%d\n", &val) != 1)
+ return -EINVAL;
+ if (val > (int)cpumask_weight(managed_clusters[i]->cpus))
+ return -EINVAL;
+
+ managed_clusters[i]->max_cpu_request = val;
+
+ cp = strnchr(cp, strlen(cp), ':');
+ cp++;
+ trace_set_max_cpus(cpumask_bits(managed_clusters[i]->cpus)[0],
+ val);
+ }
+
+ schedule_delayed_work(&evaluate_hotplug_work, 0);
+
+ return 0;
+}
+
+static int get_max_cpus(char *buf, const struct kernel_param *kp)
+{
+ int i, cnt = 0;
+
+ if (!clusters_inited)
+ return cnt;
+
+ for (i = 0; i < num_clusters; i++)
+ cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+ "%d:", managed_clusters[i]->max_cpu_request);
+ cnt--;
+ cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, " ");
+ return cnt;
+}
+
+static const struct kernel_param_ops param_ops_max_cpus = {
+ .set = set_max_cpus,
+ .get = get_max_cpus,
+};
+
+#ifdef CONFIG_MSM_PERFORMANCE_HOTPLUG_ON
+device_param_cb(max_cpus, &param_ops_max_cpus, NULL, 0644);
+#endif
+
+static int set_managed_cpus(const char *buf, const struct kernel_param *kp)
+{
+ int i, ret;
+ struct cpumask tmp_mask;
+
+ if (!clusters_inited)
+ return -EINVAL;
+
+ ret = cpulist_parse(buf, &tmp_mask);
+
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num_clusters; i++) {
+ if (cpumask_empty(managed_clusters[i]->cpus)) {
+ mutex_lock(&managed_cpus_lock);
+ cpumask_copy(managed_clusters[i]->cpus, &tmp_mask);
+ cpumask_clear(managed_clusters[i]->offlined_cpus);
+ mutex_unlock(&managed_cpus_lock);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int get_managed_cpus(char *buf, const struct kernel_param *kp)
+{
+ int i, cnt = 0, total_cnt = 0;
+ char tmp[MAX_LENGTH_CPU_STRING] = "";
+
+ if (!clusters_inited)
+ return cnt;
+
+ for (i = 0; i < num_clusters; i++) {
+ cnt = cpumap_print_to_pagebuf(true, buf,
+ managed_clusters[i]->cpus);
+ if ((i + 1) < num_clusters &&
+ (total_cnt + cnt + 1) <= MAX_LENGTH_CPU_STRING) {
+ snprintf(tmp + total_cnt, cnt, "%s", buf);
+ tmp[total_cnt + cnt - 1] = ':';
+ tmp[total_cnt + cnt] = '\0';
+ total_cnt += cnt;
+ } else if ((i + 1) == num_clusters &&
+ (total_cnt + cnt) <= MAX_LENGTH_CPU_STRING) {
+ snprintf(tmp + total_cnt, cnt, "%s", buf);
+ total_cnt += cnt;
+ } else {
+ pr_err("invalid string for managed_cpu:%s%s\n", tmp,
+ buf);
+ break;
+ }
+ }
+ snprintf(buf, PAGE_SIZE, "%s", tmp);
+ return total_cnt;
+}
+
+static const struct kernel_param_ops param_ops_managed_cpus = {
+ .set = set_managed_cpus,
+ .get = get_managed_cpus,
+};
+device_param_cb(managed_cpus, &param_ops_managed_cpus, NULL, 0644);
+
+/* Read-only node: To display all the online managed CPUs */
+static int get_managed_online_cpus(char *buf, const struct kernel_param *kp)
+{
+ int i, cnt = 0, total_cnt = 0;
+ char tmp[MAX_LENGTH_CPU_STRING] = "";
+ struct cpumask tmp_mask;
+ struct cluster *i_cl;
+
+ if (!clusters_inited)
+ return cnt;
+
+ for (i = 0; i < num_clusters; i++) {
+ i_cl = managed_clusters[i];
+
+ cpumask_clear(&tmp_mask);
+ cpumask_complement(&tmp_mask, i_cl->offlined_cpus);
+ cpumask_and(&tmp_mask, i_cl->cpus, &tmp_mask);
+
+ cnt = cpumap_print_to_pagebuf(true, buf, &tmp_mask);
+ if ((i + 1) < num_clusters &&
+ (total_cnt + cnt + 1) <= MAX_LENGTH_CPU_STRING) {
+ snprintf(tmp + total_cnt, cnt, "%s", buf);
+ tmp[total_cnt + cnt - 1] = ':';
+ tmp[total_cnt + cnt] = '\0';
+ total_cnt += cnt;
+ } else if ((i + 1) == num_clusters &&
+ (total_cnt + cnt) <= MAX_LENGTH_CPU_STRING) {
+ snprintf(tmp + total_cnt, cnt, "%s", buf);
+ total_cnt += cnt;
+ } else {
+ pr_err("invalid string for managed_cpu:%s%s\n", tmp,
+ buf);
+ break;
+ }
+ }
+ snprintf(buf, PAGE_SIZE, "%s", tmp);
+ return total_cnt;
+}
+
+static const struct kernel_param_ops param_ops_managed_online_cpus = {
+ .get = get_managed_online_cpus,
+};
+
+#ifdef CONFIG_MSM_PERFORMANCE_HOTPLUG_ON
+device_param_cb(managed_online_cpus, &param_ops_managed_online_cpus,
+ NULL, 0444);
+#endif
+/*
+ * Userspace sends cpu#:min_freq_value to vote for min_freq_value as the new
+ * scaling_min. To withdraw its vote it needs to enter cpu#:0
+ */
+static int set_cpu_min_freq(const char *buf, const struct kernel_param *kp)
+{
+ int i, j, ntokens = 0;
+ unsigned int val, cpu;
+ const char *cp = buf;
+ struct cpu_status *i_cpu_stats;
+ struct cpufreq_policy policy;
+ cpumask_var_t limit_mask;
+ int ret;
+
+ while ((cp = strpbrk(cp + 1, " :")))
+ ntokens++;
+
+ /* CPU:value pair */
+ if (!(ntokens % 2))
+ return -EINVAL;
+
+ cp = buf;
+ cpumask_clear(limit_mask);
+ for (i = 0; i < ntokens; i += 2) {
+ if (sscanf(cp, "%u:%u", &cpu, &val) != 2)
+ return -EINVAL;
+ if (cpu > (num_present_cpus() - 1))
+ return -EINVAL;
+
+ i_cpu_stats = &per_cpu(cpu_stats, cpu);
+
+ i_cpu_stats->min = val;
+ cpumask_set_cpu(cpu, limit_mask);
+
+ cp = strnchr(cp, strlen(cp), ' ');
+ cp++;
+ }
+
+ /*
+ * Since on synchronous systems the policy is shared amongst multiple
+ * CPUs, only one CPU needs to be updated for the limit to be
+ * reflected for the entire cluster. We can avoid updating the policy
+ * of the other CPUs in the cluster once it is done for at least one
+ * CPU in the cluster.
+ */
+ get_online_cpus();
+ for_each_cpu(i, limit_mask) {
+ i_cpu_stats = &per_cpu(cpu_stats, i);
+
+ if (cpufreq_get_policy(&policy, i))
+ continue;
+
+ if (cpu_online(i) && (policy.min != i_cpu_stats->min)) {
+ ret = cpufreq_update_policy(i);
+ if (ret)
+ continue;
+ }
+ for_each_cpu(j, policy.related_cpus)
+ cpumask_clear_cpu(j, limit_mask);
+ }
+ put_online_cpus();
+
+ return 0;
+}
+
+static int get_cpu_min_freq(char *buf, const struct kernel_param *kp)
+{
+ int cnt = 0, cpu;
+
+ for_each_present_cpu(cpu) {
+ cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+ "%d:%u ", cpu, per_cpu(cpu_stats, cpu).min);
+ }
+ cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, "\n");
+ return cnt;
+}
+
+static const struct kernel_param_ops param_ops_cpu_min_freq = {
+ .set = set_cpu_min_freq,
+ .get = get_cpu_min_freq,
+};
+module_param_cb(cpu_min_freq, &param_ops_cpu_min_freq, NULL, 0644);
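
A hedged userspace sketch of the voting interface; the node path follows from the module_param_cb() registration above, and the frequency values are placeholders:

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		const char *node =
			"/sys/module/msm_performance/parameters/cpu_min_freq";
		/* 1.2 GHz floor on CPU0; withdraw any vote on CPU4. */
		const char *vote = "0:1200000 4:0";
		int fd = open(node, O_WRONLY);

		if (fd < 0)
			return 1;
		write(fd, vote, strlen(vote));
		close(fd);
		return 0;
	}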
+
+/*
+ * Userspace sends cpu#:max_freq_value to vote for max_freq_value as the new
+ * scaling_max. To withdraw its vote it needs to enter cpu#:UINT_MAX
+ */
+static int set_cpu_max_freq(const char *buf, const struct kernel_param *kp)
+{
+ int i, j, ntokens = 0;
+ unsigned int val, cpu;
+ const char *cp = buf;
+ struct cpu_status *i_cpu_stats;
+ struct cpufreq_policy policy;
+ cpumask_var_t limit_mask;
+ int ret;
+
+ while ((cp = strpbrk(cp + 1, " :")))
+ ntokens++;
+
+ /* CPU:value pair */
+ if (!(ntokens % 2))
+ return -EINVAL;
+
+ cp = buf;
+ cpumask_clear(limit_mask);
+ for (i = 0; i < ntokens; i += 2) {
+ if (sscanf(cp, "%u:%u", &cpu, &val) != 2)
+ return -EINVAL;
+ if (cpu > (num_present_cpus() - 1))
+ return -EINVAL;
+
+ i_cpu_stats = &per_cpu(cpu_stats, cpu);
+
+ i_cpu_stats->max = val;
+ cpumask_set_cpu(cpu, limit_mask);
+
+ cp = strnchr(cp, strlen(cp), ' ');
+ cp++;
+ }
+
+ get_online_cpus();
+ for_each_cpu(i, limit_mask) {
+ i_cpu_stats = &per_cpu(cpu_stats, i);
+ if (cpufreq_get_policy(&policy, i))
+ continue;
+
+ if (cpu_online(i) && (policy.max != i_cpu_stats->max)) {
+ ret = cpufreq_update_policy(i);
+ if (ret)
+ continue;
+ }
+ for_each_cpu(j, policy.related_cpus)
+ cpumask_clear_cpu(j, limit_mask);
+ }
+ put_online_cpus();
+
+ return 0;
+}
+
+static int get_cpu_max_freq(char *buf, const struct kernel_param *kp)
+{
+ int cnt = 0, cpu;
+
+ for_each_present_cpu(cpu) {
+ cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+ "%d:%u ", cpu, per_cpu(cpu_stats, cpu).max);
+ }
+ cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, "\n");
+ return cnt;
+}
+
+static const struct kernel_param_ops param_ops_cpu_max_freq = {
+ .set = set_cpu_max_freq,
+ .get = get_cpu_max_freq,
+};
+module_param_cb(cpu_max_freq, &param_ops_cpu_max_freq, NULL, 0644);
+
+static int set_ip_evt_trigger_threshold(const char *buf,
+ const struct kernel_param *kp)
+{
+ unsigned int val;
+
+ if (sscanf(buf, "%u\n", &val) != 1)
+ return -EINVAL;
+
+ thr.ip_evt_threshold = val;
+ return 0;
+}
+
+static int get_ip_evt_trigger_threshold(char *buf,
+ const struct kernel_param *kp)
+{
+ return snprintf(buf, PAGE_SIZE, "%u", thr.ip_evt_threshold);
+}
+
+static const struct kernel_param_ops param_ops_ip_evt_trig_thr = {
+ .set = set_ip_evt_trigger_threshold,
+ .get = get_ip_evt_trigger_threshold,
+};
+device_param_cb(ip_evt_trig_thr, &param_ops_ip_evt_trig_thr, NULL, 0644);
+
+
+static int set_perf_cl_trigger_threshold(const char *buf,
+ const struct kernel_param *kp)
+{
+ unsigned int val;
+
+ if (sscanf(buf, "%u\n", &val) != 1)
+ return -EINVAL;
+
+ thr.perf_cl_trigger_threshold = val;
+ return 0;
+}
+
+static int get_perf_cl_trigger_threshold(char *buf,
+ const struct kernel_param *kp)
+{
+ return snprintf(buf, PAGE_SIZE, "%u", thr.perf_cl_trigger_threshold);
+}
+
+static const struct kernel_param_ops param_ops_perf_trig_thr = {
+ .set = set_perf_cl_trigger_threshold,
+ .get = get_perf_cl_trigger_threshold,
+};
+device_param_cb(perf_cl_trig_thr, &param_ops_perf_trig_thr, NULL, 0644);
+
+
+static int set_pwr_cl_trigger_threshold(const char *buf,
+ const struct kernel_param *kp)
+{
+ unsigned int val;
+
+ if (sscanf(buf, "%u\n", &val) != 1)
+ return -EINVAL;
+
+ thr.pwr_cl_trigger_threshold = val;
+ return 0;
+}
+
+static int get_pwr_cl_trigger_threshold(char *buf,
+ const struct kernel_param *kp)
+{
+ return snprintf(buf, PAGE_SIZE, "%u", thr.pwr_cl_trigger_threshold);
+}
+
+static const struct kernel_param_ops param_ops_pwr_trig_thr = {
+ .set = set_pwr_cl_trigger_threshold,
+ .get = get_pwr_cl_trigger_threshold,
+};
+device_param_cb(pwr_cl_trig_thr, &param_ops_pwr_trig_thr, NULL, 0644);
+
+static int freq_greater_than_threshold(struct cluster *cl, int idx)
+{
+ int rc = 0;
+ /* Check for Cluster 0 */
+ if (!idx && cl->current_freq >= thr.pwr_cl_trigger_threshold)
+ rc = 1;
+ /* Check for Cluster 1 */
+ if (idx && cl->current_freq >= thr.perf_cl_trigger_threshold)
+ rc = 1;
+ return rc;
+}
+
+static bool input_events_greater_than_threshold(void)
+{
+ bool rc = false;
+
+ if ((ip_evts->evt_x_cnt >= thr.ip_evt_threshold) ||
+ (ip_evts->evt_y_cnt >= thr.ip_evt_threshold) ||
+ !use_input_evts_with_hi_slvt_detect)
+ rc = true;
+
+ return rc;
+}
+
+static int set_single_enter_load(const char *buf, const struct kernel_param *kp)
+{
+ unsigned int val, i, ntokens = 0;
+ const char *cp = buf;
+ unsigned int bytes_left;
+
+ if (!clusters_inited)
+ return -EINVAL;
+
+ while ((cp = strpbrk(cp + 1, ":")))
+ ntokens++;
+
+ if (ntokens != (num_clusters - 1))
+ return -EINVAL;
+
+ cp = buf;
+ for (i = 0; i < num_clusters; i++) {
+
+ if (sscanf(cp, "%u\n", &val) != 1)
+ return -EINVAL;
+
+ if (val < managed_clusters[i]->single_exit_load)
+ return -EINVAL;
+
+ managed_clusters[i]->single_enter_load = val;
+
+ bytes_left = PAGE_SIZE - (cp - buf);
+ cp = strnchr(cp, bytes_left, ':');
+ cp++;
+ }
+
+ return 0;
+}
+
+static int get_single_enter_load(char *buf, const struct kernel_param *kp)
+{
+ int i, cnt = 0;
+
+ if (!clusters_inited)
+ return cnt;
+
+ for (i = 0; i < num_clusters; i++)
+ cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+ "%u:", managed_clusters[i]->single_enter_load);
+ cnt--;
+ cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, " ");
+ return cnt;
+}
+
+static const struct kernel_param_ops param_ops_single_enter_load = {
+ .set = set_single_enter_load,
+ .get = get_single_enter_load,
+};
+device_param_cb(single_enter_load, &param_ops_single_enter_load, NULL, 0644);
+
+static int set_single_exit_load(const char *buf, const struct kernel_param *kp)
+{
+ unsigned int val, i, ntokens = 0;
+ const char *cp = buf;
+ unsigned int bytes_left;
+
+ if (!clusters_inited)
+ return -EINVAL;
+
+ while ((cp = strpbrk(cp + 1, ":")))
+ ntokens++;
+
+ if (ntokens != (num_clusters - 1))
+ return -EINVAL;
+
+ cp = buf;
+ for (i = 0; i < num_clusters; i++) {
+
+ if (sscanf(cp, "%u\n", &val) != 1)
+ return -EINVAL;
+
+ if (val > managed_clusters[i]->single_enter_load)
+ return -EINVAL;
+
+ managed_clusters[i]->single_exit_load = val;
+
+ bytes_left = PAGE_SIZE - (cp - buf);
+ cp = strnchr(cp, bytes_left, ':');
+ cp++;
+ }
+
+ return 0;
+}
+
+static int get_single_exit_load(char *buf, const struct kernel_param *kp)
+{
+ int i, cnt = 0;
+
+ if (!clusters_inited)
+ return cnt;
+
+ for (i = 0; i < num_clusters; i++)
+ cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+ "%u:", managed_clusters[i]->single_exit_load);
+ cnt--;
+ cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, " ");
+ return cnt;
+}
+
+static const struct kernel_param_ops param_ops_single_exit_load = {
+ .set = set_single_exit_load,
+ .get = get_single_exit_load,
+};
+device_param_cb(single_exit_load, &param_ops_single_exit_load, NULL, 0644);
+
+static int set_pcpu_multi_enter_load(const char *buf,
+ const struct kernel_param *kp)
+{
+ unsigned int val, i, ntokens = 0;
+ const char *cp = buf;
+ unsigned int bytes_left;
+
+ if (!clusters_inited)
+ return -EINVAL;
+
+ while ((cp = strpbrk(cp + 1, ":")))
+ ntokens++;
+
+ if (ntokens != (num_clusters - 1))
+ return -EINVAL;
+
+ cp = buf;
+ for (i = 0; i < num_clusters; i++) {
+
+ if (sscanf(cp, "%u\n", &val) != 1)
+ return -EINVAL;
+
+ if (val < managed_clusters[i]->pcpu_multi_exit_load)
+ return -EINVAL;
+
+ managed_clusters[i]->pcpu_multi_enter_load = val;
+
+ bytes_left = PAGE_SIZE - (cp - buf);
+ cp = strnchr(cp, bytes_left, ':');
+ cp++;
+ }
+
+ return 0;
+}
+
+static int get_pcpu_multi_enter_load(char *buf, const struct kernel_param *kp)
+{
+ int i, cnt = 0;
+
+ if (!clusters_inited)
+ return cnt;
+
+ for (i = 0; i < num_clusters; i++)
+ cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+ "%u:", managed_clusters[i]->pcpu_multi_enter_load);
+ cnt--;
+ cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, " ");
+ return cnt;
+}
+
+static const struct kernel_param_ops param_ops_pcpu_multi_enter_load = {
+ .set = set_pcpu_multi_enter_load,
+ .get = get_pcpu_multi_enter_load,
+};
+device_param_cb(pcpu_multi_enter_load, &param_ops_pcpu_multi_enter_load,
+ NULL, 0644);
+
+static int set_pcpu_multi_exit_load(const char *buf,
+ const struct kernel_param *kp)
+{
+ unsigned int val, i, ntokens = 0;
+ const char *cp = buf;
+ unsigned int bytes_left;
+
+ if (!clusters_inited)
+ return -EINVAL;
+
+ while ((cp = strpbrk(cp + 1, ":")))
+ ntokens++;
+
+ if (ntokens != (num_clusters - 1))
+ return -EINVAL;
+
+ cp = buf;
+ for (i = 0; i < num_clusters; i++) {
+
+ if (sscanf(cp, "%u\n", &val) != 1)
+ return -EINVAL;
+
+ if (val > managed_clusters[i]->pcpu_multi_enter_load)
+ return -EINVAL;
+
+ managed_clusters[i]->pcpu_multi_exit_load = val;
+
+ bytes_left = PAGE_SIZE - (cp - buf);
+ cp = strnchr(cp, bytes_left, ':');
+ cp++;
+ }
+
+ return 0;
+}
+
+static int get_pcpu_multi_exit_load(char *buf, const struct kernel_param *kp)
+{
+ int i, cnt = 0;
+
+ if (!clusters_inited)
+ return cnt;
+
+ for (i = 0; i < num_clusters; i++)
+ cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+ "%u:", managed_clusters[i]->pcpu_multi_exit_load);
+ cnt--;
+ cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, " ");
+ return cnt;
+}
+
+static const struct kernel_param_ops param_ops_pcpu_multi_exit_load = {
+ .set = set_pcpu_multi_exit_load,
+ .get = get_pcpu_multi_exit_load,
+};
+device_param_cb(pcpu_multi_exit_load, &param_ops_pcpu_multi_exit_load,
+ NULL, 0644);
+static int set_perf_cl_peak_enter_load(const char *buf,
+ const struct kernel_param *kp)
+{
+ unsigned int val, i, ntokens = 0;
+ const char *cp = buf;
+ unsigned int bytes_left;
+
+ if (!clusters_inited)
+ return -EINVAL;
+
+ while ((cp = strpbrk(cp + 1, ":")))
+ ntokens++;
+
+ if (ntokens != (num_clusters - 1))
+ return -EINVAL;
+
+ cp = buf;
+ for (i = 0; i < num_clusters; i++) {
+
+ if (sscanf(cp, "%u\n", &val) != 1)
+ return -EINVAL;
+
+ if (val < managed_clusters[i]->perf_cl_peak_exit_load)
+ return -EINVAL;
+
+ managed_clusters[i]->perf_cl_peak_enter_load = val;
+
+ bytes_left = PAGE_SIZE - (cp - buf);
+ cp = strnchr(cp, bytes_left, ':');
+ cp++;
+ }
+
+ return 0;
+}
+
+static int get_perf_cl_peak_enter_load(char *buf,
+ const struct kernel_param *kp)
+{
+ int i, cnt = 0;
+
+ if (!clusters_inited)
+ return cnt;
+
+ for (i = 0; i < num_clusters; i++)
+ cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+ "%u:", managed_clusters[i]->perf_cl_peak_enter_load);
+ cnt--;
+ cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, " ");
+ return cnt;
+}
+
+static const struct kernel_param_ops param_ops_perf_cl_peak_enter_load = {
+ .set = set_perf_cl_peak_enter_load,
+ .get = get_perf_cl_peak_enter_load,
+};
+device_param_cb(perf_cl_peak_enter_load, &param_ops_perf_cl_peak_enter_load,
+ NULL, 0644);
+
+static int set_perf_cl_peak_exit_load(const char *buf,
+ const struct kernel_param *kp)
+{
+ unsigned int val, i, ntokens = 0;
+ const char *cp = buf;
+ unsigned int bytes_left;
+
+ if (!clusters_inited)
+ return -EINVAL;
+
+ while ((cp = strpbrk(cp + 1, ":")))
+ ntokens++;
+
+ if (ntokens != (num_clusters - 1))
+ return -EINVAL;
+
+ cp = buf;
+ for (i = 0; i < num_clusters; i++) {
+
+ if (sscanf(cp, "%u\n", &val) != 1)
+ return -EINVAL;
+
+ if (val > managed_clusters[i]->perf_cl_peak_enter_load)
+ return -EINVAL;
+
+ managed_clusters[i]->perf_cl_peak_exit_load = val;
+
+ bytes_left = PAGE_SIZE - (cp - buf);
+ cp = strnchr(cp, bytes_left, ':');
+ cp++;
+ }
+
+ return 0;
+}
+
+static int get_perf_cl_peak_exit_load(char *buf,
+ const struct kernel_param *kp)
+{
+ int i, cnt = 0;
+
+ if (!clusters_inited)
+ return cnt;
+
+ for (i = 0; i < num_clusters; i++)
+ cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+ "%u:", managed_clusters[i]->perf_cl_peak_exit_load);
+ cnt--;
+ cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, " ");
+ return cnt;
+}
+
+static const struct kernel_param_ops param_ops_perf_cl_peak_exit_load = {
+ .set = set_perf_cl_peak_exit_load,
+ .get = get_perf_cl_peak_exit_load,
+};
+device_param_cb(perf_cl_peak_exit_load, &param_ops_perf_cl_peak_exit_load,
+ NULL, 0644);
+
+static int set_perf_cl_peak_enter_cycles(const char *buf,
+ const struct kernel_param *kp)
+{
+ unsigned int val, i, ntokens = 0;
+ const char *cp = buf;
+ unsigned int bytes_left;
+
+ if (!clusters_inited)
+ return -EINVAL;
+
+ while ((cp = strpbrk(cp + 1, ":")))
+ ntokens++;
+
+ if (ntokens != (num_clusters - 1))
+ return -EINVAL;
+
+ cp = buf;
+ for (i = 0; i < num_clusters; i++) {
+
+ if (sscanf(cp, "%u\n", &val) != 1)
+ return -EINVAL;
+
+ managed_clusters[i]->perf_cl_peak_enter_cycles = val;
+
+ bytes_left = PAGE_SIZE - (cp - buf);
+ cp = strnchr(cp, bytes_left, ':');
+ cp++;
+ }
+
+ return 0;
+}
+
+static int get_perf_cl_peak_enter_cycles(char *buf,
+ const struct kernel_param *kp)
+{
+ int i, cnt = 0;
+
+ if (!clusters_inited)
+ return cnt;
+
+ for (i = 0; i < num_clusters; i++)
+ cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, "%u:",
+ managed_clusters[i]->perf_cl_peak_enter_cycles);
+ cnt--;
+ cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, " ");
+ return cnt;
+}
+
+static const struct kernel_param_ops param_ops_perf_cl_peak_enter_cycles = {
+ .set = set_perf_cl_peak_enter_cycles,
+ .get = get_perf_cl_peak_enter_cycles,
+};
+device_param_cb(perf_cl_peak_enter_cycles, &param_ops_perf_cl_peak_enter_cycles,
+ NULL, 0644);
+
+
+static int set_perf_cl_peak_exit_cycles(const char *buf,
+ const struct kernel_param *kp)
+{
+ unsigned int val, i, ntokens = 0;
+ const char *cp = buf;
+ unsigned int bytes_left;
+
+ if (!clusters_inited)
+ return -EINVAL;
+
+ while ((cp = strpbrk(cp + 1, ":")))
+ ntokens++;
+
+ if (ntokens != (num_clusters - 1))
+ return -EINVAL;
+
+ cp = buf;
+ for (i = 0; i < num_clusters; i++) {
+
+ if (sscanf(cp, "%u\n", &val) != 1)
+ return -EINVAL;
+
+ managed_clusters[i]->perf_cl_peak_exit_cycles = val;
+
+ bytes_left = PAGE_SIZE - (cp - buf);
+ cp = strnchr(cp, bytes_left, ':');
+ cp++;
+ }
+
+ return 0;
+}
+
+static int get_perf_cl_peak_exit_cycles(char *buf,
+ const struct kernel_param *kp)
+{
+ int i, cnt = 0;
+
+ if (!clusters_inited)
+ return cnt;
+
+ for (i = 0; i < num_clusters; i++)
+ cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+ "%u:", managed_clusters[i]->perf_cl_peak_exit_cycles);
+ cnt--;
+ cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, " ");
+ return cnt;
+}
+
+static const struct kernel_param_ops param_ops_perf_cl_peak_exit_cycles = {
+ .set = set_perf_cl_peak_exit_cycles,
+ .get = get_perf_cl_peak_exit_cycles,
+};
+device_param_cb(perf_cl_peak_exit_cycles, &param_ops_perf_cl_peak_exit_cycles,
+ NULL, 0644);
+
+static int set_single_enter_cycles(const char *buf,
+ const struct kernel_param *kp)
+{
+ unsigned int val, i, ntokens = 0;
+ const char *cp = buf;
+ unsigned int bytes_left;
+
+ if (!clusters_inited)
+ return -EINVAL;
+
+ while ((cp = strpbrk(cp + 1, ":")))
+ ntokens++;
+
+ if (ntokens != (num_clusters - 1))
+ return -EINVAL;
+
+ cp = buf;
+ for (i = 0; i < num_clusters; i++) {
+ if (sscanf(cp, "%u\n", &val) != 1)
+ return -EINVAL;
+
+ managed_clusters[i]->single_enter_cycles = val;
+
+ bytes_left = PAGE_SIZE - (cp - buf);
+ cp = strnchr(cp, bytes_left, ':');
+ cp++;
+ }
+
+ return 0;
+}
+
+static int get_single_enter_cycles(char *buf, const struct kernel_param *kp)
+{
+ int i, cnt = 0;
+
+ if (!clusters_inited)
+ return cnt;
+
+ for (i = 0; i < num_clusters; i++)
+ cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, "%u:",
+ managed_clusters[i]->single_enter_cycles);
+ cnt--;
+ cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, " ");
+ return cnt;
+}
+
+static const struct kernel_param_ops param_ops_single_enter_cycles = {
+ .set = set_single_enter_cycles,
+ .get = get_single_enter_cycles,
+};
+device_param_cb(single_enter_cycles, &param_ops_single_enter_cycles,
+ NULL, 0644);
+
+static int set_single_exit_cycles(const char *buf,
+ const struct kernel_param *kp)
+{
+ unsigned int val, i, ntokens = 0;
+ const char *cp = buf;
+ unsigned int bytes_left;
+
+ if (!clusters_inited)
+ return -EINVAL;
+
+ while ((cp = strpbrk(cp + 1, ":")))
+ ntokens++;
+
+ if (ntokens != (num_clusters - 1))
+ return -EINVAL;
+
+ cp = buf;
+ for (i = 0; i < num_clusters; i++) {
+ if (sscanf(cp, "%u\n", &val) != 1)
+ return -EINVAL;
+
+ managed_clusters[i]->single_exit_cycles = val;
+
+ bytes_left = PAGE_SIZE - (cp - buf);
+ cp = strnchr(cp, bytes_left, ':');
+ cp++;
+ }
+
+ return 0;
+}
+
+static int get_single_exit_cycles(char *buf, const struct kernel_param *kp)
+{
+ int i, cnt = 0;
+
+ if (!clusters_inited)
+ return cnt;
+
+ for (i = 0; i < num_clusters; i++)
+ cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+ "%u:", managed_clusters[i]->single_exit_cycles);
+ cnt--;
+ cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, " ");
+ return cnt;
+}
+
+static const struct kernel_param_ops param_ops_single_exit_cycles = {
+ .set = set_single_exit_cycles,
+ .get = get_single_exit_cycles,
+};
+device_param_cb(single_exit_cycles, &param_ops_single_exit_cycles, NULL, 0644);
+
+static int set_multi_enter_cycles(const char *buf,
+ const struct kernel_param *kp)
+{
+ unsigned int val, i, ntokens = 0;
+ const char *cp = buf;
+ unsigned int bytes_left;
+
+ if (!clusters_inited)
+ return -EINVAL;
+
+ while ((cp = strpbrk(cp + 1, ":")))
+ ntokens++;
+
+ if (ntokens != (num_clusters - 1))
+ return -EINVAL;
+
+ cp = buf;
+ for (i = 0; i < num_clusters; i++) {
+ if (sscanf(cp, "%u\n", &val) != 1)
+ return -EINVAL;
+
+ managed_clusters[i]->multi_enter_cycles = val;
+
+ bytes_left = PAGE_SIZE - (cp - buf);
+ cp = strnchr(cp, bytes_left, ':');
+ cp++;
+ }
+
+ return 0;
+}
+
+static int get_multi_enter_cycles(char *buf, const struct kernel_param *kp)
+{
+ int i, cnt = 0;
+
+ if (!clusters_inited)
+ return cnt;
+
+ for (i = 0; i < num_clusters; i++)
+ cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+ "%u:", managed_clusters[i]->multi_enter_cycles);
+ cnt--;
+ cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, " ");
+ return cnt;
+}
+
+static const struct kernel_param_ops param_ops_multi_enter_cycles = {
+ .set = set_multi_enter_cycles,
+ .get = get_multi_enter_cycles,
+};
+device_param_cb(multi_enter_cycles, &param_ops_multi_enter_cycles, NULL, 0644);
+
+static int set_multi_exit_cycles(const char *buf, const struct kernel_param *kp)
+{
+ unsigned int val, i, ntokens = 0;
+ const char *cp = buf;
+ unsigned int bytes_left;
+
+ if (!clusters_inited)
+ return -EINVAL;
+
+ while ((cp = strpbrk(cp + 1, ":")))
+ ntokens++;
+
+ if (ntokens != (num_clusters - 1))
+ return -EINVAL;
+
+ cp = buf;
+ for (i = 0; i < num_clusters; i++) {
+ if (sscanf(cp, "%u\n", &val) != 1)
+ return -EINVAL;
+
+ managed_clusters[i]->multi_exit_cycles = val;
+
+ bytes_left = PAGE_SIZE - (cp - buf);
+ cp = strnchr(cp, bytes_left, ':');
+ cp++;
+ }
+
+ return 0;
+}
+
+static int get_multi_exit_cycles(char *buf, const struct kernel_param *kp)
+{
+ int i, cnt = 0;
+
+ if (!clusters_inited)
+ return cnt;
+
+ for (i = 0; i < num_clusters; i++)
+ cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+ "%u:", managed_clusters[i]->multi_exit_cycles);
+ cnt--;
+ cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, " ");
+ return cnt;
+}
+
+static const struct kernel_param_ops param_ops_multi_exit_cycles = {
+ .set = set_multi_exit_cycles,
+ .get = get_multi_exit_cycles,
+};
+device_param_cb(multi_exit_cycles, &param_ops_multi_exit_cycles, NULL, 0644);
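+
+/*
+ * The *_enter_cycles/*_exit_cycles tunables above debounce mode
+ * transitions: the corresponding load condition must hold for that many
+ * consecutive governor samples before a mode is entered or exited.
+ */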
+
+static int set_io_enter_cycles(const char *buf, const struct kernel_param *kp)
+{
+ unsigned int val;
+
+ if (sscanf(buf, "%u\n", &val) != 1)
+ return -EINVAL;
+
+ io_enter_cycles = val;
+
+ return 0;
+}
+
+static int get_io_enter_cycles(char *buf, const struct kernel_param *kp)
+{
+ return snprintf(buf, PAGE_SIZE, "%u", io_enter_cycles);
+}
+
+static const struct kernel_param_ops param_ops_io_enter_cycles = {
+ .set = set_io_enter_cycles,
+ .get = get_io_enter_cycles,
+};
+device_param_cb(io_enter_cycles, &param_ops_io_enter_cycles, NULL, 0644);
+
+static int set_io_exit_cycles(const char *buf, const struct kernel_param *kp)
+{
+ unsigned int val;
+
+ if (sscanf(buf, "%u\n", &val) != 1)
+ return -EINVAL;
+
+ io_exit_cycles = val;
+
+ return 0;
+}
+
+static int get_io_exit_cycles(char *buf, const struct kernel_param *kp)
+{
+ return snprintf(buf, PAGE_SIZE, "%u", io_exit_cycles);
+}
+
+static const struct kernel_param_ops param_ops_io_exit_cycles = {
+ .set = set_io_exit_cycles,
+ .get = get_io_exit_cycles,
+};
+device_param_cb(io_exit_cycles, &param_ops_io_exit_cycles, NULL, 0644);
+
+static int set_iowait_floor_pct(const char *buf, const struct kernel_param *kp)
+{
+ u64 val;
+
+ if (sscanf(buf, "%llu\n", &val) != 1)
+ return -EINVAL;
+ if (val > iowait_ceiling_pct)
+ return -EINVAL;
+
+ iowait_floor_pct = val;
+
+ return 0;
+}
+
+static int get_iowait_floor_pct(char *buf, const struct kernel_param *kp)
+{
+ return snprintf(buf, PAGE_SIZE, "%llu", iowait_floor_pct);
+}
+
+static const struct kernel_param_ops param_ops_iowait_floor_pct = {
+ .set = set_iowait_floor_pct,
+ .get = get_iowait_floor_pct,
+};
+device_param_cb(iowait_floor_pct, &param_ops_iowait_floor_pct, NULL, 0644);
+
+static int set_iowait_ceiling_pct(const char *buf,
+ const struct kernel_param *kp)
+{
+ u64 val;
+
+ if (sscanf(buf, "%llu\n", &val) != 1)
+ return -EINVAL;
+ if (val < iowait_floor_pct)
+ return -EINVAL;
+
+ iowait_ceiling_pct = val;
+
+ return 0;
+}
+
+static int get_iowait_ceiling_pct(char *buf, const struct kernel_param *kp)
+{
+ return snprintf(buf, PAGE_SIZE, "%llu", iowait_ceiling_pct);
+}
+
+static const struct kernel_param_ops param_ops_iowait_ceiling_pct = {
+ .set = set_iowait_ceiling_pct,
+ .get = get_iowait_ceiling_pct,
+};
+device_param_cb(iowait_ceiling_pct, &param_ops_iowait_ceiling_pct, NULL, 0644);
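+
+/*
+ * iowait_floor_pct and iowait_ceiling_pct form a hysteresis band: a
+ * cluster is marked IO-busy once its max iowait exceeds the ceiling for
+ * io_enter_cycles samples and cleared once it drops below the floor for
+ * io_exit_cycles samples (see check_cluster_iowait() below).
+ */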
+
+static int set_workload_detect(const char *buf, const struct kernel_param *kp)
+{
+ unsigned int val, i;
+ struct cluster *i_cl;
+ unsigned long flags;
+
+ if (!clusters_inited)
+ return -EINVAL;
+
+ if (sscanf(buf, "%u\n", &val) != 1)
+ return -EINVAL;
+
+ if (val == workload_detect)
+ return 0;
+
+ workload_detect = val;
+ if (!(workload_detect & IO_DETECT)) {
+ for (i = 0; i < num_clusters; i++) {
+ i_cl = managed_clusters[i];
+ spin_lock_irqsave(&i_cl->iowait_lock, flags);
+ i_cl->iowait_enter_cycle_cnt = 0;
+ i_cl->iowait_exit_cycle_cnt = 0;
+ i_cl->cur_io_busy = 0;
+ i_cl->io_change = true;
+ spin_unlock_irqrestore(&i_cl->iowait_lock, flags);
+ }
+ }
+ if (!(workload_detect & MODE_DETECT)) {
+ for (i = 0; i < num_clusters; i++) {
+ i_cl = managed_clusters[i];
+ spin_lock_irqsave(&i_cl->mode_lock, flags);
+ i_cl->single_enter_cycle_cnt = 0;
+ i_cl->single_exit_cycle_cnt = 0;
+ i_cl->multi_enter_cycle_cnt = 0;
+ i_cl->multi_exit_cycle_cnt = 0;
+ i_cl->mode = 0;
+ i_cl->mode_change = true;
+ spin_unlock_irqrestore(&i_cl->mode_lock, flags);
+ }
+ }
+
+ if (!(workload_detect & PERF_CL_PEAK_DETECT)) {
+ for (i = 0; i < num_clusters; i++) {
+ i_cl = managed_clusters[i];
+ spin_lock_irqsave(&i_cl->perf_cl_peak_lock, flags);
+ i_cl->perf_cl_peak_enter_cycle_cnt = 0;
+ i_cl->perf_cl_peak_exit_cycle_cnt = 0;
+ i_cl->perf_cl_peak = 0;
+ spin_unlock_irqrestore(&i_cl->perf_cl_peak_lock, flags);
+ }
+ }
+
+ wake_up_process(notify_thread);
+ return 0;
+}
+
+static int get_workload_detect(char *buf, const struct kernel_param *kp)
+{
+ return snprintf(buf, PAGE_SIZE, "%u", workload_detect);
+}
+
+static const struct kernel_param_ops param_ops_workload_detect = {
+ .set = set_workload_detect,
+ .get = get_workload_detect,
+};
+device_param_cb(workload_detect, &param_ops_workload_detect, NULL, 0644);
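+
+/*
+ * workload_detect is a bitmask selecting the active detectors (IO_DETECT,
+ * MODE_DETECT, PERF_CL_PEAK_DETECT). Clearing a bit resets that
+ * detector's counters and state on every managed cluster and wakes the
+ * notifier thread so userspace observes the change.
+ */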
+
+static int set_input_evts_with_hi_slvt_detect(const char *buf,
+ const struct kernel_param *kp)
+{
+ unsigned int val;
+
+ if (sscanf(buf, "%u\n", &val) != 1)
+ return -EINVAL;
+
+ if (val == use_input_evts_with_hi_slvt_detect)
+ return 0;
+
+ use_input_evts_with_hi_slvt_detect = val;
+
+ if ((workload_detect & PERF_CL_PEAK_DETECT) &&
+ !input_events_handler_registered &&
+ use_input_evts_with_hi_slvt_detect) {
+ if (register_input_handler() == -ENOMEM) {
+ use_input_evts_with_hi_slvt_detect = 0;
+ return -ENOMEM;
+ }
+ } else if ((workload_detect & PERF_CL_PEAK_DETECT) &&
+ input_events_handler_registered &&
+ !use_input_evts_with_hi_slvt_detect) {
+ unregister_input_handler();
+ }
+ return 0;
+}
+
+static int get_input_evts_with_hi_slvt_detect(char *buf,
+ const struct kernel_param *kp)
+{
+ return snprintf(buf, PAGE_SIZE, "%u",
+ use_input_evts_with_hi_slvt_detect);
+}
+
+static const struct kernel_param_ops param_ops_ip_evts_with_hi_slvt_detect = {
+ .set = set_input_evts_with_hi_slvt_detect,
+ .get = get_input_evts_with_hi_slvt_detect,
+};
+device_param_cb(input_evts_with_hi_slvt_detect,
+ &param_ops_ip_evts_with_hi_slvt_detect, NULL, 0644);
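+
+/*
+ * Illustrative usage, assuming the standard module_param sysfs layout:
+ *
+ *   echo 1 > /sys/module/msm_performance/parameters/input_evts_with_hi_slvt_detect
+ *
+ * gates PERF_CL_PEAK reporting on recent touch input, registering the
+ * input handler on demand and unregistering it when disabled.
+ */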
+
+static struct kobject *mode_kobj;
+
+static ssize_t show_aggr_mode(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", aggr_mode);
+}
+static struct kobj_attribute aggr_mode_attr =
+__ATTR(aggr_mode, 0444, show_aggr_mode, NULL);
+
+static ssize_t show_aggr_iobusy(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", aggr_iobusy);
+}
+static struct kobj_attribute aggr_iobusy_attr =
+__ATTR(aggr_iobusy, 0444, show_aggr_iobusy, NULL);
+
+static struct attribute *attrs[] = {
+ &aggr_mode_attr.attr,
+ &aggr_iobusy_attr.attr,
+ NULL,
+};
+
+static struct attribute_group attr_group = {
+ .attrs = attrs,
+};
+
+static bool check_notify_status(void)
+{
+ int i;
+ struct cluster *cl;
+ bool any_change = false;
+ unsigned long flags;
+
+ for (i = 0; i < num_clusters; i++) {
+ cl = managed_clusters[i];
+ spin_lock_irqsave(&cl->iowait_lock, flags);
+ if (!any_change)
+ any_change = cl->io_change;
+ cl->io_change = false;
+ spin_unlock_irqrestore(&cl->iowait_lock, flags);
+
+ spin_lock_irqsave(&cl->mode_lock, flags);
+ if (!any_change)
+ any_change = cl->mode_change;
+ cl->mode_change = false;
+ spin_unlock_irqrestore(&cl->mode_lock, flags);
+
+ spin_lock_irqsave(&cl->perf_cl_peak_lock, flags);
+ if (!any_change)
+ any_change = cl->perf_cl_detect_state_change;
+ cl->perf_cl_detect_state_change = false;
+ spin_unlock_irqrestore(&cl->perf_cl_peak_lock, flags);
+ }
+
+ return any_change;
+}
+
+static int notify_userspace(void *data)
+{
+ unsigned int i, io, cpu_mode, perf_cl_peak_mode;
+
+ while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (!check_notify_status()) {
+ schedule();
+
+ if (kthread_should_stop())
+ break;
+ }
+ set_current_state(TASK_RUNNING);
+
+ io = 0;
+ cpu_mode = 0;
+ perf_cl_peak_mode = 0;
+ for (i = 0; i < num_clusters; i++) {
+ io |= managed_clusters[i]->cur_io_busy;
+ cpu_mode |= managed_clusters[i]->mode;
+ perf_cl_peak_mode |= managed_clusters[i]->perf_cl_peak;
+ }
+ if (io != aggr_iobusy) {
+ aggr_iobusy = io;
+ sysfs_notify(mode_kobj, NULL, "aggr_iobusy");
+ pr_debug("msm_perf: Notifying IO: %u\n", aggr_iobusy);
+ }
+ if ((aggr_mode & (SINGLE | MULTI)) != cpu_mode) {
+ aggr_mode &= ~(SINGLE | MULTI);
+ aggr_mode |= cpu_mode;
+ sysfs_notify(mode_kobj, NULL, "aggr_mode");
+ pr_debug("msm_perf: Notifying CPU mode:%u\n",
+ aggr_mode);
+ }
+ if ((aggr_mode & PERF_CL_PEAK) != perf_cl_peak_mode) {
+ aggr_mode &= ~(PERF_CL_PEAK);
+ aggr_mode |= perf_cl_peak_mode;
+ sysfs_notify(mode_kobj, NULL, "aggr_mode");
+ pr_debug("msm_perf: Notifying Gaming mode:%u\n",
+ aggr_mode);
+ }
+ }
+
+ return 0;
+}
+
+static void check_cluster_iowait(struct cluster *cl, u64 now)
+{
+ struct load_stats *pcpu_st;
+ unsigned int i;
+ unsigned long flags;
+ unsigned int temp_iobusy;
+ u64 max_iowait = 0;
+
+ spin_lock_irqsave(&cl->iowait_lock, flags);
+
+ if (((now - cl->last_io_check_ts)
+ < (cl->timer_rate - LAST_IO_CHECK_TOL)) ||
+ !(workload_detect & IO_DETECT)) {
+ spin_unlock_irqrestore(&cl->iowait_lock, flags);
+ return;
+ }
+
+ temp_iobusy = cl->cur_io_busy;
+ for_each_cpu(i, cl->cpus) {
+ pcpu_st = &per_cpu(cpu_load_stats, i);
+ if ((now - pcpu_st->last_wallclock)
+ > (cl->timer_rate + LAST_UPDATE_TOL))
+ continue;
+ if (max_iowait < pcpu_st->last_iopercent)
+ max_iowait = pcpu_st->last_iopercent;
+ }
+
+ if (!cl->cur_io_busy) {
+ if (max_iowait > iowait_ceiling_pct) {
+ cl->iowait_enter_cycle_cnt++;
+ if (cl->iowait_enter_cycle_cnt >= io_enter_cycles) {
+ cl->cur_io_busy = 1;
+ cl->iowait_enter_cycle_cnt = 0;
+ }
+ } else {
+ cl->iowait_enter_cycle_cnt = 0;
+ }
+ } else {
+ if (max_iowait < iowait_floor_pct) {
+ cl->iowait_exit_cycle_cnt++;
+ if (cl->iowait_exit_cycle_cnt >= io_exit_cycles) {
+ cl->cur_io_busy = 0;
+ cl->iowait_exit_cycle_cnt = 0;
+ }
+ } else {
+ cl->iowait_exit_cycle_cnt = 0;
+ }
+ }
+
+ cl->last_io_check_ts = now;
+ trace_track_iowait(cpumask_first(cl->cpus), cl->iowait_enter_cycle_cnt,
+ cl->iowait_exit_cycle_cnt, cl->cur_io_busy, max_iowait);
+
+ if (temp_iobusy != cl->cur_io_busy) {
+ cl->io_change = true;
+ pr_debug("msm_perf: IO changed to %u\n", cl->cur_io_busy);
+ }
+
+ spin_unlock_irqrestore(&cl->iowait_lock, flags);
+ if (cl->io_change)
+ wake_up_process(notify_thread);
+}
+
+static void disable_timer(struct cluster *cl)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cl->timer_lock, flags);
+
+ if (del_timer(&cl->mode_exit_timer)) {
+ trace_single_cycle_exit_timer_stop(cpumask_first(cl->cpus),
+ cl->single_enter_cycles, cl->single_enter_cycle_cnt,
+ cl->single_exit_cycles, cl->single_exit_cycle_cnt,
+ cl->multi_enter_cycles, cl->multi_enter_cycle_cnt,
+ cl->multi_exit_cycles, cl->multi_exit_cycle_cnt,
+ cl->timer_rate, cl->mode);
+ }
+
+ spin_unlock_irqrestore(&cl->timer_lock, flags);
+}
+
+static void start_timer(struct cluster *cl)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cl->timer_lock, flags);
+ if ((cl->mode & SINGLE) && !timer_pending(&cl->mode_exit_timer)) {
+ /* Set timer for the Cluster since there is none pending */
+ cl->mode_exit_timer.expires = get_jiffies_64() +
+ usecs_to_jiffies(cl->single_exit_cycles * cl->timer_rate);
+ cl->mode_exit_timer.data = cpumask_first(cl->cpus);
+ add_timer(&cl->mode_exit_timer);
+ trace_single_cycle_exit_timer_start(cpumask_first(cl->cpus),
+ cl->single_enter_cycles, cl->single_enter_cycle_cnt,
+ cl->single_exit_cycles, cl->single_exit_cycle_cnt,
+ cl->multi_enter_cycles, cl->multi_enter_cycle_cnt,
+ cl->multi_exit_cycles, cl->multi_exit_cycle_cnt,
+ cl->timer_rate, cl->mode);
+ }
+ spin_unlock_irqrestore(&cl->timer_lock, flags);
+}
+
+static void disable_perf_cl_peak_timer(struct cluster *cl)
+{
+ if (del_timer(&cl->perf_cl_peak_mode_exit_timer)) {
+ trace_perf_cl_peak_exit_timer_stop(cpumask_first(cl->cpus),
+ cl->perf_cl_peak_enter_cycles,
+ cl->perf_cl_peak_enter_cycle_cnt,
+ cl->perf_cl_peak_exit_cycles,
+ cl->perf_cl_peak_exit_cycle_cnt,
+ cl->timer_rate, cl->mode);
+ }
+}
+
+static void start_perf_cl_peak_timer(struct cluster *cl)
+{
+ if ((cl->mode & PERF_CL_PEAK) &&
+ !timer_pending(&cl->perf_cl_peak_mode_exit_timer)) {
+ /* Set timer for the Cluster since there is none pending */
+ cl->perf_cl_peak_mode_exit_timer.expires = get_jiffies_64() +
+ usecs_to_jiffies(cl->perf_cl_peak_exit_cycles * cl->timer_rate);
+ cl->perf_cl_peak_mode_exit_timer.data = cpumask_first(cl->cpus);
+ add_timer(&cl->perf_cl_peak_mode_exit_timer);
+ trace_perf_cl_peak_exit_timer_start(cpumask_first(cl->cpus),
+ cl->perf_cl_peak_enter_cycles,
+ cl->perf_cl_peak_enter_cycle_cnt,
+ cl->perf_cl_peak_exit_cycles,
+ cl->perf_cl_peak_exit_cycle_cnt,
+ cl->timer_rate, cl->mode);
+ }
+}
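+
+/*
+ * The mode-exit timers armed above act as watchdogs: if governor samples
+ * stop arriving (e.g. the cluster goes idle), the pending timer fires
+ * after exit_cycles * timer_rate microseconds and forces the mode to be
+ * exited.
+ */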
+
+static const struct input_device_id msm_perf_input_ids[] = {
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_EVBIT,
+ .evbit = {BIT_MASK(EV_ABS)},
+ .absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
+ BIT_MASK(ABS_MT_POSITION_X) |
+ BIT_MASK(ABS_MT_POSITION_Y)},
+ },
+
+ {},
+};
+
+static void msm_perf_input_event_handler(struct input_handle *handle,
+ unsigned int type,
+ unsigned int code,
+ int value)
+{
+ if (type != EV_ABS)
+ return;
+
+ switch (code) {
+ case ABS_MT_POSITION_X:
+ ip_evts->evt_x_cnt++;
+ break;
+ case ABS_MT_POSITION_Y:
+ ip_evts->evt_y_cnt++;
+ break;
+
+ case ABS_MT_DISTANCE:
+ break;
+
+ case ABS_MT_PRESSURE:
+ break;
+
+ default:
+ break;
+ }
+}
+
+static int msm_perf_input_connect(struct input_handler *handler,
+ struct input_dev *dev,
+ const struct input_device_id *id)
+{
+ int rc;
+ struct input_handle *handle;
+
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+ if (!handle)
+ return -ENOMEM;
+
+ handle->dev = dev;
+ handle->handler = handler;
+ handle->name = handler->name;
+
+ rc = input_register_handle(handle);
+ if (rc) {
+ pr_err("Failed to register handle\n");
+ goto error;
+ }
+
+ rc = input_open_device(handle);
+ if (rc) {
+ pr_err("Failed to open device\n");
+ goto error_unregister;
+ }
+ return 0;
+
+error_unregister:
+ input_unregister_handle(handle);
+error:
+ kfree(handle);
+ return rc;
+}
+
+static void msm_perf_input_disconnect(struct input_handle *handle)
+{
+ input_close_device(handle);
+ input_unregister_handle(handle);
+ kfree(handle);
+}
+
+static void unregister_input_handler(void)
+{
+ if (handler != NULL) {
+ input_unregister_handler(handler);
+ input_events_handler_registered = false;
+ }
+}
+
+static int register_input_handler(void)
+{
+ int rc;
+
+ if (handler == NULL) {
+ handler = kzalloc(sizeof(*handler), GFP_KERNEL);
+ if (!handler)
+ return -ENOMEM;
+ handler->event = msm_perf_input_event_handler;
+ handler->connect = msm_perf_input_connect;
+ handler->disconnect = msm_perf_input_disconnect;
+ handler->name = "msm_perf";
+ handler->id_table = msm_perf_input_ids;
+ handler->private = NULL;
+ }
+ rc = input_register_handler(handler);
+ if (rc) {
+ pr_err("Unable to register the input handler for msm_perf\n");
+ kfree(handler);
+ /* Drop the stale pointer so a later retry allocates afresh */
+ handler = NULL;
+ } else {
+ input_events_handler_registered = true;
+ }
+ return rc;
+}
+
+static void check_perf_cl_peak_load(struct cluster *cl, u64 now)
+{
+ struct load_stats *pcpu_st;
+ unsigned int i, ret_mode, max_load = 0;
+ unsigned int total_load = 0, cpu_cnt = 0;
+ unsigned long flags;
+ bool cpu_of_cluster_zero = true;
+
+ spin_lock_irqsave(&cl->perf_cl_peak_lock, flags);
+
+ cpu_of_cluster_zero = !cpumask_first(cl->cpus);
+ /*
+ * Return without doing anything if the time since the last load
+ * check is less than timer_rate minus the load-check tolerance
+ * (18ms), if perf_cl_peak detection is not enabled, or if the
+ * first CPU of the cluster is CPU 0 (the LVT cluster); only the
+ * SLVT cluster is of interest here.
+ */
+ if (((now - cl->last_perf_cl_check_ts)
+ < (cl->timer_rate - LAST_LD_CHECK_TOL)) ||
+ !(workload_detect & PERF_CL_PEAK_DETECT) ||
+ cpu_of_cluster_zero) {
+ spin_unlock_irqrestore(&cl->perf_cl_peak_lock, flags);
+ return;
+ }
+ for_each_cpu(i, cl->cpus) {
+ pcpu_st = &per_cpu(cpu_load_stats, i);
+ if ((now - pcpu_st->last_wallclock)
+ > (cl->timer_rate + LAST_UPDATE_TOL))
+ continue;
+ if (pcpu_st->cpu_load > max_load)
+ max_load = pcpu_st->cpu_load;
+ /*
+ * Save the frequency for the cpu of the cluster
+ * This frequency is the most recent/current
+ * as obtained due to a transition
+ * notifier callback.
+ */
+ cl->current_freq = pcpu_st->freq;
+ }
+ ret_mode = cl->perf_cl_peak;
+
+ if (!(cl->perf_cl_peak & PERF_CL_PEAK)) {
+ if (max_load >= cl->perf_cl_peak_enter_load &&
+ freq_greater_than_threshold(cl,
+ cpumask_first(cl->cpus))) {
+ /*
+ * Reset the event count for the first cycle
+ * of perf_cl_peak we detect
+ */
+ if (!cl->perf_cl_peak_enter_cycle_cnt)
+ ip_evts->evt_x_cnt = ip_evts->evt_y_cnt = 0;
+ cl->perf_cl_peak_enter_cycle_cnt++;
+ if (cl->perf_cl_peak_enter_cycle_cnt >=
+ cl->perf_cl_peak_enter_cycles) {
+ if (input_events_greater_than_threshold())
+ ret_mode |= PERF_CL_PEAK;
+ cl->perf_cl_peak_enter_cycle_cnt = 0;
+ }
+ } else {
+ cl->perf_cl_peak_enter_cycle_cnt = 0;
+ /* Reset the event count */
+ ip_evts->evt_x_cnt = ip_evts->evt_y_cnt = 0;
+ }
+ } else {
+ if (max_load >= cl->perf_cl_peak_exit_load &&
+ freq_greater_than_threshold(cl,
+ cpumask_first(cl->cpus))) {
+ cl->perf_cl_peak_exit_cycle_cnt = 0;
+ disable_perf_cl_peak_timer(cl);
+ } else {
+ start_perf_cl_peak_timer(cl);
+ cl->perf_cl_peak_exit_cycle_cnt++;
+ if (cl->perf_cl_peak_exit_cycle_cnt
+ >= cl->perf_cl_peak_exit_cycles) {
+ ret_mode &= ~PERF_CL_PEAK;
+ cl->perf_cl_peak_exit_cycle_cnt = 0;
+ disable_perf_cl_peak_timer(cl);
+ }
+ }
+ }
+
+ cl->last_perf_cl_check_ts = now;
+ if (ret_mode != cl->perf_cl_peak) {
+ pr_debug("msm_perf: Mode changed to %u\n", ret_mode);
+ cl->perf_cl_peak = ret_mode;
+ cl->perf_cl_detect_state_change = true;
+ }
+
+ trace_cpu_mode_detect(cpumask_first(cl->cpus), max_load,
+ cl->single_enter_cycle_cnt, cl->single_exit_cycle_cnt,
+ total_load, cl->multi_enter_cycle_cnt,
+ cl->multi_exit_cycle_cnt, cl->perf_cl_peak_enter_cycle_cnt,
+ cl->perf_cl_peak_exit_cycle_cnt, cl->mode, cpu_cnt);
+
+ spin_unlock_irqrestore(&cl->perf_cl_peak_lock, flags);
+
+ if (cl->perf_cl_detect_state_change)
+ wake_up_process(notify_thread);
+}
+
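+/*
+ * Mode detection: SINGLE is entered once the max per-CPU load stays at or
+ * above single_enter_load for single_enter_cycles consecutive samples, and
+ * exited once it stays below single_exit_load for single_exit_cycles
+ * samples. MULTI compares the aggregate load of the sampled CPUs against
+ * the per-CPU multi enter/exit thresholds scaled by the sampled CPU count.
+ */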
+static void check_cpu_load(struct cluster *cl, u64 now)
+{
+ struct load_stats *pcpu_st;
+ unsigned int i, max_load = 0, total_load = 0, ret_mode, cpu_cnt = 0;
+ unsigned int total_load_ceil, total_load_floor;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cl->mode_lock, flags);
+
+ if (((now - cl->last_mode_check_ts)
+ < (cl->timer_rate - LAST_LD_CHECK_TOL)) ||
+ !(workload_detect & MODE_DETECT)) {
+ spin_unlock_irqrestore(&cl->mode_lock, flags);
+ return;
+ }
+
+ for_each_cpu(i, cl->cpus) {
+ pcpu_st = &per_cpu(cpu_load_stats, i);
+ if ((now - pcpu_st->last_wallclock)
+ > (cl->timer_rate + LAST_UPDATE_TOL))
+ continue;
+ if (pcpu_st->cpu_load > max_load)
+ max_load = pcpu_st->cpu_load;
+ total_load += pcpu_st->cpu_load;
+ cpu_cnt++;
+ }
+
+ if (cpu_cnt > 1) {
+ total_load_ceil = cl->pcpu_multi_enter_load * cpu_cnt;
+ total_load_floor = cl->pcpu_multi_exit_load * cpu_cnt;
+ } else {
+ total_load_ceil = UINT_MAX;
+ total_load_floor = UINT_MAX;
+ }
+
+ ret_mode = cl->mode;
+ if (!(cl->mode & SINGLE)) {
+ if (max_load >= cl->single_enter_load) {
+ cl->single_enter_cycle_cnt++;
+ if (cl->single_enter_cycle_cnt
+ >= cl->single_enter_cycles) {
+ ret_mode |= SINGLE;
+ cl->single_enter_cycle_cnt = 0;
+ }
+ } else {
+ cl->single_enter_cycle_cnt = 0;
+ }
+ } else {
+ if (max_load < cl->single_exit_load) {
+ start_timer(cl);
+ cl->single_exit_cycle_cnt++;
+ if (cl->single_exit_cycle_cnt
+ >= cl->single_exit_cycles) {
+ ret_mode &= ~SINGLE;
+ cl->single_exit_cycle_cnt = 0;
+ disable_timer(cl);
+ }
+ } else {
+ cl->single_exit_cycle_cnt = 0;
+ disable_timer(cl);
+ }
+ }
+
+ if (!(cl->mode & MULTI)) {
+ if (total_load >= total_load_ceil) {
+ cl->multi_enter_cycle_cnt++;
+ if (cl->multi_enter_cycle_cnt
+ >= cl->multi_enter_cycles) {
+ ret_mode |= MULTI;
+ cl->multi_enter_cycle_cnt = 0;
+ }
+ } else {
+ cl->multi_enter_cycle_cnt = 0;
+ }
+ } else {
+ if (total_load < total_load_floor) {
+ cl->multi_exit_cycle_cnt++;
+ if (cl->multi_exit_cycle_cnt
+ >= cl->multi_exit_cycles) {
+ ret_mode &= ~MULTI;
+ cl->multi_exit_cycle_cnt = 0;
+ }
+ } else {
+ cl->multi_exit_cycle_cnt = 0;
+ }
+ }
+
+ cl->last_mode_check_ts = now;
+
+ if (ret_mode != cl->mode) {
+ cl->mode = ret_mode;
+ cl->mode_change = true;
+ pr_debug("msm_perf: Mode changed to %u\n", ret_mode);
+ }
+
+ trace_cpu_mode_detect(cpumask_first(cl->cpus), max_load,
+ cl->single_enter_cycle_cnt, cl->single_exit_cycle_cnt,
+ total_load, cl->multi_enter_cycle_cnt,
+ cl->multi_exit_cycle_cnt, cl->perf_cl_peak_enter_cycle_cnt,
+ cl->perf_cl_peak_exit_cycle_cnt, cl->mode, cpu_cnt);
+
+ spin_unlock_irqrestore(&cl->mode_lock, flags);
+
+ if (cl->mode_change)
+ wake_up_process(notify_thread);
+}
+
+static void check_workload_stats(unsigned int cpu, unsigned int rate, u64 now)
+{
+ struct cluster *cl = NULL;
+ unsigned int i;
+
+ for (i = 0; i < num_clusters; i++) {
+ if (cpumask_test_cpu(cpu, managed_clusters[i]->cpus)) {
+ cl = managed_clusters[i];
+ break;
+ }
+ }
+ if (cl == NULL)
+ return;
+
+ cl->timer_rate = rate;
+ check_cluster_iowait(cl, now);
+ check_cpu_load(cl, now);
+ check_perf_cl_peak_load(cl, now);
+}
+
+static int perf_govinfo_notify(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct cpufreq_govinfo *gov_info = data;
+ unsigned int cpu = gov_info->cpu;
+ struct load_stats *cpu_st = &per_cpu(cpu_load_stats, cpu);
+ u64 now, cur_iowait, time_diff, iowait_diff;
+
+ if (!clusters_inited || !workload_detect)
+ return NOTIFY_OK;
+
+ cur_iowait = get_cpu_iowait_time_us(cpu, &now);
+ if (cur_iowait >= cpu_st->last_iowait)
+ iowait_diff = cur_iowait - cpu_st->last_iowait;
+ else
+ iowait_diff = 0;
+
+ if (now > cpu_st->last_wallclock)
+ time_diff = now - cpu_st->last_wallclock;
+ else
+ return NOTIFY_OK;
+
+ if (iowait_diff <= time_diff) {
+ iowait_diff *= 100;
+ cpu_st->last_iopercent = div64_u64(iowait_diff, time_diff);
+ } else {
+ cpu_st->last_iopercent = 100;
+ }
+
+ cpu_st->last_wallclock = now;
+ cpu_st->last_iowait = cur_iowait;
+ cpu_st->cpu_load = gov_info->load;
+
+ /*
+ * Avoid deadlock in case governor notifier ran in the context
+ * of notify_work thread
+ */
+ if (current == notify_thread)
+ return NOTIFY_OK;
+
+ check_workload_stats(cpu, gov_info->sampling_rate_us, now);
+
+ return NOTIFY_OK;
+}
+
+static int perf_cputrans_notify(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct cpufreq_freqs *freq = data;
+ unsigned int cpu = freq->cpu;
+ unsigned long flags;
+ unsigned int i;
+ struct cluster *cl = NULL;
+ struct load_stats *cpu_st = &per_cpu(cpu_load_stats, cpu);
+
+ if (!clusters_inited || !workload_detect)
+ return NOTIFY_OK;
+ for (i = 0; i < num_clusters; i++) {
+ if (cpumask_test_cpu(cpu, managed_clusters[i]->cpus)) {
+ cl = managed_clusters[i];
+ break;
+ }
+ }
+ if (cl == NULL)
+ return NOTIFY_OK;
+ if (val == CPUFREQ_POSTCHANGE) {
+ spin_lock_irqsave(&cl->perf_cl_peak_lock, flags);
+ cpu_st->freq = freq->new;
+ spin_unlock_irqrestore(&cl->perf_cl_peak_lock, flags);
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block perf_govinfo_nb = {
+ .notifier_call = perf_govinfo_notify,
+};
+
+static struct notifier_block perf_cputransitions_nb = {
+ .notifier_call = perf_cputrans_notify,
+};
+
+static void single_mod_exit_timer(unsigned long data)
+{
+ int i;
+ struct cluster *i_cl = NULL;
+ unsigned long flags;
+
+ if (!clusters_inited)
+ return;
+
+ for (i = 0; i < num_clusters; i++) {
+ if (cpumask_test_cpu(data,
+ managed_clusters[i]->cpus)) {
+ i_cl = managed_clusters[i];
+ break;
+ }
+ }
+
+ if (i_cl == NULL)
+ return;
+
+ spin_lock_irqsave(&i_cl->mode_lock, flags);
+ if (i_cl->mode & SINGLE) {
+ /* Disable SINGLE mode and exit since the timer expired */
+ i_cl->mode = i_cl->mode & ~SINGLE;
+ i_cl->single_enter_cycle_cnt = 0;
+ i_cl->single_exit_cycle_cnt = 0;
+ trace_single_mode_timeout(cpumask_first(i_cl->cpus),
+ i_cl->single_enter_cycles, i_cl->single_enter_cycle_cnt,
+ i_cl->single_exit_cycles, i_cl->single_exit_cycle_cnt,
+ i_cl->multi_enter_cycles, i_cl->multi_enter_cycle_cnt,
+ i_cl->multi_exit_cycles, i_cl->multi_exit_cycle_cnt,
+ i_cl->timer_rate, i_cl->mode);
+ }
+ spin_unlock_irqrestore(&i_cl->mode_lock, flags);
+ wake_up_process(notify_thread);
+}
+
+static void perf_cl_peak_mod_exit_timer(unsigned long data)
+{
+ int i;
+ struct cluster *i_cl = NULL;
+ unsigned long flags;
+
+ if (!clusters_inited)
+ return;
+
+ for (i = 0; i < num_clusters; i++) {
+ if (cpumask_test_cpu(data,
+ managed_clusters[i]->cpus)) {
+ i_cl = managed_clusters[i];
+ break;
+ }
+ }
+
+ if (i_cl == NULL)
+ return;
+
+ spin_lock_irqsave(&i_cl->perf_cl_peak_lock, flags);
+ if (i_cl->perf_cl_peak & PERF_CL_PEAK) {
+ /* Disable PERF_CL_PEAK mode and exit since the timer expired */
+ i_cl->perf_cl_peak = i_cl->perf_cl_peak & ~PERF_CL_PEAK;
+ i_cl->perf_cl_peak_enter_cycle_cnt = 0;
+ i_cl->perf_cl_peak_exit_cycle_cnt = 0;
+ }
+ spin_unlock_irqrestore(&i_cl->perf_cl_peak_lock, flags);
+ wake_up_process(notify_thread);
+}
+
+/* CPU Hotplug */
+static struct kobject *events_kobj;
+
+static ssize_t show_cpu_hotplug(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "\n");
+}
+static struct kobj_attribute cpu_hotplug_attr =
+__ATTR(cpu_hotplug, 0444, show_cpu_hotplug, NULL);
+
+static struct attribute *events_attrs[] = {
+ &cpu_hotplug_attr.attr,
+ NULL,
+};
+
+static struct attribute_group events_attr_group = {
+ .attrs = events_attrs,
+};
+/*******************************sysfs ends************************************/
+
+static unsigned int num_online_managed(struct cpumask *mask)
+{
+ struct cpumask tmp_mask;
+
+ cpumask_clear(&tmp_mask);
+ cpumask_and(&tmp_mask, mask, cpu_online_mask);
+
+ return cpumask_weight(&tmp_mask);
+}
+
+static int perf_adjust_notify(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct cpufreq_policy *policy = data;
+ unsigned int cpu = policy->cpu;
+ struct cpu_status *cpu_st = &per_cpu(cpu_stats, cpu);
+ unsigned int min = cpu_st->min, max = cpu_st->max;
+
+ if (val != CPUFREQ_ADJUST)
+ return NOTIFY_OK;
+
+ pr_debug("msm_perf: CPU%u policy before: %u:%u kHz\n", cpu,
+ policy->min, policy->max);
+ pr_debug("msm_perf: CPU%u seting min:max %u:%u kHz\n", cpu, min, max);
+
+ cpufreq_verify_within_limits(policy, min, max);
+
+ pr_debug("msm_perf: CPU%u policy after: %u:%u kHz\n", cpu,
+ policy->min, policy->max);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block perf_cpufreq_nb = {
+ .notifier_call = perf_adjust_notify,
+};
+
+static void hotplug_notify(int action)
+{
+ unsigned long flags;
+
+ if (!events_group.init_success)
+ return;
+
+ if ((action == CPU_ONLINE) || (action == CPU_DEAD)) {
+ spin_lock_irqsave(&(events_group.cpu_hotplug_lock), flags);
+ events_group.cpu_hotplug = true;
+ spin_unlock_irqrestore(&(events_group.cpu_hotplug_lock), flags);
+ wake_up_process(events_notify_thread);
+ }
+}
+
+static int events_notify_userspace(void *data)
+{
+ unsigned long flags;
+ bool notify_change;
+
+ while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_lock_irqsave(&(events_group.cpu_hotplug_lock), flags);
+
+ if (!events_group.cpu_hotplug) {
+ spin_unlock_irqrestore(&(events_group.cpu_hotplug_lock),
+ flags);
+
+ schedule();
+ if (kthread_should_stop())
+ break;
+ spin_lock_irqsave(&(events_group.cpu_hotplug_lock),
+ flags);
+ }
+
+ set_current_state(TASK_RUNNING);
+ notify_change = events_group.cpu_hotplug;
+ events_group.cpu_hotplug = false;
+ spin_unlock_irqrestore(&(events_group.cpu_hotplug_lock), flags);
+
+ if (notify_change)
+ sysfs_notify(events_kobj, NULL, "cpu_hotplug");
+ }
+
+ return 0;
+}
+
+/*
+ * Attempt to offline CPUs based on their power cost.
+ * CPUs with higher power costs are offlined first.
+ */
+static int __ref rm_high_pwr_cost_cpus(struct cluster *cl)
+{
+ unsigned int cpu, i;
+ struct cpu_pwr_stats *per_cpu_info = get_cpu_pwr_stats();
+ struct cpu_pstate_pwr *costs;
+ unsigned int *pcpu_pwr;
+ unsigned int max_cost_cpu, max_cost;
+ int any_cpu = -1;
+
+ if (!per_cpu_info)
+ return -EAGAIN;
+
+ for_each_cpu(cpu, cl->cpus) {
+ costs = per_cpu_info[cpu].ptable;
+ if (!costs || !costs[0].freq)
+ continue;
+
+ i = 1;
+ while (costs[i].freq)
+ i++;
+
+ pcpu_pwr = &per_cpu(cpu_power_cost, cpu);
+ *pcpu_pwr = costs[i - 1].power;
+ any_cpu = (int)cpu;
+ pr_debug("msm_perf: CPU:%d Power:%u\n", cpu, *pcpu_pwr);
+ }
+
+ if (any_cpu < 0)
+ return -EAGAIN;
+
+ for (i = 0; i < cpumask_weight(cl->cpus); i++) {
+ max_cost = 0;
+ max_cost_cpu = cpumask_first(cl->cpus);
+
+ for_each_cpu(cpu, cl->cpus) {
+ pcpu_pwr = &per_cpu(cpu_power_cost, cpu);
+ if (max_cost < *pcpu_pwr) {
+ max_cost = *pcpu_pwr;
+ max_cost_cpu = cpu;
+ }
+ }
+
+ if (!cpu_online(max_cost_cpu))
+ goto end;
+
+ pr_debug("msm_perf: Offlining CPU%d Power:%d\n", max_cost_cpu,
+ max_cost);
+ cpumask_set_cpu(max_cost_cpu, cl->offlined_cpus);
+ lock_device_hotplug();
+ if (device_offline(get_cpu_device(max_cost_cpu))) {
+ cpumask_clear_cpu(max_cost_cpu, cl->offlined_cpus);
+ pr_debug("msm_perf: Offlining CPU%d failed\n",
+ max_cost_cpu);
+ }
+ unlock_device_hotplug();
+
+end:
+ pcpu_pwr = &per_cpu(cpu_power_cost, max_cost_cpu);
+ *pcpu_pwr = 0;
+ if (num_online_managed(cl->cpus) <= cl->max_cpu_request)
+ break;
+ }
+
+ if (num_online_managed(cl->cpus) > cl->max_cpu_request)
+ return -EAGAIN;
+ else
+ return 0;
+}
+
+/*
+ * try_hotplug tries to online/offline cores based on the current requirement.
+ * It loops through the currently managed CPUs and tries to online/offline
+ * them until the max_cpu_request criteria is met.
+ */
+static void __ref try_hotplug(struct cluster *data)
+{
+ unsigned int i;
+
+ if (!clusters_inited)
+ return;
+
+ pr_debug("msm_perf: Trying hotplug...%d:%d\n",
+ num_online_managed(data->cpus), num_online_cpus());
+
+ mutex_lock(&managed_cpus_lock);
+ if (num_online_managed(data->cpus) > data->max_cpu_request) {
+ if (!rm_high_pwr_cost_cpus(data)) {
+ mutex_unlock(&managed_cpus_lock);
+ return;
+ }
+
+ /*
+ * If power-aware offlining fails because power cost info is
+ * unavailable, fall back to the original implementation.
+ */
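+ /*
+ * Note: i is unsigned, so this loop terminates via the
+ * i < num_present_cpus() bound once i wraps past zero.
+ */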
+ for (i = num_present_cpus() - 1; i >= 0 &&
+ i < num_present_cpus(); i--) {
+ if (!cpumask_test_cpu(i, data->cpus) || !cpu_online(i))
+ continue;
+
+ pr_debug("msm_perf: Offlining CPU%d\n", i);
+ cpumask_set_cpu(i, data->offlined_cpus);
+ lock_device_hotplug();
+ if (device_offline(get_cpu_device(i))) {
+ cpumask_clear_cpu(i, data->offlined_cpus);
+ pr_debug("msm_perf: Offlining CPU%d failed\n",
+ i);
+ unlock_device_hotplug();
+ continue;
+ }
+ unlock_device_hotplug();
+ if (num_online_managed(data->cpus) <=
+ data->max_cpu_request)
+ break;
+ }
+ } else {
+ for_each_cpu(i, data->cpus) {
+ if (cpu_online(i))
+ continue;
+ pr_debug("msm_perf: Onlining CPU%d\n", i);
+ lock_device_hotplug();
+ if (device_online(get_cpu_device(i))) {
+ pr_debug("msm_perf: Onlining CPU%d failed\n",
+ i);
+ unlock_device_hotplug();
+ continue;
+ }
+ unlock_device_hotplug();
+ cpumask_clear_cpu(i, data->offlined_cpus);
+ if (num_online_managed(data->cpus) >=
+ data->max_cpu_request)
+ break;
+ }
+ }
+ mutex_unlock(&managed_cpus_lock);
+}
+
+static void __ref release_cluster_control(struct cpumask *off_cpus)
+{
+ int cpu;
+
+ for_each_cpu(cpu, off_cpus) {
+ pr_debug("msm_perf: Release CPU %d\n", cpu);
+ lock_device_hotplug();
+ if (!device_online(get_cpu_device(cpu)))
+ cpumask_clear_cpu(cpu, off_cpus);
+ unlock_device_hotplug();
+ }
+}
+
+/* Work to evaluate current online CPU status and hotplug CPUs as per need */
+static void check_cluster_status(struct work_struct *work)
+{
+ int i;
+ struct cluster *i_cl;
+
+ for (i = 0; i < num_clusters; i++) {
+ i_cl = managed_clusters[i];
+
+ if (cpumask_empty(i_cl->cpus))
+ continue;
+
+ if (i_cl->max_cpu_request < 0) {
+ if (!cpumask_empty(i_cl->offlined_cpus))
+ release_cluster_control(i_cl->offlined_cpus);
+ continue;
+ }
+
+ if (num_online_managed(i_cl->cpus) !=
+ i_cl->max_cpu_request)
+ try_hotplug(i_cl);
+ }
+}
+
+static int __ref msm_performance_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ uint32_t cpu = (uintptr_t)hcpu;
+ unsigned int i;
+ struct cluster *i_cl = NULL;
+
+ hotplug_notify(action);
+
+ if (!clusters_inited)
+ return NOTIFY_OK;
+
+ for (i = 0; i < num_clusters; i++) {
+ if (managed_clusters[i]->cpus == NULL)
+ return NOTIFY_OK;
+ if (cpumask_test_cpu(cpu, managed_clusters[i]->cpus)) {
+ i_cl = managed_clusters[i];
+ break;
+ }
+ }
+
+ if (i_cl == NULL)
+ return NOTIFY_OK;
+
+ if (action == CPU_UP_PREPARE || action == CPU_UP_PREPARE_FROZEN) {
+ /*
+ * Prevent onlining of a managed CPU if max_cpu criteria is
+ * already satisfied
+ */
+ if (i_cl->offlined_cpus == NULL)
+ return NOTIFY_OK;
+ if (i_cl->max_cpu_request <=
+ num_online_managed(i_cl->cpus)) {
+ pr_debug("msm_perf: Prevent CPU%d onlining\n", cpu);
+ cpumask_set_cpu(cpu, i_cl->offlined_cpus);
+ return NOTIFY_BAD;
+ }
+ cpumask_clear_cpu(cpu, i_cl->offlined_cpus);
+
+ } else if (action == CPU_DEAD) {
+ if (i_cl->offlined_cpus == NULL)
+ return NOTIFY_OK;
+ if (cpumask_test_cpu(cpu, i_cl->offlined_cpus))
+ return NOTIFY_OK;
+ /*
+ * Schedule a re-evaluation to check if any more CPUs can be
+ * brought online to meet the max_cpu_request requirement. This
+ * work is delayed to account for CPU hotplug latencies
+ */
+ if (schedule_delayed_work(&evaluate_hotplug_work, 0)) {
+ trace_reevaluate_hotplug(cpumask_bits(i_cl->cpus)[0],
+ i_cl->max_cpu_request);
+ pr_debug("msm_perf: Re-evaluation scheduled %d\n", cpu);
+ } else {
+ pr_debug("msm_perf: Work scheduling failed %d\n", cpu);
+ }
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __refdata msm_performance_cpu_notifier = {
+ .notifier_call = msm_performance_cpu_callback,
+};
+
+static int init_cluster_control(void)
+{
+ unsigned int i;
+ int ret = 0;
+
+ struct kobject *module_kobj;
+
+ managed_clusters = kcalloc(num_clusters, sizeof(struct cluster *),
+ GFP_KERNEL);
+ if (!managed_clusters)
+ return -ENOMEM;
+ for (i = 0; i < num_clusters; i++) {
+ managed_clusters[i] = kcalloc(1, sizeof(struct cluster),
+ GFP_KERNEL);
+ if (!managed_clusters[i]) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ if (!alloc_cpumask_var(&managed_clusters[i]->cpus,
+ GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ if (!alloc_cpumask_var(&managed_clusters[i]->offlined_cpus,
+ GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ managed_clusters[i]->max_cpu_request = -1;
+ managed_clusters[i]->single_enter_load = DEF_SINGLE_ENT;
+ managed_clusters[i]->single_exit_load = DEF_SINGLE_EX;
+ managed_clusters[i]->single_enter_cycles
+ = DEF_SINGLE_ENTER_CYCLE;
+ managed_clusters[i]->single_exit_cycles
+ = DEF_SINGLE_EXIT_CYCLE;
+ managed_clusters[i]->pcpu_multi_enter_load
+ = DEF_PCPU_MULTI_ENT;
+ managed_clusters[i]->pcpu_multi_exit_load = DEF_PCPU_MULTI_EX;
+ managed_clusters[i]->multi_enter_cycles = DEF_MULTI_ENTER_CYCLE;
+ managed_clusters[i]->multi_exit_cycles = DEF_MULTI_EXIT_CYCLE;
+ managed_clusters[i]->perf_cl_peak_enter_load =
+ DEF_PERF_CL_PEAK_ENT;
+ managed_clusters[i]->perf_cl_peak_exit_load =
+ DEF_PERF_CL_PEAK_EX;
+ managed_clusters[i]->perf_cl_peak_enter_cycles =
+ DEF_PERF_CL_PEAK_ENTER_CYCLE;
+ managed_clusters[i]->perf_cl_peak_exit_cycles =
+ DEF_PERF_CL_PEAK_EXIT_CYCLE;
+
+ /* Initialize trigger threshold */
+ thr.perf_cl_trigger_threshold = CLUSTER_1_THRESHOLD_FREQ;
+ thr.pwr_cl_trigger_threshold = CLUSTER_0_THRESHOLD_FREQ;
+ thr.ip_evt_threshold = INPUT_EVENT_CNT_THRESHOLD;
+ spin_lock_init(&(managed_clusters[i]->iowait_lock));
+ spin_lock_init(&(managed_clusters[i]->mode_lock));
+ spin_lock_init(&(managed_clusters[i]->timer_lock));
+ spin_lock_init(&(managed_clusters[i]->perf_cl_peak_lock));
+ init_timer(&managed_clusters[i]->mode_exit_timer);
+ managed_clusters[i]->mode_exit_timer.function =
+ single_mod_exit_timer;
+ init_timer(&managed_clusters[i]->perf_cl_peak_mode_exit_timer);
+ managed_clusters[i]->perf_cl_peak_mode_exit_timer.function =
+ perf_cl_peak_mod_exit_timer;
+ }
+
+ INIT_DELAYED_WORK(&evaluate_hotplug_work, check_cluster_status);
+ mutex_init(&managed_cpus_lock);
+
+ ip_evts = kcalloc(1, sizeof(struct input_events), GFP_KERNEL);
+ if (!ip_evts) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+ if (!module_kobj) {
+ pr_err("msm_perf: Couldn't find module kobject\n");
+ ret = -ENOENT;
+ goto error;
+ }
+ mode_kobj = kobject_create_and_add("workload_modes", module_kobj);
+ if (!mode_kobj) {
+ pr_err("msm_perf: Failed to add mode_kobj\n");
+ ret = -ENOMEM;
+ kobject_put(module_kobj);
+ goto error;
+ }
+ ret = sysfs_create_group(mode_kobj, &attr_group);
+ if (ret) {
+ pr_err("msm_perf: Failed to create sysfs\n");
+ kobject_put(module_kobj);
+ kobject_put(mode_kobj);
+ goto error;
+ }
+ notify_thread = kthread_run(notify_userspace, NULL, "wrkld_notify");
+
+ clusters_inited = true;
+
+ return 0;
+
+error:
+ for (i = 0; i < num_clusters; i++) {
+ if (!managed_clusters[i])
+ break;
+ if (managed_clusters[i]->offlined_cpus)
+ free_cpumask_var(managed_clusters[i]->offlined_cpus);
+ if (managed_clusters[i]->cpus)
+ free_cpumask_var(managed_clusters[i]->cpus);
+ kfree(managed_clusters[i]);
+ }
+ kfree(managed_clusters);
+ return ret;
+}
+
+static int init_events_group(void)
+{
+ int ret;
+ struct kobject *module_kobj;
+
+ module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+ if (!module_kobj) {
+ pr_err("msm_perf: Couldn't find module kobject\n");
+ return -ENOENT;
+ }
+
+ events_kobj = kobject_create_and_add("events", module_kobj);
+ if (!events_kobj) {
+ pr_err("msm_perf: Failed to add events_kobj\n");
+ return -ENOMEM;
+ }
+
+ ret = sysfs_create_group(events_kobj, &events_attr_group);
+ if (ret) {
+ pr_err("msm_perf: Failed to create sysfs\n");
+ return ret;
+ }
+
+ spin_lock_init(&(events_group.cpu_hotplug_lock));
+ events_notify_thread = kthread_run(events_notify_userspace,
+ NULL, "msm_perf:events_notify");
+ if (IS_ERR(events_notify_thread))
+ return PTR_ERR(events_notify_thread);
+
+ events_group.init_success = true;
+
+ return 0;
+}
+
+static int __init msm_performance_init(void)
+{
+ unsigned int cpu;
+
+ cpufreq_register_notifier(&perf_cpufreq_nb, CPUFREQ_POLICY_NOTIFIER);
+ cpufreq_register_notifier(&perf_govinfo_nb, CPUFREQ_GOVINFO_NOTIFIER);
+ cpufreq_register_notifier(&perf_cputransitions_nb,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
+ for_each_present_cpu(cpu)
+ per_cpu(cpu_stats, cpu).max = UINT_MAX;
+
+ register_cpu_notifier(&msm_performance_cpu_notifier);
+
+ init_events_group();
+
+ return 0;
+}
+late_initcall(msm_performance_init);
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
index 79627d0..5ca0fe5 100644
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -480,7 +480,7 @@
while (n[count++])
;
count--;
- if (!count || count >= RPMH_MAX_REQ_IN_BATCH)
+ if (!count || count > RPMH_MAX_REQ_IN_BATCH)
return -EINVAL;
if (state == RPMH_ACTIVE_ONLY_STATE || state == RPMH_AWAKE_STATE) {
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index db12900..8cc77c1 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -16,6 +16,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
#include <linux/qcom-geni-se.h>
#include <linux/spi/spi.h>
@@ -78,6 +79,8 @@
#define TIMESTAMP_AFTER (3)
#define POST_CMD_DELAY (4)
+#define SPI_CORE2X_VOTE (10000)
+
struct spi_geni_master {
struct se_geni_rsc spi_rsc;
resource_size_t phys_addr;
@@ -96,6 +99,7 @@
unsigned int rx_rem_bytes;
struct spi_transfer *cur_xfer;
struct completion xfer_done;
+ struct device *wrapper_dev;
};
static struct spi_master *get_spi_master(struct device *dev)
@@ -243,8 +247,8 @@
dev_err(mas->dev, "Invalid proto %d\n", proto);
return -ENXIO;
}
- geni_se_init(mas->base, FIFO_MODE, 0x0,
- (mas->tx_fifo_depth - 2));
+ geni_se_init(mas->base, 0x0, (mas->tx_fifo_depth - 2));
+ geni_se_select_mode(mas->base, FIFO_MODE);
mas->tx_fifo_depth = get_tx_fifo_depth(mas->base);
mas->rx_fifo_depth = get_rx_fifo_depth(mas->base);
mas->tx_fifo_width = get_tx_fifo_width(mas->base);
@@ -476,6 +480,8 @@
struct spi_geni_master *geni_mas;
struct se_geni_rsc *rsc;
struct resource *res;
+ struct platform_device *wrapper_pdev;
+ struct device_node *wrapper_ph_node;
spi = spi_alloc_master(&pdev->dev, sizeof(struct spi_geni_master));
if (!spi) {
@@ -489,6 +495,29 @@
rsc = &geni_mas->spi_rsc;
geni_mas->dev = &pdev->dev;
spi->dev.of_node = pdev->dev.of_node;
+ wrapper_ph_node = of_parse_phandle(pdev->dev.of_node,
+ "qcom,wrapper-core", 0);
+ if (IS_ERR_OR_NULL(wrapper_ph_node)) {
+ ret = wrapper_ph_node ? PTR_ERR(wrapper_ph_node) : -ENODEV;
+ dev_err(&pdev->dev, "No wrapper core defined\n");
+ goto spi_geni_probe_err;
+ }
+ wrapper_pdev = of_find_device_by_node(wrapper_ph_node);
+ of_node_put(wrapper_ph_node);
+ if (IS_ERR_OR_NULL(wrapper_pdev)) {
+ ret = wrapper_pdev ? PTR_ERR(wrapper_pdev) : -ENODEV;
+ dev_err(&pdev->dev, "Cannot retrieve wrapper device\n");
+ goto spi_geni_probe_err;
+ }
+ geni_mas->wrapper_dev = &wrapper_pdev->dev;
+ geni_mas->spi_rsc.wrapper_dev = &wrapper_pdev->dev;
+ ret = geni_se_resources_init(rsc, SPI_CORE2X_VOTE,
+ (DEFAULT_SE_CLK * DEFAULT_BUS_WIDTH));
+ if (ret) {
+ dev_err(&pdev->dev, "Error geni_se_resources_init\n");
+ goto spi_geni_probe_err;
+ }
+
rsc->geni_pinctrl = devm_pinctrl_get(&pdev->dev);
if (IS_ERR_OR_NULL(rsc->geni_pinctrl)) {
dev_err(&pdev->dev, "No pinctrl config specified!\n");
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index 2ffd0df..8108da8 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -28,8 +28,6 @@
#include <linux/serial_core.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
-#include <linux/msm-bus.h>
-#include <linux/msm-bus-board.h>
/* UART specific GENI registers */
#define SE_UART_LOOPBACK_CFG (0x22C)
@@ -107,8 +105,6 @@
#define DEF_TX_WM (2)
#define DEF_FIFO_WIDTH_BITS (32)
#define UART_CORE2X_VOTE (10000)
-#define DEFAULT_SE_CLK (19200000)
-#define DEFAULT_BUS_WIDTH (4)
#define WAKEBYTE_TIMEOUT_MSEC (2000)
#define IPC_LOG_PWR_PAGES (2)
@@ -138,6 +134,7 @@
unsigned int rx_fifo_wc,
unsigned int rx_last_byte_valid,
unsigned int rx_last);
+ struct device *wrapper_dev;
struct se_geni_rsc serial_rsc;
int loopback;
int wakeup_irq;
@@ -1035,18 +1032,24 @@
if (!uart_console(uport)) {
/* For now only assume FIFO mode. */
msm_port->xfer_mode = FIFO_MODE;
- ret = geni_se_init(uport->membase, msm_port->xfer_mode,
+ ret = geni_se_init(uport->membase,
msm_port->rx_wm, msm_port->rx_rfr);
if (ret) {
dev_err(uport->dev, "%s: Fail\n", __func__);
goto exit_portsetup;
}
+
+ ret = geni_se_select_mode(uport->membase, msm_port->xfer_mode);
+ if (ret)
+ goto exit_portsetup;
+
se_get_packing_config(8, 4, false, &cfg0, &cfg1);
geni_write_reg_nolog(cfg0, uport->membase,
SE_GENI_TX_PACKING_CFG0);
geni_write_reg_nolog(cfg1, uport->membase,
SE_GENI_TX_PACKING_CFG1);
}
+
msm_port->port_setup = true;
/*
* Ensure Port setup related IO completes before returning to
@@ -1435,8 +1438,9 @@
goto exit_geni_serial_earlyconsetup;
}
- geni_se_init(uport->membase, FIFO_MODE, (DEF_FIFO_DEPTH_WORDS >> 1),
- (DEF_FIFO_DEPTH_WORDS - 2));
+ geni_se_init(uport->membase, (DEF_FIFO_DEPTH_WORDS >> 1),
+ (DEF_FIFO_DEPTH_WORDS - 2));
+ geni_se_select_mode(uport->membase, FIFO_MODE);
/*
* Ignore Flow control.
* Disable Tx Parity.
@@ -1599,6 +1603,8 @@
struct uart_driver *drv;
const struct of_device_id *id;
bool is_console = false;
+ struct platform_device *wrapper_pdev;
+ struct device_node *wrapper_ph_node;
id = of_match_device(msm_geni_device_tbl, &pdev->dev);
if (id) {
@@ -1642,23 +1648,24 @@
uport->dev = &pdev->dev;
- if (!(of_property_read_u32(pdev->dev.of_node, "qcom,bus-mas",
- &dev_port->serial_rsc.bus_mas))) {
- dev_port->serial_rsc.bus_bw =
- msm_bus_scale_register(
- dev_port->serial_rsc.bus_mas,
- MSM_BUS_SLAVE_EBI_CH0,
- (char *)dev_name(&pdev->dev),
- false);
- if (IS_ERR_OR_NULL(dev_port->serial_rsc.bus_bw)) {
- ret = PTR_ERR(dev_port->serial_rsc.bus_bw);
- goto exit_geni_serial_probe;
- }
- dev_port->serial_rsc.ab = UART_CORE2X_VOTE;
- dev_port->serial_rsc.ib = DEFAULT_SE_CLK * DEFAULT_BUS_WIDTH;
- } else {
- dev_info(&pdev->dev, "No bus master specified\n");
+ wrapper_ph_node = of_parse_phandle(pdev->dev.of_node,
+ "qcom,wrapper-core", 0);
+ if (IS_ERR_OR_NULL(wrapper_ph_node)) {
+ ret = wrapper_ph_node ? PTR_ERR(wrapper_ph_node) : -ENODEV;
+ goto exit_geni_serial_probe;
}
+ wrapper_pdev = of_find_device_by_node(wrapper_ph_node);
+ of_node_put(wrapper_ph_node);
+ if (IS_ERR_OR_NULL(wrapper_pdev)) {
+ ret = wrapper_pdev ? PTR_ERR(wrapper_pdev) : -ENODEV;
+ goto exit_geni_serial_probe;
+ }
+ dev_port->wrapper_dev = &wrapper_pdev->dev;
+ dev_port->serial_rsc.wrapper_dev = &wrapper_pdev->dev;
+ ret = geni_se_resources_init(&dev_port->serial_rsc, UART_CORE2X_VOTE,
+ (DEFAULT_SE_CLK * DEFAULT_BUS_WIDTH));
+ if (ret)
+ goto exit_geni_serial_probe;
if (of_property_read_u8(pdev->dev.of_node, "qcom,wakeup-byte",
&dev_port->wakeup_byte))
@@ -1777,7 +1784,6 @@
wakeup_source_trash(&port->geni_wake);
uart_remove_one_port(drv, &port->uport);
- msm_bus_scale_unregister(port->serial_rsc.bus_bw);
return 0;
}
@@ -1830,6 +1836,7 @@
if (uart_console(uport)) {
uart_suspend_port((struct uart_driver *)uport->private_data,
uport);
+ se_geni_resources_off(&port->serial_rsc);
} else {
if (!pm_runtime_status_suspended(dev)) {
dev_info(dev, "%s: Is still active\n", __func__);
diff --git a/include/linux/msm_gpi.h b/include/linux/msm_gpi.h
new file mode 100644
index 0000000..31eaf13
--- /dev/null
+++ b/include/linux/msm_gpi.h
@@ -0,0 +1,214 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_GPI_H_
+#define __MSM_GPI_H_
+
+struct __packed msm_gpi_tre {
+ u32 dword[4];
+};
+
+enum msm_gpi_tre_type {
+ MSM_GPI_TRE_INVALID = 0x00,
+ MSM_GPI_TRE_NOP = 0x01,
+ MSM_GPI_TRE_DMA_W_BUF = 0x10,
+ MSM_GPI_TRE_DMA_IMMEDIATE = 0x11,
+ MSM_GPI_TRE_DMA_W_SG_LIST = 0x12,
+ MSM_GPI_TRE_GO = 0x20,
+ MSM_GPI_TRE_CONFIG0 = 0x22,
+ MSM_GPI_TRE_CONFIG1 = 0x23,
+ MSM_GPI_TRE_CONFIG2 = 0x24,
+ MSM_GPI_TRE_CONFIG3 = 0x25,
+ MSM_GPI_TRE_LOCK = 0x30,
+ MSM_GPI_TRE_UNLOCK = 0x31,
+};
+
+#define MSM_GPI_TRE_TYPE(tre) ((tre->dword[3] >> 16) & 0xFF)
+
+/* DMA w. Buffer TRE */
+#define MSM_GPI_DMA_W_BUFFER_TRE_DWORD0(ptr) ((u32)ptr)
+#define MSM_GPI_DMA_W_BUFFER_TRE_DWORD1(ptr) ((u32)(ptr >> 32))
+#define MSM_GPI_DMA_W_BUFFER_TRE_DWORD2(length) (length & 0xFFFFFF)
+#define MSM_GPI_DMA_W_BUFFER_TRE_DWORD3(bei, ieot, ieob, ch) ((0x1 << 20) | \
+ (0x0 << 16) | (bei << 10) | (ieot << 9) | (ieob << 8) | ch)
+#define MSM_GPI_DMA_W_BUFFER_TRE_GET_LEN(tre) (tre->dword[2] & 0xFFFFFF)
+#define MSM_GPI_DMA_W_BUFFER_TRE_SET_LEN(tre, length) (tre->dword[2] = \
+ MSM_GPI_DMA_W_BUFFER_TRE_DWORD2(length))
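+
+/*
+ * Illustrative composition (sketch) of a DMA-with-buffer TRE for a buffer
+ * at dma_addr (u64), len bytes, on channel 0, interrupting on end of
+ * transfer; the variable names here are hypothetical:
+ *
+ *   tre->dword[0] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD0(dma_addr);
+ *   tre->dword[1] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD1(dma_addr);
+ *   tre->dword[2] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD2(len);
+ *   tre->dword[3] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD3(0, 1, 0, 0);
+ */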
+
+/* DMA Immediate TRE */
+#define MSM_GPI_DMA_IMMEDIATE_TRE_DWORD0(d3, d2, d1, d0) ((d3 << 24) | \
+ (d2 << 16) | (d1 << 8) | (d0))
+#define MSM_GPI_DMA_IMMEDIATE_TRE_DWORD1(d4, d5, d6, d7) ((d7 << 24) | \
+ (d6 << 16) | (d5 << 8) | (d4))
+#define MSM_GPI_DMA_IMMEDIATE_TRE_DWORD2(length) (length & 0xF)
+#define MSM_GPI_DMA_IMMEDIATE_TRE_DWORD3(bei, ieot, ieob, ch) ((0x1 << 20) | \
+ (0x1 << 16) | (bei << 10) | (ieot << 9) | (ieob << 8) | ch)
+#define MSM_GPI_DMA_IMMEDIATE_TRE_GET_LEN(tre) (tre->dword[2] & 0xF)
+
+/* DMA w. Scatter/Gather List TRE */
+#define MSM_GPI_SG_LIST_TRE_DWORD0(ptr) ((u32)ptr)
+#define MSM_GPI_SG_LIST_TRE_DWORD1(ptr) ((u32)(ptr >> 32))
+#define MSM_GPI_SG_LIST_TRE_DWORD2(length) (length & 0xFFFF)
+#define MSM_GPI_SG_LIST_TRE_DWORD3(bei, ieot, ieob, ch) ((0x1 << 20) | \
+ (0x2 << 16) | (bei << 10) | (ieot << 9) | (ieob << 8) | ch)
+
+/* SG Element */
+#define MSM_GPI_SG_ELEMENT_DWORD0(ptr) ((u32)ptr)
+#define MSM_GPI_SG_ELEMENT_DWORD1(ptr) ((u32)(ptr >> 32))
+#define MSM_GSI_SG_ELEMENT_DWORD2(length) (length & 0xFFFFF)
+#define MSM_GSI_SG_ELEMENT_DWORD3 (0)
+
+/* Config2 TRE */
+#define GPI_CONFIG2_TRE_DWORD0(gr, txp) ((gr << 20) | (txp))
+#define GPI_CONFIG2_TRE_DWORD1(txp) (txp)
+#define GPI_CONFIG2_TRE_DWORD2 (0)
+#define GPI_CONFIG2_TRE_DWORD3(bei, ieot, ieob, ch) ((0x2 << 20) | \
+ (0x4 << 16) | (bei << 10) | (ieot << 9) | (ieob << 8) | ch)
+
+/* Config3 TRE */
+#define GPI_CONFIG3_TRE_DWORD0(rxp) (rxp)
+#define GPI_CONFIG3_TRE_DWORD1(rxp) (rxp)
+#define GPI_CONFIG3_TRE_DWORD2 (0)
+#define GPI_CONFIG3_TRE_DWORD3(bei, ieot, ieob, ch) ((0x2 << 20) | \
+ (0x5 << 16) | (bei << 10) | (ieot << 9) | (ieob << 8) | ch)
+
+/* SPI Go TRE */
+#define MSM_GPI_SPI_GO_TRE_DWORD0(flags, cs, command) ((flags << 24) | \
+ (cs << 8) | command)
+#define MSM_GPI_SPI_GO_TRE_DWORD1 (0)
+#define MSM_GPI_SPI_GO_TRE_DWORD2(rx_len) (rx_len)
+#define MSM_GPI_SPI_GO_TRE_DWORD3(bei, ieot, ieob, ch) ((0x2 << 20) | \
+ (0x0 << 16) | (bei << 10) | (ieot << 9) | (ieob << 8) | ch)
+
+/* SPI Config0 TRE */
+#define MSM_GPI_SPI_CONFIG0_TRE_DWORD0(pack, flags, word_size) ((pack << 24) | \
+ (flags << 8) | word_size)
+#define MSM_GPI_SPI_CONFIG0_TRE_DWORD1(it_del, cs_clk_del, iw_del) \
+ ((it_del << 16) | (cs_clk_del << 8) | iw_del)
+#define MSM_GPI_SPI_CONFIG0_TRE_DWORD2(clk_src, clk_div) ((clk_src << 16) | \
+ clk_div)
+#define MSM_GPI_SPI_CONFIG0_TRE_DWORD3(bei, ieot, ieob, ch) ((0x2 << 20) | \
+ (0x2 << 16) | (bei << 10) | (ieot << 9) | (ieob << 8) | ch)
+
+/* UART Go TRE */
+#define MSM_GPI_UART_GO_TRE_DWORD0(en_hunt, command) ((en_hunt << 8) | command)
+#define MSM_GPI_UART_GO_TRE_DWORD1 (0)
+#define MSM_GPI_UART_GO_TRE_DWORD2 (0)
+#define MSM_GPI_UART_GO_TRE_DWORD3(bei, ieot, ieob, ch) ((0x2 << 20) | \
+ (0x0 << 16) | (bei << 10) | (ieot << 9) | (ieob << 8) | ch)
+
+/* UART Config0 TRE */
+#define MSM_GPI_UART_CONFIG0_TRE_DWORD0(pack, hunt, flags, parity, sbl, size) \
+ ((pack << 24) | (hunt << 16) | (flags << 8) | (parity << 5) | \
+ (sbl << 3) | size)
+#define MSM_GPI_UART_CONFIG0_TRE_DWORD1(rfr_level, rx_stale) \
+ ((rfr_level << 24) | rx_stale)
+#define MSM_GPI_UART_CONFIG0_TRE_DWORD2(clk_source, clk_div) \
+ ((clk_source << 16) | clk_div)
+#define MSM_GPI_UART_CONFIG0_TRE_DWORD3(bei, ieot, ieob, ch) ((0x2 << 20) | \
+ (0x2 << 16) | (bei << 10) | (ieot << 9) | (ieob << 8) | ch)
+
+/* I2C GO TRE */
+#define MSM_GPI_I2C_GO_TRE_DWORD0(flags, slave, opcode) \
+ ((flags << 24) | (slave << 8) | opcode)
+#define MSM_GPI_I2C_GO_TRE_DWORD1 (0)
+#define MSM_GPI_I2C_GO_TRE_DWORD2(rx_len) (rx_len)
+#define MSM_GPI_I2C_GO_TRE_DWORD3(bei, ieot, ieob, ch) ((0x2 << 20) | \
+ (0x0 << 16) | (bei << 10) | (ieot << 9) | (ieob << 8) | ch)
+
+/* I2C Config0 TRE */
+#define MSM_GPI_I2C_CONFIG0_TRE_DWORD0(pack, t_cycle, t_high, t_low) \
+ ((pack << 24) | (t_cycle << 16) | (t_high << 8) | t_low)
+#define MSM_GPI_I2C_CONFIG0_TRE_DWORD1(inter_delay, noise_rej) \
+ ((inter_delay << 16) | noise_rej)
+#define MSM_GPI_I2C_CONFIG0_TRE_DWORD2(clk_src, clk_div) \
+ ((clk_src << 16) | clk_div)
+#define MSM_GPI_I2C_CONFIG0_TRE_DWORD3(bei, ieot, ieob, ch) ((0x2 << 20) | \
+ (0x2 << 16) | (bei << 10) | (ieot << 9) | (ieob << 8) | ch)
+
+/* cmds to perform by using dmaengine_slave_config() */
+enum msm_gpi_ctrl_cmd {
+ MSM_GPI_INIT,
+ MSM_GPI_CMD_UART_SW_STALE,
+ MSM_GPI_CMD_UART_RFR_READY,
+ MSM_GPI_CMD_UART_RFR_NOT_READY,
+};
+
+enum msm_gpi_cb_event {
+ /* These events are hardware generated events */
+ MSM_GPI_QUP_NOTIFY,
+ MSM_GPI_QUP_ERROR, /* global error */
+ MSM_GPI_QUP_CH_ERROR, /* channel specific error */
+ MSM_GPI_QUP_FW_ERROR, /* unhandled error */
+ /* These events indicate a software bug */
+ MSM_GPI_QUP_PENDING_EVENT,
+ MSM_GPI_QUP_EOT_DESC_MISMATCH,
+ MSM_GPI_QUP_SW_ERROR,
+ MSM_GPI_QUP_MAX_EVENT,
+};
+
+struct msm_gpi_error_log {
+ u32 routine;
+ u32 type;
+ u32 error_code;
+};
+
+struct msm_gpi_cb {
+ enum msm_gpi_cb_event cb_event;
+ u64 status;
+ u64 timestamp;
+ u64 count;
+ struct msm_gpi_error_log error_log;
+};
+
+struct gpi_client_info {
+	/*
+	 * The memory backing msm_gpi_cb is released once the callback
+	 * returns; clients shall copy out any data needed for post
+	 * processing before returning from the callback.
+	 */
+ void (*callback)(struct dma_chan *chan,
+ struct msm_gpi_cb const *msm_gpi_cb,
+ void *cb_param);
+ void *cb_param;
+};
+
+/*
+ * Control structure to configure the GPI DMA engine via
+ * dmaengine_slave_config(); dma_chan.private should point to this
+ * msm_gpi_ctrl structure.
+ */
+struct msm_gpi_ctrl {
+ enum msm_gpi_ctrl_cmd cmd;
+ union {
+ struct gpi_client_info init;
+ };
+};
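
A hedged sketch of how a client might hand its event callback to the GPI engine through dmaengine_slave_config(); names prefixed example_ are hypothetical:

#include <linux/dmaengine.h>

static void example_ev_cb(struct dma_chan *chan,
			  struct msm_gpi_cb const *cb, void *cb_param)
{
	/* cb memory is reclaimed after return; copy out what is needed */
	pr_debug("GPI event %d\n", cb->cb_event);
}

static int example_channel_init(struct dma_chan *chan, void *drv_data)
{
	struct msm_gpi_ctrl ctrl = {
		.cmd = MSM_GPI_INIT,
		.init.callback = example_ev_cb,
		.init.cb_param = drv_data,
	};
	struct dma_slave_config config = { 0 };

	/* dma_chan.private must point to the msm_gpi_ctrl structure */
	chan->private = &ctrl;
	return dmaengine_slave_config(chan, &config);
}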
+
+enum msm_gpi_tce_code {
+ MSM_GPI_TCE_SUCCESS = 1,
+ MSM_GPI_TCE_EOT = 2,
+ MSM_GPI_TCE_EOB = 4,
+ MSM_GPI_TCE_UNEXP_ERR = 16,
+};
+
+/*
+ * GPI-specific callback parameters passed between the GPI client and the
+ * GPI engine. The client shall point async_desc.callback_param to a
+ * msm_gpi_dma_async_tx_cb_param structure.
+ */
+struct msm_gpi_dma_async_tx_cb_param {
+ u32 length;
+ enum msm_gpi_tce_code completion_code; /* TCE event code */
+ u32 status;
+ void *userdata;
+};
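
And a sketch of a completion callback consuming these parameters, assuming the client pointed the descriptor's callback_param at this structure:

static void example_xfer_cb(void *cb_param)
{
	struct msm_gpi_dma_async_tx_cb_param *cb = cb_param;

	if (cb->completion_code == MSM_GPI_TCE_EOT)
		pr_debug("transferred %u bytes\n", cb->length);
	else
		pr_err("completion code %d, status 0x%x\n",
		       cb->completion_code, cb->status);
}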
+
+#endif
diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h
index 12b3d51e8..657ac07 100644
--- a/include/linux/qcom-geni-se.h
+++ b/include/linux/qcom-geni-se.h
@@ -14,18 +14,22 @@
#ifndef _LINUX_QCOM_GENI_SE
#define _LINUX_QCOM_GENI_SE
-#include <linux/io.h>
#include <linux/clk.h>
+#include <linux/dma-direction.h>
+#include <linux/io.h>
+#include <linux/list.h>
#include <linux/msm-bus.h>
#include <linux/msm-bus-board.h>
-#include <linux/pm_runtime.h>
+/* Transfer modes supported by GENI Serial Engines */
enum se_xfer_mode {
INVALID,
FIFO_MODE,
GSI_DMA,
+ SE_DMA,
};
+/* Protocols supported by GENI Serial Engines */
enum se_protocol_types {
NONE,
SPI,
@@ -34,13 +38,28 @@
I3C
};
+/**
+ * struct geni_se_rsc - GENI Serial Engine Resource
+ * @wrapper_dev: Pointer to the parent QUPv3 core.
+ * @se_clk: Handle to the core serial engine clock.
+ * @m_ahb_clk: Handle to the primary AHB clock.
+ * @s_ahb_clk: Handle to the secondary AHB clock.
+ * @ab_list: List head of the average bus bandwidth list.
+ * @ab: Average bus bandwidth request value.
+ * @ib_list: List head of the instantaneous bus bandwidth list.
+ * @ib: Instantaneous bus bandwidth request value.
+ * @geni_pinctrl: Handle to the pinctrl configuration.
+ * @geni_gpio_active: Handle to the default/active pinctrl state.
+ * @geni_gpio_sleep: Handle to the sleep pinctrl state.
+ */
struct se_geni_rsc {
+ struct device *wrapper_dev;
struct clk *se_clk;
struct clk *m_ahb_clk;
struct clk *s_ahb_clk;
- struct msm_bus_client_handle *bus_bw;
- unsigned int bus_mas;
+ struct list_head ab_list;
unsigned long ab;
+ struct list_head ib_list;
unsigned long ib;
struct pinctrl *geni_pinctrl;
struct pinctrl_state *geni_gpio_active;
@@ -64,6 +83,7 @@
#define GENI_FW_REVISION_RO (0x68)
#define GENI_FW_S_REVISION_RO (0x6C)
#define SE_GENI_CLK_SEL (0x7C)
+#define SE_GENI_BYTE_GRAN (0x254)
#define SE_GENI_DMA_MODE_EN (0x258)
#define SE_GENI_TX_PACKING_CFG0 (0x260)
#define SE_GENI_TX_PACKING_CFG1 (0x264)
@@ -182,11 +202,11 @@
#define M_TX_FIFO_WR_ERR_EN (BIT(29))
#define M_TX_FIFO_WATERMARK_EN (BIT(30))
#define M_SEC_IRQ_EN (BIT(31))
-#define M_COMMON_GENI_M_IRQ_EN (GENMASK(3, 0) | M_TIMESTAMP_EN | \
- GENMASK(14, 8) | M_IO_DATA_DEASSERT_EN | \
+#define M_COMMON_GENI_M_IRQ_EN (GENMASK(6, 1) | \
+ M_IO_DATA_DEASSERT_EN | \
M_IO_DATA_ASSERT_EN | M_RX_FIFO_RD_ERR_EN | \
M_RX_FIFO_WR_ERR_EN | M_TX_FIFO_RD_ERR_EN | \
- M_TX_FIFO_WR_ERR_EN | M_SEC_IRQ_EN)
+ M_TX_FIFO_WR_ERR_EN)
/* GENI_S_IRQ_EN fields */
#define S_CMD_DONE_EN (BIT(0))
@@ -208,7 +228,7 @@
#define S_RX_FIFO_WR_ERR_EN (BIT(25))
#define S_RX_FIFO_WATERMARK_EN (BIT(26))
#define S_RX_FIFO_LAST_EN (BIT(27))
-#define S_COMMON_GENI_S_IRQ_EN (GENMASK(3, 0) | GENMASK(14, 8) | \
+#define S_COMMON_GENI_S_IRQ_EN (GENMASK(5, 1) | GENMASK(13, 9) | \
S_RX_FIFO_RD_ERR_EN | S_RX_FIFO_WR_ERR_EN)
/* GENI_/TX/RX/RX_RFR/_WATERMARK_REG fields */
@@ -261,304 +281,557 @@
#define RX_DMA_IRQ_DELAY_MSK (GENMASK(8, 6))
#define RX_DMA_IRQ_DELAY_SHFT (6)
+#define SE_DMA_TX_PTR_L (0xC30)
+#define SE_DMA_TX_PTR_H (0xC34)
+#define SE_DMA_TX_ATTR (0xC38)
+#define SE_DMA_TX_LEN (0xC3C)
+#define SE_DMA_TX_IRQ_STAT (0xC40)
+#define SE_DMA_TX_IRQ_CLR (0xC44)
+#define SE_DMA_TX_IRQ_EN (0xC48)
+#define SE_DMA_TX_IRQ_EN_SET (0xC4C)
+#define SE_DMA_TX_IRQ_EN_CLR (0xC50)
+#define SE_DMA_TX_LEN_IN (0xC54)
+#define SE_DMA_TX_FSM_RST (0xC58)
+#define SE_DMA_TX_MAX_BURST (0xC5C)
+
+#define SE_DMA_RX_PTR_L (0xD30)
+#define SE_DMA_RX_PTR_H (0xD34)
+#define SE_DMA_RX_ATTR (0xD38)
+#define SE_DMA_RX_LEN (0xD3C)
+#define SE_DMA_RX_IRQ_STAT (0xD40)
+#define SE_DMA_RX_IRQ_CLR (0xD44)
+#define SE_DMA_RX_IRQ_EN (0xD48)
+#define SE_DMA_RX_IRQ_EN_SET (0xD4C)
+#define SE_DMA_RX_IRQ_EN_CLR (0xD50)
+#define SE_DMA_RX_LEN_IN (0xD54)
+#define SE_DMA_RX_FSM_RST (0xD58)
+#define SE_DMA_RX_MAX_BURST (0xD5C)
+#define SE_DMA_RX_FLUSH (0xD60)
+
+#define DEFAULT_BUS_WIDTH (4)
+#define DEFAULT_SE_CLK (19200000)
+
+#define GENI_SE_ERR(log_ctx, print, dev, x...) do { \
+if (log_ctx) \
+ ipc_log_string(log_ctx, x); \
+if (print) { \
+ if (dev) \
+ dev_err((dev), x); \
+ else \
+ pr_err(x); \
+} \
+} while (0)
+
+#define GENI_SE_DBG(log_ctx, print, dev, x...) do { \
+if (log_ctx) \
+ ipc_log_string(log_ctx, x); \
+if (print) { \
+ if (dev) \
+ dev_dbg((dev), x); \
+ else \
+ pr_debug(x); \
+} \
+} while (0)
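
These macros mirror a message into an IPC log and optionally into the kernel log. A usage sketch, assuming the log context came from ipc_log_context_create():

static void example_log(void *ipc_log, struct device *dev, int len)
{
	/* always lands in the IPC log; hits dmesg only when 'print' is true */
	GENI_SE_DBG(ipc_log, false, dev, "%s: xfer of %d bytes\n",
		    __func__, len);
	GENI_SE_ERR(ipc_log, true, dev, "%s: xfer of %d bytes timed out\n",
		    __func__, len);
}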
+
+#ifdef CONFIG_QCOM_GENI_SE
+/**
+ * geni_read_reg_nolog() - Helper function to read from a GENI register
+ * @base: Base address of the serial engine's register block.
+ * @offset: Offset within the serial engine's register block.
+ *
+ * Return: Contents of the register.
+ */
+unsigned int geni_read_reg_nolog(void __iomem *base, int offset);
+
+/**
+ * geni_write_reg_nolog() - Helper function to write into a GENI register
+ * @value: Value to be written into the register.
+ * @base: Base address of the serial engine's register block.
+ * @offset: Offset within the serial engine's register block.
+ */
+void geni_write_reg_nolog(unsigned int value, void __iomem *base, int offset);
+
+/**
+ * geni_read_reg() - Helper function to read from a GENI register
+ * @base: Base address of the serial engine's register block.
+ * @offset: Offset within the serial engine's register block.
+ *
+ * Return: Contents of the register.
+ */
+unsigned int geni_read_reg(void __iomem *base, int offset);
+
+/**
+ * geni_write_reg() - Helper function to write into a GENI register
+ * @value: Value to be written into the register.
+ * @base: Base address of the serial engine's register block.
+ * @offset: Offset within the serial engine's register block.
+ */
+void geni_write_reg(unsigned int value, void __iomem *base, int offset);
+
+/**
+ * get_se_proto() - Read the protocol configured for a serial engine
+ * @base: Base address of the serial engine's register block.
+ *
+ * Return: Protocol value as configured in the serial engine.
+ */
+int get_se_proto(void __iomem *base);
+
+/**
+ * geni_se_init() - Initialize the GENI Serial Engine
+ * @base: Base address of the serial engine's register block.
+ * @rx_wm: Receive watermark to be configured.
+ * @rx_rfr: Ready-for-receive watermark to be configured.
+ *
+ * This function is used to initialize the GENI serial engine and to
+ * configure the receive and ready-for-receive watermarks.
+ *
+ * Return: 0 on success, standard Linux error codes on failure/error.
+ */
+int geni_se_init(void __iomem *base, unsigned int rx_wm, unsigned int rx_rfr);
+
+/**
+ * geni_se_select_mode() - Select the serial engine transfer mode
+ * @base: Base address of the serial engine's register block.
+ * @mode: Transfer mode to be selected.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int geni_se_select_mode(void __iomem *base, int mode);
+
+/**
+ * geni_setup_m_cmd() - Setup the primary sequencer
+ * @base: Base address of the serial engine's register block.
+ * @cmd: Command/Operation to setup in the primary sequencer.
+ * @params: Parameter for the sequencer command.
+ *
+ * This function is used to configure the primary sequencer with the
+ * command and its associated parameters.
+ */
+void geni_setup_m_cmd(void __iomem *base, u32 cmd, u32 params);
+
+/**
+ * geni_setup_s_cmd() - Setup the secondary sequencer
+ * @base: Base address of the serial engine's register block.
+ * @cmd: Command/Operation to setup in the secondary sequencer.
+ * @params: Parameter for the sequencer command.
+ *
+ * This function is used to configure the secondary sequencer with the
+ * command and its associated parameters.
+ */
+void geni_setup_s_cmd(void __iomem *base, u32 cmd, u32 params);
+
+/**
+ * geni_cancel_m_cmd() - Cancel the command configured in the primary sequencer
+ * @base: Base address of the serial engine's register block.
+ *
+ * This function is used to cancel the currently configured command in the
+ * primary sequencer.
+ */
+void geni_cancel_m_cmd(void __iomem *base);
+
+/**
+ * geni_cancel_s_cmd() - Cancel the command configured in the secondary
+ * sequencer
+ * @base: Base address of the serial engine's register block.
+ *
+ * This function is used to cancel the currently configured command in the
+ * secondary sequencer.
+ */
+void geni_cancel_s_cmd(void __iomem *base);
+
+/**
+ * geni_abort_m_cmd() - Abort the command configured in the primary sequencer
+ * @base: Base address of the serial engine's register block.
+ *
+ * This function is used to force abort the currently configured command in the
+ * primary sequencer.
+ */
+void geni_abort_m_cmd(void __iomem *base);
+
+/**
+ * geni_abort_s_cmd() - Abort the command configured in the secondary
+ * sequencer
+ * @base: Base address of the serial engine's register block.
+ *
+ * This function is used to force abort the currently configured command in the
+ * secondary sequencer.
+ */
+void geni_abort_s_cmd(void __iomem *base);
+
+/**
+ * get_tx_fifo_depth() - Get the TX fifo depth of the serial engine
+ * @base: Base address of the serial engine's register block.
+ *
+ * This function is used to get the depth, i.e. the number of elements, of
+ * the TX fifo of the serial engine.
+ *
+ * Return: TX fifo depth in units of FIFO words.
+ */
+int get_tx_fifo_depth(void __iomem *base);
+
+/**
+ * get_tx_fifo_width() - Get the TX fifo width of the serial engine
+ * @base: Base address of the serial engine's register block.
+ *
+ * This function is used to get the width, i.e. the word size per element,
+ * of the TX fifo of the serial engine.
+ *
+ * Return: TX fifo width in bits.
+ */
+int get_tx_fifo_width(void __iomem *base);
+
+/**
+ * get_rx_fifo_depth() - Get the RX fifo depth of the serial engine
+ * @base: Base address of the serial engine's register block.
+ *
+ * This function is used to get the depth, i.e. the number of elements, of
+ * the RX fifo of the serial engine.
+ *
+ * Return: RX fifo depth in units of FIFO words.
+ */
+int get_rx_fifo_depth(void __iomem *base);
+
+/**
+ * se_get_packing_config() - Get the packing configuration based on input
+ * @bpw: Bits of data per transfer word.
+ * @pack_words: Number of words per fifo element.
+ * @msb_to_lsb: Transfer from MSB to LSB or vice-versa.
+ * @cfg0: Output buffer to hold the first half of configuration.
+ * @cfg1: Output buffer to hold the second half of configuration.
+ *
+ * This function is used to calculate the packing configuration based on
+ * the input packing requirement and the configuration logic.
+ */
+void se_get_packing_config(int bpw, int pack_words, bool msb_to_lsb,
+ unsigned long *cfg0, unsigned long *cfg1);
+
+/**
+ * se_config_packing() - Packing configuration of the serial engine
+ * @base: Base address of the serial engine's register block.
+ * @bpw: Bits of data per transfer word.
+ * @pack_words: Number of words per fifo element.
+ * @msb_to_lsb: Transfer from MSB to LSB or vice-versa.
+ *
+ * This function is used to configure the packing rules for the current
+ * transfer.
+ */
+void se_config_packing(void __iomem *base, int bpw, int pack_words,
+ bool msb_to_lsb);
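
Putting the pieces above together, a FIFO-mode bring-up could look like this sketch; the watermark and packing choices are illustrative, not mandated by this header:

static int example_fifo_setup(void __iomem *base)
{
	int ret;

	/* RX watermark of 1 word, RFR two words below the FIFO depth */
	ret = geni_se_init(base, 1, get_rx_fifo_depth(base) - 2);
	if (ret)
		return ret;

	ret = geni_se_select_mode(base, FIFO_MODE);
	if (ret)
		return ret;

	/* 8 bits per word, 4 words per FIFO entry, MSB first */
	se_config_packing(base, 8, 4, true);
	return 0;
}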
+
+/**
+ * se_geni_resources_off() - Turn off resources associated with the serial
+ * engine
+ * @rsc: Handle to resources associated with the serial engine.
+ *
+ * Return: 0 on success, standard Linux error codes on failure/error.
+ */
+int se_geni_resources_off(struct se_geni_rsc *rsc);
+
+/**
+ * se_geni_resources_on() - Turn on resources associated with the serial
+ * engine
+ * @rsc: Handle to resources associated with the serial engine.
+ *
+ * Return: 0 on success, standard Linux error codes on failure/error.
+ */
+int se_geni_resources_on(struct se_geni_rsc *rsc);
+
+/**
+ * geni_se_resources_init() - Init the SE resource structure
+ * @rsc: SE resource structure to be initialized.
+ * @ab: Initial Average bus bandwidth request value.
+ * @ib: Initial Instantaneous bus bandwidth request value.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int geni_se_resources_init(struct se_geni_rsc *rsc,
+ unsigned long ab, unsigned long ib);
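
The expected lifecycle is geni_se_resources_init() once at probe, then on/off brackets around activity; a sketch with placeholder bandwidth votes:

static int example_active_window(struct se_geni_rsc *rsc)
{
	/* at probe: geni_se_resources_init(rsc, 1000000UL, 2000000UL); */
	int ret = se_geni_resources_on(rsc);

	if (ret)
		return ret;

	/* ... issue commands while clocks, pinctrl and bus votes are up ... */

	return se_geni_resources_off(rsc);
}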
+
+/**
+ * geni_se_tx_dma_prep() - Prepare the Serial Engine for TX DMA transfer
+ * @wrapper_dev: QUPv3 Wrapper Device to which the TX buffer is mapped.
+ * @base: Base address of the SE register block.
+ * @tx_buf: Pointer to the TX buffer.
+ * @tx_len: Length of the TX buffer.
+ * @tx_dma: Pointer to store the mapped DMA address.
+ *
+ * This function is used to prepare the buffers for DMA TX.
+ *
+ * Return: 0 on success, standard Linux error codes on error/failure.
+ */
+int geni_se_tx_dma_prep(struct device *wrapper_dev, void __iomem *base,
+ void *tx_buf, int tx_len, dma_addr_t *tx_dma);
+
+/**
+ * geni_se_rx_dma_prep() - Prepare the Serial Engine for RX DMA transfer
+ * @wrapper_dev:	QUPv3 Wrapper Device to which the RX buffer is mapped.
+ * @base: Base address of the SE register block.
+ * @rx_buf: Pointer to the RX buffer.
+ * @rx_len: Length of the RX buffer.
+ * @rx_dma: Pointer to store the mapped DMA address.
+ *
+ * This function is used to prepare the buffers for DMA RX.
+ *
+ * Return: 0 on success, standard Linux error codes on error/failure.
+ */
+int geni_se_rx_dma_prep(struct device *wrapper_dev, void __iomem *base,
+ void *rx_buf, int rx_len, dma_addr_t *rx_dma);
+
+/**
+ * geni_se_tx_dma_unprep() - Unprepare the Serial Engine after TX DMA transfer
+ * @wrapper_dev: QUPv3 Wrapper Device to which the TX buffer is mapped.
+ * @tx_dma: DMA address of the TX buffer.
+ * @tx_len: Length of the TX buffer.
+ *
+ * This function is used to unprepare the DMA buffers after DMA TX.
+ */
+void geni_se_tx_dma_unprep(struct device *wrapper_dev,
+ dma_addr_t tx_dma, int tx_len);
+
+/**
+ * geni_se_rx_dma_unprep() - Unprepare the Serial Engine after RX DMA transfer
+ * @wrapper_dev:	QUPv3 Wrapper Device to which the RX buffer is mapped.
+ * @rx_dma: DMA address of the RX buffer.
+ * @rx_len: Length of the RX buffer.
+ *
+ * This function is used to unprepare the DMA buffers after DMA RX.
+ */
+void geni_se_rx_dma_unprep(struct device *wrapper_dev,
+ dma_addr_t rx_dma, int rx_len);
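
A sketch of a DMA TX round trip with the prep/unprep pair; the interrupt wait in the middle is elided:

static int example_tx_dma(struct device *wrapper_dev, void __iomem *base,
			  void *buf, int len)
{
	dma_addr_t tx_dma;
	int ret;

	ret = geni_se_tx_dma_prep(wrapper_dev, base, buf, len, &tx_dma);
	if (ret)
		return ret;

	/* ... start the command and wait for the TX-done interrupt ... */

	geni_se_tx_dma_unprep(wrapper_dev, tx_dma, len);
	return 0;
}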
+
+/**
+ * geni_se_qupv3_hw_version() - Read the QUPv3 Hardware version
+ * @wrapper_dev: Pointer to the corresponding QUPv3 wrapper core.
+ * @major: Buffer for Major Version field.
+ * @minor: Buffer for Minor Version field.
+ * @step: Buffer for Step Version field.
+ *
+ * Return: 0 on success, standard Linux error codes on failure/error.
+ */
+int geni_se_qupv3_hw_version(struct device *wrapper_dev, unsigned int *major,
+ unsigned int *minor, unsigned int *step);
+
+/**
+ * geni_se_iommu_map_buf() - Map a single buffer into QUPv3 context bank
+ * @wrapper_dev: Pointer to the corresponding QUPv3 wrapper core.
+ * @iova: Pointer in which the mapped virtual address is stored.
+ * @buf: Address of the buffer that needs to be mapped.
+ * @size: Size of the buffer.
+ * @dir: Direction of the DMA transfer.
+ *
+ * This function is used to map an already allocated buffer into the
+ * QUPv3 context bank device space.
+ *
+ * Return: 0 on success, standard Linux error codes on failure/error.
+ */
+int geni_se_iommu_map_buf(struct device *wrapper_dev, dma_addr_t *iova,
+ void *buf, size_t size, enum dma_data_direction dir);
+
+/**
+ * geni_se_iommu_alloc_buf() - Allocate & map a single buffer into QUPv3
+ * context bank
+ * @wrapper_dev: Pointer to the corresponding QUPv3 wrapper core.
+ * @iova: Pointer in which the mapped virtual address is stored.
+ * @size: Size of the buffer.
+ *
+ * This function is used to allocate a buffer and map it into the
+ * QUPv3 context bank device space.
+ *
+ * Return: address of the buffer on success, NULL or ERR_PTR on
+ * failure/error.
+ */
+void *geni_se_iommu_alloc_buf(struct device *wrapper_dev, dma_addr_t *iova,
+ size_t size);
+
+/**
+ * geni_se_iommu_unmap_buf() - Unmap a single buffer from QUPv3 context bank
+ * @wrapper_dev: Pointer to the corresponding QUPv3 wrapper core.
+ * @iova: Pointer in which the mapped virtual address is stored.
+ * @size: Size of the buffer.
+ * @dir: Direction of the DMA transfer.
+ *
+ * This function is used to unmap an already mapped buffer from the
+ * QUPv3 context bank device space.
+ *
+ * Return: 0 on success, standard Linux error codes on failure/error.
+ */
+int geni_se_iommu_unmap_buf(struct device *wrapper_dev, dma_addr_t *iova,
+ size_t size, enum dma_data_direction dir);
+
+/**
+ * geni_se_iommu_free_buf() - Unmap & free a single buffer from QUPv3
+ * context bank
+ * @wrapper_dev: Pointer to the corresponding QUPv3 wrapper core.
+ * @iova: Pointer in which the mapped virtual address is stored.
+ * @buf: Address of the buffer.
+ * @size: Size of the buffer.
+ *
+ * This function is used to unmap and free a buffer from the
+ * QUPv3 context bank device space.
+ *
+ * Return: 0 on success, standard Linux error codes on failure/error.
+ */
+int geni_se_iommu_free_buf(struct device *wrapper_dev, dma_addr_t *iova,
+ void *buf, size_t size);
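
And a sketch of the alloc/free pairing against the QUPv3 context bank; error handling is reduced to the minimum:

static int example_iommu_cycle(struct device *wrapper_dev, size_t size)
{
	dma_addr_t iova;
	void *buf;

	buf = geni_se_iommu_alloc_buf(wrapper_dev, &iova, size);
	if (IS_ERR_OR_NULL(buf))
		return -ENOMEM;

	/* ... hardware uses 'iova', the CPU uses 'buf' ... */

	return geni_se_iommu_free_buf(wrapper_dev, &iova, buf, size);
}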
+
+#else
static inline unsigned int geni_read_reg_nolog(void __iomem *base, int offset)
{
- return readl_relaxed_no_log(base + offset);
+ return 0;
}
-static inline void geni_write_reg_nolog(unsigned int value, void __iomem *base,
- int offset)
+static inline void geni_write_reg_nolog(unsigned int value,
+ void __iomem *base, int offset)
{
- return writel_relaxed_no_log(value, (base + offset));
}
static inline unsigned int geni_read_reg(void __iomem *base, int offset)
{
- return readl_relaxed(base + offset);
+ return 0;
}
static inline void geni_write_reg(unsigned int value, void __iomem *base,
int offset)
{
- writel_relaxed(value, (base + offset));
}
static inline int get_se_proto(void __iomem *base)
{
- int proto = 0;
-
- proto = ((geni_read_reg(base, GENI_FW_REVISION_RO)
- & FW_REV_PROTOCOL_MSK) >> FW_REV_PROTOCOL_SHFT);
- return proto;
+ return -ENXIO;
}
-static inline int se_geni_irq_en(void __iomem *base, int mode)
-{
- int ret = 0;
- unsigned int common_geni_m_irq_en;
- unsigned int common_geni_s_irq_en;
- int proto = get_se_proto(base);
-
- common_geni_m_irq_en = geni_read_reg(base, SE_GENI_M_IRQ_EN);
- common_geni_s_irq_en = geni_read_reg(base, SE_GENI_S_IRQ_EN);
- /* Common to all modes */
- common_geni_m_irq_en |= M_COMMON_GENI_M_IRQ_EN;
- common_geni_s_irq_en |= S_COMMON_GENI_S_IRQ_EN;
-
- switch (mode) {
- case FIFO_MODE:
- {
- if (proto != UART) {
- common_geni_m_irq_en |=
- (M_CMD_DONE_EN | M_TX_FIFO_WATERMARK_EN |
- M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN);
- common_geni_s_irq_en |= S_CMD_DONE_EN;
- }
- break;
- }
- case GSI_DMA:
- break;
- default:
- pr_err("%s: Invalid mode %d\n", __func__, mode);
- ret = -ENXIO;
- goto exit_irq_en;
- }
-
-
- geni_write_reg(common_geni_m_irq_en, base, SE_GENI_M_IRQ_EN);
- geni_write_reg(common_geni_s_irq_en, base, SE_GENI_S_IRQ_EN);
-exit_irq_en:
- return ret;
-}
-
-
-static inline void se_set_rx_rfr_wm(void __iomem *base, unsigned int rx_wm,
- unsigned int rx_rfr)
-{
- geni_write_reg(rx_wm, base, SE_GENI_RX_WATERMARK_REG);
- geni_write_reg(rx_rfr, base, SE_GENI_RX_RFR_WATERMARK_REG);
-}
-
-static inline int se_io_set_mode(void __iomem *base, int mode)
-{
- int ret = 0;
- unsigned int io_mode = 0;
- unsigned int geni_dma_mode = 0;
- unsigned int gsi_event_en = 0;
-
- io_mode = geni_read_reg(base, SE_IRQ_EN);
- geni_dma_mode = geni_read_reg(base, SE_GENI_DMA_MODE_EN);
- gsi_event_en = geni_read_reg(base, SE_GSI_EVENT_EN);
-
- switch (mode) {
- case FIFO_MODE:
- {
- io_mode |= (GENI_M_IRQ_EN | GENI_S_IRQ_EN);
- io_mode |= (DMA_TX_IRQ_EN | DMA_RX_IRQ_EN);
- geni_dma_mode &= ~GENI_DMA_MODE_EN;
- gsi_event_en = 0;
- break;
-
- }
- case GSI_DMA:
- geni_dma_mode |= GENI_DMA_MODE_EN;
- io_mode &= ~(DMA_TX_IRQ_EN | DMA_RX_IRQ_EN);
- gsi_event_en |= (DMA_RX_EVENT_EN | DMA_TX_EVENT_EN |
- GENI_M_EVENT_EN | GENI_S_EVENT_EN);
- break;
- default:
- ret = -ENXIO;
- goto exit_set_mode;
- }
- geni_write_reg(io_mode, base, SE_IRQ_EN);
- geni_write_reg(geni_dma_mode, base, SE_GENI_DMA_MODE_EN);
- geni_write_reg(gsi_event_en, base, SE_GSI_EVENT_EN);
-exit_set_mode:
- return ret;
-}
-
-static inline void se_io_init(void __iomem *base)
-{
- unsigned int io_op_ctrl = 0;
- unsigned int geni_cgc_ctrl;
- unsigned int dma_general_cfg;
-
- geni_cgc_ctrl = geni_read_reg(base, GENI_CGC_CTRL);
- dma_general_cfg = geni_read_reg(base, SE_DMA_GENERAL_CFG);
- geni_cgc_ctrl |= DEFAULT_CGC_EN;
- dma_general_cfg |= (AHB_SEC_SLV_CLK_CGC_ON | DMA_AHB_SLV_CFG_ON |
- DMA_TX_CLK_CGC_ON | DMA_RX_CLK_CGC_ON);
- io_op_ctrl |= DEFAULT_IO_OUTPUT_CTRL_MSK;
- geni_write_reg(geni_cgc_ctrl, base, GENI_CGC_CTRL);
- geni_write_reg(dma_general_cfg, base, SE_DMA_GENERAL_CFG);
-
- geni_write_reg(io_op_ctrl, base, GENI_OUTPUT_CTRL);
- geni_write_reg(FORCE_DEFAULT, base, GENI_FORCE_DEFAULT_REG);
-}
-
-static inline int geni_se_init(void __iomem *base, int mode,
+static inline int geni_se_init(void __iomem *base,
unsigned int rx_wm, unsigned int rx_rfr)
{
- int ret = 0;
+ return -ENXIO;
+}
- se_io_init(base);
- ret = se_io_set_mode(base, mode);
- if (ret)
- goto exit_geni_se_init;
-
- se_set_rx_rfr_wm(base, rx_wm, rx_rfr);
- ret = se_geni_irq_en(base, mode);
- if (ret)
- goto exit_geni_se_init;
-
-exit_geni_se_init:
- return ret;
+static inline int geni_se_select_mode(void __iomem *base, int mode)
+{
+ return -ENXIO;
}
static inline void geni_setup_m_cmd(void __iomem *base, u32 cmd,
u32 params)
{
- u32 m_cmd = geni_read_reg(base, SE_GENI_M_CMD0);
-
- m_cmd &= ~(M_OPCODE_MSK | M_PARAMS_MSK);
- m_cmd |= (cmd << M_OPCODE_SHFT);
- m_cmd |= (params & M_PARAMS_MSK);
- geni_write_reg(m_cmd, base, SE_GENI_M_CMD0);
}
static inline void geni_setup_s_cmd(void __iomem *base, u32 cmd,
u32 params)
{
- u32 s_cmd = geni_read_reg(base, SE_GENI_S_CMD0);
-
- s_cmd &= ~(S_OPCODE_MSK | S_PARAMS_MSK);
- s_cmd |= (cmd << S_OPCODE_SHFT);
- s_cmd |= (params & S_PARAMS_MSK);
- geni_write_reg(s_cmd, base, SE_GENI_S_CMD0);
}
static inline void geni_cancel_m_cmd(void __iomem *base)
{
- geni_write_reg(M_GENI_CMD_CANCEL, base, SE_GENI_S_CMD_CTRL_REG);
}
static inline void geni_cancel_s_cmd(void __iomem *base)
{
- geni_write_reg(S_GENI_CMD_CANCEL, base, SE_GENI_S_CMD_CTRL_REG);
}
static inline void geni_abort_m_cmd(void __iomem *base)
{
- geni_write_reg(M_GENI_CMD_ABORT, base, SE_GENI_M_CMD_CTRL_REG);
}
static inline void geni_abort_s_cmd(void __iomem *base)
{
- geni_write_reg(S_GENI_CMD_ABORT, base, SE_GENI_S_CMD_CTRL_REG);
}
static inline int get_tx_fifo_depth(void __iomem *base)
{
- int tx_fifo_depth;
-
- tx_fifo_depth = ((geni_read_reg(base, SE_HW_PARAM_0)
- & TX_FIFO_DEPTH_MSK) >> TX_FIFO_DEPTH_SHFT);
- return tx_fifo_depth;
+ return -ENXIO;
}
static inline int get_tx_fifo_width(void __iomem *base)
{
- int tx_fifo_width;
-
- tx_fifo_width = ((geni_read_reg(base, SE_HW_PARAM_0)
- & TX_FIFO_WIDTH_MSK) >> TX_FIFO_WIDTH_SHFT);
- return tx_fifo_width;
+ return -ENXIO;
}
static inline int get_rx_fifo_depth(void __iomem *base)
{
- int rx_fifo_depth;
-
- rx_fifo_depth = ((geni_read_reg(base, SE_HW_PARAM_1)
- & RX_FIFO_DEPTH_MSK) >> RX_FIFO_DEPTH_SHFT);
- return rx_fifo_depth;
+ return -ENXIO;
}
static inline void se_get_packing_config(int bpw, int pack_words,
bool msb_to_lsb, unsigned long *cfg0,
unsigned long *cfg1)
{
- u32 cfg[4] = {0};
- int len = ((bpw < 8) ? (bpw - 1) : 7);
- int idx = ((msb_to_lsb == 1) ? len : 0);
- int iter = (bpw * pack_words) >> 3;
- int i;
-
- for (i = 0; i < iter; i++) {
- cfg[i] = ((idx << 5) | (msb_to_lsb << 4) | (len << 1));
- idx += (len + 1);
- if (i == iter - 1)
- cfg[i] |= 1;
- }
- *cfg0 = cfg[0] | (cfg[1] << 10);
- *cfg1 = cfg[2] | (cfg[3] << 10);
}
static inline void se_config_packing(void __iomem *base, int bpw,
int pack_words, bool msb_to_lsb)
{
- unsigned long cfg0, cfg1;
-
- se_get_packing_config(bpw, pack_words, msb_to_lsb, &cfg0, &cfg1);
- geni_write_reg(cfg0, base, SE_GENI_TX_PACKING_CFG0);
- geni_write_reg(cfg1, base, SE_GENI_TX_PACKING_CFG1);
- geni_write_reg(cfg0, base, SE_GENI_RX_PACKING_CFG0);
- geni_write_reg(cfg1, base, SE_GENI_RX_PACKING_CFG1);
-}
-
-/*
- * Power/Resource Management functions
- */
-
-static inline int se_geni_clks_off(struct se_geni_rsc *rsc)
-{
- int ret = 0;
-
- clk_disable_unprepare(rsc->se_clk);
- clk_disable_unprepare(rsc->m_ahb_clk);
- clk_disable_unprepare(rsc->s_ahb_clk);
- return ret;
-}
-
-static inline int se_geni_resources_off(struct se_geni_rsc *rsc)
-{
- int ret = 0;
-
- ret = pinctrl_select_state(rsc->geni_pinctrl, rsc->geni_gpio_sleep);
- se_geni_clks_off(rsc);
- if (rsc->bus_bw)
- msm_bus_scale_update_bw(rsc->bus_bw, 0, 0);
- return ret;
-}
-
-static inline int se_geni_clks_on(struct se_geni_rsc *rsc)
-{
- int ret = 0;
-
- clk_prepare_enable(rsc->se_clk);
- clk_prepare_enable(rsc->m_ahb_clk);
- clk_prepare_enable(rsc->s_ahb_clk);
- return ret;
}
static inline int se_geni_resources_on(struct se_geni_rsc *rsc)
{
- int ret = 0;
-
- if (rsc->bus_bw)
- msm_bus_scale_update_bw(rsc->bus_bw, rsc->ab, rsc->ib);
- se_geni_clks_on(rsc);
- ret = pinctrl_select_state(rsc->geni_pinctrl, rsc->geni_gpio_active);
- return ret;
+ return -ENXIO;
}
+
+static inline int se_geni_resources_off(struct se_geni_rsc *rsc)
+{
+ return -ENXIO;
+}
+
+static inline int geni_se_resources_init(struct se_geni_rsc *rsc,
+ unsigned long ab, unsigned long ib)
+{
+ return -ENXIO;
+}
+
+static inline int geni_se_tx_dma_prep(struct device *wrapper_dev,
+ void __iomem *base, void *tx_buf, int tx_len, dma_addr_t *tx_dma)
+{
+ return -ENXIO;
+}
+
+static inline int geni_se_rx_dma_prep(struct device *wrapper_dev,
+ void __iomem *base, void *rx_buf, int rx_len, dma_addr_t *rx_dma)
+{
+ return -ENXIO;
+}
+
+static inline void geni_se_tx_dma_unprep(struct device *wrapper_dev,
+ dma_addr_t tx_dma, int tx_len)
+{
+}
+
+static inline void geni_se_rx_dma_unprep(struct device *wrapper_dev,
+ dma_addr_t rx_dma, int rx_len)
+{
+}
+
+static inline int geni_se_qupv3_hw_version(struct device *wrapper_dev,
+ unsigned int *major, unsigned int *minor, unsigned int *step)
+{
+ return -ENXIO;
+}
+
+static inline int geni_se_iommu_map_buf(struct device *wrapper_dev,
+ dma_addr_t *iova, void *buf, size_t size, enum dma_data_direction dir)
+{
+ return -ENXIO;
+}
+
+static inline void *geni_se_iommu_alloc_buf(struct device *wrapper_dev,
+ dma_addr_t *iova, size_t size)
+{
+ return NULL;
+}
+
+static inline int geni_se_iommu_unmap_buf(struct device *wrapper_dev,
+ dma_addr_t *iova, size_t size, enum dma_data_direction dir)
+{
+	return -ENXIO;
+}
+
+static inline int geni_se_iommu_free_buf(struct device *wrapper_dev,
+ dma_addr_t *iova, void *buf, size_t size)
+{
+ return -ENXIO;
+}
+
+#endif
#endif
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index d55175e..57693e7 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -794,6 +794,323 @@
__entry->nl, __entry->pl, __entry->flags)
);
+DECLARE_EVENT_CLASS(kpm_module,
+
+ TP_PROTO(unsigned int managed_cpus, unsigned int max_cpus),
+
+ TP_ARGS(managed_cpus, max_cpus),
+
+ TP_STRUCT__entry(
+ __field(u32, managed_cpus)
+ __field(u32, max_cpus)
+ ),
+
+ TP_fast_assign(
+ __entry->managed_cpus = managed_cpus;
+ __entry->max_cpus = max_cpus;
+ ),
+
+ TP_printk("managed:%x max_cpus=%u", (unsigned int)__entry->managed_cpus,
+ (unsigned int)__entry->max_cpus)
+);
+
+DEFINE_EVENT(kpm_module, set_max_cpus,
+ TP_PROTO(unsigned int managed_cpus, unsigned int max_cpus),
+ TP_ARGS(managed_cpus, max_cpus)
+);
+
+DEFINE_EVENT(kpm_module, reevaluate_hotplug,
+ TP_PROTO(unsigned int managed_cpus, unsigned int max_cpus),
+ TP_ARGS(managed_cpus, max_cpus)
+);
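
Each DEFINE_EVENT above yields a generated trace_<name>() helper; a call site would look roughly like this sketch (the cpumask handling is illustrative):

static void example_report_limits(const struct cpumask *managed,
				  unsigned int max_cpus)
{
	/* first word of the mask is enough for the u32 trace field */
	trace_set_max_cpus(cpumask_bits(managed)[0], max_cpus);
}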
+
+DECLARE_EVENT_CLASS(kpm_module2,
+
+ TP_PROTO(unsigned int cpu, unsigned int enter_cycle_cnt,
+ unsigned int exit_cycle_cnt,
+ unsigned int io_busy, u64 iowait),
+
+ TP_ARGS(cpu, enter_cycle_cnt, exit_cycle_cnt, io_busy, iowait),
+
+ TP_STRUCT__entry(
+ __field(u32, cpu)
+ __field(u32, enter_cycle_cnt)
+ __field(u32, exit_cycle_cnt)
+ __field(u32, io_busy)
+ __field(u64, iowait)
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->enter_cycle_cnt = enter_cycle_cnt;
+ __entry->exit_cycle_cnt = exit_cycle_cnt;
+ __entry->io_busy = io_busy;
+ __entry->iowait = iowait;
+ ),
+
+ TP_printk("CPU:%u enter_cycles=%u exit_cycles=%u io_busy=%u iowait=%lu",
+ (unsigned int)__entry->cpu,
+ (unsigned int)__entry->enter_cycle_cnt,
+ (unsigned int)__entry->exit_cycle_cnt,
+ (unsigned int)__entry->io_busy,
+ (unsigned long)__entry->iowait)
+);
+
+DEFINE_EVENT(kpm_module2, track_iowait,
+ TP_PROTO(unsigned int cpu, unsigned int enter_cycle_cnt,
+ unsigned int exit_cycle_cnt, unsigned int io_busy, u64 iowait),
+ TP_ARGS(cpu, enter_cycle_cnt, exit_cycle_cnt, io_busy, iowait)
+);
+
+DECLARE_EVENT_CLASS(cpu_modes,
+
+ TP_PROTO(unsigned int cpu, unsigned int max_load,
+ unsigned int single_enter_cycle_cnt,
+ unsigned int single_exit_cycle_cnt,
+ unsigned int total_load, unsigned int multi_enter_cycle_cnt,
+ unsigned int multi_exit_cycle_cnt,
+ unsigned int perf_cl_peak_enter_cycle_cnt,
+ unsigned int perf_cl_peak_exit_cycle_cnt,
+ unsigned int mode,
+ unsigned int cpu_cnt),
+
+ TP_ARGS(cpu, max_load, single_enter_cycle_cnt, single_exit_cycle_cnt,
+ total_load, multi_enter_cycle_cnt, multi_exit_cycle_cnt,
+ perf_cl_peak_enter_cycle_cnt, perf_cl_peak_exit_cycle_cnt, mode,
+ cpu_cnt),
+
+ TP_STRUCT__entry(
+ __field(u32, cpu)
+ __field(u32, max_load)
+ __field(u32, single_enter_cycle_cnt)
+ __field(u32, single_exit_cycle_cnt)
+ __field(u32, total_load)
+ __field(u32, multi_enter_cycle_cnt)
+ __field(u32, multi_exit_cycle_cnt)
+ __field(u32, perf_cl_peak_enter_cycle_cnt)
+ __field(u32, perf_cl_peak_exit_cycle_cnt)
+ __field(u32, mode)
+ __field(u32, cpu_cnt)
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->max_load = max_load;
+ __entry->single_enter_cycle_cnt = single_enter_cycle_cnt;
+ __entry->single_exit_cycle_cnt = single_exit_cycle_cnt;
+ __entry->total_load = total_load;
+ __entry->multi_enter_cycle_cnt = multi_enter_cycle_cnt;
+ __entry->multi_exit_cycle_cnt = multi_exit_cycle_cnt;
+ __entry->perf_cl_peak_enter_cycle_cnt =
+ perf_cl_peak_enter_cycle_cnt;
+ __entry->perf_cl_peak_exit_cycle_cnt =
+ perf_cl_peak_exit_cycle_cnt;
+ __entry->mode = mode;
+ __entry->cpu_cnt = cpu_cnt;
+ ),
+
+ TP_printk("%u:%4u:%4u:%4u:%4u:%4u:%4u:%4u:%4u:%4u:%u",
+ (unsigned int)__entry->cpu, (unsigned int)__entry->max_load,
+ (unsigned int)__entry->single_enter_cycle_cnt,
+ (unsigned int)__entry->single_exit_cycle_cnt,
+ (unsigned int)__entry->total_load,
+ (unsigned int)__entry->multi_enter_cycle_cnt,
+ (unsigned int)__entry->multi_exit_cycle_cnt,
+ (unsigned int)__entry->perf_cl_peak_enter_cycle_cnt,
+ (unsigned int)__entry->perf_cl_peak_exit_cycle_cnt,
+ (unsigned int)__entry->mode,
+ (unsigned int)__entry->cpu_cnt)
+);
+
+DEFINE_EVENT(cpu_modes, cpu_mode_detect,
+ TP_PROTO(unsigned int cpu, unsigned int max_load,
+ unsigned int single_enter_cycle_cnt,
+ unsigned int single_exit_cycle_cnt,
+ unsigned int total_load, unsigned int multi_enter_cycle_cnt,
+ unsigned int multi_exit_cycle_cnt,
+ unsigned int perf_cl_peak_enter_cycle_cnt,
+ unsigned int perf_cl_peak_exit_cycle_cnt,
+ unsigned int mode,
+ unsigned int cpu_cnt),
+ TP_ARGS(cpu, max_load, single_enter_cycle_cnt, single_exit_cycle_cnt,
+ total_load, multi_enter_cycle_cnt, multi_exit_cycle_cnt,
+ perf_cl_peak_enter_cycle_cnt, perf_cl_peak_exit_cycle_cnt,
+ mode, cpu_cnt)
+);
+
+DECLARE_EVENT_CLASS(timer_status,
+ TP_PROTO(unsigned int cpu, unsigned int single_enter_cycles,
+ unsigned int single_enter_cycle_cnt,
+ unsigned int single_exit_cycles,
+ unsigned int single_exit_cycle_cnt,
+ unsigned int multi_enter_cycles,
+ unsigned int multi_enter_cycle_cnt,
+ unsigned int multi_exit_cycles,
+ unsigned int multi_exit_cycle_cnt, unsigned int timer_rate,
+ unsigned int mode),
+ TP_ARGS(cpu, single_enter_cycles, single_enter_cycle_cnt,
+ single_exit_cycles, single_exit_cycle_cnt, multi_enter_cycles,
+ multi_enter_cycle_cnt, multi_exit_cycles,
+ multi_exit_cycle_cnt, timer_rate, mode),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, cpu)
+ __field(unsigned int, single_enter_cycles)
+ __field(unsigned int, single_enter_cycle_cnt)
+ __field(unsigned int, single_exit_cycles)
+ __field(unsigned int, single_exit_cycle_cnt)
+ __field(unsigned int, multi_enter_cycles)
+ __field(unsigned int, multi_enter_cycle_cnt)
+ __field(unsigned int, multi_exit_cycles)
+ __field(unsigned int, multi_exit_cycle_cnt)
+ __field(unsigned int, timer_rate)
+ __field(unsigned int, mode)
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->single_enter_cycles = single_enter_cycles;
+ __entry->single_enter_cycle_cnt = single_enter_cycle_cnt;
+ __entry->single_exit_cycles = single_exit_cycles;
+ __entry->single_exit_cycle_cnt = single_exit_cycle_cnt;
+ __entry->multi_enter_cycles = multi_enter_cycles;
+ __entry->multi_enter_cycle_cnt = multi_enter_cycle_cnt;
+ __entry->multi_exit_cycles = multi_exit_cycles;
+ __entry->multi_exit_cycle_cnt = multi_exit_cycle_cnt;
+ __entry->timer_rate = timer_rate;
+ __entry->mode = mode;
+ ),
+
+ TP_printk("%u:%4u:%4u:%4u:%4u:%4u:%4u:%4u:%4u:%4u:%4u",
+ (unsigned int) __entry->cpu,
+ (unsigned int) __entry->single_enter_cycles,
+ (unsigned int) __entry->single_enter_cycle_cnt,
+ (unsigned int) __entry->single_exit_cycles,
+ (unsigned int) __entry->single_exit_cycle_cnt,
+ (unsigned int) __entry->multi_enter_cycles,
+ (unsigned int) __entry->multi_enter_cycle_cnt,
+ (unsigned int) __entry->multi_exit_cycles,
+ (unsigned int) __entry->multi_exit_cycle_cnt,
+ (unsigned int) __entry->timer_rate,
+ (unsigned int) __entry->mode)
+);
+
+DEFINE_EVENT(timer_status, single_mode_timeout,
+ TP_PROTO(unsigned int cpu, unsigned int single_enter_cycles,
+ unsigned int single_enter_cycle_cnt,
+ unsigned int single_exit_cycles,
+ unsigned int single_exit_cycle_cnt,
+ unsigned int multi_enter_cycles,
+ unsigned int multi_enter_cycle_cnt,
+ unsigned int multi_exit_cycles,
+ unsigned int multi_exit_cycle_cnt, unsigned int timer_rate,
+ unsigned int mode),
+ TP_ARGS(cpu, single_enter_cycles, single_enter_cycle_cnt,
+ single_exit_cycles, single_exit_cycle_cnt, multi_enter_cycles,
+ multi_enter_cycle_cnt, multi_exit_cycles, multi_exit_cycle_cnt,
+ timer_rate, mode)
+);
+
+DEFINE_EVENT(timer_status, single_cycle_exit_timer_start,
+ TP_PROTO(unsigned int cpu, unsigned int single_enter_cycles,
+ unsigned int single_enter_cycle_cnt,
+ unsigned int single_exit_cycles,
+ unsigned int single_exit_cycle_cnt,
+ unsigned int multi_enter_cycles,
+ unsigned int multi_enter_cycle_cnt,
+ unsigned int multi_exit_cycles,
+ unsigned int multi_exit_cycle_cnt, unsigned int timer_rate,
+ unsigned int mode),
+ TP_ARGS(cpu, single_enter_cycles, single_enter_cycle_cnt,
+ single_exit_cycles, single_exit_cycle_cnt, multi_enter_cycles,
+ multi_enter_cycle_cnt, multi_exit_cycles, multi_exit_cycle_cnt,
+ timer_rate, mode)
+);
+
+DEFINE_EVENT(timer_status, single_cycle_exit_timer_stop,
+ TP_PROTO(unsigned int cpu, unsigned int single_enter_cycles,
+ unsigned int single_enter_cycle_cnt,
+ unsigned int single_exit_cycles,
+ unsigned int single_exit_cycle_cnt,
+ unsigned int multi_enter_cycles,
+ unsigned int multi_enter_cycle_cnt,
+ unsigned int multi_exit_cycles,
+ unsigned int multi_exit_cycle_cnt, unsigned int timer_rate,
+ unsigned int mode),
+ TP_ARGS(cpu, single_enter_cycles, single_enter_cycle_cnt,
+ single_exit_cycles, single_exit_cycle_cnt, multi_enter_cycles,
+ multi_enter_cycle_cnt, multi_exit_cycles, multi_exit_cycle_cnt,
+ timer_rate, mode)
+);
+
+DECLARE_EVENT_CLASS(perf_cl_peak_timer_status,
+ TP_PROTO(unsigned int cpu, unsigned int perf_cl_peak_enter_cycles,
+ unsigned int perf_cl_peak_enter_cycle_cnt,
+ unsigned int perf_cl_peak_exit_cycles,
+ unsigned int perf_cl_peak_exit_cycle_cnt,
+ unsigned int timer_rate,
+ unsigned int mode),
+ TP_ARGS(cpu, perf_cl_peak_enter_cycles, perf_cl_peak_enter_cycle_cnt,
+ perf_cl_peak_exit_cycles, perf_cl_peak_exit_cycle_cnt,
+ timer_rate, mode),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, cpu)
+ __field(unsigned int, perf_cl_peak_enter_cycles)
+ __field(unsigned int, perf_cl_peak_enter_cycle_cnt)
+ __field(unsigned int, perf_cl_peak_exit_cycles)
+ __field(unsigned int, perf_cl_peak_exit_cycle_cnt)
+ __field(unsigned int, timer_rate)
+ __field(unsigned int, mode)
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->perf_cl_peak_enter_cycles = perf_cl_peak_enter_cycles;
+ __entry->perf_cl_peak_enter_cycle_cnt =
+ perf_cl_peak_enter_cycle_cnt;
+ __entry->perf_cl_peak_exit_cycles = perf_cl_peak_exit_cycles;
+ __entry->perf_cl_peak_exit_cycle_cnt =
+ perf_cl_peak_exit_cycle_cnt;
+ __entry->timer_rate = timer_rate;
+ __entry->mode = mode;
+ ),
+
+ TP_printk("%u:%4u:%4u:%4u:%4u:%4u:%4u",
+ (unsigned int) __entry->cpu,
+ (unsigned int) __entry->perf_cl_peak_enter_cycles,
+ (unsigned int) __entry->perf_cl_peak_enter_cycle_cnt,
+ (unsigned int) __entry->perf_cl_peak_exit_cycles,
+ (unsigned int) __entry->perf_cl_peak_exit_cycle_cnt,
+ (unsigned int) __entry->timer_rate,
+ (unsigned int) __entry->mode)
+);
+
+DEFINE_EVENT(perf_cl_peak_timer_status, perf_cl_peak_exit_timer_start,
+ TP_PROTO(unsigned int cpu, unsigned int perf_cl_peak_enter_cycles,
+ unsigned int perf_cl_peak_enter_cycle_cnt,
+ unsigned int perf_cl_peak_exit_cycles,
+ unsigned int perf_cl_peak_exit_cycle_cnt,
+ unsigned int timer_rate,
+ unsigned int mode),
+ TP_ARGS(cpu, perf_cl_peak_enter_cycles, perf_cl_peak_enter_cycle_cnt,
+ perf_cl_peak_exit_cycles, perf_cl_peak_exit_cycle_cnt,
+ timer_rate, mode)
+);
+
+DEFINE_EVENT(perf_cl_peak_timer_status, perf_cl_peak_exit_timer_stop,
+ TP_PROTO(unsigned int cpu, unsigned int perf_cl_peak_enter_cycles,
+ unsigned int perf_cl_peak_enter_cycle_cnt,
+ unsigned int perf_cl_peak_exit_cycles,
+ unsigned int perf_cl_peak_exit_cycle_cnt,
+ unsigned int timer_rate,
+ unsigned int mode),
+ TP_ARGS(cpu, perf_cl_peak_enter_cycles, perf_cl_peak_enter_cycle_cnt,
+ perf_cl_peak_exit_cycles, perf_cl_peak_exit_cycle_cnt,
+ timer_rate, mode)
+);
+
#endif /* _TRACE_POWER_H */
/* This part must be outside protection */
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
index 817feba..ea68202 100644
--- a/include/uapi/linux/msm_ipa.h
+++ b/include/uapi/linux/msm_ipa.h
@@ -449,6 +449,7 @@
* @IPA_HW_v3_1: IPA hardware version 3.1
* @IPA_HW_v3_5: IPA hardware version 3.5
* @IPA_HW_v3_5_1: IPA hardware version 3.5.1
+ * @IPA_HW_v4_0: IPA hardware version 4.0
*/
enum ipa_hw_type {
IPA_HW_None = 0,
@@ -463,9 +464,12 @@
IPA_HW_v3_1 = 11,
IPA_HW_v3_5 = 12,
IPA_HW_v3_5_1 = 13,
+ IPA_HW_v4_0 = 14,
IPA_HW_MAX
};
+#define IPA_HW_v4_0 IPA_HW_v4_0
+
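Defining the enumerator as a macro of itself lets dependent code probe for it with the preprocessor, which a bare enum value cannot offer; a hedged sketch of the intended use:

static void example_probe_hw(enum ipa_hw_type hw_type)
{
#ifdef IPA_HW_v4_0
	/* only compiles against headers that already carry IPA 4.0 */
	if (hw_type == IPA_HW_v4_0)
		pr_info("IPA v4.0 or later detected\n");
#endif
}
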
/**
* struct ipa_rule_attrib - attributes of a routing/filtering
* rule, all in LE
diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c
index bae3b2b..86a167b 100644
--- a/kernel/sched/tune.c
+++ b/kernel/sched/tune.c
@@ -1066,10 +1066,6 @@
* Assume we have EM data only at the CPU and
* the upper CLUSTER level
*/
- BUG_ON(!cpumask_equal(
- sched_group_cpus(sg),
- sched_group_cpus(sd2->parent->groups)
- ));
break;
}
}