Merge "mmc: sdhci: add err_state to sdhci_dumpregs func"
diff --git a/Documentation/devicetree/bindings/arm/coresight.txt b/Documentation/devicetree/bindings/arm/coresight.txt
index 592fcef..d4a352b 100644
--- a/Documentation/devicetree/bindings/arm/coresight.txt
+++ b/Documentation/devicetree/bindings/arm/coresight.txt
@@ -159,6 +159,23 @@
* qcom,inst-id: must be present. QMI instance id for remote ETMs.
+* Optional properties for funnels:
+
+ * qcom,duplicate-funnel: boolean, indicates this is a duplicate of an
+ existing funnel. Funnel devices are now capable of supporting
+ multiple-input, multiple-output configurations with built-in
+ hardware filtering for TPDM devices. Each input-output combination
+ is treated as an independent funnel device. The funnel-base-dummy and
+ funnel-base-real reg-names must be specified when this property is
+ enabled (see the node sketch after this file's changes).
+
+ * reg-names: funnel-base-dummy: dummy register space used by a
+ duplicate funnel. Should be a valid register address space that
+ no other device is using.
+
+ * reg-names: funnel-base-real: actual register space for the
+ duplicate funnel.
+
Example:
1. Sinks
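
For reference, a trimmed sketch of a duplicate-funnel node, taken from the
sdm845-coresight.dtsi change later in this series (addresses and names are
those of the LPASS instance):

    funnel_lpass_1: funnel_1@6845000 {
        compatible = "arm,primecell";
        arm,primecell-periphid = <0x0003b908>;
        reg = <0x6867010 0x10>,
              <0x6845000 0x1000>;
        reg-names = "funnel-base-dummy", "funnel-base-real";
        coresight-name = "coresight-funnel-lpass-1";
        clocks = <&clock_aop QDSS_CLK>;
        clock-names = "apb_pclk";
        qcom,duplicate-funnel;
    };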
diff --git a/Documentation/devicetree/bindings/display/msm/sde-rsc.txt b/Documentation/devicetree/bindings/display/msm/sde-rsc.txt
index 7e54fdd..55d18cf 100644
--- a/Documentation/devicetree/bindings/display/msm/sde-rsc.txt
+++ b/Documentation/devicetree/bindings/display/msm/sde-rsc.txt
@@ -29,6 +29,10 @@
Bus Scaling Subnodes:
- qcom,sde-data-bus: Property to provide Bus scaling for data bus access for
sde blocks.
+- qcom,sde-llcc-bus: Property to provide Bus scaling for data bus access
+ from mnoc to llcc.
+- qcom,sde-ebi-bus: Property to provide Bus scaling for data bus access
+ from llcc to ebi.
Bus Scaling Data:
- qcom,msm-bus,name: String property describing client name.
@@ -69,4 +73,24 @@
<22 512 0 6400000>, <23 512 0 6400000>,
<22 512 0 6400000>, <23 512 0 6400000>;
};
+ qcom,sde-llcc-bus {
+ qcom,msm-bus,name = "sde_rsc_llcc";
+ qcom,msm-bus,active-only;
+ qcom,msm-bus,num-cases = <3>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <20001 20513 0 0>,
+ <20001 20513 0 6400000>,
+ <20001 20513 0 6400000>;
+ };
+ qcom,sde-ebi-bus {
+ qcom,msm-bus,name = "sde_rsc_ebi";
+ qcom,msm-bus,active-only;
+ qcom,msm-bus,num-cases = <3>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <20000 20512 0 0>,
+ <20000 20512 0 6400000>,
+ <20000 20512 0 6400000>;
+ };
};
diff --git a/Documentation/devicetree/bindings/display/msm/sde.txt b/Documentation/devicetree/bindings/display/msm/sde.txt
index 47fc465..863a169 100644
--- a/Documentation/devicetree/bindings/display/msm/sde.txt
+++ b/Documentation/devicetree/bindings/display/msm/sde.txt
@@ -341,6 +341,10 @@
mdss blocks.
- qcom,sde-data-bus: Property to provide Bus scaling for data bus access for
mdss blocks.
+- qcom,sde-llcc-bus: Property to provide Bus scaling for data bus access
+ from mnoc to llcc.
+- qcom,sde-ebi-bus: Property to provide Bus scaling for data bus access
+ from llcc to ebi.
- qcom,sde-inline-rotator: A 2 cell property, with format of (rotator phandle,
instance id), of inline rotator device.
@@ -638,6 +642,24 @@
<22 512 0 6400000>, <23 512 0 6400000>,
<25 512 0 6400000>;
};
+ qcom,sde-llcc-bus {
+ qcom,msm-bus,name = "mdss_sde_llcc";
+ qcom,msm-bus,num-cases = <3>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <132 770 0 0>,
+ <132 770 0 6400000>,
+ <132 770 0 6400000>;
+ };
+ qcom,sde-ebi-bus {
+ qcom,msm-bus,name = "mdss_sde_ebi";
+ qcom,msm-bus,num-cases = <3>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <129 512 0 0>,
+ <129 512 0 6400000>,
+ <129 512 0 6400000>;
+ };
qcom,sde-reg-bus {
/* Reg Bus Scale Settings */
diff --git a/Documentation/devicetree/bindings/mtd/msm_qpic_nand.txt b/Documentation/devicetree/bindings/mtd/msm_qpic_nand.txt
new file mode 100644
index 0000000..094dc25
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/msm_qpic_nand.txt
@@ -0,0 +1,74 @@
+Qualcomm Technologies, Inc. Parallel Interface Controller (QPIC) for NAND devices
+
+Required properties:
+- compatible : "qcom,msm-nand".
+- reg : should specify QPIC NANDc and BAM physical address range.
+- reg-names : should specify relevant names to each reg property defined.
+- interrupts : should specify QPIC/BAM interrupt numbers.
+- interrupt-names : should specify relevant names to each interrupts property
+ defined.
+- qcom,reg-adjustment-offset : Specify the base adjustment offset value for the
+ version registers.
+
+MTD flash partition layout for NAND devices -
+
+Each partition is represented as a sub-node of the qcom,mtd-partitions device.
+Each node's name represents the name of the corresponding partition.
+
+This is now completely optional, as the partition information is available
+from the bootloader.
+
+Optional properties:
+- reg : boot_cfg. This is needed only on the targets where both NAND and eMMC
+ devices are supported. On eMMC-based builds, NAND cannot be enabled by
+ default due to the absence of some of its required resources.
+- reg : The partition offset and size
+- label : The label / name for this partition.
+- read-only: This parameter, if present, indicates that this partition
+ should only be mounted read-only.
+- Refer to "Documentation/devicetree/bindings/arm/msm/msm_bus.txt" for
+below optional properties:
+ - qcom,msm-bus,name
+ - qcom,msm-bus,num-cases
+ - qcom,msm-bus,active-only
+ - qcom,msm-bus,num-paths
+ - qcom,msm-bus,vectors-KBps
+
+Examples:
+
+ qcom,nand@f9af0000 {
+ compatible = "qcom,msm-nand";
+ reg = <0xf9af0000 0x1000>,
+ <0xf9ac4000 0x8000>,
+ <0x5e02c 0x4>;
+ reg-names = "nand_phys",
+ "bam_phys",
+ "boot_cfg";
+ qcom,reg-adjustment-offset = <0x4000>;
+
+ interrupts = <0 279 0>;
+ interrupt-names = "bam_irq";
+
+ qcom,msm-bus,name = "qpic_nand";
+ qcom,msm-bus,num-cases = <1>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps = <91 512 0 0>;
+ };
+
+ qcom,mtd-partitions {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ partition@0 {
+ label = "boot";
+ reg = <0x0 0x1000>;
+ read-only;
+ };
+ partition@20000 {
+ label = "userdata";
+ reg = <0x20000 0x1000>;
+ };
+ partition@40000 {
+ label = "system";
+ reg = <0x40000 0x1000>;
+ };
+ };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
index c8f84fd..0430ea4 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
@@ -334,7 +334,9 @@
interrupts = <63 0>;
interrupt-names = "nfc_irq";
pinctrl-names = "nfc_active", "nfc_suspend";
- pinctrl-0 = <&nfc_int_active &nfc_enable_active>;
+ pinctrl-0 = <&nfc_int_active
+ &nfc_enable_active
+ &nfc_clk_default>;
pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend>;
clocks = <&clock_rpmh RPMH_LN_BB_CLK3>;
clock-names = "ref_clk";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
index e32ec6e..04a332e 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
@@ -767,13 +767,42 @@
<&tpdm_lpass_out_funnel_lpass>;
};
};
+ };
+ };
- port@2 {
+ funnel_lpass_1: funnel_1@6845000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b908>;
+
+ reg = <0x6867010 0x10>,
+ <0x6845000 0x1000>;
+ reg-names = "funnel-base-dummy", "funnel-base-real";
+
+ coresight-name = "coresight-funnel-lpass-1";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ qcom,duplicate-funnel;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ funnel_lpass_1_out_funnel_qatb: endpoint {
+ remote-endpoint =
+ <&funnel_qatb_in_funnel_lpass_1>;
+ };
+ };
+
+ port@1 {
reg = <1>;
- funnel_lpass_in_audio_etm0: endpoint {
+ funnel_lpass_1_in_audio_etm0: endpoint {
slave-mode;
remote-endpoint =
- <&audio_etm0_out_funnel_lpass>;
+ <&audio_etm0_out_funnel_lpass_1>;
};
};
};
@@ -1100,13 +1129,42 @@
<&tpdm_turing_out_funnel_turing>;
};
};
+ };
+ };
- port@2 {
+ funnel_turing_1: funnel_1@6861000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b908>;
+
+ reg = <0x6867000 0x10>,
+ <0x6861000 0x1000>;
+ reg-names = "funnel-base-dummy", "funnel-base-real";
+
+ coresight-name = "coresight-funnel-turing-1";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ qcom,duplicate-funnel;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ funnel_turing_1_out_funnel_qatb: endpoint {
+ remote-endpoint =
+ <&funnel_qatb_in_funnel_turing_1>;
+ };
+ };
+
+ port@1 {
reg = <1>;
- funnel_turing_in_turing_etm0: endpoint {
+ funnel_turing_1_in_turing_etm0: endpoint {
slave-mode;
remote-endpoint =
- <&turing_etm0_out_funnel_turing>;
+ <&turing_etm0_out_funnel_turing_1>;
};
};
};
@@ -1394,6 +1452,24 @@
<&tpda_out_funnel_qatb>;
};
};
+
+ port@2 {
+ reg = <6>;
+ funnel_qatb_in_funnel_lpass_1: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&funnel_lpass_1_out_funnel_qatb>;
+ };
+ };
+
+ port@3 {
+ reg = <7>;
+ funnel_qatb_in_funnel_turing_1: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&funnel_turing_1_out_funnel_qatb>;
+ };
+ };
};
};
@@ -1780,9 +1856,9 @@
qcom,inst-id = <13>;
port{
- turing_etm0_out_funnel_turing: endpoint {
+ turing_etm0_out_funnel_turing_1: endpoint {
remote-endpoint =
- <&funnel_turing_in_turing_etm0>;
+ <&funnel_turing_1_in_turing_etm0>;
};
};
};
@@ -1823,8 +1899,9 @@
qcom,inst-id = <5>;
port {
- audio_etm0_out_funnel_lpass: endpoint {
- remote-endpoint = <&funnel_lpass_in_audio_etm0>;
+ audio_etm0_out_funnel_lpass_1: endpoint {
+ remote-endpoint =
+ <&funnel_lpass_1_in_audio_etm0>;
};
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
index c75eb48..c3217e7 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
@@ -308,7 +308,9 @@
interrupts = <63 0>;
interrupt-names = "nfc_irq";
pinctrl-names = "nfc_active", "nfc_suspend";
- pinctrl-0 = <&nfc_int_active &nfc_enable_active>;
+ pinctrl-0 = <&nfc_int_active
+ &nfc_enable_active
+ &nfc_clk_default>;
pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend>;
clocks = <&clock_rpmh RPMH_LN_BB_CLK3>;
clock-names = "ref_clk";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
index 9946a25..dc58f9c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
@@ -2800,14 +2800,6 @@
};
&pm8998_gpios {
- gpio@d400 {
- qcom,mode = <0>;
- qcom,vin-sel = <1>;
- qcom,src-sel = <0>;
- qcom,master-en = <1>;
- status = "okay";
- };
-
key_home {
key_home_default: key_home_default {
pins = "gpio5";
@@ -2865,6 +2857,15 @@
output-low;
};
};
+
+ nfc_clk {
+ nfc_clk_default: nfc_clk_default {
+ pins = "gpio21";
+ function = "normal";
+ input-enable;
+ power-source = <1>;
+ };
+ };
};
&pmi8998_gpios {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
index c2fbed5..f14293b 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
@@ -73,7 +73,9 @@
interrupts = <63 0>;
interrupt-names = "nfc_irq";
pinctrl-names = "nfc_active", "nfc_suspend";
- pinctrl-0 = <&nfc_int_active &nfc_enable_active>;
+ pinctrl-0 = <&nfc_int_active
+ &nfc_enable_active
+ &nfc_clk_default>;
pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend>;
clocks = <&clock_rpmh RPMH_LN_BB_CLK3>;
clock-names = "ref_clk";
@@ -265,3 +267,189 @@
&ext_5v_boost {
status = "ok";
};
+
+&pm8998_vadc {
+ chan@83 {
+ label = "vph_pwr";
+ reg = <0x83>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <1>;
+ qcom,calibration-type = "absolute";
+ qcom,scale-function = <0>;
+ qcom,hw-settle-time = <0>;
+ qcom,fast-avg-setup = <0>;
+ };
+
+ chan@85 {
+ label = "vcoin";
+ reg = <0x85>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <1>;
+ qcom,calibration-type = "absolute";
+ qcom,scale-function = <0>;
+ qcom,hw-settle-time = <0>;
+ qcom,fast-avg-setup = <0>;
+ };
+
+ chan@4c {
+ label = "xo_therm";
+ reg = <0x4c>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <4>;
+ qcom,hw-settle-time = <2>;
+ qcom,fast-avg-setup = <0>;
+ };
+
+ chan@4d {
+ label = "msm_therm";
+ reg = <0x4d>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <2>;
+ qcom,hw-settle-time = <2>;
+ qcom,fast-avg-setup = <0>;
+ };
+
+ chan@4f {
+ label = "pa_therm1";
+ reg = <0x4f>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <2>;
+ qcom,hw-settle-time = <2>;
+ qcom,fast-avg-setup = <0>;
+ };
+
+ chan@51 {
+ label = "quiet_therm";
+ reg = <0x51>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <2>;
+ qcom,hw-settle-time = <2>;
+ qcom,fast-avg-setup = <0>;
+ };
+};
+
+&pm8998_adc_tm {
+ chan@83 {
+ label = "vph_pwr";
+ reg = <0x83>;
+ qcom,pre-div-channel-scaling = <1>;
+ qcom,calibration-type = "absolute";
+ qcom,scale-function = <0>;
+ qcom,hw-settle-time = <0>;
+ qcom,btm-channel-number = <0x60>;
+ };
+
+ chan@4c {
+ label = "xo_therm";
+ reg = <0x4c>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <4>;
+ qcom,hw-settle-time = <2>;
+ qcom,btm-channel-number = <0x68>;
+ qcom,thermal-node;
+ };
+
+ chan@4d {
+ label = "msm_therm";
+ reg = <0x4d>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <2>;
+ qcom,hw-settle-time = <2>;
+ qcom,btm-channel-number = <0x70>;
+ qcom,thermal-node;
+ };
+
+ chan@4f {
+ label = "pa_therm1";
+ reg = <0x4f>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <2>;
+ qcom,hw-settle-time = <2>;
+ qcom,btm-channel-number = <0x78>;
+ qcom,thermal-node;
+ };
+
+ chan@51 {
+ label = "quiet_therm";
+ reg = <0x51>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <2>;
+ qcom,hw-settle-time = <2>;
+ qcom,btm-channel-number = <0x80>;
+ qcom,thermal-node;
+ };
+};
+
+&thermal_zones {
+ xo-therm-adc {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8998_adc_tm 0x4c>;
+ thermal-governor = "user_space";
+
+ trips {
+ active-config0 {
+ temperature = <65000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ msm-therm-adc {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8998_adc_tm 0x4d>;
+ thermal-governor = "user_space";
+
+ trips {
+ active-config0 {
+ temperature = <65000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ pa-therm1-adc {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8998_adc_tm 0x4f>;
+ thermal-governor = "user_space";
+
+ trips {
+ active-config0 {
+ temperature = <65000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ quiet-therm-adc {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8998_adc_tm 0x51>;
+ thermal-governor = "user_space";
+
+ trips {
+ active-config0 {
+ temperature = <65000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 7ea200e..83f1166 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -488,6 +488,11 @@
firmware: firmware {
android {
compatible = "android,firmware";
+ vbmeta {
+ compatible = "android,vbmeta";
+ parts = "vbmeta,boot,system,vendor,dtbo";
+ };
+
fstab {
compatible = "android,fstab";
vendor {
@@ -495,7 +500,7 @@
dev = "/dev/block/platform/soc/1d84000.ufshc/by-name/vendor";
type = "ext4";
mnt_flags = "ro,barrier=1,discard";
- fsmgr_flags = "wait,slotselect";
+ fsmgr_flags = "wait,slotselect,avb";
};
};
};
@@ -939,20 +944,6 @@
clocks = <&clock_cpucc L3_CLUSTER0_VOTE_CLK>;
governor = "performance";
qcom,prepare-clk;
- freq-tbl-khz =
- < 300000 >,
- < 422400 >,
- < 499200 >,
- < 576000 >,
- < 652800 >,
- < 729600 >,
- < 806400 >,
- < 883200 >,
- < 960000 >,
- < 1036800 >,
- < 1094400 >,
- < 1209600 >,
- < 1305600 >;
};
l3_cpu4: qcom,l3-cpu4 {
@@ -961,20 +952,6 @@
clocks = <&clock_cpucc L3_CLUSTER1_VOTE_CLK>;
governor = "performance";
qcom,prepare-clk;
- freq-tbl-khz =
- < 300000 >,
- < 422400 >,
- < 499200 >,
- < 576000 >,
- < 652800 >,
- < 729600 >,
- < 806400 >,
- < 883200 >,
- < 960000 >,
- < 1036800 >,
- < 1094400 >,
- < 1209600 >,
- < 1305600 >;
};
devfreq_l3lat_0: qcom,cpu0-l3lat-mon {
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index 18b0a3b..8a5b17d 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -246,9 +246,12 @@
CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
CONFIG_SCSI_UFS_QCOM_ICE=y
+CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
+CONFIG_DM_DEBUG=y
CONFIG_DM_CRYPT=y
+CONFIG_DM_REQ_CRYPT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
CONFIG_DM_VERITY_FEC=y
@@ -366,6 +369,7 @@
CONFIG_UHID=y
CONFIG_HID_APPLE=y
CONFIG_HID_MICROSOFT=y
+CONFIG_HID_PLANTRONICS=y
CONFIG_USB=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_XHCI_HCD=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index 1f1b5b4..12ff6bc 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -259,7 +259,9 @@
CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
+CONFIG_DM_DEBUG=y
CONFIG_DM_CRYPT=y
+CONFIG_DM_REQ_CRYPT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
CONFIG_DM_VERITY_FEC=y
@@ -374,6 +376,7 @@
CONFIG_UHID=y
CONFIG_HID_APPLE=y
CONFIG_HID_MICROSOFT=y
+CONFIG_HID_PLANTRONICS=y
CONFIG_USB=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_EHCI_HCD=y
diff --git a/block/blk-core.c b/block/blk-core.c
index 710c93b..d8fba67 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1438,6 +1438,9 @@
- /* this is a bio leak */
- WARN_ON(req->bio != NULL);
+ /* this is a bio leak if the bio is not tagged with BIO_DONTFREE */
+ WARN_ON(req->bio && !bio_flagged(req->bio, BIO_DONTFREE));
+
/*
* Request may not have originated from ll_rw_blk. if not,
* it didn't come out of our reserved rq pools
@@ -2619,6 +2622,15 @@
blk_account_io_completion(req, nr_bytes);
total_bytes = 0;
+
+ /*
+ * If the bio is flagged with BIO_DONTFREE, request-based dm needs
+ * to perform post-processing, so do not end the bios or the request
+ * here; the DM layer takes care of that.
+ */
+ if (bio_flagged(req->bio, BIO_DONTFREE))
+ return false;
+
while (req->bio) {
struct bio *bio = req->bio;
unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
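
A minimal sketch of the producer side of this contract: a request-based DM
target that wants to own bio completion would tag each bio before its
post-processing. The helper name is illustrative and not part of this patch;
bio_set_flag() is the standard kernel accessor.

    /* Tag every bio in the request so blk_update_request() and
     * blk_finish_request() leave completion to the DM layer.
     */
    static void dm_req_mark_bios_dontfree(struct request *rq)
    {
            struct bio *bio;

            for (bio = rq->bio; bio; bio = bio->bi_next)
                    bio_set_flag(bio, BIO_DONTFREE);
    }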
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 2642e5f..abde370 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -492,6 +492,64 @@
}
EXPORT_SYMBOL(blk_rq_map_sg);
+/*
+ * Map a request to a scatterlist without combining physically
+ * contiguous blocks; returns the number of sg entries set up. The
+ * caller must make sure sg can hold rq->nr_phys_segments entries.
+ */
+int blk_rq_map_sg_no_cluster(struct request_queue *q, struct request *rq,
+ struct scatterlist *sglist)
+{
+ struct bio_vec bvec, bvprv = { NULL };
+ struct req_iterator iter;
+ struct scatterlist *sg;
+ int nsegs, cluster = 0;
+
+ nsegs = 0;
+
+ /*
+ * for each bio in rq
+ */
+ sg = NULL;
+ rq_for_each_segment(bvec, rq, iter) {
+ __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg,
+ &nsegs, &cluster);
+ } /* segments in rq */
+
+ if (!sg)
+ return nsegs;
+
+ if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
+ (blk_rq_bytes(rq) & q->dma_pad_mask)) {
+ unsigned int pad_len =
+ (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
+
+ sg->length += pad_len;
+ rq->extra_len += pad_len;
+ }
+
+ if (q->dma_drain_size && q->dma_drain_needed(rq)) {
+ if (rq->cmd_flags & REQ_OP_WRITE)
+ memset(q->dma_drain_buffer, 0, q->dma_drain_size);
+
+ sg->page_link &= ~0x02;
+ sg = sg_next(sg);
+ sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
+ q->dma_drain_size,
+ ((unsigned long)q->dma_drain_buffer) &
+ (PAGE_SIZE - 1));
+ nsegs++;
+ rq->extra_len += q->dma_drain_size;
+ }
+
+ if (sg)
+ sg_mark_end(sg);
+
+ return nsegs;
+}
+EXPORT_SYMBOL(blk_rq_map_sg_no_cluster);
+
static inline int ll_new_hw_segment(struct request_queue *q,
struct request *req,
struct bio *bio)
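
A hedged usage sketch for the new export: a driver that needs one scatterlist
entry per physical segment (for example, to program per-segment encryption)
could call it as below. The wrapper name is illustrative; the sg table sizing
follows the requirement stated in the function comment.

    static int map_rq_uncombined(struct request_queue *q, struct request *rq,
                                 struct scatterlist *sgl)
    {
            /* sgl must hold rq->nr_phys_segments entries */
            sg_init_table(sgl, rq->nr_phys_segments);
            return blk_rq_map_sg_no_cluster(q, rq, sgl);
    }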
diff --git a/block/blk.h b/block/blk.h
index 74444c4..ae07666 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -207,7 +207,6 @@
int attempt_front_merge(struct request_queue *q, struct request *rq);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
struct request *next);
-void blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
int blk_try_merge(struct request *rq, struct bio *bio);
diff --git a/drivers/clk/qcom/camcc-sdm845.c b/drivers/clk/qcom/camcc-sdm845.c
index 9ccef91..86e148d 100644
--- a/drivers/clk/qcom/camcc-sdm845.c
+++ b/drivers/clk/qcom/camcc-sdm845.c
@@ -1928,22 +1928,11 @@
};
static const struct qcom_reset_map cam_cc_sdm845_resets[] = {
- [TITAN_CAM_CC_BPS_BCR] = { 0x6000 },
- [TITAN_CAM_CC_CAMNOC_BCR] = { 0xb120 },
[TITAN_CAM_CC_CCI_BCR] = { 0xb0d4 },
[TITAN_CAM_CC_CPAS_BCR] = { 0xb118 },
[TITAN_CAM_CC_CSI0PHY_BCR] = { 0x5000 },
[TITAN_CAM_CC_CSI1PHY_BCR] = { 0x5024 },
[TITAN_CAM_CC_CSI2PHY_BCR] = { 0x5048 },
- [TITAN_CAM_CC_FD_BCR] = { 0xb0ac },
- [TITAN_CAM_CC_ICP_BCR] = { 0xb074 },
- [TITAN_CAM_CC_IFE_0_BCR] = { 0x9000 },
- [TITAN_CAM_CC_IFE_1_BCR] = { 0xa000 },
- [TITAN_CAM_CC_IFE_LITE_BCR] = { 0xb000 },
- [TITAN_CAM_CC_IPE_0_BCR] = { 0x7000 },
- [TITAN_CAM_CC_IPE_1_BCR] = { 0x8000 },
- [TITAN_CAM_CC_JPEG_BCR] = { 0xb048 },
- [TITAN_CAM_CC_LRME_BCR] = { 0xb0f4 },
[TITAN_CAM_CC_MCLK0_BCR] = { 0x4000 },
[TITAN_CAM_CC_MCLK1_BCR] = { 0x4020 },
[TITAN_CAM_CC_MCLK2_BCR] = { 0x4040 },
diff --git a/drivers/clk/qcom/dispcc-sdm845.c b/drivers/clk/qcom/dispcc-sdm845.c
index 6acab9f..d6ecf12 100644
--- a/drivers/clk/qcom/dispcc-sdm845.c
+++ b/drivers/clk/qcom/dispcc-sdm845.c
@@ -992,8 +992,6 @@
};
static const struct qcom_reset_map disp_cc_sdm845_resets[] = {
- [DISP_CC_MDSS_CORE_BCR] = { 0x2000 },
- [DISP_CC_MDSS_GCC_CLOCKS_BCR] = { 0x4000 },
[DISP_CC_MDSS_RSCC_BCR] = { 0x5000 },
};
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index 13de253..cd47e14 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -1240,6 +1240,8 @@
static struct clk_branch gcc_aggre_ufs_card_axi_clk = {
.halt_reg = 0x82028,
.halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x82028,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0x82028,
.enable_mask = BIT(0),
@@ -1275,6 +1277,8 @@
static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
.halt_reg = 0x82024,
.halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x82024,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0x82024,
.enable_mask = BIT(0),
@@ -1346,6 +1350,8 @@
static struct clk_branch gcc_boot_rom_ahb_clk = {
.halt_reg = 0x38004,
.halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x38004,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0x52004,
.enable_mask = BIT(10),
@@ -1359,6 +1365,8 @@
static struct clk_branch gcc_camera_ahb_clk = {
.halt_reg = 0xb008,
.halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb008,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0xb008,
.enable_mask = BIT(0),
@@ -1398,6 +1406,8 @@
static struct clk_branch gcc_ce1_ahb_clk = {
.halt_reg = 0x4100c,
.halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x4100c,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0x52004,
.enable_mask = BIT(3),
@@ -1504,6 +1514,8 @@
static struct clk_branch gcc_cpuss_gnoc_clk = {
.halt_reg = 0x48004,
.halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x48004,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0x52004,
.enable_mask = BIT(22),
@@ -1548,6 +1560,8 @@
static struct clk_branch gcc_disp_ahb_clk = {
.halt_reg = 0xb00c,
.halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb00c,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0xb00c,
.enable_mask = BIT(0),
@@ -1675,6 +1689,8 @@
static struct clk_branch gcc_gpu_cfg_ahb_clk = {
.halt_reg = 0x71004,
.halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x71004,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0x71004,
.enable_mask = BIT(0),
@@ -1774,6 +1790,8 @@
static struct clk_branch gcc_mss_cfg_ahb_clk = {
.halt_reg = 0x8a000,
.halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x8a000,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0x8a000,
.enable_mask = BIT(0),
@@ -1799,6 +1817,8 @@
static struct clk_branch gcc_mss_mfab_axis_clk = {
.halt_reg = 0x8a004,
.halt_check = BRANCH_VOTED,
+ .hwcg_reg = 0x8a004,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0x8a004,
.enable_mask = BIT(0),
@@ -1856,6 +1876,8 @@
static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
.halt_reg = 0x6b018,
.halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b018,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0x5200c,
.enable_mask = BIT(2),
@@ -1907,6 +1929,8 @@
static struct clk_branch gcc_pcie_0_slv_axi_clk = {
.halt_reg = 0x6b010,
.halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b010,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0x5200c,
.enable_mask = BIT(0),
@@ -1951,6 +1975,8 @@
static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
.halt_reg = 0x8d018,
.halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8d018,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0x52004,
.enable_mask = BIT(28),
@@ -2002,6 +2028,8 @@
static struct clk_branch gcc_pcie_1_slv_axi_clk = {
.halt_reg = 0x8d010,
.halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8d010,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0x52004,
.enable_mask = BIT(26),
@@ -2082,6 +2110,8 @@
static struct clk_branch gcc_pdm_ahb_clk = {
.halt_reg = 0x33004,
.halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x33004,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0x33004,
.enable_mask = BIT(0),
@@ -2108,6 +2138,8 @@
static struct clk_branch gcc_prng_ahb_clk = {
.halt_reg = 0x34004,
.halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x34004,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0x52004,
.enable_mask = BIT(13),
@@ -2121,6 +2153,8 @@
static struct clk_branch gcc_qmip_camera_ahb_clk = {
.halt_reg = 0xb014,
.halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb014,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0xb014,
.enable_mask = BIT(0),
@@ -2134,6 +2168,8 @@
static struct clk_branch gcc_qmip_disp_ahb_clk = {
.halt_reg = 0xb018,
.halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb018,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0xb018,
.enable_mask = BIT(0),
@@ -2147,6 +2183,8 @@
static struct clk_branch gcc_qmip_video_ahb_clk = {
.halt_reg = 0xb010,
.halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb010,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0xb010,
.enable_mask = BIT(0),
@@ -2461,6 +2499,8 @@
static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
.halt_reg = 0x17008,
.halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17008,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0x5200c,
.enable_mask = BIT(7),
@@ -2487,6 +2527,8 @@
static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
.halt_reg = 0x18010,
.halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x18010,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0x5200c,
.enable_mask = BIT(21),
@@ -2624,6 +2666,8 @@
static struct clk_branch gcc_ufs_card_ahb_clk = {
.halt_reg = 0x75010,
.halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x75010,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0x75010,
.enable_mask = BIT(0),
@@ -2637,6 +2681,8 @@
static struct clk_branch gcc_ufs_card_axi_clk = {
.halt_reg = 0x7500c,
.halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7500c,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0x7500c,
.enable_mask = BIT(0),
@@ -2685,6 +2731,8 @@
static struct clk_branch gcc_ufs_card_ice_core_clk = {
.halt_reg = 0x75058,
.halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x75058,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0x75058,
.enable_mask = BIT(0),
@@ -2720,6 +2768,8 @@
static struct clk_branch gcc_ufs_card_phy_aux_clk = {
.halt_reg = 0x7508c,
.halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7508c,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0x7508c,
.enable_mask = BIT(0),
@@ -2791,6 +2841,8 @@
static struct clk_branch gcc_ufs_card_unipro_core_clk = {
.halt_reg = 0x75054,
.halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x75054,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0x75054,
.enable_mask = BIT(0),
@@ -2839,6 +2891,8 @@
static struct clk_branch gcc_ufs_phy_ahb_clk = {
.halt_reg = 0x77010,
.halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77010,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0x77010,
.enable_mask = BIT(0),
@@ -2852,6 +2906,8 @@
static struct clk_branch gcc_ufs_phy_axi_clk = {
.halt_reg = 0x7700c,
.halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7700c,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0x7700c,
.enable_mask = BIT(0),
@@ -2887,6 +2943,8 @@
static struct clk_branch gcc_ufs_phy_ice_core_clk = {
.halt_reg = 0x77058,
.halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77058,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0x77058,
.enable_mask = BIT(0),
@@ -2922,6 +2980,8 @@
static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
.halt_reg = 0x7708c,
.halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7708c,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0x7708c,
.enable_mask = BIT(0),
@@ -2993,6 +3053,8 @@
static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
.halt_reg = 0x77054,
.halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77054,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0x77054,
.enable_mask = BIT(0),
@@ -3248,6 +3310,8 @@
static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = {
.halt_reg = 0x6a004,
.halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x6a004,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0x6a004,
.enable_mask = BIT(0),
@@ -3261,6 +3325,8 @@
static struct clk_branch gcc_video_ahb_clk = {
.halt_reg = 0xb004,
.halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb004,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0xb004,
.enable_mask = BIT(0),
@@ -3500,7 +3566,6 @@
};
static const struct qcom_reset_map gcc_sdm845_resets[] = {
- [GCC_GPU_BCR] = { 0x71000 },
[GCC_MMSS_BCR] = { 0xb000 },
[GCC_PCIE_0_BCR] = { 0x6b000 },
[GCC_PCIE_1_BCR] = { 0x8d000 },
diff --git a/drivers/clk/qcom/videocc-sdm845.c b/drivers/clk/qcom/videocc-sdm845.c
index 14a9cff..362ea0b 100644
--- a/drivers/clk/qcom/videocc-sdm845.c
+++ b/drivers/clk/qcom/videocc-sdm845.c
@@ -311,13 +311,6 @@
[VIDEO_PLL0] = &video_pll0.clkr,
};
-static const struct qcom_reset_map video_cc_sdm845_resets[] = {
- [VIDEO_CC_INTERFACE_BCR] = { 0x8f0 },
- [VIDEO_CC_VCODEC0_BCR] = { 0x870 },
- [VIDEO_CC_VCODEC1_BCR] = { 0x8b0 },
- [VIDEO_CC_VENUS_BCR] = { 0x810 },
-};
-
static const struct regmap_config video_cc_sdm845_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -330,8 +323,6 @@
.config = &video_cc_sdm845_regmap_config,
.clks = video_cc_sdm845_clocks,
.num_clks = ARRAY_SIZE(video_cc_sdm845_clocks),
- .resets = video_cc_sdm845_resets,
- .num_resets = ARRAY_SIZE(video_cc_sdm845_resets),
};
static const struct of_device_id video_cc_sdm845_match_table[] = {
diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c
index 0f0da4f..b979fb9 100644
--- a/drivers/crypto/msm/qcrypto.c
+++ b/drivers/crypto/msm/qcrypto.c
@@ -4289,7 +4289,7 @@
};
EXPORT_SYMBOL(qcrypto_cipher_set_device);
-int qcrypto_cipher_set_device_hw(struct ablkcipher_request *req, u32 dev,
+int qcrypto_cipher_set_device_hw(struct skcipher_request *req, u32 dev,
u32 hw_inst)
{
struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
@@ -4335,7 +4335,7 @@
};
EXPORT_SYMBOL(qcrypto_ahash_set_device);
-int qcrypto_cipher_set_flag(struct ablkcipher_request *req, unsigned int flags)
+int qcrypto_cipher_set_flag(struct skcipher_request *req, unsigned int flags)
{
struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct crypto_priv *cp = ctx->cp;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 747d9a6..d4a270e 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -304,7 +304,8 @@
list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
- kthread_queue_work(&priv->disp_thread[crtc_id].worker, &vbl_ctrl->work);
+ kthread_queue_work(&priv->event_thread[crtc_id].worker,
+ &vbl_ctrl->work);
return 0;
}
@@ -330,13 +331,19 @@
kfree(vbl_ev);
}
- /* clean up display commit worker threads */
+ /* clean up display commit/event worker threads */
for (i = 0; i < priv->num_crtcs; i++) {
if (priv->disp_thread[i].thread) {
kthread_flush_worker(&priv->disp_thread[i].worker);
kthread_stop(priv->disp_thread[i].thread);
priv->disp_thread[i].thread = NULL;
}
+
+ if (priv->event_thread[i].thread) {
+ kthread_flush_worker(&priv->event_thread[i].worker);
+ kthread_stop(priv->event_thread[i].thread);
+ priv->event_thread[i].thread = NULL;
+ }
}
msm_gem_shrinker_cleanup(ddev);
@@ -637,22 +644,50 @@
ddev->mode_config.funcs = &mode_config_funcs;
for (i = 0; i < priv->num_crtcs; i++) {
+
+ /* initialize display thread */
priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id;
kthread_init_worker(&priv->disp_thread[i].worker);
priv->disp_thread[i].dev = ddev;
priv->disp_thread[i].thread =
kthread_run(kthread_worker_fn,
&priv->disp_thread[i].worker,
- "crtc_commit:%d",
- priv->disp_thread[i].crtc_id);
+ "crtc_commit:%d", priv->disp_thread[i].crtc_id);
if (IS_ERR(priv->disp_thread[i].thread)) {
- dev_err(dev, "failed to create kthread\n");
+ dev_err(dev, "failed to create crtc_commit kthread\n");
priv->disp_thread[i].thread = NULL;
+ }
+
+ /* initialize event thread */
+ priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
+ kthread_init_worker(&priv->event_thread[i].worker);
+ priv->event_thread[i].dev = ddev;
+ priv->event_thread[i].thread =
+ kthread_run(kthread_worker_fn,
+ &priv->event_thread[i].worker,
+ "crtc_event:%d", priv->event_thread[i].crtc_id);
+
+ if (IS_ERR(priv->event_thread[i].thread)) {
+ dev_err(dev, "failed to create crtc_event kthread\n");
+ priv->event_thread[i].thread = NULL;
+ }
+
+ if ((!priv->disp_thread[i].thread) ||
+ !priv->event_thread[i].thread) {
/* clean up previously created threads if any */
- for (i -= 1; i >= 0; i--) {
- kthread_stop(priv->disp_thread[i].thread);
- priv->disp_thread[i].thread = NULL;
+ for ( ; i >= 0; i--) {
+ if (priv->disp_thread[i].thread) {
+ kthread_stop(
+ priv->disp_thread[i].thread);
+ priv->disp_thread[i].thread = NULL;
+ }
+
+ if (priv->event_thread[i].thread) {
+ kthread_stop(
+ priv->event_thread[i].thread);
+ priv->event_thread[i].thread = NULL;
+ }
}
goto fail;
}
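
The per-CRTC worker creation above follows the standard kthread_worker
pattern; condensed into a sketch (the helper is illustrative, not part of
the patch):

    static struct task_struct *start_crtc_worker(struct kthread_worker *worker,
                                                 const char *namefmt, int crtc_id)
    {
            struct task_struct *thread;

            kthread_init_worker(worker);
            thread = kthread_run(kthread_worker_fn, worker, namefmt, crtc_id);
            /* caller rolls back previously created threads on failure */
            return IS_ERR(thread) ? NULL : thread;
    }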
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 77dde55..fdf9b1f 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -136,8 +136,10 @@
CRTC_PROP_CORE_CLK,
CRTC_PROP_CORE_AB,
CRTC_PROP_CORE_IB,
- CRTC_PROP_MEM_AB,
- CRTC_PROP_MEM_IB,
+ CRTC_PROP_LLCC_AB,
+ CRTC_PROP_LLCC_IB,
+ CRTC_PROP_DRAM_AB,
+ CRTC_PROP_DRAM_IB,
CRTC_PROP_ROT_PREFILL_BW,
CRTC_PROP_ROT_CLK,
CRTC_PROP_ROI_V1,
@@ -471,8 +473,8 @@
u8 data[];
};
-/* Commit thread specific structure */
-struct msm_drm_commit {
+/* Commit/Event thread specific structure */
+struct msm_drm_thread {
struct drm_device *dev;
struct task_struct *thread;
unsigned int crtc_id;
@@ -536,7 +538,8 @@
unsigned int num_crtcs;
struct drm_crtc *crtcs[MAX_CRTCS];
- struct msm_drm_commit disp_thread[MAX_CRTCS];
+ struct msm_drm_thread disp_thread[MAX_CRTCS];
+ struct msm_drm_thread event_thread[MAX_CRTCS];
unsigned int num_encoders;
struct drm_encoder *encoders[MAX_ENCODERS];
diff --git a/drivers/gpu/drm/msm/sde/sde_core_perf.c b/drivers/gpu/drm/msm/sde/sde_core_perf.c
index b1f8b0f..71dfc12 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_perf.c
+++ b/drivers/gpu/drm/msm/sde/sde_core_perf.c
@@ -110,6 +110,7 @@
struct sde_core_perf_params *perf)
{
struct sde_crtc_state *sde_cstate;
+ int i;
if (!kms || !kms->catalog || !crtc || !state || !perf) {
SDE_ERROR("invalid parameters\n");
@@ -119,29 +120,64 @@
sde_cstate = to_sde_crtc_state(state);
memset(perf, 0, sizeof(struct sde_core_perf_params));
- perf->bw_ctl = sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_AB);
- perf->max_per_pipe_ib =
+ perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_MNOC] =
+ sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_AB);
+ perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_MNOC] =
+ sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_IB);
+
+ if (sde_cstate->bw_split_vote) {
+ perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_LLCC] =
+ sde_crtc_get_property(sde_cstate, CRTC_PROP_LLCC_AB);
+ perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_LLCC] =
+ sde_crtc_get_property(sde_cstate, CRTC_PROP_LLCC_IB);
+ perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_EBI] =
+ sde_crtc_get_property(sde_cstate, CRTC_PROP_DRAM_AB);
+ perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_EBI] =
+ sde_crtc_get_property(sde_cstate, CRTC_PROP_DRAM_IB);
+ } else {
+ perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_LLCC] =
+ sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_AB);
+ perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_LLCC] =
sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_IB);
+ perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_EBI] =
+ sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_AB);
+ perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_EBI] =
+ sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_IB);
+ }
+
perf->core_clk_rate =
sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_CLK);
if (!sde_cstate->bw_control) {
- perf->bw_ctl = kms->catalog->perf.max_bw_high * 1000ULL;
- perf->max_per_pipe_ib = perf->bw_ctl;
+ for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ perf->bw_ctl[i] = kms->catalog->perf.max_bw_high *
+ 1000ULL;
+ perf->max_per_pipe_ib[i] = perf->bw_ctl[i];
+ }
perf->core_clk_rate = kms->perf.max_core_clk_rate;
} else if (kms->perf.perf_tune.mode == SDE_PERF_MODE_MINIMUM) {
- perf->bw_ctl = 0;
- perf->max_per_pipe_ib = 0;
+ for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ perf->bw_ctl[i] = 0;
+ perf->max_per_pipe_ib[i] = 0;
+ }
perf->core_clk_rate = 0;
} else if (kms->perf.perf_tune.mode == SDE_PERF_MODE_FIXED) {
- perf->bw_ctl = kms->perf.fix_core_ab_vote;
- perf->max_per_pipe_ib = kms->perf.fix_core_ib_vote;
+ for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ perf->bw_ctl[i] = kms->perf.fix_core_ab_vote;
+ perf->max_per_pipe_ib[i] = kms->perf.fix_core_ib_vote;
+ }
perf->core_clk_rate = kms->perf.fix_core_clk_rate;
}
- SDE_DEBUG("crtc=%d clk_rate=%llu ib=%llu ab=%llu\n",
+ SDE_DEBUG(
+ "crtc=%d clk_rate=%llu core_ib=%llu core_ab=%llu llcc_ib=%llu llcc_ab=%llu mem_ib=%llu mem_ab=%llu\n",
crtc->base.id, perf->core_clk_rate,
- perf->max_per_pipe_ib, perf->bw_ctl);
+ perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_MNOC],
+ perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_MNOC],
+ perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_LLCC],
+ perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_LLCC],
+ perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_EBI],
+ perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_EBI]);
}
int sde_core_perf_crtc_check(struct drm_crtc *crtc,
@@ -154,6 +190,7 @@
struct sde_crtc_state *sde_cstate;
struct drm_crtc *tmp_crtc;
struct sde_kms *kms;
+ int i;
if (!crtc || !state) {
SDE_ERROR("invalid crtc\n");
@@ -175,39 +212,46 @@
/* obtain new values */
_sde_core_perf_calc_crtc(kms, crtc, state, &sde_cstate->new_perf);
- bw_sum_of_intfs = sde_cstate->new_perf.bw_ctl;
- curr_client_type = sde_crtc_get_client_type(crtc);
+ for (i = SDE_POWER_HANDLE_DBUS_ID_MNOC;
+ i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ bw_sum_of_intfs = sde_cstate->new_perf.bw_ctl[i];
+ curr_client_type = sde_crtc_get_client_type(crtc);
- drm_for_each_crtc(tmp_crtc, crtc->dev) {
- if (_sde_core_perf_crtc_is_power_on(tmp_crtc) &&
- (sde_crtc_get_client_type(tmp_crtc) == curr_client_type) &&
- (tmp_crtc != crtc)) {
- struct sde_crtc_state *tmp_cstate =
+ drm_for_each_crtc(tmp_crtc, crtc->dev) {
+ if (_sde_core_perf_crtc_is_power_on(tmp_crtc) &&
+ (sde_crtc_get_client_type(tmp_crtc) ==
+ curr_client_type) &&
+ (tmp_crtc != crtc)) {
+ struct sde_crtc_state *tmp_cstate =
to_sde_crtc_state(tmp_crtc->state);
- bw_sum_of_intfs += tmp_cstate->new_perf.bw_ctl;
+ bw_sum_of_intfs +=
+ tmp_cstate->new_perf.bw_ctl[i];
+ }
}
- }
- /* convert bandwidth to kb */
- bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000);
- SDE_DEBUG("calculated bandwidth=%uk\n", bw);
+ /* convert bandwidth to kb */
+ bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000);
+ SDE_DEBUG("calculated bandwidth=%uk\n", bw);
- is_video_mode = sde_crtc_get_intf_mode(crtc) == INTF_MODE_VIDEO;
- threshold = (is_video_mode ||
- _sde_core_video_mode_intf_connected(crtc)) ?
- kms->catalog->perf.max_bw_low : kms->catalog->perf.max_bw_high;
+ is_video_mode = sde_crtc_get_intf_mode(crtc) == INTF_MODE_VIDEO;
+ threshold = (is_video_mode ||
+ _sde_core_video_mode_intf_connected(crtc)) ?
+ kms->catalog->perf.max_bw_low :
+ kms->catalog->perf.max_bw_high;
- SDE_DEBUG("final threshold bw limit = %d\n", threshold);
+ SDE_DEBUG("final threshold bw limit = %d\n", threshold);
- if (!sde_cstate->bw_control) {
- SDE_DEBUG("bypass bandwidth check\n");
- } else if (!threshold) {
- SDE_ERROR("no bandwidth limits specified\n");
- return -E2BIG;
- } else if (bw > threshold) {
- SDE_ERROR("exceeds bandwidth: %ukb > %ukb\n", bw, threshold);
- return -E2BIG;
+ if (!sde_cstate->bw_control) {
+ SDE_DEBUG("bypass bandwidth check\n");
+ } else if (!threshold) {
+ SDE_ERROR("no bandwidth limits specified\n");
+ return -E2BIG;
+ } else if (bw > threshold) {
+ SDE_ERROR("exceeds bandwidth: %ukb > %ukb\n", bw,
+ threshold);
+ return -E2BIG;
+ }
}
return 0;
@@ -240,10 +284,10 @@
}
static void _sde_core_perf_crtc_update_bus(struct sde_kms *kms,
- struct drm_crtc *crtc)
+ struct drm_crtc *crtc, u32 bus_id)
{
u64 bw_sum_of_intfs = 0, bus_ab_quota, bus_ib_quota;
- struct sde_core_perf_params perf = {0};
+ struct sde_core_perf_params perf = { { 0 } };
enum sde_crtc_client_type client_vote, curr_client_type
= sde_crtc_get_client_type(crtc);
struct drm_crtc *tmp_crtc;
@@ -256,19 +300,20 @@
&kms->perf)) {
sde_cstate = to_sde_crtc_state(tmp_crtc->state);
- perf.max_per_pipe_ib = max(perf.max_per_pipe_ib,
- sde_cstate->new_perf.max_per_pipe_ib);
+ perf.max_per_pipe_ib[bus_id] =
+ max(perf.max_per_pipe_ib[bus_id],
+ sde_cstate->new_perf.max_per_pipe_ib[bus_id]);
- bw_sum_of_intfs += sde_cstate->new_perf.bw_ctl;
+ bw_sum_of_intfs += sde_cstate->new_perf.bw_ctl[bus_id];
- SDE_DEBUG("crtc=%d bw=%llu\n",
- tmp_crtc->base.id,
- sde_cstate->new_perf.bw_ctl);
+ SDE_DEBUG("crtc=%d bus_id=%d bw=%llu\n",
+ tmp_crtc->base.id, bus_id,
+ sde_cstate->new_perf.bw_ctl[bus_id]);
}
}
bus_ab_quota = max(bw_sum_of_intfs, kms->perf.perf_tune.min_bus_vote);
- bus_ib_quota = perf.max_per_pipe_ib;
+ bus_ib_quota = perf.max_per_pipe_ib[bus_id];
if (kms->perf.perf_tune.mode == SDE_PERF_MODE_FIXED) {
bus_ab_quota = kms->perf.fix_core_ab_vote;
@@ -280,25 +325,25 @@
case NRT_CLIENT:
sde_power_data_bus_set_quota(&priv->phandle, kms->core_client,
SDE_POWER_HANDLE_DATA_BUS_CLIENT_NRT,
- bus_ab_quota, bus_ib_quota);
- SDE_DEBUG("client:%s ab=%llu ib=%llu\n", "nrt",
- bus_ab_quota, bus_ib_quota);
+ bus_id, bus_ab_quota, bus_ib_quota);
+ SDE_DEBUG("client:%s bus_id=%d ab=%llu ib=%llu\n", "nrt",
+ bus_id, bus_ab_quota, bus_ib_quota);
break;
case RT_CLIENT:
sde_power_data_bus_set_quota(&priv->phandle, kms->core_client,
SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT,
- bus_ab_quota, bus_ib_quota);
- SDE_DEBUG("client:%s ab=%llu ib=%llu\n", "rt",
- bus_ab_quota, bus_ib_quota);
+ bus_id, bus_ab_quota, bus_ib_quota);
+ SDE_DEBUG("client:%s bus_id=%d ab=%llu ib=%llu\n", "rt",
+ bus_id, bus_ab_quota, bus_ib_quota);
break;
case RT_RSC_CLIENT:
sde_cstate = to_sde_crtc_state(crtc->state);
- sde_rsc_client_vote(sde_cstate->rsc_client, bus_ab_quota,
- bus_ib_quota);
- SDE_DEBUG("client:%s ab=%llu ib=%llu\n", "rt_rsc",
- bus_ab_quota, bus_ib_quota);
+ sde_rsc_client_vote(sde_cstate->rsc_client,
+ bus_id, bus_ab_quota, bus_ib_quota);
+ SDE_DEBUG("client:%s bus_id=%d ab=%llu ib=%llu\n", "rt_rsc",
+ bus_id, bus_ab_quota, bus_ib_quota);
break;
default:
@@ -311,10 +356,12 @@
case DISP_RSC_MODE:
sde_power_data_bus_set_quota(&priv->phandle,
kms->core_client,
- SDE_POWER_HANDLE_DATA_BUS_CLIENT_NRT, 0, 0);
+ SDE_POWER_HANDLE_DATA_BUS_CLIENT_NRT,
+ bus_id, 0, 0);
sde_power_data_bus_set_quota(&priv->phandle,
kms->core_client,
- SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT, 0, 0);
+ SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT,
+ bus_id, 0, 0);
kms->perf.bw_vote_mode_updated = false;
break;
@@ -322,7 +369,7 @@
sde_cstate = to_sde_crtc_state(crtc->state);
if (sde_cstate->rsc_client) {
sde_rsc_client_vote(sde_cstate->rsc_client,
- 0, 0);
+ bus_id, 0, 0);
kms->perf.bw_vote_mode_updated = false;
}
break;
@@ -347,6 +394,7 @@
struct sde_crtc *sde_crtc;
struct sde_crtc_state *sde_cstate;
struct sde_kms *kms;
+ int i;
if (!crtc) {
SDE_ERROR("invalid crtc\n");
@@ -382,9 +430,11 @@
/* Release the bandwidth */
if (kms->perf.enable_bw_release) {
trace_sde_cmd_release_bw(crtc->base.id);
- sde_crtc->cur_perf.bw_ctl = 0;
SDE_DEBUG("Release BW crtc=%d\n", crtc->base.id);
- _sde_core_perf_crtc_update_bus(kms, crtc);
+ for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ sde_crtc->cur_perf.bw_ctl[i] = 0;
+ _sde_core_perf_crtc_update_bus(kms, crtc, i);
+ }
}
}
@@ -419,7 +469,7 @@
u64 clk_rate = 0;
struct sde_crtc *sde_crtc;
struct sde_crtc_state *sde_cstate;
- int ret;
+ int ret, i;
struct msm_drm_private *priv;
struct sde_kms *kms;
@@ -449,38 +499,52 @@
new = &sde_cstate->new_perf;
if (_sde_core_perf_crtc_is_power_on(crtc) && !stop_req) {
- /*
- * cases for bus bandwidth update.
- * 1. new bandwidth vote - "ab or ib vote" is higher
- * than current vote for update request.
- * 2. new bandwidth vote - "ab or ib vote" is lower
- * than current vote at end of commit or stop.
- */
- if ((params_changed && ((new->bw_ctl > old->bw_ctl) ||
- (new->max_per_pipe_ib > old->max_per_pipe_ib))) ||
- (!params_changed && ((new->bw_ctl < old->bw_ctl) ||
- (new->max_per_pipe_ib < old->max_per_pipe_ib)))) {
- SDE_DEBUG("crtc=%d p=%d new_bw=%llu,old_bw=%llu\n",
- crtc->base.id, params_changed, new->bw_ctl,
- old->bw_ctl);
- old->bw_ctl = new->bw_ctl;
- old->max_per_pipe_ib = new->max_per_pipe_ib;
- update_bus = 1;
- }
+ for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ /*
+ * Cases for a bus bandwidth update:
+ * 1. new bandwidth vote - "ab or ib vote" is higher
+ * than the current vote, for an update request.
+ * 2. new bandwidth vote - "ab or ib vote" is lower
+ * than the current vote, at end of commit or stop.
+ */
+ if ((params_changed && ((new->bw_ctl[i] >
+ old->bw_ctl[i]) ||
+ (new->max_per_pipe_ib[i] >
+ old->max_per_pipe_ib[i]))) ||
+ (!params_changed && ((new->bw_ctl[i] <
+ old->bw_ctl[i]) ||
+ (new->max_per_pipe_ib[i] <
+ old->max_per_pipe_ib[i])))) {
+ SDE_DEBUG(
+ "crtc=%d p=%d new_bw=%llu,old_bw=%llu\n",
+ crtc->base.id, params_changed,
+ new->bw_ctl[i], old->bw_ctl[i]);
+ old->bw_ctl[i] = new->bw_ctl[i];
+ old->max_per_pipe_ib[i] =
+ new->max_per_pipe_ib[i];
+ update_bus |= BIT(i);
+ }
- /* display rsc override during solver mode */
- if (kms->perf.bw_vote_mode == DISP_RSC_MODE &&
+ /* display rsc override during solver mode */
+ if (kms->perf.bw_vote_mode == DISP_RSC_MODE &&
get_sde_rsc_current_state(SDE_RSC_INDEX) ==
- SDE_RSC_CMD_STATE) {
- /* update new bandwdith in all cases */
- if (params_changed && ((new->bw_ctl != old->bw_ctl) ||
- (new->max_per_pipe_ib != old->max_per_pipe_ib))) {
- old->bw_ctl = new->bw_ctl;
- old->max_per_pipe_ib = new->max_per_pipe_ib;
- update_bus = 1;
- /* reduce bw vote is not required in solver mode */
- } else if (!params_changed) {
- update_bus = 0;
+ SDE_RSC_CMD_STATE) {
+ /* update new bandwidth in all cases */
+ if (params_changed && ((new->bw_ctl[i] !=
+ old->bw_ctl[i]) ||
+ (new->max_per_pipe_ib[i] !=
+ old->max_per_pipe_ib[i]))) {
+ old->bw_ctl[i] = new->bw_ctl[i];
+ old->max_per_pipe_ib[i] =
+ new->max_per_pipe_ib[i];
+ update_bus |= BIT(i);
+ /*
+ * reducing the bw vote is not required in
+ * solver mode
+ */
+ } else if (!params_changed) {
+ update_bus &= ~BIT(i);
+ }
}
}
@@ -495,15 +559,20 @@
SDE_DEBUG("crtc=%d disable\n", crtc->base.id);
memset(old, 0, sizeof(*old));
memset(new, 0, sizeof(*new));
- update_bus = 1;
+ update_bus = ~0;
update_clk = 1;
}
- trace_sde_perf_crtc_update(crtc->base.id, new->bw_ctl,
+ trace_sde_perf_crtc_update(crtc->base.id,
+ new->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_MNOC],
+ new->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_LLCC],
+ new->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_EBI],
new->core_clk_rate, stop_req,
update_bus, update_clk);
- if (update_bus)
- _sde_core_perf_crtc_update_bus(kms, crtc);
+ for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ if (update_bus & BIT(i))
+ _sde_core_perf_crtc_update_bus(kms, crtc, i);
+ }
/*
* Update the clock after bandwidth vote to ensure
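
The bookkeeping introduced in this file reduces to a simple bitmask pattern,
sketched below with the identifiers used in the patch (surrounding
declarations elided):

    u32 update_bus = 0;
    int i;

    /* record which data buses changed... */
    for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++)
            if (new->bw_ctl[i] != old->bw_ctl[i] ||
                new->max_per_pipe_ib[i] != old->max_per_pipe_ib[i])
                    update_bus |= BIT(i);

    /* ...then vote only on the buses that changed */
    for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++)
            if (update_bus & BIT(i))
                    _sde_core_perf_crtc_update_bus(kms, crtc, i);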
diff --git a/drivers/gpu/drm/msm/sde/sde_core_perf.h b/drivers/gpu/drm/msm/sde/sde_core_perf.h
index 4a1bdad..589415c 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_perf.h
+++ b/drivers/gpu/drm/msm/sde/sde_core_perf.h
@@ -30,8 +30,8 @@
* @core_clk_rate: core clock rate request
*/
struct sde_core_perf_params {
- u64 max_per_pipe_ib;
- u64 bw_ctl;
+ u64 max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_MAX];
+ u64 bw_ctl[SDE_POWER_HANDLE_DBUS_ID_MAX];
u64 core_clk_rate;
};
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index e708290..30bb72b 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -494,12 +494,6 @@
{
if (!sde_crtc)
return;
-
- if (sde_crtc->event_thread) {
- kthread_flush_worker(&sde_crtc->event_worker);
- kthread_stop(sde_crtc->event_thread);
- sde_crtc->event_thread = NULL;
- }
}
static void sde_crtc_destroy(struct drm_crtc *crtc)
@@ -1516,8 +1510,8 @@
struct sde_crtc *sde_crtc;
struct sde_crtc_state *cstate;
struct sde_kms *sde_kms;
+ struct drm_encoder *encoder;
unsigned long flags;
- bool disable_inprogress = false;
if (!work) {
SDE_ERROR("invalid work handle\n");
@@ -1543,9 +1537,6 @@
SDE_DEBUG("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
ktime_to_ns(fevent->ts));
- disable_inprogress = fevent->event &
- SDE_ENCODER_FRAME_EVENT_DURING_DISABLE;
- fevent->event &= ~SDE_ENCODER_FRAME_EVENT_DURING_DISABLE;
if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE ||
(fevent->event & SDE_ENCODER_FRAME_EVENT_ERROR) ||
@@ -1566,15 +1557,35 @@
ktime_to_ns(fevent->ts));
SDE_EVT32(DRMID(crtc), fevent->event,
SDE_EVTLOG_FUNC_CASE2);
- if (!disable_inprogress)
- sde_core_perf_crtc_release_bw(crtc);
+ sde_core_perf_crtc_release_bw(crtc);
} else {
SDE_EVT32_VERBOSE(DRMID(crtc), fevent->event,
SDE_EVTLOG_FUNC_CASE3);
}
- if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE &&
- !disable_inprogress)
+ if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE ||
+ (fevent->event & SDE_ENCODER_FRAME_EVENT_ERROR)) {
+ bool signal_fence = true;
+
+ drm_for_each_encoder(encoder, crtc->dev) {
+ if (encoder->crtc != crtc)
+ continue;
+
+ signal_fence &=
+ sde_encoder_is_cmd_mode(encoder);
+ }
+
+ /* signal release fence only for cmd mode panels here */
+ if (signal_fence) {
+ sde_fence_signal(&sde_crtc->output_fence, 0);
+ SDE_EVT32_VERBOSE(DRMID(crtc), fevent->event,
+ SDE_EVTLOG_FUNC_CASE4);
+ }
+
+ complete_all(&sde_crtc->frame_done_comp);
+ }
+
+ if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE)
sde_core_perf_crtc_update(crtc, 0, false);
} else {
SDE_ERROR("crtc%d ts:%lld unknown event %u\n", crtc->base.id,
@@ -1599,7 +1610,7 @@
struct msm_drm_private *priv;
struct sde_crtc_frame_event *fevent;
unsigned long flags;
- int pipe_id;
+ u32 crtc_id;
if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
SDE_ERROR("invalid parameters\n");
@@ -1607,7 +1618,7 @@
}
sde_crtc = to_sde_crtc(crtc);
priv = crtc->dev->dev_private;
- pipe_id = drm_crtc_index(crtc);
+ crtc_id = drm_crtc_index(crtc);
SDE_DEBUG("crtc%d\n", crtc->base.id);
SDE_EVT32_VERBOSE(DRMID(crtc), event);
@@ -1629,11 +1640,7 @@
fevent->event = event;
fevent->crtc = crtc;
fevent->ts = ktime_get();
- if (event & SDE_ENCODER_FRAME_EVENT_DURING_DISABLE)
- sde_crtc_frame_event_work(&fevent->work);
- else
- kthread_queue_work(&priv->disp_thread[pipe_id].worker,
- &fevent->work);
+ kthread_queue_work(&priv->event_thread[crtc_id].worker, &fevent->work);
}
void sde_crtc_complete_commit(struct drm_crtc *crtc,
@@ -1641,7 +1648,9 @@
{
struct sde_crtc *sde_crtc;
struct sde_crtc_state *cstate;
+ struct drm_encoder *encoder;
int i;
+ bool signal_fence = true;
if (!crtc || !crtc->state) {
SDE_ERROR("invalid crtc\n");
@@ -1652,9 +1661,18 @@
cstate = to_sde_crtc_state(crtc->state);
SDE_EVT32_VERBOSE(DRMID(crtc));
- /* signal output fence(s) at end of commit */
- sde_fence_signal(&sde_crtc->output_fence, 0);
+ drm_for_each_encoder(encoder, crtc->dev) {
+ if (encoder->crtc != crtc)
+ continue;
+ signal_fence &= !sde_encoder_is_cmd_mode(encoder);
+ }
+
+ /* signal release fence for non-cmd mode panels */
+ if (signal_fence)
+ sde_fence_signal(&sde_crtc->output_fence, 0);
+
+ /* signal retire fence */
for (i = 0; i < cstate->num_connectors; ++i)
sde_connector_complete_commit(cstate->connectors[i]);
}
@@ -2085,6 +2103,36 @@
cstate->property_values, cstate->property_blobs);
}
+static int _sde_crtc_wait_for_frame_done(struct drm_crtc *crtc)
+{
+ struct sde_crtc *sde_crtc;
+ int ret, rc = 0;
+
+ if (!crtc) {
+ SDE_ERROR("invalid argument\n");
+ return -EINVAL;
+ }
+ sde_crtc = to_sde_crtc(crtc);
+
+ if (!atomic_read(&sde_crtc->frame_pending)) {
+ SDE_DEBUG("no frames pending\n");
+ return 0;
+ }
+
+ SDE_EVT32(DRMID(crtc), SDE_EVTLOG_FUNC_ENTRY);
+ ret = wait_for_completion_timeout(&sde_crtc->frame_done_comp,
+ msecs_to_jiffies(SDE_FRAME_DONE_TIMEOUT));
+ if (!ret) {
+ SDE_ERROR("frame done completion wait timed out, ret:%d\n",
+ ret);
+ SDE_EVT32(DRMID(crtc), SDE_EVTLOG_FATAL);
+ rc = -ETIMEDOUT;
+ }
+ SDE_EVT32(DRMID(crtc), SDE_EVTLOG_FUNC_EXIT);
+
+ return rc;
+}
+
void sde_crtc_commit_kickoff(struct drm_crtc *crtc)
{
struct drm_encoder *encoder;
@@ -2129,19 +2177,21 @@
sde_encoder_prepare_for_kickoff(encoder, ¶ms);
}
- if (atomic_read(&sde_crtc->frame_pending) > 2) {
- /* framework allows only 1 outstanding + current */
- SDE_ERROR("crtc%d invalid frame pending\n",
- crtc->base.id);
- SDE_EVT32(DRMID(crtc), 0);
+ /* wait for frame_event_done completion */
+ if (_sde_crtc_wait_for_frame_done(crtc)) {
+ SDE_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
+ crtc->base.id,
+ atomic_read(&sde_crtc->frame_pending));
goto end;
- } else if (atomic_inc_return(&sde_crtc->frame_pending) == 1) {
+ }
+
+ if (atomic_inc_return(&sde_crtc->frame_pending) == 1) {
/* acquire bandwidth and other resources */
SDE_DEBUG("crtc%d first commit\n", crtc->base.id);
- SDE_EVT32(DRMID(crtc), 1);
+ SDE_EVT32(DRMID(crtc), SDE_EVTLOG_FUNC_CASE1);
} else {
SDE_DEBUG("crtc%d commit\n", crtc->base.id);
- SDE_EVT32(DRMID(crtc), 2);
+ SDE_EVT32(DRMID(crtc), SDE_EVTLOG_FUNC_CASE2);
}
sde_crtc->play_count++;
@@ -2151,6 +2201,9 @@
sde_encoder_kickoff(encoder);
}
+
+ reinit_completion(&sde_crtc->frame_done_comp);
+
end:
SDE_ATRACE_END("crtc_commit");
return;
@@ -2444,6 +2497,12 @@
mutex_lock(&sde_crtc->crtc_lock);
SDE_EVT32(DRMID(crtc));
+ /* wait for frame_event_done completion */
+ if (_sde_crtc_wait_for_frame_done(crtc))
+ SDE_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
+ crtc->base.id,
+ atomic_read(&sde_crtc->frame_pending));
+
if (atomic_read(&sde_crtc->vblank_refcount) && !sde_crtc->suspend) {
SDE_ERROR("crtc%d invalid vblank refcount\n",
crtc->base.id);
@@ -2455,8 +2514,6 @@
}
if (atomic_read(&sde_crtc->frame_pending)) {
- /* release bandwidth and other resources */
- SDE_ERROR("crtc%d invalid frame pending\n", crtc->base.id);
SDE_EVT32(DRMID(crtc), atomic_read(&sde_crtc->frame_pending),
SDE_EVTLOG_FUNC_CASE2);
sde_core_perf_crtc_release_bw(crtc);
@@ -2482,6 +2539,7 @@
/* disable clk & bw control until clk & bw properties are set */
cstate->bw_control = false;
+ cstate->bw_split_vote = false;
spin_lock_irqsave(&sde_crtc->spin_lock, flags);
list_for_each_entry(node, &sde_crtc->user_event_list, list) {
@@ -2986,13 +3044,21 @@
catalog->perf.max_bw_high * 1000ULL,
CRTC_PROP_CORE_IB);
msm_property_install_range(&sde_crtc->property_info,
- "mem_ab", 0x0, 0, U64_MAX,
+ "llcc_ab", 0x0, 0, U64_MAX,
catalog->perf.max_bw_high * 1000ULL,
- CRTC_PROP_MEM_AB);
+ CRTC_PROP_LLCC_AB);
msm_property_install_range(&sde_crtc->property_info,
- "mem_ib", 0x0, 0, U64_MAX,
+ "llcc_ib", 0x0, 0, U64_MAX,
catalog->perf.max_bw_high * 1000ULL,
- CRTC_PROP_MEM_IB);
+ CRTC_PROP_LLCC_IB);
+ msm_property_install_range(&sde_crtc->property_info,
+ "dram_ab", 0x0, 0, U64_MAX,
+ catalog->perf.max_bw_high * 1000ULL,
+ CRTC_PROP_DRAM_AB);
+ msm_property_install_range(&sde_crtc->property_info,
+ "dram_ib", 0x0, 0, U64_MAX,
+ catalog->perf.max_bw_high * 1000ULL,
+ CRTC_PROP_DRAM_IB);
msm_property_install_range(&sde_crtc->property_info,
"rot_prefill_bw", 0, 0, U64_MAX,
catalog->perf.max_bw_high * 1000ULL,
@@ -3120,10 +3186,15 @@
case CRTC_PROP_CORE_CLK:
case CRTC_PROP_CORE_AB:
case CRTC_PROP_CORE_IB:
- case CRTC_PROP_MEM_AB:
- case CRTC_PROP_MEM_IB:
cstate->bw_control = true;
break;
+ case CRTC_PROP_LLCC_AB:
+ case CRTC_PROP_LLCC_IB:
+ case CRTC_PROP_DRAM_AB:
+ case CRTC_PROP_DRAM_IB:
+ cstate->bw_control = true;
+ cstate->bw_split_vote = true;
+ break;
default:
/* nothing to do */
break;
@@ -3475,15 +3546,22 @@
struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
struct sde_crtc_state *cstate = to_sde_crtc_state(crtc->state);
struct sde_crtc_res *res;
+ int i;
seq_printf(s, "num_connectors: %d\n", cstate->num_connectors);
seq_printf(s, "client type: %d\n", sde_crtc_get_client_type(crtc));
seq_printf(s, "intf_mode: %d\n", sde_crtc_get_intf_mode(crtc));
- seq_printf(s, "bw_ctl: %llu\n", sde_crtc->cur_perf.bw_ctl);
seq_printf(s, "core_clk_rate: %llu\n",
sde_crtc->cur_perf.core_clk_rate);
- seq_printf(s, "max_per_pipe_ib: %llu\n",
- sde_crtc->cur_perf.max_per_pipe_ib);
+ for (i = SDE_POWER_HANDLE_DBUS_ID_MNOC;
+ i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ seq_printf(s, "bw_ctl[%s]: %llu\n",
+ sde_power_handle_get_dbus_name(i),
+ sde_crtc->cur_perf.bw_ctl[i]);
+ seq_printf(s, "max_per_pipe_ib[%s]: %llu\n",
+ sde_power_handle_get_dbus_name(i),
+ sde_crtc->cur_perf.max_per_pipe_ib[i]);
+ }
seq_printf(s, "rp.%d: ", cstate->rp.sequence_id);
list_for_each_entry(res, &cstate->rp.res_list, list)
@@ -3624,14 +3702,18 @@
{
unsigned long irq_flags;
struct sde_crtc *sde_crtc;
+ struct msm_drm_private *priv;
struct sde_crtc_event *event = NULL;
+ u32 crtc_id;
- if (!crtc || !func)
+ if (!crtc || !crtc->dev || !crtc->dev->dev_private || !func) {
+ SDE_ERROR("invalid parameters\n");
return -EINVAL;
+ }
sde_crtc = to_sde_crtc(crtc);
+ priv = crtc->dev->dev_private;
+ crtc_id = drm_crtc_index(crtc);
- if (!sde_crtc->event_thread)
- return -EINVAL;
/*
* Obtain an event struct from the private cache. This event
* queue may be called from ISR contexts, so use a private
@@ -3655,7 +3737,8 @@
/* queue new event request */
kthread_init_work(&event->kt_work, _sde_crtc_event_cb);
- kthread_queue_work(&sde_crtc->event_worker, &event->kt_work);
+ kthread_queue_work(&priv->event_thread[crtc_id].worker,
+ &event->kt_work);
return 0;
}
@@ -3676,17 +3759,6 @@
list_add_tail(&sde_crtc->event_cache[i].list,
&sde_crtc->event_free_list);
- kthread_init_worker(&sde_crtc->event_worker);
- sde_crtc->event_thread = kthread_run(kthread_worker_fn,
- &sde_crtc->event_worker, "crtc_event:%d",
- sde_crtc->base.base.id);
-
- if (IS_ERR_OR_NULL(sde_crtc->event_thread)) {
- SDE_ERROR("failed to create event thread\n");
- rc = PTR_ERR(sde_crtc->event_thread);
- sde_crtc->event_thread = NULL;
- }
-
return rc;
}
@@ -3714,6 +3786,8 @@
spin_lock_init(&sde_crtc->spin_lock);
atomic_set(&sde_crtc->frame_pending, 0);
+ init_completion(&sde_crtc->frame_done_comp);
+
INIT_LIST_HEAD(&sde_crtc->frame_event_list);
INIT_LIST_HEAD(&sde_crtc->user_event_list);
for (i = 0; i < ARRAY_SIZE(sde_crtc->frame_events); i++) {
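The per-CRTC worker queued to above replaces the private kthread removed earlier in this file; its creation is expected to live centrally in msm_drv.c, outside this diff. A sketch of that setup, mirroring the kthread_run() sequence removed above (struct and function names are assumptions):

	struct msm_drm_thread {
		struct task_struct *thread;
		struct kthread_worker worker;
	};

	static int msm_event_threads_init(struct msm_drm_private *priv,
			int num_crtcs)
	{
		int i;

		for (i = 0; i < num_crtcs; i++) {
			kthread_init_worker(&priv->event_thread[i].worker);
			priv->event_thread[i].thread = kthread_run(
					kthread_worker_fn,
					&priv->event_thread[i].worker,
					"crtc_event:%d", i);
			if (IS_ERR(priv->event_thread[i].thread)) {
				int ret = PTR_ERR(priv->event_thread[i].thread);

				priv->event_thread[i].thread = NULL;
				return ret;
			}
		}

		return 0;
	}
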
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index 38311c1..0d72ff1 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -135,6 +135,7 @@
* @frame_events : static allocation of in-flight frame events
* @frame_event_list : available frame event list
* @spin_lock : spin lock for frame event, transaction status, etc...
+ * @frame_done_comp : for frame_event_done synchronization
* @event_thread : Pointer to event handler thread
* @event_worker : Event worker queue
* @event_cache : Local cache of event worker structures
@@ -186,10 +187,9 @@
struct sde_crtc_frame_event frame_events[SDE_CRTC_FRAME_EVENT_SIZE];
struct list_head frame_event_list;
spinlock_t spin_lock;
+ struct completion frame_done_comp;
/* for handling internal event thread */
- struct task_struct *event_thread;
- struct kthread_worker event_worker;
struct sde_crtc_event event_cache[SDE_CRTC_MAX_EVENT_COUNT];
struct list_head event_free_list;
spinlock_t event_lock;
@@ -260,7 +260,8 @@
* @intf_mode : Interface mode of the primary connector
* @rsc_client : sde rsc client when mode is valid
* @is_ppsplit : Whether current topology requires PPSplit special handling
- * @bw_control : true if bw/clk controlled by bw/clk properties
+ * @bw_control : true if bw/clk controlled by core bw/clk properties
+ * @bw_split_vote : true if bw controlled by llcc/dram bw properties
* @crtc_roi : Current CRTC ROI. Possibly sub-rectangle of mode.
* Origin top left of CRTC.
* @lm_bounds : LM boundaries based on current mode full resolution, no ROI.
@@ -287,6 +288,7 @@
struct sde_rsc_client *rsc_client;
bool rsc_update;
bool bw_control;
+ bool bw_split_vote;
bool is_ppsplit;
struct sde_rect crtc_roi;
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index e1caeaf..0b4dd82 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -56,9 +56,6 @@
(p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
##__VA_ARGS__)
-/* timeout in frames waiting for frame done */
-#define SDE_ENCODER_FRAME_DONE_TIMEOUT 60
-
/*
* Two to anticipate panels that can do cmd/vid dynamic switching
* plan is to create all possible physical encoder types, and switch between
@@ -173,7 +170,6 @@
* @rsc_cfg: rsc configuration
* @cur_conn_roi: current connector roi
* @prv_conn_roi: previous connector roi to optimize if unchanged
- * @disable_inprogress: sde encoder disable is in progress.
*/
struct sde_encoder_virt {
struct drm_encoder base;
@@ -217,7 +213,6 @@
struct sde_encoder_rsc_config rsc_cfg;
struct sde_rect cur_conn_roi;
struct sde_rect prv_conn_roi;
- bool disable_inprogress;
};
#define to_sde_encoder_virt(x) container_of(x, struct sde_encoder_virt, base)
@@ -1643,7 +1638,6 @@
SDE_EVT32(DRMID(drm_enc));
sde_enc->cur_master = NULL;
- sde_enc->disable_inprogress = false;
for (i = 0; i < sde_enc->num_phys_encs; i++) {
struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
@@ -1702,7 +1696,6 @@
priv = drm_enc->dev->dev_private;
sde_kms = to_sde_kms(priv->kms);
- sde_enc->disable_inprogress = true;
SDE_EVT32(DRMID(drm_enc));
@@ -1868,9 +1861,6 @@
sde_encoder_resource_control(drm_enc,
SDE_ENC_RC_EVENT_FRAME_DONE);
- if (sde_enc->disable_inprogress)
- event |= SDE_ENCODER_FRAME_EVENT_DURING_DISABLE;
-
if (sde_enc->crtc_frame_event_cb)
sde_enc->crtc_frame_event_cb(
sde_enc->crtc_frame_event_cb_data, event);
@@ -2224,6 +2214,22 @@
}
}
+bool sde_encoder_is_cmd_mode(struct drm_encoder *drm_enc)
+{
+ struct sde_encoder_virt *sde_enc;
+ struct msm_display_info *disp_info;
+
+ if (!drm_enc) {
+ SDE_ERROR("invalid encoder\n");
+ return false;
+ }
+
+ sde_enc = to_sde_encoder_virt(drm_enc);
+ disp_info = &sde_enc->disp_info;
+
+ return (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE);
+}
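
A caller sketch for the new helper (hypothetical usage; DRMID and pr_debug as used elsewhere in this series):

	if (sde_encoder_is_cmd_mode(drm_enc))
		pr_debug("enc %d drives a command-mode panel\n",
				DRMID(drm_enc));
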
+
void sde_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
{
struct sde_encoder_virt *sde_enc;
@@ -2332,7 +2338,7 @@
SDE_DEBUG_ENC(sde_enc, "\n");
atomic_set(&sde_enc->frame_done_timeout,
- SDE_ENCODER_FRAME_DONE_TIMEOUT * 1000 /
+ SDE_FRAME_DONE_TIMEOUT * 1000 /
drm_enc->crtc->state->adjusted_mode.vrefresh);
mod_timer(&sde_enc->frame_done_timer, jiffies +
((atomic_read(&sde_enc->frame_done_timeout) * HZ) / 1000));
@@ -2912,10 +2918,7 @@
SDE_ERROR_ENC(sde_enc, "frame done timeout\n");
- event = SDE_ENCODER_FRAME_EVENT_ERROR;
- if (sde_enc->disable_inprogress)
- event |= SDE_ENCODER_FRAME_EVENT_DURING_DISABLE;
-
+ event = SDE_ENCODER_FRAME_EVENT_ERROR;
SDE_EVT32(DRMID(drm_enc), event);
sde_enc->crtc_frame_event_cb(sde_enc->crtc_frame_event_cb_data, event);
}
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h
index 0b14a58..9c2d3e9 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.h
@@ -27,7 +27,6 @@
#define SDE_ENCODER_FRAME_EVENT_DONE BIT(0)
#define SDE_ENCODER_FRAME_EVENT_ERROR BIT(1)
#define SDE_ENCODER_FRAME_EVENT_PANEL_DEAD BIT(2)
-#define SDE_ENCODER_FRAME_EVENT_DURING_DISABLE BIT(3)
/**
* Encoder functions and data types
@@ -174,6 +173,13 @@
bool sde_encoder_is_dsc_merge(struct drm_encoder *drm_enc);
/**
+ * sde_encoder_is_cmd_mode - check if it is cmd mode
+ * @drm_enc: Pointer to drm encoder object
+ * @Return: true if it is cmd mode
+ */
+bool sde_encoder_is_cmd_mode(struct drm_encoder *drm_enc);
+
+/**
* sde_encoder_init - initialize virtual encoder object
* @dev: Pointer to drm device structure
* @disp_info: Pointer to display information structure
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
index 058f19b..5894fe2 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.h
+++ b/drivers/gpu/drm/msm/sde/sde_kms.h
@@ -88,6 +88,10 @@
#define SDE_NAME_SIZE 12
+
+/* timeout in frames waiting for frame done */
+#define SDE_FRAME_DONE_TIMEOUT 60
+
/*
* struct sde_irq_callback - IRQ callback handlers
* @list: list to callback
diff --git a/drivers/gpu/drm/msm/sde/sde_trace.h b/drivers/gpu/drm/msm/sde/sde_trace.h
index 6962bef..e233fc7 100644
--- a/drivers/gpu/drm/msm/sde/sde_trace.h
+++ b/drivers/gpu/drm/msm/sde/sde_trace.h
@@ -193,13 +193,16 @@
)
TRACE_EVENT(sde_perf_crtc_update,
- TP_PROTO(u32 crtc, u64 bw_ctl, u32 core_clk_rate,
- bool stop_req, u32 update_bus, u32 update_clk),
- TP_ARGS(crtc, bw_ctl, core_clk_rate,
+ TP_PROTO(u32 crtc, u64 bw_ctl_mnoc, u64 bw_ctl_llcc,
+ u64 bw_ctl_ebi, u32 core_clk_rate,
+ bool stop_req, u32 update_bus, u32 update_clk),
+ TP_ARGS(crtc, bw_ctl_mnoc, bw_ctl_llcc, bw_ctl_ebi, core_clk_rate,
stop_req, update_bus, update_clk),
TP_STRUCT__entry(
__field(u32, crtc)
- __field(u64, bw_ctl)
+ __field(u64, bw_ctl_mnoc)
+ __field(u64, bw_ctl_llcc)
+ __field(u64, bw_ctl_ebi)
__field(u32, core_clk_rate)
__field(bool, stop_req)
__field(u32, update_bus)
@@ -207,19 +210,24 @@
),
TP_fast_assign(
__entry->crtc = crtc;
- __entry->bw_ctl = bw_ctl;
+ __entry->bw_ctl_mnoc = bw_ctl_mnoc;
+ __entry->bw_ctl_llcc = bw_ctl_llcc;
+ __entry->bw_ctl_ebi = bw_ctl_ebi;
__entry->core_clk_rate = core_clk_rate;
__entry->stop_req = stop_req;
__entry->update_bus = update_bus;
__entry->update_clk = update_clk;
),
- TP_printk("crtc=%d bw=%llu clk_rate=%u stop_req=%d u_bus=%d u_clk=%d",
- __entry->crtc,
- __entry->bw_ctl,
- __entry->core_clk_rate,
- __entry->stop_req,
- __entry->update_bus,
- __entry->update_clk)
+ TP_printk(
+ "crtc=%d bw_mnoc=%llu bw_llcc=%llu bw_ebi=%llu clk_rate=%u stop_req=%d u_bus=%d u_clk=%d",
+ __entry->crtc,
+ __entry->bw_ctl_mnoc,
+ __entry->bw_ctl_llcc,
+ __entry->bw_ctl_ebi,
+ __entry->core_clk_rate,
+ __entry->stop_req,
+ __entry->update_bus,
+ __entry->update_clk)
);
#define SDE_ATRACE_END(name) trace_sde_mark_write(current->tgid, name, 0)
diff --git a/drivers/gpu/drm/msm/sde_power_handle.c b/drivers/gpu/drm/msm/sde_power_handle.c
index 452a3be..242cd64 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.c
+++ b/drivers/gpu/drm/msm/sde_power_handle.c
@@ -30,6 +30,20 @@
#include "sde_power_handle.h"
#include "sde_trace.h"
+static const char *data_bus_name[SDE_POWER_HANDLE_DBUS_ID_MAX] = {
+ [SDE_POWER_HANDLE_DBUS_ID_MNOC] = "qcom,sde-data-bus",
+ [SDE_POWER_HANDLE_DBUS_ID_LLCC] = "qcom,sde-llcc-bus",
+ [SDE_POWER_HANDLE_DBUS_ID_EBI] = "qcom,sde-ebi-bus",
+};
+
+const char *sde_power_handle_get_dbus_name(u32 bus_id)
+{
+ if (bus_id < SDE_POWER_HANDLE_DBUS_ID_MAX)
+ return data_bus_name[bus_id];
+
+ return NULL;
+}
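
Usage sketch, equivalent to the debugfs dump added in sde_crtc.c above:

	u32 i;

	for (i = SDE_POWER_HANDLE_DBUS_ID_MNOC;
			i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++)
		pr_debug("dbus %u maps to DT node %s\n", i,
				sde_power_handle_get_dbus_name(i));
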
+
static void sde_power_event_trigger_locked(struct sde_power_handle *phandle,
u32 event_type)
{
@@ -415,7 +429,9 @@
vect->ab = ab_quota[i];
vect->ib = ib_quota[i];
- pr_debug("uc_idx=%d %s path idx=%d ab=%llu ib=%llu\n",
+ pr_debug(
+ "%s uc_idx=%d %s path idx=%d ab=%llu ib=%llu\n",
+ bw_table->name,
new_uc_idx, (i < rt_axi_port_cnt) ? "rt" : "nrt"
, i, vect->ab, vect->ib);
}
@@ -433,7 +449,8 @@
int sde_power_data_bus_set_quota(struct sde_power_handle *phandle,
struct sde_power_client *pclient,
- int bus_client, u64 ab_quota, u64 ib_quota)
+ int bus_client, u32 bus_id,
+ u64 ab_quota, u64 ib_quota)
{
int rc = 0;
int i;
@@ -442,7 +459,8 @@
struct sde_power_client *client;
if (!phandle || !pclient ||
- bus_client >= SDE_POWER_HANDLE_DATA_BUS_CLIENT_MAX) {
+ bus_client >= SDE_POWER_HANDLE_DATA_BUS_CLIENT_MAX ||
+ bus_id >= SDE_POWER_HANDLE_DBUS_ID_MAX) {
pr_err("invalid parameters\n");
return -EINVAL;
}
@@ -465,7 +483,9 @@
}
}
- rc = _sde_power_data_bus_set_quota(&phandle->data_bus_handle,
+ if (phandle->data_bus_handle[bus_id].data_bus_hdl)
+ rc = _sde_power_data_bus_set_quota(
+ &phandle->data_bus_handle[bus_id],
total_ab_rt, total_ab_nrt,
total_ib_rt, total_ib_nrt);
@@ -484,7 +504,7 @@
}
static int sde_power_data_bus_parse(struct platform_device *pdev,
- struct sde_power_data_bus_handle *pdbus)
+ struct sde_power_data_bus_handle *pdbus, const char *name)
{
struct device_node *node;
int rc = 0;
@@ -507,7 +527,7 @@
rc = 0;
}
- node = of_get_child_by_name(pdev->dev.of_node, "qcom,sde-data-bus");
+ node = of_get_child_by_name(pdev->dev.of_node, name);
if (node) {
rc = of_property_read_u32(node,
"qcom,msm-bus,num-paths", &paths);
@@ -533,7 +553,8 @@
rc = -EINVAL;
goto end;
}
- pr_debug("register data_bus_hdl=%x\n", pdbus->data_bus_hdl);
+ pr_debug("register %s data_bus_hdl=%x\n", name,
+ pdbus->data_bus_hdl);
}
end:
@@ -621,7 +642,8 @@
int sde_power_data_bus_set_quota(struct sde_power_handle *phandle,
struct sde_power_client *pclient,
- int bus_client, u64 ab_quota, u64 ib_quota)
+ int bus_client, u32 bus_id,
+ u64 ab_quota, u64 ib_quota)
{
return 0;
}
@@ -651,7 +673,7 @@
int sde_power_resource_init(struct platform_device *pdev,
struct sde_power_handle *phandle)
{
- int rc = 0;
+ int rc = 0, i;
struct dss_module_power *mp;
if (!phandle || !pdev) {
@@ -699,10 +721,16 @@
goto bus_err;
}
- rc = sde_power_data_bus_parse(pdev, &phandle->data_bus_handle);
- if (rc) {
- pr_err("register data bus parse failed rc=%d\n", rc);
- goto data_bus_err;
+ for (i = SDE_POWER_HANDLE_DBUS_ID_MNOC;
+ i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ rc = sde_power_data_bus_parse(pdev,
+ &phandle->data_bus_handle[i],
+ data_bus_name[i]);
+ if (rc) {
+ pr_err("register data bus parse failed id=%d rc=%d\n",
+ i, rc);
+ goto data_bus_err;
+ }
}
INIT_LIST_HEAD(&phandle->power_client_clist);
@@ -716,6 +744,8 @@
return rc;
data_bus_err:
+ for (i--; i >= 0; i--)
+ sde_power_data_bus_unregister(&phandle->data_bus_handle[i]);
sde_power_reg_bus_unregister(phandle->reg_bus_hdl);
bus_err:
msm_dss_put_clk(mp->clk_config, mp->num_clk);
@@ -739,6 +769,7 @@
struct dss_module_power *mp;
struct sde_power_client *curr_client, *next_client;
struct sde_power_event *curr_event, *next_event;
+ int i;
if (!phandle || !pdev) {
pr_err("invalid input param\n");
@@ -766,7 +797,8 @@
}
mutex_unlock(&phandle->phandle_lock);
- sde_power_data_bus_unregister(&phandle->data_bus_handle);
+ for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++)
+ sde_power_data_bus_unregister(&phandle->data_bus_handle[i]);
sde_power_reg_bus_unregister(phandle->reg_bus_hdl);
@@ -790,7 +822,7 @@
int sde_power_resource_enable(struct sde_power_handle *phandle,
struct sde_power_client *pclient, bool enable)
{
- int rc = 0;
+ int rc = 0, i;
bool changed = false;
u32 max_usecase_ndx = VOTE_INDEX_DISABLE, prev_usecase_ndx;
struct sde_power_client *client;
@@ -837,13 +869,15 @@
sde_power_event_trigger_locked(phandle,
SDE_POWER_EVENT_PRE_ENABLE);
- rc = sde_power_data_bus_update(&phandle->data_bus_handle,
- enable);
- if (rc) {
- pr_err("failed to set data bus vote rc=%d\n", rc);
- goto data_bus_hdl_err;
+ for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ rc = sde_power_data_bus_update(
+ &phandle->data_bus_handle[i], enable);
+ if (rc) {
+ pr_err("failed to set data bus vote id=%d rc=%d\n",
+ i, rc);
+ goto data_bus_hdl_err;
+ }
}
-
/*
* - When the target is RSCC enabled, regulator should
* be enabled by the s/w only for the first time during
@@ -897,7 +931,9 @@
if (!phandle->rsc_client)
msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg,
enable);
- sde_power_data_bus_update(&phandle->data_bus_handle, enable);
+ for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++)
+ sde_power_data_bus_update(&phandle->data_bus_handle[i],
+ enable);
sde_power_event_trigger_locked(phandle,
SDE_POWER_EVENT_POST_DISABLE);
@@ -915,7 +951,8 @@
if (!phandle->rsc_client)
msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, 0);
vreg_err:
- sde_power_data_bus_update(&phandle->data_bus_handle, 0);
+ for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++)
+ sde_power_data_bus_update(&phandle->data_bus_handle[i], 0);
data_bus_hdl_err:
phandle->current_usecase_ndx = prev_usecase_ndx;
mutex_unlock(&phandle->phandle_lock);
diff --git a/drivers/gpu/drm/msm/sde_power_handle.h b/drivers/gpu/drm/msm/sde_power_handle.h
index c526b71..78c325d 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.h
+++ b/drivers/gpu/drm/msm/sde_power_handle.h
@@ -16,9 +16,9 @@
#define MAX_CLIENT_NAME_LEN 128
-#define SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA 2000000
+#define SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA 1600000000
#define SDE_POWER_HANDLE_DISABLE_BUS_AB_QUOTA 0
-#define SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA 2000000
+#define SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA 1600000000
#define SDE_POWER_HANDLE_DISABLE_BUS_IB_QUOTA 0
#include <linux/sde_io_util.h>
@@ -60,6 +60,19 @@
};
/**
+ * enum SDE_POWER_HANDLE_DBUS_ID - data bus identifier
+ * @SDE_POWER_HANDLE_DBUS_ID_MNOC: DPU/MNOC data bus
+ * @SDE_POWER_HANDLE_DBUS_ID_LLCC: MNOC/LLCC data bus
+ * @SDE_POWER_HANDLE_DBUS_ID_EBI: LLCC/EBI data bus
+ */
+enum SDE_POWER_HANDLE_DBUS_ID {
+ SDE_POWER_HANDLE_DBUS_ID_MNOC,
+ SDE_POWER_HANDLE_DBUS_ID_LLCC,
+ SDE_POWER_HANDLE_DBUS_ID_EBI,
+ SDE_POWER_HANDLE_DBUS_ID_MAX,
+};
+
+/**
* struct sde_power_client: stores the power client for sde driver
* @name: name of the client
* @usecase_ndx: current regs bus vote type
@@ -152,7 +165,8 @@
struct device *dev;
u32 current_usecase_ndx;
u32 reg_bus_hdl;
- struct sde_power_data_bus_handle data_bus_handle;
+ struct sde_power_data_bus_handle data_bus_handle
+ [SDE_POWER_HANDLE_DBUS_ID_MAX];
struct list_head event_list;
struct sde_rsc_client *rsc_client;
bool rsc_client_init;
@@ -254,6 +268,7 @@
* @phandle: power handle containing the resources
* @client: client information to set quota
* @bus_client: real-time or non-real-time bus client
+ * @bus_id: identifier of data bus, see SDE_POWER_HANDLE_DBUS_ID
* @ab_quota: arbitrated bus bandwidth
* @ib_quota: instantaneous bus bandwidth
*
@@ -261,7 +276,8 @@
*/
int sde_power_data_bus_set_quota(struct sde_power_handle *phandle,
struct sde_power_client *pclient,
- int bus_client, u64 ab_quota, u64 ib_quota);
+ int bus_client, u32 bus_id,
+ u64 ab_quota, u64 ib_quota);
/**
* sde_power_data_bus_bandwidth_ctrl() - control data bus bandwidth enable
@@ -298,4 +314,11 @@
void sde_power_handle_unregister_event(struct sde_power_handle *phandle,
struct sde_power_event *event);
+/**
+ * sde_power_handle_get_dbus_name - get name of given data bus identifier
+ * @bus_id: data bus identifier
+ * Return: Pointer to name string if success; NULL otherwise
+ */
+const char *sde_power_handle_get_dbus_name(u32 bus_id);
+
#endif /* _SDE_POWER_HANDLE_H_ */
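
With the new bus_id parameter, a client can vote each leg of the data path independently. A hedged sketch; phandle and pclient are assumed to come from the usual sde_power_resource_init()/sde_power_client_create() flow, and the bandwidth figures are illustrative only:

	/* hypothetical split vote: full rate on MNOC, derated on LLCC */
	sde_power_data_bus_set_quota(phandle, pclient,
			SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT,
			SDE_POWER_HANDLE_DBUS_ID_MNOC,
			6400000000ULL, 6400000000ULL);
	sde_power_data_bus_set_quota(phandle, pclient,
			SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT,
			SDE_POWER_HANDLE_DBUS_ID_LLCC,
			3200000000ULL, 3200000000ULL);
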
diff --git a/drivers/gpu/drm/msm/sde_rsc.c b/drivers/gpu/drm/msm/sde_rsc.c
index caa8cdf..8447916 100644
--- a/drivers/gpu/drm/msm/sde_rsc.c
+++ b/drivers/gpu/drm/msm/sde_rsc.c
@@ -657,13 +657,14 @@
* sde_rsc_client_vote() - ab/ib vote from rsc client
*
* @client: Client pointer provided by sde_rsc_client_create().
+ * @bus_id: identifier of the data bus to vote on
* @ab: aggregated bandwidth vote from client.
* @ib: instant bandwidth vote from client.
*
* Return: error code.
*/
int sde_rsc_client_vote(struct sde_rsc_client *caller_client,
- u64 ab_vote, u64 ib_vote)
+ u32 bus_id, u64 ab_vote, u64 ib_vote)
{
int rc = 0;
struct sde_rsc_priv *rsc;
@@ -717,7 +718,8 @@
rpmh_invalidate(rsc->disp_rsc);
sde_power_data_bus_set_quota(&rsc->phandle, rsc->pclient,
- SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT, ab_vote, ib_vote);
+ SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT,
+ bus_id, ab_vote, ib_vote);
rpmh_flush(rsc->disp_rsc);
if (rsc->hw_ops.tcs_use_ok)
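
An RSC client likewise issues one vote per bus leg now. A sketch with assumed vote values:

	/* hypothetical per-leg votes through the RSC client */
	sde_rsc_client_vote(caller_client, SDE_POWER_HANDLE_DBUS_ID_MNOC,
			mnoc_ab, mnoc_ib);
	sde_rsc_client_vote(caller_client, SDE_POWER_HANDLE_DBUS_ID_LLCC,
			llcc_ab, llcc_ib);
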
diff --git a/drivers/gpu/msm/Makefile b/drivers/gpu/msm/Makefile
index f513207..0058226 100644
--- a/drivers/gpu/msm/Makefile
+++ b/drivers/gpu/msm/Makefile
@@ -38,6 +38,7 @@
adreno_a6xx_snapshot.o \
adreno_a4xx_preempt.o \
adreno_a5xx_preempt.o \
+ adreno_a6xx_preempt.o \
adreno_sysfs.o \
adreno.o \
adreno_cp_parser.o \
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index 58ef5ee..f4552b6 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -70,6 +70,15 @@
#define A6XX_CP_ADDR_MODE_CNTL 0x842
#define A6XX_CP_PROTECT_CNTL 0x84F
#define A6XX_CP_PROTECT_REG 0x850
+#define A6XX_CP_CONTEXT_SWITCH_CNTL 0x8A0
+#define A6XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO 0x8A1
+#define A6XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI 0x8A2
+#define A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_LO 0x8A3
+#define A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_HI 0x8A4
+#define A6XX_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_LO 0x8A5
+#define A6XX_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_HI 0x8A6
+#define A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_LO 0x8A7
+#define A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_HI 0x8A8
#define A6XX_CP_PERFCTR_CP_SEL_0 0x8D0
#define A6XX_CP_PERFCTR_CP_SEL_1 0x8D1
#define A6XX_CP_PERFCTR_CP_SEL_2 0x8D2
@@ -590,6 +599,7 @@
#define A6XX_RB_PERFCTR_CMP_SEL_1 0x8E2D
#define A6XX_RB_PERFCTR_CMP_SEL_2 0x8E2E
#define A6XX_RB_PERFCTR_CMP_SEL_3 0x8E2F
+#define A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE 0x8E50
/* PC registers */
#define A6XX_PC_DBG_ECO_CNTL 0x9E00
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 7a6581c..c7e3ad7 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -845,11 +845,14 @@
unsigned int *cmds,
struct kgsl_context *context);
int (*preemption_yield_enable)(unsigned int *);
+ unsigned int (*preemption_set_marker)(unsigned int *cmds, int start);
unsigned int (*preemption_post_ibsubmit)(
struct adreno_device *adreno_dev,
unsigned int *cmds);
int (*preemption_init)(struct adreno_device *);
void (*preemption_schedule)(struct adreno_device *);
+ int (*preemption_context_init)(struct kgsl_context *);
+ void (*preemption_context_destroy)(struct kgsl_context *);
void (*enable_64bit)(struct adreno_device *);
void (*clk_set_options)(struct adreno_device *,
const char *, struct clk *, bool on);
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index 33854ea..ad0ce44 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -29,9 +29,6 @@
#include "kgsl_gmu.h"
#include "kgsl_trace.h"
-#define A6XX_CP_RB_CNTL_DEFAULT (((ilog2(4) << 8) & 0x1F00) | \
- (ilog2(KGSL_RB_DWORDS >> 1) & 0x3F))
-
#define MIN_HBB 13
#define A6XX_LLC_NUM_GPU_SCIDS 5
@@ -482,6 +479,12 @@
if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_TWO_PASS_USE_WFI))
kgsl_regrmw(device, A6XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
+ /* Enable the GMEM save/restore feature for preemption */
+ if (adreno_is_preemption_enabled(adreno_dev))
+ kgsl_regwrite(device, A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE,
+ 0x1);
+
+ a6xx_preemption_start(adreno_dev);
a6xx_protect_init(adreno_dev);
}
@@ -612,6 +615,70 @@
}
/*
+ * Follow the ME_INIT sequence with a preemption yield to allow the GPU to move
+ * to a different ringbuffer, if desired
+ */
+static int _preemption_init(struct adreno_device *adreno_dev,
+ struct adreno_ringbuffer *rb, unsigned int *cmds,
+ struct kgsl_context *context)
+{
+ unsigned int *cmds_orig = cmds;
+
+ /* Turn CP protection OFF */
+ *cmds++ = cp_type7_packet(CP_SET_PROTECTED_MODE, 1);
+ *cmds++ = 0;
+
+ *cmds++ = cp_type7_packet(CP_SET_PSEUDO_REGISTER, 6);
+ *cmds++ = 1;
+ cmds += cp_gpuaddr(adreno_dev, cmds,
+ rb->preemption_desc.gpuaddr);
+
+ *cmds++ = 2;
+ cmds += cp_gpuaddr(adreno_dev, cmds, 0);
+
+ /* Turn CP protection ON */
+ *cmds++ = cp_type7_packet(CP_SET_PROTECTED_MODE, 1);
+ *cmds++ = 1;
+
+ *cmds++ = cp_type7_packet(CP_CONTEXT_SWITCH_YIELD, 4);
+ cmds += cp_gpuaddr(adreno_dev, cmds, 0x0);
+ *cmds++ = 0;
+ /* generate interrupt on preemption completion */
+ *cmds++ = 0;
+
+ return cmds - cmds_orig;
+}
+
+static int a6xx_post_start(struct adreno_device *adreno_dev)
+{
+ int ret;
+ unsigned int *cmds, *start;
+ struct adreno_ringbuffer *rb = adreno_dev->cur_rb;
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+
+ if (!adreno_is_preemption_enabled(adreno_dev))
+ return 0;
+
+ cmds = adreno_ringbuffer_allocspace(rb, 42);
+ if (IS_ERR(cmds)) {
+ KGSL_DRV_ERR(device, "error allocating preemption init cmds\n");
+ return PTR_ERR(cmds);
+ }
+ start = cmds;
+
+ cmds += _preemption_init(adreno_dev, rb, cmds, NULL);
+
+ rb->_wptr = rb->_wptr - (42 - (cmds - start));
+
+ ret = adreno_ringbuffer_submit_spin(rb, NULL, 2000);
+ if (ret)
+ adreno_spin_idle_debug(adreno_dev,
+ "hw preemption initialization failed to idle\n");
+
+ return ret;
+}
+
+/*
* a6xx_rb_start() - Start the ringbuffer
* @adreno_dev: Pointer to adreno device
* @start_type: Warm or cold start
@@ -651,7 +718,11 @@
return ret;
/* GPU comes up in secured mode, make it unsecured by default */
- return adreno_set_unsecured_mode(adreno_dev, rb);
+ ret = adreno_set_unsecured_mode(adreno_dev, rb);
+ if (ret)
+ return ret;
+
+ return a6xx_post_start(adreno_dev);
}
static int _load_firmware(struct kgsl_device *device, const char *fwfile,
@@ -678,9 +749,6 @@
}
release_firmware(fw);
-
- ret = _load_gmu_firmware(device);
-
return ret;
}
@@ -1567,9 +1635,18 @@
*/
static int a6xx_microcode_read(struct adreno_device *adreno_dev)
{
- return _load_firmware(KGSL_DEVICE(adreno_dev),
- adreno_dev->gpucore->sqefw_name,
- ADRENO_FW(adreno_dev, ADRENO_FW_SQE));
+ int ret;
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ struct adreno_firmware *sqe_fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
+
+ if (sqe_fw->memdesc.hostptr == NULL) {
+ ret = _load_firmware(device, adreno_dev->gpucore->sqefw_name,
+ sqe_fw);
+ if (ret)
+ return ret;
+ }
+
+ return _load_gmu_firmware(device);
}
#define VBIF_RESET_ACK_TIMEOUT 100
@@ -2080,7 +2157,7 @@
/* 6 - RBBM_ATB_ASYNC_OVERFLOW */
ADRENO_IRQ_CALLBACK(a6xx_err_callback),
ADRENO_IRQ_CALLBACK(NULL), /* 7 - GPC_ERR */
- ADRENO_IRQ_CALLBACK(NULL),/* 8 - CP_SW */
+ ADRENO_IRQ_CALLBACK(a6xx_preemption_callback),/* 8 - CP_SW */
ADRENO_IRQ_CALLBACK(a6xx_cp_hw_err_callback), /* 9 - CP_HW_ERROR */
ADRENO_IRQ_CALLBACK(NULL), /* 10 - CP_CCU_FLUSH_DEPTH_TS */
ADRENO_IRQ_CALLBACK(NULL), /* 11 - CP_CCU_FLUSH_COLOR_TS */
@@ -2574,6 +2651,11 @@
ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BUFSZ, A6XX_CP_IB2_REM_SIZE),
ADRENO_REG_DEFINE(ADRENO_REG_CP_ROQ_ADDR, A6XX_CP_ROQ_DBG_ADDR),
ADRENO_REG_DEFINE(ADRENO_REG_CP_ROQ_DATA, A6XX_CP_ROQ_DBG_DATA),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_PREEMPT, A6XX_CP_CONTEXT_SWITCH_CNTL),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_LO,
+ A6XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_HI,
+ A6XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_STATUS, A6XX_RBBM_STATUS),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_STATUS3, A6XX_RBBM_STATUS3),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_CTL, A6XX_RBBM_PERFCTR_CNTL),
@@ -2687,4 +2769,11 @@
.iommu_fault_block = a6xx_iommu_fault_block,
.reset = a6xx_reset,
.soft_reset = a6xx_soft_reset,
+ .preemption_pre_ibsubmit = a6xx_preemption_pre_ibsubmit,
+ .preemption_post_ibsubmit = a6xx_preemption_post_ibsubmit,
+ .preemption_init = a6xx_preemption_init,
+ .preemption_schedule = a6xx_preemption_schedule,
+ .preemption_set_marker = a6xx_preemption_set_marker,
+ .preemption_context_init = a6xx_preemption_context_init,
+ .preemption_context_destroy = a6xx_preemption_context_destroy,
};
diff --git a/drivers/gpu/msm/adreno_a6xx.h b/drivers/gpu/msm/adreno_a6xx.h
index 4b96f56..ddf89d6 100644
--- a/drivers/gpu/msm/adreno_a6xx.h
+++ b/drivers/gpu/msm/adreno_a6xx.h
@@ -23,10 +23,93 @@
#define CP_CLUSTER_SP_PS 0x4
#define CP_CLUSTER_PS 0x5
+/**
+ * struct a6xx_cp_preemption_record - CP context record for
+ * preemption.
+ * @magic: (00) Value at this offset must be equal to
+ * A6XX_CP_CTXRECORD_MAGIC_REF.
+ * @info: (04) Type of record. Written non-zero (usually) by CP.
+ * We must set it to zero for all ringbuffers.
+ * @errno: (08) Error code. Initialize this to A6XX_CP_CTXRECORD_ERROR_NONE.
+ * CP will update to another value if a preemption error occurs.
+ * @data: (12) DATA field in YIELD and SET_MARKER packets.
+ * Written by CP when switching out. Not used on switch-in. Initialized to 0.
+ * @cntl: (16) RB_CNTL, saved and restored by CP. We must initialize this.
+ * @rptr: (20) RB_RPTR, saved and restored by CP. We must initialize this.
+ * @wptr: (24) RB_WPTR, saved and restored by CP. We must initialize this.
+ * @_pad28: (28) Reserved/padding.
+ * @rptr_addr: (32) RB_RPTR_ADDR_LO|HI saved and restored. We must initialize.
+ * @rbase: (40) RB_BASE_LO|HI, saved and restored by CP.
+ * @counter: (48) Pointer to preemption counter.
+ */
+struct a6xx_cp_preemption_record {
+ uint32_t magic;
+ uint32_t info;
+ uint32_t errno;
+ uint32_t data;
+ uint32_t cntl;
+ uint32_t rptr;
+ uint32_t wptr;
+ uint32_t _pad28;
+ uint64_t rptr_addr;
+ uint64_t rbase;
+ uint64_t counter;
+};
+
+/**
+ * struct a6xx_cp_smmu_info - CP preemption SMMU info.
+ * @magic: (00) The value at this offset must be equal to
+ * A6XX_CP_SMMU_INFO_MAGIC_REF.
+ * @_pad4: (04) Reserved/padding
+ * @ttbr0: (08) Base address of the page table for the
+ * incoming context.
+ * @asid: (16) Address space identifier.
+ * @context_idr: (20) Context Identification Register value.
+ */
+struct a6xx_cp_smmu_info {
+ uint32_t magic;
+ uint32_t _pad4;
+ uint64_t ttbr0;
+ uint32_t asid;
+ uint32_t context_idr;
+};
+
+#define A6XX_CP_SMMU_INFO_MAGIC_REF 0x3618CDA3UL
+
+#define A6XX_CP_CTXRECORD_MAGIC_REF 0xAE399D6EUL
+/* Size of each CP preemption record */
+#define A6XX_CP_CTXRECORD_SIZE_IN_BYTES (2112 * 1024)
+/* Size of the preemption counter block (in bytes) */
+#define A6XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE (16 * 4)
+/* Size of the user context record block (in bytes) */
+#define A6XX_CP_CTXRECORD_USER_RESTORE_SIZE (192 * 1024)
+/* Size of the performance counter save/restore block (in bytes) */
+#define A6XX_CP_PERFCOUNTER_SAVE_RESTORE_SIZE (4 * 1024)
+
+#define A6XX_CP_RB_CNTL_DEFAULT (((ilog2(4) << 8) & 0x1F00) | \
+ (ilog2(KGSL_RB_DWORDS >> 1) & 0x3F))
+
+/* Preemption functions */
+void a6xx_preemption_trigger(struct adreno_device *adreno_dev);
+void a6xx_preemption_schedule(struct adreno_device *adreno_dev);
+void a6xx_preemption_start(struct adreno_device *adreno_dev);
+int a6xx_preemption_init(struct adreno_device *adreno_dev);
+
+unsigned int a6xx_preemption_post_ibsubmit(struct adreno_device *adreno_dev,
+ unsigned int *cmds);
+unsigned int a6xx_preemption_pre_ibsubmit(struct adreno_device *adreno_dev,
+ struct adreno_ringbuffer *rb,
+ unsigned int *cmds, struct kgsl_context *context);
+
+unsigned int a6xx_preemption_set_marker(unsigned int *cmds, int start);
+
+void a6xx_preemption_callback(struct adreno_device *adreno_dev, int bit);
+
+int a6xx_preemption_context_init(struct kgsl_context *context);
+
+void a6xx_preemption_context_destroy(struct kgsl_context *context);
void a6xx_snapshot(struct adreno_device *adreno_dev,
struct kgsl_snapshot *snapshot);
void a6xx_crashdump_init(struct adreno_device *adreno_dev);
-
#endif
diff --git a/drivers/gpu/msm/adreno_a6xx_preempt.c b/drivers/gpu/msm/adreno_a6xx_preempt.c
new file mode 100644
index 0000000..00325e5
--- /dev/null
+++ b/drivers/gpu/msm/adreno_a6xx_preempt.c
@@ -0,0 +1,654 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "adreno.h"
+#include "adreno_a6xx.h"
+#include "a6xx_reg.h"
+#include "adreno_trace.h"
+#include "adreno_pm4types.h"
+
+#define PREEMPT_RECORD(_field) \
+ offsetof(struct a6xx_cp_preemption_record, _field)
+
+#define PREEMPT_SMMU_RECORD(_field) \
+ offsetof(struct a6xx_cp_smmu_info, _field)
+
+enum {
+ SET_PSEUDO_REGISTER_SAVE_REGISTER_SMMU_INFO = 0,
+ SET_PSEUDO_REGISTER_SAVE_REGISTER_PRIV_NON_SECURE_SAVE_ADDR,
+ SET_PSEUDO_REGISTER_SAVE_REGISTER_PRIV_SECURE_SAVE_ADDR,
+ SET_PSEUDO_REGISTER_SAVE_REGISTER_NON_PRIV_SAVE_ADDR,
+ SET_PSEUDO_REGISTER_SAVE_REGISTER_COUNTER,
+};
+
+static void _update_wptr(struct adreno_device *adreno_dev, bool reset_timer)
+{
+ struct adreno_ringbuffer *rb = adreno_dev->cur_rb;
+ unsigned int wptr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rb->preempt_lock, flags);
+
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_WPTR, &wptr);
+
+ if (wptr != rb->wptr) {
+ adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_WPTR,
+ rb->wptr);
+ /*
+ * In case something got submitted while preemption was
+ * ongoing, reset the timer.
+ */
+ reset_timer = true;
+ }
+
+ if (reset_timer)
+ rb->dispatch_q.expires = jiffies +
+ msecs_to_jiffies(adreno_drawobj_timeout);
+
+ spin_unlock_irqrestore(&rb->preempt_lock, flags);
+}
+
+static inline bool adreno_move_preempt_state(struct adreno_device *adreno_dev,
+ enum adreno_preempt_states old, enum adreno_preempt_states new)
+{
+ return (atomic_cmpxchg(&adreno_dev->preempt.state, old, new) == old);
+}
+
+static void _a6xx_preemption_done(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ unsigned int status;
+
+ /*
+ * In the very unlikely case that the power is off, do nothing - the
+ * state will be reset on power up and everybody will be happy
+ */
+
+ if (!kgsl_state_is_awake(device))
+ return;
+
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT, &status);
+
+ if (status & 0x1) {
+ KGSL_DRV_ERR(device,
+ "Preemption not complete: status=%X cur=%d R/W=%X/%X next=%d R/W=%X/%X\n",
+ status, adreno_dev->cur_rb->id,
+ adreno_get_rptr(adreno_dev->cur_rb),
+ adreno_dev->cur_rb->wptr, adreno_dev->next_rb->id,
+ adreno_get_rptr(adreno_dev->next_rb),
+ adreno_dev->next_rb->wptr);
+
+ /* Set a fault and restart */
+ adreno_set_gpu_fault(adreno_dev, ADRENO_PREEMPT_FAULT);
+ adreno_dispatcher_schedule(device);
+
+ return;
+ }
+
+ del_timer_sync(&adreno_dev->preempt.timer);
+
+ trace_adreno_preempt_done(adreno_dev->cur_rb, adreno_dev->next_rb);
+
+ /* Clean up all the bits */
+ adreno_dev->prev_rb = adreno_dev->cur_rb;
+ adreno_dev->cur_rb = adreno_dev->next_rb;
+ adreno_dev->next_rb = NULL;
+
+ /* Update the wptr for the new command queue */
+ _update_wptr(adreno_dev, true);
+
+ /* Update the dispatcher timer for the new command queue */
+ mod_timer(&adreno_dev->dispatcher.timer,
+ adreno_dev->cur_rb->dispatch_q.expires);
+
+ /* Clear the preempt state */
+ adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE);
+}
+
+static void _a6xx_preemption_fault(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ unsigned int status;
+
+ /*
+ * If the power is on check the preemption status one more time - if it
+ * was successful then just transition to the complete state
+ */
+ if (kgsl_state_is_awake(device)) {
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT, &status);
+
+ if (status == 0) {
+ adreno_set_preempt_state(adreno_dev,
+ ADRENO_PREEMPT_COMPLETE);
+
+ adreno_dispatcher_schedule(device);
+ return;
+ }
+ }
+
+ KGSL_DRV_ERR(device,
+ "Preemption timed out: cur=%d R/W=%X/%X, next=%d R/W=%X/%X\n",
+ adreno_dev->cur_rb->id,
+ adreno_get_rptr(adreno_dev->cur_rb), adreno_dev->cur_rb->wptr,
+ adreno_dev->next_rb->id,
+ adreno_get_rptr(adreno_dev->next_rb),
+ adreno_dev->next_rb->wptr);
+
+ adreno_set_gpu_fault(adreno_dev, ADRENO_PREEMPT_FAULT);
+ adreno_dispatcher_schedule(device);
+}
+
+static void _a6xx_preemption_worker(struct work_struct *work)
+{
+ struct adreno_preemption *preempt = container_of(work,
+ struct adreno_preemption, work);
+ struct adreno_device *adreno_dev = container_of(preempt,
+ struct adreno_device, preempt);
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+
+ /* Need to take the mutex to make sure that the power stays on */
+ mutex_lock(&device->mutex);
+
+ if (adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_FAULTED))
+ _a6xx_preemption_fault(adreno_dev);
+
+ mutex_unlock(&device->mutex);
+}
+
+static void _a6xx_preemption_timer(unsigned long data)
+{
+ struct adreno_device *adreno_dev = (struct adreno_device *) data;
+
+ /* We should only be here from a triggered state */
+ if (!adreno_move_preempt_state(adreno_dev,
+ ADRENO_PREEMPT_TRIGGERED, ADRENO_PREEMPT_FAULTED))
+ return;
+
+ /* Schedule the worker to take care of the details */
+ queue_work(system_unbound_wq, &adreno_dev->preempt.work);
+}
+
+/* Find the highest priority active ringbuffer */
+static struct adreno_ringbuffer *a6xx_next_ringbuffer(
+ struct adreno_device *adreno_dev)
+{
+ struct adreno_ringbuffer *rb;
+ unsigned long flags;
+ unsigned int i;
+
+ FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
+ bool empty;
+
+ spin_lock_irqsave(&rb->preempt_lock, flags);
+ empty = adreno_rb_empty(rb);
+ spin_unlock_irqrestore(&rb->preempt_lock, flags);
+
+ if (empty == false)
+ return rb;
+ }
+
+ return NULL;
+}
+
+void a6xx_preemption_trigger(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
+ struct adreno_ringbuffer *next;
+ uint64_t ttbr0;
+ unsigned int contextidr;
+ unsigned long flags;
+ uint32_t preempt_level = 0, usesgmem = 1, skipsaverestore = 0;
+
+ /* Put ourselves into a possible trigger state */
+ if (!adreno_move_preempt_state(adreno_dev,
+ ADRENO_PREEMPT_NONE, ADRENO_PREEMPT_START))
+ return;
+
+ /* Get the next ringbuffer to preempt in */
+ next = a6xx_next_ringbuffer(adreno_dev);
+
+ /*
+ * Nothing to do if every ringbuffer is empty or if the current
+ * ringbuffer is the only active one
+ */
+ if (next == NULL || next == adreno_dev->cur_rb) {
+ /*
+ * Update any critical things that might have been skipped while
+ * we were looking for a new ringbuffer
+ */
+
+ if (next != NULL) {
+ _update_wptr(adreno_dev, false);
+
+ mod_timer(&adreno_dev->dispatcher.timer,
+ adreno_dev->cur_rb->dispatch_q.expires);
+ }
+
+ adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE);
+ return;
+ }
+
+ /* Turn off the dispatcher timer */
+ del_timer(&adreno_dev->dispatcher.timer);
+
+ /*
+ * This is the most critical section - we need to take care not to race
+ * until we have programmed the CP for the switch
+ */
+
+ spin_lock_irqsave(&next->preempt_lock, flags);
+
+ /*
+ * Get the pagetable from the pagetable info.
+ * The pagetable_desc is allocated and mapped at probe time, and
+ * preemption_desc at init time, so no need to check if
+ * sharedmem accesses to these memdescs succeed.
+ */
+ kgsl_sharedmem_readq(&next->pagetable_desc, &ttbr0,
+ PT_INFO_OFFSET(ttbr0));
+ kgsl_sharedmem_readl(&next->pagetable_desc, &contextidr,
+ PT_INFO_OFFSET(contextidr));
+
+ kgsl_sharedmem_writel(device, &next->preemption_desc,
+ PREEMPT_RECORD(wptr), next->wptr);
+
+ spin_unlock_irqrestore(&next->preempt_lock, flags);
+
+ /* And write it to the smmu info */
+ kgsl_sharedmem_writeq(device, &iommu->smmu_info,
+ PREEMPT_SMMU_RECORD(ttbr0), ttbr0);
+ kgsl_sharedmem_writel(device, &iommu->smmu_info,
+ PREEMPT_SMMU_RECORD(context_idr), contextidr);
+
+ kgsl_regwrite(device,
+ A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_LO,
+ lower_32_bits(next->preemption_desc.gpuaddr));
+ kgsl_regwrite(device,
+ A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_HI,
+ upper_32_bits(next->preemption_desc.gpuaddr));
+
+ if (next->drawctxt_active) {
+ struct kgsl_context *context = &next->drawctxt_active->base;
+ uint64_t gpuaddr = context->user_ctxt_record->memdesc.gpuaddr;
+
+ kgsl_regwrite(device,
+ A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_LO,
+ lower_32_bits(gpuaddr));
+ kgsl_regwrite(device,
+ A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_HI,
+ upper_32_bits(gpuaddr));
+ }
+
+ adreno_dev->next_rb = next;
+
+ /* Start the timer to detect a stuck preemption */
+ mod_timer(&adreno_dev->preempt.timer,
+ jiffies + msecs_to_jiffies(ADRENO_PREEMPT_TIMEOUT));
+
+ trace_adreno_preempt_trigger(adreno_dev->cur_rb, adreno_dev->next_rb);
+
+ adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_TRIGGERED);
+
+ /* Trigger the preemption */
+ adreno_writereg(adreno_dev, ADRENO_REG_CP_PREEMPT,
+ ((preempt_level << 6) & 0xC0) |
+ ((skipsaverestore << 9) & 0x200) |
+ ((usesgmem << 8) & 0x100) | 0x1);
+}
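
The trigger word assembled in the adreno_writereg() above packs four fields; my reading of the layout, inferred from the masks in this function rather than from a hardware spec, expressed as a hypothetical helper:

	/* bit 0: trigger, bits 6-7: preempt level,
	 * bit 8: use GMEM save/restore, bit 9: skip save/restore
	 */
	static u32 a6xx_preempt_trigger_word(u32 preempt_level, u32 usesgmem,
			u32 skipsaverestore)
	{
		return ((preempt_level << 6) & 0xC0) |
			((skipsaverestore << 9) & 0x200) |
			((usesgmem << 8) & 0x100) | 0x1;
	}
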
+
+void a6xx_preemption_callback(struct adreno_device *adreno_dev, int bit)
+{
+ unsigned int status;
+
+ if (!adreno_move_preempt_state(adreno_dev,
+ ADRENO_PREEMPT_TRIGGERED, ADRENO_PREEMPT_PENDING))
+ return;
+
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT, &status);
+
+ if (status & 0x1) {
+ KGSL_DRV_ERR(KGSL_DEVICE(adreno_dev),
+ "preempt interrupt with non-zero status: %X\n", status);
+
+ /*
+ * Under the assumption that this is a race between the
+ * interrupt and the register, schedule the worker to clean up.
+ * If the status still hasn't resolved itself by the time we get
+ * there then we have to assume something bad happened
+ */
+ adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_COMPLETE);
+ adreno_dispatcher_schedule(KGSL_DEVICE(adreno_dev));
+ return;
+ }
+
+ del_timer(&adreno_dev->preempt.timer);
+
+ trace_adreno_preempt_done(adreno_dev->cur_rb,
+ adreno_dev->next_rb);
+
+ adreno_dev->prev_rb = adreno_dev->cur_rb;
+ adreno_dev->cur_rb = adreno_dev->next_rb;
+ adreno_dev->next_rb = NULL;
+
+ /* Update the wptr if it changed while preemption was ongoing */
+ _update_wptr(adreno_dev, true);
+
+ /* Update the dispatcher timer for the new command queue */
+ mod_timer(&adreno_dev->dispatcher.timer,
+ adreno_dev->cur_rb->dispatch_q.expires);
+
+ adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE);
+
+ a6xx_preemption_trigger(adreno_dev);
+}
+
+void a6xx_preemption_schedule(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+
+ if (!adreno_is_preemption_enabled(adreno_dev))
+ return;
+
+ mutex_lock(&device->mutex);
+
+ if (adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_COMPLETE))
+ _a6xx_preemption_done(adreno_dev);
+
+ a6xx_preemption_trigger(adreno_dev);
+
+ mutex_unlock(&device->mutex);
+}
+
+unsigned int a6xx_preemption_set_marker(unsigned int *cmds, int start)
+{
+ *cmds++ = cp_type7_packet(CP_SET_MARKER, 1);
+
+ /*
+ * Indicate the beginning and end of the IB1 list with a SET_MARKER.
+ * Among other things, this will implicitly enable and disable
+ * preemption respectively.
+ */
+ if (start)
+ *cmds++ = 0xD;
+ else
+ *cmds++ = 0xE;
+
+ return 2;
+}
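
The submit path in adreno_ringbuffer.c (further down in this diff) brackets the IB1 list with one start and one end marker, which is why it reserves four extra dwords:

	/* two markers, two dwords each: the "dwords += 4" below */
	cmds += gpudev->preemption_set_marker(cmds, 1);	/* 0xD: IB1 begin */
	/* ... IB1 commands ... */
	cmds += gpudev->preemption_set_marker(cmds, 0);	/* 0xE: IB1 end */
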
+
+unsigned int a6xx_preemption_pre_ibsubmit(
+ struct adreno_device *adreno_dev,
+ struct adreno_ringbuffer *rb,
+ unsigned int *cmds, struct kgsl_context *context)
+{
+ unsigned int *cmds_orig = cmds;
+
+ if (context)
+ *cmds++ = cp_type7_packet(CP_SET_PSEUDO_REGISTER, 15);
+ else
+ *cmds++ = cp_type7_packet(CP_SET_PSEUDO_REGISTER, 12);
+
+ /* NULL SMMU_INFO buffer - we track in KMD */
+ *cmds++ = SET_PSEUDO_REGISTER_SAVE_REGISTER_SMMU_INFO;
+ cmds += cp_gpuaddr(adreno_dev, cmds, 0x0);
+
+ *cmds++ = SET_PSEUDO_REGISTER_SAVE_REGISTER_PRIV_NON_SECURE_SAVE_ADDR;
+ cmds += cp_gpuaddr(adreno_dev, cmds, rb->preemption_desc.gpuaddr);
+
+ *cmds++ = SET_PSEUDO_REGISTER_SAVE_REGISTER_PRIV_SECURE_SAVE_ADDR;
+ cmds += cp_gpuaddr(adreno_dev, cmds, 0);
+
+ if (context) {
+ uint64_t gpuaddr = context->user_ctxt_record->memdesc.gpuaddr;
+
+ *cmds++ = SET_PSEUDO_REGISTER_SAVE_REGISTER_NON_PRIV_SAVE_ADDR;
+ cmds += cp_gpuaddr(adreno_dev, cmds, gpuaddr);
+ }
+
+ /*
+ * There is no need to re-specify this address when we are about
+ * to trigger preemption: CP stores the address given here in the
+ * CP_SET_PSEUDO_REGISTER payload into the context record, and so
+ * knows where to restore the saved perfcounters for the new
+ * ringbuffer from.
+ */
+ *cmds++ = SET_PSEUDO_REGISTER_SAVE_REGISTER_COUNTER;
+ cmds += cp_gpuaddr(adreno_dev, cmds,
+ rb->perfcounter_save_restore_desc.gpuaddr);
+
+ return (unsigned int) (cmds - cmds_orig);
+}
+
+unsigned int a6xx_preemption_post_ibsubmit(struct adreno_device *adreno_dev,
+ unsigned int *cmds)
+{
+ unsigned int *cmds_orig = cmds;
+
+ *cmds++ = cp_type7_packet(CP_CONTEXT_SWITCH_YIELD, 4);
+ cmds += cp_gpuaddr(adreno_dev, cmds, 0x0);
+ *cmds++ = 1;
+ *cmds++ = 0;
+
+ return (unsigned int) (cmds - cmds_orig);
+}
+
+void a6xx_preemption_start(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
+ struct adreno_ringbuffer *rb;
+ unsigned int i;
+
+ if (!adreno_is_preemption_enabled(adreno_dev))
+ return;
+
+ /* Force the state to be clear */
+ adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE);
+
+ /* smmu_info is allocated and mapped in a6xx_preemption_iommu_init */
+ kgsl_sharedmem_writel(device, &iommu->smmu_info,
+ PREEMPT_SMMU_RECORD(magic), A6XX_CP_SMMU_INFO_MAGIC_REF);
+ kgsl_sharedmem_writeq(device, &iommu->smmu_info,
+ PREEMPT_SMMU_RECORD(ttbr0), MMU_DEFAULT_TTBR0(device));
+
+ /* The CP doesn't use the asid record, so poison it */
+ kgsl_sharedmem_writel(device, &iommu->smmu_info,
+ PREEMPT_SMMU_RECORD(asid), 0xDECAFBAD);
+ kgsl_sharedmem_writel(device, &iommu->smmu_info,
+ PREEMPT_SMMU_RECORD(context_idr),
+ MMU_DEFAULT_CONTEXTIDR(device));
+
+ adreno_writereg64(adreno_dev,
+ ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_LO,
+ ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_HI,
+ iommu->smmu_info.gpuaddr);
+
+ FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
+ /*
+ * preemption_desc is allocated and mapped at init time,
+ * so no need to check sharedmem_writel return value
+ */
+ kgsl_sharedmem_writel(device, &rb->preemption_desc,
+ PREEMPT_RECORD(rptr), 0);
+ kgsl_sharedmem_writel(device, &rb->preemption_desc,
+ PREEMPT_RECORD(wptr), 0);
+
+ adreno_ringbuffer_set_pagetable(rb,
+ device->mmu.defaultpagetable);
+ }
+}
+
+static int a6xx_preemption_ringbuffer_init(struct adreno_device *adreno_dev,
+ struct adreno_ringbuffer *rb, uint64_t counteraddr)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ int ret;
+
+ ret = kgsl_allocate_global(device, &rb->preemption_desc,
+ A6XX_CP_CTXRECORD_SIZE_IN_BYTES, 0, KGSL_MEMDESC_PRIVILEGED,
+ "preemption_desc");
+ if (ret)
+ return ret;
+
+ ret = kgsl_allocate_global(device, &rb->perfcounter_save_restore_desc,
+ A6XX_CP_PERFCOUNTER_SAVE_RESTORE_SIZE, 0,
+ KGSL_MEMDESC_PRIVILEGED, "perfcounter_save_restore_desc");
+ if (ret)
+ return ret;
+
+ kgsl_sharedmem_writel(device, &rb->preemption_desc,
+ PREEMPT_RECORD(magic), A6XX_CP_CTXRECORD_MAGIC_REF);
+ kgsl_sharedmem_writel(device, &rb->preemption_desc,
+ PREEMPT_RECORD(info), 0);
+ kgsl_sharedmem_writel(device, &rb->preemption_desc,
+ PREEMPT_RECORD(data), 0);
+ kgsl_sharedmem_writel(device, &rb->preemption_desc,
+ PREEMPT_RECORD(cntl), A6XX_CP_RB_CNTL_DEFAULT);
+ kgsl_sharedmem_writel(device, &rb->preemption_desc,
+ PREEMPT_RECORD(rptr), 0);
+ kgsl_sharedmem_writel(device, &rb->preemption_desc,
+ PREEMPT_RECORD(wptr), 0);
+ kgsl_sharedmem_writeq(device, &rb->preemption_desc,
+ PREEMPT_RECORD(rptr_addr), SCRATCH_RPTR_GPU_ADDR(device,
+ rb->id));
+ kgsl_sharedmem_writeq(device, &rb->preemption_desc,
+ PREEMPT_RECORD(rbase), rb->buffer_desc.gpuaddr);
+ kgsl_sharedmem_writeq(device, &rb->preemption_desc,
+ PREEMPT_RECORD(counter), counteraddr);
+
+ return 0;
+}
+
+#ifdef CONFIG_QCOM_KGSL_IOMMU
+static int a6xx_preemption_iommu_init(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
+
+ /* Allocate mem for storing preemption smmu record */
+ return kgsl_allocate_global(device, &iommu->smmu_info, PAGE_SIZE,
+ KGSL_MEMFLAGS_GPUREADONLY, KGSL_MEMDESC_PRIVILEGED,
+ "smmu_info");
+}
+
+static void a6xx_preemption_iommu_close(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
+
+ kgsl_free_global(device, &iommu->smmu_info);
+}
+#else
+static int a6xx_preemption_iommu_init(struct adreno_device *adreno_dev)
+{
+ return -ENODEV;
+}
+
+static void a6xx_preemption_iommu_close(struct adreno_device *adreno_dev)
+{
+}
+#endif
+
+static void a6xx_preemption_close(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_preemption *preempt = &adreno_dev->preempt;
+ struct adreno_ringbuffer *rb;
+ unsigned int i;
+
+ del_timer(&preempt->timer);
+ kgsl_free_global(device, &preempt->counters);
+ a6xx_preemption_iommu_close(adreno_dev);
+
+ FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
+ kgsl_free_global(device, &rb->preemption_desc);
+ kgsl_free_global(device, &rb->perfcounter_save_restore_desc);
+ }
+}
+
+int a6xx_preemption_init(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ struct adreno_preemption *preempt = &adreno_dev->preempt;
+ struct adreno_ringbuffer *rb;
+ int ret;
+ unsigned int i;
+ uint64_t addr;
+
+ /* CP-side preemption depends on the IOMMU; bail out for other MMU types */
+ if (kgsl_mmu_get_mmutype(device) != KGSL_MMU_TYPE_IOMMU)
+ return -ENODEV;
+
+ INIT_WORK(&preempt->work, _a6xx_preemption_worker);
+
+ setup_timer(&preempt->timer, _a6xx_preemption_timer,
+ (unsigned long) adreno_dev);
+
+ /* Allocate mem for storing preemption counters */
+ ret = kgsl_allocate_global(device, &preempt->counters,
+ adreno_dev->num_ringbuffers *
+ A6XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE, 0, 0,
+ "preemption_counters");
+ if (ret)
+ goto err;
+
+ addr = preempt->counters.gpuaddr;
+
+ /* Allocate mem for storing preemption switch record */
+ FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
+ ret = a6xx_preemption_ringbuffer_init(adreno_dev, rb, addr);
+ if (ret)
+ goto err;
+
+ addr += A6XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE;
+ }
+
+ ret = a6xx_preemption_iommu_init(adreno_dev);
+
+err:
+ if (ret)
+ a6xx_preemption_close(device);
+
+ return ret;
+}
+
+void a6xx_preemption_context_destroy(struct kgsl_context *context)
+{
+ struct kgsl_device *device = context->device;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ if (!adreno_is_preemption_enabled(adreno_dev))
+ return;
+
+ gpumem_free_entry(context->user_ctxt_record);
+}
+
+int a6xx_preemption_context_init(struct kgsl_context *context)
+{
+ struct kgsl_device *device = context->device;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ if (!adreno_is_preemption_enabled(adreno_dev))
+ return 0;
+
+ context->user_ctxt_record = gpumem_alloc_entry(context->dev_priv,
+ A6XX_CP_CTXRECORD_USER_RESTORE_SIZE, 0);
+ if (IS_ERR(context->user_ctxt_record)) {
+ int ret = PTR_ERR(context->user_ctxt_record);
+
+ context->user_ctxt_record = NULL;
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index f217822..c6df7bb 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -341,6 +341,7 @@
struct adreno_context *drawctxt;
struct kgsl_device *device = dev_priv->device;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
int ret;
unsigned int local;
@@ -421,6 +422,16 @@
return ERR_PTR(ret);
}
+ if (gpudev->preemption_context_init) {
+ ret = gpudev->preemption_context_init(&drawctxt->base);
+ if (ret != 0) {
+ kgsl_context_detach(&drawctxt->base);
+ kgsl_context_put(&drawctxt->base);
+ kfree(drawctxt);
+ return ERR_PTR(ret);
+ }
+ }
+
kgsl_sharedmem_writel(device, &device->memstore,
KGSL_MEMSTORE_OFFSET(drawctxt->base.id, soptimestamp),
0);
@@ -545,10 +556,18 @@
void adreno_drawctxt_destroy(struct kgsl_context *context)
{
struct adreno_context *drawctxt;
+ struct adreno_device *adreno_dev;
+ struct adreno_gpudev *gpudev;
if (context == NULL)
return;
+ adreno_dev = ADRENO_DEVICE(context->device);
+ gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+
+ if (gpudev->preemption_context_destroy)
+ gpudev->preemption_context_destroy(context);
+
drawctxt = ADRENO_CONTEXT(context);
debugfs_remove_recursive(drawctxt->debug_root);
kfree(drawctxt);
diff --git a/drivers/gpu/msm/adreno_iommu.c b/drivers/gpu/msm/adreno_iommu.c
index 80a04bc..1a2f8ff 100644
--- a/drivers/gpu/msm/adreno_iommu.c
+++ b/drivers/gpu/msm/adreno_iommu.c
@@ -574,6 +574,40 @@
return cmds - cmds_orig;
}
+static unsigned int _adreno_iommu_set_pt_v2_a6xx(struct kgsl_device *device,
+ unsigned int *cmds_orig,
+ u64 ttbr0, u32 contextidr,
+ struct adreno_ringbuffer *rb,
+ unsigned int cb_num)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ unsigned int *cmds = cmds_orig;
+
+ cmds += _adreno_iommu_add_idle_cmds(adreno_dev, cmds);
+ cmds += cp_wait_for_me(adreno_dev, cmds);
+
+ /* CP switches the pagetable and flushes the Caches */
+ *cmds++ = cp_packet(adreno_dev, CP_SMMU_TABLE_UPDATE, 4);
+ *cmds++ = lower_32_bits(ttbr0);
+ *cmds++ = upper_32_bits(ttbr0);
+ *cmds++ = contextidr;
+ *cmds++ = cb_num;
+
+ *cmds++ = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 4, 1);
+ cmds += cp_gpuaddr(adreno_dev, cmds, (rb->pagetable_desc.gpuaddr +
+ PT_INFO_OFFSET(ttbr0)));
+ *cmds++ = lower_32_bits(ttbr0);
+ *cmds++ = upper_32_bits(ttbr0);
+ *cmds++ = contextidr;
+
+ /* release all commands with wait_for_me */
+ cmds += cp_wait_for_me(adreno_dev, cmds);
+
+ cmds += _adreno_iommu_add_idle_cmds(adreno_dev, cmds);
+
+ return cmds - cmds_orig;
+}
+
/**
* adreno_iommu_set_pt_generate_cmds() - Generate commands to change pagetable
* @rb: The RB pointer in which these commands are to be submitted
@@ -588,6 +622,7 @@
struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
+ struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
u64 ttbr0;
u32 contextidr;
unsigned int *cmds_orig = cmds;
@@ -601,7 +636,11 @@
iommu->setstate.gpuaddr + KGSL_IOMMU_SETSTATE_NOP_OFFSET);
if (iommu->version >= 2) {
- if (adreno_is_a5xx(adreno_dev) || adreno_is_a6xx(adreno_dev))
+ if (adreno_is_a6xx(adreno_dev))
+ cmds += _adreno_iommu_set_pt_v2_a6xx(device, cmds,
+ ttbr0, contextidr, rb,
+ ctx->cb_num);
+ else if (adreno_is_a5xx(adreno_dev))
cmds += _adreno_iommu_set_pt_v2_a5xx(device, cmds,
ttbr0, contextidr, rb);
else if (adreno_is_a4xx(adreno_dev))
diff --git a/drivers/gpu/msm/adreno_pm4types.h b/drivers/gpu/msm/adreno_pm4types.h
index fceceda..2a330b4 100644
--- a/drivers/gpu/msm/adreno_pm4types.h
+++ b/drivers/gpu/msm/adreno_pm4types.h
@@ -55,6 +55,12 @@
/* switches SMMU pagetable, used on a5xx only */
#define CP_SMMU_TABLE_UPDATE 0x53
+/* Set internal CP registers, used to indicate context save data addresses */
+#define CP_SET_PSEUDO_REGISTER 0x56
+
+/* Tell CP the current operation mode, indicates save and restore procedure */
+#define CP_SET_MARKER 0x65
+
/* register read/modify/write */
#define CP_REG_RMW 0x21
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index bff1fda..15c68fb 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -864,9 +864,12 @@
dwords += 2;
}
- if (gpudev->preemption_yield_enable &&
- adreno_is_preemption_enabled(adreno_dev))
- dwords += 8;
+ if (adreno_is_preemption_enabled(adreno_dev)) {
+ if (gpudev->preemption_set_marker)
+ dwords += 4;
+ else if (gpudev->preemption_yield_enable)
+ dwords += 8;
+ }
link = kcalloc(dwords, sizeof(unsigned int), GFP_KERNEL);
if (!link) {
@@ -897,6 +900,10 @@
gpu_ticks_submitted));
}
+ if (gpudev->preemption_set_marker &&
+ adreno_is_preemption_enabled(adreno_dev))
+ cmds += gpudev->preemption_set_marker(cmds, 1);
+
if (numibs) {
list_for_each_entry(ib, &cmdobj->cmdlist, node) {
/*
@@ -918,9 +925,12 @@
}
}
- if (gpudev->preemption_yield_enable &&
- adreno_is_preemption_enabled(adreno_dev))
- cmds += gpudev->preemption_yield_enable(cmds);
+ if (adreno_is_preemption_enabled(adreno_dev)) {
+ if (gpudev->preemption_set_marker)
+ cmds += gpudev->preemption_set_marker(cmds, 0);
+ else if (gpudev->preemption_yield_enable)
+ cmds += gpudev->preemption_yield_enable(cmds);
+ }
if (kernel_profiling) {
cmds += _get_alwayson_counter(adreno_dev, cmds,
diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h
index 63374af..72fc5bf3 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.h
+++ b/drivers/gpu/msm/adreno_ringbuffer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -92,6 +92,8 @@
* @drawctxt_active: The last pagetable that this ringbuffer is set to
* @preemption_desc: The memory descriptor containing
* preemption info written/read by CP
+ * @perfcounter_save_restore_desc: Used by CP to save/restore the perfcounter
+ * values across preemption
* @pagetable_desc: Memory to hold information about the pagetables being used
* and the commands to switch pagetable on the RB
* @dispatch_q: The dispatcher side queue for this ringbuffer
@@ -118,6 +120,7 @@
struct kgsl_event_group events;
struct adreno_context *drawctxt_active;
struct kgsl_memdesc preemption_desc;
+ struct kgsl_memdesc perfcounter_save_restore_desc;
struct kgsl_memdesc pagetable_desc;
struct adreno_dispatcher_drawqueue dispatch_q;
wait_queue_head_t ts_expire_waitq;
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 6bd212d..7b8cdc2 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -245,8 +245,6 @@
}
EXPORT_SYMBOL(kgsl_readtimestamp);
-static long gpumem_free_entry(struct kgsl_mem_entry *entry);
-
/* Scheduled by kgsl_mem_entry_put_deferred() */
static void _deferred_put(struct work_struct *work)
{
@@ -608,7 +606,7 @@
* detached by checking the KGSL_CONTEXT_PRIV_DETACHED bit in
* context->priv.
*/
-static void kgsl_context_detach(struct kgsl_context *context)
+void kgsl_context_detach(struct kgsl_context *context)
{
struct kgsl_device *device;
@@ -1812,7 +1810,7 @@
return 0;
}
-static long gpumem_free_entry(struct kgsl_mem_entry *entry)
+long gpumem_free_entry(struct kgsl_mem_entry *entry)
{
pid_t ptname = 0;
@@ -3054,7 +3052,7 @@
/* The largest allowable alignment for a GPU object is 32MB */
#define KGSL_MAX_ALIGN (32 * SZ_1M)
-static struct kgsl_mem_entry *gpumem_alloc_entry(
+struct kgsl_mem_entry *gpumem_alloc_entry(
struct kgsl_device_private *dev_priv,
uint64_t size, uint64_t flags)
{
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index 3f1c86e..c54e51e 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -445,6 +445,10 @@
int kgsl_suspend_driver(struct platform_device *pdev, pm_message_t state);
int kgsl_resume_driver(struct platform_device *pdev);
+struct kgsl_mem_entry *gpumem_alloc_entry(struct kgsl_device_private *dev_priv,
+ uint64_t size, uint64_t flags);
+long gpumem_free_entry(struct kgsl_mem_entry *entry);
+
static inline int kgsl_gpuaddr_in_memdesc(const struct kgsl_memdesc *memdesc,
uint64_t gpuaddr, uint64_t size)
{
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index ca1f181..b621ada 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -378,6 +378,8 @@
* @pwr_constraint: power constraint from userspace for this context
* @fault_count: number of times gpu hanged in last _context_throttle_time ms
* @fault_time: time of the first gpu hang in last _context_throttle_time ms
+ * @user_ctxt_record: memory descriptor used by CP to save/restore VPC data
+ * across preemption
*/
struct kgsl_context {
struct kref refcount;
@@ -395,6 +397,7 @@
struct kgsl_pwr_constraint pwr_constraint;
unsigned int fault_count;
unsigned long fault_time;
+ struct kgsl_mem_entry *user_ctxt_record;
};
#define _context_comm(_c) \
@@ -689,6 +692,8 @@
void kgsl_events_init(void);
void kgsl_events_exit(void);
+void kgsl_context_detach(struct kgsl_context *context);
+
void kgsl_del_event_group(struct kgsl_event_group *group);
void kgsl_add_event_group(struct kgsl_event_group *group,
diff --git a/drivers/gpu/msm/kgsl_hfi.c b/drivers/gpu/msm/kgsl_hfi.c
index e91550a..067b276 100644
--- a/drivers/gpu/msm/kgsl_hfi.c
+++ b/drivers/gpu/msm/kgsl_hfi.c
@@ -188,9 +188,9 @@
msg->seqnum == rsp->ret_hdr.seqnum)
break;
}
- spin_unlock(&hfi->msglock);
if (msg == NULL) {
+ spin_unlock(&hfi->msglock);
dev_err(&gmu->pdev->dev,
"Cannot find receiver of ack msg with id=%d\n",
rsp->ret_hdr.id);
@@ -199,6 +199,7 @@
memcpy(&msg->results, (void *) rsp, rsp->hdr.size << 2);
complete(&msg->msg_complete);
+ spin_unlock(&hfi->msglock);
}
static void receive_err_msg(struct gmu_device *gmu, struct hfi_msg_rsp *rsp)
diff --git a/drivers/gpu/msm/kgsl_hfi.h b/drivers/gpu/msm/kgsl_hfi.h
index 83abec4..8eedbfa2 100644
--- a/drivers/gpu/msm/kgsl_hfi.h
+++ b/drivers/gpu/msm/kgsl_hfi.h
@@ -115,7 +115,7 @@
HFI_F2H_QPRI_DEBUG = 40,
};
-#define HFI_RSP_TIMEOUT 50 /* msec */
+#define HFI_RSP_TIMEOUT 100 /* msec */
#define HFI_H2F_CMD_IRQ_MASK BIT(0)
enum hfi_msg_type {
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index b3d02e6..73c0d71 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -1255,7 +1255,7 @@
ret = iommu_domain_get_attr(iommu_pt->domain,
DOMAIN_ATTR_CONTEXT_BANK, &cb_num);
if (ret) {
- KGSL_CORE_ERR("get DOMAIN_ATTR_PROCID failed: %d\n",
+ KGSL_CORE_ERR("get DOMAIN_ATTR_CONTEXT_BANK failed: %d\n",
ret);
goto done;
}
diff --git a/drivers/gpu/msm/kgsl_iommu.h b/drivers/gpu/msm/kgsl_iommu.h
index 6337a48..acf8ae4 100644
--- a/drivers/gpu/msm/kgsl_iommu.h
+++ b/drivers/gpu/msm/kgsl_iommu.h
@@ -23,7 +23,7 @@
* These defines control the address range for allocations that
* are mapped into all pagetables.
*/
-#define KGSL_IOMMU_GLOBAL_MEM_SIZE SZ_8M
+#define KGSL_IOMMU_GLOBAL_MEM_SIZE (20 * SZ_1M)
#define KGSL_IOMMU_GLOBAL_MEM_BASE 0xf8000000
#define KGSL_IOMMU_SECURE_SIZE SZ_256M
diff --git a/drivers/gpu/msm/kgsl_sharedmem.h b/drivers/gpu/msm/kgsl_sharedmem.h
index dd41e4e..5466a49 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.h
+++ b/drivers/gpu/msm/kgsl_sharedmem.h
@@ -292,8 +292,10 @@
else {
ret = kgsl_sharedmem_page_alloc_user(memdesc, (size_t) size);
if (ret == 0) {
- if (kgsl_memdesc_map(memdesc) == NULL)
+ if (kgsl_memdesc_map(memdesc) == NULL) {
+ kgsl_sharedmem_free(memdesc);
ret = -ENOMEM;
+ }
}
}
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index 860fe6e..a3da8ffd 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2012, 2017, The Linux Foundation. All rights reserved.
*
* Description: CoreSight Funnel driver
*
@@ -23,6 +23,7 @@
#include <linux/coresight.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
+#include <linux/of_address.h>
#include "coresight-priv.h"
@@ -168,6 +169,29 @@
};
ATTRIBUTE_GROUPS(coresight_funnel);
+static int funnel_get_resource_byname(struct device_node *np,
+ char *ch_base, struct resource *res)
+{
+ const char *name = NULL;
+ int index = 0, found = 0;
+
+ while (!of_property_read_string_index(np, "reg-names", index, &name)) {
+ if (strcmp(ch_base, name)) {
+ index++;
+ continue;
+ }
+
+ /* We have a match and @index is where it's at */
+ found = 1;
+ break;
+ }
+
+ if (!found)
+ return -EINVAL;
+
+ return of_address_to_resource(np, index, res);
+}
+
static int funnel_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret;
@@ -175,7 +199,8 @@
struct device *dev = &adev->dev;
struct coresight_platform_data *pdata = NULL;
struct funnel_drvdata *drvdata;
- struct resource *res = &adev->res;
+ struct resource *res;
+ struct resource res_real;
struct coresight_desc desc = { 0 };
struct device_node *np = adev->dev.of_node;
@@ -199,8 +224,19 @@
}
dev_set_drvdata(dev, drvdata);
- /* Validity for the resource is already checked by the AMBA core */
- base = devm_ioremap_resource(dev, res);
+ if (of_property_read_bool(np, "qcom,duplicate-funnel")) {
+ ret = funnel_get_resource_byname(np, "funnel-base-real",
+ &res_real);
+ if (ret)
+ return ret;
+
+ res = &res_real;
+ base = devm_ioremap(dev, res->start, resource_size(res));
+ } else {
+ /* Validity of resource is already checked by the AMBA core */
+ res = &adev->res;
+ base = devm_ioremap_resource(dev, res);
+ }
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index f9449fe..077cb45 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -497,14 +497,12 @@
struct coresight_desc desc = { 0 };
struct device_node *np = adev->dev.of_node;
- if (np) {
- pdata = of_get_coresight_platform_data(dev, np);
- if (IS_ERR(pdata)) {
- ret = PTR_ERR(pdata);
- goto out;
- }
- adev->dev.platform_data = pdata;
+ pdata = of_get_coresight_platform_data(dev, np);
+ if (IS_ERR(pdata)) {
+ ret = PTR_ERR(pdata);
+ goto out;
}
+ adev->dev.platform_data = pdata;
ret = -ENOMEM;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
@@ -531,10 +529,8 @@
drvdata->memwidth = tmc_get_memwidth(devid);
if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
- if (np)
- ret = of_property_read_u32(np,
- "arm,buffer-size",
- &drvdata->size);
+ ret = of_property_read_u32(np, "arm,buffer-size",
+ &drvdata->size);
if (ret)
drvdata->size = SZ_1M;
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index e7b8f49..89fc93b 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -277,6 +277,23 @@
If unsure, say N.
+config DM_REQ_CRYPT
+ tristate "Req Crypt target support"
+ depends on BLK_DEV_DM
+ select XTS
+ select CRYPTO_XTS
+ ---help---
+	  This request-based device-mapper target allows you to create a device
+	  that transparently encrypts the data on it. You'll need to activate
+	  the ciphers you're going to use in the cryptoapi configuration.
+	  dm-req-crypt operates on whole requests (larger payloads) to make
+	  better use of the crypto hardware.
+
+ To compile this code as a module, choose M here: the module will
+ be called dm-req-crypt.
+
+ If unsure, say N.
+
config DM_SNAPSHOT
tristate "Snapshot target"
depends on BLK_DEV_DM
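
For illustration only: the target registers as "req-crypt" (see req_crypt_target
in dm-req-crypt.c below), so a mapping can be created with a dmsetup table line
of the form documented in the constructor, <cipher> <key> <iv_offset>
<dev_path> <start>, plus the optional "fde_enabled" and "ice" arguments parsed
by req_crypt_ctr(). Device, sector count, and key index below are placeholders,
not values from this patch:

  echo "0 <num_sectors> req-crypt <cipher> <key_index> 0 /dev/<blockdev> 0 fde_enabled ice" | dmsetup create reqcrypt0
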
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index f26ce41..f14e2fc 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -59,6 +59,7 @@
obj-$(CONFIG_DM_CACHE_CLEANER) += dm-cache-cleaner.o
obj-$(CONFIG_DM_ERA) += dm-era.o
obj-$(CONFIG_DM_LOG_WRITES) += dm-log-writes.o
+obj-$(CONFIG_DM_REQ_CRYPT) += dm-req-crypt.o
obj-$(CONFIG_DM_ANDROID_VERITY) += dm-android-verity.o
ifeq ($(CONFIG_DM_UEVENT),y)
diff --git a/drivers/md/dm-req-crypt.c b/drivers/md/dm-req-crypt.c
new file mode 100644
index 0000000..3ffe7e5
--- /dev/null
+++ b/drivers/md/dm-req-crypt.c
@@ -0,0 +1,1364 @@
+/*
+ * DM request based crypto driver
+ *
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/completion.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/mempool.h>
+#include <linux/slab.h>
+#include <linux/crypto.h>
+#include <linux/qcrypto.h>
+#include <linux/workqueue.h>
+#include <linux/backing-dev.h>
+#include <linux/atomic.h>
+#include <linux/scatterlist.h>
+#include <linux/device-mapper.h>
+#include <linux/printk.h>
+
+#include <asm/page.h>
+#include <asm/unaligned.h>
+#include <crypto/skcipher.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/hash.h>
+#include <crypto/md5.h>
+#include <crypto/algapi.h>
+#include <crypto/ice.h>
+
+#define DM_MSG_PREFIX "req-crypt"
+
+#define MAX_SG_LIST 1024
+#define REQ_DM_512_KB (512*1024)
+#define MAX_ENCRYPTION_BUFFERS 1
+#define MIN_IOS 256
+#define MIN_POOL_PAGES 32
+#define KEY_SIZE_XTS 32
+#define AES_XTS_IV_LEN 16
+#define MAX_MSM_ICE_KEY_LUT_SIZE 32
+#define SECTOR_SIZE 512
+#define MIN_CRYPTO_TRANSFER_SIZE (4 * 1024)
+
+#define DM_REQ_CRYPT_ERROR -1
+#define DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC -2
+
+/*
+ * ENCRYPTION_MODE_CRYPTO means dm-req-crypt invokes crypto operations
+ * for all requests. Crypto operations are performed by a crypto engine
+ * plugged into the Linux kernel crypto API.
+ */
+#define DM_REQ_CRYPT_ENCRYPTION_MODE_CRYPTO 0
+/*
+ * ENCRYPTION_MODE_TRANSPARENT means dm-req-crypt does not invoke crypto
+ * operations for any request. Data is encrypted or decrypted by the
+ * Inline Crypto Engine (ICE) embedded in the storage hardware.
+ */
+#define DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT 1
+
+#define DM_REQ_CRYPT_QUEUE_SIZE 256
+
+struct req_crypt_result {
+ struct completion completion;
+ int err;
+};
+
+#define FDE_KEY_ID 0
+#define PFE_KEY_ID 1
+
+static struct dm_dev *dev;
+static struct kmem_cache *_req_crypt_io_pool;
+static struct kmem_cache *_req_dm_scatterlist_pool;
+static sector_t start_sector_orig;
+static struct workqueue_struct *req_crypt_queue;
+static struct workqueue_struct *req_crypt_split_io_queue;
+static mempool_t *req_io_pool;
+static mempool_t *req_page_pool;
+static mempool_t *req_scatterlist_pool;
+static bool is_fde_enabled;
+static struct crypto_skcipher *tfm;
+static unsigned int encryption_mode;
+static struct ice_crypto_setting *ice_settings;
+
+unsigned int num_engines;
+unsigned int num_engines_fde, fde_cursor;
+unsigned int num_engines_pfe, pfe_cursor;
+struct crypto_engine_entry *fde_eng, *pfe_eng;
+DEFINE_MUTEX(engine_list_mutex);
+
+struct req_dm_crypt_io {
+ struct ice_crypto_setting ice_settings;
+ struct work_struct work;
+ struct request *cloned_request;
+ int error;
+ atomic_t pending;
+ struct timespec start_time;
+ bool should_encrypt;
+ bool should_decrypt;
+ u32 key_id;
+};
+
+struct req_dm_split_req_io {
+ struct work_struct work;
+ struct scatterlist *req_split_sg_read;
+ struct req_crypt_result result;
+ struct crypto_engine_entry *engine;
+ u8 IV[AES_XTS_IV_LEN];
+ int size;
+ struct request *clone;
+};
+
+#ifdef CONFIG_FIPS_ENABLE
+static struct qcrypto_func_set dm_qcrypto_func;
+#else
+static struct qcrypto_func_set dm_qcrypto_func = {
+ qcrypto_cipher_set_device_hw,
+ qcrypto_cipher_set_flag,
+ qcrypto_get_num_engines,
+ qcrypto_get_engine_list
+};
+#endif
+static void req_crypt_cipher_complete
+ (struct crypto_async_request *req, int err);
+static void req_cryptd_split_req_queue_cb
+ (struct work_struct *work);
+static void req_cryptd_split_req_queue
+ (struct req_dm_split_req_io *io);
+static void req_crypt_split_io_complete
+ (struct req_crypt_result *res, int err);
+
+static bool req_crypt_should_encrypt(struct req_dm_crypt_io *req)
+{
+ int ret = 0;
+ bool should_encrypt = false;
+ struct bio *bio = NULL;
+ bool is_encrypted = false;
+ bool is_inplace = false;
+
+ if (!req || !req->cloned_request || !req->cloned_request->bio)
+ return false;
+
+ if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT)
+ return false;
+ bio = req->cloned_request->bio;
+
+ /* req->key_id = key_id; @todo support more than 1 pfe key */
+ if ((ret == 0) && (is_encrypted || is_inplace)) {
+ should_encrypt = true;
+ req->key_id = PFE_KEY_ID;
+ } else if (is_fde_enabled) {
+ should_encrypt = true;
+ req->key_id = FDE_KEY_ID;
+ }
+
+ return should_encrypt;
+}
+
+static bool req_crypt_should_deccrypt(struct req_dm_crypt_io *req)
+{
+ int ret = 0;
+ bool should_deccrypt = false;
+ struct bio *bio = NULL;
+ bool is_encrypted = false;
+ bool is_inplace = false;
+
+ if (!req || !req->cloned_request || !req->cloned_request->bio)
+ return false;
+ if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT)
+ return false;
+
+ bio = req->cloned_request->bio;
+
+ /* req->key_id = key_id; @todo support more than 1 pfe key */
+ if ((ret == 0) && (is_encrypted && !is_inplace)) {
+ should_deccrypt = true;
+ req->key_id = PFE_KEY_ID;
+ } else if (is_fde_enabled) {
+ should_deccrypt = true;
+ req->key_id = FDE_KEY_ID;
+ }
+
+ return should_deccrypt;
+}
+
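A note on the two helpers above: as this hunk stands, the per-file (PFE)
detection is stubbed out (is_encrypted and is_inplace are initialized to false
and never updated; the "@todo" marks the elided lookup), so both helpers
effectively reduce to the sketch below. The helper name is invented and this
is a reading aid, not part of the patch:

	static bool should_touch_data(struct req_dm_crypt_io *req)
	{
		if (!req || !req->cloned_request || !req->cloned_request->bio)
			return false;
		if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT)
			return false;	/* ICE transforms the data in-line */
		if (is_fde_enabled) {
			req->key_id = FDE_KEY_ID;	/* full-disk encryption key */
			return true;
		}
		return false;
	}
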
+static void req_crypt_inc_pending(struct req_dm_crypt_io *io)
+{
+ atomic_inc(&io->pending);
+}
+
+static void req_crypt_dec_pending_encrypt(struct req_dm_crypt_io *io)
+{
+ int error = 0;
+ struct request *clone = NULL;
+
+ if (io) {
+ error = io->error;
+ if (io->cloned_request) {
+ clone = io->cloned_request;
+ } else {
+ DMERR("%s io->cloned_request is NULL\n",
+ __func__);
+ /*
+ * If Clone is NULL we cannot do anything,
+ * this should never happen
+ */
+ WARN_ON(1);
+ }
+ } else {
+ DMERR("%s io is NULL\n", __func__);
+ /*
+		 * If io is NULL we cannot do anything;
+		 * this should never happen
+ */
+ WARN_ON(1);
+ }
+
+ atomic_dec(&io->pending);
+
+ if (error < 0) {
+ dm_kill_unmapped_request(clone, error);
+ mempool_free(io, req_io_pool);
+ } else
+ dm_dispatch_request(clone);
+}
+
+static void req_crypt_dec_pending_decrypt(struct req_dm_crypt_io *io)
+{
+ int error = 0;
+ struct request *clone = NULL;
+
+ if (io) {
+ error = io->error;
+ if (io->cloned_request) {
+ clone = io->cloned_request;
+ } else {
+ DMERR("%s io->cloned_request is NULL\n",
+ __func__);
+ /*
+ * If Clone is NULL we cannot do anything,
+ * this should never happen
+ */
+ WARN_ON(1);
+ }
+ } else {
+ DMERR("%s io is NULL\n",
+ __func__);
+ /*
+		 * If io is NULL we cannot do anything;
+		 * this should never happen
+ */
+ WARN_ON(1);
+ }
+
+ /* Should never get here if io or Clone is NULL */
+ dm_end_request(clone, error);
+ atomic_dec(&io->pending);
+ mempool_free(io, req_io_pool);
+}
+
+/*
+ * The callback invoked by the worker queue to perform decryption
+ * for reads and use the dm functions to complete the bios and requests.
+ */
+static void req_cryptd_crypt_read_convert(struct req_dm_crypt_io *io)
+{
+ struct request *clone = NULL;
+ int error = DM_REQ_CRYPT_ERROR;
+ int total_sg_len = 0, total_bytes_in_req = 0, temp_size = 0, i = 0;
+ struct scatterlist *sg = NULL;
+ struct scatterlist *req_sg_read = NULL;
+
+ unsigned int engine_list_total = 0;
+ struct crypto_engine_entry *curr_engine_list = NULL;
+ bool split_transfers = 0;
+ sector_t tempiv;
+ struct req_dm_split_req_io *split_io = NULL;
+
+ if (io) {
+ error = io->error;
+ if (io->cloned_request) {
+ clone = io->cloned_request;
+ } else {
+ DMERR("%s io->cloned_request is NULL\n",
+ __func__);
+ error = DM_REQ_CRYPT_ERROR;
+ goto submit_request;
+ }
+ } else {
+ DMERR("%s io is NULL\n",
+ __func__);
+ error = DM_REQ_CRYPT_ERROR;
+ goto submit_request;
+ }
+
+ req_crypt_inc_pending(io);
+
+ mutex_lock(&engine_list_mutex);
+
+ engine_list_total = (io->key_id == FDE_KEY_ID ? num_engines_fde :
+ (io->key_id == PFE_KEY_ID ?
+ num_engines_pfe : 0));
+
+ curr_engine_list = (io->key_id == FDE_KEY_ID ? fde_eng :
+ (io->key_id == PFE_KEY_ID ?
+ pfe_eng : NULL));
+
+ mutex_unlock(&engine_list_mutex);
+
+ req_sg_read = (struct scatterlist *)mempool_alloc(req_scatterlist_pool,
+ GFP_KERNEL);
+ if (!req_sg_read) {
+ DMERR("%s req_sg_read allocation failed\n",
+ __func__);
+ error = DM_REQ_CRYPT_ERROR;
+ goto skcipher_req_alloc_failure;
+ }
+ memset(req_sg_read, 0, sizeof(struct scatterlist) * MAX_SG_LIST);
+
+ total_sg_len = blk_rq_map_sg_no_cluster(clone->q, clone, req_sg_read);
+ if ((total_sg_len <= 0) || (total_sg_len > MAX_SG_LIST)) {
+		DMERR("%s Request Error %d", __func__, total_sg_len);
+ error = DM_REQ_CRYPT_ERROR;
+ goto skcipher_req_alloc_failure;
+ }
+
+ total_bytes_in_req = clone->__data_len;
+ if (total_bytes_in_req > REQ_DM_512_KB) {
+		DMERR("%s total_bytes_in_req > 512 KB: %d",
+ __func__, total_bytes_in_req);
+ error = DM_REQ_CRYPT_ERROR;
+ goto skcipher_req_alloc_failure;
+ }
+
+
+ if ((clone->__data_len >= (MIN_CRYPTO_TRANSFER_SIZE *
+ engine_list_total))
+ && (engine_list_total > 1))
+ split_transfers = 1;
+
+ if (split_transfers) {
+ split_io = kzalloc(sizeof(struct req_dm_split_req_io)
+ * engine_list_total, GFP_KERNEL);
+ if (!split_io) {
+ DMERR("%s split_io allocation failed\n", __func__);
+ error = DM_REQ_CRYPT_ERROR;
+ goto skcipher_req_alloc_failure;
+ }
+
+ split_io[0].req_split_sg_read = sg = req_sg_read;
+ split_io[engine_list_total - 1].size = total_bytes_in_req;
+ for (i = 0; i < (engine_list_total); i++) {
+ while ((sg) && i < (engine_list_total - 1)) {
+ split_io[i].size += sg->length;
+ split_io[engine_list_total - 1].size -=
+ sg->length;
+ if (split_io[i].size >=
+ (total_bytes_in_req /
+ engine_list_total)) {
+ split_io[i + 1].req_split_sg_read =
+ sg_next(sg);
+ sg_mark_end(sg);
+ break;
+ }
+ sg = sg_next(sg);
+ }
+ split_io[i].engine = &curr_engine_list[i];
+ init_completion(&split_io[i].result.completion);
+ memset(&split_io[i].IV, 0, AES_XTS_IV_LEN);
+ tempiv = clone->__sector + (temp_size / SECTOR_SIZE);
+ memcpy(&split_io[i].IV, &tempiv, sizeof(sector_t));
+ temp_size += split_io[i].size;
+ split_io[i].clone = clone;
+ req_cryptd_split_req_queue(&split_io[i]);
+ }
+ } else {
+ split_io = kzalloc(sizeof(struct req_dm_split_req_io),
+ GFP_KERNEL);
+ if (!split_io) {
+ DMERR("%s split_io allocation failed\n", __func__);
+ error = DM_REQ_CRYPT_ERROR;
+ goto skcipher_req_alloc_failure;
+ }
+ split_io->engine = &curr_engine_list[0];
+ init_completion(&split_io->result.completion);
+ memcpy(split_io->IV, &clone->__sector, sizeof(sector_t));
+ split_io->req_split_sg_read = req_sg_read;
+ split_io->size = total_bytes_in_req;
+ split_io->clone = clone;
+ req_cryptd_split_req_queue(split_io);
+ }
+
+ if (!split_transfers) {
+ wait_for_completion_interruptible(&split_io->result.completion);
+ if (split_io->result.err) {
+ DMERR("%s error = %d for request\n",
+ __func__, split_io->result.err);
+ error = DM_REQ_CRYPT_ERROR;
+ goto skcipher_req_alloc_failure;
+ }
+ } else {
+ for (i = 0; i < (engine_list_total); i++) {
+ wait_for_completion_interruptible(
+ &split_io[i].result.completion);
+ if (split_io[i].result.err) {
+				DMERR("%s error = %d for split request %d\n",
+ __func__, split_io[i].result.err, i);
+ error = DM_REQ_CRYPT_ERROR;
+ goto skcipher_req_alloc_failure;
+ }
+ }
+ }
+ error = 0;
+skcipher_req_alloc_failure:
+
+ mempool_free(req_sg_read, req_scatterlist_pool);
+ kfree(split_io);
+submit_request:
+ if (io)
+ io->error = error;
+ req_crypt_dec_pending_decrypt(io);
+}
+
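A worked example of the split-IV arithmetic above (numbers assumed, not from
the patch): with two engines and a 16 KB read starting at sector S, each split
carries roughly 8 KB; the first split gets IV = S and the second gets
IV = S + 8192 / SECTOR_SIZE = S + 16, matching
tempiv = clone->__sector + (temp_size / SECTOR_SIZE), where temp_size is the
running byte count of the splits queued so far.
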
+/*
+ * This callback is called by the worker queue to perform non-decrypt reads
+ * and use the dm function to complete the bios and requests.
+ */
+static void req_cryptd_crypt_read_plain(struct req_dm_crypt_io *io)
+{
+ struct request *clone = NULL;
+ int error = 0;
+
+ if (!io || !io->cloned_request) {
+ DMERR("%s io is invalid\n", __func__);
+ WARN_ON(1); /* should not happen */
+ }
+
+ clone = io->cloned_request;
+
+ dm_end_request(clone, error);
+ mempool_free(io, req_io_pool);
+}
+
+/*
+ * The callback invoked by the worker queue to perform encryption
+ * for writes and submit the request using the elevator.
+ */
+static void req_cryptd_crypt_write_convert(struct req_dm_crypt_io *io)
+{
+ struct request *clone = NULL;
+ struct bio *bio_src = NULL;
+ unsigned int total_sg_len_req_in = 0, total_sg_len_req_out = 0,
+ total_bytes_in_req = 0, error = DM_MAPIO_REMAPPED, rc = 0;
+ struct req_iterator iter;
+ struct req_iterator iter1;
+ struct skcipher_request *req = NULL;
+ struct req_crypt_result result;
+ struct bio_vec bvec;
+ struct scatterlist *req_sg_in = NULL;
+ struct scatterlist *req_sg_out = NULL;
+ int copy_bio_sector_to_req = 0;
+ gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
+ struct page *page = NULL;
+ u8 IV[AES_XTS_IV_LEN];
+ int remaining_size = 0, err = 0;
+ struct crypto_engine_entry engine;
+ unsigned int engine_list_total = 0;
+ struct crypto_engine_entry *curr_engine_list = NULL;
+ unsigned int *engine_cursor = NULL;
+
+
+ if (io) {
+ if (io->cloned_request) {
+ clone = io->cloned_request;
+ } else {
+ DMERR("%s io->cloned_request is NULL\n",
+ __func__);
+ error = DM_REQ_CRYPT_ERROR;
+ goto submit_request;
+ }
+ } else {
+ DMERR("%s io is NULL\n",
+ __func__);
+ error = DM_REQ_CRYPT_ERROR;
+ goto submit_request;
+ }
+
+ req_crypt_inc_pending(io);
+
+ req = skcipher_request_alloc(tfm, GFP_KERNEL);
+ if (!req) {
+ DMERR("%s skcipher request allocation failed\n",
+ __func__);
+ error = DM_REQ_CRYPT_ERROR;
+ goto skcipher_req_alloc_failure;
+ }
+
+ skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ req_crypt_cipher_complete, &result);
+
+ mutex_lock(&engine_list_mutex);
+ engine_list_total = (io->key_id == FDE_KEY_ID ? num_engines_fde :
+ (io->key_id == PFE_KEY_ID ?
+ num_engines_pfe : 0));
+
+ curr_engine_list = (io->key_id == FDE_KEY_ID ? fde_eng :
+ (io->key_id == PFE_KEY_ID ?
+ pfe_eng : NULL));
+
+ engine_cursor = (io->key_id == FDE_KEY_ID ? &fde_cursor :
+ (io->key_id == PFE_KEY_ID ? &pfe_cursor
+ : NULL));
+ if ((engine_list_total < 1) || (curr_engine_list == NULL) ||
+ (engine_cursor == NULL)) {
+ DMERR("%s Unknown Key ID!\n", __func__);
+ error = DM_REQ_CRYPT_ERROR;
+ mutex_unlock(&engine_list_mutex);
+ goto skcipher_req_alloc_failure;
+ }
+
+ engine = curr_engine_list[*engine_cursor];
+ (*engine_cursor)++;
+ (*engine_cursor) %= engine_list_total;
+
+ err = (dm_qcrypto_func.cipher_set)(req, engine.ce_device,
+ engine.hw_instance);
+ if (err) {
+ DMERR("%s qcrypto_cipher_set_device_hw failed with err %d\n",
+ __func__, err);
+ mutex_unlock(&engine_list_mutex);
+ goto skcipher_req_alloc_failure;
+ }
+ mutex_unlock(&engine_list_mutex);
+
+ init_completion(&result.completion);
+
+ (dm_qcrypto_func.cipher_flag)(req,
+ QCRYPTO_CTX_USE_PIPE_KEY | QCRYPTO_CTX_XTS_DU_SIZE_512B);
+ crypto_skcipher_clear_flags(tfm, ~0);
+ crypto_skcipher_setkey(tfm, NULL, KEY_SIZE_XTS);
+
+ req_sg_in = (struct scatterlist *)mempool_alloc(req_scatterlist_pool,
+ GFP_KERNEL);
+ if (!req_sg_in) {
+ DMERR("%s req_sg_in allocation failed\n",
+ __func__);
+ error = DM_REQ_CRYPT_ERROR;
+ goto skcipher_req_alloc_failure;
+ }
+ memset(req_sg_in, 0, sizeof(struct scatterlist) * MAX_SG_LIST);
+
+ req_sg_out = (struct scatterlist *)mempool_alloc(req_scatterlist_pool,
+ GFP_KERNEL);
+ if (!req_sg_out) {
+ DMERR("%s req_sg_out allocation failed\n",
+ __func__);
+ error = DM_REQ_CRYPT_ERROR;
+ goto skcipher_req_alloc_failure;
+ }
+ memset(req_sg_out, 0, sizeof(struct scatterlist) * MAX_SG_LIST);
+
+ total_sg_len_req_in = blk_rq_map_sg(clone->q, clone, req_sg_in);
+ if ((total_sg_len_req_in <= 0) ||
+ (total_sg_len_req_in > MAX_SG_LIST)) {
+		DMERR("%s Request Error %d", __func__, total_sg_len_req_in);
+ error = DM_REQ_CRYPT_ERROR;
+ goto skcipher_req_alloc_failure;
+ }
+
+ total_bytes_in_req = clone->__data_len;
+ if (total_bytes_in_req > REQ_DM_512_KB) {
+		DMERR("%s total_bytes_in_req > 512 KB: %d",
+ __func__, total_bytes_in_req);
+ error = DM_REQ_CRYPT_ERROR;
+ goto skcipher_req_alloc_failure;
+ }
+
+ rq_for_each_segment(bvec, clone, iter) {
+ if (bvec.bv_len > remaining_size) {
+ page = NULL;
+ while (page == NULL) {
+ page = mempool_alloc(req_page_pool, gfp_mask);
+ if (!page) {
+ DMERR("%s Crypt page alloc failed",
+ __func__);
+ congestion_wait(BLK_RW_ASYNC, HZ/100);
+ }
+ }
+
+ bvec.bv_page = page;
+ bvec.bv_offset = 0;
+ remaining_size = PAGE_SIZE - bvec.bv_len;
+ if (remaining_size < 0)
+ WARN_ON(1);
+ } else {
+ bvec.bv_page = page;
+ bvec.bv_offset = PAGE_SIZE - remaining_size;
+ remaining_size = remaining_size - bvec.bv_len;
+ }
+ }
+
+ total_sg_len_req_out = blk_rq_map_sg(clone->q, clone, req_sg_out);
+ if ((total_sg_len_req_out <= 0) ||
+ (total_sg_len_req_out > MAX_SG_LIST)) {
+ DMERR("%s Request Error %d", __func__, total_sg_len_req_out);
+ error = DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC;
+ goto skcipher_req_alloc_failure;
+ }
+
+ memset(IV, 0, AES_XTS_IV_LEN);
+ memcpy(IV, &clone->__sector, sizeof(sector_t));
+
+ skcipher_request_set_crypt(req, req_sg_in, req_sg_out,
+ total_bytes_in_req, (void *) IV);
+
+ rc = crypto_skcipher_encrypt(req);
+
+ switch (rc) {
+ case 0:
+ break;
+
+ case -EBUSY:
+ /*
+		 * Let's make this request synchronous by also waiting
+		 * while it is in progress
+ */
+ case -EINPROGRESS:
+ wait_for_completion_interruptible(&result.completion);
+ if (result.err) {
+ DMERR("%s error = %d encrypting the request\n",
+ __func__, result.err);
+ error = DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC;
+ goto skcipher_req_alloc_failure;
+ }
+ break;
+
+ default:
+ error = DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC;
+ goto skcipher_req_alloc_failure;
+ }
+
+ __rq_for_each_bio(bio_src, clone) {
+ if (copy_bio_sector_to_req == 0)
+ copy_bio_sector_to_req++;
+ blk_queue_bounce(clone->q, &bio_src);
+ }
+
+ /*
+ * Recalculate the phy_segments as we allocate new pages
+ * This is used by storage driver to fill the sg list.
+ */
+ blk_recalc_rq_segments(clone);
+
+skcipher_req_alloc_failure:
+ if (req)
+ skcipher_request_free(req);
+
+ if (error == DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC) {
+ rq_for_each_segment(bvec, clone, iter1) {
+ if (bvec.bv_offset == 0) {
+ mempool_free(bvec.bv_page, req_page_pool);
+ bvec.bv_page = NULL;
+ } else
+ bvec.bv_page = NULL;
+ }
+ }
+
+ mempool_free(req_sg_in, req_scatterlist_pool);
+ mempool_free(req_sg_out, req_scatterlist_pool);
+submit_request:
+ if (io)
+ io->error = error;
+ req_crypt_dec_pending_encrypt(io);
+}
+
+/*
+ * This callback is called by the worker queue to perform non-encrypted writes
+ * and submit the request using the elevator.
+ */
+static void req_cryptd_crypt_write_plain(struct req_dm_crypt_io *io)
+{
+ struct request *clone = NULL;
+
+ if (!io || !io->cloned_request) {
+ DMERR("%s io is invalid\n", __func__);
+ WARN_ON(1); /* should not happen */
+ }
+
+ clone = io->cloned_request;
+ io->error = 0;
+ dm_dispatch_request(clone);
+}
+
+/* Queue callback function that will get triggered */
+static void req_cryptd_crypt(struct work_struct *work)
+{
+ struct req_dm_crypt_io *io =
+ container_of(work, struct req_dm_crypt_io, work);
+
+ if (rq_data_dir(io->cloned_request) == WRITE) {
+ if (io->should_encrypt)
+ req_cryptd_crypt_write_convert(io);
+ else
+ req_cryptd_crypt_write_plain(io);
+ } else if (rq_data_dir(io->cloned_request) == READ) {
+ if (io->should_decrypt)
+ req_cryptd_crypt_read_convert(io);
+ else
+ req_cryptd_crypt_read_plain(io);
+ } else {
+		DMERR("%s received neither read nor write request for Clone 0x%p\n",
+ __func__, io->cloned_request);
+ }
+}
+
+static void req_cryptd_split_req_queue_cb(struct work_struct *work)
+{
+ struct req_dm_split_req_io *io =
+ container_of(work, struct req_dm_split_req_io, work);
+ struct skcipher_request *req = NULL;
+ struct req_crypt_result result;
+ int err = 0;
+ struct crypto_engine_entry *engine = NULL;
+
+ if ((!io) || (!io->req_split_sg_read) || (!io->engine)) {
+ DMERR("%s Input invalid\n",
+ __func__);
+ err = DM_REQ_CRYPT_ERROR;
+ /* If io is not populated this should not be called */
+ WARN_ON(1);
+ }
+ req = skcipher_request_alloc(tfm, GFP_KERNEL);
+ if (!req) {
+ DMERR("%s skcipher request allocation failed\n", __func__);
+ err = DM_REQ_CRYPT_ERROR;
+ goto skcipher_req_alloc_failure;
+ }
+
+ skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ req_crypt_cipher_complete, &result);
+
+ engine = io->engine;
+
+ err = (dm_qcrypto_func.cipher_set)(req, engine->ce_device,
+ engine->hw_instance);
+ if (err) {
+ DMERR("%s qcrypto_cipher_set_device_hw failed with err %d\n",
+ __func__, err);
+ goto skcipher_req_alloc_failure;
+ }
+ init_completion(&result.completion);
+ (dm_qcrypto_func.cipher_flag)(req,
+ QCRYPTO_CTX_USE_PIPE_KEY | QCRYPTO_CTX_XTS_DU_SIZE_512B);
+
+ crypto_skcipher_clear_flags(tfm, ~0);
+ crypto_skcipher_setkey(tfm, NULL, KEY_SIZE_XTS);
+
+ skcipher_request_set_crypt(req, io->req_split_sg_read,
+ io->req_split_sg_read, io->size, (void *) io->IV);
+
+ err = crypto_skcipher_decrypt(req);
+ switch (err) {
+ case 0:
+ break;
+
+ case -EBUSY:
+ /*
+		 * Let's make this request synchronous by also waiting
+		 * while it is in progress
+ */
+ case -EINPROGRESS:
+ wait_for_completion_io(&result.completion);
+ if (result.err) {
+			DMERR("%s error = %d decrypting the request\n",
+ __func__, result.err);
+ err = DM_REQ_CRYPT_ERROR;
+ goto skcipher_req_alloc_failure;
+ }
+ break;
+
+ default:
+ err = DM_REQ_CRYPT_ERROR;
+ goto skcipher_req_alloc_failure;
+ }
+ err = 0;
+skcipher_req_alloc_failure:
+ if (req)
+ skcipher_request_free(req);
+
+ req_crypt_split_io_complete(&io->result, err);
+}
+
+static void req_cryptd_split_req_queue(struct req_dm_split_req_io *io)
+{
+ INIT_WORK(&io->work, req_cryptd_split_req_queue_cb);
+ queue_work(req_crypt_split_io_queue, &io->work);
+}
+
+static void req_cryptd_queue_crypt(struct req_dm_crypt_io *io)
+{
+ INIT_WORK(&io->work, req_cryptd_crypt);
+ queue_work(req_crypt_queue, &io->work);
+}
+
+/*
+ * Cipher completion callback; triggered by the Linux crypto API once
+ * the operation is done. It signals the waiting thread that the crypto
+ * operation is complete.
+ */
+static void req_crypt_cipher_complete(struct crypto_async_request *req, int err)
+{
+ struct req_crypt_result *res = req->data;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ res->err = err;
+ complete(&res->completion);
+}
+
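The callback above pairs with a wait on the submitting side. Below is a minimal
sketch of the synchronous-wait pattern this file uses around the asynchronous
skcipher API; the helper name is invented for illustration, and tfm,
scatterlists, IV, and length are assumed to have been set on the request by the
caller, as in the convert paths above:

	static int req_crypt_run_sync(struct skcipher_request *req,
				      struct req_crypt_result *res, bool encrypt)
	{
		int rc;

		init_completion(&res->completion);
		skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					      req_crypt_cipher_complete, res);
		rc = encrypt ? crypto_skcipher_encrypt(req) :
			       crypto_skcipher_decrypt(req);
		if (rc == -EBUSY || rc == -EINPROGRESS) {
			/* req_crypt_cipher_complete() fills res->err and completes */
			wait_for_completion_interruptible(&res->completion);
			rc = res->err;
		}
		return rc;
	}
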
+static void req_crypt_split_io_complete(struct req_crypt_result *res, int err)
+{
+ if (err == -EINPROGRESS)
+ return;
+
+ res->err = err;
+ complete(&res->completion);
+}
+/*
+ * If bio->bi_bdev is a partition, remap the location to the
+ * containing whole disk
+ */
+static inline void req_crypt_blk_partition_remap(struct bio *bio)
+{
+ struct block_device *bdev = bio->bi_bdev;
+
+ if (bio_sectors(bio) && bdev != bdev->bd_contains) {
+ struct hd_struct *p = bdev->bd_part;
+ /*
+ * Check for integer overflow, should never happen.
+ */
+ if (p->start_sect > (UINT_MAX - bio->bi_iter.bi_sector))
+ WARN_ON(1);
+
+ bio->bi_iter.bi_sector += p->start_sect;
+ bio->bi_bdev = bdev->bd_contains;
+ }
+}
+
+/*
+ * The endio function is called from ksoftirqd context (atomic).
+ * For write operations, the new pages created from the mempool
+ * are freed and returned. For read operations, decryption is
+ * required; since this is called in an atomic context, the
+ * request is sent to a worker queue to complete decryption and
+ * free the request once done.
+ */
+static int req_crypt_endio(struct dm_target *ti, struct request *clone,
+ int error, union map_info *map_context)
+{
+ int err = 0;
+ struct req_iterator iter1;
+ struct bio_vec bvec;
+ struct req_dm_crypt_io *req_io = map_context->ptr;
+
+ /* If it is for ICE, free up req_io and return */
+ if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT) {
+ mempool_free(req_io, req_io_pool);
+ err = error;
+ goto submit_request;
+ }
+
+ if (rq_data_dir(clone) == WRITE) {
+ rq_for_each_segment(bvec, clone, iter1) {
+ if (req_io->should_encrypt && bvec.bv_offset == 0) {
+ mempool_free(bvec.bv_page, req_page_pool);
+ bvec.bv_page = NULL;
+ } else
+ bvec.bv_page = NULL;
+ }
+ mempool_free(req_io, req_io_pool);
+ goto submit_request;
+ } else if (rq_data_dir(clone) == READ) {
+ req_io->error = error;
+ req_cryptd_queue_crypt(req_io);
+ err = DM_ENDIO_INCOMPLETE;
+ goto submit_request;
+ }
+
+submit_request:
+ return err;
+}
+
+/*
+ * This function is called with interrupts disabled.
+ * It remaps the clone for the underlying device. If it is a write
+ * request, it calls into the worker queue to encrypt the data and
+ * submits the request directly using the elevator. For a read
+ * request no pre-processing is required; the request is returned
+ * to dm once mapping is done.
+ */
+static int req_crypt_map(struct dm_target *ti, struct request *clone,
+ union map_info *map_context)
+{
+ struct req_dm_crypt_io *req_io = NULL;
+ int error = DM_REQ_CRYPT_ERROR, copy_bio_sector_to_req = 0;
+ struct bio *bio_src = NULL;
+ gfp_t gfp_flag = GFP_KERNEL;
+
+ if (in_interrupt() || irqs_disabled())
+ gfp_flag = GFP_NOWAIT;
+
+ req_io = mempool_alloc(req_io_pool, gfp_flag);
+ if (!req_io) {
+ WARN_ON(1);
+ error = DM_REQ_CRYPT_ERROR;
+ goto submit_request;
+ }
+
+	/*
+	 * Save the clone in the req_io; the callback to the worker
+	 * queue will get the req_io
+	 */
+ req_io->cloned_request = clone;
+ map_context->ptr = req_io;
+ atomic_set(&req_io->pending, 0);
+
+ if (rq_data_dir(clone) == WRITE)
+ req_io->should_encrypt = req_crypt_should_encrypt(req_io);
+ if (rq_data_dir(clone) == READ)
+ req_io->should_decrypt = req_crypt_should_deccrypt(req_io);
+
+ /* Get the queue of the underlying original device */
+ clone->q = bdev_get_queue(dev->bdev);
+ clone->rq_disk = dev->bdev->bd_disk;
+
+ __rq_for_each_bio(bio_src, clone) {
+ bio_src->bi_bdev = dev->bdev;
+		/*
+		 * Currently req-dm works as follows: the underlying device
+		 * driver completes the request by calling into the block
+		 * layer, which completes the bios (clones) and then the
+		 * cloned request. This is undesirable for req-dm-crypt,
+		 * hence the BIO_DONTFREE flag: it ensures the block layer
+		 * does not complete the cloned bios before completing the
+		 * request. When the crypt endio is called, post-processing
+		 * is done and then the dm layer completes the bios (clones)
+		 * and frees them.
+		 */
+ if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT)
+ bio_src->bi_flags |= 1 << BIO_INLINECRYPT;
+ else
+ bio_src->bi_flags |= 1 << BIO_DONTFREE;
+
+ /*
+ * If this device has partitions, remap block n
+ * of partition p to block n+start(p) of the disk.
+ */
+ req_crypt_blk_partition_remap(bio_src);
+ if (copy_bio_sector_to_req == 0) {
+ clone->__sector = bio_src->bi_iter.bi_sector;
+ copy_bio_sector_to_req++;
+ }
+ blk_queue_bounce(clone->q, &bio_src);
+ }
+
+ if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT) {
+ /* Set all crypto parameters for inline crypto engine */
+ memcpy(&req_io->ice_settings, ice_settings,
+ sizeof(struct ice_crypto_setting));
+ } else {
+		/*
+		 * ICE checks for a key_index >= 0. If a chip has both ICE
+		 * and GPCE and wants to use GPCE, there could be an issue:
+		 * the storage driver sends all requests to the ICE driver,
+		 * and if it sees key_index as 0 it would assume the request
+		 * is for ICE while it is not. Hence set an invalid key
+		 * index by default.
+		 */
+ req_io->ice_settings.key_index = -1;
+ }
+
+ if (rq_data_dir(clone) == READ ||
+ encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT) {
+ error = DM_MAPIO_REMAPPED;
+ goto submit_request;
+ } else if (rq_data_dir(clone) == WRITE) {
+ req_cryptd_queue_crypt(req_io);
+ error = DM_MAPIO_SUBMITTED;
+ goto submit_request;
+ }
+
+submit_request:
+ return error;
+
+}
+
+static void deconfigure_qcrypto(void)
+{
+ mempool_destroy(req_page_pool);
+ req_page_pool = NULL;
+
+ mempool_destroy(req_scatterlist_pool);
+ req_scatterlist_pool = NULL;
+
+ if (req_crypt_split_io_queue) {
+ destroy_workqueue(req_crypt_split_io_queue);
+ req_crypt_split_io_queue = NULL;
+ }
+ if (req_crypt_queue) {
+ destroy_workqueue(req_crypt_queue);
+ req_crypt_queue = NULL;
+ }
+
+ kmem_cache_destroy(_req_dm_scatterlist_pool);
+
+ mutex_lock(&engine_list_mutex);
+ kfree(pfe_eng);
+ pfe_eng = NULL;
+ kfree(fde_eng);
+ fde_eng = NULL;
+ mutex_unlock(&engine_list_mutex);
+
+ if (tfm) {
+ crypto_free_skcipher(tfm);
+ tfm = NULL;
+ }
+}
+
+static void req_crypt_dtr(struct dm_target *ti)
+{
+ DMDEBUG("dm-req-crypt Destructor.\n");
+
+ mempool_destroy(req_io_pool);
+ req_io_pool = NULL;
+
+ if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT) {
+ kfree(ice_settings);
+ ice_settings = NULL;
+ } else {
+ deconfigure_qcrypto();
+ }
+
+ kmem_cache_destroy(_req_crypt_io_pool);
+
+ if (dev) {
+ dm_put_device(ti, dev);
+ dev = NULL;
+ }
+}
+
+static int configure_qcrypto(void)
+{
+ struct crypto_engine_entry *eng_list = NULL;
+ struct block_device *bdev = NULL;
+ int err = DM_REQ_CRYPT_ERROR, i;
+ struct request_queue *q = NULL;
+
+ bdev = dev->bdev;
+ q = bdev_get_queue(bdev);
+ blk_queue_max_hw_sectors(q, DM_REQ_CRYPT_QUEUE_SIZE);
+
+ /* Allocate the crypto alloc blk cipher and keep the handle */
+ tfm = crypto_alloc_skcipher("qcom-xts(aes)", 0, 0);
+ if (IS_ERR(tfm)) {
+		DMERR("%s skcipher tfm allocation failed\n",
+ __func__);
+ tfm = NULL;
+ goto exit_err;
+ }
+
+ num_engines_fde = num_engines_pfe = 0;
+
+ mutex_lock(&engine_list_mutex);
+ num_engines = (dm_qcrypto_func.get_num_engines)();
+ if (!num_engines) {
+		DMERR("%s qcrypto_get_num_engines failed\n",
+ __func__);
+ err = DM_REQ_CRYPT_ERROR;
+ mutex_unlock(&engine_list_mutex);
+ goto exit_err;
+ }
+
+ eng_list = kcalloc(num_engines, sizeof(*eng_list), GFP_KERNEL);
+ if (eng_list == NULL) {
+ DMERR("%s engine list allocation failed\n", __func__);
+ err = DM_REQ_CRYPT_ERROR;
+ mutex_unlock(&engine_list_mutex);
+ goto exit_err;
+ }
+
+ (dm_qcrypto_func.get_engine_list)(num_engines, eng_list);
+
+ for (i = 0; i < num_engines; i++) {
+ if (eng_list[i].ce_device == FDE_KEY_ID)
+ num_engines_fde++;
+ if (eng_list[i].ce_device == PFE_KEY_ID)
+ num_engines_pfe++;
+ }
+
+ fde_eng = kcalloc(num_engines_fde, sizeof(*fde_eng), GFP_KERNEL);
+ if (fde_eng == NULL) {
+ DMERR("%s fde engine list allocation failed\n", __func__);
+ mutex_unlock(&engine_list_mutex);
+ goto exit_err;
+ }
+
+ pfe_eng = kcalloc(num_engines_pfe, sizeof(*pfe_eng), GFP_KERNEL);
+ if (pfe_eng == NULL) {
+ DMERR("%s pfe engine list allocation failed\n", __func__);
+ mutex_unlock(&engine_list_mutex);
+ goto exit_err;
+ }
+
+ fde_cursor = 0;
+ pfe_cursor = 0;
+
+ for (i = 0; i < num_engines; i++) {
+ if (eng_list[i].ce_device == FDE_KEY_ID)
+ fde_eng[fde_cursor++] = eng_list[i];
+ if (eng_list[i].ce_device == PFE_KEY_ID)
+ pfe_eng[pfe_cursor++] = eng_list[i];
+ }
+
+ fde_cursor = 0;
+ pfe_cursor = 0;
+ mutex_unlock(&engine_list_mutex);
+
+ _req_dm_scatterlist_pool = kmem_cache_create("req_dm_scatterlist",
+ sizeof(struct scatterlist) * MAX_SG_LIST,
+ __alignof__(struct scatterlist), 0, NULL);
+ if (!_req_dm_scatterlist_pool)
+ goto exit_err;
+
+ req_crypt_queue = alloc_workqueue("req_cryptd",
+ WQ_UNBOUND |
+ WQ_CPU_INTENSIVE |
+ WQ_MEM_RECLAIM,
+ 0);
+ if (!req_crypt_queue) {
+ DMERR("%s req_crypt_queue not allocated\n", __func__);
+ goto exit_err;
+ }
+
+ req_crypt_split_io_queue = alloc_workqueue("req_crypt_split",
+ WQ_UNBOUND |
+ WQ_CPU_INTENSIVE |
+ WQ_MEM_RECLAIM,
+ 0);
+ if (!req_crypt_split_io_queue) {
+ DMERR("%s req_crypt_split_io_queue not allocated\n", __func__);
+ goto exit_err;
+ }
+ req_scatterlist_pool = mempool_create_slab_pool(MIN_IOS,
+ _req_dm_scatterlist_pool);
+ if (!req_scatterlist_pool) {
+ DMERR("%s req_scatterlist_pool is not allocated\n", __func__);
+ err = -ENOMEM;
+ goto exit_err;
+ }
+
+ req_page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
+ if (!req_page_pool) {
+ DMERR("%s req_page_pool not allocated\n", __func__);
+ goto exit_err;
+ }
+
+ err = 0;
+
+exit_err:
+ kfree(eng_list);
+ return err;
+}
+
+/*
+ * Construct an encryption mapping:
+ * <cipher> <key> <iv_offset> <dev_path> <start> [fde_enabled] [ice]
+ */
+static int req_crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+ int err = DM_REQ_CRYPT_ERROR;
+ unsigned long long tmpll;
+ char dummy;
+ int ret;
+
+ DMDEBUG("dm-req-crypt Constructor.\n");
+
+ if (argc < 5) {
+ DMERR(" %s Not enough args\n", __func__);
+ err = DM_REQ_CRYPT_ERROR;
+ goto ctr_exit;
+ }
+
+ if (argv[3]) {
+ if (dm_get_device(ti, argv[3],
+ dm_table_get_mode(ti->table), &dev)) {
+ DMERR(" %s Device Lookup failed\n", __func__);
+ err = DM_REQ_CRYPT_ERROR;
+ goto ctr_exit;
+ }
+ } else {
+ DMERR(" %s Arg[3] invalid\n", __func__);
+ err = DM_REQ_CRYPT_ERROR;
+ goto ctr_exit;
+ }
+
+ if (argv[4]) {
+ if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
+ DMERR("%s Invalid device sector\n", __func__);
+ err = DM_REQ_CRYPT_ERROR;
+ goto ctr_exit;
+ }
+ } else {
+ DMERR(" %s Arg[4] invalid\n", __func__);
+ err = DM_REQ_CRYPT_ERROR;
+ goto ctr_exit;
+ }
+ start_sector_orig = tmpll;
+
+	/* Allow backward compatibility */
+ if (argc >= 6) {
+ if (argv[5]) {
+ if (!strcmp(argv[5], "fde_enabled"))
+ is_fde_enabled = true;
+ else
+ is_fde_enabled = false;
+ } else {
+ DMERR(" %s Arg[5] invalid\n", __func__);
+ err = DM_REQ_CRYPT_ERROR;
+ goto ctr_exit;
+ }
+ } else {
+ DMERR(" %s Arg[5] missing, set FDE enabled.\n", __func__);
+ is_fde_enabled = true; /* backward compatible */
+ }
+
+ _req_crypt_io_pool = KMEM_CACHE(req_dm_crypt_io, 0);
+ if (!_req_crypt_io_pool) {
+ err = DM_REQ_CRYPT_ERROR;
+ goto ctr_exit;
+ }
+
+ encryption_mode = DM_REQ_CRYPT_ENCRYPTION_MODE_CRYPTO;
+ if (argc >= 7 && argv[6]) {
+ if (!strcmp(argv[6], "ice"))
+ encryption_mode =
+ DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT;
+ }
+
+ if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT) {
+ /* configure ICE settings */
+ ice_settings =
+ kzalloc(sizeof(struct ice_crypto_setting), GFP_KERNEL);
+ if (!ice_settings) {
+ err = -ENOMEM;
+ goto ctr_exit;
+ }
+ ice_settings->key_size = ICE_CRYPTO_KEY_SIZE_128;
+ ice_settings->algo_mode = ICE_CRYPTO_ALGO_MODE_AES_XTS;
+ ice_settings->key_mode = ICE_CRYPTO_USE_LUT_SW_KEY;
+ if (kstrtou16(argv[1], 0, &ice_settings->key_index) ||
+ ice_settings->key_index < 0 ||
+ ice_settings->key_index > MAX_MSM_ICE_KEY_LUT_SIZE) {
+ DMERR("%s Err: key index %d received for ICE\n",
+ __func__, ice_settings->key_index);
+ err = DM_REQ_CRYPT_ERROR;
+ goto ctr_exit;
+ }
+ } else {
+ ret = configure_qcrypto();
+ if (ret) {
+ DMERR("%s failed to configure qcrypto\n", __func__);
+ err = ret;
+ goto ctr_exit;
+ }
+ }
+
+ req_io_pool = mempool_create_slab_pool(MIN_IOS, _req_crypt_io_pool);
+ if (!req_io_pool) {
+ DMERR("%s req_io_pool not allocated\n", __func__);
+ err = -ENOMEM;
+ goto ctr_exit;
+ }
+
+ /*
+ * If underlying device supports flush/discard, mapped target
+ * should also allow it
+ */
+ ti->num_flush_bios = 1;
+ ti->num_discard_bios = 1;
+
+ err = 0;
+ DMINFO("%s: Mapping block_device %s to dm-req-crypt ok!\n",
+ __func__, argv[3]);
+ctr_exit:
+ if (err)
+ req_crypt_dtr(ti);
+
+ return err;
+}
+
+static int req_crypt_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
+{
+ return fn(ti, dev, start_sector_orig, ti->len, data);
+}
+void set_qcrypto_func_dm(void *dev,
+ void *flag,
+ void *engines,
+ void *engine_list)
+{
+ dm_qcrypto_func.cipher_set = dev;
+ dm_qcrypto_func.cipher_flag = flag;
+ dm_qcrypto_func.get_num_engines = engines;
+ dm_qcrypto_func.get_engine_list = engine_list;
+}
+EXPORT_SYMBOL(set_qcrypto_func_dm);
+
+static struct target_type req_crypt_target = {
+ .name = "req-crypt",
+ .version = {1, 0, 0},
+ .module = THIS_MODULE,
+ .ctr = req_crypt_ctr,
+ .dtr = req_crypt_dtr,
+ .map_rq = req_crypt_map,
+ .rq_end_io = req_crypt_endio,
+ .iterate_devices = req_crypt_iterate_devices,
+};
+
+static int __init req_dm_crypt_init(void)
+{
+ int r;
+
+ r = dm_register_target(&req_crypt_target);
+ if (r < 0) {
+ DMERR("register failed %d", r);
+ return r;
+ }
+
+	DMINFO("dm-req-crypt successfully initialized.\n");
+
+ return r;
+}
+
+static void __exit req_dm_crypt_exit(void)
+{
+ dm_unregister_target(&req_crypt_target);
+}
+
+module_init(req_dm_crypt_init);
+module_exit(req_dm_crypt_exit);
+
+MODULE_DESCRIPTION(DM_NAME " target for request based transparent encryption / decryption");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index ba7c4c6..bca4c0e 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -283,7 +283,7 @@
* Must be called without clone's queue lock held,
* see end_clone_request() for more details.
*/
-static void dm_end_request(struct request *clone, int error)
+void dm_end_request(struct request *clone, int error)
{
int rw = rq_data_dir(clone);
struct dm_rq_target_io *tio = clone->end_io_data;
@@ -464,7 +464,7 @@
* Target's rq_end_io() function isn't called.
* This may be used when the target's map_rq() or clone_and_map_rq() functions fail.
*/
-static void dm_kill_unmapped_request(struct request *rq, int error)
+void dm_kill_unmapped_request(struct request *rq, int error)
{
rq->cmd_flags |= REQ_FAILED;
dm_complete_request(rq, error);
@@ -512,6 +512,13 @@
dm_complete_request(rq, r);
}
+void dm_dispatch_request(struct request *rq)
+{
+ struct dm_rq_target_io *tio = tio_from_request(rq);
+
+ dm_dispatch_clone_request(tio->clone, rq);
+}
+
static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
void *data)
{
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index 8d54e20..40c306d 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -10,9 +10,6 @@
* GNU General Public License for more details.
*
*/
-#include <linux/errno.h>
-#include <linux/log2.h>
-#include <linux/hash.h>
#include "hfi_packetization.h"
#include "msm_vidc_debug.h"
@@ -868,8 +865,6 @@
output_frame->device_addr, output_frame->timestamp,
output_frame->alloc_len, output_frame->filled_len,
output_frame->offset);
- dprintk(VIDC_DBG, "### Q OUTPUT BUFFER ###: %d, %d, %d\n",
- pkt->alloc_len, pkt->filled_len, pkt->offset);
return rc;
}
diff --git a/drivers/media/platform/msm/vidc/hfi_response_handler.c b/drivers/media/platform/msm/vidc/hfi_response_handler.c
index 08cb055..c2a93a96 100644
--- a/drivers/media/platform/msm/vidc/hfi_response_handler.c
+++ b/drivers/media/platform/msm/vidc/hfi_response_handler.c
@@ -1074,15 +1074,6 @@
}
while (req_bytes) {
- if (hfi_buf_req->buffer_size &&
- hfi_buf_req->buffer_count_min > hfi_buf_req->
- buffer_count_actual)
- dprintk(VIDC_WARN,
- "Bad buffer requirements for %#x: min %d, actual %d\n",
- hfi_buf_req->buffer_type,
- hfi_buf_req->buffer_count_min,
- hfi_buf_req->buffer_count_actual);
-
dprintk(VIDC_DBG, "got buffer requirements for: %d\n",
hfi_buf_req->buffer_type);
switch (hfi_buf_req->buffer_type) {
@@ -1329,10 +1320,8 @@
pkt->ubwc_cr_stats.complexity_number;
data_done.input_done.offset = pkt->offset;
data_done.input_done.filled_len = pkt->filled_len;
- data_done.input_done.packet_buffer =
- (ion_phys_addr_t)pkt->packet_buffer;
- data_done.input_done.extra_data_buffer =
- (ion_phys_addr_t)pkt->extra_data_buffer;
+ data_done.input_done.packet_buffer = pkt->packet_buffer;
+ data_done.input_done.extra_data_buffer = pkt->extra_data_buffer;
data_done.input_done.status =
hfi_map_err_status(pkt->error_type);
hfi_picture_type = (struct hfi_picture_type *)&pkt->rgData[0];
@@ -1413,10 +1402,9 @@
data_done.output_done.alloc_len1 = pkt->alloc_len;
data_done.output_done.filled_len1 = pkt->filled_len;
data_done.output_done.picture_type = pkt->picture_type;
- data_done.output_done.packet_buffer1 =
- (ion_phys_addr_t)pkt->packet_buffer;
+ data_done.output_done.packet_buffer1 = pkt->packet_buffer;
data_done.output_done.extra_data_buffer =
- (ion_phys_addr_t)pkt->extra_data_buffer;
+ pkt->extra_data_buffer;
data_done.output_done.buffer_type = HAL_BUFFER_OUTPUT;
} else /* if (is_decoder) */ {
struct hfi_msg_session_fbd_uncompressed_plane0_packet *pkt =
diff --git a/drivers/media/platform/msm/vidc/msm_smem.c b/drivers/media/platform/msm/vidc/msm_smem.c
index b116622..9b23376 100644
--- a/drivers/media/platform/msm/vidc/msm_smem.c
+++ b/drivers/media/platform/msm/vidc/msm_smem.c
@@ -30,7 +30,7 @@
enum session_type session_type;
};
-static int get_device_address(struct smem_client *smem_client,
+static int msm_ion_get_device_address(struct smem_client *smem_client,
struct ion_handle *hndl, unsigned long align,
ion_phys_addr_t *iova, unsigned long *buffer_size,
unsigned long flags, enum hal_buffer buffer_type,
@@ -122,12 +122,6 @@
goto mem_map_sg_failed;
}
if (table->sgl) {
- dprintk(VIDC_DBG,
- "%s: CB : %s, DMA buf: %pK, device: %pK, attach: %pK, table: %pK, table sgl: %pK, rc: %d, dma_address: %pa\n",
- __func__, cb->name, buf, cb->dev, attach,
- table, table->sgl, rc,
- &table->sgl->dma_address);
-
*iova = table->sgl->dma_address;
*buffer_size = table->sgl->dma_length;
} else {
@@ -153,7 +147,6 @@
}
}
- dprintk(VIDC_DBG, "mapped ion handle %pK to %pa\n", hndl, iova);
return 0;
mem_map_sg_failed:
dma_buf_unmap_attachment(attach, table, DMA_BIDIRECTIONAL);
@@ -166,38 +159,26 @@
return rc;
}
-static void put_device_address(struct smem_client *smem_client,
+static int msm_ion_put_device_address(struct smem_client *smem_client,
struct ion_handle *hndl, u32 flags,
struct dma_mapping_info *mapping_info,
enum hal_buffer buffer_type)
{
- struct ion_client *clnt = NULL;
+ int rc = 0;
if (!hndl || !smem_client || !mapping_info) {
dprintk(VIDC_WARN, "Invalid params: %pK, %pK\n",
smem_client, hndl);
- return;
+ return -EINVAL;
}
if (!mapping_info->dev || !mapping_info->table ||
!mapping_info->buf || !mapping_info->attach) {
dprintk(VIDC_WARN, "Invalid params:\n");
- return;
+ return -EINVAL;
}
- clnt = smem_client->clnt;
- if (!clnt) {
- dprintk(VIDC_WARN, "Invalid client\n");
- return;
- }
if (is_iommu_present(smem_client->res)) {
- dprintk(VIDC_DBG,
- "Calling dma_unmap_sg - device: %pK, address: %pa, buf: %pK, table: %pK, attach: %pK\n",
- mapping_info->dev,
- &mapping_info->table->sgl->dma_address,
- mapping_info->buf, mapping_info->table,
- mapping_info->attach);
-
trace_msm_smem_buffer_iommu_op_start("UNMAP", 0, 0, 0, 0, 0);
msm_dma_unmap_sg(mapping_info->dev, mapping_info->table->sgl,
mapping_info->table->nents, DMA_BIDIRECTIONAL,
@@ -207,68 +188,257 @@
dma_buf_detach(mapping_info->buf, mapping_info->attach);
dma_buf_put(mapping_info->buf);
trace_msm_smem_buffer_iommu_op_end("UNMAP", 0, 0, 0, 0, 0);
+
+ mapping_info->dev = NULL;
+ mapping_info->mapping = NULL;
+ mapping_info->table = NULL;
+ mapping_info->attach = NULL;
+ mapping_info->buf = NULL;
}
+
+ return rc;
}
-static int ion_user_to_kernel(struct smem_client *client, int fd, u32 size,
- struct msm_smem *mem, enum hal_buffer buffer_type)
+static void *msm_ion_get_dma_buf(int fd)
{
- struct ion_handle *hndl = NULL;
- ion_phys_addr_t iova = 0;
- unsigned long buffer_size = size;
+ struct dma_buf *dma_buf;
+
+ dma_buf = dma_buf_get(fd);
+ if (IS_ERR_OR_NULL(dma_buf)) {
+ dprintk(VIDC_ERR, "Failed to get dma_buf for %d, error %ld\n",
+ fd, PTR_ERR(dma_buf));
+ dma_buf = NULL;
+ }
+
+ return dma_buf;
+}
+
+void *msm_smem_get_dma_buf(int fd)
+{
+ return (void *)msm_ion_get_dma_buf(fd);
+}
+
+static void msm_ion_put_dma_buf(struct dma_buf *dma_buf)
+{
+ if (!dma_buf) {
+ dprintk(VIDC_ERR, "%s: Invalid params: %pK\n",
+ __func__, dma_buf);
+ return;
+ }
+
+ dma_buf_put(dma_buf);
+}
+
+void msm_smem_put_dma_buf(void *dma_buf)
+{
+ return msm_ion_put_dma_buf((struct dma_buf *)dma_buf);
+}
+
+static struct ion_handle *msm_ion_get_handle(void *ion_client,
+ struct dma_buf *dma_buf)
+{
+ struct ion_handle *handle;
+
+ if (!ion_client || !dma_buf) {
+ dprintk(VIDC_ERR, "%s: Invalid params: %pK %pK\n",
+ __func__, ion_client, dma_buf);
+ return NULL;
+ }
+
+ handle = ion_import_dma_buf(ion_client, dma_buf);
+ if (IS_ERR_OR_NULL(handle)) {
+ dprintk(VIDC_ERR, "Failed to get ion_handle: %pK, %pK, %ld\n",
+ ion_client, dma_buf, PTR_ERR(handle));
+ handle = NULL;
+ }
+
+ return handle;
+}
+
+void *msm_smem_get_handle(struct smem_client *client, void *dma_buf)
+{
+ if (!client)
+ return NULL;
+
+ return (void *)msm_ion_get_handle(client->clnt,
+ (struct dma_buf *)dma_buf);
+}
+
+static void msm_ion_put_handle(struct ion_client *ion_client,
+ struct ion_handle *ion_handle)
+{
+ if (!ion_client || !ion_handle) {
+ dprintk(VIDC_ERR, "%s: Invalid params: %pK %pK\n",
+ __func__, ion_client, ion_handle);
+ return;
+ }
+
+ ion_free(ion_client, ion_handle);
+}
+
+void msm_smem_put_handle(struct smem_client *client, void *handle)
+{
+ if (!client) {
+ dprintk(VIDC_ERR, "%s: Invalid params %pK %pK\n",
+ __func__, client, handle);
+ return;
+ }
+ return msm_ion_put_handle(client->clnt, (struct ion_handle *)handle);
+}
+
+static int msm_ion_map_dma_buf(struct msm_vidc_inst *inst,
+ struct msm_smem *smem)
+{
int rc = 0;
+ ion_phys_addr_t iova = 0;
+ u32 temp = 0;
+ unsigned long buffer_size = 0;
unsigned long align = SZ_4K;
unsigned long ion_flags = 0;
+ struct ion_client *ion_client;
+ struct ion_handle *ion_handle;
+ struct dma_buf *dma_buf;
-#ifdef CONFIG_ION
- hndl = ion_import_dma_buf_fd(client->clnt, fd);
-#endif
- dprintk(VIDC_DBG, "%s ion handle: %pK\n", __func__, hndl);
- if (IS_ERR_OR_NULL(hndl)) {
- dprintk(VIDC_ERR, "Failed to get handle: %pK, %d, %d, %pK\n",
- client, fd, size, hndl);
- rc = -ENOMEM;
- goto fail_import_fd;
+ if (!inst || !inst->mem_client || !inst->mem_client->clnt) {
+ dprintk(VIDC_ERR, "%s: Invalid params: %pK %pK\n",
+ __func__, inst, smem);
+ return -EINVAL;
}
- mem->kvaddr = NULL;
- rc = ion_handle_get_flags(client->clnt, hndl, &ion_flags);
+ ion_client = inst->mem_client->clnt;
+ dma_buf = msm_ion_get_dma_buf(smem->fd);
+ if (!dma_buf)
+ return -EINVAL;
+ ion_handle = msm_ion_get_handle(ion_client, dma_buf);
+ if (!ion_handle)
+ return -EINVAL;
+
+ smem->dma_buf = dma_buf;
+ smem->handle = ion_handle;
+ rc = ion_handle_get_flags(ion_client, ion_handle, &ion_flags);
if (rc) {
dprintk(VIDC_ERR, "Failed to get ion flags: %d\n", rc);
- goto fail_device_address;
+ goto exit;
}
- mem->buffer_type = buffer_type;
if (ion_flags & ION_FLAG_CACHED)
- mem->flags |= SMEM_CACHED;
+ smem->flags |= SMEM_CACHED;
if (ion_flags & ION_FLAG_SECURE)
- mem->flags |= SMEM_SECURE;
+ smem->flags |= SMEM_SECURE;
- rc = get_device_address(client, hndl, align, &iova, &buffer_size,
- mem->flags, buffer_type, &mem->mapping_info);
+ rc = msm_ion_get_device_address(inst->mem_client, ion_handle,
+ align, &iova, &buffer_size, smem->flags,
+ smem->buffer_type, &smem->mapping_info);
if (rc) {
dprintk(VIDC_ERR, "Failed to get device address: %d\n", rc);
- goto fail_device_address;
+ goto exit;
+ }
+ temp = (u32)iova;
+ if ((ion_phys_addr_t)temp != iova) {
+ dprintk(VIDC_ERR, "iova(%pa) truncated to %#x", &iova, temp);
+ rc = -EINVAL;
+ goto exit;
}
- mem->mem_type = client->mem_type;
- mem->smem_priv = hndl;
- mem->device_addr = iova;
- mem->size = buffer_size;
- if ((u32)mem->device_addr != iova) {
- dprintk(VIDC_ERR, "iova(%pa) truncated to %#x",
- &iova, (u32)mem->device_addr);
- goto fail_device_address;
- }
- dprintk(VIDC_DBG,
- "%s: ion_handle = %pK, fd = %d, device_addr = %pa, size = %zx, kvaddr = %pK, buffer_type = %d, flags = %#lx\n",
- __func__, mem->smem_priv, fd, &mem->device_addr, mem->size,
- mem->kvaddr, mem->buffer_type, mem->flags);
+ smem->device_addr = (u32)iova + smem->offset;
+
+exit:
return rc;
-fail_device_address:
- ion_free(client->clnt, hndl);
-fail_import_fd:
+}
+
+int msm_smem_map_dma_buf(struct msm_vidc_inst *inst, struct msm_smem *smem)
+{
+ int rc = 0;
+
+ if (!inst || !smem) {
+ dprintk(VIDC_ERR, "%s: Invalid params: %pK %pK\n",
+ __func__, inst, smem);
+ return -EINVAL;
+ }
+
+ if (smem->refcount) {
+ smem->refcount++;
+ return rc;
+ }
+
+ switch (inst->mem_client->mem_type) {
+ case SMEM_ION:
+ rc = msm_ion_map_dma_buf(inst, smem);
+ break;
+ default:
+ dprintk(VIDC_ERR, "%s: Unknown mem_type %d\n",
+ __func__, inst->mem_client->mem_type);
+ rc = -EINVAL;
+ break;
+ }
+ if (!rc)
+ smem->refcount++;
+
+ return rc;
+}
+
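Editorial aside: the map path below is made idempotent by a per-buffer
refcount; only the first msm_smem_map_dma_buf() call performs the ION import
and mapping, later calls just bump the count. A minimal sketch of the same
guard pattern, with hypothetical names:

    struct example_buf {
        unsigned int refcount;
    };

    static int do_real_map(struct example_buf *buf)
    {
        return 0;   /* stand-in for the actual import + map work */
    }

    /* Sketch: map on first use, count every additional request. */
    static int example_map(struct example_buf *buf)
    {
        int rc = 0;

        if (buf->refcount) {
            buf->refcount++;    /* already mapped, just take a ref */
            return 0;
        }
        rc = do_real_map(buf);
        if (!rc)
            buf->refcount++;    /* first reference on success */
        return rc;
    }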
+static int msm_ion_unmap_dma_buf(struct msm_vidc_inst *inst,
+ struct msm_smem *smem)
+{
+ int rc = 0;
+
+ if (!inst || !inst->mem_client || !smem) {
+ dprintk(VIDC_ERR, "%s: Invalid params: %pK %pK\n",
+ __func__, inst, smem);
+ return -EINVAL;
+ }
+
+ rc = msm_ion_put_device_address(inst->mem_client, smem->handle,
+ smem->flags, &smem->mapping_info, smem->buffer_type);
+ if (rc) {
+ dprintk(VIDC_ERR, "Failed to put device address: %d\n", rc);
+ goto exit;
+ }
+
+ msm_ion_put_handle(inst->mem_client->clnt, smem->handle);
+ msm_ion_put_dma_buf(smem->dma_buf);
+
+ smem->device_addr = 0x0;
+ smem->handle = NULL;
+ smem->dma_buf = NULL;
+
+exit:
+ return rc;
+}
+
+int msm_smem_unmap_dma_buf(struct msm_vidc_inst *inst, struct msm_smem *smem)
+{
+ int rc = 0;
+
+ if (!inst || !smem) {
+ dprintk(VIDC_ERR, "%s: Invalid params: %pK %pK\n",
+ __func__, inst, smem);
+ return -EINVAL;
+ }
+
+ if (smem->refcount) {
+ smem->refcount--;
+ } else {
+ dprintk(VIDC_WARN,
+ "unmap called while refcount is zero already\n");
+ return -EINVAL;
+ }
+
+ if (smem->refcount)
+ return rc;
+
+ switch (inst->mem_client->mem_type) {
+ case SMEM_ION:
+ rc = msm_ion_unmap_dma_buf(inst, smem);
+ break;
+ default:
+ dprintk(VIDC_ERR, "%s: Unknown mem_type %d\n",
+ __func__, inst->mem_client->mem_type);
+ rc = -EINVAL;
+ break;
+ }
+
return rc;
}
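Editorial aside: the unmap side mirrors that guard; it refuses to go below
zero and only tears the mapping down when the last reference drops. Sketch,
reusing the hypothetical types from the map sketch above:

    static int do_real_unmap(struct example_buf *buf)
    {
        return 0;   /* stand-in for the actual unmap + put work */
    }

    /* Sketch: release one reference, unmap only on the last one. */
    static int example_unmap(struct example_buf *buf)
    {
        if (!buf->refcount) {
            pr_warn("unmap called while refcount is zero\n");
            return -EINVAL;
        }
        if (--buf->refcount)
            return 0;   /* other users still hold the mapping */
        return do_real_unmap(buf);
    }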
@@ -321,6 +491,12 @@
int rc = 0;
int ion_flags = 0;
+ if (!client || !mem) {
+ dprintk(VIDC_ERR, "%s: Invalid params: %pK %pK\n",
+ __func__, client, mem);
+ return -EINVAL;
+ }
+
align = ALIGN(align, SZ_4K);
size = ALIGN(size, SZ_4K);
@@ -366,10 +542,13 @@
}
trace_msm_smem_buffer_ion_op_end("ALLOC", (u32)buffer_type,
heap_mask, size, align, flags, map_kernel);
- mem->mem_type = client->mem_type;
- mem->smem_priv = hndl;
+
+ mem->handle = hndl;
mem->flags = flags;
mem->buffer_type = buffer_type;
+ mem->offset = 0;
+ mem->size = size;
+
if (map_kernel) {
mem->kvaddr = ion_map_kernel(client->clnt, hndl);
if (IS_ERR_OR_NULL(mem->kvaddr)) {
@@ -382,24 +561,23 @@
mem->kvaddr = NULL;
}
- rc = get_device_address(client, hndl, align, &iova, &buffer_size,
- flags, buffer_type, &mem->mapping_info);
+ rc = msm_ion_get_device_address(client, hndl, align, &iova,
+ &buffer_size, flags, buffer_type, &mem->mapping_info);
if (rc) {
dprintk(VIDC_ERR, "Failed to get device address: %d\n",
rc);
goto fail_device_address;
}
- mem->device_addr = iova;
- if ((u32)mem->device_addr != iova) {
+ mem->device_addr = (u32)iova;
+ if ((ion_phys_addr_t)mem->device_addr != iova) {
dprintk(VIDC_ERR, "iova(%pa) truncated to %#x",
- &iova, (u32)mem->device_addr);
+ &iova, mem->device_addr);
goto fail_device_address;
}
- mem->size = size;
dprintk(VIDC_DBG,
- "%s: ion_handle = %pK, device_addr = %pa, size = %#zx, kvaddr = %pK, buffer_type = %#x, flags = %#lx\n",
- __func__, mem->smem_priv, &mem->device_addr,
- mem->size, mem->kvaddr, mem->buffer_type, mem->flags);
+ "%s: ion_handle = %pK, device_addr = %x, size = %d, kvaddr = %pK, buffer_type = %#x, flags = %#lx\n",
+ __func__, mem->handle, mem->device_addr, mem->size,
+ mem->kvaddr, mem->buffer_type, mem->flags);
return rc;
fail_device_address:
if (mem->kvaddr)
@@ -410,30 +588,40 @@
return rc;
}
-static void free_ion_mem(struct smem_client *client, struct msm_smem *mem)
+static int free_ion_mem(struct smem_client *client, struct msm_smem *mem)
{
+ int rc = 0;
+
+ if (!client || !mem) {
+ dprintk(VIDC_ERR, "%s: Invalid params: %pK %pK\n",
+ __func__, client, mem);
+ return -EINVAL;
+ }
+
dprintk(VIDC_DBG,
- "%s: ion_handle = %pK, device_addr = %pa, size = %#zx, kvaddr = %pK, buffer_type = %#x\n",
- __func__, mem->smem_priv, &mem->device_addr,
- mem->size, mem->kvaddr, mem->buffer_type);
+ "%s: ion_handle = %pK, device_addr = %x, size = %d, kvaddr = %pK, buffer_type = %#x\n",
+ __func__, mem->handle, mem->device_addr, mem->size,
+ mem->kvaddr, mem->buffer_type);
if (mem->device_addr)
- put_device_address(client, mem->smem_priv, mem->flags,
+ msm_ion_put_device_address(client, mem->handle, mem->flags,
&mem->mapping_info, mem->buffer_type);
if (mem->kvaddr)
- ion_unmap_kernel(client->clnt, mem->smem_priv);
- if (mem->smem_priv) {
+ ion_unmap_kernel(client->clnt, mem->handle);
+
+ if (mem->handle) {
trace_msm_smem_buffer_ion_op_start("FREE",
(u32)mem->buffer_type, -1, mem->size, -1,
mem->flags, -1);
- dprintk(VIDC_DBG,
- "%s: Freeing handle %pK, client: %pK\n",
- __func__, mem->smem_priv, client->clnt);
- ion_free(client->clnt, mem->smem_priv);
+ ion_free(client->clnt, mem->handle);
trace_msm_smem_buffer_ion_op_end("FREE", (u32)mem->buffer_type,
-1, mem->size, -1, mem->flags, -1);
+ } else {
+ dprintk(VIDC_ERR, "%s: invalid ion_handle\n", __func__);
}
+
+ return rc;
}
static void *ion_new_client(void)
@@ -443,135 +631,105 @@
client = msm_ion_client_create("video_client");
if (!client)
dprintk(VIDC_ERR, "Failed to create smem client\n");
+
+ dprintk(VIDC_DBG, "%s: client %pK\n", __func__, client);
+
return client;
};
static void ion_delete_client(struct smem_client *client)
{
+ if (!client) {
+ dprintk(VIDC_ERR, "%s: Invalid params: %pK\n",
+ __func__, client);
+ return;
+ }
+
+ dprintk(VIDC_DBG, "%s: client %pK\n", __func__, client->clnt);
ion_client_destroy(client->clnt);
+ client->clnt = NULL;
}
-struct msm_smem *msm_smem_user_to_kernel(void *clt, int fd, u32 size,
- enum hal_buffer buffer_type)
+static int msm_ion_cache_operations(void *ion_client, void *ion_handle,
+ unsigned long offset, unsigned long size,
+ enum smem_cache_ops cache_op)
{
- struct smem_client *client = clt;
int rc = 0;
- struct msm_smem *mem;
-
- if (fd < 0) {
- dprintk(VIDC_ERR, "Invalid fd: %d\n", fd);
- return NULL;
- }
- mem = kzalloc(sizeof(*mem), GFP_KERNEL);
- if (!mem) {
- dprintk(VIDC_ERR, "Failed to allocate shared mem\n");
- return NULL;
- }
- switch (client->mem_type) {
- case SMEM_ION:
- rc = ion_user_to_kernel(clt, fd, size, mem, buffer_type);
- break;
- default:
- dprintk(VIDC_ERR, "Mem type not supported\n");
- rc = -EINVAL;
- break;
- }
- if (rc) {
- dprintk(VIDC_ERR, "Failed to allocate shared memory\n");
- kfree(mem);
- mem = NULL;
- }
- return mem;
-}
-
-bool msm_smem_compare_buffers(void *clt, int fd, void *priv)
-{
- struct smem_client *client = clt;
- struct ion_handle *handle = NULL;
- bool ret = false;
-
- if (!clt || !priv) {
- dprintk(VIDC_ERR, "Invalid params: %pK, %pK\n",
- clt, priv);
- return false;
- }
-#ifdef CONFIG_ION
- handle = ion_import_dma_buf_fd(client->clnt, fd);
-#endif
- ret = handle == priv;
- (!IS_ERR_OR_NULL(handle)) ? ion_free(client->clnt, handle) : 0;
- return ret;
-}
-
-static int ion_cache_operations(struct smem_client *client,
- struct msm_smem *mem, enum smem_cache_ops cache_op)
-{
- unsigned long ionflag = 0;
- int rc = 0;
+ unsigned long flags = 0;
int msm_cache_ops = 0;
- if (!mem || !client) {
- dprintk(VIDC_ERR, "Invalid params: %pK, %pK\n",
- mem, client);
+ if (!ion_client || !ion_handle) {
+ dprintk(VIDC_ERR, "%s: Invalid params: %pK %pK\n",
+ __func__, ion_client, ion_handle);
return -EINVAL;
}
- rc = ion_handle_get_flags(client->clnt, mem->smem_priv,
- &ionflag);
+
+ rc = ion_handle_get_flags(ion_client, ion_handle, &flags);
if (rc) {
dprintk(VIDC_ERR,
- "ion_handle_get_flags failed: %d\n", rc);
- goto cache_op_failed;
+ "%s: ion_handle_get_flags failed: %d, ion client %pK, ion handle %pK\n",
+ __func__, rc, ion_client, ion_handle);
+ goto exit;
}
- if (ION_IS_CACHED(ionflag)) {
- switch (cache_op) {
- case SMEM_CACHE_CLEAN:
- msm_cache_ops = ION_IOC_CLEAN_CACHES;
- break;
- case SMEM_CACHE_INVALIDATE:
- msm_cache_ops = ION_IOC_INV_CACHES;
- break;
- case SMEM_CACHE_CLEAN_INVALIDATE:
- msm_cache_ops = ION_IOC_CLEAN_INV_CACHES;
- break;
- default:
- dprintk(VIDC_ERR, "cache operation not supported\n");
- rc = -EINVAL;
- goto cache_op_failed;
- }
- rc = msm_ion_do_cache_op(client->clnt,
- (struct ion_handle *)mem->smem_priv,
- 0, (unsigned long)mem->size,
- msm_cache_ops);
- if (rc) {
- dprintk(VIDC_ERR,
- "cache operation failed %d\n", rc);
- goto cache_op_failed;
- }
+
+ if (!ION_IS_CACHED(flags))
+ goto exit;
+
+ switch (cache_op) {
+ case SMEM_CACHE_CLEAN:
+ msm_cache_ops = ION_IOC_CLEAN_CACHES;
+ break;
+ case SMEM_CACHE_INVALIDATE:
+ msm_cache_ops = ION_IOC_INV_CACHES;
+ break;
+ case SMEM_CACHE_CLEAN_INVALIDATE:
+ msm_cache_ops = ION_IOC_CLEAN_INV_CACHES;
+ break;
+ default:
+ dprintk(VIDC_ERR, "%s: cache (%d) operation not supported\n",
+ __func__, cache_op);
+ rc = -EINVAL;
+ goto exit;
}
-cache_op_failed:
+
+ rc = msm_ion_do_cache_offset_op(ion_client, ion_handle, NULL,
+ offset, size, msm_cache_ops);
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "%s: cache operation failed %d, ion client %pK, ion handle %pK, offset %lu, size %lu, msm_cache_ops %u\n",
+ __func__, rc, ion_client, ion_handle, offset,
+ size, msm_cache_ops);
+ goto exit;
+ }
+
+exit:
return rc;
}
-int msm_smem_cache_operations(void *clt, struct msm_smem *mem,
+int msm_smem_cache_operations(struct smem_client *client,
+ void *handle, unsigned long offset, unsigned long size,
enum smem_cache_ops cache_op)
{
- struct smem_client *client = clt;
int rc = 0;
- if (!client) {
- dprintk(VIDC_ERR, "Invalid params: %pK\n",
- client);
+ if (!client || !handle) {
+ dprintk(VIDC_ERR, "%s: Invalid params: %pK %pK\n",
+ __func__, client, handle);
return -EINVAL;
}
+
switch (client->mem_type) {
case SMEM_ION:
- rc = ion_cache_operations(client, mem, cache_op);
+ rc = msm_ion_cache_operations(client->clnt, handle,
+ offset, size, cache_op);
if (rc)
dprintk(VIDC_ERR,
- "Failed cache operations: %d\n", rc);
+ "%s: Failed cache operations: %d\n", __func__, rc);
break;
default:
- dprintk(VIDC_ERR, "Mem type not supported\n");
+ dprintk(VIDC_ERR, "%s: Mem type (%d) not supported\n",
+ __func__, client->mem_type);
+ rc = -EINVAL;
break;
}
return rc;
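Editorial aside: with the new signature a caller passes the offset and size
explicitly instead of a whole msm_smem, so partial-buffer cache maintenance
becomes possible. A hedged usage sketch (caller-side names assumed):

    /* Sketch: clean CPU caches for one plane before handing it to hardware. */
    static int example_clean_plane(struct smem_client *client,
            struct msm_smem *smem)
    {
        return msm_smem_cache_operations(client, smem->handle,
                smem->offset, smem->size, SMEM_CACHE_CLEAN);
    }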
@@ -607,32 +765,22 @@
return client;
}
-struct msm_smem *msm_smem_alloc(void *clt, size_t size, u32 align, u32 flags,
- enum hal_buffer buffer_type, int map_kernel)
+int msm_smem_alloc(struct smem_client *client, size_t size,
+ u32 align, u32 flags, enum hal_buffer buffer_type,
+ int map_kernel, struct msm_smem *smem)
{
- struct smem_client *client;
int rc = 0;
- struct msm_smem *mem;
- client = clt;
- if (!client) {
- dprintk(VIDC_ERR, "Invalid client passed\n");
- return NULL;
+ if (!client || !smem || !size) {
+ dprintk(VIDC_ERR, "%s: Invalid params %pK %pK %d\n",
+ __func__, client, smem, (u32)size);
+ return -EINVAL;
}
- if (!size) {
- dprintk(VIDC_ERR, "No need to allocate memory of size: %zx\n",
- size);
- return NULL;
- }
- mem = kzalloc(sizeof(*mem), GFP_KERNEL);
- if (!mem) {
- dprintk(VIDC_ERR, "Failed to allocate shared mem\n");
- return NULL;
- }
+
switch (client->mem_type) {
case SMEM_ION:
rc = alloc_ion_mem(client, size, align, flags, buffer_type,
- mem, map_kernel);
+ smem, map_kernel);
break;
default:
dprintk(VIDC_ERR, "Mem type not supported\n");
@@ -640,30 +788,34 @@
break;
}
if (rc) {
- dprintk(VIDC_ERR, "Failed to allocate shared memory\n");
- kfree(mem);
- mem = NULL;
+ dprintk(VIDC_ERR, "Failed to allocate memory\n");
}
- return mem;
+
+ return rc;
}
-void msm_smem_free(void *clt, struct msm_smem *mem)
+int msm_smem_free(void *clt, struct msm_smem *smem)
{
+ int rc = 0;
struct smem_client *client = clt;
- if (!client || !mem) {
+ if (!client || !smem) {
dprintk(VIDC_ERR, "Invalid client/handle passed\n");
- return;
+ return -EINVAL;
}
switch (client->mem_type) {
case SMEM_ION:
- free_ion_mem(client, mem);
+ rc = free_ion_mem(client, smem);
break;
default:
dprintk(VIDC_ERR, "Mem type not supported\n");
+ rc = -EINVAL;
break;
}
- kfree(mem);
+ if (rc)
+ dprintk(VIDC_ERR, "Failed to free memory\n");
+
+ return rc;
};
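Editorial aside: since msm_smem_alloc() no longer allocates the msm_smem
wrapper itself, the caller owns that storage and must pair each alloc with a
free. A sketch under that assumption (the buffer type is chosen purely for
illustration):

    /* Sketch: caller-owned msm_smem, allocate then release. */
    static int example_internal_buffer(struct smem_client *client)
    {
        struct msm_smem smem = {0};
        int rc;

        rc = msm_smem_alloc(client, SZ_4K, SZ_4K, 0,
                HAL_BUFFER_INTERNAL_SCRATCH, 0 /* no kernel map */,
                &smem);
        if (rc)
            return rc;

        /* ... program smem.device_addr into the hardware ... */

        return msm_smem_free(client, &smem);
    }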
void msm_smem_delete_client(void *clt)
@@ -692,7 +844,7 @@
struct context_bank_info *cb = NULL, *match = NULL;
if (!clt) {
- dprintk(VIDC_ERR, "%s - invalid params\n", __func__);
+ dprintk(VIDC_ERR, "%s: invalid params\n", __func__);
return NULL;
}
@@ -713,12 +865,13 @@
if (cb->is_secure == is_secure &&
cb->buffer_type & buffer_type) {
match = cb;
- dprintk(VIDC_DBG,
- "context bank found for CB : %s, device: %pK mapping: %pK\n",
- match->name, match->dev, match->mapping);
break;
}
}
+ if (!match)
+ dprintk(VIDC_ERR,
+ "%s: cb not found for buffer_type %x, is_secure %d\n",
+ __func__, buffer_type, is_secure);
return match;
}
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index f31c11b..554e89a 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -20,7 +20,10 @@
#include "msm_vidc_clocks.h"
#define MSM_VDEC_DVC_NAME "msm_vdec_8974"
+#define MIN_NUM_THUMBNAIL_MODE_OUTPUT_BUFFERS MIN_NUM_OUTPUT_BUFFERS
#define MIN_NUM_THUMBNAIL_MODE_CAPTURE_BUFFERS MIN_NUM_CAPTURE_BUFFERS
+#define MIN_NUM_DEC_OUTPUT_BUFFERS 4
+#define MIN_NUM_DEC_CAPTURE_BUFFERS 4
#define DEFAULT_VIDEO_CONCEAL_COLOR_BLACK 0x8010
#define MB_SIZE_IN_PIXEL (16 * 16)
#define OPERATING_FRAME_RATE_STEP (1 << 16)
@@ -545,7 +548,6 @@
f->fmt.pix_mp.plane_fmt[i].sizeimage;
}
- rc = msm_comm_try_get_bufreqs(inst);
} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
fmt = msm_comm_get_pixel_fmt_fourcc(vdec_formats,
@@ -599,8 +601,6 @@
inst->bufq[OUTPUT_PORT].plane_sizes[i] =
f->fmt.pix_mp.plane_fmt[i].sizeimage;
}
-
- rc = msm_comm_try_get_bufreqs(inst);
}
err_invalid_fmt:
return rc;
@@ -676,6 +676,19 @@
memcpy(&inst->fmts[fmt->type], fmt,
sizeof(struct msm_vidc_format));
+ inst->buff_req.buffer[1].buffer_type = HAL_BUFFER_INPUT;
+ inst->buff_req.buffer[1].buffer_count_min_host =
+ inst->buff_req.buffer[1].buffer_count_actual =
+ MIN_NUM_DEC_OUTPUT_BUFFERS;
+ inst->buff_req.buffer[2].buffer_type = HAL_BUFFER_OUTPUT;
+ inst->buff_req.buffer[2].buffer_count_min_host =
+ inst->buff_req.buffer[2].buffer_count_actual =
+ MIN_NUM_DEC_CAPTURE_BUFFERS;
+ inst->buff_req.buffer[3].buffer_type = HAL_BUFFER_OUTPUT2;
+ inst->buff_req.buffer[3].buffer_count_min_host =
+ inst->buff_req.buffer[3].buffer_count_actual =
+ MIN_NUM_DEC_CAPTURE_BUFFERS;
+
/* By default, initialize OUTPUT port to H264 decoder */
fmt = msm_comm_get_pixel_fmt_fourcc(vdec_formats,
ARRAY_SIZE(vdec_formats), V4L2_PIX_FMT_H264,
@@ -717,6 +730,7 @@
struct v4l2_ctrl *temp_ctrl = NULL;
struct hal_profile_level profile_level;
struct hal_frame_size frame_sz;
+ struct hal_buffer_requirements *bufreq;
if (!inst || !inst->core || !inst->core->device) {
dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
@@ -778,6 +792,59 @@
hal_property.enable = ctrl->val;
pdata = &hal_property;
msm_dcvs_try_enable(inst);
+
+ bufreq = get_buff_req_buffer(inst,
+ HAL_BUFFER_INPUT);
+ if (!bufreq) {
+ dprintk(VIDC_ERR,
+ "Failed : No buffer requirements : %x\n",
+ HAL_BUFFER_INPUT);
+ return -EINVAL;
+ }
+ bufreq->buffer_count_min =
+ MIN_NUM_THUMBNAIL_MODE_OUTPUT_BUFFERS;
+
+ if (msm_comm_get_stream_output_mode(inst) ==
+ HAL_VIDEO_DECODER_SECONDARY) {
+
+ bufreq = get_buff_req_buffer(inst,
+ HAL_BUFFER_OUTPUT);
+ if (!bufreq) {
+ dprintk(VIDC_ERR,
+ "Failed : No buffer requirements: %x\n",
+ HAL_BUFFER_OUTPUT);
+ return -EINVAL;
+ }
+
+ bufreq->buffer_count_min =
+ MIN_NUM_THUMBNAIL_MODE_CAPTURE_BUFFERS;
+
+ bufreq = get_buff_req_buffer(inst,
+ HAL_BUFFER_OUTPUT2);
+ if (!bufreq) {
+ dprintk(VIDC_ERR,
+ "Failed : No buffer requirements: %x\n",
+ HAL_BUFFER_OUTPUT2);
+ return -EINVAL;
+ }
+
+ bufreq->buffer_count_min =
+ MIN_NUM_THUMBNAIL_MODE_CAPTURE_BUFFERS;
+ } else {
+
+ bufreq = get_buff_req_buffer(inst,
+ HAL_BUFFER_OUTPUT);
+ if (!bufreq) {
+ dprintk(VIDC_ERR,
+ "Failed : No buffer requirements: %x\n",
+ HAL_BUFFER_OUTPUT);
+ return -EINVAL;
+ }
+ bufreq->buffer_count_min =
+ MIN_NUM_THUMBNAIL_MODE_CAPTURE_BUFFERS;
+
+ }
+
break;
case V4L2_CID_MPEG_VIDC_VIDEO_SECURE:
property_id = HAL_PARAM_SECURE;
@@ -897,7 +964,6 @@
"Failed setting OUTPUT2 size : %d\n",
rc);
- rc = msm_comm_try_get_bufreqs(inst);
break;
default:
dprintk(VIDC_ERR,
@@ -921,7 +987,6 @@
V4L2_CID_MPEG_VIDEO_H264_LEVEL,
temp_ctrl->val);
pdata = &profile_level;
- rc = msm_comm_try_get_bufreqs(inst);
break;
case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_H264_PROFILE);
@@ -933,7 +998,6 @@
V4L2_CID_MPEG_VIDEO_H264_PROFILE,
temp_ctrl->val);
pdata = &profile_level;
- rc = msm_comm_try_get_bufreqs(inst);
break;
case V4L2_CID_MPEG_VIDC_VIDEO_BUFFER_SIZE_LIMIT:
dprintk(VIDC_DBG,
@@ -1051,11 +1115,6 @@
__func__, rc);
break;
}
- rc = msm_comm_try_get_bufreqs(inst);
- if (rc)
- dprintk(VIDC_ERR,
- "%s Failed to get buffer requirements : %d\n",
- __func__, rc);
}
inst->clk_data.dpb_fourcc = fourcc;
break;
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index d44684e..e2ea2bc 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -39,6 +39,8 @@
#define MIN_TIME_RESOLUTION 1
#define MAX_TIME_RESOLUTION 0xFFFFFF
#define DEFAULT_TIME_RESOLUTION 0x7530
+#define MIN_NUM_ENC_OUTPUT_BUFFERS 4
+#define MIN_NUM_ENC_CAPTURE_BUFFERS 5
/*
* Default 601 to 709 conversion coefficients for resolution: 176x144 negative
@@ -955,7 +957,7 @@
.name = "Set Color space transfer characterstics",
.type = V4L2_CTRL_TYPE_INTEGER,
.minimum = MSM_VIDC_TRANSFER_BT709_5,
- .maximum = MSM_VIDC_TRANSFER_BT_2020_12,
+ .maximum = MSM_VIDC_TRANSFER_HLG,
.default_value = MSM_VIDC_TRANSFER_601_6_625,
.step = 1,
.qmenu = NULL,
@@ -2124,6 +2126,15 @@
inst->bufq[CAPTURE_PORT].num_planes = 1;
inst->clk_data.operating_rate = 0;
+ inst->buff_req.buffer[1].buffer_type = HAL_BUFFER_INPUT;
+ inst->buff_req.buffer[1].buffer_count_min_host =
+ inst->buff_req.buffer[1].buffer_count_actual =
+ MIN_NUM_ENC_OUTPUT_BUFFERS;
+ inst->buff_req.buffer[2].buffer_type = HAL_BUFFER_OUTPUT;
+ inst->buff_req.buffer[2].buffer_count_min_host =
+ inst->buff_req.buffer[2].buffer_count_actual =
+ MIN_NUM_ENC_CAPTURE_BUFFERS;
+
/* By default, initialize OUTPUT port to UBWC YUV format */
fmt = msm_comm_get_pixel_fmt_fourcc(venc_formats,
ARRAY_SIZE(venc_formats), V4L2_PIX_FMT_NV12_UBWC,
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index ede51b6..2ca3e8d 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -23,6 +23,7 @@
#include <linux/delay.h>
#include "vidc_hfi_api.h"
#include "msm_vidc_clocks.h"
+#include <linux/dma-buf.h>
#define MAX_EVENTS 30
@@ -383,507 +384,6 @@
}
EXPORT_SYMBOL(msm_vidc_reqbufs);
-struct buffer_info *get_registered_buf(struct msm_vidc_inst *inst,
- struct v4l2_buffer *b, int idx, int *plane)
-{
- struct buffer_info *temp;
- struct buffer_info *ret = NULL;
- int i;
- int fd = b->m.planes[idx].reserved[0];
- u32 buff_off = b->m.planes[idx].reserved[1];
- u32 size = b->m.planes[idx].length;
- ion_phys_addr_t device_addr = b->m.planes[idx].m.userptr;
-
- if (fd < 0 || !plane) {
- dprintk(VIDC_ERR, "Invalid input\n");
- goto err_invalid_input;
- }
-
- WARN(!mutex_is_locked(&inst->registeredbufs.lock),
- "Registered buf lock is not acquired for %s", __func__);
-
- *plane = 0;
- list_for_each_entry(temp, &inst->registeredbufs.list, list) {
- for (i = 0; i < min(temp->num_planes, VIDEO_MAX_PLANES); i++) {
- bool ion_hndl_matches = temp->handle[i] ?
- msm_smem_compare_buffers(inst->mem_client, fd,
- temp->handle[i]->smem_priv) : false;
- bool device_addr_matches = device_addr ==
- temp->device_addr[i];
- bool contains_within = CONTAINS(temp->buff_off[i],
- temp->size[i], buff_off) ||
- CONTAINS(buff_off, size, temp->buff_off[i]);
- bool overlaps = OVERLAPS(buff_off, size,
- temp->buff_off[i], temp->size[i]);
-
- if (!temp->inactive &&
- (ion_hndl_matches || device_addr_matches) &&
- (contains_within || overlaps)) {
- dprintk(VIDC_DBG,
- "This memory region is already mapped\n");
- ret = temp;
- *plane = i;
- break;
- }
- }
- if (ret)
- break;
- }
-
-err_invalid_input:
- return ret;
-}
-
-static struct msm_smem *get_same_fd_buffer(struct msm_vidc_inst *inst, int fd)
-{
- struct buffer_info *temp;
- struct msm_smem *same_fd_handle = NULL;
- int i;
-
- if (!fd)
- return NULL;
-
- if (!inst || fd < 0) {
- dprintk(VIDC_ERR, "%s: Invalid input\n", __func__);
- goto err_invalid_input;
- }
-
- mutex_lock(&inst->registeredbufs.lock);
- list_for_each_entry(temp, &inst->registeredbufs.list, list) {
- for (i = 0; i < min(temp->num_planes, VIDEO_MAX_PLANES); i++) {
- bool ion_hndl_matches = temp->handle[i] ?
- msm_smem_compare_buffers(inst->mem_client, fd,
- temp->handle[i]->smem_priv) : false;
- if (ion_hndl_matches && temp->mapped[i]) {
- temp->same_fd_ref[i]++;
- dprintk(VIDC_INFO,
- "Found same fd buffer\n");
- same_fd_handle = temp->handle[i];
- break;
- }
- }
- if (same_fd_handle)
- break;
- }
- mutex_unlock(&inst->registeredbufs.lock);
-
-err_invalid_input:
- return same_fd_handle;
-}
-
-struct buffer_info *device_to_uvaddr(struct msm_vidc_list *buf_list,
- ion_phys_addr_t device_addr)
-{
- struct buffer_info *temp = NULL;
- bool found = false;
- int i;
-
- if (!buf_list || !device_addr) {
- dprintk(VIDC_ERR,
- "Invalid input- device_addr: %pa buf_list: %pK\n",
- &device_addr, buf_list);
- goto err_invalid_input;
- }
-
- mutex_lock(&buf_list->lock);
- list_for_each_entry(temp, &buf_list->list, list) {
- for (i = 0; i < min(temp->num_planes, VIDEO_MAX_PLANES); i++) {
- if (!temp->inactive &&
- temp->device_addr[i] == device_addr) {
- dprintk(VIDC_INFO,
- "Found same fd buffer\n");
- found = true;
- break;
- }
- }
-
- if (found)
- break;
- }
- mutex_unlock(&buf_list->lock);
-
-err_invalid_input:
- return temp;
-}
-
-static inline void populate_buf_info(struct buffer_info *binfo,
- struct v4l2_buffer *b, u32 i)
-{
- if (i >= VIDEO_MAX_PLANES) {
- dprintk(VIDC_ERR, "%s: Invalid input\n", __func__);
- return;
- }
- binfo->type = b->type;
- binfo->fd[i] = b->m.planes[i].reserved[0];
- binfo->buff_off[i] = b->m.planes[i].reserved[1];
- binfo->size[i] = b->m.planes[i].length;
- binfo->uvaddr[i] = b->m.planes[i].m.userptr;
- binfo->num_planes = b->length;
- binfo->memory = b->memory;
- binfo->v4l2_index = b->index;
- binfo->timestamp.tv_sec = b->timestamp.tv_sec;
- binfo->timestamp.tv_usec = b->timestamp.tv_usec;
- dprintk(VIDC_DBG, "%s: fd[%d] = %d b->index = %d",
- __func__, i, binfo->fd[i], b->index);
-}
-
-static inline void repopulate_v4l2_buffer(struct v4l2_buffer *b,
- struct buffer_info *binfo)
-{
- int i = 0;
-
- b->type = binfo->type;
- b->length = binfo->num_planes;
- b->memory = binfo->memory;
- b->index = binfo->v4l2_index;
- b->timestamp.tv_sec = binfo->timestamp.tv_sec;
- b->timestamp.tv_usec = binfo->timestamp.tv_usec;
- binfo->dequeued = false;
- for (i = 0; i < binfo->num_planes; ++i) {
- b->m.planes[i].reserved[0] = binfo->fd[i];
- b->m.planes[i].reserved[1] = binfo->buff_off[i];
- b->m.planes[i].length = binfo->size[i];
- b->m.planes[i].m.userptr = binfo->device_addr[i];
- dprintk(VIDC_DBG, "%s %d %d %d %pa\n", __func__, binfo->fd[i],
- binfo->buff_off[i], binfo->size[i],
- &binfo->device_addr[i]);
- }
-}
-
-static struct msm_smem *map_buffer(struct msm_vidc_inst *inst,
- struct v4l2_plane *p, enum hal_buffer buffer_type)
-{
- struct msm_smem *handle = NULL;
-
- handle = msm_comm_smem_user_to_kernel(inst,
- p->reserved[0],
- p->length,
- buffer_type);
- if (!handle) {
- dprintk(VIDC_ERR,
- "%s: Failed to get device buffer address\n", __func__);
- return NULL;
- }
- return handle;
-}
-
-static inline enum hal_buffer get_hal_buffer_type(
- struct msm_vidc_inst *inst, struct v4l2_buffer *b)
-{
- if (b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
- return HAL_BUFFER_INPUT;
- else if (b->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
- return HAL_BUFFER_OUTPUT;
- else
- return -EINVAL;
-}
-
-static inline bool is_dynamic_buffer_mode(struct v4l2_buffer *b,
- struct msm_vidc_inst *inst)
-{
- enum vidc_ports port = b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
- OUTPUT_PORT : CAPTURE_PORT;
- return inst->buffer_mode_set[port] == HAL_BUFFER_MODE_DYNAMIC;
-}
-
-
-static inline void save_v4l2_buffer(struct v4l2_buffer *b,
- struct buffer_info *binfo)
-{
- int i = 0;
-
- for (i = 0; i < b->length; ++i) {
- if (EXTRADATA_IDX(b->length) &&
- (i == EXTRADATA_IDX(b->length)) &&
- !b->m.planes[i].length) {
- continue;
- }
- populate_buf_info(binfo, b, i);
- }
-}
-
-int map_and_register_buf(struct msm_vidc_inst *inst, struct v4l2_buffer *b)
-{
- struct buffer_info *binfo = NULL;
- struct buffer_info *temp = NULL, *iterator = NULL;
- int plane = 0;
- int i = 0, rc = 0;
- struct msm_smem *same_fd_handle = NULL;
-
- if (!b || !inst) {
- dprintk(VIDC_ERR, "%s: invalid input\n", __func__);
- return -EINVAL;
- }
-
- binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
- if (!binfo) {
- dprintk(VIDC_ERR, "Out of memory\n");
- rc = -ENOMEM;
- goto exit;
- }
- if (b->length > VIDEO_MAX_PLANES) {
- dprintk(VIDC_ERR, "Num planes exceeds max: %d, %d\n",
- b->length, VIDEO_MAX_PLANES);
- rc = -EINVAL;
- goto exit;
- }
-
- dprintk(VIDC_DBG,
- "[MAP] Create binfo = %pK fd = %d size = %d type = %d\n",
- binfo, b->m.planes[0].reserved[0],
- b->m.planes[0].length, b->type);
-
- for (i = 0; i < b->length; ++i) {
- rc = 0;
- if (EXTRADATA_IDX(b->length) &&
- (i == EXTRADATA_IDX(b->length)) &&
- !b->m.planes[i].length) {
- continue;
- }
- mutex_lock(&inst->registeredbufs.lock);
- temp = get_registered_buf(inst, b, i, &plane);
- if (temp && !is_dynamic_buffer_mode(b, inst)) {
- dprintk(VIDC_DBG,
- "This memory region has already been prepared\n");
- rc = 0;
- mutex_unlock(&inst->registeredbufs.lock);
- goto exit;
- }
-
- if (temp && is_dynamic_buffer_mode(b, inst) && !i) {
- /*
- * Buffer is already present in registered list
- * increment ref_count, populate new values of v4l2
- * buffer in existing buffer_info struct.
- *
- * We will use the saved buffer info and queue it when
- * we receive RELEASE_BUFFER_REFERENCE EVENT from f/w.
- */
- dprintk(VIDC_DBG, "[MAP] Buffer already prepared\n");
- temp->inactive = false;
- list_for_each_entry(iterator,
- &inst->registeredbufs.list, list) {
- if (iterator == temp) {
- rc = buf_ref_get(inst, temp);
- save_v4l2_buffer(b, temp);
- break;
- }
- }
- }
- mutex_unlock(&inst->registeredbufs.lock);
- /*
- * rc == 1,
- * buffer is mapped, fw has released all reference, so skip
- * mapping and queue it immediately.
- *
- * rc == 2,
- * buffer is mapped and fw is holding a reference, hold it in
- * the driver and queue it later when fw has released
- */
- if (rc == 1) {
- rc = 0;
- goto exit;
- } else if (rc >= 2) {
- rc = -EEXIST;
- goto exit;
- }
-
- same_fd_handle = get_same_fd_buffer(
- inst, b->m.planes[i].reserved[0]);
-
- populate_buf_info(binfo, b, i);
- if (same_fd_handle) {
- binfo->device_addr[i] =
- same_fd_handle->device_addr + binfo->buff_off[i];
- b->m.planes[i].m.userptr = binfo->device_addr[i];
- binfo->mapped[i] = false;
- binfo->handle[i] = same_fd_handle;
- } else {
- binfo->handle[i] = map_buffer(inst, &b->m.planes[i],
- get_hal_buffer_type(inst, b));
- if (!binfo->handle[i]) {
- rc = -EINVAL;
- goto exit;
- }
-
- binfo->mapped[i] = true;
- binfo->device_addr[i] = binfo->handle[i]->device_addr +
- binfo->buff_off[i];
- b->m.planes[i].m.userptr = binfo->device_addr[i];
- }
-
- /* We maintain one ref count for all planes*/
- if (!i && is_dynamic_buffer_mode(b, inst)) {
- rc = buf_ref_get(inst, binfo);
- if (rc < 0)
- goto exit;
- }
- dprintk(VIDC_DBG,
- "%s: [MAP] binfo = %pK, handle[%d] = %pK, device_addr = %pa, fd = %d, offset = %d, mapped = %d\n",
- __func__, binfo, i, binfo->handle[i],
- &binfo->device_addr[i], binfo->fd[i],
- binfo->buff_off[i], binfo->mapped[i]);
- }
-
- mutex_lock(&inst->registeredbufs.lock);
- list_add_tail(&binfo->list, &inst->registeredbufs.list);
- mutex_unlock(&inst->registeredbufs.lock);
- return 0;
-
-exit:
- kfree(binfo);
- return rc;
-}
-int unmap_and_deregister_buf(struct msm_vidc_inst *inst,
- struct buffer_info *binfo)
-{
- int i = 0;
- struct buffer_info *temp = NULL;
- bool found = false, keep_node = false;
-
- if (!inst || !binfo) {
- dprintk(VIDC_ERR, "%s invalid param: %pK %pK\n",
- __func__, inst, binfo);
- return -EINVAL;
- }
-
- WARN(!mutex_is_locked(&inst->registeredbufs.lock),
- "Registered buf lock is not acquired for %s", __func__);
-
- /*
- * Make sure the buffer to be unmapped and deleted
- * from the registered list is present in the list.
- */
- list_for_each_entry(temp, &inst->registeredbufs.list, list) {
- if (temp == binfo) {
- found = true;
- break;
- }
- }
-
- /*
- * Free the buffer info only if
- * - buffer info has not been deleted from registered list
- * - vidc client has called dqbuf on the buffer
- * - no references are held on the buffer
- */
- if (!found || !temp || !temp->pending_deletion || !temp->dequeued)
- goto exit;
-
- for (i = 0; i < temp->num_planes; i++) {
- dprintk(VIDC_DBG,
- "%s: [UNMAP] binfo = %pK, handle[%d] = %pK, device_addr = %pa, fd = %d, offset = %d, mapped = %d\n",
- __func__, temp, i, temp->handle[i],
- &temp->device_addr[i], temp->fd[i],
- temp->buff_off[i], temp->mapped[i]);
- /*
- * Unmap the handle only if the buffer has been mapped and no
- * other buffer has a reference to this buffer.
- * In case of buffers with same fd, we will map the buffer only
- * once and subsequent buffers will refer to the mapped buffer's
- * device address.
- * For buffers which share the same fd, do not unmap and keep
- * the buffer info in registered list.
- */
- if (temp->handle[i] && temp->mapped[i] &&
- !temp->same_fd_ref[i]) {
- msm_comm_smem_free(inst,
- temp->handle[i]);
- }
-
- if (temp->same_fd_ref[i])
- keep_node = true;
- else {
- temp->fd[i] = 0;
- temp->handle[i] = 0;
- temp->device_addr[i] = 0;
- temp->uvaddr[i] = 0;
- }
- }
- if (!keep_node) {
- dprintk(VIDC_DBG, "[UNMAP] AND-FREED binfo: %pK\n", temp);
- list_del(&temp->list);
- kfree(temp);
- } else {
- temp->inactive = true;
- dprintk(VIDC_DBG, "[UNMAP] NOT-FREED binfo: %pK\n", temp);
- }
-exit:
- return 0;
-}
-
-
-int qbuf_dynamic_buf(struct msm_vidc_inst *inst,
- struct buffer_info *binfo)
-{
- struct v4l2_buffer b = {0};
- struct v4l2_plane plane[VIDEO_MAX_PLANES] = { {0} };
- struct buf_queue *q = NULL;
- int rc = 0;
-
- if (!binfo) {
- dprintk(VIDC_ERR, "%s invalid param: %pK\n", __func__, binfo);
- return -EINVAL;
- }
- dprintk(VIDC_DBG, "%s fd[0] = %d\n", __func__, binfo->fd[0]);
-
- b.m.planes = plane;
- repopulate_v4l2_buffer(&b, binfo);
-
- q = msm_comm_get_vb2q(inst, (&b)->type);
- if (!q) {
- dprintk(VIDC_ERR, "Failed to find buffer queue for type = %d\n"
- , (&b)->type);
- return -EINVAL;
- }
-
- mutex_lock(&q->lock);
- rc = vb2_qbuf(&q->vb2_bufq, &b);
- mutex_unlock(&q->lock);
-
- if (rc)
- dprintk(VIDC_ERR, "Failed to qbuf, %d\n", rc);
- return rc;
-}
-
-int output_buffer_cache_invalidate(struct msm_vidc_inst *inst,
- struct buffer_info *binfo)
-{
- int i = 0;
- int rc = 0;
-
- if (!inst) {
- dprintk(VIDC_ERR, "%s: invalid inst: %pK\n", __func__, inst);
- return -EINVAL;
- }
-
- if (!binfo) {
- dprintk(VIDC_ERR, "%s: invalid buffer info: %pK\n",
- __func__, inst);
- return -EINVAL;
- }
-
- for (i = 0; i < binfo->num_planes; i++) {
- if (binfo->handle[i]) {
- struct msm_smem smem = *binfo->handle[i];
-
- smem.offset = (unsigned int)(binfo->buff_off[i]);
- smem.size = binfo->size[i];
- rc = msm_comm_smem_cache_operations(inst,
- &smem, SMEM_CACHE_INVALIDATE);
- if (rc) {
- dprintk(VIDC_ERR,
- "%s: Failed to clean caches: %d\n",
- __func__, rc);
- return -EINVAL;
- }
- } else
- dprintk(VIDC_DBG, "%s: NULL handle for plane %d\n",
- __func__, i);
- }
- return 0;
-}
-
static bool valid_v4l2_buffer(struct v4l2_buffer *b,
struct msm_vidc_inst *inst) {
enum vidc_ports port =
@@ -896,17 +396,16 @@
inst->bufq[port].num_planes == b->length;
}
-int msm_vidc_release_buffer(void *instance, int buffer_type,
- unsigned int buffer_index)
+int msm_vidc_release_buffer(void *instance, int type, unsigned int index)
{
+ int rc = 0;
struct msm_vidc_inst *inst = instance;
- struct buffer_info *bi, *dummy;
- int i, rc = 0;
- int found_buf = 0;
- struct vb2_buf_entry *temp, *next;
+ struct msm_vidc_buffer *mbuf, *dummy;
- if (!inst)
+ if (!inst) {
+ dprintk(VIDC_ERR, "%s: invalid inst\n", __func__);
return -EINVAL;
+ }
if (!inst->in_reconfig &&
inst->state > MSM_VIDC_LOAD_RESOURCES &&
@@ -914,64 +413,26 @@
rc = msm_comm_try_state(inst, MSM_VIDC_RELEASE_RESOURCES_DONE);
if (rc) {
dprintk(VIDC_ERR,
- "Failed to move inst: %pK to release res done\n",
- inst);
+ "%s: Failed to move inst: %pK to release res done\n",
+ __func__, inst);
}
}
mutex_lock(&inst->registeredbufs.lock);
- list_for_each_entry_safe(bi, dummy, &inst->registeredbufs.list, list) {
- if (bi->type == buffer_type && bi->v4l2_index == buffer_index) {
- found_buf = 1;
- list_del(&bi->list);
- for (i = 0; i < bi->num_planes; i++) {
- if (bi->handle[i] && bi->mapped[i]) {
- dprintk(VIDC_DBG,
- "%s: [UNMAP] binfo = %pK, handle[%d] = %pK, device_addr = %pa, fd = %d, offset = %d, mapped = %d\n",
- __func__, bi, i, bi->handle[i],
- &bi->device_addr[i], bi->fd[i],
- bi->buff_off[i], bi->mapped[i]);
- msm_comm_smem_free(inst,
- bi->handle[i]);
- found_buf = 2;
- }
- }
- kfree(bi);
- break;
- }
+ list_for_each_entry_safe(mbuf, dummy, &inst->registeredbufs.list,
+ list) {
+ struct vb2_buffer *vb2 = &mbuf->vvb.vb2_buf;
+
+ if (vb2->type != type || vb2->index != index)
+ continue;
+
+ print_vidc_buffer(VIDC_DBG, "release buf", inst, mbuf);
+ msm_comm_unmap_vidc_buffer(inst, mbuf);
+ list_del(&mbuf->list);
+ kfree(mbuf);
}
mutex_unlock(&inst->registeredbufs.lock);
- switch (found_buf) {
- case 0:
- dprintk(VIDC_DBG,
- "%s: No buffer(type: %d) found for index %d\n",
- __func__, buffer_type, buffer_index);
- break;
- case 1:
- dprintk(VIDC_WARN,
- "%s: Buffer(type: %d) found for index %d.",
- __func__, buffer_type, buffer_index);
- dprintk(VIDC_WARN, "zero planes mapped.\n");
- break;
- case 2:
- dprintk(VIDC_DBG,
- "%s: Released buffer(type: %d) for index %d\n",
- __func__, buffer_type, buffer_index);
- break;
- default:
- break;
- }
-
- mutex_lock(&inst->pendingq.lock);
- list_for_each_entry_safe(temp, next, &inst->pendingq.list, list) {
- if (temp->vb->type == buffer_type) {
- list_del(&temp->list);
- kfree(temp);
- }
- }
- mutex_unlock(&inst->pendingq.lock);
-
return rc;
}
EXPORT_SYMBOL(msm_vidc_release_buffer);
@@ -979,65 +440,20 @@
int msm_vidc_qbuf(void *instance, struct v4l2_buffer *b)
{
struct msm_vidc_inst *inst = instance;
- struct buffer_info *binfo;
- int plane = 0;
- int rc = 0;
- int i;
+ int rc = 0, i = 0;
struct buf_queue *q = NULL;
- if (!inst || !inst->core || !b || !valid_v4l2_buffer(b, inst))
+ if (!inst || !inst->core || !b || !valid_v4l2_buffer(b, inst)) {
+ dprintk(VIDC_ERR, "%s: invalid params, inst %pK\n",
+ __func__, inst);
return -EINVAL;
-
- if (inst->state == MSM_VIDC_CORE_INVALID ||
- inst->core->state == VIDC_CORE_INVALID)
- return -EINVAL;
-
- rc = map_and_register_buf(inst, b);
- if (rc == -EEXIST) {
- if (atomic_read(&inst->in_flush) &&
- is_dynamic_buffer_mode(b, inst)) {
- dprintk(VIDC_ERR,
- "Flush in progress, do not hold any buffers in driver\n");
- msm_comm_flush_dynamic_buffers(inst);
- }
- return 0;
}
- if (rc)
- return rc;
- for (i = 0; i < b->length; ++i) {
- if (EXTRADATA_IDX(b->length) &&
- (i == EXTRADATA_IDX(b->length)) &&
- !b->m.planes[i].length) {
- b->m.planes[i].m.userptr = 0;
- continue;
- }
- mutex_lock(&inst->registeredbufs.lock);
- binfo = get_registered_buf(inst, b, i, &plane);
- mutex_unlock(&inst->registeredbufs.lock);
- if (!binfo) {
- dprintk(VIDC_ERR,
- "This buffer is not registered: %d, %d, %d\n",
- b->m.planes[i].reserved[0],
- b->m.planes[i].reserved[1],
- b->m.planes[i].length);
- goto err_invalid_buff;
- }
- b->m.planes[i].m.userptr = binfo->device_addr[i];
- dprintk(VIDC_DBG, "Queueing device address = %pa\n",
- &binfo->device_addr[i]);
-
- if (binfo->handle[i] &&
- (b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)) {
- rc = msm_comm_smem_cache_operations(inst,
- binfo->handle[i], SMEM_CACHE_CLEAN);
- if (rc) {
- dprintk(VIDC_ERR,
- "Failed to clean caches: %d\n", rc);
- goto err_invalid_buff;
- }
- }
+ for (i = 0; i < b->length; i++) {
+ b->m.planes[i].m.fd = b->m.planes[i].reserved[0];
+ b->m.planes[i].data_offset = b->m.planes[i].reserved[1];
}
+ msm_comm_qbuf_cache_operations(inst, b);
q = msm_comm_get_vb2q(inst, b->type);
if (!q) {
@@ -1045,27 +461,28 @@
"Failed to find buffer queue for type = %d\n", b->type);
return -EINVAL;
}
+
mutex_lock(&q->lock);
rc = vb2_qbuf(&q->vb2_bufq, b);
mutex_unlock(&q->lock);
if (rc)
dprintk(VIDC_ERR, "Failed to qbuf, %d\n", rc);
- return rc;
-err_invalid_buff:
- return -EINVAL;
+ return rc;
}
EXPORT_SYMBOL(msm_vidc_qbuf);
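Editorial aside: the loop above moves the driver's private plane encoding
(fd in reserved[0], offset in reserved[1]) into the standard v4l2_plane
fields before vb2 sees the buffer. A sketch of how a hypothetical client
fills those fields (assumes <linux/videodev2.h>):

    /* Sketch: plane fields as consumed by msm_vidc_qbuf() above. */
    static void example_fill_plane(struct v4l2_plane *p, int dmabuf_fd,
            __u32 len)
    {
        p->length = len;
        p->reserved[0] = dmabuf_fd; /* copied into m.fd */
        p->reserved[1] = 0;         /* copied into data_offset */
    }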
int msm_vidc_dqbuf(void *instance, struct v4l2_buffer *b)
{
struct msm_vidc_inst *inst = instance;
- struct buffer_info *buffer_info = NULL;
- int i = 0, rc = 0;
+ int rc = 0, i = 0;
struct buf_queue *q = NULL;
- if (!inst || !b || !valid_v4l2_buffer(b, inst))
+ if (!inst || !b || !valid_v4l2_buffer(b, inst)) {
+ dprintk(VIDC_ERR, "%s: invalid params, inst %pK\n",
+ __func__, inst);
return -EINVAL;
+ }
q = msm_comm_get_vb2q(inst, b->type);
if (!q) {
@@ -1073,54 +490,21 @@
"Failed to find buffer queue for type = %d\n", b->type);
return -EINVAL;
}
+
mutex_lock(&q->lock);
rc = vb2_dqbuf(&q->vb2_bufq, b, true);
mutex_unlock(&q->lock);
- if (rc) {
- dprintk(VIDC_DBG, "Failed to dqbuf, %d\n", rc);
+ if (rc == -EAGAIN) {
+ return rc;
+ } else if (rc) {
+ dprintk(VIDC_ERR, "Failed to dqbuf, %d\n", rc);
return rc;
}
+ msm_comm_dqbuf_cache_operations(inst, b);
for (i = 0; i < b->length; i++) {
- if (EXTRADATA_IDX(b->length) &&
- i == EXTRADATA_IDX(b->length)) {
- continue;
- }
- buffer_info = device_to_uvaddr(&inst->registeredbufs,
- b->m.planes[i].m.userptr);
-
- if (!buffer_info) {
- dprintk(VIDC_ERR,
- "%s no buffer info registered for buffer addr: %#lx\n",
- __func__, b->m.planes[i].m.userptr);
- return -EINVAL;
- }
-
- b->m.planes[i].m.userptr = buffer_info->uvaddr[i];
- b->m.planes[i].reserved[0] = buffer_info->fd[i];
- b->m.planes[i].reserved[1] = buffer_info->buff_off[i];
- }
-
- if (!buffer_info) {
- dprintk(VIDC_ERR,
- "%s: error - no buffer info found in registered list\n",
- __func__);
- return -EINVAL;
- }
-
- rc = output_buffer_cache_invalidate(inst, buffer_info);
- if (rc)
- return rc;
-
-
- if (is_dynamic_buffer_mode(b, inst)) {
- buffer_info->dequeued = true;
-
- dprintk(VIDC_DBG, "[DEQUEUED]: fd[0] = %d\n",
- buffer_info->fd[0]);
- mutex_lock(&inst->registeredbufs.lock);
- rc = unmap_and_deregister_buf(inst, buffer_info);
- mutex_unlock(&inst->registeredbufs.lock);
+ b->m.planes[i].reserved[0] = b->m.planes[i].m.fd;
+ b->m.planes[i].reserved[1] = b->m.planes[i].data_offset;
}
return rc;
@@ -1419,7 +803,6 @@
int rc = 0;
struct hfi_device *hdev;
struct hal_buffer_size_minimum b;
- struct vb2_buf_entry *temp, *next;
hdev = inst->core->device;
@@ -1536,15 +919,22 @@
fail_start:
if (rc) {
- mutex_lock(&inst->pendingq.lock);
- list_for_each_entry_safe(temp, next, &inst->pendingq.list,
- list) {
- vb2_buffer_done(temp->vb,
- VB2_BUF_STATE_QUEUED);
+ struct msm_vidc_buffer *temp, *next;
+
+ mutex_lock(&inst->registeredbufs.lock);
+ list_for_each_entry_safe(temp, next,
+ &inst->registeredbufs.list, list) {
+ struct vb2_buffer *vb;
+
+ print_vidc_buffer(VIDC_ERR, "return buf", inst, temp);
+ vb = msm_comm_get_vb_using_vidc_buffer(inst, temp);
+ if (vb)
+ vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED);
+ msm_comm_unmap_vidc_buffer(inst, temp);
list_del(&temp->list);
kfree(temp);
}
- mutex_unlock(&inst->pendingq.lock);
+ mutex_unlock(&inst->registeredbufs.lock);
}
return rc;
}
@@ -1651,12 +1041,29 @@
inst, q->type);
}
-static void msm_vidc_buf_queue(struct vb2_buffer *vb)
+static void msm_vidc_buf_queue(struct vb2_buffer *vb2)
{
- int rc = msm_comm_qbuf(vb2_get_drv_priv(vb->vb2_queue), vb);
+ int rc = 0;
+ struct msm_vidc_inst *inst = NULL;
+ struct msm_vidc_buffer *mbuf = NULL;
+ inst = vb2_get_drv_priv(vb2->vb2_queue);
+ if (!inst) {
+ dprintk(VIDC_ERR, "%s: invalid inst\n", __func__);
+ return;
+ }
+
+ mbuf = msm_comm_get_vidc_buffer(inst, vb2);
+ if (IS_ERR_OR_NULL(mbuf)) {
+ if (PTR_ERR(mbuf) != -EEXIST)
+ print_vb2_buffer(VIDC_ERR, "failed to get vidc-buf",
+ inst, vb2);
+ return;
+ }
+
+ rc = msm_comm_qbuf(inst, mbuf);
if (rc)
- dprintk(VIDC_ERR, "Failed to queue buffer: %d\n", rc);
+ print_vidc_buffer(VIDC_ERR, "failed qbuf", inst, mbuf);
}
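Editorial aside: msm_vidc_buf_queue() is the vb2 .buf_queue hook, which
returns void, so it can only log on failure; vb2 already owns the buffer at
this point. A minimal sketch of the wiring, with the driver-specific work
elided:

    /* Sketch: minimal .buf_queue wiring (other mandatory vb2 ops elided). */
    static void example_buf_queue(struct vb2_buffer *vb2)
    {
        struct msm_vidc_inst *inst = vb2_get_drv_priv(vb2->vb2_queue);

        if (!inst)
            return;
        /* look up / create the driver-side buffer and queue it */
    }

    static const struct vb2_ops example_vb2q_ops = {
        .buf_queue = example_buf_queue,
    };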
static const struct vb2_ops msm_vidc_vb2q_ops = {
@@ -1843,7 +1250,7 @@
struct v4l2_ctrl *ctrl)
{
int rc = 0;
- struct hal_buffer_requirements *bufreq, *newreq;
+ struct hal_buffer_requirements *bufreq;
enum hal_buffer buffer_type;
if (ctrl->id == V4L2_CID_MIN_BUFFERS_FOR_OUTPUT) {
@@ -1892,16 +1299,7 @@
if (inst->in_reconfig) {
- rc = msm_comm_try_get_bufreqs(inst);
- newreq = get_buff_req_buffer(inst,
- buffer_type);
- if (!newreq) {
- dprintk(VIDC_ERR,
- "Failed to find new bufreqs = %d\n",
- buffer_type);
- return 0;
- }
- ctrl->val = newreq->buffer_count_min;
+ ctrl->val = bufreq->buffer_count_min;
}
if (inst->session_type == MSM_VIDC_DECODER &&
!inst->in_reconfig &&
@@ -1969,9 +1367,6 @@
break;
case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
- if (inst->in_reconfig)
- msm_comm_try_get_bufreqs(inst);
-
buffer_type = msm_comm_get_hal_output_buffer(inst);
bufreq = get_buff_req_buffer(inst,
buffer_type);
@@ -1984,7 +1379,6 @@
ctrl->val = bufreq->buffer_count_min_host;
break;
case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
- msm_comm_try_get_bufreqs(inst);
bufreq = get_buff_req_buffer(inst, HAL_BUFFER_INPUT);
if (!bufreq) {
dprintk(VIDC_ERR,
@@ -2085,7 +1479,6 @@
mutex_init(&inst->bufq[OUTPUT_PORT].lock);
mutex_init(&inst->lock);
- INIT_MSM_VIDC_LIST(&inst->pendingq);
INIT_MSM_VIDC_LIST(&inst->scratchbufs);
INIT_MSM_VIDC_LIST(&inst->freqs);
INIT_MSM_VIDC_LIST(&inst->persistbufs);
@@ -2192,7 +1585,6 @@
mutex_destroy(&inst->bufq[OUTPUT_PORT].lock);
mutex_destroy(&inst->lock);
- DEINIT_MSM_VIDC_LIST(&inst->pendingq);
DEINIT_MSM_VIDC_LIST(&inst->scratchbufs);
DEINIT_MSM_VIDC_LIST(&inst->persistbufs);
DEINIT_MSM_VIDC_LIST(&inst->pending_getpropq);
@@ -2208,55 +1600,43 @@
static void cleanup_instance(struct msm_vidc_inst *inst)
{
- struct vb2_buf_entry *entry, *dummy;
-
- if (inst) {
-
- mutex_lock(&inst->pendingq.lock);
- list_for_each_entry_safe(entry, dummy, &inst->pendingq.list,
- list) {
- list_del(&entry->list);
- kfree(entry);
- }
- mutex_unlock(&inst->pendingq.lock);
-
- msm_comm_free_freq_table(inst);
-
- if (msm_comm_release_scratch_buffers(inst, false)) {
- dprintk(VIDC_ERR,
- "Failed to release scratch buffers\n");
- }
-
- if (msm_comm_release_recon_buffers(inst)) {
- dprintk(VIDC_ERR,
- "Failed to release recon buffers\n");
- }
-
- if (msm_comm_release_persist_buffers(inst)) {
- dprintk(VIDC_ERR,
- "Failed to release persist buffers\n");
- }
-
- /*
- * At this point all buffes should be with driver
- * irrespective of scenario
- */
- msm_comm_validate_output_buffers(inst);
-
- if (msm_comm_release_output_buffers(inst, true)) {
- dprintk(VIDC_ERR,
- "Failed to release output buffers\n");
- }
-
- if (inst->extradata_handle)
- msm_comm_smem_free(inst, inst->extradata_handle);
-
- debugfs_remove_recursive(inst->debugfs_root);
-
- mutex_lock(&inst->pending_getpropq.lock);
- WARN_ON(!list_empty(&inst->pending_getpropq.list));
- mutex_unlock(&inst->pending_getpropq.lock);
+ if (!inst) {
+ dprintk(VIDC_ERR, "%s: invalid params\n", __func__);
+ return;
}
+
+ msm_comm_free_freq_table(inst);
+
+ if (msm_comm_release_scratch_buffers(inst, false))
+ dprintk(VIDC_ERR,
+ "Failed to release scratch buffers\n");
+
+ if (msm_comm_release_recon_buffers(inst))
+ dprintk(VIDC_ERR,
+ "Failed to release recon buffers\n");
+
+ if (msm_comm_release_persist_buffers(inst))
+ dprintk(VIDC_ERR,
+ "Failed to release persist buffers\n");
+
+ /*
+ * At this point all buffers should be with the driver
+ * irrespective of scenario
+ */
+ msm_comm_validate_output_buffers(inst);
+
+ if (msm_comm_release_output_buffers(inst, true))
+ dprintk(VIDC_ERR,
+ "Failed to release output buffers\n");
+
+ if (inst->extradata_handle)
+ msm_comm_smem_free(inst, inst->extradata_handle);
+
+ debugfs_remove_recursive(inst->debugfs_root);
+
+ mutex_lock(&inst->pending_getpropq.lock);
+ WARN_ON(!list_empty(&inst->pending_getpropq.list));
+ mutex_unlock(&inst->pending_getpropq.lock);
}
int msm_vidc_destroy(struct msm_vidc_inst *inst)
@@ -2264,8 +1644,10 @@
struct msm_vidc_core *core;
int i = 0;
- if (!inst || !inst->core)
+ if (!inst || !inst->core) {
+ dprintk(VIDC_ERR, "%s: invalid params\n", __func__);
return -EINVAL;
+ }
core = inst->core;
@@ -2276,7 +1658,6 @@
msm_comm_ctrl_deinit(inst);
- DEINIT_MSM_VIDC_LIST(&inst->pendingq);
DEINIT_MSM_VIDC_LIST(&inst->scratchbufs);
DEINIT_MSM_VIDC_LIST(&inst->persistbufs);
DEINIT_MSM_VIDC_LIST(&inst->pending_getpropq);
@@ -2300,22 +1681,24 @@
return 0;
}
+static void close_helper(struct kref *kref)
+{
+ struct msm_vidc_inst *inst = container_of(kref,
+ struct msm_vidc_inst, kref);
+
+ msm_vidc_destroy(inst);
+}
+
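Editorial aside: hoisting close_helper() to file scope drops the GCC
nested-function extension used before; kref_put() then invokes it when the
last reference is released. Sketch of the call site, assuming the instance's
kref field:

    /* Sketch: dropping the final reference triggers close_helper(). */
    static void example_put_inst(struct msm_vidc_inst *inst)
    {
        kref_put(&inst->kref, close_helper);
    }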
int msm_vidc_close(void *instance)
{
- void close_helper(struct kref *kref)
- {
- struct msm_vidc_inst *inst = container_of(kref,
- struct msm_vidc_inst, kref);
-
- msm_vidc_destroy(inst);
- }
-
struct msm_vidc_inst *inst = instance;
- struct buffer_info *bi, *dummy;
+ struct msm_vidc_buffer *temp, *dummy;
int rc = 0;
- if (!inst || !inst->core)
+ if (!inst || !inst->core) {
+ dprintk(VIDC_ERR, "%s: invalid params\n", __func__);
return -EINVAL;
+ }
/*
* Make sure that HW stop working on these buffers that
@@ -2327,19 +1710,13 @@
MSM_VIDC_RELEASE_RESOURCES_DONE);
mutex_lock(&inst->registeredbufs.lock);
- list_for_each_entry_safe(bi, dummy, &inst->registeredbufs.list, list) {
- int i = 0;
-
- list_del(&bi->list);
-
- for (i = 0; i < min(bi->num_planes, VIDEO_MAX_PLANES);
- i++) {
- if (bi->handle[i] && bi->mapped[i])
- msm_comm_smem_free(inst, bi->handle[i]);
- }
-
- kfree(bi);
- }
+ list_for_each_entry_safe(temp, dummy, &inst->registeredbufs.list,
+ list) {
+ print_vidc_buffer(VIDC_ERR, "undequeud buf", inst, temp);
+ msm_comm_unmap_vidc_buffer(inst, temp);
+ list_del(&temp->list);
+ kfree(temp);
+ }
mutex_unlock(&inst->registeredbufs.lock);
cleanup_instance(inst);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
index 60262a1..5e366d0 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
@@ -190,7 +190,7 @@
static inline int get_pending_bufs_fw(struct msm_vidc_inst *inst)
{
- int fw_out_qsize = 0, buffers_in_driver = 0;
+ int fw_out_qsize = 0;
/*
* DCVS always operates on Uncompressed buffers.
@@ -203,11 +203,9 @@
fw_out_qsize = inst->count.ftb - inst->count.fbd;
else
fw_out_qsize = inst->count.etb - inst->count.ebd;
-
- buffers_in_driver = inst->buffers_held_in_driver;
}
- return fw_out_qsize + buffers_in_driver;
+ return fw_out_qsize;
}
static int msm_dcvs_scale_clocks(struct msm_vidc_inst *inst)
@@ -266,7 +264,7 @@
}
static void msm_vidc_update_freq_entry(struct msm_vidc_inst *inst,
- unsigned long freq, ion_phys_addr_t device_addr)
+ unsigned long freq, u32 device_addr)
{
struct vidc_freq_data *temp, *next;
bool found = false;
@@ -292,7 +290,7 @@
// TODO this needs to be removed later and use queued_list
void msm_vidc_clear_freq_entry(struct msm_vidc_inst *inst,
- ion_phys_addr_t device_addr)
+ u32 device_addr)
{
struct vidc_freq_data *temp, *next;
@@ -515,10 +513,10 @@
int msm_comm_scale_clocks(struct msm_vidc_inst *inst)
{
- struct vb2_buf_entry *temp, *next;
+ struct msm_vidc_buffer *temp, *next;
unsigned long freq = 0;
u32 filled_len = 0;
- ion_phys_addr_t device_addr = 0;
+ u32 device_addr = 0;
if (!inst || !inst->core) {
dprintk(VIDC_ERR, "%s Invalid args: Inst = %pK\n",
@@ -526,15 +524,17 @@
return -EINVAL;
}
- mutex_lock(&inst->pendingq.lock);
- list_for_each_entry_safe(temp, next, &inst->pendingq.list, list) {
- if (temp->vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ mutex_lock(&inst->registeredbufs.lock);
+ list_for_each_entry_safe(temp, next, &inst->registeredbufs.list, list) {
+ if (temp->vvb.vb2_buf.type ==
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
+ temp->deferred) {
filled_len = max(filled_len,
- temp->vb->planes[0].bytesused);
- device_addr = temp->vb->planes[0].m.userptr;
+ temp->vvb.vb2_buf.planes[0].bytesused);
+ device_addr = temp->smem[0].device_addr;
}
}
- mutex_unlock(&inst->pendingq.lock);
+ mutex_unlock(&inst->registeredbufs.lock);
if (!filled_len || !device_addr) {
dprintk(VIDC_PROF, "No Change in frequency\n");
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.h b/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
index db57647..e1226e4 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
@@ -42,7 +42,7 @@
int msm_vidc_decide_work_mode(struct msm_vidc_inst *inst);
int msm_vidc_decide_core_and_power_mode(struct msm_vidc_inst *inst);
void msm_vidc_clear_freq_entry(struct msm_vidc_inst *inst,
- ion_phys_addr_t device_addr);
+ u32 device_addr);
void update_recon_stats(struct msm_vidc_inst *inst,
struct recon_stats_type *recon_stats);
#endif
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index ff16698..ac69ab8 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -36,6 +36,7 @@
#define L_MODE V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY
#define MAX_SUPPORTED_INSTANCES 16
+static int msm_vidc_update_host_buff_counts(struct msm_vidc_inst *inst);
const char *const mpeg_video_vidc_extradata[] = {
"Extradata none",
@@ -1068,9 +1069,9 @@
mutex_lock(&inst->scratchbufs.lock);
list_for_each_safe(ptr, next, &inst->scratchbufs.list) {
buf = list_entry(ptr, struct internal_buf, list);
- if (address == (u32)buf->handle->device_addr) {
- dprintk(VIDC_DBG, "releasing scratch: %pa\n",
- &buf->handle->device_addr);
+ if (address == buf->smem.device_addr) {
+ dprintk(VIDC_DBG, "releasing scratch: %x\n",
+ buf->smem.device_addr);
buf_found = true;
}
}
@@ -1079,9 +1080,9 @@
mutex_lock(&inst->persistbufs.lock);
list_for_each_safe(ptr, next, &inst->persistbufs.list) {
buf = list_entry(ptr, struct internal_buf, list);
- if (address == (u32)buf->handle->device_addr) {
- dprintk(VIDC_DBG, "releasing persist: %pa\n",
- &buf->handle->device_addr);
+ if (address == buf->smem.device_addr) {
+ dprintk(VIDC_DBG, "releasing persist: %x\n",
+ buf->smem.device_addr);
buf_found = true;
}
}
@@ -1447,6 +1448,20 @@
put_inst(inst);
}
+static void msm_vidc_queue_rbr_event(struct msm_vidc_inst *inst,
+ int fd, u32 offset)
+{
+ struct v4l2_event buf_event = {0};
+ u32 *ptr;
+
+ buf_event.type = V4L2_EVENT_RELEASE_BUFFER_REFERENCE;
+ ptr = (u32 *)buf_event.u.data;
+ ptr[0] = fd;
+ ptr[1] = offset;
+
+ v4l2_event_queue_fh(&inst->event_handler, &buf_event);
+}
+
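Editorial aside: msm_vidc_queue_rbr_event() packs the fd and offset into the
first two words of the event payload. A hedged sketch of the matching
userspace dequeue (event type taken from this driver's uapi; includes
<sys/ioctl.h> and <linux/videodev2.h> assumed):

    /* Sketch: userspace side reading the RBR payload queued above. */
    static void example_handle_rbr(int video_fd)
    {
        struct v4l2_event ev;

        if (ioctl(video_fd, VIDIOC_DQEVENT, &ev))
            return;
        if (ev.type == V4L2_EVENT_RELEASE_BUFFER_REFERENCE) {
            __u32 *data = (__u32 *)ev.u.data;
            /* data[0] = buffer fd, data[1] = offset (see above);
             * re-queue or recycle the referenced buffer here. */
        }
    }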
static void handle_event_change(enum hal_command_response cmd, void *data)
{
struct msm_vidc_inst *inst = NULL;
@@ -1456,6 +1471,7 @@
int rc = 0;
struct hfi_device *hdev;
u32 *ptr = NULL;
+ struct hal_buffer_requirements *bufreq;
if (!event_notify) {
dprintk(VIDC_WARN, "Got an empty event from hfi\n");
@@ -1479,65 +1495,17 @@
break;
case HAL_EVENT_RELEASE_BUFFER_REFERENCE:
{
- struct v4l2_event buf_event = {0};
- struct buffer_info *binfo = NULL, *temp = NULL;
- u32 *ptr = NULL;
-
- dprintk(VIDC_DBG, "%s - inst: %pK buffer: %pa extra: %pa\n",
- __func__, inst, &event_notify->packet_buffer,
- &event_notify->extra_data_buffer);
-
- if (inst->state == MSM_VIDC_CORE_INVALID ||
- inst->core->state == VIDC_CORE_INVALID) {
- dprintk(VIDC_DBG,
- "Event release buf ref received in invalid state - discard\n");
- goto err_bad_event;
- }
-
- /*
- * Get the buffer_info entry for the
- * device address.
- */
- binfo = device_to_uvaddr(&inst->registeredbufs,
- event_notify->packet_buffer);
- if (!binfo) {
- dprintk(VIDC_ERR,
- "%s buffer not found in registered list\n",
- __func__);
- goto err_bad_event;
- }
-
- /* Fill event data to be sent to client*/
- buf_event.type = V4L2_EVENT_RELEASE_BUFFER_REFERENCE;
- ptr = (u32 *)buf_event.u.data;
- ptr[0] = binfo->fd[0];
- ptr[1] = binfo->buff_off[0];
+ u32 planes[VIDEO_MAX_PLANES] = {0};
dprintk(VIDC_DBG,
- "RELEASE REFERENCE EVENT FROM F/W - fd = %d offset = %d\n",
- ptr[0], ptr[1]);
+ "%s: inst: %pK data_buffer: %x extradata_buffer: %x\n",
+ __func__, inst, event_notify->packet_buffer,
+ event_notify->extra_data_buffer);
- /* Decrement buffer reference count*/
- mutex_lock(&inst->registeredbufs.lock);
- list_for_each_entry(temp, &inst->registeredbufs.list,
- list) {
- if (temp == binfo) {
- buf_ref_put(inst, binfo);
- break;
- }
- }
+ planes[0] = event_notify->packet_buffer;
+ planes[1] = event_notify->extra_data_buffer;
+ handle_release_buffer_reference(inst, planes);
- /*
- * Release buffer and remove from list
- * if reference goes to zero.
- */
- if (unmap_and_deregister_buf(inst, binfo))
- dprintk(VIDC_ERR,
- "%s: buffer unmap failed\n", __func__);
- mutex_unlock(&inst->registeredbufs.lock);
-
- /*send event to client*/
- v4l2_event_queue_fh(&inst->event_handler, &buf_event);
goto err_bad_event;
}
default:
@@ -1600,6 +1568,46 @@
inst->in_reconfig = true;
inst->reconfig_height = event_notify->height;
inst->reconfig_width = event_notify->width;
+
+ if (msm_comm_get_stream_output_mode(inst) ==
+ HAL_VIDEO_DECODER_SECONDARY) {
+
+ bufreq = get_buff_req_buffer(inst,
+ HAL_BUFFER_OUTPUT);
+ if (!bufreq) {
+ dprintk(VIDC_ERR,
+ "Failed : No buffer requirements : %x\n",
+ HAL_BUFFER_OUTPUT);
+ return;
+ }
+
+ bufreq->buffer_count_min = event_notify->capture_buf_count;
+
+ bufreq = get_buff_req_buffer(inst,
+ HAL_BUFFER_OUTPUT2);
+ if (!bufreq) {
+ dprintk(VIDC_ERR,
+ "Failed : No buffer requirements : %x\n",
+ HAL_BUFFER_OUTPUT2);
+ return;
+ }
+
+ bufreq->buffer_count_min = event_notify->capture_buf_count;
+ } else {
+
+ bufreq = get_buff_req_buffer(inst,
+ HAL_BUFFER_OUTPUT);
+ if (!bufreq) {
+ dprintk(VIDC_ERR,
+ "Failed : No buffer requirements : %x\n",
+ HAL_BUFFER_OUTPUT);
+ return;
+ }
+ bufreq->buffer_count_min = event_notify->capture_buf_count;
+
+ }
+
+ msm_vidc_update_host_buff_counts(inst);
mutex_unlock(&inst->lock);
if (event == V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT) {
@@ -1780,8 +1788,8 @@
list_for_each_entry(binfo, &inst->outputbufs.list, list) {
if (binfo->buffer_ownership != DRIVER) {
dprintk(VIDC_DBG,
- "This buffer is with FW %pa\n",
- &binfo->handle->device_addr);
+ "This buffer is with FW %x\n",
+ binfo->smem.device_addr);
continue;
}
buffers_owned_by_driver++;
@@ -1801,7 +1809,6 @@
{
struct internal_buf *binfo;
struct hfi_device *hdev;
- struct msm_smem *handle;
struct vidc_frame_data frame_data = {0};
struct hal_buffer_requirements *output_buf, *extra_buf;
int rc = 0;
@@ -1831,13 +1838,12 @@
list_for_each_entry(binfo, &inst->outputbufs.list, list) {
if (binfo->buffer_ownership != DRIVER)
continue;
- handle = binfo->handle;
frame_data.alloc_len = output_buf->buffer_size;
frame_data.filled_len = 0;
frame_data.offset = 0;
- frame_data.device_addr = handle->device_addr;
+ frame_data.device_addr = binfo->smem.device_addr;
frame_data.flags = 0;
- frame_data.extradata_addr = handle->device_addr +
+ frame_data.extradata_addr = binfo->smem.device_addr +
output_buf->buffer_size;
frame_data.buffer_type = HAL_BUFFER_OUTPUT;
frame_data.extradata_size = extra_buf ?
@@ -1888,7 +1894,7 @@
}
}
}
- atomic_dec(&inst->in_flush);
+ inst->in_flush = false;
flush_event.type = V4L2_EVENT_MSM_VIDC_FLUSH_DONE;
ptr = (u32 *)flush_event.u.data;
@@ -2111,82 +2117,84 @@
put_inst(inst);
}
-static struct vb2_buffer *get_vb_from_device_addr(struct buf_queue *bufq,
- unsigned long dev_addr)
+struct vb2_buffer *msm_comm_get_vb_using_vidc_buffer(
+ struct msm_vidc_inst *inst, struct msm_vidc_buffer *mbuf)
{
+ u32 port = 0;
struct vb2_buffer *vb = NULL;
struct vb2_queue *q = NULL;
- int found = 0;
+ bool found = false;
- if (!bufq) {
- dprintk(VIDC_ERR, "Invalid parameter\n");
+ if (mbuf->vvb.vb2_buf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ port = CAPTURE_PORT;
+ } else if (mbuf->vvb.vb2_buf.type ==
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ port = OUTPUT_PORT;
+ } else {
+ dprintk(VIDC_ERR, "%s: invalid type %d\n",
+ __func__, mbuf->vvb.vb2_buf.type);
return NULL;
}
- q = &bufq->vb2_bufq;
- mutex_lock(&bufq->lock);
+
+ q = &inst->bufq[port].vb2_bufq;
+ mutex_lock(&inst->bufq[port].lock);
+ found = false;
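+ /* search the queued list for a vb2 buffer whose planes match mbuf */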
list_for_each_entry(vb, &q->queued_list, queued_entry) {
- if (vb->planes[0].m.userptr == dev_addr &&
- vb->state == VB2_BUF_STATE_ACTIVE) {
- found = 1;
- dprintk(VIDC_DBG, "Found v4l2_buf index : %d\n",
- vb->index);
+ if (msm_comm_compare_vb2_planes(inst, mbuf, vb)) {
+ found = true;
break;
}
}
- mutex_unlock(&bufq->lock);
+ mutex_unlock(&inst->bufq[port].lock);
if (!found) {
- dprintk(VIDC_DBG,
- "Failed to find buffer in queued list: %#lx, qtype = %d\n",
- dev_addr, q->type);
- vb = NULL;
+ print_vidc_buffer(VIDC_ERR, "vb2 not found for", inst, mbuf);
+ return NULL;
}
+
return vb;
}
-static void handle_dynamic_buffer(struct msm_vidc_inst *inst,
- ion_phys_addr_t device_addr, u32 flags)
+int msm_comm_vb2_buffer_done(struct msm_vidc_inst *inst,
+ struct vb2_buffer *vb)
{
- struct buffer_info *binfo = NULL, *temp = NULL;
+ u32 port;
- /*
- * Update reference count and release OR queue back the buffer,
- * only when firmware is not holding a reference.
- */
- binfo = device_to_uvaddr(&inst->registeredbufs, device_addr);
- if (!binfo) {
- dprintk(VIDC_ERR,
- "%s buffer not found in registered list\n",
- __func__);
- return;
+ if (!inst || !vb) {
+ dprintk(VIDC_ERR, "%s: invalid params %pK %pK\n",
+ __func__, inst, vb);
+ return -EINVAL;
}
- if (flags & HAL_BUFFERFLAG_READONLY) {
- dprintk(VIDC_DBG,
- "FBD fd[0] = %d -> Reference with f/w, addr: %pa\n",
- binfo->fd[0], &device_addr);
+
+ if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ port = CAPTURE_PORT;
+ } else if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ port = OUTPUT_PORT;
} else {
- dprintk(VIDC_DBG,
- "FBD fd[0] = %d -> FBD_ref_released, addr: %pa\n",
- binfo->fd[0], &device_addr);
-
- mutex_lock(&inst->registeredbufs.lock);
- list_for_each_entry(temp, &inst->registeredbufs.list,
- list) {
- if (temp == binfo) {
- buf_ref_put(inst, binfo);
- break;
- }
- }
- mutex_unlock(&inst->registeredbufs.lock);
+ dprintk(VIDC_ERR, "%s: invalid type %d\n",
+ __func__, vb->type);
+ return -EINVAL;
}
+ msm_vidc_debugfs_update(inst, port == CAPTURE_PORT ?
+ MSM_VIDC_DEBUGFS_EVENT_FBD :
+ MSM_VIDC_DEBUGFS_EVENT_EBD);
+
+ mutex_lock(&inst->bufq[port].lock);
+ vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+ mutex_unlock(&inst->bufq[port].lock);
+
+ return 0;
}
static void handle_ebd(enum hal_command_response cmd, void *data)
{
struct msm_vidc_cb_data_done *response = data;
+ struct msm_vidc_buffer *mbuf;
struct vb2_buffer *vb;
struct msm_vidc_inst *inst;
struct vidc_hal_ebd *empty_buf_done;
- struct vb2_v4l2_buffer *vbuf = NULL;
+ struct vb2_v4l2_buffer *vbuf;
+ u32 planes[VIDEO_MAX_PLANES] = {0};
+ u32 extra_idx = 0, i;
if (!response) {
dprintk(VIDC_ERR, "Invalid response from vidc_hal\n");
@@ -2199,140 +2207,79 @@
dprintk(VIDC_WARN, "Got a response for an inactive session\n");
return;
}
- if (inst->buffer_mode_set[OUTPUT_PORT] == HAL_BUFFER_MODE_DYNAMIC)
- handle_dynamic_buffer(inst,
- response->input_done.packet_buffer, 0);
- vb = get_vb_from_device_addr(&inst->bufq[OUTPUT_PORT],
- response->input_done.packet_buffer);
+ empty_buf_done = (struct vidc_hal_ebd *)&response->input_done;
+ planes[0] = empty_buf_done->packet_buffer;
+ planes[1] = empty_buf_done->extra_data_buffer;
+
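+ /* look up the driver's buffer from the device addresses returned by firmware */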
+ mbuf = msm_comm_get_buffer_using_device_planes(inst, planes);
+ if (!mbuf) {
+ dprintk(VIDC_ERR,
+ "%s: data_addr %x, extradata_addr %x not found\n",
+ __func__, planes[0], planes[1]);
+ goto exit;
+ }
+ vb = &mbuf->vvb.vb2_buf;
+
+ vb->planes[0].bytesused = response->input_done.filled_len;
+ if (vb->planes[0].bytesused > vb->planes[0].length)
+ dprintk(VIDC_INFO, "bytesused overflow length\n");
+
+ if (empty_buf_done->status == VIDC_ERR_NOT_SUPPORTED) {
+ dprintk(VIDC_INFO, "Failed : Unsupported input stream\n");
+ mbuf->vvb.flags |= V4L2_QCOM_BUF_INPUT_UNSUPPORTED;
+ }
+ if (empty_buf_done->status == VIDC_ERR_BITSTREAM_ERR) {
+ dprintk(VIDC_INFO, "Failed : Corrupted input stream\n");
+ mbuf->vvb.flags |= V4L2_QCOM_BUF_DATA_CORRUPT;
+ }
+ if (empty_buf_done->flags & HAL_BUFFERFLAG_SYNCFRAME)
+ mbuf->vvb.flags |= V4L2_QCOM_BUF_FLAG_IDRFRAME |
+ V4L2_BUF_FLAG_KEYFRAME;
+
+ extra_idx = EXTRADATA_IDX(inst->bufq[OUTPUT_PORT].num_planes);
+ if (extra_idx && extra_idx < VIDEO_MAX_PLANES)
+ vb->planes[extra_idx].bytesused = vb->planes[extra_idx].length;
+
+ update_recon_stats(inst, &empty_buf_done->recon_stats);
+ msm_vidc_clear_freq_entry(inst, mbuf->smem[0].device_addr);
+
+ vb = msm_comm_get_vb_using_vidc_buffer(inst, mbuf);
if (vb) {
vbuf = to_vb2_v4l2_buffer(vb);
- vb->planes[0].bytesused = response->input_done.filled_len;
- vb->planes[0].data_offset = response->input_done.offset;
- if (vb->planes[0].data_offset > vb->planes[0].length)
- dprintk(VIDC_INFO, "data_offset overflow length\n");
- if (vb->planes[0].bytesused > vb->planes[0].length)
- dprintk(VIDC_INFO, "bytesused overflow length\n");
- if (vb->planes[0].m.userptr !=
- response->clnt_data)
- dprintk(VIDC_INFO, "Client data != bufaddr\n");
- empty_buf_done = (struct vidc_hal_ebd *)&response->input_done;
- if (empty_buf_done) {
- if (empty_buf_done->status == VIDC_ERR_NOT_SUPPORTED) {
- dprintk(VIDC_INFO,
- "Failed : Unsupported input stream\n");
- vbuf->flags |=
- V4L2_QCOM_BUF_INPUT_UNSUPPORTED;
- }
- if (empty_buf_done->status == VIDC_ERR_BITSTREAM_ERR) {
- dprintk(VIDC_INFO,
- "Failed : Corrupted input stream\n");
- vbuf->flags |=
- V4L2_QCOM_BUF_DATA_CORRUPT;
- }
- if (empty_buf_done->flags & HAL_BUFFERFLAG_SYNCFRAME)
- vbuf->flags |=
- V4L2_QCOM_BUF_FLAG_IDRFRAME |
- V4L2_BUF_FLAG_KEYFRAME;
- }
-
- update_recon_stats(inst, &empty_buf_done->recon_stats);
-
- dprintk(VIDC_DBG,
- "Got ebd from hal: device_addr: %pa, alloc: %d, status: %#x, pic_type: %#x, flags: %#x\n",
- &empty_buf_done->packet_buffer,
- empty_buf_done->alloc_len, empty_buf_done->status,
- empty_buf_done->picture_type, empty_buf_done->flags);
-
- msm_vidc_clear_freq_entry(inst, empty_buf_done->packet_buffer);
-
- mutex_lock(&inst->bufq[OUTPUT_PORT].lock);
- vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
- mutex_unlock(&inst->bufq[OUTPUT_PORT].lock);
- msm_vidc_debugfs_update(inst, MSM_VIDC_DEBUGFS_EVENT_EBD);
+ vbuf->flags |= mbuf->vvb.flags;
+ for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++)
+ vb->planes[i].bytesused =
+ mbuf->vvb.vb2_buf.planes[i].bytesused;
}
+ /*
+ * put_buffer must be called before vb2_buffer_done, otherwise the
+ * client might queue the same buffer before it is unmapped in
+ * put_buffer. Also, don't use mbuf after put_buffer as it may have
+ * been freed there.
+ */
+ msm_comm_put_vidc_buffer(inst, mbuf);
+ msm_comm_vb2_buffer_done(inst, vb);
+exit:
put_inst(inst);
}
-int buf_ref_get(struct msm_vidc_inst *inst, struct buffer_info *binfo)
-{
- int cnt = 0;
-
- if (!inst || !binfo)
- return -EINVAL;
-
- atomic_inc(&binfo->ref_count);
- cnt = atomic_read(&binfo->ref_count);
- if (cnt >= 2)
- inst->buffers_held_in_driver++;
-
- dprintk(VIDC_DBG, "REF_GET[%d] fd[0] = %d\n", cnt, binfo->fd[0]);
-
- return cnt;
-}
-
-int buf_ref_put(struct msm_vidc_inst *inst, struct buffer_info *binfo)
-{
- int rc = 0;
- int cnt;
- bool release_buf = false;
- bool qbuf_again = false;
-
- if (!inst || !binfo)
- return -EINVAL;
-
- atomic_dec(&binfo->ref_count);
- cnt = atomic_read(&binfo->ref_count);
- dprintk(VIDC_DBG, "REF_PUT[%d] fd[0] = %d\n", cnt, binfo->fd[0]);
- if (!cnt)
- release_buf = true;
- else if (cnt >= 1)
- qbuf_again = true;
- else {
- dprintk(VIDC_DBG, "%s: invalid ref_cnt: %d\n", __func__, cnt);
- cnt = -EINVAL;
- }
-
- if (cnt < 0)
- return cnt;
-
- if (release_buf) {
- /*
- * We can not delete binfo here as we need to set the user
- * virtual address saved in binfo->uvaddr to the dequeued v4l2
- * buffer.
- *
- * We will set the pending_deletion flag to true here and delete
- * binfo from registered list in dqbuf after setting the uvaddr.
- */
- dprintk(VIDC_DBG, "fd[0] = %d -> pending_deletion = true\n",
- binfo->fd[0]);
- binfo->pending_deletion = true;
- } else if (qbuf_again) {
- inst->buffers_held_in_driver--;
- rc = qbuf_dynamic_buf(inst, binfo);
- if (!rc)
- return rc;
- }
- return cnt;
-}
-
static int handle_multi_stream_buffers(struct msm_vidc_inst *inst,
- ion_phys_addr_t dev_addr)
+ u32 dev_addr)
{
struct internal_buf *binfo;
- struct msm_smem *handle;
+ struct msm_smem *smem;
bool found = false;
mutex_lock(&inst->outputbufs.lock);
list_for_each_entry(binfo, &inst->outputbufs.list, list) {
- handle = binfo->handle;
- if (handle && dev_addr == handle->device_addr) {
+ smem = &binfo->smem;
+ if (smem && dev_addr == smem->device_addr) {
if (binfo->buffer_ownership == DRIVER) {
dprintk(VIDC_ERR,
- "FW returned same buffer: %pa\n",
- &dev_addr);
+ "FW returned same buffer: %x\n",
+ dev_addr);
break;
}
binfo->buffer_ownership = DRIVER;
@@ -2344,8 +2291,8 @@
if (!found) {
dprintk(VIDC_ERR,
- "Failed to find output buffer in queued list: %pa\n",
- &dev_addr);
+ "Failed to find output buffer in queued list: %x\n",
+ dev_addr);
}
return 0;
@@ -2363,13 +2310,15 @@
static void handle_fbd(enum hal_command_response cmd, void *data)
{
struct msm_vidc_cb_data_done *response = data;
+ struct msm_vidc_buffer *mbuf;
struct msm_vidc_inst *inst;
struct vb2_buffer *vb = NULL;
struct vidc_hal_fbd *fill_buf_done;
+ struct vb2_v4l2_buffer *vbuf;
enum hal_buffer buffer_type;
- int extra_idx = 0;
u64 time_usec = 0;
- struct vb2_v4l2_buffer *vbuf = NULL;
+ u32 planes[VIDEO_MAX_PLANES] = {0};
+ u32 extra_idx, i;
if (!response) {
dprintk(VIDC_ERR, "Invalid response from vidc_hal\n");
@@ -2384,132 +2333,117 @@
}
fill_buf_done = (struct vidc_hal_fbd *)&response->output_done;
+ planes[0] = fill_buf_done->packet_buffer1;
+ planes[1] = fill_buf_done->extra_data_buffer;
+
buffer_type = msm_comm_get_hal_output_buffer(inst);
if (fill_buf_done->buffer_type == buffer_type) {
- vb = get_vb_from_device_addr(&inst->bufq[CAPTURE_PORT],
- fill_buf_done->packet_buffer1);
+ mbuf = msm_comm_get_buffer_using_device_planes(inst, planes);
+ if (!mbuf) {
+ dprintk(VIDC_ERR,
+ "%s: data_addr %x, extradata_addr %x not found\n",
+ __func__, planes[0], planes[1]);
+ goto exit;
+ }
} else {
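+ /*
+ * Split (secondary output) mode: the address belongs to a
+ * driver-owned DPB (OUTPUT) buffer, not a client CAPTURE buffer.
+ */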
if (handle_multi_stream_buffers(inst,
fill_buf_done->packet_buffer1))
dprintk(VIDC_ERR,
"Failed : Output buffer not found %pa\n",
&fill_buf_done->packet_buffer1);
- goto err_handle_fbd;
+ goto exit;
+ }
+ vb = &mbuf->vvb.vb2_buf;
+
+ if (fill_buf_done->flags1 & HAL_BUFFERFLAG_DROP_FRAME ||
+ fill_buf_done->flags1 & HAL_BUFFERFLAG_DECODEONLY)
+ fill_buf_done->filled_len1 = 0;
+ vb->planes[0].bytesused = fill_buf_done->filled_len1;
+ if (vb->planes[0].bytesused > vb->planes[0].length)
+ dprintk(VIDC_INFO,
+ "fbd:Overflow bytesused = %d; length = %d\n",
+ vb->planes[0].bytesused,
+ vb->planes[0].length);
+ if (vb->planes[0].data_offset != fill_buf_done->offset1)
+ dprintk(VIDC_ERR, "%s: data_offset %d vs %d\n",
+ __func__, vb->planes[0].data_offset,
+ fill_buf_done->offset1);
+ if (!(fill_buf_done->flags1 & HAL_BUFFERFLAG_TIMESTAMPINVALID)) {
+ time_usec = fill_buf_done->timestamp_hi;
+ time_usec = (time_usec << 32) | fill_buf_done->timestamp_lo;
+ } else {
+ time_usec = 0;
+ dprintk(VIDC_DBG,
+ "Set zero timestamp for buffer %pa, filled: %d, (hi:%u, lo:%u)\n",
+ &fill_buf_done->packet_buffer1,
+ fill_buf_done->filled_len1,
+ fill_buf_done->timestamp_hi,
+ fill_buf_done->timestamp_lo);
+ }
+ vb->timestamp = (time_usec * NSEC_PER_USEC);
+
+ extra_idx = EXTRADATA_IDX(inst->bufq[CAPTURE_PORT].num_planes);
+ if (extra_idx && extra_idx < VIDEO_MAX_PLANES)
+ vb->planes[extra_idx].bytesused = vb->planes[extra_idx].length;
+
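+ /* translate HAL fill_buf_done flags and picture type into v4l2 flags */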
+ mbuf->vvb.flags = 0;
+ if (fill_buf_done->flags1 & HAL_BUFFERFLAG_READONLY)
+ mbuf->vvb.flags |= V4L2_QCOM_BUF_FLAG_READONLY;
+ if (fill_buf_done->flags1 & HAL_BUFFERFLAG_EOS)
+ mbuf->vvb.flags |= V4L2_QCOM_BUF_FLAG_EOS;
+ if (fill_buf_done->flags1 & HAL_BUFFERFLAG_CODECCONFIG)
+ mbuf->vvb.flags |= V4L2_QCOM_BUF_FLAG_CODECCONFIG;
+ if (fill_buf_done->flags1 & HAL_BUFFERFLAG_SYNCFRAME)
+ mbuf->vvb.flags |= V4L2_QCOM_BUF_FLAG_IDRFRAME;
+ if (fill_buf_done->flags1 & HAL_BUFFERFLAG_EOSEQ)
+ mbuf->vvb.flags |= V4L2_QCOM_BUF_FLAG_EOSEQ;
+ if (fill_buf_done->flags1 & HAL_BUFFERFLAG_DECODEONLY ||
+ fill_buf_done->flags1 & HAL_BUFFERFLAG_DROP_FRAME)
+ mbuf->vvb.flags |= V4L2_QCOM_BUF_FLAG_DECODEONLY;
+ if (fill_buf_done->flags1 & HAL_BUFFERFLAG_DATACORRUPT)
+ mbuf->vvb.flags |= V4L2_QCOM_BUF_DATA_CORRUPT;
+ switch (fill_buf_done->picture_type) {
+ case HAL_PICTURE_IDR:
+ mbuf->vvb.flags |= V4L2_QCOM_BUF_FLAG_IDRFRAME;
+ mbuf->vvb.flags |= V4L2_BUF_FLAG_KEYFRAME;
+ break;
+ case HAL_PICTURE_I:
+ mbuf->vvb.flags |= V4L2_BUF_FLAG_KEYFRAME;
+ break;
+ case HAL_PICTURE_P:
+ mbuf->vvb.flags |= V4L2_BUF_FLAG_PFRAME;
+ break;
+ case HAL_PICTURE_B:
+ mbuf->vvb.flags |= V4L2_BUF_FLAG_BFRAME;
+ break;
+ case HAL_FRAME_NOTCODED:
+ case HAL_UNUSED_PICT:
+ /* Do we need to care about these? */
+ case HAL_FRAME_YUV:
+ break;
+ default:
+ break;
}
+ vb = msm_comm_get_vb_using_vidc_buffer(inst, mbuf);
if (vb) {
vbuf = to_vb2_v4l2_buffer(vb);
- if (fill_buf_done->flags1 & HAL_BUFFERFLAG_DROP_FRAME ||
- fill_buf_done->flags1 & HAL_BUFFERFLAG_DECODEONLY)
- fill_buf_done->filled_len1 = 0;
- vb->planes[0].bytesused = fill_buf_done->filled_len1;
- vb->planes[0].data_offset = fill_buf_done->offset1;
- if (vb->planes[0].data_offset > vb->planes[0].length)
- dprintk(VIDC_INFO,
- "fbd:Overflow data_offset = %d; length = %d\n",
- vb->planes[0].data_offset,
- vb->planes[0].length);
- if (vb->planes[0].bytesused > vb->planes[0].length)
- dprintk(VIDC_INFO,
- "fbd:Overflow bytesused = %d; length = %d\n",
- vb->planes[0].bytesused,
- vb->planes[0].length);
- if (!(fill_buf_done->flags1 &
- HAL_BUFFERFLAG_TIMESTAMPINVALID)) {
- time_usec = fill_buf_done->timestamp_hi;
- time_usec = (time_usec << 32) |
- fill_buf_done->timestamp_lo;
- } else {
- time_usec = 0;
- dprintk(VIDC_DBG,
- "Set zero timestamp for buffer %pa, filled: %d, (hi:%u, lo:%u)\n",
- &fill_buf_done->packet_buffer1,
- fill_buf_done->filled_len1,
- fill_buf_done->timestamp_hi,
- fill_buf_done->timestamp_lo);
- }
- vbuf->flags = 0;
- vb->timestamp = (time_usec * NSEC_PER_USEC);
-
- extra_idx =
- EXTRADATA_IDX(inst->bufq[CAPTURE_PORT].num_planes);
- if (extra_idx && extra_idx < VIDEO_MAX_PLANES) {
- vb->planes[extra_idx].m.userptr =
- (unsigned long)fill_buf_done->extra_data_buffer;
- vb->planes[extra_idx].bytesused =
- vb->planes[extra_idx].length;
- vb->planes[extra_idx].data_offset = 0;
- }
-
- if (inst->buffer_mode_set[CAPTURE_PORT] ==
- HAL_BUFFER_MODE_DYNAMIC)
- handle_dynamic_buffer(inst, fill_buf_done->packet_buffer1,
- fill_buf_done->flags1);
- if (fill_buf_done->flags1 & HAL_BUFFERFLAG_READONLY)
- vbuf->flags |= V4L2_QCOM_BUF_FLAG_READONLY;
- if (fill_buf_done->flags1 & HAL_BUFFERFLAG_EOS)
- vbuf->flags |= V4L2_QCOM_BUF_FLAG_EOS;
- if (fill_buf_done->flags1 & HAL_BUFFERFLAG_CODECCONFIG)
- vbuf->flags |= V4L2_QCOM_BUF_FLAG_CODECCONFIG;
- if (fill_buf_done->flags1 & HAL_BUFFERFLAG_SYNCFRAME)
- vbuf->flags |= V4L2_QCOM_BUF_FLAG_IDRFRAME;
- if (fill_buf_done->flags1 & HAL_BUFFERFLAG_EOSEQ)
- vbuf->flags |= V4L2_QCOM_BUF_FLAG_EOSEQ;
- if (fill_buf_done->flags1 & HAL_BUFFERFLAG_DECODEONLY ||
- fill_buf_done->flags1 & HAL_BUFFERFLAG_DROP_FRAME)
- vbuf->flags |= V4L2_QCOM_BUF_FLAG_DECODEONLY;
- if (fill_buf_done->flags1 & HAL_BUFFERFLAG_DATACORRUPT)
- vbuf->flags |= V4L2_QCOM_BUF_DATA_CORRUPT;
-
- switch (fill_buf_done->picture_type) {
- case HAL_PICTURE_IDR:
- vbuf->flags |= V4L2_QCOM_BUF_FLAG_IDRFRAME;
- vbuf->flags |= V4L2_BUF_FLAG_KEYFRAME;
- break;
- case HAL_PICTURE_I:
- vbuf->flags |= V4L2_BUF_FLAG_KEYFRAME;
- break;
- case HAL_PICTURE_P:
- vbuf->flags |= V4L2_BUF_FLAG_PFRAME;
- break;
- case HAL_PICTURE_B:
- vbuf->flags |= V4L2_BUF_FLAG_BFRAME;
- break;
- case HAL_FRAME_NOTCODED:
- case HAL_UNUSED_PICT:
- /* Do we need to care about these? */
- case HAL_FRAME_YUV:
- break;
- default:
- break;
- }
-
- inst->count.fbd++;
-
- if (extra_idx && extra_idx < VIDEO_MAX_PLANES) {
- dprintk(VIDC_DBG,
- "extradata: userptr = %pK;"
- " bytesused = %d; length = %d\n",
- (u8 *)vb->planes[extra_idx].m.userptr,
- vb->planes[extra_idx].bytesused,
- vb->planes[extra_idx].length);
- }
- dprintk(VIDC_DBG,
- "Got fbd from hal: device_addr: %pa, alloc: %d, filled: %d, offset: %d, ts: %lld, flags: %#x, crop: %d %d %d %d, pic_type: %#x, mark_data: %#x\n",
- &fill_buf_done->packet_buffer1, fill_buf_done->alloc_len1,
- fill_buf_done->filled_len1, fill_buf_done->offset1, time_usec,
- fill_buf_done->flags1, fill_buf_done->start_x_coord,
- fill_buf_done->start_y_coord, fill_buf_done->frame_width,
- fill_buf_done->frame_height, fill_buf_done->picture_type,
- fill_buf_done->mark_data);
-
- mutex_lock(&inst->bufq[CAPTURE_PORT].lock);
- vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
- mutex_unlock(&inst->bufq[CAPTURE_PORT].lock);
- msm_vidc_debugfs_update(inst, MSM_VIDC_DEBUGFS_EVENT_FBD);
+ vbuf->flags = mbuf->vvb.flags;
+ vb->timestamp = mbuf->vvb.vb2_buf.timestamp;
+ for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++)
+ vb->planes[i].bytesused =
+ mbuf->vvb.vb2_buf.planes[i].bytesused;
}
+ /*
+ * put_buffer must be called before vb2_buffer_done, otherwise the
+ * client might queue the same buffer before it is unmapped in
+ * put_buffer. Also, don't use mbuf after put_buffer as it may have
+ * been freed there.
+ */
+ msm_comm_put_vidc_buffer(inst, mbuf);
+ msm_comm_vb2_buffer_done(inst, vb);
-err_handle_fbd:
+exit:
put_inst(inst);
}
@@ -3245,7 +3179,6 @@
enum hal_buffer buffer_type)
{
int rc = 0;
- struct msm_smem *handle;
struct internal_buf *binfo;
u32 smem_flags = 0, buffer_size;
struct hal_buffer_requirements *output_buf, *extradata_buf;
@@ -3293,33 +3226,30 @@
if (output_buf->buffer_size) {
for (i = 0; i < output_buf->buffer_count_actual;
i++) {
- handle = msm_comm_smem_alloc(inst,
- buffer_size, 1, smem_flags,
- buffer_type, 0);
- if (!handle) {
- dprintk(VIDC_ERR,
- "Failed to allocate output memory\n");
- rc = -ENOMEM;
- goto err_no_mem;
- }
- rc = msm_comm_smem_cache_operations(inst,
- handle, SMEM_CACHE_CLEAN);
- if (rc) {
- dprintk(VIDC_WARN,
- "Failed to clean cache may cause undefined behavior\n");
- }
binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
if (!binfo) {
dprintk(VIDC_ERR, "Out of memory\n");
rc = -ENOMEM;
goto fail_kzalloc;
}
-
- binfo->handle = handle;
+ rc = msm_comm_smem_alloc(inst,
+ buffer_size, 1, smem_flags,
+ buffer_type, 0, &binfo->smem);
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "Failed to allocate output memory\n");
+ goto err_no_mem;
+ }
+ rc = msm_comm_smem_cache_operations(inst,
+ &binfo->smem, SMEM_CACHE_CLEAN);
+ if (rc) {
+ dprintk(VIDC_WARN,
+ "Failed to clean cache may cause undefined behavior\n");
+ }
binfo->buffer_type = buffer_type;
binfo->buffer_ownership = DRIVER;
- dprintk(VIDC_DBG, "Output buffer address: %pa\n",
- &handle->device_addr);
+ dprintk(VIDC_DBG, "Output buffer address: %#x\n",
+ binfo->smem.device_addr);
if (inst->buffer_mode_set[CAPTURE_PORT] ==
HAL_BUFFER_MODE_STATIC) {
@@ -3330,9 +3260,9 @@
buffer_info.buffer_type = buffer_type;
buffer_info.num_buffers = 1;
buffer_info.align_device_addr =
- handle->device_addr;
+ binfo->smem.device_addr;
buffer_info.extradata_addr =
- handle->device_addr +
+ binfo->smem.device_addr +
output_buf->buffer_size;
if (extradata_buf)
buffer_info.extradata_size =
@@ -3355,7 +3285,7 @@
fail_set_buffers:
- kfree(binfo);
-fail_kzalloc:
- msm_comm_smem_free(inst, handle);
-err_no_mem:
+ msm_comm_smem_free(inst, &binfo->smem);
+err_no_mem:
+ kfree(binfo);
+fail_kzalloc:
return rc;
}
@@ -3405,10 +3335,10 @@
buffer_info.buffer_type = buffer_type;
buffer_info.num_buffers = 1;
buffer_info.align_device_addr = handle->device_addr;
- dprintk(VIDC_DBG, "%s %s buffer : %pa\n",
+ dprintk(VIDC_DBG, "%s %s buffer : %x\n",
reuse ? "Reusing" : "Allocated",
get_buffer_name(buffer_type),
- &buffer_info.align_device_addr);
+ buffer_info.align_device_addr);
rc = call_hfi_op(hdev, session_set_buffers,
(void *) inst->session, &buffer_info);
@@ -3434,11 +3364,6 @@
mutex_lock(&buf_list->lock);
list_for_each_entry(buf, &buf_list->list, list) {
- if (!buf->handle) {
- reused = false;
- break;
- }
-
if (buf->buffer_type != buffer_type)
continue;
@@ -3454,7 +3379,7 @@
&& buffer_type != HAL_BUFFER_INTERNAL_PERSIST_1) {
rc = set_internal_buf_on_fw(inst, buffer_type,
- buf->handle, true);
+ &buf->smem, true);
if (rc) {
dprintk(VIDC_ERR,
"%s: session_set_buffers failed\n",
@@ -3475,7 +3400,6 @@
struct hal_buffer_requirements *internal_bufreq,
struct msm_vidc_list *buf_list)
{
- struct msm_smem *handle;
struct internal_buf *binfo;
u32 smem_flags = 0;
int rc = 0;
@@ -3491,27 +3415,25 @@
smem_flags |= SMEM_SECURE;
for (i = 0; i < internal_bufreq->buffer_count_actual; i++) {
- handle = msm_comm_smem_alloc(inst, internal_bufreq->buffer_size,
- 1, smem_flags, internal_bufreq->buffer_type, 0);
- if (!handle) {
- dprintk(VIDC_ERR,
- "Failed to allocate scratch memory\n");
- rc = -ENOMEM;
- goto err_no_mem;
- }
-
binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
if (!binfo) {
dprintk(VIDC_ERR, "Out of memory\n");
rc = -ENOMEM;
goto fail_kzalloc;
}
+ rc = msm_comm_smem_alloc(inst, internal_bufreq->buffer_size,
+ 1, smem_flags, internal_bufreq->buffer_type,
+ 0, &binfo->smem);
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "Failed to allocate scratch memory\n");
+ goto err_no_mem;
+ }
- binfo->handle = handle;
binfo->buffer_type = internal_bufreq->buffer_type;
rc = set_internal_buf_on_fw(inst, internal_bufreq->buffer_type,
- handle, false);
+ &binfo->smem, false);
if (rc)
goto fail_set_buffers;
@@ -3522,10 +3444,10 @@
return rc;
fail_set_buffers:
+ msm_comm_smem_free(inst, &binfo->smem);
+err_no_mem:
kfree(binfo);
fail_kzalloc:
- msm_comm_smem_free(inst, handle);
-err_no_mem:
return rc;
}
@@ -3764,25 +3686,32 @@
}
static void populate_frame_data(struct vidc_frame_data *data,
- const struct vb2_buffer *vb, struct msm_vidc_inst *inst)
+ struct msm_vidc_buffer *mbuf, struct msm_vidc_inst *inst)
{
u64 time_usec;
int extra_idx;
- enum v4l2_buf_type type = vb->type;
- enum vidc_ports port = type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
- OUTPUT_PORT : CAPTURE_PORT;
- struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vb2_buffer *vb;
+ struct vb2_v4l2_buffer *vbuf;
+
+ if (!inst || !mbuf || !data) {
+ dprintk(VIDC_ERR, "%s: invalid params %pK %pK %pK\n",
+ __func__, inst, mbuf, data);
+ return;
+ }
+
+ vb = &mbuf->vvb.vb2_buf;
+ vbuf = to_vb2_v4l2_buffer(vb);
time_usec = vb->timestamp;
do_div(time_usec, NSEC_PER_USEC);
data->alloc_len = vb->planes[0].length;
- data->device_addr = vb->planes[0].m.userptr;
+ data->device_addr = mbuf->smem[0].device_addr;
data->timestamp = time_usec;
data->flags = 0;
data->clnt_data = data->device_addr;
- if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
bool pic_decoding_mode = msm_comm_g_ctrl_for_id(inst,
V4L2_CID_MPEG_VIDC_VIDEO_PICTYPE_DEC_MODE);
@@ -3810,59 +3739,64 @@
data->mark_data = data->mark_target =
pic_decoding_mode ? 0xdeadbeef : 0;
- } else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ } else if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
data->buffer_type = msm_comm_get_hal_output_buffer(inst);
}
- extra_idx = EXTRADATA_IDX(inst->bufq[port].num_planes);
- if (extra_idx && extra_idx < VIDEO_MAX_PLANES &&
- vb->planes[extra_idx].m.userptr) {
- data->extradata_addr = vb->planes[extra_idx].m.userptr;
+ extra_idx = EXTRADATA_IDX(vb->num_planes);
+ if (extra_idx && extra_idx < VIDEO_MAX_PLANES) {
+ data->extradata_addr = mbuf->smem[extra_idx].device_addr;
data->extradata_size = vb->planes[extra_idx].length;
data->flags |= HAL_BUFFERFLAG_EXTRADATA;
}
}
-static unsigned int count_single_batch(struct msm_vidc_list *list,
+static unsigned int count_single_batch(struct msm_vidc_inst *inst,
enum v4l2_buf_type type)
{
- struct vb2_buf_entry *buf;
int count = 0;
- struct vb2_v4l2_buffer *vbuf = NULL;
+ struct msm_vidc_buffer *mbuf = NULL;
- mutex_lock(&list->lock);
- list_for_each_entry(buf, &list->list, list) {
- if (buf->vb->type != type)
+ mutex_lock(&inst->registeredbufs.lock);
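+ /*
+ * A batch is complete once a deferred buffer queued without the
+ * DEFER flag is found; until then no full batch exists and zero
+ * is returned below.
+ */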
+ list_for_each_entry(mbuf, &inst->registeredbufs.list, list) {
+ if (mbuf->vvb.vb2_buf.type != type)
+ continue;
+
+ /* count only deferred buffers */
+ if (!mbuf->deferred)
continue;
++count;
- vbuf = to_vb2_v4l2_buffer(buf->vb);
- if (!(vbuf->flags & V4L2_MSM_BUF_FLAG_DEFER))
+ if (!(mbuf->vvb.flags & V4L2_MSM_BUF_FLAG_DEFER))
goto found_batch;
}
- /* don't have a full batch */
+ /* don't have a full batch */
count = 0;
found_batch:
- mutex_unlock(&list->lock);
+ mutex_unlock(&inst->registeredbufs.lock);
return count;
}
-static unsigned int count_buffers(struct msm_vidc_list *list,
+static unsigned int count_buffers(struct msm_vidc_inst *inst,
enum v4l2_buf_type type)
{
- struct vb2_buf_entry *buf;
+ struct msm_vidc_buffer *mbuf;
int count = 0;
- mutex_lock(&list->lock);
- list_for_each_entry(buf, &list->list, list) {
- if (buf->vb->type != type)
+ mutex_lock(&inst->registeredbufs.lock);
+ list_for_each_entry(mbuf, &inst->registeredbufs.list, list) {
+ if (mbuf->vvb.vb2_buf.type != type)
+ continue;
+
+ /* count only deferred buffers */
+ if (!mbuf->deferred)
continue;
++count;
}
- mutex_unlock(&list->lock);
+ mutex_unlock(&inst->registeredbufs.lock);
return count;
}
@@ -3873,27 +3807,45 @@
if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
dprintk(VIDC_DBG,
- "Sending etb (%pa) to hal: filled: %d, ts: %lld, flags = %#x\n",
- &data->device_addr, data->filled_len,
+ "Sending etb (%x) to hal: filled: %d, ts: %lld, flags = %#x\n",
+ data->device_addr, data->filled_len,
data->timestamp, data->flags);
msm_vidc_debugfs_update(inst, MSM_VIDC_DEBUGFS_EVENT_ETB);
} else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
dprintk(VIDC_DBG,
- "Sending ftb (%pa) to hal: size: %d, ts: %lld, flags = %#x\n",
- &data->device_addr, data->alloc_len,
+ "Sending ftb (%x) to hal: size: %d, ts: %lld, flags = %#x\n",
+ data->device_addr, data->alloc_len,
data->timestamp, data->flags);
msm_vidc_debugfs_update(inst, MSM_VIDC_DEBUGFS_EVENT_FTB);
}
}
+enum hal_buffer get_hal_buffer_type(unsigned int type,
+ unsigned int plane_num)
+{
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ if (plane_num == 0)
+ return HAL_BUFFER_INPUT;
+ else
+ return HAL_BUFFER_EXTRADATA_INPUT;
+ } else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ if (plane_num == 0)
+ return HAL_BUFFER_OUTPUT;
+ else
+ return HAL_BUFFER_EXTRADATA_OUTPUT;
+ } else {
+ return -EINVAL;
+ }
+}
+
/*
- * Attempts to queue `vb` to hardware. If, for various reasons, the buffer
- * cannot be queued to hardware, the buffer will be staged for commit in the
- * pending queue. Once the hardware reaches a good state (or if `vb` is NULL,
- * the subsequent *_qbuf will commit the previously staged buffers to hardware.
+ * Attempts to queue `mbuf` to hardware. If, for various reasons, the buffer
+ * cannot be queued to hardware, it stays on the registered list marked as
+ * deferred. Once the hardware reaches a good state (or if `mbuf` is NULL),
+ * the subsequent *_qbuf will commit the previously deferred buffers to
+ * hardware.
 */
-int msm_comm_qbuf(struct msm_vidc_inst *inst, struct vb2_buffer *vb)
+int msm_comm_qbuf(struct msm_vidc_inst *inst, struct msm_vidc_buffer *mbuf)
{
int rc = 0, capture_count, output_count;
struct msm_vidc_core *core;
@@ -3903,8 +3855,7 @@
int count;
} etbs, ftbs;
bool defer = false, batch_mode;
- struct vb2_buf_entry *temp, *next;
- struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct msm_vidc_buffer *temp = NULL, *next = NULL;
if (!inst) {
dprintk(VIDC_ERR, "%s: Invalid arguments\n", __func__);
@@ -3914,36 +3865,21 @@
core = inst->core;
hdev = core->device;
- if (inst->state == MSM_VIDC_CORE_INVALID ||
- core->state == VIDC_CORE_INVALID ||
- core->state == VIDC_CORE_UNINIT) {
- dprintk(VIDC_ERR, "Core is in bad state. Can't Queue\n");
+ if (inst->state == MSM_VIDC_CORE_INVALID) {
+ dprintk(VIDC_ERR, "%s: inst is in bad state\n", __func__);
return -EINVAL;
}
- /*
- * Stick the buffer into the pendinq, we'll pop it out later on
- * if we want to commit it to hardware
- */
- if (vb) {
- temp = kzalloc(sizeof(*temp), GFP_KERNEL);
- if (!temp) {
- dprintk(VIDC_ERR, "Out of memory\n");
- goto err_no_mem;
- }
-
- temp->vb = vb;
- mutex_lock(&inst->pendingq.lock);
- list_add_tail(&temp->list, &inst->pendingq.list);
- mutex_unlock(&inst->pendingq.lock);
- }
+ /* initially assume every buffer is going to be deferred */
+ if (mbuf)
+ mbuf->deferred = true;
batch_mode = msm_comm_g_ctrl_for_id(inst, V4L2_CID_VIDC_QBUF_MODE)
== V4L2_VIDC_QBUF_BATCHED;
capture_count = (batch_mode ? &count_single_batch : &count_buffers)
- (&inst->pendingq, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ (inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
output_count = (batch_mode ? &count_single_batch : &count_buffers)
- (&inst->pendingq, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ (inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
/*
* Somewhat complicated logic to prevent queuing the buffer to hardware.
@@ -3957,13 +3893,18 @@
* buffer to be batched with future frames. The batch size (on both
* capabilities) is completely determined by the client.
*/
- defer = defer ? defer : (vbuf && vbuf->flags & V4L2_MSM_BUF_FLAG_DEFER);
+ defer = defer ? defer :
+ (mbuf && mbuf->vvb.flags & V4L2_MSM_BUF_FLAG_DEFER);
/* 3) If we're in batch mode, we must have full batches of both types */
defer = defer ? defer:(batch_mode && (!output_count || !capture_count));
if (defer) {
- dprintk(VIDC_DBG, "Deferring queue of %pK\n", vb);
+ if (mbuf) {
+ mbuf->deferred = true;
+ print_vidc_buffer(VIDC_DBG, "deferred qbuf",
+ inst, mbuf);
+ }
return 0;
}
@@ -3993,15 +3934,18 @@
etbs.count = ftbs.count = 0;
/*
- * Try to collect all pending buffers into 2 batches of ftb and etb
+ * Try to collect all deferred buffers into 2 batches of ftb and etb
- * Note that these "batches" might be empty if we're no in batching mode
- * and the pendingq is empty
+ * Note that these "batches" might be empty if we're not in batching mode
+ * and no buffers are marked deferred.
*/
- mutex_lock(&inst->pendingq.lock);
- list_for_each_entry_safe(temp, next, &inst->pendingq.list, list) {
+ mutex_lock(&inst->registeredbufs.lock);
+ list_for_each_entry_safe(temp, next, &inst->registeredbufs.list, list) {
struct vidc_frame_data *frame_data = NULL;
- switch (temp->vb->type) {
+ if (!temp->deferred)
+ continue;
+
+ switch (temp->vvb.vb2_buf.type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
if (ftbs.count < capture_count && ftbs.data)
frame_data = &ftbs.data[ftbs.count++];
@@ -4017,12 +3961,14 @@
if (!frame_data)
continue;
- populate_frame_data(frame_data, temp->vb, inst);
+ populate_frame_data(frame_data, temp, inst);
- list_del(&temp->list);
- kfree(temp);
+ /* this buffer going to be queued (not deferred) */
+ temp->deferred = false;
+
+ print_vidc_buffer(VIDC_DBG, "qbuf", inst, temp);
}
- mutex_unlock(&inst->pendingq.lock);
+ mutex_unlock(&inst->registeredbufs.lock);
/* Finally commit all our frame(s) to H/W */
if (batch_mode) {
@@ -4130,7 +4076,7 @@
/* For DPB buffers, no need to add Extra buffers */
- bufreq->buffer_count_actual = bufreq->buffer_count_min_host =
+ bufreq->buffer_count_min_host = bufreq->buffer_count_actual =
bufreq->buffer_count_min;
bufreq = get_buff_req_buffer(inst,
@@ -4145,7 +4091,7 @@
extra_buffers = msm_vidc_get_extra_buff_count(inst,
HAL_BUFFER_OUTPUT);
- bufreq->buffer_count_min_host =
+ bufreq->buffer_count_min_host = bufreq->buffer_count_actual =
bufreq->buffer_count_min + extra_buffers;
} else {
@@ -4161,7 +4107,7 @@
extra_buffers = msm_vidc_get_extra_buff_count(inst,
HAL_BUFFER_OUTPUT);
- bufreq->buffer_count_actual = bufreq->buffer_count_min_host =
+ bufreq->buffer_count_min_host = bufreq->buffer_count_actual =
bufreq->buffer_count_min + extra_buffers;
}
@@ -4342,11 +4288,7 @@
}
mutex_lock(&inst->outputbufs.lock);
list_for_each_entry_safe(buf, dummy, &inst->outputbufs.list, list) {
- handle = buf->handle;
- if (!handle) {
- dprintk(VIDC_ERR, "%s - invalid handle\n", __func__);
- goto exit;
- }
+ handle = &buf->smem;
if ((buf->buffer_ownership == FIRMWARE) && !force_release) {
dprintk(VIDC_INFO, "DPB is with f/w. Can't free it\n");
@@ -4366,18 +4308,17 @@
(void *)inst->session, &buffer_info);
if (rc) {
dprintk(VIDC_WARN,
- "Rel output buf fail:%pa, %d\n",
- &buffer_info.align_device_addr,
+ "Rel output buf fail:%x, %d\n",
+ buffer_info.align_device_addr,
buffer_info.buffer_size);
}
}
list_del(&buf->list);
- msm_comm_smem_free(inst, buf->handle);
+ msm_comm_smem_free(inst, &buf->smem);
kfree(buf);
}
-exit:
mutex_unlock(&inst->outputbufs.lock);
return rc;
}
@@ -4402,13 +4343,8 @@
mutex_lock(&inst->scratchbufs.lock);
list_for_each_entry(buf, &inst->scratchbufs.list, list) {
- if (!buf->handle) {
- dprintk(VIDC_ERR, "%s: invalid buf handle\n", __func__);
- mutex_unlock(&inst->scratchbufs.lock);
- goto not_sufficient;
- }
if (buf->buffer_type == buffer_type &&
- buf->handle->size >= bufreq->buffer_size)
+ buf->smem.size >= bufreq->buffer_size)
count++;
}
mutex_unlock(&inst->scratchbufs.lock);
@@ -4467,13 +4403,7 @@
mutex_lock(&inst->scratchbufs.lock);
list_for_each_entry_safe(buf, dummy, &inst->scratchbufs.list, list) {
- if (!buf->handle) {
- dprintk(VIDC_ERR, "%s - buf->handle NULL\n", __func__);
- rc = -EINVAL;
- goto exit;
- }
-
- handle = buf->handle;
+ handle = &buf->smem;
buffer_info.buffer_size = handle->size;
buffer_info.buffer_type = buf->buffer_type;
buffer_info.num_buffers = 1;
@@ -4485,8 +4415,8 @@
(void *)inst->session, &buffer_info);
if (rc) {
dprintk(VIDC_WARN,
- "Rel scrtch buf fail:%pa, %d\n",
- &buffer_info.align_device_addr,
+ "Rel scrtch buf fail:%x, %d\n",
+ buffer_info.align_device_addr,
buffer_info.buffer_size);
}
mutex_unlock(&inst->scratchbufs.lock);
@@ -4505,11 +4435,10 @@
continue;
list_del(&buf->list);
- msm_comm_smem_free(inst, buf->handle);
+ msm_comm_smem_free(inst, handle);
kfree(buf);
}
-exit:
mutex_unlock(&inst->scratchbufs.lock);
return rc;
}
@@ -4565,7 +4494,7 @@
mutex_lock(&inst->persistbufs.lock);
list_for_each_safe(ptr, next, &inst->persistbufs.list) {
buf = list_entry(ptr, struct internal_buf, list);
- handle = buf->handle;
+ handle = &buf->smem;
buffer_info.buffer_size = handle->size;
buffer_info.buffer_type = buf->buffer_type;
buffer_info.num_buffers = 1;
@@ -4577,8 +4506,8 @@
(void *)inst->session, &buffer_info);
if (rc) {
dprintk(VIDC_WARN,
- "Rel prst buf fail:%pa, %d\n",
- &buffer_info.align_device_addr,
+ "Rel prst buf fail:%x, %d\n",
+ buffer_info.align_device_addr,
buffer_info.buffer_size);
}
mutex_unlock(&inst->persistbufs.lock);
@@ -4591,7 +4520,7 @@
mutex_lock(&inst->persistbufs.lock);
}
list_del(&buf->list);
- msm_comm_smem_free(inst, buf->handle);
+ msm_comm_smem_free(inst, handle);
kfree(buf);
}
mutex_unlock(&inst->persistbufs.lock);
@@ -4769,116 +4698,20 @@
for (c = 0; c < ARRAY_SIZE(ports); ++c) {
enum vidc_ports port = ports[c];
- dprintk(VIDC_DBG, "Flushing buffers of type %d in bad state\n",
- port);
mutex_lock(&inst->bufq[port].lock);
- list_for_each_safe(ptr, next, &inst->bufq[port].
- vb2_bufq.queued_list) {
+ list_for_each_safe(ptr, next,
+ &inst->bufq[port].vb2_bufq.queued_list) {
struct vb2_buffer *vb = container_of(ptr,
struct vb2_buffer, queued_entry);
-
vb->planes[0].bytesused = 0;
- vb->planes[0].data_offset = 0;
-
+ print_vb2_buffer(VIDC_ERR, "flush in invalid",
+ inst, vb);
vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
}
mutex_unlock(&inst->bufq[port].lock);
}
-
msm_vidc_queue_v4l2_event(inst, V4L2_EVENT_MSM_VIDC_FLUSH_DONE);
-}
-
-void msm_comm_flush_dynamic_buffers(struct msm_vidc_inst *inst)
-{
- struct buffer_info *binfo = NULL;
-
- if (inst->buffer_mode_set[CAPTURE_PORT] != HAL_BUFFER_MODE_DYNAMIC)
- return;
-
- /*
- * dynamic buffer mode:- if flush is called during seek
- * driver should not queue any new buffer it has been holding.
- *
- * Each dynamic o/p buffer can have one of following ref_count:
- * ref_count : 0 - f/w has released reference and sent dynamic
- * buffer back. The buffer has been returned
- * back to client.
- *
- * ref_count : 1 - f/w is holding reference. f/w may have released
- * dynamic buffer as read_only OR dynamic buffer is
- * pending. f/w will release reference before sending
- * flush_done.
- *
- * ref_count : >=2 - f/w is holding reference, f/w has released dynamic
- * buffer as read_only, which client has queued back
- * to driver. Driver holds this buffer and will queue
- * back only when f/w releases the reference. During
- * flush_done, f/w will release the reference but
- * driver should not queue back the buffer to f/w.
- * Flush all buffers with ref_count >= 2.
- */
- mutex_lock(&inst->registeredbufs.lock);
- if (!list_empty(&inst->registeredbufs.list)) {
- struct v4l2_event buf_event = {0};
- u32 *ptr = NULL;
-
- list_for_each_entry(binfo, &inst->registeredbufs.list, list) {
- if (binfo->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
- atomic_read(&binfo->ref_count) >= 2) {
-
- atomic_dec(&binfo->ref_count);
- buf_event.type =
- V4L2_EVENT_MSM_VIDC_RELEASE_UNQUEUED_BUFFER;
- ptr = (u32 *)buf_event.u.data;
- ptr[0] = binfo->fd[0];
- ptr[1] = binfo->buff_off[0];
- ptr[2] = binfo->uvaddr[0];
- ptr[3] = (u32) binfo->timestamp.tv_sec;
- ptr[4] = (u32) binfo->timestamp.tv_usec;
- ptr[5] = binfo->v4l2_index;
- dprintk(VIDC_DBG,
- "released buffer held in driver before issuing flush: %pa fd[0]: %d\n",
- &binfo->device_addr[0], binfo->fd[0]);
- /*send event to client*/
- v4l2_event_queue_fh(&inst->event_handler,
- &buf_event);
- }
- }
- }
- mutex_unlock(&inst->registeredbufs.lock);
-}
-
-void msm_comm_flush_pending_dynamic_buffers(struct msm_vidc_inst *inst)
-{
- struct buffer_info *binfo = NULL;
-
- if (!inst)
- return;
-
- if (inst->buffer_mode_set[CAPTURE_PORT] != HAL_BUFFER_MODE_DYNAMIC)
- return;
-
- if (list_empty(&inst->pendingq.list) ||
- list_empty(&inst->registeredbufs.list))
- return;
-
- /*
- * Dynamic Buffer mode - Since pendingq is not empty
- * no output buffers have been sent to firmware yet.
- * Hence remove reference to all pendingq o/p buffers
- * before flushing them.
- */
-
- mutex_lock(&inst->registeredbufs.lock);
- list_for_each_entry(binfo, &inst->registeredbufs.list, list) {
- if (binfo->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
- dprintk(VIDC_DBG,
- "%s: binfo = %pK device_addr = %pa\n",
- __func__, binfo, &binfo->device_addr[0]);
- buf_ref_put(inst, binfo);
- }
- }
- mutex_unlock(&inst->registeredbufs.lock);
}
int msm_comm_flush(struct msm_vidc_inst *inst, u32 flags)
@@ -4886,33 +4719,25 @@
int rc = 0;
bool ip_flush = false;
bool op_flush = false;
- struct vb2_buf_entry *temp, *next;
- struct mutex *lock;
+ struct msm_vidc_buffer *mbuf, *next;
struct msm_vidc_core *core;
struct hfi_device *hdev;
- if (!inst) {
+ if (!inst || !inst->core || !inst->core->device) {
dprintk(VIDC_ERR,
- "Invalid instance pointer = %pK\n", inst);
+ "Invalid params, inst %pK\n", inst);
return -EINVAL;
}
core = inst->core;
- if (!core) {
- dprintk(VIDC_ERR,
- "Invalid core pointer = %pK\n", core);
- return -EINVAL;
- }
hdev = core->device;
- if (!hdev) {
- dprintk(VIDC_ERR, "Invalid device pointer = %pK\n", hdev);
- return -EINVAL;
- }
ip_flush = flags & V4L2_QCOM_CMD_FLUSH_OUTPUT;
op_flush = flags & V4L2_QCOM_CMD_FLUSH_CAPTURE;
if (ip_flush && !op_flush) {
- dprintk(VIDC_INFO, "Input only flush not supported\n");
- return 0;
+ dprintk(VIDC_WARN,
+ "Input only flush not supported, making it flush all\n");
+ op_flush = true;
}
@@ -4920,11 +4745,7 @@
msm_clock_data_reset(inst);
- msm_comm_flush_dynamic_buffers(inst);
-
- if (inst->state == MSM_VIDC_CORE_INVALID ||
- core->state == VIDC_CORE_INVALID ||
- core->state == VIDC_CORE_UNINIT) {
+ if (inst->state == MSM_VIDC_CORE_INVALID) {
dprintk(VIDC_ERR,
"Core %pK and inst %pK are in bad state\n",
core, inst);
@@ -4932,68 +4753,52 @@
return 0;
}
- if (inst->in_reconfig && !ip_flush && op_flush) {
- mutex_lock(&inst->pendingq.lock);
- if (!list_empty(&inst->pendingq.list)) {
- /*
- * Execution can never reach here since port reconfig
- * wont happen unless pendingq is emptied out
- * (both pendingq and flush being secured with same
- * lock). Printing a message here incase this breaks.
- */
- dprintk(VIDC_WARN,
- "FLUSH BUG: Pending q not empty! It should be empty\n");
- }
- mutex_unlock(&inst->pendingq.lock);
- atomic_inc(&inst->in_flush);
- dprintk(VIDC_DBG, "Send flush Output to firmware\n");
+ mutex_lock(&inst->registeredbufs.lock);
+ list_for_each_entry_safe(mbuf, next, &inst->registeredbufs.list, list) {
+ /* flush only deferred buffers (which are not queued yet) */
+ if (!mbuf->deferred)
+ continue;
+
+ /* don't flush input buffers if flush was not requested on them */
+ if (!ip_flush && mbuf->vvb.vb2_buf.type ==
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ continue;
+
+ print_vidc_buffer(VIDC_DBG, "flush buf", inst, mbuf);
+ msm_comm_flush_vidc_buffer(inst, mbuf);
+ msm_comm_unmap_vidc_buffer(inst, mbuf);
+
+ /* remove from list */
+ list_del(&mbuf->list);
+ kfree(mbuf);
+ mbuf = NULL;
+ }
+ mutex_unlock(&inst->registeredbufs.lock);
+
+ /* mark flush in progress; cleared again when flush-done arrives */
+ inst->in_flush = true;
+
+ hdev = inst->core->device;
+ if (ip_flush) {
+ dprintk(VIDC_DBG, "Send flush on all ports to firmware\n");
rc = call_hfi_op(hdev, session_flush, inst->session,
- HAL_FLUSH_OUTPUT);
+ HAL_FLUSH_ALL);
} else {
- msm_comm_flush_pending_dynamic_buffers(inst);
- /*
- * If flush is called after queueing buffers but before
- * streamon driver should flush the pending queue
- */
- mutex_lock(&inst->pendingq.lock);
- list_for_each_entry_safe(temp, next,
- &inst->pendingq.list, list) {
- enum v4l2_buf_type type = temp->vb->type;
-
- if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
- lock = &inst->bufq[CAPTURE_PORT].lock;
- else
- lock = &inst->bufq[OUTPUT_PORT].lock;
-
- temp->vb->planes[0].bytesused = 0;
-
- mutex_lock(lock);
- vb2_buffer_done(temp->vb, VB2_BUF_STATE_DONE);
- msm_vidc_debugfs_update(inst,
- type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ?
- MSM_VIDC_DEBUGFS_EVENT_FBD :
- MSM_VIDC_DEBUGFS_EVENT_EBD);
- list_del(&temp->list);
- mutex_unlock(lock);
-
- kfree(temp);
- }
- mutex_unlock(&inst->pendingq.lock);
-
- /*Do not send flush in case of session_error */
- if (!(inst->state == MSM_VIDC_CORE_INVALID &&
- core->state != VIDC_CORE_INVALID)) {
- atomic_inc(&inst->in_flush);
- dprintk(VIDC_DBG, "Send flush all to firmware\n");
- rc = call_hfi_op(hdev, session_flush, inst->session,
- HAL_FLUSH_ALL);
- }
+ dprintk(VIDC_DBG, "Send flush on output port to firmware\n");
+ rc = call_hfi_op(hdev, session_flush, inst->session,
+ HAL_FLUSH_OUTPUT);
+ }
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "Sending flush to firmware failed, flush out all buffers\n");
+ msm_comm_flush_in_invalid_state(inst);
+ /* disable in_flush */
+ inst->in_flush = false;
}
return rc;
}
-
enum hal_extradata_id msm_comm_get_hal_extradata_index(
enum v4l2_mpeg_vidc_extradata index)
{
@@ -5369,19 +5174,19 @@
return rc;
}
-struct msm_smem *msm_comm_smem_alloc(struct msm_vidc_inst *inst,
- size_t size, u32 align, u32 flags,
- enum hal_buffer buffer_type, int map_kernel)
+int msm_comm_smem_alloc(struct msm_vidc_inst *inst,
+ size_t size, u32 align, u32 flags, enum hal_buffer buffer_type,
+ int map_kernel, struct msm_smem *smem)
{
- struct msm_smem *m = NULL;
+ int rc = 0;
if (!inst || !inst->core) {
dprintk(VIDC_ERR, "%s: invalid inst: %pK\n", __func__, inst);
- return NULL;
+ return -EINVAL;
}
- m = msm_smem_alloc(inst->mem_client, size, align,
- flags, buffer_type, map_kernel);
- return m;
+ rc = msm_smem_alloc(inst->mem_client, size, align,
+ flags, buffer_type, map_kernel, smem);
+ return rc;
}
void msm_comm_smem_free(struct msm_vidc_inst *inst, struct msm_smem *mem)
@@ -5402,28 +5207,138 @@
"%s: invalid params: %pK %pK\n", __func__, inst, mem);
return -EINVAL;
}
- return msm_smem_cache_operations(inst->mem_client, mem, cache_ops);
+ return msm_smem_cache_operations(inst->mem_client, mem->handle,
+ mem->offset, mem->size, cache_ops);
}
-struct msm_smem *msm_comm_smem_user_to_kernel(struct msm_vidc_inst *inst,
- int fd, u32 offset, enum hal_buffer buffer_type)
+int msm_comm_qbuf_cache_operations(struct msm_vidc_inst *inst,
+ struct v4l2_buffer *b)
{
- struct msm_smem *m = NULL;
+ int rc = 0, i;
+ void *dma_buf;
+ void *handle;
+ bool skip;
- if (!inst || !inst->core) {
- dprintk(VIDC_ERR, "%s: invalid inst: %pK\n", __func__, inst);
- return NULL;
+ if (!inst || !b) {
+ dprintk(VIDC_ERR, "%s: invalid params %pK %pK\n",
+ __func__, inst, b);
+ return -EINVAL;
}
- if (inst->state == MSM_VIDC_CORE_INVALID) {
- dprintk(VIDC_ERR, "Core in Invalid state, returning from %s\n",
- __func__);
- return NULL;
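+ /*
+ * Planes the CPU wrote (decoder bitstream input, encoder yuv input
+ * and its extradata) get a clean+invalidate, limited to bytesused
+ * where applicable; all other planes only get an invalidate.
+ */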
+ for (i = 0; i < b->length; i++) {
+ unsigned long offset, size;
+ enum smem_cache_ops cache_ops;
+
+ dma_buf = msm_smem_get_dma_buf(b->m.planes[i].m.fd);
+ handle = msm_smem_get_handle(inst->mem_client, dma_buf);
+
+ offset = b->m.planes[i].data_offset;
+ size = b->m.planes[i].length;
+ cache_ops = SMEM_CACHE_INVALIDATE;
+ skip = false;
+
+ if (inst->session_type == MSM_VIDC_DECODER) {
+ if (b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ if (!i) { /* bitstream */
+ size = b->m.planes[i].bytesused;
+ cache_ops = SMEM_CACHE_CLEAN_INVALIDATE;
+ }
+ } else if (b->type ==
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ if (!i) { /* yuv */
+ /* all values are correct */
+ }
+ }
+ } else if (inst->session_type == MSM_VIDC_ENCODER) {
+ if (b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ if (!i) { /* yuv */
+ size = b->m.planes[i].bytesused;
+ cache_ops = SMEM_CACHE_CLEAN_INVALIDATE;
+ } else { /* extradata */
+ cache_ops = SMEM_CACHE_CLEAN_INVALIDATE;
+ }
+ } else if (b->type ==
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ if (!i) { /* bitstream */
+ /* all values are correct */
+ }
+ }
+ }
+
+ if (!skip) {
+ rc = msm_smem_cache_operations(inst->mem_client, handle,
+ offset, size, cache_ops);
+ if (rc)
+ print_v4l2_buffer(VIDC_ERR,
+ "qbuf cache ops failed", inst, b);
+ }
+
+ msm_smem_put_handle(inst->mem_client, handle);
+ msm_smem_put_dma_buf(dma_buf);
}
- m = msm_smem_user_to_kernel(inst->mem_client,
- fd, offset, buffer_type);
- return m;
+ return rc;
+}
+
+int msm_comm_dqbuf_cache_operations(struct msm_vidc_inst *inst,
+ struct v4l2_buffer *b)
+{
+ int rc = 0, i;
+ void *dma_buf;
+ void *handle;
+ bool skip;
+
+ if (!inst || !b) {
+ dprintk(VIDC_ERR, "%s: invalid params %pK %pK\n",
+ __func__, inst, b);
+ return -EINVAL;
+ }
+
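+ /*
+ * Only the extradata planes are invalidated on dqbuf; plane 0 is
+ * skipped for both decoder and encoder (its cache maintenance is
+ * covered on the qbuf path above).
+ */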
+ for (i = 0; i < b->length; i++) {
+ unsigned long offset, size;
+ enum smem_cache_ops cache_ops;
+
+ dma_buf = msm_smem_get_dma_buf(b->m.planes[i].m.fd);
+ handle = msm_smem_get_handle(inst->mem_client, dma_buf);
+
+ offset = b->m.planes[i].data_offset;
+ size = b->m.planes[i].length;
+ cache_ops = SMEM_CACHE_INVALIDATE;
+ skip = false;
+
+ if (inst->session_type == MSM_VIDC_DECODER) {
+ if (b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ if (!i) /* bitstream */
+ skip = true;
+ } else if (b->type ==
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ if (!i) /* yuv */
+ skip = true;
+ }
+ } else if (inst->session_type == MSM_VIDC_ENCODER) {
+ if (b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ if (!i) /* yuv */
+ skip = true;
+ } else if (b->type ==
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ if (!i) /* bitstream */
+ skip = true;
+ }
+ }
+
+ if (!skip) {
+ rc = msm_smem_cache_operations(inst->mem_client, handle,
+ offset, size, cache_ops);
+ if (rc)
+ print_v4l2_buffer(VIDC_ERR,
+ "dqbuf cache ops failed", inst, b);
+ }
+
+ msm_smem_put_handle(inst->mem_client, handle);
+ msm_smem_put_dma_buf(dma_buf);
+ }
+
+ return rc;
}
void msm_vidc_fw_unload_handler(struct work_struct *work)
@@ -5580,9 +5495,8 @@
void msm_comm_print_inst_info(struct msm_vidc_inst *inst)
{
- struct buffer_info *temp;
+ struct msm_vidc_buffer *mbuf;
struct internal_buf *buf;
- int i = 0;
bool is_decode = false;
enum vidc_ports port;
bool is_secure = false;
@@ -5610,37 +5524,32 @@
inst, inst->session_type);
mutex_lock(&inst->registeredbufs.lock);
dprintk(VIDC_ERR, "registered buffer list:\n");
- list_for_each_entry(temp, &inst->registeredbufs.list, list)
- for (i = 0; i < temp->num_planes; i++)
- dprintk(VIDC_ERR,
- "type: %d plane: %d addr: %pa size: %d\n",
- temp->type, i, &temp->device_addr[i],
- temp->size[i]);
-
+ list_for_each_entry(mbuf, &inst->registeredbufs.list, list)
+ print_vidc_buffer(VIDC_ERR, "buf", inst, mbuf);
mutex_unlock(&inst->registeredbufs.lock);
mutex_lock(&inst->scratchbufs.lock);
dprintk(VIDC_ERR, "scratch buffer list:\n");
list_for_each_entry(buf, &inst->scratchbufs.list, list)
- dprintk(VIDC_ERR, "type: %d addr: %pa size: %zu\n",
- buf->buffer_type, &buf->handle->device_addr,
- buf->handle->size);
+ dprintk(VIDC_ERR, "type: %d addr: %x size: %u\n",
+ buf->buffer_type, buf->smem.device_addr,
+ buf->smem.size);
mutex_unlock(&inst->scratchbufs.lock);
mutex_lock(&inst->persistbufs.lock);
dprintk(VIDC_ERR, "persist buffer list:\n");
list_for_each_entry(buf, &inst->persistbufs.list, list)
- dprintk(VIDC_ERR, "type: %d addr: %pa size: %zu\n",
- buf->buffer_type, &buf->handle->device_addr,
- buf->handle->size);
+ dprintk(VIDC_ERR, "type: %d addr: %x size: %u\n",
+ buf->buffer_type, buf->smem.device_addr,
+ buf->smem.size);
mutex_unlock(&inst->persistbufs.lock);
mutex_lock(&inst->outputbufs.lock);
dprintk(VIDC_ERR, "dpb buffer list:\n");
list_for_each_entry(buf, &inst->outputbufs.list, list)
- dprintk(VIDC_ERR, "type: %d addr: %pa size: %zu\n",
- buf->buffer_type, &buf->handle->device_addr,
- buf->handle->size);
+ dprintk(VIDC_ERR, "type: %d addr: %x size: %u\n",
+ buf->buffer_type, buf->smem.device_addr,
+ buf->smem.size);
mutex_unlock(&inst->outputbufs.lock);
}
@@ -5737,3 +5646,540 @@
return VENUS_BUFFER_SIZE(COLOR_FMT_NV12_BPP10_UBWC, width, height);
}
+
+void print_vidc_buffer(u32 tag, const char *str, struct msm_vidc_inst *inst,
+ struct msm_vidc_buffer *mbuf)
+{
+ struct vb2_buffer *vb2 = NULL;
+
+ if (!(tag & msm_vidc_debug) || !inst || !mbuf)
+ return;
+
+ vb2 = &mbuf->vvb.vb2_buf;
+
+ if (vb2->num_planes == 1)
+ dprintk(tag,
+ "%s: %s: %x : idx %2d fd %d off %d daddr %x size %d filled %d flags 0x%x ts %lld refcnt %d\n",
+ str, vb2->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
+ "OUTPUT" : "CAPTURE", hash32_ptr(inst->session),
+ vb2->index, vb2->planes[0].m.fd,
+ vb2->planes[0].data_offset, mbuf->smem[0].device_addr,
+ vb2->planes[0].length, vb2->planes[0].bytesused,
+ mbuf->vvb.flags, mbuf->vvb.vb2_buf.timestamp,
+ mbuf->smem[0].refcount);
+ else
+ dprintk(tag,
+ "%s: %s: %x : idx %2d fd %d off %d daddr %x size %d filled %d flags 0x%x ts %lld refcnt %d, extradata: fd %d off %d daddr %x size %d filled %d refcnt %d\n",
+ str, vb2->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
+ "OUTPUT" : "CAPTURE", hash32_ptr(inst->session),
+ vb2->index, vb2->planes[0].m.fd,
+ vb2->planes[0].data_offset, mbuf->smem[0].device_addr,
+ vb2->planes[0].length, vb2->planes[0].bytesused,
+ mbuf->vvb.flags, mbuf->vvb.vb2_buf.timestamp,
+ mbuf->smem[0].refcount, vb2->planes[1].m.fd,
+ vb2->planes[1].data_offset, mbuf->smem[1].device_addr,
+ vb2->planes[1].length, vb2->planes[1].bytesused,
+ mbuf->smem[1].refcount);
+}
+
+void print_vb2_buffer(u32 tag, const char *str, struct msm_vidc_inst *inst,
+ struct vb2_buffer *vb2)
+{
+ if (!(tag & msm_vidc_debug) || !inst || !vb2)
+ return;
+
+ if (vb2->num_planes == 1)
+ dprintk(tag,
+ "%s: %s: %x : idx %2d fd %d off %d size %d filled %d\n",
+ str, vb2->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
+ "OUTPUT" : "CAPTURE", hash32_ptr(inst->session),
+ vb2->index, vb2->planes[0].m.fd,
+ vb2->planes[0].data_offset, vb2->planes[0].length,
+ vb2->planes[0].bytesused);
+ else
+ dprintk(tag,
+ "%s: %s: %x : idx %2d fd %d off %d size %d filled %d, extradata: fd %d off %d size %d\n",
+ str, vb2->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
+ "OUTPUT" : "CAPTURE", hash32_ptr(inst->session),
+ vb2->index, vb2->planes[0].m.fd,
+ vb2->planes[0].data_offset, vb2->planes[0].length,
+ vb2->planes[0].bytesused, vb2->planes[1].m.fd,
+ vb2->planes[1].data_offset, vb2->planes[1].length);
+}
+
+void print_v4l2_buffer(u32 tag, const char *str, struct msm_vidc_inst *inst,
+ struct v4l2_buffer *v4l2)
+{
+ if (!(tag & msm_vidc_debug) || !inst || !v4l2)
+ return;
+
+ if (v4l2->length == 1)
+ dprintk(tag,
+ "%s: %s: %x : idx %2d fd %d off %d size %d filled %d\n",
+ str, v4l2->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
+ "OUTPUT" : "CAPTURE", hash32_ptr(inst->session),
+ v4l2->index, v4l2->m.planes[0].m.fd,
+ v4l2->m.planes[0].data_offset,
+ v4l2->m.planes[0].length,
+ v4l2->m.planes[0].bytesused);
+ else
+ dprintk(tag,
+ "%s: %s: %x : idx %2d fd %d off %d size %d filled %d, extradata: fd %d off %d size %d\n",
+ str, v4l2->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
+ "OUTPUT" : "CAPTURE", hash32_ptr(inst->session),
+ v4l2->index, v4l2->m.planes[0].m.fd,
+ v4l2->m.planes[0].data_offset,
+ v4l2->m.planes[0].length,
+ v4l2->m.planes[0].bytesused,
+ v4l2->m.planes[1].m.fd,
+ v4l2->m.planes[1].data_offset,
+ v4l2->m.planes[1].length);
+}
+
+bool msm_comm_compare_vb2_plane(struct msm_vidc_inst *inst,
+ struct msm_vidc_buffer *mbuf, struct vb2_buffer *vb2, u32 i)
+{
+ struct vb2_buffer *vb;
+
+ if (!inst || !mbuf || !vb2) {
+ dprintk(VIDC_ERR, "%s: invalid params, %pK %pK %pK\n",
+ __func__, inst, mbuf, vb2);
+ return false;
+ }
+
+ vb = &mbuf->vvb.vb2_buf;
+ if (vb->planes[i].m.fd == vb2->planes[i].m.fd &&
+ vb->planes[i].data_offset == vb2->planes[i].data_offset &&
+ vb->planes[i].length == vb2->planes[i].length) {
+ return true;
+ }
+
+ return false;
+}
+
+bool msm_comm_compare_vb2_planes(struct msm_vidc_inst *inst,
+ struct msm_vidc_buffer *mbuf, struct vb2_buffer *vb2)
+{
+ int i = 0;
+ struct vb2_buffer *vb;
+
+ if (!inst || !mbuf || !vb2) {
+ dprintk(VIDC_ERR, "%s: invalid params, %pK %pK %pK\n",
+ __func__, inst, mbuf, vb2);
+ return false;
+ }
+
+ vb = &mbuf->vvb.vb2_buf;
+
+ if (vb->num_planes != vb2->num_planes)
+ return false;
+
+ for (i = 0; i < vb->num_planes; i++) {
+ if (!msm_comm_compare_vb2_plane(inst, mbuf, vb2, i))
+ return false;
+ }
+
+ return true;
+}
+
+bool msm_comm_compare_dma_plane(struct msm_vidc_inst *inst,
+ struct msm_vidc_buffer *mbuf, unsigned long *dma_planes, u32 i)
+{
+ if (!inst || !mbuf || !dma_planes) {
+ dprintk(VIDC_ERR, "%s: invalid params, %pK %pK %pK\n",
+ __func__, inst, mbuf, dma_planes);
+ return false;
+ }
+
+ if ((unsigned long)mbuf->smem[i].dma_buf == dma_planes[i])
+ return true;
+
+ return false;
+}
+
+bool msm_comm_compare_dma_planes(struct msm_vidc_inst *inst,
+ struct msm_vidc_buffer *mbuf, unsigned long *dma_planes)
+{
+ int i = 0;
+ struct vb2_buffer *vb;
+
+ if (!inst || !mbuf || !dma_planes) {
+ dprintk(VIDC_ERR, "%s: invalid params, %pK %pK %pK\n",
+ __func__, inst, mbuf, dma_planes);
+ return false;
+ }
+
+ vb = &mbuf->vvb.vb2_buf;
+ for (i = 0; i < vb->num_planes; i++) {
+ if (!msm_comm_compare_dma_plane(inst, mbuf, dma_planes, i))
+ return false;
+ }
+
+ return true;
+}
+
+
+bool msm_comm_compare_device_plane(struct msm_vidc_buffer *mbuf,
+ u32 *planes, u32 i)
+{
+ if (!mbuf || !planes) {
+ dprintk(VIDC_ERR, "%s: invalid params, %pK %pK\n",
+ __func__, mbuf, planes);
+ return false;
+ }
+
+ if (mbuf->smem[i].device_addr == planes[i])
+ return true;
+
+ return false;
+}
+
+bool msm_comm_compare_device_planes(struct msm_vidc_buffer *mbuf,
+ u32 *planes)
+{
+ int i = 0;
+
+ if (!mbuf || !planes)
+ return false;
+
+ for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) {
+ if (!msm_comm_compare_device_plane(mbuf, planes, i))
+ return false;
+ }
+
+ return true;
+}
+
+struct msm_vidc_buffer *msm_comm_get_buffer_using_device_planes(
+ struct msm_vidc_inst *inst, u32 *planes)
+{
+ struct msm_vidc_buffer *mbuf;
+ bool found = false;
+
+ mutex_lock(&inst->registeredbufs.lock);
+ found = false;
+ list_for_each_entry(mbuf, &inst->registeredbufs.list, list) {
+ if (msm_comm_compare_device_planes(mbuf, planes)) {
+ found = true;
+ break;
+ }
+ }
+ mutex_unlock(&inst->registeredbufs.lock);
+ if (!found) {
+ dprintk(VIDC_ERR,
+ "%s: data_addr %x, extradata_addr %x not found\n",
+ __func__, planes[0], planes[1]);
+ mbuf = NULL;
+ }
+
+ return mbuf;
+}
+
+int msm_comm_flush_vidc_buffer(struct msm_vidc_inst *inst,
+ struct msm_vidc_buffer *mbuf)
+{
+ int rc;
+ struct vb2_buffer *vb;
+
+ if (!inst || !mbuf) {
+ dprintk(VIDC_ERR, "%s: invalid params %pK %pK\n",
+ __func__, inst, mbuf);
+ return -EINVAL;
+ }
+
+ vb = msm_comm_get_vb_using_vidc_buffer(inst, mbuf);
+ if (!vb) {
+ print_vidc_buffer(VIDC_ERR,
+ "vb not found for buf", inst, mbuf);
+ return -EINVAL;
+ }
+
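+ /* return the flushed buffer to the client with zero filled length */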
+ vb->planes[0].bytesused = 0;
+ rc = msm_comm_vb2_buffer_done(inst, vb);
+ if (rc)
+ print_vidc_buffer(VIDC_ERR,
+ "vb2_buffer_done failed for", inst, mbuf);
+
+ return rc;
+}
+
+struct msm_vidc_buffer *msm_comm_get_vidc_buffer(struct msm_vidc_inst *inst,
+ struct vb2_buffer *vb2)
+{
+ int rc = 0;
+ struct vb2_v4l2_buffer *vbuf;
+ struct vb2_buffer *vb;
+ unsigned long dma_planes[VB2_MAX_PLANES] = {0};
+ struct msm_vidc_buffer *mbuf;
+ bool found = false;
+ int i;
+
+ if (!inst || !vb2) {
+ dprintk(VIDC_ERR, "%s: invalid params\n", __func__);
+ return NULL;
+ }
+
+ for (i = 0; i < vb2->num_planes; i++) {
+ /*
+ * Always compare dma_buf addresses, which are guaranteed to be the
+ * same across processes (fds may be duplicated). dma_buf_get/put
+ * below only resolve each fd to its dma_buf; no reference is kept.
+ */
+ dma_planes[i] = (unsigned long)dma_buf_get(vb2->planes[i].m.fd);
+ dma_buf_put((struct dma_buf *)dma_planes[i]);
+ }
+
+ mutex_lock(&inst->registeredbufs.lock);
+ list_for_each_entry(mbuf, &inst->registeredbufs.list, list) {
+ if (msm_comm_compare_dma_planes(inst, mbuf, dma_planes)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+		/* this is a new vb2_buffer */
+ mbuf = kzalloc(sizeof(struct msm_vidc_buffer), GFP_KERNEL);
+ if (!mbuf) {
+ dprintk(VIDC_ERR, "%s: alloc msm_vidc_buffer failed\n",
+ __func__);
+ rc = -ENOMEM;
+ goto exit;
+ }
+ }
+
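+	/* refresh the latest vb2 buffer info into the new or existing mbuf */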
+ vbuf = to_vb2_v4l2_buffer(vb2);
+ memcpy(&mbuf->vvb, vbuf, sizeof(struct vb2_v4l2_buffer));
+ vb = &mbuf->vvb.vb2_buf;
+
+ for (i = 0; i < vb->num_planes; i++) {
+ mbuf->smem[i].buffer_type = get_hal_buffer_type(vb->type, i);
+ mbuf->smem[i].fd = vb->planes[i].m.fd;
+ mbuf->smem[i].offset = vb->planes[i].data_offset;
+ mbuf->smem[i].size = vb->planes[i].length;
+ rc = msm_smem_map_dma_buf(inst, &mbuf->smem[i]);
+ if (rc) {
+ dprintk(VIDC_ERR, "%s: map failed.\n", __func__);
+ goto exit;
+ }
+ /* increase refcount as we get both fbd and rbr */
+ rc = msm_smem_map_dma_buf(inst, &mbuf->smem[i]);
+ if (rc) {
+ dprintk(VIDC_ERR, "%s: map failed..\n", __func__);
+ goto exit;
+ }
+ }
+
+ /* special handling for decoder */
+ if (inst->session_type == MSM_VIDC_DECODER) {
+ if (found) {
+ rc = -EEXIST;
+ } else {
+ bool found_plane0 = false;
+ struct msm_vidc_buffer *temp;
+ /*
+			 * client might have queued the same plane[0] but a
+			 * different plane[1]. Search for plane[0] and, if
+			 * found, don't queue the buffer; it will be queued
+			 * when the rbr event arrives.
+ */
+ list_for_each_entry(temp, &inst->registeredbufs.list,
+ list) {
+ if (msm_comm_compare_dma_plane(inst, temp,
+ dma_planes, 0)) {
+ found_plane0 = true;
+ break;
+ }
+ }
+ if (found_plane0)
+ rc = -EEXIST;
+ }
+ }
+
+ /* add the new buffer to list */
+ if (!found)
+ list_add_tail(&mbuf->list, &inst->registeredbufs.list);
+
+ mutex_unlock(&inst->registeredbufs.lock);
+ if (rc == -EEXIST) {
+ print_vidc_buffer(VIDC_DBG, "qbuf upon rbr", inst, mbuf);
+ return ERR_PTR(rc);
+ }
+
+ return mbuf;
+
+exit:
+ mutex_unlock(&inst->registeredbufs.lock);
+ dprintk(VIDC_ERR, "%s: rc %d\n", __func__, rc);
+ msm_comm_unmap_vidc_buffer(inst, mbuf);
+ if (!found)
+ kfree(mbuf);
+
+ return ERR_PTR(rc);
+}
+
+void msm_comm_put_vidc_buffer(struct msm_vidc_inst *inst,
+ struct msm_vidc_buffer *mbuf)
+{
+ struct msm_vidc_buffer *temp;
+ bool found = false;
+ int i = 0;
+
+ if (!inst || !mbuf) {
+ dprintk(VIDC_ERR, "%s: invalid params %pK %pK\n",
+ __func__, inst, mbuf);
+ return;
+ }
+
+ mutex_lock(&inst->registeredbufs.lock);
+	/* check that mbuf has not been removed in the meantime */
+ list_for_each_entry(temp, &inst->registeredbufs.list, list) {
+ if (msm_comm_compare_vb2_planes(inst, mbuf,
+ &temp->vvb.vb2_buf)) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ print_vidc_buffer(VIDC_ERR, "buf was removed", inst, mbuf);
+ goto unlock;
+ }
+
+ print_vidc_buffer(VIDC_DBG, "dqbuf", inst, mbuf);
+ for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) {
+ if (msm_smem_unmap_dma_buf(inst, &mbuf->smem[i]))
+ print_vidc_buffer(VIDC_ERR,
+ "dqbuf: unmap failed.", inst, mbuf);
+
+ if (!(mbuf->vvb.flags & V4L2_QCOM_BUF_FLAG_READONLY)) {
+ /* rbr won't come for this buffer */
+ if (msm_smem_unmap_dma_buf(inst, &mbuf->smem[i]))
+ print_vidc_buffer(VIDC_ERR,
+ "dqbuf: unmap failed..", inst, mbuf);
+ } /* else RBR event expected */
+ }
+ /*
+	 * remove the entry if plane[0].refcount is zero; otherwise
+	 * keep it, since a non-zero plane[0].refcount means the
+	 * client queued the same buffer again.
+ */
+ if (!mbuf->smem[0].refcount) {
+ list_del(&mbuf->list);
+ kfree(mbuf);
+ mbuf = NULL;
+ }
+unlock:
+ mutex_unlock(&inst->registeredbufs.lock);
+}
+
+void handle_release_buffer_reference(struct msm_vidc_inst *inst, u32 *planes)
+{
+ int rc = 0;
+ struct msm_vidc_buffer *mbuf = NULL;
+ bool found = false;
+ int i = 0;
+
+ mutex_lock(&inst->registeredbufs.lock);
+ list_for_each_entry(mbuf, &inst->registeredbufs.list, list) {
+ if (msm_comm_compare_device_planes(mbuf, planes)) {
+ found = true;
+ break;
+ }
+ }
+ if (found) {
+ msm_vidc_queue_rbr_event(inst,
+ mbuf->vvb.vb2_buf.planes[0].m.fd,
+ mbuf->vvb.vb2_buf.planes[0].data_offset);
+
+ for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) {
+ if (msm_smem_unmap_dma_buf(inst, &mbuf->smem[i]))
+ print_vidc_buffer(VIDC_ERR,
+ "rbr unmap failed.", inst, mbuf);
+ }
+ /* refcount is not zero if client queued the same buffer */
+ if (!mbuf->smem[0].refcount) {
+ list_del(&mbuf->list);
+ kfree(mbuf);
+ mbuf = NULL;
+ }
+ } else {
+ dprintk(VIDC_ERR,
+ "%s: data_addr %x extradata_addr %x not found\n",
+ __func__, planes[0], planes[1]);
+ goto unlock;
+ }
+
+ /*
+	 * 1. client might have pushed the same planes, in which case mbuf is
+	 *    the same, the refcounts are positive, and the buffer was not
+	 *    removed from the registeredbufs list.
+	 * 2. client might have pushed the same planes[0] but different
+	 *    planes[1], in which case mbuf is different.
+	 * 3. in either case we can search for mbuf->smem[0].device_addr in
+	 *    the list and, if found, queue it to the video hw (unless
+	 *    flushing).
+ */
+ found = false;
+ list_for_each_entry(mbuf, &inst->registeredbufs.list, list) {
+ if (msm_comm_compare_device_plane(mbuf, planes, 0)) {
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ goto unlock;
+
+ /* found means client queued the buffer already */
+ if (inst->in_reconfig || inst->in_flush) {
+ print_vidc_buffer(VIDC_DBG, "rbr flush buf", inst, mbuf);
+ msm_comm_flush_vidc_buffer(inst, mbuf);
+ msm_comm_unmap_vidc_buffer(inst, mbuf);
+ /* remove from list */
+ list_del(&mbuf->list);
+ kfree(mbuf);
+ mbuf = NULL;
+
+ /* don't queue the buffer */
+ found = false;
+ }
+unlock:
+ mutex_unlock(&inst->registeredbufs.lock);
+
+ if (found) {
+ print_vidc_buffer(VIDC_DBG, "rbr qbuf", inst, mbuf);
+ rc = msm_comm_qbuf(inst, mbuf);
+ if (rc)
+ print_vidc_buffer(VIDC_ERR,
+ "rbr qbuf failed", inst, mbuf);
+ }
+}
+
+int msm_comm_unmap_vidc_buffer(struct msm_vidc_inst *inst,
+ struct msm_vidc_buffer *mbuf)
+{
+ int rc = 0, i;
+
+ if (!inst || !mbuf) {
+ dprintk(VIDC_ERR, "%s: invalid params %pK %pK\n",
+ __func__, inst, mbuf);
+ return -EINVAL;
+ }
+ if (mbuf->vvb.vb2_buf.num_planes > VIDEO_MAX_PLANES) {
+ dprintk(VIDC_ERR, "%s: invalid num_planes %d\n", __func__,
+ mbuf->vvb.vb2_buf.num_planes);
+ return -EINVAL;
+ }
+
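+	/* drop all outstanding references so each plane is fully unmapped */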
+ for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) {
+ u32 refcount = mbuf->smem[i].refcount;
+
+ while (refcount) {
+ if (msm_smem_unmap_dma_buf(inst, &mbuf->smem[i]))
+ print_vidc_buffer(VIDC_ERR,
+ "unmap failed for buf", inst, mbuf);
+ refcount--;
+ }
+ }
+
+ return rc;
+}
+
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.h b/drivers/media/platform/msm/vidc/msm_vidc_common.h
index 52925eb..5c653f5 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.h
@@ -14,6 +14,7 @@
#ifndef _MSM_VIDC_COMMON_H_
#define _MSM_VIDC_COMMON_H_
#include "msm_vidc_internal.h"
+
struct vb2_buf_entry {
struct list_head list;
struct vb2_buffer *vb;
@@ -28,6 +29,8 @@
LOAD_CALC_IGNORE_NON_REALTIME_LOAD = 1 << 2,
};
+enum hal_buffer get_hal_buffer_type(unsigned int type,
+ unsigned int plane_num);
struct msm_vidc_core *get_vidc_core(int core_id);
const struct msm_vidc_format *msm_comm_get_pixel_fmt_index(
const struct msm_vidc_format fmt[], int size, int index, int fmt_type);
@@ -46,7 +49,7 @@
int msm_comm_set_persist_buffers(struct msm_vidc_inst *inst);
int msm_comm_set_output_buffers(struct msm_vidc_inst *inst);
int msm_comm_queue_output_buffers(struct msm_vidc_inst *inst);
-int msm_comm_qbuf(struct msm_vidc_inst *inst, struct vb2_buffer *vb);
+int msm_comm_qbuf(struct msm_vidc_inst *inst, struct msm_vidc_buffer *mbuf);
void msm_comm_flush_dynamic_buffers(struct msm_vidc_inst *inst);
int msm_comm_flush(struct msm_vidc_inst *inst, u32 flags);
int msm_comm_release_scratch_buffers(struct msm_vidc_inst *inst,
@@ -69,14 +72,12 @@
int msm_comm_kill_session(struct msm_vidc_inst *inst);
enum multi_stream msm_comm_get_stream_output_mode(struct msm_vidc_inst *inst);
enum hal_buffer msm_comm_get_hal_output_buffer(struct msm_vidc_inst *inst);
-struct msm_smem *msm_comm_smem_alloc(struct msm_vidc_inst *inst,
- size_t size, u32 align, u32 flags,
- enum hal_buffer buffer_type, int map_kernel);
-void msm_comm_smem_free(struct msm_vidc_inst *inst, struct msm_smem *mem);
+int msm_comm_smem_alloc(struct msm_vidc_inst *inst, size_t size, u32 align,
+ u32 flags, enum hal_buffer buffer_type, int map_kernel,
+ struct msm_smem *smem);
+void msm_comm_smem_free(struct msm_vidc_inst *inst, struct msm_smem *smem);
int msm_comm_smem_cache_operations(struct msm_vidc_inst *inst,
struct msm_smem *mem, enum smem_cache_ops cache_ops);
-struct msm_smem *msm_comm_smem_user_to_kernel(struct msm_vidc_inst *inst,
- int fd, u32 offset, enum hal_buffer buffer_type);
enum hal_video_codec get_hal_codec(int fourcc);
enum hal_domain get_hal_domain(int session_type);
int msm_comm_check_core_init(struct msm_vidc_core *core);
@@ -107,4 +108,41 @@
u32 get_frame_size_nv21(int plane, u32 height, u32 width);
u32 get_frame_size_tp10_ubwc(int plane, u32 height, u32 width);
void msm_comm_set_use_sys_cache(struct msm_vidc_inst *inst);
+struct vb2_buffer *msm_comm_get_vb_using_vidc_buffer(
+ struct msm_vidc_inst *inst, struct msm_vidc_buffer *mbuf);
+struct msm_vidc_buffer *msm_comm_get_buffer_using_device_planes(
+ struct msm_vidc_inst *inst, u32 *planes);
+struct msm_vidc_buffer *msm_comm_get_vidc_buffer(struct msm_vidc_inst *inst,
+ struct vb2_buffer *vb2);
+void msm_comm_put_vidc_buffer(struct msm_vidc_inst *inst,
+ struct msm_vidc_buffer *mbuf);
+void handle_release_buffer_reference(struct msm_vidc_inst *inst, u32 *planes);
+int msm_comm_vb2_buffer_done(struct msm_vidc_inst *inst,
+ struct vb2_buffer *vb);
+int msm_comm_flush_vidc_buffer(struct msm_vidc_inst *inst,
+ struct msm_vidc_buffer *mbuf);
+int msm_comm_unmap_vidc_buffer(struct msm_vidc_inst *inst,
+ struct msm_vidc_buffer *mbuf);
+bool msm_comm_compare_dma_plane(struct msm_vidc_inst *inst,
+ struct msm_vidc_buffer *mbuf, unsigned long *dma_planes, u32 i);
+bool msm_comm_compare_dma_planes(struct msm_vidc_inst *inst,
+ struct msm_vidc_buffer *mbuf, unsigned long *dma_planes);
+bool msm_comm_compare_vb2_plane(struct msm_vidc_inst *inst,
+ struct msm_vidc_buffer *mbuf, struct vb2_buffer *vb2, u32 i);
+bool msm_comm_compare_vb2_planes(struct msm_vidc_inst *inst,
+ struct msm_vidc_buffer *mbuf, struct vb2_buffer *vb2);
+bool msm_comm_compare_device_plane(struct msm_vidc_buffer *mbuf,
+ u32 *planes, u32 i);
+bool msm_comm_compare_device_planes(struct msm_vidc_buffer *mbuf,
+ u32 *planes);
+int msm_comm_qbuf_cache_operations(struct msm_vidc_inst *inst,
+ struct v4l2_buffer *b);
+int msm_comm_dqbuf_cache_operations(struct msm_vidc_inst *inst,
+ struct v4l2_buffer *b);
+void print_vidc_buffer(u32 tag, const char *str, struct msm_vidc_inst *inst,
+ struct msm_vidc_buffer *mbuf);
+void print_vb2_buffer(u32 tag, const char *str, struct msm_vidc_inst *inst,
+ struct vb2_buffer *vb2);
+void print_v4l2_buffer(u32 tag, const char *str, struct msm_vidc_inst *inst,
+ struct v4l2_buffer *v4l2);
#endif
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.c b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
index 3b1d08d..58c3b0f 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
@@ -265,7 +265,7 @@
static int publish_unreleased_reference(struct msm_vidc_inst *inst)
{
- struct buffer_info *temp = NULL;
+ struct msm_vidc_buffer *temp = NULL;
if (!inst) {
dprintk(VIDC_ERR, "%s: invalid param\n", __func__);
@@ -277,14 +277,15 @@
mutex_lock(&inst->registeredbufs.lock);
list_for_each_entry(temp, &inst->registeredbufs.list, list) {
- if (temp->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
- !temp->inactive && atomic_read(&temp->ref_count)) {
+ struct vb2_buffer *vb2 = &temp->vvb.vb2_buf;
+
+ if (vb2->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
write_str(&dbg_buf,
- "\tpending buffer: %#lx fd[0] = %d ref_count = %d held by: %s\n",
- temp->device_addr[0],
- temp->fd[0],
- atomic_read(&temp->ref_count),
- DYNAMIC_BUF_OWNER(temp));
+ "\tbuffer: %#x fd[0] = %d size %d refcount = %d\n",
+ temp->smem[0].device_addr,
+ vb2->planes[0].m.fd,
+ vb2->planes[0].length,
+ temp->smem[0].refcount);
}
}
mutex_unlock(&inst->registeredbufs.lock);
@@ -403,18 +404,14 @@
switch (e) {
case MSM_VIDC_DEBUGFS_EVENT_ETB:
- mutex_lock(&inst->lock);
inst->count.etb++;
- mutex_unlock(&inst->lock);
if (inst->count.ebd && inst->count.ftb > inst->count.fbd) {
d->pdata[FRAME_PROCESSING].name[0] = '\0';
tic(inst, FRAME_PROCESSING, a);
}
break;
case MSM_VIDC_DEBUGFS_EVENT_EBD:
- mutex_lock(&inst->lock);
inst->count.ebd++;
- mutex_unlock(&inst->lock);
if (inst->count.ebd && inst->count.ebd == inst->count.etb) {
toc(inst, FRAME_PROCESSING);
dprintk(VIDC_PROF, "EBD: FW needs input buffers\n");
@@ -431,6 +428,7 @@
}
break;
case MSM_VIDC_DEBUGFS_EVENT_FBD:
+ inst->count.fbd++;
inst->debug.samples++;
if (inst->count.ebd && inst->count.fbd == inst->count.ftb) {
toc(inst, FRAME_PROCESSING);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
index ca61708..22772ef 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
@@ -33,7 +33,6 @@
#include <media/videobuf2-v4l2.h>
#include <media/msm_vidc.h>
#include <media/msm_media_info.h>
-
#include "vidc_hfi_api.h"
#define MSM_VIDC_DRV_NAME "msm_vidc_driver"
@@ -141,7 +140,7 @@
struct vidc_freq_data {
struct list_head list;
- ion_phys_addr_t device_addr;
+ u32 device_addr;
unsigned long freq;
};
@@ -155,7 +154,7 @@
struct internal_buf {
struct list_head list;
enum hal_buffer buffer_type;
- struct msm_smem *handle;
+ struct msm_smem smem;
enum buffer_owner buffer_ownership;
};
@@ -322,7 +321,6 @@
enum instance_state state;
struct msm_vidc_format fmts[MAX_PORT_NUM];
struct buf_queue bufq[MAX_PORT_NUM];
- struct msm_vidc_list pendingq;
struct msm_vidc_list freqs;
struct msm_vidc_list scratchbufs;
struct msm_vidc_list persistbufs;
@@ -331,7 +329,7 @@
struct msm_vidc_list reconbufs;
struct msm_vidc_list registeredbufs;
struct buffer_requirements buff_req;
- void *mem_client;
+ struct smem_client *mem_client;
struct v4l2_ctrl_handler ctrl_handler;
struct completion completions[SESSION_MSG_END - SESSION_MSG_START + 1];
struct v4l2_ctrl **cluster;
@@ -352,8 +350,7 @@
struct v4l2_ctrl **ctrls;
enum msm_vidc_pixel_depth bit_depth;
struct kref kref;
- u32 buffers_held_in_driver;
- atomic_t in_flush;
+ bool in_flush;
u32 pic_struct;
u32 colour_space;
u32 profile;
@@ -389,53 +386,33 @@
int msm_vidc_check_scaling_supported(struct msm_vidc_inst *inst);
void msm_vidc_queue_v4l2_event(struct msm_vidc_inst *inst, int event_type);
-struct buffer_info {
+struct msm_vidc_buffer {
struct list_head list;
- int type;
- int num_planes;
- int fd[VIDEO_MAX_PLANES];
- int buff_off[VIDEO_MAX_PLANES];
- int size[VIDEO_MAX_PLANES];
- unsigned long uvaddr[VIDEO_MAX_PLANES];
- ion_phys_addr_t device_addr[VIDEO_MAX_PLANES];
- struct msm_smem *handle[VIDEO_MAX_PLANES];
- enum v4l2_memory memory;
- u32 v4l2_index;
- bool pending_deletion;
- atomic_t ref_count;
- bool dequeued;
- bool inactive;
- bool mapped[VIDEO_MAX_PLANES];
- int same_fd_ref[VIDEO_MAX_PLANES];
- struct timeval timestamp;
+ struct msm_smem smem[VIDEO_MAX_PLANES];
+ struct vb2_v4l2_buffer vvb;
+ bool deferred;
};
-struct buffer_info *device_to_uvaddr(struct msm_vidc_list *buf_list,
- ion_phys_addr_t device_addr);
-int buf_ref_get(struct msm_vidc_inst *inst, struct buffer_info *binfo);
-int buf_ref_put(struct msm_vidc_inst *inst, struct buffer_info *binfo);
-int output_buffer_cache_invalidate(struct msm_vidc_inst *inst,
- struct buffer_info *binfo);
-int qbuf_dynamic_buf(struct msm_vidc_inst *inst,
- struct buffer_info *binfo);
-int unmap_and_deregister_buf(struct msm_vidc_inst *inst,
- struct buffer_info *binfo);
-
void msm_comm_handle_thermal_event(void);
void *msm_smem_new_client(enum smem_type mtype,
void *platform_resources, enum session_type stype);
-struct msm_smem *msm_smem_alloc(void *clt, size_t size, u32 align, u32 flags,
- enum hal_buffer buffer_type, int map_kernel);
-void msm_smem_free(void *clt, struct msm_smem *mem);
+int msm_smem_alloc(struct smem_client *client,
+ size_t size, u32 align, u32 flags, enum hal_buffer buffer_type,
+ int map_kernel, struct msm_smem *smem);
+int msm_smem_free(void *clt, struct msm_smem *mem);
void msm_smem_delete_client(void *clt);
-int msm_smem_cache_operations(void *clt, struct msm_smem *mem,
- enum smem_cache_ops);
-struct msm_smem *msm_smem_user_to_kernel(void *clt, int fd, u32 offset,
- enum hal_buffer buffer_type);
struct context_bank_info *msm_smem_get_context_bank(void *clt,
bool is_secure, enum hal_buffer buffer_type);
+int msm_smem_map_dma_buf(struct msm_vidc_inst *inst, struct msm_smem *smem);
+int msm_smem_unmap_dma_buf(struct msm_vidc_inst *inst, struct msm_smem *smem);
+void *msm_smem_get_dma_buf(int fd);
+void msm_smem_put_dma_buf(void *dma_buf);
+void *msm_smem_get_handle(struct smem_client *client, void *dma_buf);
+void msm_smem_put_handle(struct smem_client *client, void *handle);
+int msm_smem_cache_operations(struct smem_client *client,
+ void *handle, unsigned long offset, unsigned long size,
+ enum smem_cache_ops cache_op);
void msm_vidc_fw_unload_handler(struct work_struct *work);
-bool msm_smem_compare_buffers(void *clt, int fd, void *priv);
/*
* XXX: normally should be in msm_vidc.h, but that's meant for public APIs,
* whereas this is private
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index 62dcc59..f8d8842 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -560,7 +560,7 @@
struct vidc_mem_addr *mem, u32 size, u32 align,
u32 flags, u32 usage)
{
- struct msm_smem *alloc = NULL;
+ struct msm_smem *alloc = &mem->mem_data;
int rc = 0;
if (!dev || !dev->hal_client || !mem || !size) {
@@ -569,8 +569,9 @@
}
dprintk(VIDC_INFO, "start to alloc size: %d, flags: %d\n", size, flags);
- alloc = msm_smem_alloc(dev->hal_client, size, align, flags, usage, 1);
- if (!alloc) {
+ rc = msm_smem_alloc(dev->hal_client, size, align, flags,
+ usage, 1, alloc);
+ if (rc) {
dprintk(VIDC_ERR, "Alloc failed\n");
rc = -ENOMEM;
goto fail_smem_alloc;
@@ -578,17 +579,16 @@
dprintk(VIDC_DBG, "__smem_alloc: ptr = %pK, size = %d\n",
alloc->kvaddr, size);
- rc = msm_smem_cache_operations(dev->hal_client, alloc,
- SMEM_CACHE_CLEAN);
+ rc = msm_smem_cache_operations(dev->hal_client, alloc->handle, 0,
+ alloc->size, SMEM_CACHE_CLEAN);
if (rc) {
dprintk(VIDC_WARN, "Failed to clean cache\n");
- dprintk(VIDC_WARN, "This may result in undefined behavior\n");
}
mem->mem_size = alloc->size;
- mem->mem_data = alloc;
mem->align_virtual_addr = alloc->kvaddr;
mem->align_device_addr = alloc->device_addr;
+
return rc;
fail_smem_alloc:
return rc;
@@ -1312,7 +1312,7 @@
unsigned long mem_map_table_base_addr;
struct context_bank_info *cb;
- if (device->qdss.mem_data) {
+ if (device->qdss.align_virtual_addr) {
qdss = (struct hfi_mem_map_table *)
device->qdss.align_virtual_addr;
qdss->mem_map_num_entries = num_entries;
@@ -1338,32 +1338,27 @@
mem_map[i].size);
}
- __smem_free(device, device->qdss.mem_data);
+ __smem_free(device, &device->qdss.mem_data);
}
- __smem_free(device, device->iface_q_table.mem_data);
- __smem_free(device, device->sfr.mem_data);
+ __smem_free(device, &device->iface_q_table.mem_data);
+ __smem_free(device, &device->sfr.mem_data);
for (i = 0; i < VIDC_IFACEQ_NUMQ; i++) {
device->iface_queues[i].q_hdr = NULL;
- device->iface_queues[i].q_array.mem_data = NULL;
device->iface_queues[i].q_array.align_virtual_addr = NULL;
device->iface_queues[i].q_array.align_device_addr = 0;
}
- device->iface_q_table.mem_data = NULL;
device->iface_q_table.align_virtual_addr = NULL;
device->iface_q_table.align_device_addr = 0;
- device->qdss.mem_data = NULL;
device->qdss.align_virtual_addr = NULL;
device->qdss.align_device_addr = 0;
- device->sfr.mem_data = NULL;
device->sfr.align_virtual_addr = NULL;
device->sfr.align_device_addr = 0;
- device->mem_addr.mem_data = NULL;
device->mem_addr.align_virtual_addr = NULL;
device->mem_addr.align_device_addr = 0;
@@ -1452,7 +1447,6 @@
struct vidc_mem_addr *mem_addr;
int offset = 0;
int num_entries = dev->res->qdss_addr_set.count;
- u32 value = 0;
phys_addr_t fw_bias = 0;
size_t q_size;
unsigned long mem_map_table_base_addr;
@@ -1483,7 +1477,6 @@
iface_q->q_array.align_virtual_addr =
mem_addr->align_virtual_addr + offset;
iface_q->q_array.mem_size = VIDC_IFACEQ_QUEUE_SIZE;
- iface_q->q_array.mem_data = NULL;
offset += iface_q->q_array.mem_size;
iface_q->q_hdr = VIDC_IFACEQ_GET_QHDR_START_ADDR(
dev->iface_q_table.align_virtual_addr, i);
@@ -1535,65 +1528,34 @@
iface_q = &dev->iface_queues[VIDC_IFACEQ_CMDQ_IDX];
q_hdr = iface_q->q_hdr;
- q_hdr->qhdr_start_addr = (u32)iface_q->q_array.align_device_addr;
+ q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
q_hdr->qhdr_type |= HFI_Q_ID_HOST_TO_CTRL_CMD_Q;
- if ((ion_phys_addr_t)q_hdr->qhdr_start_addr !=
- iface_q->q_array.align_device_addr) {
- dprintk(VIDC_ERR, "Invalid CMDQ device address (%pa)",
- &iface_q->q_array.align_device_addr);
- }
iface_q = &dev->iface_queues[VIDC_IFACEQ_MSGQ_IDX];
q_hdr = iface_q->q_hdr;
- q_hdr->qhdr_start_addr = (u32)iface_q->q_array.align_device_addr;
+ q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
q_hdr->qhdr_type |= HFI_Q_ID_CTRL_TO_HOST_MSG_Q;
- if ((ion_phys_addr_t)q_hdr->qhdr_start_addr !=
- iface_q->q_array.align_device_addr) {
- dprintk(VIDC_ERR, "Invalid MSGQ device address (%pa)",
- &iface_q->q_array.align_device_addr);
- }
iface_q = &dev->iface_queues[VIDC_IFACEQ_DBGQ_IDX];
q_hdr = iface_q->q_hdr;
- q_hdr->qhdr_start_addr = (u32)iface_q->q_array.align_device_addr;
+ q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
q_hdr->qhdr_type |= HFI_Q_ID_CTRL_TO_HOST_DEBUG_Q;
/*
* Set receive request to zero on debug queue as there is no
* need of interrupt from video hardware for debug messages
*/
q_hdr->qhdr_rx_req = 0;
- if ((ion_phys_addr_t)q_hdr->qhdr_start_addr !=
- iface_q->q_array.align_device_addr) {
- dprintk(VIDC_ERR, "Invalid DBGQ device address (%pa)",
- &iface_q->q_array.align_device_addr);
- }
- value = (u32)dev->iface_q_table.align_device_addr;
- if ((ion_phys_addr_t)value !=
- dev->iface_q_table.align_device_addr) {
- dprintk(VIDC_ERR,
- "Invalid iface_q_table device address (%pa)",
- &dev->iface_q_table.align_device_addr);
- }
-
- if (dev->qdss.mem_data) {
+ if (dev->qdss.align_virtual_addr) {
qdss = (struct hfi_mem_map_table *)dev->qdss.align_virtual_addr;
qdss->mem_map_num_entries = num_entries;
mem_map_table_base_addr = dev->qdss.align_device_addr +
sizeof(struct hfi_mem_map_table);
- qdss->mem_map_table_base_addr =
- (u32)mem_map_table_base_addr;
- if ((ion_phys_addr_t)qdss->mem_map_table_base_addr !=
- mem_map_table_base_addr) {
- dprintk(VIDC_ERR,
- "Invalid mem_map_table_base_addr (%#lx)",
- mem_map_table_base_addr);
- }
+ qdss->mem_map_table_base_addr = mem_map_table_base_addr;
mem_map = (struct hfi_mem_map *)(qdss + 1);
cb = msm_smem_get_context_bank(dev->hal_client, false,
HAL_BUFFER_INTERNAL_CMD_QUEUE);
-
if (!cb) {
dprintk(VIDC_ERR,
"%s: failed to get context bank\n", __func__);
@@ -1604,28 +1566,14 @@
if (rc) {
dprintk(VIDC_ERR,
"IOMMU mapping failed, Freeing qdss memdata\n");
- __smem_free(dev, dev->qdss.mem_data);
- dev->qdss.mem_data = NULL;
+ __smem_free(dev, &dev->qdss.mem_data);
dev->qdss.align_virtual_addr = NULL;
dev->qdss.align_device_addr = 0;
}
-
- value = (u32)dev->qdss.align_device_addr;
- if ((ion_phys_addr_t)value !=
- dev->qdss.align_device_addr) {
- dprintk(VIDC_ERR, "Invalid qdss device address (%pa)",
- &dev->qdss.align_device_addr);
- }
}
vsfr = (struct hfi_sfr_struct *) dev->sfr.align_virtual_addr;
vsfr->bufSize = ALIGNED_SFR_SIZE;
- value = (u32)dev->sfr.align_device_addr;
- if ((ion_phys_addr_t)value !=
- dev->sfr.align_device_addr) {
- dprintk(VIDC_ERR, "Invalid sfr device address (%pa)",
- &dev->sfr.align_device_addr);
- }
__setup_ucregion_memory_map(dev);
return 0;
@@ -1911,7 +1859,6 @@
__write_register(device, VIDC_CPU_CS_A2HSOFTINTCLR, 1);
__write_register(device, VIDC_WRAPPER_INTR_CLEAR, intr_status);
- dprintk(VIDC_DBG, "Cleared WRAPPER/A2H interrupt\n");
}
static int venus_hfi_core_ping(void *device)
@@ -3088,7 +3035,7 @@
mutex_lock(&device->lock);
- dprintk(VIDC_INFO, "Handling interrupt\n");
+ dprintk(VIDC_DBG, "Handling interrupt\n");
if (!__core_in_valid_state(device)) {
dprintk(VIDC_DBG, "%s - Core not in init state\n", __func__);
@@ -3123,7 +3070,8 @@
for (i = 0; !IS_ERR_OR_NULL(device->response_pkt) &&
i < num_responses; ++i) {
struct msm_vidc_cb_info *r = &device->response_pkt[i];
-
+ dprintk(VIDC_DBG, "Processing response %d of %d, type %d\n",
+ (i + 1), num_responses, r->response_type);
device->callback(r->response_type, &r->response);
}
@@ -3131,6 +3079,7 @@
if (!(intr_status & VIDC_WRAPPER_INTR_STATUS_A2HWD_BMSK))
enable_irq(device->hal_data->irq);
+ dprintk(VIDC_DBG, "Handling interrupt done\n");
/*
* XXX: Don't add any code beyond here. Reacquiring locks after release
* it above doesn't guarantee the atomicity that we're aiming for.
@@ -3991,7 +3940,6 @@
dprintk(VIDC_ERR, "Invalid params: %pK\n", device);
return -EINVAL;
} else if (device->power_enabled) {
- dprintk(VIDC_DBG, "Power is already enabled\n");
goto exit;
} else if (!__core_in_valid_state(device)) {
dprintk(VIDC_DBG, "venus_hfi_device in deinit state.");
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.h b/drivers/media/platform/msm/vidc/venus_hfi.h
index 925918c..4c4cb06 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.h
+++ b/drivers/media/platform/msm/vidc/venus_hfi.h
@@ -126,10 +126,10 @@
};
struct vidc_mem_addr {
- ion_phys_addr_t align_device_addr;
+ u32 align_device_addr;
u8 *align_virtual_addr;
u32 mem_size;
- struct msm_smem *mem_data;
+ struct msm_smem mem_data;
};
struct vidc_iface_q_info {
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index 695c563..47ce0ba 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -17,6 +17,8 @@
#include <linux/log2.h>
#include <linux/platform_device.h>
#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/hash.h>
#include <media/msm_vidc.h>
#include "msm_vidc_resources.h"
@@ -882,8 +884,8 @@
enum hal_buffer buffer_type;
u32 buffer_size;
u32 num_buffers;
- ion_phys_addr_t align_device_addr;
- ion_phys_addr_t extradata_addr;
+ u32 align_device_addr;
+ u32 extradata_addr;
u32 extradata_size;
u32 response_required;
};
@@ -910,8 +912,8 @@
struct vidc_frame_data {
enum hal_buffer buffer_type;
- ion_phys_addr_t device_addr;
- ion_phys_addr_t extradata_addr;
+ u32 device_addr;
+ u32 extradata_addr;
int64_t timestamp;
u32 flags;
u32 offset;
@@ -1111,8 +1113,8 @@
u32 filled_len;
enum hal_picture picture_type;
struct recon_stats_type recon_stats;
- ion_phys_addr_t packet_buffer;
- ion_phys_addr_t extra_data_buffer;
+ u32 packet_buffer;
+ u32 extra_data_buffer;
};
struct vidc_hal_fbd {
@@ -1134,18 +1136,18 @@
u32 input_tag;
u32 input_tag1;
enum hal_picture picture_type;
- ion_phys_addr_t packet_buffer1;
- ion_phys_addr_t extra_data_buffer;
+ u32 packet_buffer1;
+ u32 extra_data_buffer;
u32 flags2;
u32 alloc_len2;
u32 filled_len2;
u32 offset2;
- ion_phys_addr_t packet_buffer2;
+ u32 packet_buffer2;
u32 flags3;
u32 alloc_len3;
u32 filled_len3;
u32 offset3;
- ion_phys_addr_t packet_buffer3;
+ u32 packet_buffer3;
enum hal_buffer buffer_type;
};
@@ -1247,8 +1249,8 @@
u32 width;
enum msm_vidc_pixel_depth bit_depth;
u32 hal_event_type;
- ion_phys_addr_t packet_buffer;
- ion_phys_addr_t extra_data_buffer;
+ u32 packet_buffer;
+ u32 extra_data_buffer;
u32 pic_struct;
u32 colour_space;
u32 profile;
diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
index 52ef883..a29ddca 100644
--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
+++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
@@ -330,6 +330,8 @@
b->m.planes[plane].m.userptr;
planes[plane].length =
b->m.planes[plane].length;
+ planes[plane].data_offset =
+ b->m.planes[plane].data_offset;
}
}
if (b->memory == VB2_MEMORY_DMABUF) {
@@ -338,6 +340,8 @@
b->m.planes[plane].m.fd;
planes[plane].length =
b->m.planes[plane].length;
+ planes[plane].data_offset =
+ b->m.planes[plane].data_offset;
}
}
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 9ac6568..d8e9599 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1718,8 +1718,6 @@
/* We couldn't get a response from the card. Give up. */
if (err) {
- if (card->err_in_sdr104)
- return ERR_RETRY;
/* Check if the card is removed */
if (mmc_detect_card_removed(card->host))
return ERR_NOMEDIUM;
@@ -2210,8 +2208,7 @@
brq->data.error == -ETIMEDOUT ||
brq->cmd.error == -EILSEQ ||
brq->cmd.error == -EIO ||
- brq->cmd.error == -ETIMEDOUT ||
- brq->sbc.error))
+ brq->cmd.error == -ETIMEDOUT))
card->err_in_sdr104 = true;
/*
diff --git a/drivers/mmc/core/bus.h b/drivers/mmc/core/bus.h
index 00a1971..3f3f24b 100644
--- a/drivers/mmc/core/bus.h
+++ b/drivers/mmc/core/bus.h
@@ -15,7 +15,7 @@
static ssize_t mmc_##name##_show (struct device *dev, struct device_attribute *attr, char *buf) \
{ \
struct mmc_card *card = mmc_dev_to_card(dev); \
- return sprintf(buf, fmt, args); \
+ return snprintf(buf, PAGE_SIZE, fmt, args); \
} \
static DEVICE_ATTR(name, S_IRUGO, mmc_##name##_show, NULL)
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 545e26e..e75444a 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -456,22 +456,6 @@
}
EXPORT_SYMBOL(mmc_clk_update_freq);
-void mmc_recovery_fallback_lower_speed(struct mmc_host *host)
-{
- if (!host->card)
- return;
-
- if (host->sdr104_wa && mmc_card_sd(host->card) &&
- (host->ios.timing == MMC_TIMING_UHS_SDR104) &&
- !host->card->sdr104_blocked) {
- pr_err("%s: %s: blocked SDR104, lower the bus-speed (SDR50 / DDR50)\n",
- mmc_hostname(host), __func__);
- mmc_host_clear_sdr104(host);
- mmc_hw_reset(host);
- host->card->sdr104_blocked = true;
- }
-}
-
static int mmc_devfreq_set_target(struct device *dev,
unsigned long *freq, u32 devfreq_flags)
{
@@ -523,9 +507,6 @@
if (abort)
goto out;
- if (mmc_card_sd(host->card) && host->card->sdr104_blocked)
- goto rel_host;
-
/*
* In case we were able to claim host there is no need to
* defer the frequency change. It will be done now
@@ -534,18 +515,15 @@
mmc_host_clk_hold(host);
err = mmc_clk_update_freq(host, *freq, clk_scaling->state);
- if (err && err != -EAGAIN) {
+ if (err && err != -EAGAIN)
pr_err("%s: clock scale to %lu failed with error %d\n",
mmc_hostname(host), *freq, err);
- mmc_recovery_fallback_lower_speed(host);
- } else {
+ else
pr_debug("%s: clock change to %lu finished successfully (%s)\n",
mmc_hostname(host), *freq, current->comm);
- }
mmc_host_clk_release(host);
-rel_host:
mmc_release_host(host);
out:
return err;
@@ -566,9 +544,6 @@
if (!host->clk_scaling.enable)
return;
- if (mmc_card_sd(host->card) && host->card->sdr104_blocked)
- return;
-
spin_lock_bh(&host->clk_scaling.lock);
if (host->clk_scaling.clk_scaling_in_progress ||
@@ -589,15 +564,13 @@
err = mmc_clk_update_freq(host, target_freq,
host->clk_scaling.state);
- if (err && err != -EAGAIN) {
+ if (err && err != -EAGAIN)
pr_err("%s: failed on deferred scale clocks (%d)\n",
mmc_hostname(host), err);
- mmc_recovery_fallback_lower_speed(host);
- } else {
+ else
pr_debug("%s: clocks were successfully scaled to %lu (%s)\n",
mmc_hostname(host),
target_freq, current->comm);
- }
host->clk_scaling.clk_scaling_in_progress = false;
atomic_dec(&host->clk_scaling.devfreq_abort);
}
@@ -1598,13 +1571,8 @@
}
}
if (!cmd->error || !cmd->retries ||
- mmc_card_removed(host->card)) {
- if (cmd->error && !cmd->retries &&
- cmd->opcode != MMC_SEND_STATUS &&
- cmd->opcode != MMC_SEND_TUNING_BLOCK)
- mmc_recovery_fallback_lower_speed(host);
+ mmc_card_removed(host->card))
break;
- }
mmc_retune_recheck(host);
@@ -4291,18 +4259,12 @@
}
if (ret) {
- if (host->ops->get_cd && host->ops->get_cd(host)) {
- mmc_recovery_fallback_lower_speed(host);
- ret = 0;
- } else {
- mmc_card_set_removed(host->card);
- if (host->card->sdr104_blocked) {
- mmc_host_set_sdr104(host);
- host->card->sdr104_blocked = false;
- }
- pr_debug("%s: card remove detected\n",
- mmc_hostname(host));
+ mmc_card_set_removed(host->card);
+ if (host->card->sdr104_blocked) {
+ mmc_host_set_sdr104(host);
+ host->card->sdr104_blocked = false;
}
+ pr_debug("%s: card remove detected\n", mmc_hostname(host));
}
return ret;
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 58329d2..e0f0c06 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -50,6 +50,19 @@
say M here and read <file:Documentation/kbuild/modules.txt>.
The module will be called ms02-nv.
+config MTD_MSM_QPIC_NAND
+ tristate "MSM QPIC NAND Device Support"
+ depends on MTD && (ARCH_QCOM || ARCH_MSM) && !MTD_MSM_NAND
+ select CRC16
+ select BITREVERSE
+ select MTD_NAND_IDS
+ default n
+ help
+	  Support for the NAND controller in the Qualcomm Technologies, Inc.
+	  Parallel Interface Controller (QPIC). This controller supports
+	  BAM mode and the BCH error correction mechanism. Based on the
+	  device capabilities, either 4-bit or 8-bit BCH ECC will be used.
+
config MTD_DATAFLASH
tristate "Support for AT45xxx DataFlash"
depends on SPI_MASTER
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index 7912d3a..1abde5d 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -10,6 +10,7 @@
obj-$(CONFIG_MTD_MTDRAM) += mtdram.o
obj-$(CONFIG_MTD_LART) += lart.o
obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o
+obj-$(CONFIG_MTD_MSM_QPIC_NAND) += msm_qpic_nand.o
obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o
obj-$(CONFIG_MTD_M25P80) += m25p80.o
obj-$(CONFIG_MTD_SPEAR_SMI) += spear_smi.o
diff --git a/drivers/mtd/devices/msm_qpic_nand.c b/drivers/mtd/devices/msm_qpic_nand.c
new file mode 100644
index 0000000..44b56b6
--- /dev/null
+++ b/drivers/mtd/devices/msm_qpic_nand.c
@@ -0,0 +1,3594 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_qpic_nand.h"
+
+#define QPIC_BAM_DEFAULT_IPC_LOGLVL 2
+
+/* The driver supports devices with page sizes up to 4K */
+#define MAX_CW_PER_PAGE 8
+/*
+ * Max descriptors needed for erase, read, write operations.
+ * Usually, this is (2 * MAX_CW_PER_PAGE).
+ */
+#define MAX_DESC 16
+
+static bool enable_euclean;
+
+/*
+ * Get DMA memory of the requested size. It returns a pointer to free
+ * memory from the allocated pool, or NULL if no free memory is
+ * available.
+ */
+static void *msm_nand_get_dma_buffer(struct msm_nand_chip *chip, size_t size)
+{
+ uint32_t bitmask, free_bitmask, old_bitmask;
+ uint32_t need_mask, current_need_mask;
+ int free_index;
+
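+	/* one bit per DMA buffer slot needed to satisfy the requested size */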
+ need_mask = (1UL << DIV_ROUND_UP(size, MSM_NAND_DMA_BUFFER_SLOT_SZ))
+ - 1;
+ bitmask = atomic_read(&chip->dma_buffer_busy);
+ free_bitmask = ~bitmask;
+ if (free_bitmask == 0)
+ return NULL;
+
+ do {
+ free_index = __ffs(free_bitmask);
+ current_need_mask = need_mask << free_index;
+
+ if (size + free_index * MSM_NAND_DMA_BUFFER_SLOT_SZ >=
+ MSM_NAND_DMA_BUFFER_SIZE)
+ return NULL;
+
+ if ((bitmask & current_need_mask) == 0) {
+ old_bitmask =
+ atomic_cmpxchg(&chip->dma_buffer_busy,
+ bitmask,
+ bitmask | current_need_mask);
+ if (old_bitmask == bitmask)
+ return chip->dma_virt_addr +
+ free_index * MSM_NAND_DMA_BUFFER_SLOT_SZ;
+ free_bitmask = 0;/* force return */
+ }
+		/*
+		 * current free range was too small; clear all free bits
+		 * below the top busy bit within current_need_mask
+		 */
+ free_bitmask &=
+ ~(~0U >> (32 - fls(bitmask & current_need_mask)));
+ } while (free_bitmask);
+
+ return NULL;
+}
+
+/*
+ * Releases the DMA memory back to the free pool and wakes up any user
+ * thread waiting on the wait queue for free memory to become available.
+ */
+static void msm_nand_release_dma_buffer(struct msm_nand_chip *chip,
+ void *buffer, size_t size)
+{
+ int index;
+ uint32_t used_mask;
+
+ used_mask = (1UL << DIV_ROUND_UP(size, MSM_NAND_DMA_BUFFER_SLOT_SZ))
+ - 1;
+ index = ((uint8_t *)buffer - chip->dma_virt_addr) /
+ MSM_NAND_DMA_BUFFER_SLOT_SZ;
+ atomic_sub(used_mask << index, &chip->dma_buffer_busy);
+
+ wake_up(&chip->dma_wait_queue);
+}
+
+/*
+ * Calculates the page address of the buffer passed and the offset of the
+ * buffer within that page, then maps it for DMA by calling dma_map_page().
+ */
+static dma_addr_t msm_nand_dma_map(struct device *dev, void *addr, size_t size,
+ enum dma_data_direction dir)
+{
+ struct page *page;
+ unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
+
+ if (virt_addr_valid(addr))
+ page = virt_to_page(addr);
+ else {
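+		/*
+		 * vmalloc'ed memory is not physically contiguous, so the
+		 * mapping must fit within a single page.
+		 */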
+ if (WARN_ON(size + offset > PAGE_SIZE))
+ return ~0;
+ page = vmalloc_to_page(addr);
+ }
+ return dma_map_page(dev, page, offset, size, dir);
+}
+
+#ifdef CONFIG_MSM_BUS_SCALING
+static int msm_nand_bus_set_vote(struct msm_nand_info *info,
+ unsigned int vote)
+{
+ int ret = 0;
+
+ ret = msm_bus_scale_client_update_request(info->clk_data.client_handle,
+ vote);
+ if (ret)
+ pr_err("msm_bus_scale_client_update_request() failed, bus_client_handle=0x%x, vote=%d, err=%d\n",
+ info->clk_data.client_handle, vote, ret);
+ return ret;
+}
+
+static int msm_nand_setup_clocks_and_bus_bw(struct msm_nand_info *info,
+ bool vote)
+{
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(info->clk_data.qpic_clk)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (atomic_read(&info->clk_data.clk_enabled) == vote)
+ goto out;
+ if (!atomic_read(&info->clk_data.clk_enabled) && vote) {
+ ret = msm_nand_bus_set_vote(info, 1);
+ if (ret) {
+ pr_err("Failed to vote for bus with %d\n", ret);
+ goto out;
+ }
+ ret = clk_prepare_enable(info->clk_data.qpic_clk);
+ if (ret) {
+ pr_err("Failed to enable the bus-clock with error %d\n",
+ ret);
+ msm_nand_bus_set_vote(info, 0);
+ goto out;
+ }
+ } else if (atomic_read(&info->clk_data.clk_enabled) && !vote) {
+ clk_disable_unprepare(info->clk_data.qpic_clk);
+ msm_nand_bus_set_vote(info, 0);
+ }
+ atomic_set(&info->clk_data.clk_enabled, vote);
+out:
+ return ret;
+}
+#else
+static int msm_nand_setup_clocks_and_bus_bw(struct msm_nand_info *info,
+ bool vote)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM
+static int msm_nand_runtime_suspend(struct device *dev)
+{
+ int ret = 0;
+ struct msm_nand_info *info = dev_get_drvdata(dev);
+
+ ret = msm_nand_setup_clocks_and_bus_bw(info, false);
+
+ return ret;
+}
+
+static int msm_nand_runtime_resume(struct device *dev)
+{
+ int ret = 0;
+ struct msm_nand_info *info = dev_get_drvdata(dev);
+
+ ret = msm_nand_setup_clocks_and_bus_bw(info, true);
+
+ return ret;
+}
+
+static void msm_nand_print_rpm_info(struct device *dev)
+{
+ pr_err("RPM: runtime_status=%d, usage_count=%d, is_suspended=%d, disable_depth=%d, runtime_error=%d, request_pending=%d, request=%d\n",
+ dev->power.runtime_status, atomic_read(&dev->power.usage_count),
+ dev->power.is_suspended, dev->power.disable_depth,
+ dev->power.runtime_error, dev->power.request_pending,
+ dev->power.request);
+}
+#else
+static int msm_nand_runtime_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int msm_nand_runtime_resume(struct device *dev)
+{
+ return 0;
+}
+
+static void msm_nand_print_rpm_info(struct device *dev)
+{
+}
+#endif
+
+#ifdef CONFIG_PM
+static int msm_nand_suspend(struct device *dev)
+{
+ int ret = 0;
+
+ if (!pm_runtime_suspended(dev))
+ ret = msm_nand_runtime_suspend(dev);
+
+ return ret;
+}
+
+static int msm_nand_resume(struct device *dev)
+{
+ int ret = 0;
+
+ if (!pm_runtime_suspended(dev))
+ ret = msm_nand_runtime_resume(dev);
+
+ return ret;
+}
+#else
+static int msm_nand_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int msm_nand_resume(struct device *dev)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM
+static int msm_nand_get_device(struct device *dev)
+{
+ int ret = 0;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pr_err("Failed to resume with %d\n", ret);
+ msm_nand_print_rpm_info(dev);
+ } else { /* Reset to success */
+ ret = 0;
+ }
+ return ret;
+}
+
+static int msm_nand_put_device(struct device *dev)
+{
+ int ret = 0;
+
+ pm_runtime_mark_last_busy(dev);
+ ret = pm_runtime_put_autosuspend(dev);
+ if (ret < 0) {
+ pr_err("Failed to suspend with %d\n", ret);
+ msm_nand_print_rpm_info(dev);
+ } else { /* Reset to success */
+ ret = 0;
+ }
+ return ret;
+}
+#else
+static int msm_nand_get_device(struct device *dev)
+{
+ return 0;
+}
+
+static int msm_nand_put_device(struct device *dev)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_MSM_BUS_SCALING
+static int msm_nand_bus_register(struct platform_device *pdev,
+ struct msm_nand_info *info)
+{
+ int ret = 0;
+
+ info->clk_data.use_cases = msm_bus_cl_get_pdata(pdev);
+ if (!info->clk_data.use_cases) {
+ ret = -EINVAL;
+ pr_err("msm_bus_cl_get_pdata failed\n");
+ goto out;
+ }
+ info->clk_data.client_handle =
+ msm_bus_scale_register_client(info->clk_data.use_cases);
+ if (!info->clk_data.client_handle) {
+ ret = -EINVAL;
+ pr_err("msm_bus_scale_register_client failed\n");
+ }
+out:
+ return ret;
+}
+
+static void msm_nand_bus_unregister(struct msm_nand_info *info)
+{
+ if (info->clk_data.client_handle)
+ msm_bus_scale_unregister_client(info->clk_data.client_handle);
+}
+#else
+static int msm_nand_bus_register(struct platform_device *pdev,
+ struct msm_nand_info *info)
+{
+ return 0;
+}
+
+static void msm_nand_bus_unregister(struct msm_nand_info *info)
+{
+}
+#endif
+
+/*
+ * Wrapper function to prepare a single SPS command element with the data
+ * that is passed to this function.
+ */
+static inline void msm_nand_prep_ce(struct sps_command_element *ce,
+ uint32_t addr, uint32_t command, uint32_t data)
+{
+ ce->addr = addr;
+ ce->command = (command & WRITE) ? (uint32_t) SPS_WRITE_COMMAND :
+ (uint32_t) SPS_READ_COMMAND;
+ ce->data = data;
+ ce->mask = 0xFFFFFFFF;
+}
+
+static int msm_nand_sps_get_iovec(struct sps_pipe *pipe, uint32_t indx,
+ unsigned int cnt, struct sps_iovec *iovec)
+{
+ int ret = 0;
+
+ do {
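+		/* spin until a non-empty iovec is returned */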
+		do {
+			ret = sps_get_iovec(pipe, iovec);
+		} while ((iovec->addr == 0x0) && (iovec->size == 0x0));
+		if (ret)
+			return ret;
+	} while (--cnt);
+ return ret;
+}
+
+/*
+ * Wrapper function to prepare a single command descriptor containing a
+ * single SPS command element built from the data passed to this function.
+ *
+ * Since every command element must have the SPS_IOVEC_FLAG_CMD flag set,
+ * this function sets it by default, so the caller need not pass it
+ * explicitly. Other flags must be passed as needed; if a command element
+ * needs no other flag, 0 can be passed for flags.
+ */
+static inline void msm_nand_prep_single_desc(struct msm_nand_sps_cmd *sps_cmd,
+ uint32_t addr, uint32_t command,
+ uint32_t data, uint32_t flags)
+{
+ msm_nand_prep_ce(&sps_cmd->ce, addr, command, data);
+ sps_cmd->flags = SPS_IOVEC_FLAG_CMD | flags;
+}
+
+/*
+ * Read a single NANDc register at the address given by addr. The return
+ * value indicates whether the read was successful. The register value read
+ * is stored in val.
+ */
+static int msm_nand_flash_rd_reg(struct msm_nand_info *info, uint32_t addr,
+ uint32_t *val)
+{
+ int ret = 0, submitted_num_desc = 1;
+ struct msm_nand_sps_cmd *cmd;
+ struct msm_nand_chip *chip = &info->nand_chip;
+ struct {
+ struct msm_nand_sps_cmd cmd;
+ uint32_t data;
+ } *dma_buffer;
+ struct sps_iovec iovec_temp;
+
+ wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
+ chip, sizeof(*dma_buffer))));
+ cmd = &dma_buffer->cmd;
+ msm_nand_prep_single_desc(cmd, addr, READ, msm_virt_to_dma(chip,
+ &dma_buffer->data), SPS_IOVEC_FLAG_INT);
+
+ mutex_lock(&info->lock);
+ ret = msm_nand_get_device(chip->dev);
+ if (ret)
+ goto out;
+ ret = sps_transfer_one(info->sps.cmd_pipe.handle,
+ msm_virt_to_dma(chip, &cmd->ce),
+ sizeof(struct sps_command_element), NULL, cmd->flags);
+ if (ret) {
+ pr_err("failed to submit command %x ret %d\n", addr, ret);
+ msm_nand_put_device(chip->dev);
+ goto out;
+ }
+ ret = msm_nand_sps_get_iovec(info->sps.cmd_pipe.handle,
+ info->sps.cmd_pipe.index, submitted_num_desc,
+ &iovec_temp);
+	if (ret) {
+		pr_err("Failed to get iovec for pipe %d: (ret:%d)\n",
+			(info->sps.cmd_pipe.index), ret);
+		msm_nand_put_device(chip->dev);
+		goto out;
+	}
+ ret = msm_nand_put_device(chip->dev);
+ if (ret)
+ goto out;
+ *val = dma_buffer->data;
+out:
+ mutex_unlock(&info->lock);
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+ return ret;
+}
+
+/*
+ * Read the Flash ID from the NAND flash device. A return value < 0
+ * indicates failure. When successful, the Flash ID is stored in the
+ * read_id parameter.
+ */
+#define READID_CMDS 5
+static int msm_nand_flash_read_id(struct msm_nand_info *info,
+ bool read_onfi_signature, uint32_t *read_id,
+ uint32_t *read_id2)
+{
+ int err = 0, i = 0;
+ struct msm_nand_sps_cmd *cmd;
+ struct sps_iovec *iovec;
+ struct sps_iovec iovec_temp;
+ struct msm_nand_chip *chip = &info->nand_chip;
+ /*
+	 * The following 5 commands are required to read the ID -
+	 * write commands - addr0, flash, exec
+	 * read commands - read_id, read_id2
+ */
+ struct {
+ struct sps_transfer xfer;
+ struct sps_iovec cmd_iovec[READID_CMDS];
+ struct msm_nand_sps_cmd cmd[READID_CMDS];
+ uint32_t data[READID_CMDS];
+ } *dma_buffer;
+
+ wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer
+ (chip, sizeof(*dma_buffer))));
+ if (read_onfi_signature)
+ dma_buffer->data[0] = FLASH_READ_ONFI_SIGNATURE_ADDRESS;
+ else
+ dma_buffer->data[0] = FLASH_READ_DEVICE_ID_ADDRESS;
+
+ dma_buffer->data[1] = EXTENDED_FETCH_ID | MSM_NAND_CMD_FETCH_ID;
+ dma_buffer->data[2] = 1;
+ dma_buffer->data[3] = 0xeeeeeeee;
+ dma_buffer->data[4] = 0xeeeeeeee;
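+	/* poison values; overwritten by the ID read-back descriptors */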
+
+ cmd = dma_buffer->cmd;
+ msm_nand_prep_single_desc(cmd, MSM_NAND_ADDR0(info), WRITE,
+ dma_buffer->data[0], SPS_IOVEC_FLAG_LOCK);
+ cmd++;
+
+ msm_nand_prep_single_desc(cmd, MSM_NAND_FLASH_CMD(info), WRITE,
+ dma_buffer->data[1], 0);
+ cmd++;
+
+ msm_nand_prep_single_desc(cmd, MSM_NAND_EXEC_CMD(info), WRITE,
+ dma_buffer->data[2], SPS_IOVEC_FLAG_NWD);
+ cmd++;
+
+ msm_nand_prep_single_desc(cmd, MSM_NAND_READ_ID(info), READ,
+ msm_virt_to_dma(chip, &dma_buffer->data[3]), 0);
+ cmd++;
+
+ msm_nand_prep_single_desc(cmd, MSM_NAND_READ_ID2(info), READ,
+ msm_virt_to_dma(chip, &dma_buffer->data[4]),
+ SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_INT);
+ cmd++;
+
+ WARN_ON(cmd - dma_buffer->cmd > READID_CMDS);
+ dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
+ dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
+ dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
+ &dma_buffer->cmd_iovec);
+ iovec = dma_buffer->xfer.iovec;
+
+ for (i = 0; i < dma_buffer->xfer.iovec_count; i++) {
+ iovec->addr = msm_virt_to_dma(chip, &dma_buffer->cmd[i].ce);
+ iovec->size = sizeof(struct sps_command_element);
+ iovec->flags = dma_buffer->cmd[i].flags;
+ iovec++;
+ }
+
+ mutex_lock(&info->lock);
+ err = msm_nand_get_device(chip->dev);
+ if (err)
+ goto out;
+ err = sps_transfer(info->sps.cmd_pipe.handle, &dma_buffer->xfer);
+ if (err) {
+ pr_err("Failed to submit commands %d\n", err);
+ msm_nand_put_device(chip->dev);
+ goto out;
+ }
+ err = msm_nand_sps_get_iovec(info->sps.cmd_pipe.handle,
+ info->sps.cmd_pipe.index, dma_buffer->xfer.iovec_count,
+ &iovec_temp);
+
+	if (err) {
+		pr_err("Failed to get iovec for pipe %d: (err:%d)\n",
+			(info->sps.cmd_pipe.index), err);
+		msm_nand_put_device(chip->dev);
+		goto out;
+	}
+ pr_debug("Read ID register value 0x%x\n", dma_buffer->data[3]);
+ if (!read_onfi_signature)
+ pr_debug("nandid: %x maker %02x device %02x\n",
+ dma_buffer->data[3], dma_buffer->data[3] & 0xff,
+ (dma_buffer->data[3] >> 8) & 0xff);
+ *read_id = dma_buffer->data[3];
+ if (read_id2) {
+ pr_debug("Extended Read ID register value 0x%x\n",
+ dma_buffer->data[4]);
+ *read_id2 = dma_buffer->data[4];
+ }
+ err = msm_nand_put_device(chip->dev);
+out:
+ mutex_unlock(&info->lock);
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+ return err;
+}
+
+/*
+ * Contains data for common configuration registers that must be programmed
+ * for every NANDc operation.
+ */
+struct msm_nand_common_cfgs {
+ uint32_t cmd;
+ uint32_t addr0;
+ uint32_t addr1;
+ uint32_t cfg0;
+ uint32_t cfg1;
+};
+
+/*
+ * Function to prepare SPS command elements to write into NANDc configuration
+ * registers as per the data defined in struct msm_nand_common_cfgs. This is
+ * required for the following NANDc operations - erase, bad block checking,
+ * and reading the ONFI parameter page.
+ */
+static void msm_nand_prep_cfg_cmd_desc(struct msm_nand_info *info,
+ struct msm_nand_common_cfgs data,
+ struct msm_nand_sps_cmd **curr_cmd)
+{
+ struct msm_nand_sps_cmd *cmd;
+
+ cmd = *curr_cmd;
+ msm_nand_prep_single_desc(cmd, MSM_NAND_FLASH_CMD(info), WRITE,
+ data.cmd, SPS_IOVEC_FLAG_LOCK);
+ cmd++;
+
+ msm_nand_prep_single_desc(cmd, MSM_NAND_ADDR0(info), WRITE,
+ data.addr0, 0);
+ cmd++;
+
+ msm_nand_prep_single_desc(cmd, MSM_NAND_ADDR1(info), WRITE,
+ data.addr1, 0);
+ cmd++;
+
+ msm_nand_prep_single_desc(cmd, MSM_NAND_DEV0_CFG0(info), WRITE,
+ data.cfg0, 0);
+ cmd++;
+
+ msm_nand_prep_single_desc(cmd, MSM_NAND_DEV0_CFG1(info), WRITE,
+ data.cfg1, 0);
+ cmd++;
+ *curr_cmd = cmd;
+}
+
+/*
+ * Function to perform the CRC integrity check on an ONFI parameter page
+ * read. The controller ECC is disabled for this read, so the CRC must be
+ * computed manually and checked against the value stored within the ONFI
+ * page.
+ */
+static uint16_t msm_nand_flash_onfi_crc_check(uint8_t *buffer, uint16_t count)
+{
+ int i;
+ uint16_t result;
+
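+	/*
+	 * The kernel crc16() is bit-reflected while the ONFI CRC (poly
+	 * 0x8005, seed 0x4F4E) is not, so reverse the bit order of the
+	 * data and the seed around the computation and restore the
+	 * buffer afterwards.
+	 */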
+ for (i = 0; i < count; i++)
+ buffer[i] = bitrev8(buffer[i]);
+
+ result = bitrev16(crc16(bitrev16(0x4f4e), buffer, count));
+
+ for (i = 0; i < count; i++)
+ buffer[i] = bitrev8(buffer[i]);
+
+ return result;
+}
+
+/*
+ * Structure that contains NANDc register data for commands required
+ * for reading ONFI parameter page.
+ */
+struct msm_nand_flash_onfi_data {
+ struct msm_nand_common_cfgs cfg;
+ uint32_t exec;
+ uint32_t ecc_bch_cfg;
+};
+
+struct version {
+ uint16_t nand_major;
+ uint16_t nand_minor;
+ uint16_t qpic_major;
+ uint16_t qpic_minor;
+};
+
+static int msm_nand_version_check(struct msm_nand_info *info,
+ struct version *nandc_version)
+{
+ uint32_t qpic_ver = 0, nand_ver = 0;
+ int err = 0;
+
+	/* Look up the version to identify supported features */
+ err = msm_nand_flash_rd_reg(info, MSM_NAND_VERSION(info),
+ &nand_ver);
+ if (err) {
+ pr_err("Failed to read NAND_VERSION, err=%d\n", err);
+ goto out;
+ }
+ nandc_version->nand_major = (nand_ver & MSM_NAND_VERSION_MAJOR_MASK) >>
+ MSM_NAND_VERSION_MAJOR_SHIFT;
+ nandc_version->nand_minor = (nand_ver & MSM_NAND_VERSION_MINOR_MASK) >>
+ MSM_NAND_VERSION_MINOR_SHIFT;
+
+ err = msm_nand_flash_rd_reg(info, MSM_NAND_QPIC_VERSION(info),
+ &qpic_ver);
+ if (err) {
+ pr_err("Failed to read QPIC_VERSION, err=%d\n", err);
+ goto out;
+ }
+ nandc_version->qpic_major = (qpic_ver & MSM_NAND_VERSION_MAJOR_MASK) >>
+ MSM_NAND_VERSION_MAJOR_SHIFT;
+ nandc_version->qpic_minor = (qpic_ver & MSM_NAND_VERSION_MINOR_MASK) >>
+ MSM_NAND_VERSION_MINOR_SHIFT;
+ pr_info("nand_major:%d, nand_minor:%d, qpic_major:%d, qpic_minor:%d\n",
+ nandc_version->nand_major, nandc_version->nand_minor,
+ nandc_version->qpic_major, nandc_version->qpic_minor);
+out:
+ return err;
+}
+
+/*
+ * Function to identify whether the attached NAND flash device is
+ * compliant with the ONFI spec. If so, it reads the ONFI parameter
+ * page to get the device parameters.
+ */
+#define ONFI_CMDS 9
+static int msm_nand_flash_onfi_probe(struct msm_nand_info *info)
+{
+ struct msm_nand_chip *chip = &info->nand_chip;
+ struct flash_identification *flash = &info->flash_dev;
+ uint32_t crc_chk_count = 0, page_address = 0;
+ int ret = 0, i = 0, submitted_num_desc = 1;
+
+ /* SPS parameters */
+ struct msm_nand_sps_cmd *cmd, *curr_cmd;
+ struct sps_iovec *iovec;
+ struct sps_iovec iovec_temp;
+ uint32_t rdata;
+
+ /* ONFI Identifier/Parameter Page parameters */
+ uint8_t *onfi_param_info_buf = NULL;
+ dma_addr_t dma_addr_param_info = 0;
+ struct onfi_param_page *onfi_param_page_ptr;
+ struct msm_nand_flash_onfi_data data;
+ uint32_t onfi_signature = 0;
+
+ /*
+	 * The following 9 commands are required to get the ONFI parameters -
+ * flash, addr0, addr1, cfg0, cfg1, dev0_ecc_cfg,
+ * read_loc_0, exec, flash_status (read cmd).
+ */
+ struct {
+ struct sps_transfer xfer;
+ struct sps_iovec cmd_iovec[ONFI_CMDS];
+ struct msm_nand_sps_cmd cmd[ONFI_CMDS];
+ uint32_t flash_status;
+ } *dma_buffer;
+
+	/* Look up the version to identify supported features */
+ struct version nandc_version = {0};
+
+ ret = msm_nand_version_check(info, &nandc_version);
+ if (!ret && !(nandc_version.nand_major == 1 &&
+ nandc_version.nand_minor >= 5 &&
+ nandc_version.qpic_major == 1 &&
+ nandc_version.qpic_minor >= 5)) {
+ ret = -EPERM;
+ goto out;
+ }
+ wait_event(chip->dma_wait_queue, (onfi_param_info_buf =
+ msm_nand_get_dma_buffer(chip, ONFI_PARAM_INFO_LENGTH)));
+ dma_addr_param_info = msm_virt_to_dma(chip, onfi_param_info_buf);
+
+ wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer
+ (chip, sizeof(*dma_buffer))));
+
+ ret = msm_nand_flash_read_id(info, 1, &onfi_signature, NULL);
+ if (ret < 0) {
+ pr_err("Failed to read ONFI signature\n");
+ goto free_dma;
+ }
+ if (onfi_signature != ONFI_PARAMETER_PAGE_SIGNATURE) {
+ pr_info("Found a non ONFI device\n");
+ ret = -EIO;
+ goto free_dma;
+ }
+
+ memset(&data, 0, sizeof(struct msm_nand_flash_onfi_data));
+
+	/* Look up the partition to which the apps processor has access */
+ for (i = 0; i < FLASH_PTABLE_MAX_PARTS_V4; i++) {
+ if (mtd_part[i].name && !strcmp("boot", mtd_part[i].name)) {
+ page_address = mtd_part[i].offset << 6;
+ break;
+ }
+ }
+ if (!page_address) {
+ pr_info("%s: no apps partition found in smem\n", __func__);
+ ret = -EPERM;
+ goto free_dma;
+ }
+ data.cfg.cmd = MSM_NAND_CMD_PAGE_READ_ONFI;
+ data.exec = 1;
+ data.cfg.addr0 = (page_address << 16) |
+ FLASH_READ_ONFI_PARAMETERS_ADDRESS;
+ data.cfg.addr1 = (page_address >> 16) & 0xFF;
+ data.cfg.cfg0 = MSM_NAND_CFG0_RAW_ONFI_PARAM_INFO;
+ data.cfg.cfg1 = MSM_NAND_CFG1_RAW_ONFI_PARAM_INFO;
+ data.ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
+ dma_buffer->flash_status = 0xeeeeeeee;
+
+ curr_cmd = cmd = dma_buffer->cmd;
+ msm_nand_prep_cfg_cmd_desc(info, data.cfg, &curr_cmd);
+
+ cmd = curr_cmd;
+ msm_nand_prep_single_desc(cmd, MSM_NAND_DEV0_ECC_CFG(info), WRITE,
+ data.ecc_bch_cfg, 0);
+ cmd++;
+
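+	/*
+	 * READ_LOCATION_0: offset 0, ONFI_PARAM_INFO_LENGTH bytes, and the
+	 * top bit marks this as the last read location.
+	 */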
+ rdata = (0 << 0) | (ONFI_PARAM_INFO_LENGTH << 16) | (1 << 31);
+ msm_nand_prep_single_desc(cmd, MSM_NAND_READ_LOCATION_0(info), WRITE,
+ rdata, 0);
+ cmd++;
+
+ msm_nand_prep_single_desc(cmd, MSM_NAND_EXEC_CMD(info), WRITE,
+ data.exec, SPS_IOVEC_FLAG_NWD);
+ cmd++;
+
+ msm_nand_prep_single_desc(cmd, MSM_NAND_FLASH_STATUS(info), READ,
+ msm_virt_to_dma(chip, &dma_buffer->flash_status),
+ SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_INT);
+ cmd++;
+
+ WARN_ON(cmd - dma_buffer->cmd > ONFI_CMDS);
+ dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
+ dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
+ dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
+ &dma_buffer->cmd_iovec);
+ iovec = dma_buffer->xfer.iovec;
+
+ for (i = 0; i < dma_buffer->xfer.iovec_count; i++) {
+ iovec->addr = msm_virt_to_dma(chip,
+ &dma_buffer->cmd[i].ce);
+ iovec->size = sizeof(struct sps_command_element);
+ iovec->flags = dma_buffer->cmd[i].flags;
+ iovec++;
+ }
+ mutex_lock(&info->lock);
+ ret = msm_nand_get_device(chip->dev);
+ if (ret)
+ goto unlock_mutex;
+ /* Submit data descriptor */
+ ret = sps_transfer_one(info->sps.data_prod.handle, dma_addr_param_info,
+ ONFI_PARAM_INFO_LENGTH, NULL, SPS_IOVEC_FLAG_INT);
+ if (ret) {
+ pr_err("Failed to submit data descriptors %d\n", ret);
+ goto put_dev;
+ }
+ /* Submit command descriptors */
+ ret = sps_transfer(info->sps.cmd_pipe.handle,
+ &dma_buffer->xfer);
+ if (ret) {
+ pr_err("Failed to submit commands %d\n", ret);
+ goto put_dev;
+ }
+
+ ret = msm_nand_sps_get_iovec(info->sps.cmd_pipe.handle,
+ info->sps.cmd_pipe.index, dma_buffer->xfer.iovec_count,
+ &iovec_temp);
+
+ if (ret) {
+ pr_err("Failed to get iovec for pipe %d: (ret:%d)\n",
+ (info->sps.cmd_pipe.index), ret);
+ goto put_dev;
+ }
+ ret = msm_nand_sps_get_iovec(info->sps.data_prod.handle,
+ info->sps.data_prod.index, submitted_num_desc,
+ &iovec_temp);
+ if (ret) {
+ pr_err("Failed to get iovec for pipe %d: (ret:%d)\n",
+ (info->sps.data_prod.index), ret);
+ goto put_dev;
+ }
+
+ ret = msm_nand_put_device(chip->dev);
+ mutex_unlock(&info->lock);
+ if (ret)
+ goto free_dma;
+
+ /* Check for flash status errors */
+ if (dma_buffer->flash_status & (FS_OP_ERR | FS_MPU_ERR)) {
+ pr_err("MPU/OP err (0x%x) is set\n", dma_buffer->flash_status);
+ ret = -EIO;
+ goto free_dma;
+ }
+
+ for (crc_chk_count = 0; crc_chk_count < ONFI_PARAM_INFO_LENGTH
+ / ONFI_PARAM_PAGE_LENGTH; crc_chk_count++) {
+ onfi_param_page_ptr =
+ (struct onfi_param_page *)
+ (&(onfi_param_info_buf
+ [ONFI_PARAM_PAGE_LENGTH *
+ crc_chk_count]));
+ if (msm_nand_flash_onfi_crc_check(
+ (uint8_t *)onfi_param_page_ptr,
+ ONFI_PARAM_PAGE_LENGTH - 2) ==
+ onfi_param_page_ptr->integrity_crc) {
+ break;
+ }
+ }
+ if (crc_chk_count >= ONFI_PARAM_INFO_LENGTH
+ / ONFI_PARAM_PAGE_LENGTH) {
+ pr_err("CRC Check failed on param page\n");
+ ret = -EIO;
+ goto free_dma;
+ }
+ ret = msm_nand_flash_read_id(info, 0, &flash->flash_id, NULL);
+ if (ret < 0) {
+ pr_err("Failed to read flash ID\n");
+ goto free_dma;
+ }
+ flash->widebus = onfi_param_page_ptr->features_supported & 0x01;
+ flash->pagesize = onfi_param_page_ptr->number_of_data_bytes_per_page;
+ flash->blksize = onfi_param_page_ptr->number_of_pages_per_block *
+ flash->pagesize;
+ flash->oobsize = onfi_param_page_ptr->number_of_spare_bytes_per_page;
+ flash->density = onfi_param_page_ptr->number_of_blocks_per_logical_unit
+ * flash->blksize;
+ flash->ecc_correctability = onfi_param_page_ptr->
+ number_of_bits_ecc_correctability;
+
+ pr_info("Found an ONFI compliant device %s\n",
+ onfi_param_page_ptr->device_model);
+	/*
+	 * Temporary hack for the MT29F4G08ABC device. The device does
+	 * not properly adhere to the ONFI specification, so it reports
+	 * itself as a 16-bit device even though it is an 8-bit device.
+	 */
+ if (!strcmp(onfi_param_page_ptr->device_model, "MT29F4G08ABC"))
+ flash->widebus = 0;
+ goto free_dma;
+put_dev:
+ msm_nand_put_device(chip->dev);
+unlock_mutex:
+ mutex_unlock(&info->lock);
+free_dma:
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+ msm_nand_release_dma_buffer(chip, onfi_param_info_buf,
+ ONFI_PARAM_INFO_LENGTH);
+out:
+ return ret;
+}
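+
+/*
+ * Illustrative sketch, not part of this change: the integrity check
+ * applied to each copy of the ONFI parameter page above. Per the ONFI
+ * specification, the CRC-16 covers the first 254 bytes of the 256-byte
+ * parameter page (polynomial 0x8005, initial value 0x4F4E) and is
+ * compared against the integrity_crc field. The helper name below is
+ * hypothetical; the driver's msm_nand_flash_onfi_crc_check() is assumed
+ * to implement the equivalent.
+ */
+static inline uint16_t onfi_crc16_sketch(const uint8_t *buf, size_t len)
+{
+	uint16_t crc = 0x4F4E;	/* initial value defined by ONFI */
+	size_t i;
+	int bit;
+
+	for (i = 0; i < len; i++) {
+		crc ^= (uint16_t)buf[i] << 8;
+		for (bit = 0; bit < 8; bit++)
+			crc = (crc & 0x8000) ?
+				(crc << 1) ^ 0x8005 : (crc << 1);
+	}
+	return crc;
+}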
+
+/*
+ * Structure that contains read/write parameters required for reading/writing
+ * from/to a page.
+ */
+struct msm_nand_rw_params {
+ uint32_t page;
+ uint32_t page_count;
+ uint32_t sectordatasize;
+ uint32_t sectoroobsize;
+ uint32_t cwperpage;
+ uint32_t oob_len_cmd;
+ uint32_t oob_len_data;
+ uint32_t start_sector;
+ uint32_t oob_col;
+ dma_addr_t data_dma_addr;
+ dma_addr_t oob_dma_addr;
+ dma_addr_t ecc_dma_addr;
+ dma_addr_t data_dma_addr_curr;
+ dma_addr_t oob_dma_addr_curr;
+ dma_addr_t ecc_dma_addr_curr;
+ bool read;
+};
+
+/*
+ * Structure that contains NANDc register data required for reading/writing
+ * from/to a page.
+ */
+struct msm_nand_rw_reg_data {
+ uint32_t cmd;
+ uint32_t addr0;
+ uint32_t addr1;
+ uint32_t cfg0;
+ uint32_t cfg1;
+ uint32_t ecc_bch_cfg;
+ uint32_t exec;
+ uint32_t ecc_cfg;
+ uint32_t clrfstatus;
+ uint32_t clrrstatus;
+};
+
+/*
+ * Function that validates page read/write MTD parameters received from upper
+ * layers such as MTD/YAFFS2 and returns error for any unsupported operations
+ * by the driver. In case of success, it also maps the data and oob buffer
+ * received for DMA.
+ */
+static int msm_nand_validate_mtd_params(struct mtd_info *mtd, bool read,
+ loff_t offset,
+ struct mtd_oob_ops *ops,
+ struct msm_nand_rw_params *args)
+{
+ struct msm_nand_info *info = mtd->priv;
+ struct msm_nand_chip *chip = &info->nand_chip;
+ int err = 0;
+
+ pr_debug("========================================================\n");
+ pr_debug("offset 0x%llx mode %d\ndatbuf 0x%pK datlen 0x%x\n",
+ offset, ops->mode, ops->datbuf, ops->len);
+ pr_debug("oobbuf 0x%pK ooblen 0x%x\n", ops->oobbuf, ops->ooblen);
+
+ if (ops->mode == MTD_OPS_PLACE_OOB) {
+ pr_err("MTD_OPS_PLACE_OOB is not supported\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (mtd->writesize == PAGE_SIZE_2K)
+ args->page = offset >> 11;
+
+ if (mtd->writesize == PAGE_SIZE_4K)
+ args->page = offset >> 12;
+
+ args->oob_len_cmd = ops->ooblen;
+ args->oob_len_data = ops->ooblen;
+ args->cwperpage = (mtd->writesize >> 9);
+ args->read = (read ? true : false);
+
+ if (offset & (mtd->writesize - 1)) {
+ pr_err("unsupported offset 0x%llx\n", offset);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (!read && !ops->datbuf) {
+ pr_err("No data buffer provided for write!!\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (ops->mode == MTD_OPS_RAW) {
+ if (!ops->datbuf) {
+ pr_err("No data buffer provided for RAW mode\n");
+ err = -EINVAL;
+ goto out;
+ } else if ((ops->len % (mtd->writesize +
+ mtd->oobsize)) != 0) {
+ pr_err("unsupported data len %d for RAW mode\n",
+ ops->len);
+ err = -EINVAL;
+ goto out;
+ }
+ args->page_count = ops->len / (mtd->writesize + mtd->oobsize);
+
+ } else if (ops->mode == MTD_OPS_AUTO_OOB) {
+ if (ops->datbuf && (ops->len % mtd->writesize) != 0) {
+ /* when ops->datbuf is NULL, ops->len can be ooblen */
+ pr_err("unsupported data len %d for AUTO mode\n",
+ ops->len);
+ err = -EINVAL;
+ goto out;
+ }
+ if (read && ops->oobbuf && !ops->datbuf) {
+ args->start_sector = args->cwperpage - 1;
+ args->page_count = ops->ooblen / mtd->oobavail;
+ if ((args->page_count == 0) && (ops->ooblen))
+ args->page_count = 1;
+ } else if (ops->datbuf) {
+ args->page_count = ops->len / mtd->writesize;
+ }
+ }
+
+ if (ops->datbuf) {
+ if (read)
+ memset(ops->datbuf, 0xFF, ops->len);
+ args->data_dma_addr_curr = args->data_dma_addr =
+ msm_nand_dma_map(chip->dev, ops->datbuf, ops->len,
+ (read ? DMA_FROM_DEVICE : DMA_TO_DEVICE));
+ if (dma_mapping_error(chip->dev, args->data_dma_addr)) {
+ pr_err("dma mapping failed for 0x%pK\n", ops->datbuf);
+ err = -EIO;
+ goto out;
+ }
+ }
+ if (ops->oobbuf) {
+ if (read)
+ memset(ops->oobbuf, 0xFF, ops->ooblen);
+ args->oob_dma_addr_curr = args->oob_dma_addr =
+ msm_nand_dma_map(chip->dev, ops->oobbuf, ops->ooblen,
+ (read ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE));
+ if (dma_mapping_error(chip->dev, args->oob_dma_addr)) {
+ pr_err("dma mapping failed for 0x%pK\n", ops->oobbuf);
+ err = -EIO;
+ goto dma_map_oobbuf_failed;
+ }
+ }
+ goto out;
+dma_map_oobbuf_failed:
+ if (ops->datbuf)
+ dma_unmap_page(chip->dev, args->data_dma_addr, ops->len,
+ (read ? DMA_FROM_DEVICE : DMA_TO_DEVICE));
+out:
+ return err;
+}
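+
+/*
+ * Illustrative sketch, not part of this change: the page-index math
+ * used above and repeated in the erase/isbad paths. A 2K page implies
+ * a shift of 11 (2048 = 1 << 11) and a 4K page a shift of 12
+ * (4096 = 1 << 12), i.e. page = offset / writesize. The helper name is
+ * hypothetical; ffs() is from <linux/bitops.h>.
+ */
+static inline uint32_t msm_nand_page_index_sketch(loff_t offset,
+						  uint32_t writesize)
+{
+	/* writesize is a power of two (2K or 4K) */
+	return (uint32_t)(offset >> (ffs(writesize) - 1));
+}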
+
+/*
+ * Function that updates NANDc register data (struct msm_nand_rw_reg_data)
+ * required for page read/write.
+ */
+static void msm_nand_update_rw_reg_data(struct msm_nand_chip *chip,
+ struct mtd_oob_ops *ops,
+ struct msm_nand_rw_params *args,
+ struct msm_nand_rw_reg_data *data)
+{
+ if (args->read) {
+ if (ops->mode != MTD_OPS_RAW) {
+ data->cmd = MSM_NAND_CMD_PAGE_READ_ECC;
+ data->cfg0 =
+ (chip->cfg0 & ~(7U << CW_PER_PAGE)) |
+ (((args->cwperpage-1) - args->start_sector)
+ << CW_PER_PAGE);
+ data->cfg1 = chip->cfg1;
+ data->ecc_bch_cfg = chip->ecc_bch_cfg;
+ } else {
+ data->cmd = MSM_NAND_CMD_PAGE_READ_ALL;
+ data->cfg0 =
+ (chip->cfg0_raw & ~(7U << CW_PER_PAGE)) |
+ (((args->cwperpage-1) - args->start_sector)
+ << CW_PER_PAGE);
+ data->cfg1 = chip->cfg1_raw;
+ data->ecc_bch_cfg = chip->ecc_cfg_raw;
+ }
+
+ } else {
+ if (ops->mode != MTD_OPS_RAW) {
+ data->cmd = MSM_NAND_CMD_PRG_PAGE;
+ data->cfg0 = chip->cfg0;
+ data->cfg1 = chip->cfg1;
+ data->ecc_bch_cfg = chip->ecc_bch_cfg;
+ } else {
+ data->cmd = MSM_NAND_CMD_PRG_PAGE_ALL;
+ data->cfg0 = chip->cfg0_raw;
+ data->cfg1 = chip->cfg1_raw;
+ data->ecc_bch_cfg = chip->ecc_cfg_raw;
+ }
+ data->clrfstatus = MSM_NAND_RESET_FLASH_STS;
+ data->clrrstatus = MSM_NAND_RESET_READ_STS;
+ }
+ data->exec = 1;
+ data->ecc_cfg = chip->ecc_buf_cfg;
+}
+
+/*
+ * Function to prepare series of SPS command descriptors required for a page
+ * read/write operation.
+ */
+static void msm_nand_prep_rw_cmd_desc(struct mtd_oob_ops *ops,
+ struct msm_nand_rw_params *args,
+ struct msm_nand_rw_reg_data *data,
+ struct msm_nand_info *info,
+ uint32_t curr_cw,
+ struct msm_nand_rw_cmd_desc *cmd_list,
+ uint32_t *cw_desc_cnt,
+ uint32_t ecc_parity_bytes)
+{
+ struct msm_nand_chip *chip = &info->nand_chip;
+ uint32_t rdata;
+ /* read_location register parameters */
+ uint32_t offset, size, last_read;
+ struct sps_command_element *curr_ce, *start_ce;
+ uint32_t *flags_ptr, *num_ce_ptr;
+
+ if (curr_cw == args->start_sector) {
+ curr_ce = start_ce = &cmd_list->setup_desc.ce[0];
+ num_ce_ptr = &cmd_list->setup_desc.num_ce;
+ flags_ptr = &cmd_list->setup_desc.flags;
+ *flags_ptr = CMD_LCK;
+ cmd_list->count = 1;
+ msm_nand_prep_ce(curr_ce, MSM_NAND_FLASH_CMD(info), WRITE,
+ data->cmd);
+ curr_ce++;
+
+ msm_nand_prep_ce(curr_ce, MSM_NAND_ADDR0(info), WRITE,
+ data->addr0);
+ curr_ce++;
+
+ msm_nand_prep_ce(curr_ce, MSM_NAND_ADDR1(info), WRITE,
+ data->addr1);
+ curr_ce++;
+
+ msm_nand_prep_ce(curr_ce, MSM_NAND_DEV0_CFG0(info), WRITE,
+ data->cfg0);
+ curr_ce++;
+
+ msm_nand_prep_ce(curr_ce, MSM_NAND_DEV0_CFG1(info), WRITE,
+ data->cfg1);
+ curr_ce++;
+
+ msm_nand_prep_ce(curr_ce, MSM_NAND_DEV0_ECC_CFG(info), WRITE,
+ data->ecc_bch_cfg);
+ curr_ce++;
+
+ msm_nand_prep_ce(curr_ce, MSM_NAND_EBI2_ECC_BUF_CFG(info),
+ WRITE, data->ecc_cfg);
+ curr_ce++;
+
+ if (!args->read) {
+ msm_nand_prep_ce(curr_ce, MSM_NAND_FLASH_STATUS(info),
+ WRITE, data->clrfstatus);
+ curr_ce++;
+ goto sub_exec_cmd;
+ } else {
+ msm_nand_prep_ce(curr_ce,
+ MSM_NAND_ERASED_CW_DETECT_CFG(info),
+ WRITE, CLR_ERASED_PAGE_DET);
+ curr_ce++;
+ msm_nand_prep_ce(curr_ce,
+ MSM_NAND_ERASED_CW_DETECT_CFG(info),
+ WRITE, SET_ERASED_PAGE_DET);
+ curr_ce++;
+ }
+ } else {
+ curr_ce = start_ce = &cmd_list->cw_desc[*cw_desc_cnt].ce[0];
+ num_ce_ptr = &cmd_list->cw_desc[*cw_desc_cnt].num_ce;
+ flags_ptr = &cmd_list->cw_desc[*cw_desc_cnt].flags;
+ *cw_desc_cnt += 1;
+ *flags_ptr = CMD;
+ cmd_list->count++;
+ }
+ if (!args->read)
+ goto sub_exec_cmd;
+
+ if (ops->mode == MTD_OPS_RAW) {
+ if (ecc_parity_bytes) {
+ rdata = (BYTES_517 << 0) | (ecc_parity_bytes << 16)
+ | (1 << 31);
+ msm_nand_prep_ce(curr_ce,
+ MSM_NAND_READ_LOCATION_0(info),
+ WRITE, rdata);
+ curr_ce++;
+ } else {
+ rdata = (0 << 0) | (chip->cw_size << 16) | (1 << 31);
+ msm_nand_prep_ce(curr_ce,
+ MSM_NAND_READ_LOCATION_0(info),
+ WRITE, rdata);
+ curr_ce++;
+ }
+ }
+ if (ops->mode == MTD_OPS_AUTO_OOB) {
+ if (ops->datbuf) {
+ offset = 0;
+ size = (curr_cw < (args->cwperpage - 1)) ? 516 :
+ (512 - ((args->cwperpage - 1) << 2));
+ last_read = (curr_cw < (args->cwperpage - 1)) ? 1 :
+ (ops->oobbuf ? 0 : 1);
+ rdata = (offset << 0) | (size << 16) |
+ (last_read << 31);
+
+ msm_nand_prep_ce(curr_ce,
+ MSM_NAND_READ_LOCATION_0(info),
+ WRITE,
+ rdata);
+ curr_ce++;
+ }
+ if (curr_cw == (args->cwperpage - 1) && ops->oobbuf) {
+ offset = 512 - ((args->cwperpage - 1) << 2);
+ size = (args->cwperpage) << 2;
+ if (size > args->oob_len_cmd)
+ size = args->oob_len_cmd;
+ args->oob_len_cmd -= size;
+ last_read = 1;
+ rdata = (offset << 0) | (size << 16) |
+ (last_read << 31);
+
+ if (!ops->datbuf)
+ msm_nand_prep_ce(curr_ce,
+ MSM_NAND_READ_LOCATION_0(info),
+ WRITE, rdata);
+ else
+ msm_nand_prep_ce(curr_ce,
+ MSM_NAND_READ_LOCATION_1(info),
+ WRITE, rdata);
+ curr_ce++;
+ }
+ }
+sub_exec_cmd:
+ *flags_ptr |= NWD;
+ msm_nand_prep_ce(curr_ce, MSM_NAND_EXEC_CMD(info), WRITE, data->exec);
+ curr_ce++;
+
+ *num_ce_ptr = curr_ce - start_ce;
+}
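+
+/*
+ * Illustrative sketch, not part of this change: the READ_LOCATION_n
+ * register layout assumed by every rdata computation above - the byte
+ * offset within the codeword in bits [15:0], the transfer size in
+ * bits [30:16], and a last-transfer flag in bit 31. The helper is
+ * hypothetical and only documents the encoding.
+ */
+static inline uint32_t msm_nand_read_loc_sketch(uint32_t offset,
+						uint32_t size,
+						bool last_read)
+{
+	return (offset << 0) | (size << 16) |
+		((uint32_t)!!last_read << 31);
+}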
+
+/*
+ * Function to prepare and submit SPS data descriptors required for a page
+ * read/write operation.
+ */
+static int msm_nand_submit_rw_data_desc(struct mtd_oob_ops *ops,
+ struct msm_nand_rw_params *args,
+ struct msm_nand_info *info,
+ uint32_t curr_cw,
+ uint32_t ecc_parity_bytes)
+{
+ struct msm_nand_chip *chip = &info->nand_chip;
+ struct sps_pipe *data_pipe_handle;
+ uint32_t sectordatasize, sectoroobsize;
+ uint32_t sps_flags = 0;
+ int err = 0;
+
+ if (args->read)
+ data_pipe_handle = info->sps.data_prod.handle;
+ else
+ data_pipe_handle = info->sps.data_cons.handle;
+
+ if (ops->mode == MTD_OPS_RAW) {
+ if (ecc_parity_bytes && args->read) {
+ if (curr_cw == (args->cwperpage - 1))
+ sps_flags |= SPS_IOVEC_FLAG_INT;
+
+ /* read only ecc bytes */
+ err = sps_transfer_one(data_pipe_handle,
+ args->ecc_dma_addr_curr,
+ ecc_parity_bytes, NULL,
+ sps_flags);
+ if (err)
+ goto out;
+ args->ecc_dma_addr_curr += ecc_parity_bytes;
+ } else {
+ sectordatasize = chip->cw_size;
+ if (!args->read)
+ sps_flags = SPS_IOVEC_FLAG_EOT;
+ if (curr_cw == (args->cwperpage - 1))
+ sps_flags |= SPS_IOVEC_FLAG_INT;
+
+ err = sps_transfer_one(data_pipe_handle,
+ args->data_dma_addr_curr,
+ sectordatasize, NULL,
+ sps_flags);
+ if (err)
+ goto out;
+ args->data_dma_addr_curr += sectordatasize;
+ }
+ } else if (ops->mode == MTD_OPS_AUTO_OOB) {
+ if (ops->datbuf) {
+ sectordatasize = (curr_cw < (args->cwperpage - 1))
+ ? 516 : (512 - ((args->cwperpage - 1) << 2));
+
+ if (!args->read) {
+ sps_flags = SPS_IOVEC_FLAG_EOT;
+ if (curr_cw == (args->cwperpage - 1) &&
+ ops->oobbuf)
+ sps_flags = 0;
+ }
+ if ((curr_cw == (args->cwperpage - 1)) && !ops->oobbuf)
+ sps_flags |= SPS_IOVEC_FLAG_INT;
+
+ err = sps_transfer_one(data_pipe_handle,
+ args->data_dma_addr_curr,
+ sectordatasize, NULL,
+ sps_flags);
+ if (err)
+ goto out;
+ args->data_dma_addr_curr += sectordatasize;
+ }
+
+ if (ops->oobbuf && (curr_cw == (args->cwperpage - 1))) {
+ sectoroobsize = args->cwperpage << 2;
+ if (sectoroobsize > args->oob_len_data)
+ sectoroobsize = args->oob_len_data;
+
+ if (!args->read)
+ sps_flags |= SPS_IOVEC_FLAG_EOT;
+ sps_flags |= SPS_IOVEC_FLAG_INT;
+ err = sps_transfer_one(data_pipe_handle,
+ args->oob_dma_addr_curr,
+ sectoroobsize, NULL,
+ sps_flags);
+ if (err)
+ goto out;
+ args->oob_dma_addr_curr += sectoroobsize;
+ args->oob_len_data -= sectoroobsize;
+ }
+ }
+out:
+ return err;
+}
+
+/*
+ * Read ECC bytes and check whether page is erased or not.
+ *
+ * The NAND devices manufactured with newer process node technology are
+ * susceptible to bit-flips. These bit-flips are easily fixable with the
+ * ECC engine and the ECC information stored on the NAND device. This
+ * device-specific information is found in the data sheet for the NAND
+ * device and is usually specified as a "number of bit-flips expected
+ * per codeword". For example, "a single bit-flip per codeword". This
+ * also means that the number of ECC errors doesn't increase over a
+ * period of time as it did in the past, and can't be used to predict a
+ * "bad-block about to happen" situation anymore.
+ *
+ * So what this means for erased pages:
+ * Since the ECC data for an erased page is all 0xFFs, the ECC engine
+ * would not be able to correct any bit-flips that occur in these newer
+ * parts. If the NAND controller is unable to identify the erased page
+ * due to the bit-flips, then "uncorrectable ECC errors" would be
+ * detected and reported to the file system layer (YAFFS2/UBIFS etc.),
+ * which would result in a good block being marked as a bad block and
+ * also lead to error scenarios.
+ *
+ * So to handle this, the following will be done by software until newer
+ * NAND controller hardware is available that can detect erased pages
+ * with bit-flips successfully.
+ *
+ * 1. msm_nand_read_oob() calls this function when "uncorrectable ECC
+ *    errors" occur.
+ * 2. This function then performs a raw read of the page.
+ * 3. This read is done to extract the ECC bytes, not the data, from
+ *    that page.
+ * 4. For each codeword's ECC data, the following is done:
+ *    a. Count the number of zero bits.
+ *    b. If that count is greater than <BIT-FLIPS-EXPECTED>, then it is
+ *       not an erased page.
+ *    c. Else repeat for the next codeword's ECC data.
+ *    d. If all codewords have fewer than <BIT-FLIPS-EXPECTED> bits of
+ *       zeros, then it's considered an erased page.
+ *
+ * Since "uncorrectable ECC errors" do not occur except for either an
+ * erased page or an actual error, this solution works.
+ */
+static int msm_nand_is_erased_page(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops,
+ struct msm_nand_rw_params *rw_params,
+ bool *erased_page)
+{
+ struct msm_nand_info *info = mtd->priv;
+ struct msm_nand_chip *chip = &info->nand_chip;
+ uint32_t cwperpage = (mtd->writesize >> 9);
+ int err, submitted_num_desc = 0;
+ uint32_t n = 0, num_zero_bits = 0, total_ecc_byte_cnt;
+ struct msm_nand_rw_reg_data data;
+ struct sps_iovec *iovec;
+ struct sps_iovec iovec_temp;
+ struct mtd_oob_ops raw_ops;
+
+ /*
+ * The following 6 commands will be sent only once for the first
+ * codeword (CW) - addr0, addr1, dev0_cfg0, dev0_cfg1,
+ * dev0_ecc_cfg, ebi2_ecc_buf_cfg. The following 6 commands will
+ * be sent for every CW - flash, read_location_0, read_location_1,
+ * exec, flash_status and buffer_status.
+ */
+ struct msm_nand_rw_cmd_desc *cmd_list = NULL;
+ uint32_t cw_desc_cnt = 0;
+ struct {
+ struct sps_transfer xfer;
+ struct sps_iovec cmd_iovec[MAX_DESC];
+ struct {
+ uint32_t count;
+ struct msm_nand_cmd_setup_desc setup_desc;
+ struct msm_nand_cmd_cw_desc cw_desc[MAX_DESC - 1];
+ } cmd_list;
+ struct {
+ uint32_t flash_status;
+ uint32_t buffer_status;
+ uint32_t erased_cw_status;
+ } result[MAX_CW_PER_PAGE];
+ } *dma_buffer;
+ uint8_t *ecc;
+
+ pr_debug("========================================================\n");
+ total_ecc_byte_cnt = (chip->ecc_parity_bytes * cwperpage);
+ memcpy(&raw_ops, ops, sizeof(struct mtd_oob_ops));
+ raw_ops.mode = MTD_OPS_RAW;
+ ecc = kzalloc(total_ecc_byte_cnt, GFP_KERNEL);
+
+ wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
+ chip, sizeof(*dma_buffer))));
+
+ memset(&data, 0, sizeof(struct msm_nand_rw_reg_data));
+ msm_nand_update_rw_reg_data(chip, &raw_ops, rw_params, &data);
+ cmd_list = (struct msm_nand_rw_cmd_desc *)&dma_buffer->cmd_list;
+
+ /* map the ecc for dma operations */
+ rw_params->ecc_dma_addr_curr = rw_params->ecc_dma_addr =
+ dma_map_single(chip->dev, ecc, total_ecc_byte_cnt,
+ DMA_FROM_DEVICE);
+
+ data.addr0 = (rw_params->page << 16) | rw_params->oob_col;
+ data.addr1 = (rw_params->page >> 16) & 0xff;
+ for (n = rw_params->start_sector; n < cwperpage; n++) {
+ struct sps_command_element *curr_ce, *start_ce;
+
+ dma_buffer->result[n].flash_status = 0xeeeeeeee;
+ dma_buffer->result[n].buffer_status = 0xeeeeeeee;
+ dma_buffer->result[n].erased_cw_status = 0xeeeeee00;
+
+ msm_nand_prep_rw_cmd_desc(&raw_ops, rw_params, &data, info,
+ n, cmd_list, &cw_desc_cnt,
+ chip->ecc_parity_bytes);
+
+ start_ce = &cmd_list->cw_desc[cw_desc_cnt].ce[0];
+ curr_ce = start_ce;
+ cmd_list->cw_desc[cw_desc_cnt].flags = CMD;
+ if (n == (cwperpage - 1))
+ cmd_list->cw_desc[cw_desc_cnt].flags |=
+ INT_UNLCK;
+ cmd_list->count++;
+
+ msm_nand_prep_ce(curr_ce, MSM_NAND_FLASH_STATUS(info),
+ READ, msm_virt_to_dma(chip,
+ &dma_buffer->result[n].flash_status));
+ curr_ce++;
+
+ msm_nand_prep_ce(curr_ce, MSM_NAND_BUFFER_STATUS(info),
+ READ, msm_virt_to_dma(chip,
+ &dma_buffer->result[n].buffer_status));
+ curr_ce++;
+
+ msm_nand_prep_ce(curr_ce,
+ MSM_NAND_ERASED_CW_DETECT_STATUS(info),
+ READ, msm_virt_to_dma(chip,
+ &dma_buffer->result[n].erased_cw_status));
+ curr_ce++;
+ cmd_list->cw_desc[cw_desc_cnt++].num_ce = curr_ce -
+ start_ce;
+ }
+
+ dma_buffer->xfer.iovec_count = cmd_list->count;
+ dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
+ dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
+ &dma_buffer->cmd_iovec);
+ iovec = dma_buffer->xfer.iovec;
+
+ iovec->addr = msm_virt_to_dma(chip,
+ &cmd_list->setup_desc.ce[0]);
+ iovec->size = sizeof(struct sps_command_element) *
+ cmd_list->setup_desc.num_ce;
+ iovec->flags = cmd_list->setup_desc.flags;
+ iovec++;
+ for (n = 0; n < (cmd_list->count - 1); n++) {
+ iovec->addr = msm_virt_to_dma(chip,
+ &cmd_list->cw_desc[n].ce[0]);
+ iovec->size = sizeof(struct sps_command_element) *
+ cmd_list->cw_desc[n].num_ce;
+ iovec->flags = cmd_list->cw_desc[n].flags;
+ iovec++;
+ }
+ mutex_lock(&info->lock);
+ err = msm_nand_get_device(chip->dev);
+ if (err)
+ goto unlock_mutex;
+ /* Submit data descriptors */
+ for (n = rw_params->start_sector; n < cwperpage; n++) {
+ err = msm_nand_submit_rw_data_desc(&raw_ops,
+ rw_params, info, n,
+ chip->ecc_parity_bytes);
+ if (err) {
+ pr_err("Failed to submit data descs %d\n", err);
+ panic("error in nand driver\n");
+ goto put_dev;
+ }
+ }
+ submitted_num_desc = cwperpage - rw_params->start_sector;
+
+ /* Submit command descriptors */
+ err = sps_transfer(info->sps.cmd_pipe.handle,
+ &dma_buffer->xfer);
+ if (err) {
+ pr_err("Failed to submit commands %d\n", err);
+ goto put_dev;
+ }
+
+ err = msm_nand_sps_get_iovec(info->sps.cmd_pipe.handle,
+ info->sps.cmd_pipe.index,
+ dma_buffer->xfer.iovec_count,
+ &iovec_temp);
+ if (err) {
+ pr_err("Failed to get iovec for pipe %d: (err:%d)\n",
+ (info->sps.cmd_pipe.index), err);
+ goto put_dev;
+ }
+ err = msm_nand_sps_get_iovec(info->sps.data_prod.handle,
+ info->sps.data_prod.index, submitted_num_desc,
+ &iovec_temp);
+ if (err) {
+ pr_err("Failed to get iovec for pipe %d: (err:%d)\n",
+ (info->sps.data_prod.index), err);
+ goto put_dev;
+ }
+
+ err = msm_nand_put_device(chip->dev);
+ mutex_unlock(&info->lock);
+ if (err)
+ goto free_dma;
+
+ pr_debug("addr0: 0x%08x, addr1: 0x%08x\n", data.addr0, data.addr1);
+ for (n = rw_params->start_sector; n < cwperpage; n++)
+ pr_debug("cw %d: flash_sts %x buffr_sts %x, erased_cw_status: %x\n",
+ n, dma_buffer->result[n].flash_status,
+ dma_buffer->result[n].buffer_status,
+ dma_buffer->result[n].erased_cw_status);
+
+ goto free_dma;
+put_dev:
+ msm_nand_put_device(chip->dev);
+unlock_mutex:
+ mutex_unlock(&info->lock);
+free_dma:
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+	/* unmap ecc dma memory */
+ dma_unmap_single(chip->dev, rw_params->ecc_dma_addr,
+ total_ecc_byte_cnt, DMA_FROM_DEVICE);
+ /* check for bit flips in ecc data */
+ for (n = rw_params->start_sector; n < cwperpage; n++) {
+ uint8_t *ecc_temp = ecc;
+ int last_pos = 0, next_pos = 0;
+ int ecc_bytes_percw_in_bits = (chip->ecc_parity_bytes * 8);
+
+ do {
+ last_pos = find_next_zero_bit(ecc_temp,
+ ecc_bytes_percw_in_bits, next_pos);
+
+ if (last_pos < ecc_bytes_percw_in_bits)
+ num_zero_bits++;
+
+ if (num_zero_bits > 4) {
+ *erased_page = false;
+ goto free_mem;
+ }
+
+ next_pos = last_pos + 1;
+ } while (last_pos < ecc_bytes_percw_in_bits);
+
+ num_zero_bits = last_pos = next_pos = 0;
+ ecc_temp += chip->ecc_parity_bytes;
+ }
+
+ if ((n == cwperpage) && (num_zero_bits <= 4))
+ *erased_page = true;
+free_mem:
+ kfree(ecc);
+ pr_debug("========================================================\n");
+ return err;
+}
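+
+/*
+ * Illustrative sketch, not part of this change: the per-codeword
+ * zero-bit heuristic applied above. An erased codeword reads back as
+ * all 0xFF, so its ECC bytes should contain almost no zero bits; a few
+ * zero bits are tolerated to allow for the expected bit-flips. The
+ * threshold of 4 mirrors the hard-coded limit above; hweight8() is
+ * from <linux/bitops.h>.
+ */
+static inline bool msm_nand_cw_looks_erased_sketch(const uint8_t *ecc,
+						   size_t len)
+{
+	unsigned int zero_bits = 0;
+	size_t i;
+
+	for (i = 0; i < len; i++) {
+		zero_bits += 8 - hweight8(ecc[i]);
+		if (zero_bits > 4)
+			return false;	/* real data or a real error */
+	}
+	return true;	/* looks erased, bit-flips tolerated */
+}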
+
+/*
+ * Function that gets called from upper layers such as MTD/YAFFS2 to read a
+ * page with main or/and spare data.
+ */
+static int msm_nand_read_oob(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct msm_nand_info *info = mtd->priv;
+ struct msm_nand_chip *chip = &info->nand_chip;
+ struct flash_identification *flash_dev = &info->flash_dev;
+ uint32_t cwperpage = (mtd->writesize >> 9);
+ int err, pageerr = 0, rawerr = 0, submitted_num_desc = 0;
+ uint32_t n = 0, pages_read = 0;
+ uint32_t ecc_errors = 0, total_ecc_errors = 0, ecc_capability;
+ struct msm_nand_rw_params rw_params;
+ struct msm_nand_rw_reg_data data;
+ struct sps_iovec *iovec;
+ struct sps_iovec iovec_temp;
+ bool erased_page;
+ uint64_t fix_data_in_pages = 0;
+
+ /*
+ * The following 6 commands will be sent only once for the first
+ * codeword (CW) - addr0, addr1, dev0_cfg0, dev0_cfg1,
+ * dev0_ecc_cfg, ebi2_ecc_buf_cfg. The following 6 commands will
+ * be sent for every CW - flash, read_location_0, read_location_1,
+ * exec, flash_status and buffer_status.
+ */
+ struct {
+ struct sps_transfer xfer;
+ struct sps_iovec cmd_iovec[MAX_DESC];
+ struct {
+ uint32_t count;
+ struct msm_nand_cmd_setup_desc setup_desc;
+ struct msm_nand_cmd_cw_desc cw_desc[MAX_DESC - 1];
+ } cmd_list;
+ struct {
+ uint32_t flash_status;
+ uint32_t buffer_status;
+ uint32_t erased_cw_status;
+ } result[MAX_CW_PER_PAGE];
+ } *dma_buffer;
+ struct msm_nand_rw_cmd_desc *cmd_list = NULL;
+
+ memset(&rw_params, 0, sizeof(struct msm_nand_rw_params));
+ err = msm_nand_validate_mtd_params(mtd, true, from, ops, &rw_params);
+ if (err)
+ goto validate_mtd_params_failed;
+
+ wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
+ chip, sizeof(*dma_buffer))));
+
+ rw_params.oob_col = rw_params.start_sector * chip->cw_size;
+ if (chip->cfg1 & (1 << WIDE_FLASH))
+ rw_params.oob_col >>= 1;
+
+ memset(&data, 0, sizeof(struct msm_nand_rw_reg_data));
+ msm_nand_update_rw_reg_data(chip, ops, &rw_params, &data);
+ cmd_list = (struct msm_nand_rw_cmd_desc *)&dma_buffer->cmd_list;
+
+ ecc_capability = flash_dev->ecc_capability;
+
+ while (rw_params.page_count-- > 0) {
+ uint32_t cw_desc_cnt = 0;
+
+ erased_page = false;
+ data.addr0 = (rw_params.page << 16) | rw_params.oob_col;
+ data.addr1 = (rw_params.page >> 16) & 0xff;
+
+ for (n = rw_params.start_sector; n < cwperpage; n++) {
+ struct sps_command_element *curr_ce, *start_ce;
+
+ dma_buffer->result[n].flash_status = 0xeeeeeeee;
+ dma_buffer->result[n].buffer_status = 0xeeeeeeee;
+ dma_buffer->result[n].erased_cw_status = 0xeeeeee00;
+
+ msm_nand_prep_rw_cmd_desc(ops, &rw_params, &data, info,
+ n, cmd_list, &cw_desc_cnt, 0);
+
+ start_ce = &cmd_list->cw_desc[cw_desc_cnt].ce[0];
+ curr_ce = start_ce;
+ cmd_list->cw_desc[cw_desc_cnt].flags = CMD;
+ if (n == (cwperpage - 1))
+ cmd_list->cw_desc[cw_desc_cnt].flags |=
+ INT_UNLCK;
+ cmd_list->count++;
+
+ msm_nand_prep_ce(curr_ce, MSM_NAND_FLASH_STATUS(info),
+ READ, msm_virt_to_dma(chip,
+ &dma_buffer->result[n].flash_status));
+ curr_ce++;
+
+ msm_nand_prep_ce(curr_ce, MSM_NAND_BUFFER_STATUS(info),
+ READ, msm_virt_to_dma(chip,
+ &dma_buffer->result[n].buffer_status));
+ curr_ce++;
+
+ msm_nand_prep_ce(curr_ce,
+ MSM_NAND_ERASED_CW_DETECT_STATUS(info),
+ READ, msm_virt_to_dma(chip,
+ &dma_buffer->result[n].erased_cw_status));
+ curr_ce++;
+ cmd_list->cw_desc[cw_desc_cnt++].num_ce = curr_ce -
+ start_ce;
+ }
+
+ dma_buffer->xfer.iovec_count = cmd_list->count;
+ dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
+ dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
+ &dma_buffer->cmd_iovec);
+ iovec = dma_buffer->xfer.iovec;
+
+ iovec->addr = msm_virt_to_dma(chip,
+ &cmd_list->setup_desc.ce[0]);
+ iovec->size = sizeof(struct sps_command_element) *
+ cmd_list->setup_desc.num_ce;
+ iovec->flags = cmd_list->setup_desc.flags;
+ iovec++;
+ for (n = 0; n < (cmd_list->count - 1); n++) {
+ iovec->addr = msm_virt_to_dma(chip,
+ &cmd_list->cw_desc[n].ce[0]);
+ iovec->size = sizeof(struct sps_command_element) *
+ cmd_list->cw_desc[n].num_ce;
+ iovec->flags = cmd_list->cw_desc[n].flags;
+ iovec++;
+ }
+ mutex_lock(&info->lock);
+ err = msm_nand_get_device(chip->dev);
+ if (err)
+ goto unlock_mutex;
+ /* Submit data descriptors */
+ for (n = rw_params.start_sector; n < cwperpage; n++) {
+ err = msm_nand_submit_rw_data_desc(ops,
+ &rw_params, info, n, 0);
+ if (err) {
+ pr_err("Failed to submit data descs %d\n", err);
+ panic("error in nand driver\n");
+ goto put_dev;
+ }
+ }
+
+ if (ops->mode == MTD_OPS_RAW) {
+ submitted_num_desc = cwperpage - rw_params.start_sector;
+ } else if (ops->mode == MTD_OPS_AUTO_OOB) {
+ if (ops->datbuf)
+ submitted_num_desc = cwperpage -
+ rw_params.start_sector;
+ if (ops->oobbuf)
+ submitted_num_desc++;
+ }
+
+ /* Submit command descriptors */
+ err = sps_transfer(info->sps.cmd_pipe.handle,
+ &dma_buffer->xfer);
+ if (err) {
+ pr_err("Failed to submit commands %d\n", err);
+ goto put_dev;
+ }
+
+ err = msm_nand_sps_get_iovec(info->sps.cmd_pipe.handle,
+ info->sps.cmd_pipe.index,
+ dma_buffer->xfer.iovec_count,
+ &iovec_temp);
+ if (err) {
+ pr_err("Failed to get iovec for pipe %d: (err: %d)\n",
+ (info->sps.cmd_pipe.index), err);
+ goto put_dev;
+ }
+ err = msm_nand_sps_get_iovec(info->sps.data_prod.handle,
+ info->sps.data_prod.index, submitted_num_desc,
+ &iovec_temp);
+ if (err) {
+ pr_err("Failed to get iovec for pipe %d: (err: %d)\n",
+ (info->sps.data_prod.index), err);
+ goto put_dev;
+ }
+
+ err = msm_nand_put_device(chip->dev);
+ mutex_unlock(&info->lock);
+ if (err)
+ goto free_dma;
+ /* Check for flash status errors */
+ pageerr = rawerr = 0;
+ for (n = rw_params.start_sector; n < cwperpage; n++) {
+ if (dma_buffer->result[n].flash_status & (FS_OP_ERR |
+ FS_MPU_ERR)) {
+ rawerr = -EIO;
+				/*
+				 * Check if the ECC error was due to an erased
+				 * codeword. If so, ignore the error.
+				 *
+				 * NOTE: There is a bug in the erased page
+				 * detection hardware block when reading
+				 * only spare data. To work around this
+				 * issue, instead of using the PAGE_ALL_ERASED
+				 * bit to check whether a whole page is
+				 * erased, we use the CODEWORD_ALL_ERASED
+				 * and CODEWORD_ERASED bits together and check
+				 * whether each codeword that has the
+				 * FS_OP_ERR bit set is an erased codeword.
+				 */
+ if ((dma_buffer->result[n].erased_cw_status &
+ ERASED_CW) == ERASED_CW) {
+ /*
+ * At least one code word is detected
+ * as an erased code word.
+ */
+ pr_debug("erased codeword detected - ignore ecc error\n");
+ continue;
+ }
+ pageerr = rawerr;
+ break;
+ }
+ }
+ /* check for uncorrectable errors */
+ if (pageerr) {
+ for (n = rw_params.start_sector; n < cwperpage; n++) {
+ if (dma_buffer->result[n].buffer_status &
+ BS_UNCORRECTABLE_BIT) {
+ /*
+ * Check if page is actually
+ * erased or not.
+ */
+ err = msm_nand_is_erased_page(mtd,
+ from, ops,
+ &rw_params,
+ &erased_page);
+ if (err)
+ goto free_dma;
+ if (!erased_page) {
+ mtd->ecc_stats.failed++;
+ pageerr = -EBADMSG;
+ break;
+ }
+ pageerr = 0;
+					pr_debug("Uncorrectable ECC errors detected on an erased page and have been fixed.\n");
+ break;
+ }
+ }
+ }
+
+ if (rawerr && !pageerr && erased_page) {
+			/*
+			 * This means an erased page had bit-flips, which
+			 * now need to be cleared in the data being sent to
+			 * the upper layers. Keep track of those pages here;
+			 * the data will be fixed up before this function
+			 * returns. Note that a whole page's worth of data
+			 * is fixed, and this only handles up to 64 pages
+			 * per read, i.e. one erase block's worth of pages.
+			 */
+ fix_data_in_pages |= BIT(rw_params.page_count);
+ }
+ /* check for correctable errors */
+ if (!rawerr) {
+ for (n = rw_params.start_sector; n < cwperpage; n++) {
+ ecc_errors =
+ dma_buffer->result[n].buffer_status
+ & BS_CORRECTABLE_ERR_MSK;
+ if (ecc_errors) {
+ total_ecc_errors += ecc_errors;
+ mtd->ecc_stats.corrected += ecc_errors;
+					/*
+					 * The NAND device can have ECC
+					 * errors even on the first ever
+					 * write, so reporting EUCLEAN
+					 * when the error count is below
+					 * the ECC capability of the
+					 * device is not useful.
+					 *
+					 * Also, don't report EUCLEAN
+					 * unless enable_euclean is set.
+					 */
+ if (enable_euclean &&
+ ecc_errors >= ecc_capability)
+ pageerr = -EUCLEAN;
+ }
+ }
+ }
+ if (pageerr && (pageerr != -EUCLEAN || err == 0))
+ err = pageerr;
+
+ if (rawerr && !pageerr) {
+ pr_debug("%llx %x %x empty page\n",
+ (loff_t)rw_params.page * mtd->writesize,
+ ops->len, ops->ooblen);
+ } else {
+ for (n = rw_params.start_sector; n < cwperpage; n++)
+ pr_debug("cw %d: flash_sts %x buffr_sts %x, erased_cw_status: %x, pageerr: %d, rawerr: %d\n",
+ n, dma_buffer->result[n].flash_status,
+ dma_buffer->result[n].buffer_status,
+ dma_buffer->result[n].erased_cw_status,
+ pageerr, rawerr);
+ }
+ if (err && err != -EUCLEAN && err != -EBADMSG)
+ goto free_dma;
+ pages_read++;
+ rw_params.page++;
+ }
+ goto free_dma;
+put_dev:
+ msm_nand_put_device(chip->dev);
+unlock_mutex:
+ mutex_unlock(&info->lock);
+free_dma:
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+ if (ops->oobbuf)
+ dma_unmap_page(chip->dev, rw_params.oob_dma_addr,
+ ops->ooblen, DMA_FROM_DEVICE);
+ if (ops->datbuf)
+ dma_unmap_page(chip->dev, rw_params.data_dma_addr,
+ ops->len, DMA_BIDIRECTIONAL);
+ /*
+ * If there were any erased pages detected with ECC errors, then
+ * it is most likely that the data is not all 0xff. So memset that
+ * page to all 0xff.
+ */
+ while (fix_data_in_pages) {
+ int temp_page = 0, oobsize = rw_params.cwperpage << 2;
+ int count = 0, offset = 0;
+
+ temp_page = fix_data_in_pages & BIT_MASK(0);
+ fix_data_in_pages = fix_data_in_pages >> 1;
+ count++;
+
+ if (!temp_page)
+ continue;
+
+ offset = (count - 1) * mtd->writesize;
+ if (ops->datbuf)
+ memset((ops->datbuf + offset), 0xff, mtd->writesize);
+
+ offset = (count - 1) * oobsize;
+ if (ops->oobbuf)
+ memset(ops->oobbuf + offset, 0xff, oobsize);
+ }
+validate_mtd_params_failed:
+ if (ops->mode != MTD_OPS_RAW)
+ ops->retlen = mtd->writesize * pages_read;
+ else
+ ops->retlen = (mtd->writesize + mtd->oobsize) * pages_read;
+ ops->oobretlen = ops->ooblen - rw_params.oob_len_data;
+ if (err)
+ pr_err("0x%llx datalen 0x%x ooblen %x err %d corrected %d\n",
+ from, ops->datbuf ? ops->len : 0, ops->ooblen, err,
+ total_ecc_errors);
+ pr_debug("ret %d, retlen %d oobretlen %d\n",
+ err, ops->retlen, ops->oobretlen);
+
+ pr_debug("========================================================\n");
+ return err;
+}
+
+/**
+ * msm_nand_read_partial_page() - read partial page
+ * @mtd: pointer to mtd info
+ * @from: start address of the page
+ * @ops: pointer to mtd_oob_ops
+ *
+ * Reads a page into a bounce buffer and copies the required
+ * number of bytes to the actual buffer. Page-aligned reads do
+ * not use the bounce buffer.
+ */
+static int msm_nand_read_partial_page(struct mtd_info *mtd,
+ loff_t from, struct mtd_oob_ops *ops)
+{
+ int err = 0;
+ unsigned char *actual_buf;
+ unsigned char *bounce_buf;
+ loff_t aligned_from;
+ loff_t offset;
+ size_t len;
+ size_t actual_len, ret_len;
+ int is_euclean = 0;
+ int is_ebadmsg = 0;
+
+ actual_len = ops->len;
+ ret_len = 0;
+ actual_buf = ops->datbuf;
+
+ bounce_buf = kmalloc(mtd->writesize, GFP_KERNEL);
+ if (!bounce_buf) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* Get start address of page to read from */
+ ops->len = mtd->writesize;
+ offset = from & (mtd->writesize - 1);
+ aligned_from = from - offset;
+
+ for (;;) {
+ bool no_copy = false;
+
+ len = mtd->writesize - offset;
+ if (len > actual_len)
+ len = actual_len;
+
+ if (offset == 0 && len == mtd->writesize)
+ no_copy = true;
+
+ if (!virt_addr_valid(actual_buf) &&
+ !is_buffer_in_page(actual_buf, ops->len))
+ no_copy = false;
+
+ ops->datbuf = no_copy ? actual_buf : bounce_buf;
+ err = msm_nand_read_oob(mtd, aligned_from, ops);
+ if (err == -EUCLEAN) {
+ is_euclean = 1;
+ err = 0;
+ }
+
+ if (err == -EBADMSG) {
+ is_ebadmsg = 1;
+ err = 0;
+ }
+
+ if (err < 0) {
+ /* Clear previously set EUCLEAN / EBADMSG */
+ is_euclean = 0;
+ is_ebadmsg = 0;
+ ret_len = ops->retlen;
+ break;
+ }
+
+ if (!no_copy)
+ memcpy(actual_buf, bounce_buf + offset, len);
+
+ actual_len -= len;
+ ret_len += len;
+
+ if (actual_len == 0)
+ break;
+
+ actual_buf += len;
+ offset = 0;
+ aligned_from += mtd->writesize;
+ }
+
+ ops->retlen = ret_len;
+ kfree(bounce_buf);
+out:
+ if (is_euclean == 1)
+ err = -EUCLEAN;
+
+	/* EBADMSG takes precedence over EUCLEAN */
+ if (is_ebadmsg == 1)
+ err = -EBADMSG;
+ return err;
+}
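+
+/*
+ * Illustrative sketch, not part of this change: the alignment math
+ * used by msm_nand_read_partial_page() above. An unaligned request is
+ * widened to the enclosing page and only the requested window of the
+ * bounce buffer is copied back. For a 2048-byte page, from = 0x900
+ * gives offset = 0x100, aligned_from = 0x800 and a first-chunk length
+ * of 0x700 (capped by the requested length). The helper name is
+ * hypothetical.
+ */
+static inline void msm_nand_partial_window_sketch(loff_t from,
+						  uint32_t writesize,
+						  loff_t *aligned_from,
+						  loff_t *offset)
+{
+	*offset = from & (writesize - 1);	/* offset inside the page */
+	*aligned_from = from - *offset;		/* page-aligned start */
+}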
+
+/*
+ * Function that gets called from upper layers such as MTD/YAFFS2 to read a
+ * page with only main data.
+ */
+static int msm_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ int ret;
+ int is_euclean = 0;
+ int is_ebadmsg = 0;
+ struct mtd_oob_ops ops;
+ unsigned char *bounce_buf = NULL;
+
+ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.retlen = 0;
+ ops.ooblen = 0;
+ ops.oobbuf = NULL;
+ *retlen = 0;
+
+ if (!(from & (mtd->writesize - 1)) && !(len % mtd->writesize)) {
+		/*
+		 * Handle reading of a large buffer in vmalloc address
+		 * space that does not fit within a single MMU page.
+		 */
+ if (!virt_addr_valid(buf) && !is_buffer_in_page(buf, len)) {
+ ops.len = mtd->writesize;
+
+ bounce_buf = kmalloc(ops.len, GFP_KERNEL);
+ if (!bounce_buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (;;) {
+ bool no_copy = false;
+
+ if (!is_buffer_in_page(buf, ops.len)) {
+ memcpy(bounce_buf, buf, ops.len);
+ ops.datbuf = (uint8_t *) bounce_buf;
+ } else {
+ ops.datbuf = (uint8_t *) buf;
+ no_copy = true;
+ }
+ ret = msm_nand_read_oob(mtd, from, &ops);
+ if (ret == -EUCLEAN) {
+ is_euclean = 1;
+ ret = 0;
+ }
+ if (ret == -EBADMSG) {
+ is_ebadmsg = 1;
+ ret = 0;
+ }
+ if (ret < 0) {
+ /* Clear previously set errors */
+ is_euclean = 0;
+ is_ebadmsg = 0;
+ break;
+ }
+
+
+ if (!no_copy)
+ memcpy(buf, bounce_buf, ops.retlen);
+
+ len -= ops.retlen;
+ *retlen += ops.retlen;
+ if (len == 0)
+ break;
+ buf += ops.retlen;
+ from += ops.retlen;
+
+ if (len < mtd->writesize) {
+ ops.len = len;
+ ops.datbuf = buf;
+ ret = msm_nand_read_partial_page(
+ mtd, from, &ops);
+ *retlen += ops.retlen;
+ break;
+ }
+ }
+ kfree(bounce_buf);
+ } else {
+ ops.len = len;
+ ops.datbuf = (uint8_t *)buf;
+ ret = msm_nand_read_oob(mtd, from, &ops);
+ *retlen = ops.retlen;
+ }
+ } else {
+ ops.len = len;
+ ops.datbuf = (uint8_t *)buf;
+ ret = msm_nand_read_partial_page(mtd, from, &ops);
+ *retlen = ops.retlen;
+ }
+out:
+ if (is_euclean == 1)
+ ret = -EUCLEAN;
+
+	/* EBADMSG takes precedence over EUCLEAN */
+ if (is_ebadmsg == 1)
+ ret = -EBADMSG;
+
+ return ret;
+}
+
+/*
+ * Function that gets called from upper layers such as MTD/YAFFS2 to write a
+ * page with both main and spare data.
+ */
+static int msm_nand_write_oob(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ struct msm_nand_info *info = mtd->priv;
+ struct msm_nand_chip *chip = &info->nand_chip;
+ uint32_t cwperpage = (mtd->writesize >> 9);
+ uint32_t n, flash_sts, pages_written = 0;
+ int err = 0, submitted_num_desc = 0;
+ struct msm_nand_rw_params rw_params;
+ struct msm_nand_rw_reg_data data;
+ struct sps_iovec *iovec;
+ struct sps_iovec iovec_temp;
+	/*
+	 * The following 7 commands will be sent only once:
+	 * for the first codeword (CW) - addr0, addr1, dev0_cfg0, dev0_cfg1,
+	 * dev0_ecc_cfg, ebi2_ecc_buf_cfg;
+	 * for the last codeword (CW) - read_status (write).
+	 *
+	 * The following 4 commands will be sent for every CW:
+	 * flash, exec, flash_status (read), flash_status (write).
+	 */
+ struct {
+ struct sps_transfer xfer;
+ struct sps_iovec cmd_iovec[MAX_DESC + 1];
+ struct {
+ uint32_t count;
+ struct msm_nand_cmd_setup_desc setup_desc;
+ struct msm_nand_cmd_cw_desc cw_desc[MAX_DESC];
+ } cmd_list;
+ struct {
+ uint32_t flash_status;
+ } data[MAX_CW_PER_PAGE];
+ } *dma_buffer;
+ struct msm_nand_rw_cmd_desc *cmd_list = NULL;
+
+ memset(&rw_params, 0, sizeof(struct msm_nand_rw_params));
+ err = msm_nand_validate_mtd_params(mtd, false, to, ops, &rw_params);
+ if (err)
+ goto validate_mtd_params_failed;
+
+ wait_event(chip->dma_wait_queue, (dma_buffer =
+ msm_nand_get_dma_buffer(chip, sizeof(*dma_buffer))));
+
+ memset(&data, 0, sizeof(struct msm_nand_rw_reg_data));
+ msm_nand_update_rw_reg_data(chip, ops, &rw_params, &data);
+ cmd_list = (struct msm_nand_rw_cmd_desc *)&dma_buffer->cmd_list;
+
+ while (rw_params.page_count-- > 0) {
+ uint32_t cw_desc_cnt = 0;
+ struct sps_command_element *curr_ce, *start_ce;
+
+ data.addr0 = (rw_params.page << 16);
+ data.addr1 = (rw_params.page >> 16) & 0xff;
+
+ for (n = 0; n < cwperpage ; n++) {
+ dma_buffer->data[n].flash_status = 0xeeeeeeee;
+
+ msm_nand_prep_rw_cmd_desc(ops, &rw_params, &data, info,
+ n, cmd_list, &cw_desc_cnt, 0);
+
+ curr_ce = &cmd_list->cw_desc[cw_desc_cnt].ce[0];
+ cmd_list->cw_desc[cw_desc_cnt].flags = CMD;
+ cmd_list->count++;
+
+ msm_nand_prep_ce(curr_ce, MSM_NAND_FLASH_STATUS(info),
+ READ, msm_virt_to_dma(chip,
+ &dma_buffer->data[n].flash_status));
+ cmd_list->cw_desc[cw_desc_cnt++].num_ce = 1;
+ }
+
+ start_ce = &cmd_list->cw_desc[cw_desc_cnt].ce[0];
+ curr_ce = start_ce;
+ cmd_list->cw_desc[cw_desc_cnt].flags = CMD_INT_UNLCK;
+ cmd_list->count++;
+ msm_nand_prep_ce(curr_ce, MSM_NAND_FLASH_STATUS(info),
+ WRITE, data.clrfstatus);
+ curr_ce++;
+
+ msm_nand_prep_ce(curr_ce, MSM_NAND_READ_STATUS(info),
+ WRITE, data.clrrstatus);
+ curr_ce++;
+ cmd_list->cw_desc[cw_desc_cnt++].num_ce = curr_ce - start_ce;
+
+ dma_buffer->xfer.iovec_count = cmd_list->count;
+ dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
+ dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
+ &dma_buffer->cmd_iovec);
+ iovec = dma_buffer->xfer.iovec;
+
+ iovec->addr = msm_virt_to_dma(chip,
+ &cmd_list->setup_desc.ce[0]);
+ iovec->size = sizeof(struct sps_command_element) *
+ cmd_list->setup_desc.num_ce;
+ iovec->flags = cmd_list->setup_desc.flags;
+ iovec++;
+ for (n = 0; n < (cmd_list->count - 1); n++) {
+ iovec->addr = msm_virt_to_dma(chip,
+ &cmd_list->cw_desc[n].ce[0]);
+ iovec->size = sizeof(struct sps_command_element) *
+ cmd_list->cw_desc[n].num_ce;
+ iovec->flags = cmd_list->cw_desc[n].flags;
+ iovec++;
+ }
+ mutex_lock(&info->lock);
+ err = msm_nand_get_device(chip->dev);
+ if (err)
+ goto unlock_mutex;
+ /* Submit data descriptors */
+ for (n = 0; n < cwperpage; n++) {
+ err = msm_nand_submit_rw_data_desc(ops,
+ &rw_params, info, n, 0);
+ if (err) {
+ pr_err("Failed to submit data descs %d\n", err);
+ panic("Error in nand driver\n");
+ goto put_dev;
+ }
+ }
+
+ if (ops->mode == MTD_OPS_RAW) {
+ submitted_num_desc = n;
+ } else if (ops->mode == MTD_OPS_AUTO_OOB) {
+ if (ops->datbuf)
+ submitted_num_desc = n;
+ if (ops->oobbuf)
+ submitted_num_desc++;
+ }
+
+ /* Submit command descriptors */
+ err = sps_transfer(info->sps.cmd_pipe.handle,
+ &dma_buffer->xfer);
+ if (err) {
+ pr_err("Failed to submit commands %d\n", err);
+ goto put_dev;
+ }
+
+ err = msm_nand_sps_get_iovec(info->sps.cmd_pipe.handle,
+ info->sps.cmd_pipe.index,
+ dma_buffer->xfer.iovec_count,
+ &iovec_temp);
+ if (err) {
+ pr_err("Failed to get iovec for pipe %d (err:%d)\n",
+ (info->sps.cmd_pipe.index), err);
+ goto put_dev;
+ }
+ err = msm_nand_sps_get_iovec(info->sps.data_cons.handle,
+ info->sps.data_cons.index, submitted_num_desc,
+ &iovec_temp);
+ if (err) {
+ pr_err("Failed to get iovec for pipe %d (err:%d)\n",
+ (info->sps.data_cons.index), err);
+ goto put_dev;
+ }
+
+ err = msm_nand_put_device(chip->dev);
+ mutex_unlock(&info->lock);
+ if (err)
+ goto free_dma;
+
+ for (n = 0; n < cwperpage; n++)
+ pr_debug("write pg %d: flash_status[%d] = %x\n",
+ rw_params.page, n,
+ dma_buffer->data[n].flash_status);
+
+ /* Check for flash status errors */
+ for (n = 0; n < cwperpage; n++) {
+ flash_sts = dma_buffer->data[n].flash_status;
+ if (flash_sts & (FS_OP_ERR | FS_MPU_ERR)) {
+ pr_err("MPU/OP err (0x%x) set\n", flash_sts);
+ err = -EIO;
+ goto free_dma;
+ }
+ if (n == (cwperpage - 1)) {
+ if (!(flash_sts & FS_DEVICE_WP) ||
+ (flash_sts & FS_DEVICE_STS_ERR)) {
+ pr_err("Dev sts err 0x%x\n", flash_sts);
+ err = -EIO;
+ goto free_dma;
+ }
+ }
+ }
+ pages_written++;
+ rw_params.page++;
+ }
+ goto free_dma;
+put_dev:
+ msm_nand_put_device(chip->dev);
+unlock_mutex:
+ mutex_unlock(&info->lock);
+free_dma:
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+ if (ops->oobbuf)
+ dma_unmap_page(chip->dev, rw_params.oob_dma_addr,
+ ops->ooblen, DMA_TO_DEVICE);
+ if (ops->datbuf)
+ dma_unmap_page(chip->dev, rw_params.data_dma_addr,
+ ops->len, DMA_TO_DEVICE);
+validate_mtd_params_failed:
+ if (ops->mode != MTD_OPS_RAW)
+ ops->retlen = mtd->writesize * pages_written;
+ else
+ ops->retlen = (mtd->writesize + mtd->oobsize) * pages_written;
+
+ ops->oobretlen = ops->ooblen - rw_params.oob_len_data;
+ if (err)
+ pr_err("to %llx datalen %x ooblen %x failed with err %d\n",
+ to, ops->len, ops->ooblen, err);
+ pr_debug("ret %d, retlen %d oobretlen %d\n",
+ err, ops->retlen, ops->oobretlen);
+
+ pr_debug("================================================\n");
+ return err;
+}
+
+/*
+ * Function that gets called from upper layers such as MTD/YAFFS2 to write a
+ * page with only main data.
+ */
+static int msm_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ int ret;
+ struct mtd_oob_ops ops;
+ unsigned char *bounce_buf = NULL;
+
+ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.retlen = 0;
+ ops.ooblen = 0;
+ ops.oobbuf = NULL;
+
+ /* partial page writes are not supported */
+ if ((to & (mtd->writesize - 1)) || (len % mtd->writesize)) {
+ ret = -EINVAL;
+ *retlen = ops.retlen;
+ pr_err("%s: partial page writes are not supported\n", __func__);
+ goto out;
+ }
+
+	/*
+	 * Handle writing of a large buffer in vmalloc address
+	 * space that does not fit within a single MMU page.
+	 */
+ if (!virt_addr_valid(buf) && !is_buffer_in_page(buf, len)) {
+ ops.len = mtd->writesize;
+
+ bounce_buf = kmalloc(ops.len, GFP_KERNEL);
+ if (!bounce_buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (;;) {
+ if (!is_buffer_in_page(buf, ops.len)) {
+ memcpy(bounce_buf, buf, ops.len);
+ ops.datbuf = (uint8_t *) bounce_buf;
+ } else {
+ ops.datbuf = (uint8_t *) buf;
+ }
+ ret = msm_nand_write_oob(mtd, to, &ops);
+ if (ret < 0)
+ break;
+
+ len -= mtd->writesize;
+ *retlen += mtd->writesize;
+ if (len == 0)
+ break;
+
+ buf += mtd->writesize;
+ to += mtd->writesize;
+ }
+ kfree(bounce_buf);
+ } else {
+ ops.len = len;
+ ops.datbuf = (uint8_t *)buf;
+ ret = msm_nand_write_oob(mtd, to, &ops);
+ *retlen = ops.retlen;
+ }
+out:
+ return ret;
+}
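+
+/*
+ * Illustrative sketch, not part of this change: the kind of check
+ * is_buffer_in_page() is assumed to perform for the vmalloc paths in
+ * msm_nand_read()/msm_nand_write() above - a buffer may be used
+ * directly only if it does not straddle an MMU page boundary,
+ * otherwise it is staged through the bounce buffer.
+ */
+static inline bool msm_nand_fits_one_page_sketch(const void *buf, size_t len)
+{
+	unsigned long start = (unsigned long)buf;
+
+	return ((start & ~PAGE_MASK) + len) <= PAGE_SIZE;
+}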
+
+/*
+ * Structure that contains NANDc register data for commands required
+ * for Erase operation.
+ */
+struct msm_nand_erase_reg_data {
+ struct msm_nand_common_cfgs cfg;
+ uint32_t exec;
+ uint32_t flash_status;
+ uint32_t clrfstatus;
+ uint32_t clrrstatus;
+};
+
+/*
+ * Function that gets called from upper layers such as MTD/YAFFS2 to erase a
+ * block within NAND device.
+ */
+#define ERASE_CMDS 9
+static int msm_nand_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ int i = 0, err = 0;
+ struct msm_nand_info *info = mtd->priv;
+ struct msm_nand_chip *chip = &info->nand_chip;
+ uint32_t page = 0;
+ struct msm_nand_sps_cmd *cmd, *curr_cmd;
+ struct msm_nand_erase_reg_data data;
+ struct sps_iovec *iovec;
+ struct sps_iovec iovec_temp;
+	/*
+	 * The following 9 commands are required to erase a block -
+	 * flash, addr0, addr1, cfg0, cfg1, exec, flash_status(read),
+	 * flash_status(write), read_status.
+	 */
+ struct {
+ struct sps_transfer xfer;
+ struct sps_iovec cmd_iovec[ERASE_CMDS];
+ struct msm_nand_sps_cmd cmd[ERASE_CMDS];
+ uint32_t flash_status;
+ } *dma_buffer;
+
+ if (mtd->writesize == PAGE_SIZE_2K)
+ page = instr->addr >> 11;
+
+ if (mtd->writesize == PAGE_SIZE_4K)
+ page = instr->addr >> 12;
+
+ if (instr->addr & (mtd->erasesize - 1)) {
+ pr_err("unsupported erase address, 0x%llx\n", instr->addr);
+ err = -EINVAL;
+ goto out;
+ }
+ if (instr->len != mtd->erasesize) {
+ pr_err("unsupported erase len, %lld\n", instr->len);
+ err = -EINVAL;
+ goto out;
+ }
+
+ wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
+ chip, sizeof(*dma_buffer))));
+ cmd = dma_buffer->cmd;
+
+ memset(&data, 0, sizeof(struct msm_nand_erase_reg_data));
+ data.cfg.cmd = MSM_NAND_CMD_BLOCK_ERASE;
+ data.cfg.addr0 = page;
+ data.cfg.addr1 = 0;
+ data.cfg.cfg0 = chip->cfg0 & (~(7 << CW_PER_PAGE));
+ data.cfg.cfg1 = chip->cfg1;
+ data.exec = 1;
+ dma_buffer->flash_status = 0xeeeeeeee;
+ data.clrfstatus = MSM_NAND_RESET_FLASH_STS;
+ data.clrrstatus = MSM_NAND_RESET_READ_STS;
+
+ curr_cmd = cmd;
+ msm_nand_prep_cfg_cmd_desc(info, data.cfg, &curr_cmd);
+
+ cmd = curr_cmd;
+ msm_nand_prep_single_desc(cmd, MSM_NAND_EXEC_CMD(info), WRITE,
+ data.exec, SPS_IOVEC_FLAG_NWD);
+ cmd++;
+
+ msm_nand_prep_single_desc(cmd, MSM_NAND_FLASH_STATUS(info), READ,
+ msm_virt_to_dma(chip, &dma_buffer->flash_status), 0);
+ cmd++;
+
+ msm_nand_prep_single_desc(cmd, MSM_NAND_FLASH_STATUS(info), WRITE,
+ data.clrfstatus, 0);
+ cmd++;
+
+ msm_nand_prep_single_desc(cmd, MSM_NAND_READ_STATUS(info), WRITE,
+ data.clrrstatus,
+ SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_INT);
+ cmd++;
+
+ WARN_ON((cmd - dma_buffer->cmd) > ERASE_CMDS);
+ dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
+ dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
+ dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
+ &dma_buffer->cmd_iovec);
+ iovec = dma_buffer->xfer.iovec;
+
+ for (i = 0; i < dma_buffer->xfer.iovec_count; i++) {
+ iovec->addr = msm_virt_to_dma(chip, &dma_buffer->cmd[i].ce);
+ iovec->size = sizeof(struct sps_command_element);
+ iovec->flags = dma_buffer->cmd[i].flags;
+ iovec++;
+ }
+ mutex_lock(&info->lock);
+ err = msm_nand_get_device(chip->dev);
+ if (err)
+ goto unlock_mutex;
+
+ err = sps_transfer(info->sps.cmd_pipe.handle, &dma_buffer->xfer);
+ if (err) {
+ pr_err("Failed to submit commands %d\n", err);
+ goto put_dev;
+ }
+ err = msm_nand_sps_get_iovec(info->sps.cmd_pipe.handle,
+ info->sps.cmd_pipe.index, dma_buffer->xfer.iovec_count,
+ &iovec_temp);
+ if (err) {
+ pr_err("Failed to get iovec for pipe %d (err: %d)\n",
+ (info->sps.cmd_pipe.index), err);
+ goto put_dev;
+ }
+ err = msm_nand_put_device(chip->dev);
+ if (err)
+ goto unlock_mutex;
+
+ /* Check for flash status errors */
+ if (dma_buffer->flash_status & (FS_OP_ERR |
+ FS_MPU_ERR | FS_DEVICE_STS_ERR)) {
+ pr_err("MPU/OP/DEV err (0x%x) set\n", dma_buffer->flash_status);
+ err = -EIO;
+ }
+ if (!(dma_buffer->flash_status & FS_DEVICE_WP)) {
+ pr_err("Device is write protected\n");
+ err = -EIO;
+ }
+ if (err) {
+ pr_err("Erase failed, 0x%llx\n", instr->addr);
+ instr->fail_addr = instr->addr;
+ instr->state = MTD_ERASE_FAILED;
+ } else {
+ instr->state = MTD_ERASE_DONE;
+ instr->fail_addr = 0xffffffff;
+ mtd_erase_callback(instr);
+ }
+ goto unlock_mutex;
+put_dev:
+ msm_nand_put_device(chip->dev);
+unlock_mutex:
+ mutex_unlock(&info->lock);
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+out:
+ return err;
+}
+
+/*
+ * Structure that contains NANDc register data for commands required
+ * for checking if a block is bad.
+ */
+struct msm_nand_blk_isbad_data {
+ struct msm_nand_common_cfgs cfg;
+ uint32_t ecc_bch_cfg;
+ uint32_t exec;
+ uint32_t read_offset;
+};
+
+/*
+ * Function that gets called from upper layers such as MTD/YAFFS2 to check if
+ * a block is bad. This is done by reading the first page within a block and
+ * checking whether the bad block byte location contains 0xFF or not. If it
+ * doesn't contain 0xFF, then the block is considered bad.
+ */
+#define ISBAD_CMDS 9
+static int msm_nand_block_isbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct msm_nand_info *info = mtd->priv;
+ struct msm_nand_chip *chip = &info->nand_chip;
+ int i = 0, ret = 0, bad_block = 0, submitted_num_desc = 1;
+ uint8_t *buf;
+ uint32_t page = 0, rdata, cwperpage;
+ struct msm_nand_sps_cmd *cmd, *curr_cmd;
+ struct msm_nand_blk_isbad_data data;
+ struct sps_iovec *iovec;
+ struct sps_iovec iovec_temp;
+ /*
+ * The following 9 commands are required to check bad block -
+ * flash, addr0, addr1, cfg0, cfg1, ecc_cfg, read_loc_0,
+ * exec, flash_status(read).
+ */
+ struct {
+ struct sps_transfer xfer;
+ struct sps_iovec cmd_iovec[ISBAD_CMDS];
+ struct msm_nand_sps_cmd cmd[ISBAD_CMDS];
+ uint32_t flash_status;
+ } *dma_buffer;
+
+ if (mtd->writesize == PAGE_SIZE_2K)
+ page = ofs >> 11;
+
+ if (mtd->writesize == PAGE_SIZE_4K)
+ page = ofs >> 12;
+
+ cwperpage = (mtd->writesize >> 9);
+
+ if (ofs > mtd->size) {
+ pr_err("Invalid offset 0x%llx\n", ofs);
+ bad_block = -EINVAL;
+ goto out;
+ }
+ if (ofs & (mtd->erasesize - 1)) {
+ pr_err("unsupported block address, 0x%x\n", (uint32_t)ofs);
+ bad_block = -EINVAL;
+ goto out;
+ }
+
+ wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
+ chip, sizeof(*dma_buffer) + 4)));
+ buf = (uint8_t *)dma_buffer + sizeof(*dma_buffer);
+
+ cmd = dma_buffer->cmd;
+ memset(&data, 0, sizeof(struct msm_nand_blk_isbad_data));
+ data.cfg.cmd = MSM_NAND_CMD_PAGE_READ_ALL;
+ data.cfg.cfg0 = chip->cfg0_raw & ~(7U << CW_PER_PAGE);
+ data.cfg.cfg1 = chip->cfg1_raw;
+
+ if (chip->cfg1 & (1 << WIDE_FLASH))
+ data.cfg.addr0 = (page << 16) |
+ ((chip->cw_size * (cwperpage-1)) >> 1);
+ else
+ data.cfg.addr0 = (page << 16) |
+ (chip->cw_size * (cwperpage-1));
+
+ data.cfg.addr1 = (page >> 16) & 0xff;
+ data.ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
+ data.exec = 1;
+ data.read_offset = (mtd->writesize - (chip->cw_size * (cwperpage-1)));
+ dma_buffer->flash_status = 0xeeeeeeee;
+
+ curr_cmd = cmd;
+ msm_nand_prep_cfg_cmd_desc(info, data.cfg, &curr_cmd);
+
+ cmd = curr_cmd;
+ msm_nand_prep_single_desc(cmd, MSM_NAND_DEV0_ECC_CFG(info), WRITE,
+ data.ecc_bch_cfg, 0);
+ cmd++;
+
+ rdata = (data.read_offset << 0) | (4 << 16) | (1 << 31);
+ msm_nand_prep_single_desc(cmd, MSM_NAND_READ_LOCATION_0(info), WRITE,
+ rdata, 0);
+ cmd++;
+
+ msm_nand_prep_single_desc(cmd, MSM_NAND_EXEC_CMD(info), WRITE,
+ data.exec, SPS_IOVEC_FLAG_NWD);
+ cmd++;
+
+ msm_nand_prep_single_desc(cmd, MSM_NAND_FLASH_STATUS(info), READ,
+ msm_virt_to_dma(chip, &dma_buffer->flash_status),
+ SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_UNLOCK);
+ cmd++;
+
+ WARN_ON(cmd - dma_buffer->cmd > ISBAD_CMDS);
+ dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
+ dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
+ dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
+ &dma_buffer->cmd_iovec);
+ iovec = dma_buffer->xfer.iovec;
+
+ for (i = 0; i < dma_buffer->xfer.iovec_count; i++) {
+ iovec->addr = msm_virt_to_dma(chip, &dma_buffer->cmd[i].ce);
+ iovec->size = sizeof(struct sps_command_element);
+ iovec->flags = dma_buffer->cmd[i].flags;
+ iovec++;
+ }
+ mutex_lock(&info->lock);
+ ret = msm_nand_get_device(chip->dev);
+ if (ret) {
+ mutex_unlock(&info->lock);
+ goto free_dma;
+ }
+ /* Submit data descriptor */
+ ret = sps_transfer_one(info->sps.data_prod.handle,
+ msm_virt_to_dma(chip, buf),
+ 4, NULL, SPS_IOVEC_FLAG_INT);
+
+ if (ret) {
+ pr_err("Failed to submit data desc %d\n", ret);
+ goto put_dev;
+ }
+ /* Submit command descriptor */
+ ret = sps_transfer(info->sps.cmd_pipe.handle, &dma_buffer->xfer);
+ if (ret) {
+ pr_err("Failed to submit commands %d\n", ret);
+ goto put_dev;
+ }
+
+ ret = msm_nand_sps_get_iovec(info->sps.cmd_pipe.handle,
+ info->sps.cmd_pipe.index, dma_buffer->xfer.iovec_count,
+ &iovec_temp);
+ if (ret) {
+ pr_err("Failed to get iovec for pipe %d (ret: %d)\n",
+ (info->sps.cmd_pipe.index), ret);
+ goto put_dev;
+ }
+ ret = msm_nand_sps_get_iovec(info->sps.data_prod.handle,
+ info->sps.data_prod.index, submitted_num_desc,
+ &iovec_temp);
+ if (ret) {
+ pr_err("Failed to get iovec for pipe %d (ret: %d)\n",
+ (info->sps.data_prod.index), ret);
+ goto put_dev;
+ }
+
+ ret = msm_nand_put_device(chip->dev);
+ mutex_unlock(&info->lock);
+ if (ret)
+ goto free_dma;
+
+ /* Check for flash status errors */
+ if (dma_buffer->flash_status & (FS_OP_ERR | FS_MPU_ERR)) {
+ pr_err("MPU/OP err set: %x\n", dma_buffer->flash_status);
+ bad_block = -EIO;
+ goto free_dma;
+ }
+
+ /* Check for bad block marker byte */
+ if (chip->cfg1 & (1 << WIDE_FLASH)) {
+ if (buf[0] != 0xFF || buf[1] != 0xFF)
+ bad_block = 1;
+ } else {
+ if (buf[0] != 0xFF)
+ bad_block = 1;
+ }
+ goto free_dma;
+put_dev:
+ msm_nand_put_device(chip->dev);
+ mutex_unlock(&info->lock);
+free_dma:
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer) + 4);
+out:
+ return ret ? ret : bad_block;
+}
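+
+/*
+ * Illustrative sketch, not part of this change: the marker test used
+ * above. The bad block marker sits in the spare bytes of the last
+ * codeword of the first page, hence read_offset = writesize -
+ * cw_size * (cwperpage - 1); a 16-bit device halves the column address
+ * and checks two marker bytes instead of one. The helper name is
+ * hypothetical.
+ */
+static inline bool msm_nand_marker_is_bad_sketch(const uint8_t *buf,
+						 bool wide_bus)
+{
+	/* a good block reads back 0xFF in the marker byte(s) */
+	return wide_bus ? (buf[0] != 0xFF || buf[1] != 0xFF)
+			: (buf[0] != 0xFF);
+}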
+
+/*
+ * Function that gets called from upper layers such as MTD/YAFFS2 to mark a
+ * block as bad. This is done by writing the first page within the block with
+ * 0s, which sets the bad block byte location to 0 as well.
+ */
+static int msm_nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct mtd_oob_ops ops;
+ int ret;
+ uint8_t *buf;
+ size_t len;
+
+ if (ofs > mtd->size) {
+ pr_err("Invalid offset 0x%llx\n", ofs);
+ ret = -EINVAL;
+ goto out;
+ }
+ if (ofs & (mtd->erasesize - 1)) {
+ pr_err("unsupported block address, 0x%x\n", (uint32_t)ofs);
+ ret = -EINVAL;
+ goto out;
+ }
+ len = mtd->writesize + mtd->oobsize;
+ buf = kzalloc(len, GFP_KERNEL);
+ if (!buf) {
+		pr_err("unable to allocate memory of size 0x%zx\n", len);
+ ret = -ENOMEM;
+ goto out;
+ }
+ ops.mode = MTD_OPS_RAW;
+ ops.len = len;
+ ops.retlen = 0;
+ ops.ooblen = 0;
+ ops.datbuf = buf;
+ ops.oobbuf = NULL;
+ ret = msm_nand_write_oob(mtd, ofs, &ops);
+ kfree(buf);
+out:
+ return ret;
+}
+
+/*
+ * Function that scans for the attached NAND device. This fills out all
+ * the uninitialized function pointers with the defaults. The flash ID is
+ * read and the mtd/chip structures are filled with the appropriate values.
+ */
+static int msm_nand_scan(struct mtd_info *mtd)
+{
+ struct msm_nand_info *info = mtd->priv;
+ struct msm_nand_chip *chip = &info->nand_chip;
+ struct flash_identification *supported_flash = &info->flash_dev;
+ int err = 0;
+ uint32_t i, j, mtd_writesize;
+ uint8_t dev_found = 0, wide_bus;
+ uint32_t manid, devid, devcfg;
+ uint32_t flash_id = 0, flash_id2 = 0;
+ uint8_t id_byte[NAND_MAX_ID_LEN];
+ uint32_t bad_block_byte, spare_bytes;
+ struct nand_flash_dev *flashdev = NULL;
+ struct nand_manufacturers *flashman = NULL;
+
+ /* Probe the Flash device for ONFI compliance */
+ if (!msm_nand_flash_onfi_probe(info)) {
+ dev_found = 1;
+ } else {
+ err = msm_nand_flash_read_id(info, 0, &flash_id, &flash_id2);
+ if (err < 0) {
+ pr_err("Failed to read Flash ID\n");
+ err = -EINVAL;
+ goto out;
+ }
+ manid = id_byte[0] = flash_id & 0xFF;
+ devid = id_byte[1] = (flash_id >> 8) & 0xFF;
+ devcfg = id_byte[3] = (flash_id >> 24) & 0xFF;
+ id_byte[2] = (flash_id >> 16) & 0xFF;
+ id_byte[4] = flash_id2 & 0xFF;
+ id_byte[5] = (flash_id2 >> 8) & 0xFF;
+ id_byte[6] = (flash_id2 >> 16) & 0xFF;
+ id_byte[7] = (flash_id2 >> 24) & 0xFF;
+
+ for (i = 0; !flashman && nand_manuf_ids[i].id; ++i)
+ if (nand_manuf_ids[i].id == manid)
+ flashman = &nand_manuf_ids[i];
+ for (i = 0; !flashdev && nand_flash_ids[i].id; ++i) {
+ /*
+ * If id_len is specified for an entry in the nand ids
+ * array, then at least 4 bytes of the nand id is
+ * present in the nand ids array - use that to identify
+ * the nand device first. If that is not present, only
+ * then fall back to searching the legacy or extended
+ * ids in the nand ids array.
+ * The id_len number of bytes in the nand id read from
+ * the device are checked against those in the nand id
+			 * table for an exact match.
+ */
+ if (nand_flash_ids[i].id_len) {
+ for (j = 0; j < nand_flash_ids[i].id_len; j++) {
+ if (nand_flash_ids[i].id[j] ==
+ id_byte[j])
+ continue;
+ else
+ break;
+ }
+ if (j == nand_flash_ids[i].id_len)
+ flashdev = &nand_flash_ids[i];
+ } else if (!nand_flash_ids[i].id_len &&
+ nand_flash_ids[i].dev_id == devid)
+ flashdev = &nand_flash_ids[i];
+ }
+ if (!flashdev || !flashman) {
+ pr_err("unknown nand flashid=%x manuf=%x devid=%x\n",
+ flash_id, manid, devid);
+ err = -ENOENT;
+ goto out;
+ }
+ dev_found = 1;
+ if (!flashdev->pagesize) {
+ pr_err("missing page size info - extract from NAND ID\n");
+ supported_flash->widebus = devcfg & (1 << 6) ? 1 : 0;
+ supported_flash->pagesize = 1024 << (devcfg & 0x3);
+ supported_flash->blksize = (64 * 1024) <<
+ ((devcfg >> 4) & 0x3);
+ supported_flash->oobsize = (8 << ((devcfg >> 2) & 1)) *
+ (supported_flash->pagesize >> 9);
+ } else {
+ supported_flash->widebus = flashdev->options &
+ NAND_BUSWIDTH_16 ? 1 : 0;
+ supported_flash->pagesize = flashdev->pagesize;
+ supported_flash->blksize = flashdev->erasesize;
+ supported_flash->oobsize = flashdev->oobsize;
+ supported_flash->ecc_correctability =
+ flashdev->ecc.strength_ds;
+ if (!flashdev->ecc.strength_ds)
+ pr_err("num ecc correctable bit not specified and defaults to 4 bit BCH\n");
+ }
+ supported_flash->flash_id = flash_id;
+ supported_flash->density = ((uint64_t)flashdev->chipsize) << 20;
+ }
+
+ if (dev_found) {
+ wide_bus = supported_flash->widebus;
+ mtd->size = supported_flash->density;
+ mtd->writesize = supported_flash->pagesize;
+ mtd->oobsize = supported_flash->oobsize;
+ mtd->erasesize = supported_flash->blksize;
+ mtd->writebufsize = mtd->writesize;
+ mtd_writesize = mtd->writesize;
+
+		/* Check whether the NAND device supports 8-bit ECC */
+ if (supported_flash->ecc_correctability >= 8) {
+ chip->bch_caps = MSM_NAND_CAP_8_BIT_BCH;
+ supported_flash->ecc_capability = 8;
+ } else {
+ chip->bch_caps = MSM_NAND_CAP_4_BIT_BCH;
+ supported_flash->ecc_capability = 4;
+ }
+
+ pr_info("NAND Id: 0x%x Buswidth: %dBits Density: %lld MByte\n",
+ supported_flash->flash_id, (wide_bus) ? 16 : 8,
+ (mtd->size >> 20));
+ pr_info("pagesize: %d Erasesize: %d oobsize: %d (in Bytes)\n",
+ mtd->writesize, mtd->erasesize, mtd->oobsize);
+ pr_info("BCH ECC: %d Bit\n", supported_flash->ecc_capability);
+ }
+
+ chip->cw_size = (chip->bch_caps & MSM_NAND_CAP_8_BIT_BCH) ? 532 : 528;
+ chip->cfg0 = (((mtd_writesize >> 9) - 1) << CW_PER_PAGE)
+ | (516 << UD_SIZE_BYTES)
+ | (0 << DISABLE_STATUS_AFTER_WRITE)
+ | (5 << NUM_ADDR_CYCLES);
+
+ bad_block_byte = (mtd_writesize - (chip->cw_size * (
+ (mtd_writesize >> 9) - 1)) + 1);
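+	/*
+	 * Worked example: for a 2K page with 4-bit BCH (cw_size = 528),
+	 * bad_block_byte = 2048 - (528 * 3) + 1 = 465.
+	 */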
+ chip->cfg1 = (7 << NAND_RECOVERY_CYCLES)
+ | (0 << CS_ACTIVE_BSY)
+ | (bad_block_byte << BAD_BLOCK_BYTE_NUM)
+ | (0 << BAD_BLOCK_IN_SPARE_AREA)
+ | (2 << WR_RD_BSY_GAP)
+ | ((wide_bus ? 1 : 0) << WIDE_FLASH)
+ | (1 << ENABLE_BCH_ECC);
+
+ /*
+ * For 4bit BCH ECC (default ECC), parity bytes = 7(x8) or 8(x16 I/O)
+ * For 8bit BCH ECC, parity bytes = 13 (x8) or 14 (x16 I/O).
+ */
+ chip->ecc_parity_bytes = (chip->bch_caps & MSM_NAND_CAP_8_BIT_BCH) ?
+ (wide_bus ? 14 : 13) : (wide_bus ? 8 : 7);
+
+ spare_bytes = chip->cw_size - (BYTES_512 + chip->ecc_parity_bytes);
+ chip->cfg0_raw = (((mtd_writesize >> 9) - 1) << CW_PER_PAGE)
+ | (5 << NUM_ADDR_CYCLES)
+ | (spare_bytes << SPARE_SIZE_BYTES)
+ | (BYTES_512 << UD_SIZE_BYTES);
+
+ chip->cfg1_raw = (2 << WR_RD_BSY_GAP)
+ | (1 << BAD_BLOCK_IN_SPARE_AREA)
+ | (21 << BAD_BLOCK_BYTE_NUM)
+ | (0 << CS_ACTIVE_BSY)
+ | (7 << NAND_RECOVERY_CYCLES)
+ | ((wide_bus ? 1 : 0) << WIDE_FLASH)
+ | (1 << DEV0_CFG1_ECC_DISABLE);
+
+ chip->ecc_bch_cfg = (0 << ECC_CFG_ECC_DISABLE)
+ | (0 << ECC_SW_RESET)
+ | (516 << ECC_NUM_DATA_BYTES)
+ | (chip->ecc_parity_bytes << ECC_PARITY_SIZE_BYTES)
+ | (1 << ECC_FORCE_CLK_OPEN);
+
+ chip->ecc_cfg_raw = (1 << ECC_FORCE_CLK_OPEN)
+ | (BYTES_512 << ECC_NUM_DATA_BYTES)
+ | (chip->ecc_parity_bytes << ECC_PARITY_SIZE_BYTES)
+ | (0 << ECC_SW_RESET)
+ | (1 << ECC_CFG_ECC_DISABLE);
+
+ if (chip->bch_caps & MSM_NAND_CAP_8_BIT_BCH) {
+ chip->cfg0 |= (wide_bus ? 0 << SPARE_SIZE_BYTES :
+ 2 << SPARE_SIZE_BYTES);
+ chip->ecc_bch_cfg |= (1 << ECC_MODE);
+ chip->ecc_cfg_raw |= (1 << ECC_MODE);
+ } else {
+ chip->cfg0 |= (wide_bus ? 2 << SPARE_SIZE_BYTES :
+ 4 << SPARE_SIZE_BYTES);
+ chip->ecc_bch_cfg |= (0 << ECC_MODE);
+ chip->ecc_cfg_raw |= (0 << ECC_MODE);
+ }
+
+	chip->ecc_buf_cfg = 0x203; /* Number of bytes covered by ECC: 516 */
+
+ pr_info("CFG0: 0x%08x, CFG1: 0x%08x\n"
+ " RAWCFG0: 0x%08x, RAWCFG1: 0x%08x\n"
+ " ECCBUFCFG: 0x%08x, ECCBCHCFG: 0x%08x\n"
+ " RAWECCCFG: 0x%08x, BAD BLOCK BYTE: 0x%08x\n",
+ chip->cfg0, chip->cfg1, chip->cfg0_raw, chip->cfg1_raw,
+ chip->ecc_buf_cfg, chip->ecc_bch_cfg,
+ chip->ecc_cfg_raw, bad_block_byte);
+
+ if (mtd->writesize == 2048)
+ mtd->oobavail = 16;
+ else if (mtd->writesize == 4096)
+ mtd->oobavail = 32;
+ else {
+ pr_err("Unsupported NAND pagesize: 0x%x\n", mtd->writesize);
+ err = -ENODEV;
+ goto out;
+ }
+
+ /* Fill in remaining MTD driver data */
+ mtd->type = MTD_NANDFLASH;
+ mtd->flags = MTD_CAP_NANDFLASH;
+ mtd->_erase = msm_nand_erase;
+ mtd->_block_isbad = msm_nand_block_isbad;
+ mtd->_block_markbad = msm_nand_block_markbad;
+ mtd->_read = msm_nand_read;
+ mtd->_write = msm_nand_write;
+ mtd->_read_oob = msm_nand_read_oob;
+ mtd->_write_oob = msm_nand_write_oob;
+ mtd->owner = THIS_MODULE;
+out:
+ return err;
+}
+
+#define BAM_APPS_PIPE_LOCK_GRP0 0
+#define BAM_APPS_PIPE_LOCK_GRP1 1
+/*
+ * This function allocates, configures, connects an end point and
+ * also registers event notification for an end point. It also allocates
+ * DMA memory for descriptor FIFO of a pipe.
+ */
+static int msm_nand_init_endpoint(struct msm_nand_info *info,
+ struct msm_nand_sps_endpt *end_point,
+ uint32_t pipe_index)
+{
+ int rc = 0;
+ struct sps_pipe *pipe_handle;
+ struct sps_connect *sps_config = &end_point->config;
+ struct sps_register_event *sps_event = &end_point->event;
+
+ pipe_handle = sps_alloc_endpoint();
+ if (!pipe_handle) {
+ pr_err("sps_alloc_endpoint() failed\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ rc = sps_get_config(pipe_handle, sps_config);
+ if (rc) {
+ pr_err("sps_get_config() failed %d\n", rc);
+ goto free_endpoint;
+ }
+
+ if (pipe_index == SPS_DATA_PROD_PIPE_INDEX) {
+ /* READ CASE: source - BAM; destination - system memory */
+ sps_config->source = info->sps.bam_handle;
+ sps_config->destination = SPS_DEV_HANDLE_MEM;
+ sps_config->mode = SPS_MODE_SRC;
+ sps_config->src_pipe_index = pipe_index;
+ } else if (pipe_index == SPS_DATA_CONS_PIPE_INDEX ||
+ pipe_index == SPS_CMD_CONS_PIPE_INDEX) {
+ /* WRITE CASE: source - system memory; destination - BAM */
+ sps_config->source = SPS_DEV_HANDLE_MEM;
+ sps_config->destination = info->sps.bam_handle;
+ sps_config->mode = SPS_MODE_DEST;
+ sps_config->dest_pipe_index = pipe_index;
+ }
+
+ sps_config->options = SPS_O_AUTO_ENABLE | SPS_O_POLL |
+ SPS_O_ACK_TRANSFERS;
+
+ if (pipe_index == SPS_DATA_PROD_PIPE_INDEX ||
+ pipe_index == SPS_DATA_CONS_PIPE_INDEX)
+ sps_config->lock_group = BAM_APPS_PIPE_LOCK_GRP0;
+ else if (pipe_index == SPS_CMD_CONS_PIPE_INDEX)
+ sps_config->lock_group = BAM_APPS_PIPE_LOCK_GRP1;
+
+ /*
+ * Descriptor FIFO is a cyclic FIFO. If SPS_MAX_DESC_NUM descriptors
+ * are allowed to be submitted before we get any ack for any of them,
+ * the descriptor FIFO size should be: (SPS_MAX_DESC_NUM + 1) *
+ * sizeof(struct sps_iovec).
+ */
+ sps_config->desc.size = (SPS_MAX_DESC_NUM + 1) *
+ sizeof(struct sps_iovec);
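+	/*
+	 * For example, with SPS_MAX_DESC_NUM = 64 and an 8-byte struct
+	 * sps_iovec (assumed here), this works out to 65 * 8 = 520 bytes.
+	 */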
+ sps_config->desc.base = dmam_alloc_coherent(info->nand_chip.dev,
+ sps_config->desc.size,
+ &sps_config->desc.phys_base,
+ GFP_KERNEL);
+ if (!sps_config->desc.base) {
+ pr_err("dmam_alloc_coherent() failed for size %x\n",
+ sps_config->desc.size);
+ rc = -ENOMEM;
+ goto free_endpoint;
+ }
+ memset(sps_config->desc.base, 0x00, sps_config->desc.size);
+
+ rc = sps_connect(pipe_handle, sps_config);
+ if (rc) {
+ pr_err("sps_connect() failed %d\n", rc);
+ goto free_endpoint;
+ }
+
+ sps_event->options = SPS_O_EOT;
+ sps_event->mode = SPS_TRIGGER_WAIT;
+ sps_event->user = (void *)info;
+
+ rc = sps_register_event(pipe_handle, sps_event);
+ if (rc) {
+ pr_err("sps_register_event() failed %d\n", rc);
+ goto sps_disconnect;
+ }
+ end_point->index = pipe_index;
+ end_point->handle = pipe_handle;
+ pr_debug("pipe handle 0x%x for pipe %d\n", (uint32_t)pipe_handle,
+ pipe_index);
+ goto out;
+sps_disconnect:
+ sps_disconnect(pipe_handle);
+free_endpoint:
+ sps_free_endpoint(pipe_handle);
+out:
+ return rc;
+}
+
+/* This function disconnects and frees an end point */
+static void msm_nand_deinit_endpoint(struct msm_nand_info *info,
+ struct msm_nand_sps_endpt *end_point)
+{
+ sps_disconnect(end_point->handle);
+ sps_free_endpoint(end_point->handle);
+}
+
+/*
+ * This function registers BAM device and initializes its end points for
+ * the following pipes -
+ * system consumer pipe for data (pipe#0),
+ * system producer pipe for data (pipe#1),
+ * system consumer pipe for commands (pipe#2).
+ */
+static int msm_nand_bam_init(struct msm_nand_info *nand_info)
+{
+ struct sps_bam_props bam = {0};
+ int rc = 0;
+
+ bam.phys_addr = nand_info->bam_phys;
+ bam.virt_addr = nand_info->bam_base;
+ bam.irq = nand_info->bam_irq;
+ /*
+ * NAND device is accessible from both Apps and Modem processor and
+ * thus, NANDc and BAM are shared between both the processors. But BAM
+ * must be enabled and instantiated only once during boot up by
+	 * Trustzone before Modem/Apps is brought out of reset.
+ *
+ * This is indicated to SPS driver on Apps by marking flag
+ * SPS_BAM_MGR_DEVICE_REMOTE. The following are the global
+ * initializations that will be done by Trustzone - Execution
+ * Environment, Pipes assignment to Apps/Modem, Pipe Super groups and
+ * Descriptor summing threshold.
+ *
+ * NANDc BAM device supports 2 execution environments - Modem and Apps
+ * and thus the flag SPS_BAM_MGR_MULTI_EE is set.
+ */
+ bam.manage = SPS_BAM_MGR_DEVICE_REMOTE | SPS_BAM_MGR_MULTI_EE;
+ bam.ipc_loglevel = QPIC_BAM_DEFAULT_IPC_LOGLVL;
+
+ rc = sps_phy2h(bam.phys_addr, &nand_info->sps.bam_handle);
+ if (!rc)
+ goto init_sps_ep;
+ rc = sps_register_bam_device(&bam, &nand_info->sps.bam_handle);
+ if (rc) {
+ pr_err("%s: sps_register_bam_device() failed with %d\n",
+ __func__, rc);
+ goto out;
+ }
+ pr_info("%s: BAM device registered: bam_handle 0x%lx\n",
+ __func__, nand_info->sps.bam_handle);
+init_sps_ep:
+ rc = msm_nand_init_endpoint(nand_info, &nand_info->sps.data_prod,
+ SPS_DATA_PROD_PIPE_INDEX);
+ if (rc)
+ goto out;
+ rc = msm_nand_init_endpoint(nand_info, &nand_info->sps.data_cons,
+ SPS_DATA_CONS_PIPE_INDEX);
+ if (rc)
+ goto deinit_data_prod;
+
+ rc = msm_nand_init_endpoint(nand_info, &nand_info->sps.cmd_pipe,
+ SPS_CMD_CONS_PIPE_INDEX);
+ if (rc)
+ goto deinit_data_cons;
+ goto out;
+deinit_data_cons:
+ msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_cons);
+deinit_data_prod:
+ msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_prod);
+out:
+ return rc;
+}
+
+/*
+ * This function disconnects and frees its end points for all the pipes.
+ * Since the BAM is a shared resource, it is not deregistered as its handle
+ * might still be in use by LCDC.
+ */
+static void msm_nand_bam_free(struct msm_nand_info *nand_info)
+{
+ msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_prod);
+ msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_cons);
+ msm_nand_deinit_endpoint(nand_info, &nand_info->sps.cmd_pipe);
+}
+
+/* This function enables DMA support for the NANDc in BAM mode. */
+static int msm_nand_enable_dma(struct msm_nand_info *info)
+{
+ struct msm_nand_sps_cmd *sps_cmd;
+ struct msm_nand_chip *chip = &info->nand_chip;
+ int ret, submitted_num_desc = 1;
+ struct sps_iovec iovec_temp;
+
+ wait_event(chip->dma_wait_queue,
+ (sps_cmd = msm_nand_get_dma_buffer(chip, sizeof(*sps_cmd))));
+
+ msm_nand_prep_single_desc(sps_cmd, MSM_NAND_CTRL(info), WRITE,
+ (1 << BAM_MODE_EN), SPS_IOVEC_FLAG_INT);
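+	/*
+	 * The descriptor above writes (1 << BAM_MODE_EN), i.e. bit 0, to the
+	 * NANDc control register to switch the controller to BAM mode.
+	 */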
+
+ mutex_lock(&info->lock);
+ ret = msm_nand_get_device(chip->dev);
+ if (ret) {
+ mutex_unlock(&info->lock);
+ goto out;
+ }
+ ret = sps_transfer_one(info->sps.cmd_pipe.handle,
+ msm_virt_to_dma(chip, &sps_cmd->ce),
+ sizeof(struct sps_command_element), NULL,
+ sps_cmd->flags);
+ if (ret) {
+ pr_err("Failed to submit command: %d\n", ret);
+ goto put_dev;
+ }
+ ret = msm_nand_sps_get_iovec(info->sps.cmd_pipe.handle,
+ info->sps.cmd_pipe.index, submitted_num_desc,
+ &iovec_temp);
+ if (ret) {
+ pr_err("Failed to get iovec for pipe %d (ret: %d)\n",
+ (info->sps.cmd_pipe.index), ret);
+ goto put_dev;
+ }
+put_dev:
+ ret = msm_nand_put_device(chip->dev);
+out:
+ mutex_unlock(&info->lock);
+ msm_nand_release_dma_buffer(chip, sps_cmd, sizeof(*sps_cmd));
+ return ret;
+}
+
+static int msm_nand_parse_smem_ptable(int *nr_parts)
+{
+ uint32_t i, j;
+ uint32_t len = FLASH_PTABLE_HDR_LEN;
+ struct flash_partition_entry *pentry;
+ char *delimiter = ":";
+ void *temp_ptable = NULL;
+
+ pr_info("Parsing partition table info from SMEM\n");
+ temp_ptable = smem_get_entry(SMEM_AARM_PARTITION_TABLE, &len, 0,
+ SMEM_ANY_HOST_FLAG);
+
+ if (!temp_ptable) {
+ pr_err("Error reading partition table header\n");
+ goto out;
+ }
+
+ /* Read only the header portion of ptable */
+ ptable = *(struct flash_partition_table *)temp_ptable;
+
+ /* Verify ptable magic */
+ if (ptable.magic1 != FLASH_PART_MAGIC1 ||
+ ptable.magic2 != FLASH_PART_MAGIC2) {
+ pr_err("Partition table magic verification failed\n");
+ goto out;
+ }
+	/* Ensure that the number of partitions does not exceed the max allocated */
+ if (ptable.numparts > FLASH_PTABLE_MAX_PARTS_V4) {
+ pr_err("Partition numbers exceed the max limit\n");
+ goto out;
+ }
+ /* Find out length of partition data based on table version. */
+ if (ptable.version <= FLASH_PTABLE_V3) {
+ len = FLASH_PTABLE_HDR_LEN + FLASH_PTABLE_MAX_PARTS_V3 *
+ sizeof(struct flash_partition_entry);
+ } else if (ptable.version == FLASH_PTABLE_V4) {
+ len = FLASH_PTABLE_HDR_LEN + FLASH_PTABLE_MAX_PARTS_V4 *
+ sizeof(struct flash_partition_entry);
+ } else {
+ pr_err("Unknown ptable version (%d)", ptable.version);
+ goto out;
+ }
+
+ *nr_parts = ptable.numparts;
+
+ /*
+ * Now that the partition table header has been parsed, verified
+ * and the length of the partition table calculated, read the
+ * complete partition table.
+ */
+ temp_ptable = smem_get_entry(SMEM_AARM_PARTITION_TABLE, &len, 0,
+ SMEM_ANY_HOST_FLAG);
+ if (!temp_ptable) {
+ pr_err("Error reading partition table\n");
+ goto out;
+ }
+
+	/* Read the complete partition table */
+ ptable = *(struct flash_partition_table *)temp_ptable;
+
+ for (i = 0; i < ptable.numparts; i++) {
+ pentry = &ptable.part_entry[i];
+ if (pentry->name[0] == '\0')
+ continue;
+ /* Convert name to lower case and discard the initial chars */
+ mtd_part[i].name = pentry->name;
+ for (j = 0; j < strlen(mtd_part[i].name); j++)
+ *(mtd_part[i].name + j) =
+ tolower(*(mtd_part[i].name + j));
+ strsep(&(mtd_part[i].name), delimiter);
+ mtd_part[i].offset = pentry->offset;
+ mtd_part[i].mask_flags = pentry->attr;
+ mtd_part[i].size = pentry->length;
+ pr_debug("%d: %s offs=0x%08x size=0x%08x attr:0x%08x\n",
+ i, pentry->name, pentry->offset, pentry->length,
+ pentry->attr);
+ }
+ pr_info("SMEM partition table found: ver: %d len: %d\n",
+ ptable.version, ptable.numparts);
+ return 0;
+out:
+ return -EINVAL;
+}
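+/*
+ * For illustration: an SMEM entry named "0:SYSTEM" (a hypothetical name) is
+ * first lower-cased to "0:system"; strsep() above then advances the name
+ * pointer past the ":" so the partition is registered simply as "system".
+ */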
+
+#define BOOT_DEV_MASK 0x1E
+#define BOOT_DEV_NAND 0x4
+
+/*
+ * This function gets called when a device named msm-nand is added to the
+ * device tree .dts file with all its resources such as the physical
+ * addresses for NANDc and BAM, and the BAM IRQ.
+ *
+ * It also expects the NAND flash partition information to be passed in .dts
+ * file so that it can parse the partitions by calling MTD function
+ * mtd_device_parse_register().
+ *
+ */
+static int msm_nand_probe(struct platform_device *pdev)
+{
+ struct msm_nand_info *info;
+ struct resource *res;
+ int i, err, nr_parts;
+ struct device *dev;
+ u32 adjustment_offset;
+ void __iomem *boot_cfg_base;
+ u32 boot_dev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "boot_cfg");
+ if (res && res->start) {
+ boot_cfg_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!boot_cfg_base) {
+ pr_err("ioremap() failed for addr 0x%x size 0x%x\n",
+ res->start, resource_size(res));
+ return -ENOMEM;
+ }
+ boot_dev = (readl_relaxed(boot_cfg_base) & BOOT_DEV_MASK) >> 1;
+ if (boot_dev != BOOT_DEV_NAND) {
+ pr_err("disabling nand as boot device (%x) is not NAND\n",
+ boot_dev);
+ return -ENODEV;
+ }
+ }
+ /*
+ * The partition information can also be passed from kernel command
+ * line. Also, the MTD core layer supports adding the whole device as
+ * one MTD device when no partition information is available at all.
+ */
+ info = devm_kzalloc(&pdev->dev, sizeof(struct msm_nand_info),
+ GFP_KERNEL);
+ if (!info) {
+ err = -ENOMEM;
+ goto out;
+ }
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "nand_phys");
+ if (!res || !res->start) {
+ pr_err("NAND phys address range is not provided\n");
+ err = -ENODEV;
+ goto out;
+ }
+ info->nand_phys = res->start;
+
+ err = of_property_read_u32(pdev->dev.of_node,
+ "qcom,reg-adjustment-offset",
+ &adjustment_offset);
+ if (err) {
+ pr_err("adjustment_offset not found, err = %d\n", err);
+ WARN_ON(1);
+ return err;
+ }
+
+ info->nand_phys_adjusted = info->nand_phys + adjustment_offset;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "bam_phys");
+ if (!res || !res->start) {
+ pr_err("BAM phys address range is not provided\n");
+ err = -ENODEV;
+ goto out;
+ }
+ info->bam_phys = res->start;
+ info->bam_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!info->bam_base) {
+ pr_err("BAM ioremap() failed for addr 0x%x size 0x%x\n",
+ res->start, resource_size(res));
+ err = -ENOMEM;
+ goto out;
+ }
+
+ info->bam_irq = platform_get_irq_byname(pdev, "bam_irq");
+ if (info->bam_irq < 0) {
+ pr_err("BAM IRQ is not provided\n");
+ err = -ENODEV;
+ goto out;
+ }
+
+ info->mtd.name = dev_name(&pdev->dev);
+ info->mtd.priv = info;
+ info->mtd.owner = THIS_MODULE;
+ info->nand_chip.dev = &pdev->dev;
+ init_waitqueue_head(&info->nand_chip.dma_wait_queue);
+ mutex_init(&info->lock);
+
+ dev = &pdev->dev;
+ if (dma_supported(dev, DMA_BIT_MASK(32))) {
+ info->dma_mask = DMA_BIT_MASK(32);
+ dev->coherent_dma_mask = info->dma_mask;
+ }
+
+ info->nand_chip.dma_virt_addr =
+ dmam_alloc_coherent(&pdev->dev, MSM_NAND_DMA_BUFFER_SIZE,
+ &info->nand_chip.dma_phys_addr, GFP_KERNEL);
+ if (!info->nand_chip.dma_virt_addr) {
+ pr_err("No memory for DMA buffer size %x\n",
+ MSM_NAND_DMA_BUFFER_SIZE);
+ err = -ENOMEM;
+ goto out;
+ }
+ err = msm_nand_bus_register(pdev, info);
+ if (err)
+ goto out;
+ info->clk_data.qpic_clk = devm_clk_get(&pdev->dev, "core_clk");
+ if (!IS_ERR_OR_NULL(info->clk_data.qpic_clk)) {
+ err = clk_set_rate(info->clk_data.qpic_clk,
+ MSM_NAND_BUS_VOTE_MAX_RATE);
+ } else {
+ err = PTR_ERR(info->clk_data.qpic_clk);
+ pr_err("Failed to get clock handle, err=%d\n", err);
+ }
+ if (err)
+ goto bus_unregister;
+
+ err = msm_nand_setup_clocks_and_bus_bw(info, true);
+ if (err)
+ goto bus_unregister;
+ dev_set_drvdata(&pdev->dev, info);
+ err = pm_runtime_set_active(&pdev->dev);
+ if (err)
+ pr_err("pm_runtime_set_active() failed with error %d", err);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_NAND_IDLE_TIMEOUT);
+
+ err = msm_nand_bam_init(info);
+ if (err) {
+ pr_err("msm_nand_bam_init() failed %d\n", err);
+ goto clk_rpm_disable;
+ }
+ err = msm_nand_enable_dma(info);
+ if (err) {
+ pr_err("Failed to enable DMA in NANDc\n");
+ goto free_bam;
+ }
+ err = msm_nand_parse_smem_ptable(&nr_parts);
+ if (err < 0) {
+ pr_err("Failed to parse partition table in SMEM\n");
+ goto free_bam;
+ }
+ if (msm_nand_scan(&info->mtd)) {
+ pr_err("No nand device found\n");
+ err = -ENXIO;
+ goto free_bam;
+ }
+ for (i = 0; i < nr_parts; i++) {
+ mtd_part[i].offset *= info->mtd.erasesize;
+ mtd_part[i].size *= info->mtd.erasesize;
+ }
+ err = mtd_device_parse_register(&info->mtd, NULL, NULL,
+ &mtd_part[0], nr_parts);
+ if (err < 0) {
+ pr_err("Unable to register MTD partitions %d\n", err);
+ goto free_bam;
+ }
+
+ pr_info("NANDc phys addr 0x%lx, BAM phys addr 0x%lx, BAM IRQ %d\n",
+ info->nand_phys, info->bam_phys, info->bam_irq);
+ pr_info("Allocated DMA buffer at virt_addr 0x%pK, phys_addr 0x%x\n",
+ info->nand_chip.dma_virt_addr, info->nand_chip.dma_phys_addr);
+ goto out;
+free_bam:
+ msm_nand_bam_free(info);
+clk_rpm_disable:
+ msm_nand_setup_clocks_and_bus_bw(info, false);
+ pm_runtime_disable(&(pdev)->dev);
+ pm_runtime_set_suspended(&(pdev)->dev);
+bus_unregister:
+ msm_nand_bus_unregister(info);
+out:
+ return err;
+}
+
+/*
+ * Remove callback that gets called when the driver/device msm-nand
+ * is removed.
+ */
+static int msm_nand_remove(struct platform_device *pdev)
+{
+ struct msm_nand_info *info = dev_get_drvdata(&pdev->dev);
+
+ if (pm_runtime_suspended(&(pdev)->dev))
+ pm_runtime_resume(&(pdev)->dev);
+
+ pm_runtime_disable(&(pdev)->dev);
+ pm_runtime_set_suspended(&(pdev)->dev);
+
+ dev_set_drvdata(&pdev->dev, NULL);
+
+ if (info) {
+ msm_nand_setup_clocks_and_bus_bw(info, false);
+ if (info->clk_data.client_handle)
+ msm_nand_bus_unregister(info);
+ mtd_device_unregister(&info->mtd);
+ msm_nand_bam_free(info);
+ }
+ return 0;
+}
+
+#define DRIVER_NAME "msm_qpic_nand"
+static const struct of_device_id msm_nand_match_table[] = {
+ { .compatible = "qcom,msm-nand", },
+ {},
+};
+
+static const struct dev_pm_ops msm_nand_pm_ops = {
+ .suspend = msm_nand_suspend,
+ .resume = msm_nand_resume,
+ .runtime_suspend = msm_nand_runtime_suspend,
+ .runtime_resume = msm_nand_runtime_resume,
+};
+
+static struct platform_driver msm_nand_driver = {
+ .probe = msm_nand_probe,
+ .remove = msm_nand_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = msm_nand_match_table,
+ .pm = &msm_nand_pm_ops,
+ },
+};
+
+module_param(enable_euclean, bool, 0644);
+MODULE_PARM_DESC(enable_euclean, "Set this parameter to enable reporting EUCLEAN to upper layer when the correctable bitflips are equal to the max correctable limit.");
+
+module_platform_driver(msm_nand_driver);
+
+MODULE_ALIAS(DRIVER_NAME);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM QPIC NAND flash driver");
diff --git a/drivers/mtd/devices/msm_qpic_nand.h b/drivers/mtd/devices/msm_qpic_nand.h
new file mode 100644
index 0000000..9b6701c
--- /dev/null
+++ b/drivers/mtd/devices/msm_qpic_nand.h
@@ -0,0 +1,403 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __QPIC_NAND_H
+#define __QPIC_NAND_H
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/crc16.h>
+#include <linux/bitrev.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/ctype.h>
+#include <linux/msm-sps.h>
+#include <linux/msm-bus.h>
+#include <soc/qcom/smem.h>
+
+#define PAGE_SIZE_2K 2048
+#define PAGE_SIZE_4K 4096
+
+#undef WRITE /* To avoid redefinition in above header files */
+#undef READ /* To avoid redefinition in above header files */
+#define WRITE 1
+#define READ 0
+
+#define MSM_NAND_IDLE_TIMEOUT 200 /* msecs */
+#define MSM_NAND_BUS_VOTE_MAX_RATE 100000000 /* Hz */
+
+/*
+ * The maximum number of descriptors per transfer (page read/write) won't
+ * exceed 64. For details on what those commands are, refer to the page
+ * read and page write functions in the driver.
+ */
+#define SPS_MAX_DESC_NUM 64
+#define SPS_DATA_CONS_PIPE_INDEX 0
+#define SPS_DATA_PROD_PIPE_INDEX 1
+#define SPS_CMD_CONS_PIPE_INDEX 2
+
+#define msm_virt_to_dma(chip, vaddr) \
+ ((chip)->dma_phys_addr + \
+ ((uint8_t *)(vaddr) - (chip)->dma_virt_addr))
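+/*
+ * Illustrative use (a sketch, not part of the driver): any address inside
+ * the coherent buffer allocated at probe can be resolved, e.g.
+ *
+ *	dma_addr_t ce_addr = msm_virt_to_dma(chip, &cmd->ce);
+ *
+ * where cmd is a struct msm_nand_sps_cmd carved out of that buffer.
+ */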
+
+/*
+ * A single page read/write request typically needs about 1K of DMA memory,
+ * so for a single request this is more than enough.
+ *
+ * But to accommodate multiple clients we allocate 8K of memory. Though only
+ * one client request can be submitted to NANDc at any time, other clients can
+ * still prepare the descriptors while waiting for current client request to
+ * be done. Thus, with a total of 8K, the driver can currently support up
+ * to 7 or 8 clients at a time. A client for which there is no
+ * free DMA memory shall wait on the wait queue until other clients free up
+ * the required memory.
+ */
+#define MSM_NAND_DMA_BUFFER_SIZE SZ_8K
+/*
+ * This defines the granularity at which the buffer management is done. The
+ * total number of slots is based on the size of the atomic_t variable
+ * dma_buffer_busy (number of bits) within the structure msm_nand_chip.
+ */
+#define MSM_NAND_DMA_BUFFER_SLOT_SZ \
+ (MSM_NAND_DMA_BUFFER_SIZE / (sizeof(((atomic_t *)0)->counter) * 8))
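+/*
+ * With an 8K buffer and a 32-bit atomic_t counter this works out to
+ * 8192 / 32 = 256 bytes per slot, i.e. one busy bit covers 256 bytes.
+ */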
+
+/* ONFI (Open NAND Flash Interface) parameters */
+#define MSM_NAND_CFG0_RAW_ONFI_IDENTIFIER 0x88000800
+#define MSM_NAND_CFG0_RAW_ONFI_PARAM_INFO 0x88040000
+#define MSM_NAND_CFG1_RAW_ONFI_IDENTIFIER 0x0005045d
+#define MSM_NAND_CFG1_RAW_ONFI_PARAM_INFO 0x0005045d
+#define ONFI_PARAM_INFO_LENGTH 0x0200
+#define ONFI_PARAM_PAGE_LENGTH 0x0100
+#define ONFI_PARAMETER_PAGE_SIGNATURE 0x49464E4F
+#define FLASH_READ_ONFI_SIGNATURE_ADDRESS 0x20
+#define FLASH_READ_ONFI_PARAMETERS_ADDRESS 0x00
+#define FLASH_READ_DEVICE_ID_ADDRESS 0x00
+
+#define MSM_NAND_RESET_FLASH_STS 0x00000020
+#define MSM_NAND_RESET_READ_STS 0x000000C0
+
+/* QPIC NANDc (NAND Controller) Register Set */
+#define MSM_NAND_REG(info, off) (info->nand_phys + off)
+#define MSM_NAND_REG_ADJUSTED(info, off) (info->nand_phys_adjusted + off)
+#define MSM_NAND_QPIC_VERSION(info) MSM_NAND_REG_ADJUSTED(info, 0x20100)
+#define MSM_NAND_FLASH_CMD(info) MSM_NAND_REG(info, 0x30000)
+#define MSM_NAND_ADDR0(info) MSM_NAND_REG(info, 0x30004)
+#define MSM_NAND_ADDR1(info) MSM_NAND_REG(info, 0x30008)
+#define MSM_NAND_EXEC_CMD(info) MSM_NAND_REG(info, 0x30010)
+#define MSM_NAND_FLASH_STATUS(info) MSM_NAND_REG(info, 0x30014)
+#define FS_OP_ERR (1 << 4)
+#define FS_MPU_ERR (1 << 8)
+#define FS_DEVICE_STS_ERR (1 << 16)
+#define FS_DEVICE_WP (1 << 23)
+
+#define MSM_NAND_BUFFER_STATUS(info) MSM_NAND_REG(info, 0x30018)
+#define BS_UNCORRECTABLE_BIT (1 << 8)
+#define BS_CORRECTABLE_ERR_MSK 0x1F
+
+#define MSM_NAND_DEV0_CFG0(info) MSM_NAND_REG(info, 0x30020)
+#define DISABLE_STATUS_AFTER_WRITE 4
+#define CW_PER_PAGE 6
+#define UD_SIZE_BYTES 9
+#define SPARE_SIZE_BYTES 23
+#define NUM_ADDR_CYCLES 27
+
+#define MSM_NAND_DEV0_CFG1(info) MSM_NAND_REG(info, 0x30024)
+#define DEV0_CFG1_ECC_DISABLE 0
+#define WIDE_FLASH 1
+#define NAND_RECOVERY_CYCLES 2
+#define CS_ACTIVE_BSY 5
+#define BAD_BLOCK_BYTE_NUM 6
+#define BAD_BLOCK_IN_SPARE_AREA 16
+#define WR_RD_BSY_GAP 17
+#define ENABLE_BCH_ECC 27
+
+#define BYTES_512 512
+#define BYTES_516 516
+#define BYTES_517 517
+
+#define MSM_NAND_DEV0_ECC_CFG(info) MSM_NAND_REG(info, 0x30028)
+#define ECC_CFG_ECC_DISABLE 0
+#define ECC_SW_RESET 1
+#define ECC_MODE 4
+#define ECC_PARITY_SIZE_BYTES 8
+#define ECC_NUM_DATA_BYTES 16
+#define ECC_FORCE_CLK_OPEN 30
+
+#define MSM_NAND_READ_ID(info) MSM_NAND_REG(info, 0x30040)
+#define MSM_NAND_READ_STATUS(info) MSM_NAND_REG(info, 0x30044)
+#define MSM_NAND_READ_ID2(info) MSM_NAND_REG(info, 0x30048)
+#define EXTENDED_FETCH_ID BIT(19)
+#define MSM_NAND_DEV_CMD1(info) MSM_NAND_REG(info, 0x300A4)
+#define MSM_NAND_DEV_CMD_VLD(info) MSM_NAND_REG(info, 0x300AC)
+#define MSM_NAND_EBI2_ECC_BUF_CFG(info) MSM_NAND_REG(info, 0x300F0)
+
+#define MSM_NAND_ERASED_CW_DETECT_CFG(info) MSM_NAND_REG(info, 0x300E8)
+#define ERASED_CW_ECC_MASK 1
+#define AUTO_DETECT_RES 0
+#define MASK_ECC (1 << ERASED_CW_ECC_MASK)
+#define RESET_ERASED_DET (1 << AUTO_DETECT_RES)
+#define ACTIVE_ERASED_DET (0 << AUTO_DETECT_RES)
+#define CLR_ERASED_PAGE_DET (RESET_ERASED_DET | MASK_ECC)
+#define SET_ERASED_PAGE_DET (ACTIVE_ERASED_DET | MASK_ECC)
+
+#define MSM_NAND_ERASED_CW_DETECT_STATUS(info) MSM_NAND_REG(info, 0x300EC)
+#define PAGE_ALL_ERASED 7
+#define CODEWORD_ALL_ERASED 6
+#define PAGE_ERASED 5
+#define CODEWORD_ERASED 4
+#define ERASED_PAGE ((1 << PAGE_ALL_ERASED) | (1 << PAGE_ERASED))
+#define ERASED_CW ((1 << CODEWORD_ALL_ERASED) | (1 << CODEWORD_ERASED))
+
+#define MSM_NAND_CTRL(info) MSM_NAND_REG(info, 0x30F00)
+#define BAM_MODE_EN 0
+#define MSM_NAND_VERSION(info) MSM_NAND_REG_ADJUSTED(info, 0x30F08)
+#define MSM_NAND_READ_LOCATION_0(info) MSM_NAND_REG(info, 0x30F20)
+#define MSM_NAND_READ_LOCATION_1(info) MSM_NAND_REG(info, 0x30F24)
+
+/* device commands */
+#define MSM_NAND_CMD_PAGE_READ 0x32
+#define MSM_NAND_CMD_PAGE_READ_ECC 0x33
+#define MSM_NAND_CMD_PAGE_READ_ALL 0x34
+#define MSM_NAND_CMD_PAGE_READ_ONFI 0x35
+#define MSM_NAND_CMD_PRG_PAGE 0x36
+#define MSM_NAND_CMD_PRG_PAGE_ECC 0x37
+#define MSM_NAND_CMD_PRG_PAGE_ALL 0x39
+#define MSM_NAND_CMD_BLOCK_ERASE 0x3A
+#define MSM_NAND_CMD_FETCH_ID 0x0B
+
+/* Version Mask */
+#define MSM_NAND_VERSION_MAJOR_MASK 0xF0000000
+#define MSM_NAND_VERSION_MAJOR_SHIFT 28
+#define MSM_NAND_VERSION_MINOR_MASK 0x0FFF0000
+#define MSM_NAND_VERSION_MINOR_SHIFT 16
+
+#define CMD SPS_IOVEC_FLAG_CMD
+#define CMD_LCK (CMD | SPS_IOVEC_FLAG_LOCK)
+#define INT SPS_IOVEC_FLAG_INT
+#define INT_UNLCK (INT | SPS_IOVEC_FLAG_UNLOCK)
+#define CMD_INT_UNLCK (CMD | INT_UNLCK)
+#define NWD SPS_IOVEC_FLAG_NWD
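+/*
+ * e.g. CMD_INT_UNLCK expands to CMD | INT | SPS_IOVEC_FLAG_UNLOCK, i.e. a
+ * command element that interrupts on completion and releases the pipe lock.
+ */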
+
+/* Structure that defines a NAND SPS command element */
+struct msm_nand_sps_cmd {
+ struct sps_command_element ce;
+ uint32_t flags;
+};
+
+struct msm_nand_cmd_setup_desc {
+ struct sps_command_element ce[11];
+ uint32_t flags;
+ uint32_t num_ce;
+};
+
+struct msm_nand_cmd_cw_desc {
+ struct sps_command_element ce[3];
+ uint32_t flags;
+ uint32_t num_ce;
+};
+
+struct msm_nand_rw_cmd_desc {
+ uint32_t count;
+ struct msm_nand_cmd_setup_desc setup_desc;
+ struct msm_nand_cmd_cw_desc cw_desc[];
+};
+
+/*
+ * Structure that defines the NAND controller properties as per the
+ * NAND flash device/chip that is attached.
+ */
+struct msm_nand_chip {
+ struct device *dev;
+ /*
+ * DMA memory will be allocated only once during probe and this memory
+ * will be used by all NAND clients. This wait queue is needed to
+	 * make the applications wait for DMA memory to be freed when all of
+	 * it is exhausted.
+ */
+ wait_queue_head_t dma_wait_queue;
+ atomic_t dma_buffer_busy;
+ uint8_t *dma_virt_addr;
+ dma_addr_t dma_phys_addr;
+ uint32_t ecc_parity_bytes;
+ uint32_t bch_caps; /* Controller BCH ECC capabilities */
+#define MSM_NAND_CAP_4_BIT_BCH (1 << 0)
+#define MSM_NAND_CAP_8_BIT_BCH (1 << 1)
+ uint32_t cw_size;
+ /* NANDc register configurations */
+ uint32_t cfg0, cfg1, cfg0_raw, cfg1_raw;
+ uint32_t ecc_buf_cfg;
+ uint32_t ecc_bch_cfg;
+ uint32_t ecc_cfg_raw;
+};
+
+/* Structure that defines an SPS end point for a NANDc BAM pipe. */
+struct msm_nand_sps_endpt {
+ struct sps_pipe *handle;
+ struct sps_connect config;
+ struct sps_register_event event;
+ struct completion completion;
+ uint32_t index;
+};
+
+/*
+ * Structure that defines NANDc SPS data - BAM handle and an end point
+ * for each BAM pipe.
+ */
+struct msm_nand_sps_info {
+ unsigned long bam_handle;
+ struct msm_nand_sps_endpt data_prod;
+ struct msm_nand_sps_endpt data_cons;
+ struct msm_nand_sps_endpt cmd_pipe;
+};
+
+/*
+ * Structure that contains flash device information. This gets updated after
+ * the NAND flash device detection.
+ */
+struct flash_identification {
+ uint32_t flash_id;
+ uint64_t density;
+ uint32_t widebus;
+ uint32_t pagesize;
+ uint32_t blksize;
+ uint32_t oobsize;
+ uint32_t ecc_correctability;
+ uint32_t ecc_capability; /* Set based on the ECC capability selected. */
+};
+
+struct msm_nand_clk_data {
+ struct clk *qpic_clk;
+ struct msm_bus_scale_pdata *use_cases;
+ uint32_t client_handle;
+ atomic_t clk_enabled;
+ atomic_t curr_vote;
+};
+
+/* Structure that defines NANDc private data. */
+struct msm_nand_info {
+ struct mtd_info mtd;
+ struct msm_nand_chip nand_chip;
+ struct msm_nand_sps_info sps;
+ unsigned long bam_phys;
+ unsigned long nand_phys;
+ unsigned long nand_phys_adjusted;
+ void __iomem *bam_base;
+ int bam_irq;
+ /*
+ * This lock must be acquired before submitting any command or data
+ * descriptors to BAM pipes and must be held until all the submitted
+ * descriptors are processed.
+ *
+	 * This is required to ensure that both command and data descriptors are
+	 * submitted atomically without interruption from other clients,
+	 * when there are requests from more than one client at any time.
+	 * Otherwise, data and command descriptors can be submitted out of
+	 * order for a request, which can cause data corruption.
+ */
+ struct mutex lock;
+ struct flash_identification flash_dev;
+ struct msm_nand_clk_data clk_data;
+ u64 dma_mask;
+};
+
+/* Structure that defines an ONFI parameter page (512B) */
+struct onfi_param_page {
+ uint32_t parameter_page_signature;
+ uint16_t revision_number;
+ uint16_t features_supported;
+ uint16_t optional_commands_supported;
+ uint8_t reserved0[22];
+ uint8_t device_manufacturer[12];
+ uint8_t device_model[20];
+ uint8_t jedec_manufacturer_id;
+ uint16_t date_code;
+ uint8_t reserved1[13];
+ uint32_t number_of_data_bytes_per_page;
+ uint16_t number_of_spare_bytes_per_page;
+ uint32_t number_of_data_bytes_per_partial_page;
+ uint16_t number_of_spare_bytes_per_partial_page;
+ uint32_t number_of_pages_per_block;
+ uint32_t number_of_blocks_per_logical_unit;
+ uint8_t number_of_logical_units;
+ uint8_t number_of_address_cycles;
+ uint8_t number_of_bits_per_cell;
+ uint16_t maximum_bad_blocks_per_logical_unit;
+ uint16_t block_endurance;
+ uint8_t guaranteed_valid_begin_blocks;
+ uint16_t guaranteed_valid_begin_blocks_endurance;
+ uint8_t number_of_programs_per_page;
+ uint8_t partial_program_attributes;
+ uint8_t number_of_bits_ecc_correctability;
+ uint8_t number_of_interleaved_address_bits;
+ uint8_t interleaved_operation_attributes;
+ uint8_t reserved2[13];
+ uint8_t io_pin_capacitance;
+ uint16_t timing_mode_support;
+ uint16_t program_cache_timing_mode_support;
+ uint16_t maximum_page_programming_time;
+ uint16_t maximum_block_erase_time;
+ uint16_t maximum_page_read_time;
+ uint16_t maximum_change_column_setup_time;
+ uint8_t reserved3[23];
+ uint16_t vendor_specific_revision_number;
+ uint8_t vendor_specific[88];
+ uint16_t integrity_crc;
+} __attribute__((__packed__));
+
+#define FLASH_PART_MAGIC1 0x55EE73AA
+#define FLASH_PART_MAGIC2 0xE35EBDDB
+#define FLASH_PTABLE_V3 3
+#define FLASH_PTABLE_V4 4
+#define FLASH_PTABLE_MAX_PARTS_V3 16
+#define FLASH_PTABLE_MAX_PARTS_V4 32
+#define FLASH_PTABLE_HDR_LEN (4*sizeof(uint32_t))
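+/* i.e. 16 bytes: magic1, magic2, version and numparts (see below). */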
+#define FLASH_PTABLE_ENTRY_NAME_SIZE 16
+
+struct flash_partition_entry {
+ char name[FLASH_PTABLE_ENTRY_NAME_SIZE];
+ u32 offset; /* Offset in blocks from beginning of device */
+ u32 length; /* Length of the partition in blocks */
+ u8 attr; /* Flags for this partition */
+};
+
+struct flash_partition_table {
+ u32 magic1;
+ u32 magic2;
+ u32 version;
+ u32 numparts;
+ struct flash_partition_entry part_entry[FLASH_PTABLE_MAX_PARTS_V4];
+};
+
+static struct flash_partition_table ptable;
+
+static struct mtd_partition mtd_part[FLASH_PTABLE_MAX_PARTS_V4];
+
+static inline bool is_buffer_in_page(const void *buf, size_t len)
+{
+ return !(((unsigned long) buf & ~PAGE_MASK) + len > PAGE_SIZE);
+}
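+/*
+ * Example: with a 4K page, a buffer starting at page offset 4000 with
+ * len = 200 crosses a page boundary (4000 + 200 > 4096), so this
+ * returns false.
+ */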
+#endif /* __QPIC_NAND_H */
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index fccdd49..c62923b 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -617,7 +617,7 @@
return ret;
}
-int mtd_add_partition(struct mtd_info *master, const char *name,
+int mtd_add_partition(struct mtd_info *master, char *name,
long long offset, long long length)
{
struct mtd_partition part;
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
index ede407d..2312412 100644
--- a/drivers/mtd/ofpart.c
+++ b/drivers/mtd/ofpart.c
@@ -112,7 +112,7 @@
partname = of_get_property(pp, "label", &len);
if (!partname)
partname = of_get_property(pp, "name", &len);
- parts[i].name = partname;
+ parts[i].name = (char *)partname;
if (of_get_property(pp, "read-only", &len))
parts[i].mask_flags |= MTD_WRITEABLE;
@@ -186,7 +186,7 @@
if (names && (plen > 0)) {
int len = strlen(names) + 1;
- parts[i].name = names;
+ parts[i].name = (char *)names;
plen -= len;
names += len;
} else {
diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c
index 771a1f9..361d7dd0 100644
--- a/drivers/pci/host/pci-msm.c
+++ b/drivers/pci/host/pci-msm.c
@@ -564,7 +564,7 @@
msm_pcie_dev_tbl[MAX_RC_NUM * MAX_DEVICE_NUM];
/* PCIe driver state */
-struct pcie_drv_sta {
+static struct pcie_drv_sta {
u32 rc_num;
struct mutex drv_lock;
} pcie_drv;
@@ -690,14 +690,14 @@
/* resources */
static const struct msm_pcie_res_info_t msm_pcie_res_info[MSM_PCIE_MAX_RES] = {
- {"parf", 0, 0},
- {"phy", 0, 0},
- {"dm_core", 0, 0},
- {"elbi", 0, 0},
- {"conf", 0, 0},
- {"io", 0, 0},
- {"bars", 0, 0},
- {"tcsr", 0, 0}
+ {"parf", NULL, NULL},
+ {"phy", NULL, NULL},
+ {"dm_core", NULL, NULL},
+ {"elbi", NULL, NULL},
+ {"conf", NULL, NULL},
+ {"io", NULL, NULL},
+ {"bars", NULL, NULL},
+ {"tcsr", NULL, NULL}
};
/* irqs */
@@ -763,14 +763,14 @@
}
#endif
-static inline void msm_pcie_write_reg(void *base, u32 offset, u32 value)
+static inline void msm_pcie_write_reg(void __iomem *base, u32 offset, u32 value)
{
writel_relaxed(value, base + offset);
/* ensure that changes propagated to the hardware */
wmb();
}
-static inline void msm_pcie_write_reg_field(void *base, u32 offset,
+static inline void msm_pcie_write_reg_field(void __iomem *base, u32 offset,
const u32 mask, u32 val)
{
u32 shift = find_first_bit((void *)&mask, 32);
@@ -976,7 +976,7 @@
int i, j;
u32 val = 0;
u32 *shadow;
- void *cfg = dev->conf;
+ void __iomem *cfg = dev->conf;
for (i = 0; i < MAX_DEVICE_NUM; i++) {
if (!rc && !dev->pcidev_table[i].bdf)
@@ -1764,7 +1764,7 @@
return count;
}
-const struct file_operations msm_pcie_cmd_debug_ops = {
+static const struct file_operations msm_pcie_cmd_debug_ops = {
.write = msm_pcie_cmd_debug,
};
@@ -1807,7 +1807,7 @@
return count;
}
-const struct file_operations msm_pcie_rc_sel_ops = {
+static const struct file_operations msm_pcie_rc_sel_ops = {
.write = msm_pcie_set_rc_sel,
};
@@ -1865,7 +1865,7 @@
return count;
}
-const struct file_operations msm_pcie_base_sel_ops = {
+static const struct file_operations msm_pcie_base_sel_ops = {
.write = msm_pcie_set_base_sel,
};
@@ -1911,7 +1911,7 @@
return count;
}
-const struct file_operations msm_pcie_linkdown_panic_ops = {
+static const struct file_operations msm_pcie_linkdown_panic_ops = {
.write = msm_pcie_set_linkdown_panic,
};
@@ -1938,7 +1938,7 @@
return count;
}
-const struct file_operations msm_pcie_wr_offset_ops = {
+static const struct file_operations msm_pcie_wr_offset_ops = {
.write = msm_pcie_set_wr_offset,
};
@@ -1965,7 +1965,7 @@
return count;
}
-const struct file_operations msm_pcie_wr_mask_ops = {
+static const struct file_operations msm_pcie_wr_mask_ops = {
.write = msm_pcie_set_wr_mask,
};
static ssize_t msm_pcie_set_wr_value(struct file *file,
@@ -1991,7 +1991,7 @@
return count;
}
-const struct file_operations msm_pcie_wr_value_ops = {
+static const struct file_operations msm_pcie_wr_value_ops = {
.write = msm_pcie_set_wr_value,
};
@@ -2035,7 +2035,7 @@
return count;
}
-const struct file_operations msm_pcie_boot_option_ops = {
+static const struct file_operations msm_pcie_boot_option_ops = {
.write = msm_pcie_set_boot_option,
};
@@ -2091,7 +2091,7 @@
return count;
}
-const struct file_operations msm_pcie_aer_enable_ops = {
+static const struct file_operations msm_pcie_aer_enable_ops = {
.write = msm_pcie_set_aer_enable,
};
@@ -2118,7 +2118,7 @@
return count;
}
-const struct file_operations msm_pcie_corr_counter_limit_ops = {
+static const struct file_operations msm_pcie_corr_counter_limit_ops = {
.write = msm_pcie_set_corr_counter_limit,
};
@@ -2127,14 +2127,14 @@
rc_sel_max = (0x1 << MAX_RC_NUM) - 1;
wr_mask = 0xffffffff;
- dent_msm_pcie = debugfs_create_dir("pci-msm", 0);
+ dent_msm_pcie = debugfs_create_dir("pci-msm", NULL);
if (IS_ERR(dent_msm_pcie)) {
pr_err("PCIe: fail to create the folder for debug_fs.\n");
return;
}
dfile_rc_sel = debugfs_create_file("rc_sel", 0664,
- dent_msm_pcie, 0,
+ dent_msm_pcie, NULL,
&msm_pcie_rc_sel_ops);
if (!dfile_rc_sel || IS_ERR(dfile_rc_sel)) {
pr_err("PCIe: fail to create the file for debug_fs rc_sel.\n");
@@ -2142,7 +2142,7 @@
}
dfile_case = debugfs_create_file("case", 0664,
- dent_msm_pcie, 0,
+ dent_msm_pcie, NULL,
&msm_pcie_cmd_debug_ops);
if (!dfile_case || IS_ERR(dfile_case)) {
pr_err("PCIe: fail to create the file for debug_fs case.\n");
@@ -2150,7 +2150,7 @@
}
dfile_base_sel = debugfs_create_file("base_sel", 0664,
- dent_msm_pcie, 0,
+ dent_msm_pcie, NULL,
&msm_pcie_base_sel_ops);
if (!dfile_base_sel || IS_ERR(dfile_base_sel)) {
pr_err("PCIe: fail to create the file for debug_fs base_sel.\n");
@@ -2158,7 +2158,7 @@
}
dfile_linkdown_panic = debugfs_create_file("linkdown_panic", 0644,
- dent_msm_pcie, 0,
+ dent_msm_pcie, NULL,
&msm_pcie_linkdown_panic_ops);
if (!dfile_linkdown_panic || IS_ERR(dfile_linkdown_panic)) {
pr_err("PCIe: fail to create the file for debug_fs linkdown_panic.\n");
@@ -2166,7 +2166,7 @@
}
dfile_wr_offset = debugfs_create_file("wr_offset", 0664,
- dent_msm_pcie, 0,
+ dent_msm_pcie, NULL,
&msm_pcie_wr_offset_ops);
if (!dfile_wr_offset || IS_ERR(dfile_wr_offset)) {
pr_err("PCIe: fail to create the file for debug_fs wr_offset.\n");
@@ -2174,7 +2174,7 @@
}
dfile_wr_mask = debugfs_create_file("wr_mask", 0664,
- dent_msm_pcie, 0,
+ dent_msm_pcie, NULL,
&msm_pcie_wr_mask_ops);
if (!dfile_wr_mask || IS_ERR(dfile_wr_mask)) {
pr_err("PCIe: fail to create the file for debug_fs wr_mask.\n");
@@ -2182,7 +2182,7 @@
}
dfile_wr_value = debugfs_create_file("wr_value", 0664,
- dent_msm_pcie, 0,
+ dent_msm_pcie, NULL,
&msm_pcie_wr_value_ops);
if (!dfile_wr_value || IS_ERR(dfile_wr_value)) {
pr_err("PCIe: fail to create the file for debug_fs wr_value.\n");
@@ -2190,7 +2190,7 @@
}
dfile_boot_option = debugfs_create_file("boot_option", 0664,
- dent_msm_pcie, 0,
+ dent_msm_pcie, NULL,
&msm_pcie_boot_option_ops);
if (!dfile_boot_option || IS_ERR(dfile_boot_option)) {
pr_err("PCIe: fail to create the file for debug_fs boot_option.\n");
@@ -2198,7 +2198,7 @@
}
dfile_aer_enable = debugfs_create_file("aer_enable", 0664,
- dent_msm_pcie, 0,
+ dent_msm_pcie, NULL,
&msm_pcie_aer_enable_ops);
if (!dfile_aer_enable || IS_ERR(dfile_aer_enable)) {
pr_err("PCIe: fail to create the file for debug_fs aer_enable.\n");
@@ -2206,7 +2206,7 @@
}
dfile_corr_counter_limit = debugfs_create_file("corr_counter_limit",
- 0664, dent_msm_pcie, 0,
+ 0664, dent_msm_pcie, NULL,
&msm_pcie_corr_counter_limit_ops);
if (!dfile_corr_counter_limit || IS_ERR(dfile_corr_counter_limit)) {
pr_err("PCIe: fail to create the file for debug_fs corr_counter_limit.\n");
@@ -2609,7 +2609,7 @@
gpio_free(dev->gpio[i].num);
}
-int msm_pcie_vreg_init(struct msm_pcie_dev_t *dev)
+static int msm_pcie_vreg_init(struct msm_pcie_dev_t *dev)
{
int i, rc = 0;
struct regulator *vreg;
@@ -3229,7 +3229,7 @@
}
}
-void msm_pcie_config_msi_controller(struct msm_pcie_dev_t *dev)
+static void msm_pcie_config_msi_controller(struct msm_pcie_dev_t *dev)
{
int i;
@@ -3638,7 +3638,7 @@
dev->dev_io_res = NULL;
}
-int msm_pcie_enable(struct msm_pcie_dev_t *dev, u32 options)
+static int msm_pcie_enable(struct msm_pcie_dev_t *dev, u32 options)
{
int ret = 0;
uint32_t val;
@@ -3895,7 +3895,7 @@
return ret;
}
-void msm_pcie_disable(struct msm_pcie_dev_t *dev, u32 options)
+static void msm_pcie_disable(struct msm_pcie_dev_t *dev, u32 options)
{
PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
@@ -4721,7 +4721,7 @@
}
}
-void msm_pcie_destroy_irq(unsigned int irq, struct pci_dev *pdev)
+static void msm_pcie_destroy_irq(unsigned int irq, struct pci_dev *pdev)
{
int pos;
struct msi_desc *entry = irq_get_msi_desc(irq);
@@ -5093,7 +5093,7 @@
.map = msm_pcie_msi_map,
};
-int32_t msm_pcie_irq_init(struct msm_pcie_dev_t *dev)
+static int32_t msm_pcie_irq_init(struct msm_pcie_dev_t *dev)
{
int rc;
int msi_start = 0;
@@ -5233,7 +5233,7 @@
return 0;
}
-void msm_pcie_irq_deinit(struct msm_pcie_dev_t *dev)
+static void msm_pcie_irq_deinit(struct msm_pcie_dev_t *dev)
{
PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
@@ -5575,7 +5575,7 @@
msm_pcie_dev[rc_idx].pcidev_table[i].short_bdf = 0;
msm_pcie_dev[rc_idx].pcidev_table[i].sid = 0;
msm_pcie_dev[rc_idx].pcidev_table[i].domain = rc_idx;
- msm_pcie_dev[rc_idx].pcidev_table[i].conf_base = 0;
+ msm_pcie_dev[rc_idx].pcidev_table[i].conf_base = NULL;
msm_pcie_dev[rc_idx].pcidev_table[i].phy_address = 0;
msm_pcie_dev[rc_idx].pcidev_table[i].dev_ctrlstts_offset = 0;
msm_pcie_dev[rc_idx].pcidev_table[i].event_reg = NULL;
@@ -5725,7 +5725,7 @@
},
};
-int __init pcie_init(void)
+static int __init pcie_init(void)
{
int ret = 0, i;
char rc_name[MAX_RC_NAME_LEN];
@@ -5784,7 +5784,7 @@
msm_pcie_dev_tbl[i].short_bdf = 0;
msm_pcie_dev_tbl[i].sid = 0;
msm_pcie_dev_tbl[i].domain = -1;
- msm_pcie_dev_tbl[i].conf_base = 0;
+ msm_pcie_dev_tbl[i].conf_base = NULL;
msm_pcie_dev_tbl[i].phy_address = 0;
msm_pcie_dev_tbl[i].dev_ctrlstts_offset = 0;
msm_pcie_dev_tbl[i].event_reg = NULL;
@@ -5999,7 +5999,7 @@
return ret;
}
-void msm_pcie_fixup_resume(struct pci_dev *dev)
+static void msm_pcie_fixup_resume(struct pci_dev *dev)
{
int ret;
struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
@@ -6022,7 +6022,7 @@
DECLARE_PCI_FIXUP_RESUME(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
msm_pcie_fixup_resume);
-void msm_pcie_fixup_resume_early(struct pci_dev *dev)
+static void msm_pcie_fixup_resume_early(struct pci_dev *dev)
{
int ret;
struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
index f935bab..e8710a6 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
@@ -80,6 +80,9 @@
__stringify(ECM_DISCONNECT),
__stringify(IPA_TETHERING_STATS_UPDATE_STATS),
__stringify(IPA_TETHERING_STATS_UPDATE_NETWORK_STATS),
+ __stringify(IPA_QUOTA_REACH),
+ __stringify(IPA_SSR_BEFORE_SHUTDOWN),
+ __stringify(IPA_SSR_AFTER_POWERUP),
};
const char *ipa_hdr_l2_type_name[] = {
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.h
index 67dd031..4c504f1 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.h
@@ -147,6 +147,9 @@
int rmnet_ipa_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
bool reset);
+int rmnet_ipa_query_tethering_stats_all(
+ struct wan_ioctl_query_tether_stats_all *data);
+
int rmnet_ipa_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data);
int ipa_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req,
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
index 4ef7e1f..4652fc8 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
@@ -878,7 +878,7 @@
void ipa2_set_client(int index, enum ipacm_client_enum client, bool uplink)
{
- if (client >= IPACM_CLIENT_MAX || client < IPACM_CLIENT_USB) {
+ if (client > IPACM_CLIENT_MAX || client < IPACM_CLIENT_USB) {
IPAERR("Bad client number! client =%d\n", client);
} else if (index >= IPA_MAX_NUM_PIPES || index < 0) {
IPAERR("Bad pipe index! index =%d\n", index);
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index bcd602c..29766fb 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -2338,6 +2338,29 @@
.remove = ipa_wwan_remove,
};
+/**
+ * rmnet_ipa_send_ssr_notification(bool ssr_done) - send SSR notification
+ *
+ * This function sends the SSR notification, before modem shutdown or
+ * after power-up from the SSR framework, to the user-space module.
+ */
+static void rmnet_ipa_send_ssr_notification(bool ssr_done)
+{
+ struct ipa_msg_meta msg_meta;
+ int rc;
+
+ memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+ if (ssr_done)
+ msg_meta.msg_type = IPA_SSR_AFTER_POWERUP;
+ else
+ msg_meta.msg_type = IPA_SSR_BEFORE_SHUTDOWN;
+ rc = ipa_send_msg(&msg_meta, NULL, NULL);
+ if (rc) {
+ IPAWANERR("ipa_send_msg failed: %d\n", rc);
+ return;
+ }
+}
+
static int ssr_notifier_cb(struct notifier_block *this,
unsigned long code,
void *data)
@@ -2345,6 +2368,8 @@
if (ipa_rmnet_ctx.ipa_rmnet_ssr) {
if (code == SUBSYS_BEFORE_SHUTDOWN) {
pr_info("IPA received MPSS BEFORE_SHUTDOWN\n");
+ /* send SSR before-shutdown notification to IPACM */
+ rmnet_ipa_send_ssr_notification(false);
atomic_set(&is_ssr, 1);
ipa_q6_pre_shutdown_cleanup();
if (ipa_netdevs[0])
@@ -2520,6 +2545,26 @@
}
/**
+ * rmnet_ipa_send_quota_reach_ind() - send quota_reach notification from
+ * IPA Modem
+ * This function forwards the quota_reach indication from the IPA Modem
+ * driver, received via QMI, to the user-space module.
+ */
+static void rmnet_ipa_send_quota_reach_ind(void)
+{
+ struct ipa_msg_meta msg_meta;
+ int rc;
+
+ memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+ msg_meta.msg_type = IPA_QUOTA_REACH;
+ rc = ipa_send_msg(&msg_meta, NULL, NULL);
+ if (rc) {
+ IPAWANERR("ipa_send_msg failed: %d\n", rc);
+ return;
+ }
+}
+
+/**
* rmnet_ipa_poll_tethering_stats() - Tethering stats polling IOCTL handler
* @data - IOCTL data
*
@@ -2808,10 +2853,6 @@
kfree(req);
kfree(resp);
return rc;
- } else if (reset) {
- kfree(req);
- kfree(resp);
- return 0;
}
if (resp->dl_dst_pipe_stats_list_valid) {
@@ -2947,6 +2988,49 @@
return rc;
}
+int rmnet_ipa_query_tethering_stats_all(
+ struct wan_ioctl_query_tether_stats_all *data)
+{
+ struct wan_ioctl_query_tether_stats tether_stats;
+ enum ipa_upstream_type upstream_type;
+ int rc = 0;
+
+ memset(&tether_stats, 0, sizeof(struct wan_ioctl_query_tether_stats));
+ /* get IPA backhaul type */
+ upstream_type = find_upstream_type(data->upstreamIface);
+
+ if (upstream_type == IPA_UPSTEAM_MAX) {
+ IPAWANERR(" Wrong upstreamIface name %s\n",
+ data->upstreamIface);
+ } else if (upstream_type == IPA_UPSTEAM_WLAN) {
+ IPAWANDBG_LOW(" query wifi-backhaul stats\n");
+ rc = rmnet_ipa_query_tethering_stats_wifi(
+ &tether_stats, data->reset_stats);
+ if (rc) {
+ IPAWANERR("wlan WAN_IOC_QUERY_TETHER_STATS failed\n");
+ return rc;
+ }
+ data->tx_bytes = tether_stats.ipv4_tx_bytes
+ + tether_stats.ipv6_tx_bytes;
+ data->rx_bytes = tether_stats.ipv4_rx_bytes
+ + tether_stats.ipv6_rx_bytes;
+ } else {
+ IPAWANDBG_LOW(" query modem-backhaul stats\n");
+ tether_stats.ipa_client = data->ipa_client;
+ rc = rmnet_ipa_query_tethering_stats_modem(
+ &tether_stats, data->reset_stats);
+ if (rc) {
+ IPAWANERR("modem WAN_IOC_QUERY_TETHER_STATS failed\n");
+ return rc;
+ }
+ data->tx_bytes = tether_stats.ipv4_tx_bytes
+ + tether_stats.ipv6_tx_bytes;
+ data->rx_bytes = tether_stats.ipv4_rx_bytes
+ + tether_stats.ipv6_rx_bytes;
+ }
+ return rc;
+}
+
int rmnet_ipa_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data)
{
enum ipa_upstream_type upstream_type;
@@ -3048,6 +3132,8 @@
IPAWANERR("putting nlmsg: <%s> <%s> <%s>\n",
alert_msg, iface_name_l, iface_name_m);
kobject_uevent_env(&(ipa_netdevs[0]->dev.kobj), KOBJ_CHANGE, envp);
+
+ rmnet_ipa_send_quota_reach_ind();
}
/**
@@ -3072,6 +3158,9 @@
*/
ipa2_proxy_clk_unvote();
+ /* send SSR power-up notification to IPACM */
+ rmnet_ipa_send_ssr_notification(true);
+
/*
* It is required to recover the network stats after
* SSR recovery
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c
index 436cf21..793529d 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c
@@ -47,6 +47,10 @@
#define WAN_IOC_QUERY_DL_FILTER_STATS32 _IOWR(WAN_IOC_MAGIC, \
WAN_IOCTL_QUERY_DL_FILTER_STATS, \
compat_uptr_t)
+#define WAN_IOC_QUERY_TETHER_STATS_ALL32 _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_QUERY_TETHER_STATS_ALL, \
+ compat_uptr_t)
+
#endif
static unsigned int dev_num = 1;
@@ -242,6 +246,32 @@
}
break;
+ case WAN_IOC_QUERY_TETHER_STATS_ALL:
+ IPAWANDBG_LOW("got WAN_IOC_QUERY_TETHER_STATS_ALL :>>>\n");
+ pyld_sz = sizeof(struct wan_ioctl_query_tether_stats_all);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+
+ if (rmnet_ipa_query_tethering_stats_all(
+ (struct wan_ioctl_query_tether_stats_all *)param)) {
+ IPAWANERR("WAN_IOC_QUERY_TETHER_STATS failed\n");
+ retval = -EFAULT;
+ break;
+ }
+
+ if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
case WAN_IOC_RESET_TETHER_STATS:
IPAWANDBG("device %s got WAN_IOC_RESET_TETHER_STATS :>>>\n",
DRIVER_NAME);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index 1634b1c..2a7b977 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -61,6 +61,9 @@
__stringify(ECM_DISCONNECT),
__stringify(IPA_TETHERING_STATS_UPDATE_STATS),
__stringify(IPA_TETHERING_STATS_UPDATE_NETWORK_STATS),
+ __stringify(IPA_QUOTA_REACH),
+ __stringify(IPA_SSR_BEFORE_SHUTDOWN),
+ __stringify(IPA_SSR_AFTER_POWERUP),
};
const char *ipa3_hdr_l2_type_name[] = {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
index 6cd82f8..d5d8503 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
@@ -190,6 +190,9 @@
int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
bool reset);
+int rmnet_ipa3_query_tethering_stats_all(
+ struct wan_ioctl_query_tether_stats_all *data);
+
int rmnet_ipa3_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data);
int ipa3_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index dbe6cd6..0abe5fe 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -2200,7 +2200,7 @@
void ipa3_set_client(int index, enum ipacm_client_enum client, bool uplink)
{
- if (client >= IPACM_CLIENT_MAX || client < IPACM_CLIENT_USB) {
+ if (client > IPACM_CLIENT_MAX || client < IPACM_CLIENT_USB) {
IPAERR("Bad client number! client =%d\n", client);
} else if (index >= IPA3_MAX_NUM_PIPES || index < 0) {
IPAERR("Bad pipe index! index =%d\n", index);
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index f408f23..fcaabe3 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -2442,6 +2442,29 @@
.remove = ipa3_wwan_remove,
};
+/**
+ * rmnet_ipa_send_ssr_notification() - send SSR notification
+ * @ssr_done: true to send the after-power-up notification, false to send
+ * the before-shutdown notification
+ *
+ * This function sends the SSR before-shutdown and after-power-up
+ * notifications, driven by the SSR framework, to the user-space IPACM
+ * module.
+ */
+static void rmnet_ipa_send_ssr_notification(bool ssr_done)
+{
+ struct ipa_msg_meta msg_meta;
+ int rc;
+
+ memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+ if (ssr_done)
+ msg_meta.msg_type = IPA_SSR_AFTER_POWERUP;
+ else
+ msg_meta.msg_type = IPA_SSR_BEFORE_SHUTDOWN;
+ rc = ipa_send_msg(&msg_meta, NULL, NULL);
+ if (rc) {
+ IPAWANERR("ipa_send_msg failed: %d\n", rc);
+ return;
+ }
+}
+
static int ipa3_ssr_notifier_cb(struct notifier_block *this,
unsigned long code,
void *data)
@@ -2452,6 +2475,8 @@
switch (code) {
case SUBSYS_BEFORE_SHUTDOWN:
IPAWANINFO("IPA received MPSS BEFORE_SHUTDOWN\n");
+ /* send SSR before-shutdown notification to IPACM */
+ rmnet_ipa_send_ssr_notification(false);
atomic_set(&rmnet_ipa3_ctx->is_ssr, 1);
ipa3_q6_pre_shutdown_cleanup();
if (IPA_NETDEV())
@@ -2628,6 +2653,26 @@
}
/**
+ * rmnet_ipa_send_quota_reach_ind() - send quota-reach notification from
+ * the IPA modem
+ *
+ * This function relays the quota-reach indication, received from the IPA
+ * modem driver over QMI, to the user-space IPACM module.
+ */
+static void rmnet_ipa_send_quota_reach_ind(void)
+{
+ struct ipa_msg_meta msg_meta;
+ int rc;
+
+ memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+ msg_meta.msg_type = IPA_QUOTA_REACH;
+ rc = ipa_send_msg(&msg_meta, NULL, NULL);
+ if (rc) {
+ IPAWANERR("ipa_send_msg failed: %d\n", rc);
+ return;
+ }
+}
+
+/**
* rmnet_ipa3_poll_tethering_stats() - Tethering stats polling IOCTL handler
* @data - IOCTL data
*
@@ -2908,7 +2953,7 @@
IPAWANERR("reset the pipe stats\n");
} else {
/* print tethered-client enum */
- IPAWANDBG_LOW("Tethered-client enum(%d)\n", data->ipa_client);
+ IPAWANDBG("Tethered-client enum(%d)\n", data->ipa_client);
}
rc = ipa3_qmi_get_data_stats(req, resp);
@@ -2917,10 +2962,6 @@
kfree(req);
kfree(resp);
return rc;
- } else if (reset) {
- kfree(req);
- kfree(resp);
- return 0;
}
if (resp->dl_dst_pipe_stats_list_valid) {
@@ -3058,6 +3099,49 @@
return rc;
}
+int rmnet_ipa3_query_tethering_stats_all(
+ struct wan_ioctl_query_tether_stats_all *data)
+{
+ struct wan_ioctl_query_tether_stats tether_stats;
+ enum ipa_upstream_type upstream_type;
+ int rc = 0;
+
+ memset(&tether_stats, 0, sizeof(struct wan_ioctl_query_tether_stats));
+ /* get IPA backhaul type */
+ upstream_type = find_upstream_type(data->upstreamIface);
+
+ if (upstream_type == IPA_UPSTEAM_MAX) {
+ IPAWANERR(" Wrong upstreamIface name %s\n",
+ data->upstreamIface);
+ } else if (upstream_type == IPA_UPSTEAM_WLAN) {
+ IPAWANDBG_LOW(" query wifi-backhaul stats\n");
+ rc = rmnet_ipa3_query_tethering_stats_wifi(
+ &tether_stats, data->reset_stats);
+ if (rc) {
+ IPAWANERR("wlan WAN_IOC_QUERY_TETHER_STATS failed\n");
+ return rc;
+ }
+ data->tx_bytes = tether_stats.ipv4_tx_bytes
+ + tether_stats.ipv6_tx_bytes;
+ data->rx_bytes = tether_stats.ipv4_rx_bytes
+ + tether_stats.ipv6_rx_bytes;
+ } else {
+ IPAWANDBG_LOW(" query modem-backhaul stats\n");
+ tether_stats.ipa_client = data->ipa_client;
+ rc = rmnet_ipa3_query_tethering_stats_modem(
+ &tether_stats, data->reset_stats);
+ if (rc) {
+ IPAWANERR("modem WAN_IOC_QUERY_TETHER_STATS failed\n");
+ return rc;
+ }
+ data->tx_bytes = tether_stats.ipv4_tx_bytes
+ + tether_stats.ipv6_tx_bytes;
+ data->rx_bytes = tether_stats.ipv4_rx_bytes
+ + tether_stats.ipv6_rx_bytes;
+ }
+ return rc;
+}
+
int rmnet_ipa3_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data)
{
enum ipa_upstream_type upstream_type;
@@ -3155,6 +3239,8 @@
alert_msg, iface_name_l, iface_name_m);
kobject_uevent_env(&(IPA_NETDEV()->dev.kobj),
KOBJ_CHANGE, envp);
+
+ rmnet_ipa_send_quota_reach_ind();
}
/**
@@ -3179,6 +3265,9 @@
*/
ipa3_proxy_clk_unvote();
+ /* send SSR power-up notification to IPACM */
+ rmnet_ipa_send_ssr_notification(true);
+
/*
* It is required to recover the network stats after
* SSR recovery
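
For context, ipa_send_msg() with a NULL payload delivers just the message
metadata to user space, so IPACM learns of the modem SSR transitions from
msg_type alone. A minimal, hypothetical user-space listener follows,
assuming the usual IPA message read interface (a read() on the IPA device
node returns a struct ipa_msg_meta followed by msg_len payload bytes; the
device path is an assumption, not part of this change):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <linux/msm_ipa.h>

int main(void)
{
	struct ipa_msg_meta meta;
	char buf[512];
	int fd = open("/dev/ipa", O_RDONLY);	/* assumed device node */

	if (fd < 0)
		return 1;
	while (read(fd, buf, sizeof(buf)) >= (ssize_t)sizeof(meta)) {
		memcpy(&meta, buf, sizeof(meta));
		if (meta.msg_type == IPA_SSR_BEFORE_SHUTDOWN)
			printf("IPA: modem shutting down\n");
		else if (meta.msg_type == IPA_SSR_AFTER_POWERUP)
			printf("IPA: modem powered back up\n");
		else if (meta.msg_type == IPA_QUOTA_REACH)
			printf("IPA: data quota reached\n");
	}
	close(fd);
	return 0;
}
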
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
index 3ef17f6..c7a6186 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
@@ -47,6 +47,9 @@
#define WAN_IOC_QUERY_DL_FILTER_STATS32 _IOWR(WAN_IOC_MAGIC, \
WAN_IOCTL_QUERY_DL_FILTER_STATS, \
compat_uptr_t)
+#define WAN_IOC_QUERY_TETHER_STATS_ALL32 _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_QUERY_TETHER_STATS_ALL, \
+ compat_uptr_t)
#endif
static unsigned int dev_num = 1;
@@ -265,6 +268,32 @@
}
break;
+ case WAN_IOC_QUERY_TETHER_STATS_ALL:
+ IPAWANDBG_LOW("got WAN_IOC_QUERY_TETHER_STATS_ALL :>>>\n");
+ pyld_sz = sizeof(struct wan_ioctl_query_tether_stats_all);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (u8 __user *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+
+ if (rmnet_ipa3_query_tethering_stats_all(
+ (struct wan_ioctl_query_tether_stats_all *)param)) {
+ IPAWANERR("WAN_IOC_QUERY_TETHER_STATS failed\n");
+ retval = -EFAULT;
+ break;
+ }
+
+ if (copy_to_user((void __user *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
case WAN_IOC_RESET_TETHER_STATS:
IPAWANDBG_LOW("device %s got WAN_IOC_RESET_TETHER_STATS :>>>\n",
DRIVER_NAME);
diff --git a/drivers/platform/msm/sps/sps.c b/drivers/platform/msm/sps/sps.c
index 907c94e8..fbf8773 100644
--- a/drivers/platform/msm/sps/sps.c
+++ b/drivers/platform/msm/sps/sps.c
@@ -2088,7 +2088,7 @@
unsigned long *dev_handle)
{
struct sps_bam *bam = NULL;
- void *virt_addr = NULL;
+ void __iomem *virt_addr = NULL;
char bam_name[MAX_MSG_LEN];
u32 manage;
int ok;
diff --git a/drivers/platform/msm/sps/sps_bam.h b/drivers/platform/msm/sps/sps_bam.h
index 468c492..7cb0670 100644
--- a/drivers/platform/msm/sps/sps_bam.h
+++ b/drivers/platform/msm/sps/sps_bam.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -202,7 +202,7 @@
/* BAM device state */
u32 state;
struct mutex lock;
- void *base; /* BAM virtual base address */
+ void __iomem *base; /* BAM virtual base address */
u32 version;
spinlock_t isr_lock;
spinlock_t connection_lock;
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 4660e31..9ee1fee 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -868,8 +868,8 @@
* n + 1 TRBs as per GSI h/w requirement. n Xfer TRBs + 1
* LINK TRB.
*/
- ch_info->xfer_ring_len = (request->num_bufs + 1) * 0x10;
- last_trb_index = request->num_bufs + 1;
+ ch_info->xfer_ring_len = (request->num_bufs + 2) * 0x10;
+ last_trb_index = request->num_bufs + 2;
}
/* Store last 16 bits of LINK TRB address as per GSI hw requirement */
@@ -941,13 +941,13 @@
}
/*
-* Rings Doorbell for IN GSI Channel
+* Rings Doorbell for GSI Channel
*
* @usb_ep - pointer to usb_ep instance.
* @request - pointer to GSI request. This is used to pass in the
* address of the GSI doorbell obtained from IPA driver
*/
-static void gsi_ring_in_db(struct usb_ep *ep, struct usb_gsi_request *request)
+static void gsi_ring_db(struct usb_ep *ep, struct usb_gsi_request *request)
{
void __iomem *gsi_dbl_address_lsb;
void __iomem *gsi_dbl_address_msb;
@@ -955,10 +955,11 @@
u64 dbl_addr = *((u64 *)request->buf_base_addr);
u32 dbl_lo_addr = (dbl_addr & 0xFFFFFFFF);
u32 dbl_hi_addr = (dbl_addr >> 32);
- u32 num_trbs = (request->num_bufs * 2 + 2);
struct dwc3_ep *dep = to_dwc3_ep(ep);
struct dwc3 *dwc = dep->dwc;
struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+ int num_trbs = (dep->direction) ? (2 * (request->num_bufs) + 2)
+ : (request->num_bufs + 2);
gsi_dbl_address_lsb = devm_ioremap_nocache(mdwc->dev,
dbl_lo_addr, sizeof(u32));
@@ -971,8 +972,8 @@
dev_dbg(mdwc->dev, "Failed to get GSI DBL address MSB\n");
offset = dwc3_trb_dma_offset(dep, &dep->trb_pool[num_trbs-1]);
- dev_dbg(mdwc->dev, "Writing link TRB addr: %pa to %p (%x)\n",
- &offset, gsi_dbl_address_lsb, dbl_lo_addr);
+ dev_dbg(mdwc->dev, "Writing link TRB addr: %pa to %p (%x) for ep:%s\n",
+ &offset, gsi_dbl_address_lsb, dbl_lo_addr, ep->name);
writel_relaxed(offset, gsi_dbl_address_lsb);
writel_relaxed(0, gsi_dbl_address_msb);
@@ -1042,7 +1043,7 @@
struct dwc3 *dwc = dep->dwc;
struct dwc3_trb *trb;
int num_trbs = (dep->direction) ? (2 * (req->num_bufs) + 2)
- : (req->num_bufs + 1);
+ : (req->num_bufs + 2);
dep->trb_dma_pool = dma_pool_create(ep->name, dwc->sysdev,
num_trbs * sizeof(struct dwc3_trb),
@@ -1103,26 +1104,43 @@
trb = &dep->trb_pool[i];
memset(trb, 0, sizeof(*trb));
- trb->bpl = lower_32_bits(buffer_addr);
- trb->bph = 0;
- trb->size = req->buf_len;
- trb->ctrl = DWC3_TRBCTL_NORMAL | DWC3_TRB_CTRL_IOC
- | DWC3_TRB_CTRL_CSP
- | DWC3_TRB_CTRL_ISP_IMI;
- buffer_addr += req->buf_len;
-
- /* Set up the Link TRB at the end */
- if (i == (num_trbs - 1)) {
+ /* Setup LINK TRB to start with TRB ring */
+ if (i == 0) {
trb->bpl = dwc3_trb_dma_offset(dep,
- &dep->trb_pool[0]);
+ &dep->trb_pool[1]);
+ trb->ctrl = DWC3_TRBCTL_LINK_TRB;
+ } else if (i == (num_trbs - 1)) {
+ /* Set up the Link TRB at the end */
+ trb->bpl = dwc3_trb_dma_offset(dep,
+ &dep->trb_pool[0]);
trb->bph = (1 << 23) | (1 << 21)
| (ep->ep_intr_num << 16);
- trb->size = 0;
trb->ctrl = DWC3_TRBCTL_LINK_TRB
| DWC3_TRB_CTRL_HWO;
+ } else {
+ trb->bpl = lower_32_bits(buffer_addr);
+ trb->size = req->buf_len;
+ buffer_addr += req->buf_len;
+ trb->ctrl = DWC3_TRBCTL_NORMAL
+ | DWC3_TRB_CTRL_IOC
+ | DWC3_TRB_CTRL_CSP
+ | DWC3_TRB_CTRL_ISP_IMI;
}
}
}
+
+ pr_debug("%s: Initialized TRB Ring for %s\n", __func__, dep->name);
+ trb = &dep->trb_pool[0];
+ if (trb) {
+ for (i = 0; i < num_trbs; i++) {
+ pr_debug("TRB(%d): ADDRESS:%lx bpl:%x bph:%x size:%x ctrl:%x\n",
+ i, (unsigned long)dwc3_trb_dma_offset(dep,
+ &dep->trb_pool[i]), trb->bpl, trb->bph,
+ trb->size, trb->ctrl);
+ trb++;
+ }
+ }
+
return 0;
}
@@ -1363,10 +1381,10 @@
ch_info = (struct gsi_channel_info *)op_data;
gsi_get_channel_info(ep, ch_info);
break;
- case GSI_EP_OP_RING_IN_DB:
+ case GSI_EP_OP_RING_DB:
request = (struct usb_gsi_request *)op_data;
- dev_dbg(mdwc->dev, "RING IN EP DB\n");
- gsi_ring_in_db(ep, request);
+ dbg_print(0xFF, "RING_DB", 0, ep->name);
+ gsi_ring_db(ep, request);
break;
case GSI_EP_OP_UPDATEXFER:
request = (struct usb_gsi_request *)op_data;
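
Taken together, the dwc3-msm changes above rework the GSI TRB ring so that
both directions carry an extra TRB: a leading LINK TRB at index 0 pointing
at the first transfer TRB, plus the trailing LINK TRB that wraps back to
index 0. A sketch of the resulting layout and sizing (illustrative only;
the helper name is hypothetical):

/*
 *   TRB[0]        LINK       -> TRB[1]   (new leading link TRB)
 *   TRB[1..n-2]   NORMAL (IOC | CSP | ISP_IMI), one per buffer slot
 *   TRB[n-1]      LINK | HWO -> TRB[0]   (wraps the ring)
 */
static inline int gsi_num_trbs(bool is_in_ep, int num_bufs)
{
	/* mirrors the sizing used in gsi_ring_db() and the TRB pool alloc */
	return is_in_ep ? 2 * num_bufs + 2 : num_bufs + 2;
}
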
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
index 308a49c..4df2dc6 100644
--- a/drivers/usb/gadget/function/f_gsi.c
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -40,6 +40,7 @@
static struct workqueue_struct *ipa_usb_wq;
+static void gsi_rndis_ipa_reset_trigger(struct gsi_data_port *d_port);
static void ipa_disconnect_handler(struct gsi_data_port *d_port);
static int gsi_ctrl_send_notification(struct f_gsi *gsi);
static int gsi_alloc_trb_buffer(struct f_gsi *gsi);
@@ -472,6 +473,7 @@
usb_gsi_ep_op(d_port->in_ep, (void *)&block_db,
GSI_EP_OP_SET_CLR_BLOCK_DBL);
+ /* GSI channel DBL address for USB IN endpoint */
dbl_register_addr = gsi->d_port.in_db_reg_phs_addr_msb;
dbl_register_addr = dbl_register_addr << 32;
dbl_register_addr =
@@ -481,11 +483,18 @@
req.buf_base_addr = &dbl_register_addr;
req.num_bufs = gsi->d_port.in_request.num_bufs;
- usb_gsi_ep_op(gsi->d_port.in_ep, &req, GSI_EP_OP_RING_IN_DB);
+ usb_gsi_ep_op(gsi->d_port.in_ep, &req, GSI_EP_OP_RING_DB);
if (gsi->d_port.out_ep) {
- usb_gsi_ep_op(gsi->d_port.out_ep, &gsi->d_port.out_request,
- GSI_EP_OP_UPDATEXFER);
+ /* GSI channel DBL address for USB OUT endpoint */
+ dbl_register_addr = gsi->d_port.out_db_reg_phs_addr_msb;
+ dbl_register_addr = dbl_register_addr << 32;
+ dbl_register_addr = dbl_register_addr |
+ gsi->d_port.out_db_reg_phs_addr_lsb;
+ /* use temp request to pass 64 bit dbl reg addr and num_bufs */
+ req.buf_base_addr = &dbl_register_addr;
+ req.num_bufs = gsi->d_port.out_request.num_bufs;
+ usb_gsi_ep_op(gsi->d_port.out_ep, &req, GSI_EP_OP_RING_DB);
}
}
@@ -503,14 +512,11 @@
*/
usb_gsi_ep_op(d_port->in_ep, (void *)&block_db,
GSI_EP_OP_SET_CLR_BLOCK_DBL);
- gsi->in_ep_desc_backup = gsi->d_port.in_ep->desc;
usb_gsi_ep_op(gsi->d_port.in_ep, NULL, GSI_EP_OP_DISABLE);
}
- if (gsi->d_port.out_ep) {
- gsi->out_ep_desc_backup = gsi->d_port.out_ep->desc;
+ if (gsi->d_port.out_ep)
usb_gsi_ep_op(gsi->d_port.out_ep, NULL, GSI_EP_OP_DISABLE);
- }
gsi->d_port.net_ready_trigger = false;
}
@@ -616,6 +622,7 @@
struct device *dev;
struct device *gad_dev;
struct f_gsi *gsi = d_port_to_gsi(d_port);
+ bool block_db;
event = read_event(d_port);
@@ -676,28 +683,6 @@
break;
}
- /*
- * Update desc and reconfigure USB GSI OUT and IN
- * endpoint for RNDIS Adaptor enable case.
- */
- if (d_port->out_ep && !d_port->out_ep->desc &&
- gsi->out_ep_desc_backup) {
- d_port->out_ep->desc = gsi->out_ep_desc_backup;
- d_port->out_ep->ep_intr_num = 1;
- log_event_dbg("%s: OUT ep_op_config", __func__);
- usb_gsi_ep_op(d_port->out_ep,
- &d_port->out_request, GSI_EP_OP_CONFIG);
- }
-
- if (d_port->in_ep && !d_port->in_ep->desc &&
- gsi->in_ep_desc_backup) {
- d_port->in_ep->desc = gsi->in_ep_desc_backup;
- d_port->in_ep->ep_intr_num = 2;
- log_event_dbg("%s: IN ep_op_config", __func__);
- usb_gsi_ep_op(d_port->in_ep,
- &d_port->in_request, GSI_EP_OP_CONFIG);
- }
-
ipa_connect_channels(d_port);
ipa_data_path_enable(d_port);
d_port->sm_state = STATE_CONNECTED;
@@ -759,7 +744,15 @@
if (event == EVT_HOST_NRDY) {
log_event_dbg("%s: ST_CON_HOST_NRDY\n",
__func__);
- ipa_disconnect_handler(d_port);
+ block_db = true;
+ /* stop USB from ringing the doorbell to GSI (OUT_EP) */
+ usb_gsi_ep_op(d_port->in_ep, (void *)&block_db,
+ GSI_EP_OP_SET_CLR_BLOCK_DBL);
+ gsi_rndis_ipa_reset_trigger(d_port);
+ usb_gsi_ep_op(d_port->in_ep, NULL,
+ GSI_EP_OP_ENDXFER);
+ usb_gsi_ep_op(d_port->out_ep, NULL,
+ GSI_EP_OP_ENDXFER);
}
ipa_disconnect_work_handler(d_port);
@@ -1385,6 +1378,17 @@
rndis_signal_connect(gsi->params);
}
+static void gsi_rndis_ipa_reset_trigger(struct gsi_data_port *d_port)
+{
+ unsigned long flags;
+ struct f_gsi *gsi = d_port_to_gsi(d_port);
+
+ log_event_dbg("%s: setting net_ready_trigger\n", __func__);
+ spin_lock_irqsave(&d_port->lock, flags);
+ d_port->net_ready_trigger = false;
+ spin_unlock_irqrestore(&d_port->lock, flags);
+}
+
void gsi_rndis_flow_ctrl_enable(bool enable, struct rndis_params *param)
{
struct f_gsi *gsi = param->v;
@@ -2618,7 +2622,7 @@
info.in_req_num_buf = num_in_bufs;
gsi->d_port.out_aggr_size = GSI_ECM_AGGR_SIZE;
info.out_req_buf_len = GSI_OUT_ECM_BUF_LEN;
- info.out_req_num_buf = GSI_ECM_NUM_OUT_BUFFERS;
+ info.out_req_num_buf = num_out_bufs;
info.notify_buf_len = GSI_CTRL_NOTIFY_BUFF_LEN;
/* export host's Ethernet address in CDC format */
diff --git a/drivers/usb/gadget/function/f_gsi.h b/drivers/usb/gadget/function/f_gsi.h
index 43aae8f..0fe3665 100644
--- a/drivers/usb/gadget/function/f_gsi.h
+++ b/drivers/usb/gadget/function/f_gsi.h
@@ -37,8 +37,7 @@
#define GSI_NUM_IN_BUFFERS 15
#define GSI_IN_BUFF_SIZE 2048
-#define GSI_NUM_OUT_BUFFERS 15
-#define GSI_ECM_NUM_OUT_BUFFERS 31
+#define GSI_NUM_OUT_BUFFERS 14
#define GSI_OUT_AGGR_SIZE 24576
#define GSI_IN_RNDIS_AGGR_SIZE 9216
diff --git a/include/dt-bindings/clock/qcom,camcc-sdm845.h b/include/dt-bindings/clock/qcom,camcc-sdm845.h
index e16b69a..7218261 100644
--- a/include/dt-bindings/clock/qcom,camcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,camcc-sdm845.h
@@ -102,26 +102,15 @@
#define CAM_CC_SOC_AHB_CLK 85
#define CAM_CC_SYS_TMR_CLK 86
-#define TITAN_CAM_CC_BPS_BCR 0
-#define TITAN_CAM_CC_CAMNOC_BCR 1
-#define TITAN_CAM_CC_CCI_BCR 2
-#define TITAN_CAM_CC_CPAS_BCR 3
-#define TITAN_CAM_CC_CSI0PHY_BCR 4
-#define TITAN_CAM_CC_CSI1PHY_BCR 5
-#define TITAN_CAM_CC_CSI2PHY_BCR 6
-#define TITAN_CAM_CC_FD_BCR 7
-#define TITAN_CAM_CC_ICP_BCR 8
-#define TITAN_CAM_CC_IFE_0_BCR 9
-#define TITAN_CAM_CC_IFE_1_BCR 10
-#define TITAN_CAM_CC_IFE_LITE_BCR 11
-#define TITAN_CAM_CC_IPE_0_BCR 12
-#define TITAN_CAM_CC_IPE_1_BCR 13
-#define TITAN_CAM_CC_JPEG_BCR 14
-#define TITAN_CAM_CC_LRME_BCR 15
-#define TITAN_CAM_CC_MCLK0_BCR 16
-#define TITAN_CAM_CC_MCLK1_BCR 17
-#define TITAN_CAM_CC_MCLK2_BCR 18
-#define TITAN_CAM_CC_MCLK3_BCR 19
-#define TITAN_CAM_CC_TITAN_TOP_BCR 20
+#define TITAN_CAM_CC_CCI_BCR 0
+#define TITAN_CAM_CC_CPAS_BCR 1
+#define TITAN_CAM_CC_CSI0PHY_BCR 2
+#define TITAN_CAM_CC_CSI1PHY_BCR 3
+#define TITAN_CAM_CC_CSI2PHY_BCR 4
+#define TITAN_CAM_CC_MCLK0_BCR 5
+#define TITAN_CAM_CC_MCLK1_BCR 6
+#define TITAN_CAM_CC_MCLK2_BCR 7
+#define TITAN_CAM_CC_MCLK3_BCR 8
+#define TITAN_CAM_CC_TITAN_TOP_BCR 9
#endif
diff --git a/include/dt-bindings/clock/qcom,dispcc-sdm845.h b/include/dt-bindings/clock/qcom,dispcc-sdm845.h
index 91ea077..42bb59f 100644
--- a/include/dt-bindings/clock/qcom,dispcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,dispcc-sdm845.h
@@ -56,9 +56,6 @@
#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 39
#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 40
-#define DISP_CC_MDSS_CORE_BCR 0
-#define DISP_CC_MDSS_GCC_CLOCKS_BCR 1
-#define DISP_CC_MDSS_RSCC_BCR 2
-#define DISP_CC_MDSS_SPDM_BCR 3
+#define DISP_CC_MDSS_RSCC_BCR 0
#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm845.h b/include/dt-bindings/clock/qcom,gcc-sdm845.h
index f6f4bc3..678a885 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdm845.h
@@ -204,34 +204,33 @@
/* GCC reset clocks */
-#define GCC_GPU_BCR 0
-#define GCC_MMSS_BCR 1
-#define GCC_PCIE_0_BCR 2
-#define GCC_PCIE_1_BCR 3
-#define GCC_PCIE_PHY_BCR 4
-#define GCC_PDM_BCR 5
-#define GCC_PRNG_BCR 6
-#define GCC_QUPV3_WRAPPER_0_BCR 7
-#define GCC_QUPV3_WRAPPER_1_BCR 8
-#define GCC_QUSB2PHY_PRIM_BCR 9
-#define GCC_QUSB2PHY_SEC_BCR 10
-#define GCC_SDCC2_BCR 11
-#define GCC_SDCC4_BCR 12
-#define GCC_TSIF_BCR 13
-#define GCC_UFS_CARD_BCR 14
-#define GCC_UFS_PHY_BCR 15
-#define GCC_USB30_PRIM_BCR 16
-#define GCC_USB30_SEC_BCR 17
-#define GCC_USB3_PHY_PRIM_BCR 18
-#define GCC_USB3PHY_PHY_PRIM_BCR 19
-#define GCC_USB3_DP_PHY_PRIM_BCR 20
-#define GCC_USB3_PHY_SEC_BCR 21
-#define GCC_USB3PHY_PHY_SEC_BCR 22
-#define GCC_USB3_DP_PHY_SEC_BCR 23
-#define GCC_USB_PHY_CFG_AHB2PHY_BCR 24
-#define GCC_PCIE_0_PHY_BCR 25
-#define GCC_PCIE_1_PHY_BCR 26
-#define GCC_SDCC1_BCR 27
+#define GCC_MMSS_BCR 0
+#define GCC_PCIE_0_BCR 1
+#define GCC_PCIE_1_BCR 2
+#define GCC_PCIE_PHY_BCR 3
+#define GCC_PDM_BCR 4
+#define GCC_PRNG_BCR 5
+#define GCC_QUPV3_WRAPPER_0_BCR 6
+#define GCC_QUPV3_WRAPPER_1_BCR 7
+#define GCC_QUSB2PHY_PRIM_BCR 8
+#define GCC_QUSB2PHY_SEC_BCR 9
+#define GCC_SDCC2_BCR 10
+#define GCC_SDCC4_BCR 11
+#define GCC_TSIF_BCR 12
+#define GCC_UFS_CARD_BCR 13
+#define GCC_UFS_PHY_BCR 14
+#define GCC_USB30_PRIM_BCR 15
+#define GCC_USB30_SEC_BCR 16
+#define GCC_USB3_PHY_PRIM_BCR 17
+#define GCC_USB3PHY_PHY_PRIM_BCR 18
+#define GCC_USB3_DP_PHY_PRIM_BCR 19
+#define GCC_USB3_PHY_SEC_BCR 20
+#define GCC_USB3PHY_PHY_SEC_BCR 21
+#define GCC_USB3_DP_PHY_SEC_BCR 22
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 23
+#define GCC_PCIE_0_PHY_BCR 24
+#define GCC_PCIE_1_PHY_BCR 25
+#define GCC_SDCC1_BCR 26
/* Dummy clocks for rate measurement */
#define MEASURE_ONLY_SNOC_CLK 0
diff --git a/include/dt-bindings/clock/qcom,videocc-sdm845.h b/include/dt-bindings/clock/qcom,videocc-sdm845.h
index b362852d..21b5092 100644
--- a/include/dt-bindings/clock/qcom,videocc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,videocc-sdm845.h
@@ -28,9 +28,4 @@
#define VIDEO_CC_VENUS_CTL_CORE_CLK 11
#define VIDEO_PLL0 12
-#define VIDEO_CC_INTERFACE_BCR 0
-#define VIDEO_CC_VCODEC0_BCR 1
-#define VIDEO_CC_VCODEC1_BCR 2
-#define VIDEO_CC_VENUS_BCR 3
-
#endif
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 744ea4f..2b8b6e0 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -125,7 +125,15 @@
* BVEC_POOL_IDX()
*/
#define BIO_RESET_BITS 10
-#define BIO_INLINECRYPT 15
+
+
+/*
+ * Added for request-based dm, which needs to perform post-processing.
+ * This flag ensures that blk_update_request() does not free the bios or
+ * the request; that is done at the dm level instead.
+ */
+#define BIO_DONTFREE 10
+#define BIO_INLINECRYPT 11
/*
* We support 6 different bvec pools, the last one is magic in that it
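
A minimal sketch of the contract the BIO_DONTFREE comment describes,
assuming the dm layer sets the flag before dispatch; both helper names
are hypothetical and shown only to illustrate the intended division of
responsibility:

#include <linux/bio.h>

/* dm core side: keep the clone's bios alive past blk_update_request() */
static inline void dm_mark_bio_dontfree(struct bio *bio)
{
	bio_set_flag(bio, BIO_DONTFREE);
}

/* block core side: completion paths skip freeing such bios and leave
 * the final release to dm_end_request() */
static inline bool bio_held_for_dm(struct bio *bio)
{
	return bio_flagged(bio, BIO_DONTFREE);
}
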
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index e47a7f7..fb910c6 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -816,6 +816,7 @@
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
struct scsi_ioctl_command __user *);
+extern void blk_recalc_rq_segments(struct request *rq);
extern int blk_queue_enter(struct request_queue *q, bool nowait);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_start_queue(struct request_queue *q);
@@ -1031,6 +1032,8 @@
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
+extern int blk_rq_map_sg_no_cluster(struct request_queue *q, struct request *rq,
+ struct scatterlist *sglist);
extern void blk_dump_rq_flags(struct request *, char *);
extern long nr_blockdev_pages(void);
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index cf86f52..20e26d9 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -650,4 +650,12 @@
return (n << SECTOR_SHIFT);
}
+/*-----------------------------------------------------------------
+ * Helper for block layer and dm core operations
+ *-----------------------------------------------------------------
+ */
+void dm_dispatch_request(struct request *rq);
+void dm_kill_unmapped_request(struct request *rq, int error);
+void dm_end_request(struct request *clone, int error);
+
#endif /* _LINUX_DEVICE_MAPPER_H */
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index b718105..0f2e651 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -233,7 +233,6 @@
bool lock_needed);
extern void mmc_cmdq_clk_scaling_stop_busy(struct mmc_host *host,
bool lock_needed, bool is_cmdq_dcmd);
-extern void mmc_recovery_fallback_lower_speed(struct mmc_host *host);
/**
* mmc_claim_host - exclusively claim a host
diff --git a/include/linux/msm-sps.h b/include/linux/msm-sps.h
index 4a9b8a8..662cd9f 100644
--- a/include/linux/msm-sps.h
+++ b/include/linux/msm-sps.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -430,7 +430,7 @@
u32 options;
phys_addr_t phys_addr;
- void *virt_addr;
+ void __iomem *virt_addr;
u32 virt_size;
u32 irq;
u32 num_pipes;
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index 70736e1..33a95d2 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -37,7 +37,7 @@
*/
struct mtd_partition {
- const char *name; /* identifier string */
+ char *name; /* identifier string */
uint64_t size; /* partition size */
uint64_t offset; /* offset within the master MTD space */
uint32_t mask_flags; /* master MTD flags to mask out for this partition */
@@ -97,7 +97,7 @@
deregister_mtd_parser)
int mtd_is_partition(const struct mtd_info *mtd);
-int mtd_add_partition(struct mtd_info *master, const char *name,
+int mtd_add_partition(struct mtd_info *master, char *name,
long long offset, long long length);
int mtd_del_partition(struct mtd_info *master, int partno);
uint64_t mtd_get_device_size(const struct mtd_info *mtd);
diff --git a/include/linux/qcrypto.h b/include/linux/qcrypto.h
index 252464a..ff0e64c 100644
--- a/include/linux/qcrypto.h
+++ b/include/linux/qcrypto.h
@@ -15,6 +15,7 @@
#include <linux/crypto.h>
#include <crypto/hash.h>
+#include <crypto/skcipher.h>
#define QCRYPTO_CTX_KEY_MASK 0x000000ff
#define QCRYPTO_CTX_USE_HW_KEY 0x00000001
@@ -29,7 +30,7 @@
int qcrypto_ahash_set_device(struct ahash_request *req, unsigned int dev);
/*int qcrypto_aead_set_device(struct aead_request *req, unsigned int dev);*/
-int qcrypto_cipher_set_flag(struct ablkcipher_request *req, unsigned int flags);
+int qcrypto_cipher_set_flag(struct skcipher_request *req, unsigned int flags);
int qcrypto_ahash_set_flag(struct ahash_request *req, unsigned int flags);
/*int qcrypto_aead_set_flag(struct aead_request *req, unsigned int flags);*/
@@ -47,16 +48,16 @@
int qcrypto_get_num_engines(void);
void qcrypto_get_engine_list(size_t num_engines,
struct crypto_engine_entry *arr);
-int qcrypto_cipher_set_device_hw(struct ablkcipher_request *req,
+int qcrypto_cipher_set_device_hw(struct skcipher_request *req,
unsigned int fde_pfe,
unsigned int hw_inst);
struct qcrypto_func_set {
- int (*cipher_set)(struct ablkcipher_request *req,
+ int (*cipher_set)(struct skcipher_request *req,
unsigned int fde_pfe,
unsigned int hw_inst);
- int (*cipher_flag)(struct ablkcipher_request *req, unsigned int flags);
+ int (*cipher_flag)(struct skcipher_request *req, unsigned int flags);
int (*get_num_engines)(void);
void (*get_engine_list)(size_t num_engines,
struct crypto_engine_entry *arr);
diff --git a/include/linux/sde_rsc.h b/include/linux/sde_rsc.h
index f921909..1450caa 100644
--- a/include/linux/sde_rsc.h
+++ b/include/linux/sde_rsc.h
@@ -179,13 +179,14 @@
* sde_rsc_client_vote() - ab/ib vote from rsc client
*
* @client: Client pointer provided by sde_rsc_client_create().
+ * @bus_id: data bus identifier
* @ab: aggregated bandwidth vote from client.
* @ib: instant bandwidth vote from client.
*
* Return: error code.
*/
int sde_rsc_client_vote(struct sde_rsc_client *caller_client,
- u64 ab_vote, u64 ib_vote);
+ u32 bus_id, u64 ab_vote, u64 ib_vote);
/**
* sde_rsc_register_event - register a callback function for an event
@@ -243,7 +244,7 @@
}
static inline int sde_rsc_client_vote(struct sde_rsc_client *caller_client,
- u64 ab_vote, u64 ib_vote)
+ u32 bus_id, u64 ab_vote, u64 ib_vote)
{
return 0;
}
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index fd09a1b..ddd8f4d 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -58,7 +58,7 @@
GSI_EP_OP_STORE_DBL_INFO,
GSI_EP_OP_ENABLE_GSI,
GSI_EP_OP_UPDATEXFER,
- GSI_EP_OP_RING_IN_DB,
+ GSI_EP_OP_RING_DB,
GSI_EP_OP_ENDXFER,
GSI_EP_OP_GET_CH_INFO,
GSI_EP_OP_GET_XFER_IDX,
diff --git a/include/media/msm_vidc.h b/include/media/msm_vidc.h
index 237fb4a..bb5a21c 100644
--- a/include/media/msm_vidc.h
+++ b/include/media/msm_vidc.h
@@ -61,15 +61,17 @@
};
struct msm_smem {
- int mem_type;
- size_t size;
+ u32 refcount;
+ int fd;
+ void *dma_buf;
+ void *handle;
void *kvaddr;
- ion_phys_addr_t device_addr;
+ u32 device_addr;
+ unsigned int offset;
+ unsigned int size;
unsigned long flags;
- void *smem_priv;
enum hal_buffer buffer_type;
struct dma_mapping_info mapping_info;
- unsigned int offset;
};
enum smem_cache_ops {
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 1ea6e0d..bf8f149 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -881,6 +881,11 @@
TP_ARGS(p, task_cpu, task_util, nominated_cpu, target_cpu, ediff, need_idle)
);
+DEFINE_EVENT(sched_task_util, sched_task_util_boosted,
+ TP_PROTO(struct task_struct *p, int task_cpu, unsigned long task_util, int nominated_cpu, int target_cpu, int ediff, bool need_idle),
+ TP_ARGS(p, task_cpu, task_util, nominated_cpu, target_cpu, ediff, need_idle)
+);
+
DEFINE_EVENT(sched_task_util, sched_task_util_overutilzed,
TP_PROTO(struct task_struct *p, int task_cpu, unsigned long task_util, int nominated_cpu, int target_cpu, int ediff, bool need_idle),
TP_ARGS(p, task_cpu, task_util, nominated_cpu, target_cpu, ediff, need_idle)
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
index 9773480..48cfe31 100644
--- a/include/uapi/linux/msm_ipa.h
+++ b/include/uapi/linux/msm_ipa.h
@@ -438,10 +438,20 @@
IPA_TETHERING_STATS_UPDATE_STATS = IPA_ECM_EVENT_MAX,
IPA_TETHERING_STATS_UPDATE_NETWORK_STATS,
IPA_TETHERING_STATS_EVENT_MAX,
- IPA_EVENT_MAX_NUM = IPA_TETHERING_STATS_EVENT_MAX
};
-#define IPA_EVENT_MAX ((int)IPA_EVENT_MAX_NUM)
+enum ipa_quota_event {
+ IPA_QUOTA_REACH = IPA_TETHERING_STATS_EVENT_MAX,
+ IPA_QUOTA_EVENT_MAX,
+};
+
+enum ipa_ssr_event {
+ IPA_SSR_BEFORE_SHUTDOWN = IPA_QUOTA_EVENT_MAX,
+ IPA_SSR_AFTER_POWERUP,
+ IPA_SSR_EVENT_MAX
+};
+
+#define IPA_EVENT_MAX_NUM ((int)IPA_SSR_EVENT_MAX)
/**
* enum ipa_rm_resource_name - IPA RM clients identification names
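
Because each new enum starts where the previous block's MAX value left
off, the user-visible event numbering stays contiguous across
ipa_tethering_stats_event, ipa_quota_event, and ipa_ssr_event. A
build-time check one could add to verify this (illustrative, not part of
the patch):

#include <linux/bug.h>
#include <linux/msm_ipa.h>

static inline void ipa_event_numbering_check(void)
{
	BUILD_BUG_ON(IPA_QUOTA_REACH != IPA_TETHERING_STATS_EVENT_MAX);
	BUILD_BUG_ON(IPA_SSR_BEFORE_SHUTDOWN != IPA_QUOTA_EVENT_MAX);
	BUILD_BUG_ON(IPA_EVENT_MAX_NUM != (int)IPA_SSR_EVENT_MAX);
}
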
diff --git a/include/uapi/linux/rmnet_ipa_fd_ioctl.h b/include/uapi/linux/rmnet_ipa_fd_ioctl.h
index 228bfe8..f04ac49 100644
--- a/include/uapi/linux/rmnet_ipa_fd_ioctl.h
+++ b/include/uapi/linux/rmnet_ipa_fd_ioctl.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -32,6 +32,7 @@
#define WAN_IOCTL_RESET_TETHER_STATS 7
#define WAN_IOCTL_QUERY_DL_FILTER_STATS 8
#define WAN_IOCTL_ADD_FLT_RULE_EX 9
+#define WAN_IOCTL_QUERY_TETHER_STATS_ALL 10
/* User space may not have this defined. */
#ifndef IFNAMSIZ
@@ -99,6 +100,16 @@
uint64_t ipv6_rx_bytes;
};
+struct wan_ioctl_query_tether_stats_all {
+ /* Name of the upstream interface */
+ char upstreamIface[IFNAMSIZ];
+ /* enum of tether interface */
+ enum ipacm_client_enum ipa_client;
+ uint8_t reset_stats;
+ uint64_t tx_bytes;
+ uint64_t rx_bytes;
+};
+
struct wan_ioctl_reset_tether_stats {
/* Name of the upstream interface, not support now */
char upstreamIface[IFNAMSIZ];
@@ -155,4 +166,8 @@
WAN_IOCTL_ADD_FLT_RULE_EX, \
struct ipa_install_fltr_rule_req_ex_msg_v01 *)
+#define WAN_IOC_QUERY_TETHER_STATS_ALL _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_QUERY_TETHER_STATS_ALL, \
+ struct wan_ioctl_query_tether_stats_all *)
+
#endif /* _RMNET_IPA_FD_IOCTL_H */
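
A hypothetical user-space caller of the new ioctl, matching the struct and
ioctl number defined above; the device node path is an assumption (it is
whatever node rmnet_ipa_fd_ioctl.c registers), and IPACM_CLIENT_USB stands
in for any tethered client:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/rmnet_ipa_fd_ioctl.h>

int query_tether_stats_all(const char *upstream)
{
	struct wan_ioctl_query_tether_stats_all stats;
	int fd = open("/dev/wwan_ioctl", O_RDWR);	/* assumed node name */

	if (fd < 0)
		return -1;
	memset(&stats, 0, sizeof(stats));
	strncpy(stats.upstreamIface, upstream, IFNAMSIZ - 1);
	stats.ipa_client = IPACM_CLIENT_USB;	/* any tethered client enum */
	stats.reset_stats = 0;
	if (ioctl(fd, WAN_IOC_QUERY_TETHER_STATS_ALL, &stats) < 0) {
		close(fd);
		return -1;
	}
	printf("tx=%llu rx=%llu\n",
	       (unsigned long long)stats.tx_bytes,
	       (unsigned long long)stats.rx_bytes);
	close(fd);
	return 0;
}
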
diff --git a/include/uapi/media/msm_vidc.h b/include/uapi/media/msm_vidc.h
index 7161102..038dd48 100644
--- a/include/uapi/media/msm_vidc.h
+++ b/include/uapi/media/msm_vidc.h
@@ -354,6 +354,15 @@
MSM_VIDC_TRANSFER_SRGB = 13,
MSM_VIDC_TRANSFER_BT_2020_10 = 14,
MSM_VIDC_TRANSFER_BT_2020_12 = 15,
+#define MSM_VIDC_TRANSFER_SMPTE_ST2084 \
+ MSM_VIDC_TRANSFER_SMPTE_ST2084
+ MSM_VIDC_TRANSFER_SMPTE_ST2084 = 16,
+#define MSM_VIDC_TRANSFER_SMPTE_ST428_1 \
+ MSM_VIDC_TRANSFER_SMPTE_ST428_1
+ MSM_VIDC_TRANSFER_SMPTE_ST428_1 = 17,
+#define MSM_VIDC_TRANSFER_HLG \
+ MSM_VIDC_TRANSFER_HLG
+ MSM_VIDC_TRANSFER_HLG = 18,
};
enum msm_vidc_pixel_depth {
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e4b706d..45f404b 100755
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6870,8 +6870,7 @@
if (new_util > capacity_orig_of(i))
continue;
- cpu_idle_idx = cpu_rq(i)->nr_running ? -1 :
- idle_get_state_idx(cpu_rq(i));
+ cpu_idle_idx = idle_get_state_idx(cpu_rq(i));
if (!need_idle &&
add_capacity_margin(new_util_cum) <
@@ -6999,6 +6998,18 @@
return target_cpu;
}
+ /*
+ * We always want to migrate the task to the best CPU when
+ * placement boost is active.
+ */
+ if (placement_boost) {
+ trace_sched_task_util_boosted(p, task_cpu(p),
+ task_util(p),
+ target_cpu,
+ target_cpu, 0, need_idle);
+ return target_cpu;
+ }
+
#ifdef CONFIG_SCHED_WALT
if (walt_disabled || !sysctl_sched_use_walt_cpu_util)
task_util_boosted = 0;
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index ec90319..65b34b4 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1970,9 +1970,6 @@
if (sysctl_sched_cstate_aware)
cpu_idle_idx =
- (cpu == smp_processor_id() ||
- cpu_rq(cpu)->nr_running) ?
- -1 :
idle_get_state_idx(cpu_rq(cpu));
if (add_capacity_margin(new_util_cum) <
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index d4a0612..566e103 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1491,6 +1491,10 @@
static inline int idle_get_state_idx(struct rq *rq)
{
WARN_ON(!rcu_read_lock_held());
+
+ if (rq->nr_running || cpu_of(rq) == raw_smp_processor_id())
+ return -1;
+
return rq->idle_state_idx;
}
#else
diff --git a/net/netfilter/xt_HARDIDLETIMER.c b/net/netfilter/xt_HARDIDLETIMER.c
index eb5b452..fc0b83f 100644
--- a/net/netfilter/xt_HARDIDLETIMER.c
+++ b/net/netfilter/xt_HARDIDLETIMER.c
@@ -180,6 +180,8 @@
pr_debug("couldn't add file to sysfs");
goto out_free_attr;
}
+ /* notify userspace */
+ kobject_uevent(hardidletimer_tg_kobj, KOBJ_ADD);
list_add(&info->timer->entry, &hardidletimer_tg_list);
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index f11aa28..04a1b97 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -301,6 +301,8 @@
pr_debug("couldn't add file to sysfs");
goto out_free_attr;
}
+ /* notify userspace */
+ kobject_uevent(idletimer_tg_kobj, KOBJ_ADD);
list_add(&info->timer->entry, &idletimer_tg_list);