Merge "msm: Kconfig: Fix the defconfig warnings for 8974"
diff --git a/Documentation/devicetree/bindings/arm/msm/lpm-resources.txt b/Documentation/devicetree/bindings/arm/msm/lpm-resources.txt
index 7f2a21b..ccb3465 100644
--- a/Documentation/devicetree/bindings/arm/msm/lpm-resources.txt
+++ b/Documentation/devicetree/bindings/arm/msm/lpm-resources.txt
@@ -23,6 +23,7 @@
- qcom,resource-type: The type of the LPM resource.
MSM_LPM_RPM_RS_TYPE = 0
MSM_LPM_LOCAL_RS_TYPE = 1
+- qcom,init-value: Initialization value of the LPM resource.
Optional Nodes:
@@ -41,5 +42,6 @@
qcom,type = <0x62706d73>; /* "smpb" */
qcom,id = <0x02>;
qcom,key = <0x6e726f63>; /* "corn" */
+ qcom,init-value = <5>; /* Active Corner */
};
diff --git a/arch/arm/boot/dts/msm-pm8941.dtsi b/arch/arm/boot/dts/msm-pm8941.dtsi
index 6538db5..7a9a80d 100644
--- a/arch/arm/boot/dts/msm-pm8941.dtsi
+++ b/arch/arm/boot/dts/msm-pm8941.dtsi
@@ -83,7 +83,7 @@
"ocv_for_r",
"cc_thr";
- qcom,bms-r-sense-mohm = <10>;
+ qcom,bms-r-sense-mohm = <2>;
qcom,bms-v-cutoff-uv = <3400000>;
qcom,bms-max-voltage-uv = <4200000>;
qcom,bms-r-conn-mohm = <18>;
@@ -95,7 +95,6 @@
qcom,bms-calculate-soc-ms = <20000>;
qcom,bms-chg-term-ua = <100000>;
qcom,bms-batt-type = <0>;
- qcom,bms-use-voltage-soc;
};
clkdiv@5b00 {
diff --git a/arch/arm/boot/dts/msm8226.dtsi b/arch/arm/boot/dts/msm8226.dtsi
index 927ebcd..a07788b 100644
--- a/arch/arm/boot/dts/msm8226.dtsi
+++ b/arch/arm/boot/dts/msm8226.dtsi
@@ -55,6 +55,13 @@
status = "disabled";
};
+ qcom,sps@f9984000 {
+ compatible = "qcom,msm_sps";
+ reg = <0xf9984000 0x15000>,
+ <0xf9999000 0xb000>;
+ interrupts = <0 94 0>;
+ };
+
usb@f9a55000 {
compatible = "qcom,hsusb-otg";
reg = <0xf9a55000 0x400>;
diff --git a/arch/arm/boot/dts/msm8974-pm.dtsi b/arch/arm/boot/dts/msm8974-pm.dtsi
index 52f2a41..c877134 100644
--- a/arch/arm/boot/dts/msm8974-pm.dtsi
+++ b/arch/arm/boot/dts/msm8974-pm.dtsi
@@ -143,6 +143,7 @@
qcom,type = <0x62706d73>; /* "smpb" */
qcom,id = <0x02>;
qcom,key = <0x6e726f63>; /* "corn" */
+ qcom,init-value = <5>; /* Super Turbo */
};
qcom,lpm-resources@1 {
@@ -152,6 +153,7 @@
qcom,type = <0x62706d73>; /* "smpb" */
qcom,id = <0x01>;
qcom,key = <0x7675>; /* "uv" */
+ qcom,init-value = <1050000>; /* Super Turbo */
};
qcom,lpm-resources@2 {
@@ -161,12 +163,14 @@
qcom,type = <0x306b6c63>; /* "clk0" */
qcom,id = <0x00>;
qcom,key = <0x62616e45>; /* "Enab" */
+ qcom,init-value = <1>; /* On */
};
qcom,lpm-resources@3 {
reg = <0x3>;
qcom,name = "l2";
qcom,resource-type = <1>;
+ qcom,init-value = <2>; /* Retention */
};
};
diff --git a/arch/arm/boot/dts/msm8974-regulator.dtsi b/arch/arm/boot/dts/msm8974-regulator.dtsi
index 3f7e9de..2cef567 100644
--- a/arch/arm/boot/dts/msm8974-regulator.dtsi
+++ b/arch/arm/boot/dts/msm8974-regulator.dtsi
@@ -121,9 +121,6 @@
regulator-min-microvolt = <1300000>;
regulator-max-microvolt = <1300000>;
qcom,init-voltage = <1300000>;
- qcom,init-current = <100>;
- qcom,system-load = <100000>;
- regulator-always-on;
status = "okay";
};
};
@@ -136,14 +133,6 @@
qcom,init-voltage = <2150000>;
status = "okay";
};
- pm8941_s2_ao: regulator-s2-ao {
- regulator-name = "8941_s2_ao";
- qcom,set = <1>;
- regulator-min-microvolt = <2150000>;
- regulator-max-microvolt = <2150000>;
- status = "okay";
- compatible = "qcom,rpm-regulator-smd";
- };
};
rpm-regulator-smpa3 {
@@ -152,9 +141,6 @@
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
qcom,init-voltage = <1800000>;
- qcom,init-current = <100>;
- qcom,system-load = <100000>;
- regulator-always-on;
status = "okay";
};
};
@@ -162,13 +148,9 @@
rpm-regulator-ldoa1 {
status = "okay";
pm8941_l1: regulator-l1 {
- parent-supply = <&pm8941_s1>;
regulator-min-microvolt = <1225000>;
regulator-max-microvolt = <1225000>;
qcom,init-voltage = <1225000>;
- qcom,init-current = <10>;
- qcom,system-load = <10000>;
- regulator-always-on;
status = "okay";
};
};
@@ -176,7 +158,6 @@
rpm-regulator-ldoa2 {
status = "okay";
pm8941_l2: regulator-l2 {
- parent-supply = <&pm8941_s3>;
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
qcom,init-voltage = <1200000>;
@@ -187,7 +168,6 @@
rpm-regulator-ldoa3 {
status = "okay";
pm8941_l3: regulator-l3 {
- parent-supply = <&pm8941_s1>;
regulator-min-microvolt = <1225000>;
regulator-max-microvolt = <1225000>;
qcom,init-voltage = <1225000>;
@@ -198,7 +178,6 @@
rpm-regulator-ldoa4 {
status = "okay";
pm8941_l4: regulator-l4 {
- parent-supply = <&pm8941_s1>;
regulator-min-microvolt = <1225000>;
regulator-max-microvolt = <1225000>;
qcom,init-voltage = <1225000>;
@@ -209,7 +188,6 @@
rpm-regulator-ldoa5 {
status = "okay";
pm8941_l5: regulator-l5 {
- parent-supply = <&pm8941_s2>;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
qcom,init-voltage = <1800000>;
@@ -220,7 +198,6 @@
rpm-regulator-ldoa6 {
status = "okay";
pm8941_l6: regulator-l6 {
- parent-supply = <&pm8941_s2>;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
qcom,init-voltage = <1800000>;
@@ -231,7 +208,6 @@
rpm-regulator-ldoa7 {
status = "okay";
pm8941_l7: regulator-l7 {
- parent-supply = <&pm8941_s2>;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
qcom,init-voltage = <1800000>;
@@ -272,7 +248,6 @@
rpm-regulator-ldoa11 {
status = "okay";
pm8941_l11: regulator-l11 {
- parent-supply = <&pm8941_s1>;
regulator-min-microvolt = <1300000>;
regulator-max-microvolt = <1300000>;
qcom,init-voltage = <1300000>;
@@ -283,14 +258,12 @@
rpm-regulator-ldoa12 {
status = "okay";
pm8941_l12: regulator-l12 {
- parent-supply = <&pm8941_s2>;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
status = "okay";
};
pm8941_l12_ao: regulator-l12-ao {
regulator-name = "8941_l12_ao";
- parent-supply = <&pm8941_s2_ao>;
qcom,set = <1>;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
@@ -312,7 +285,6 @@
rpm-regulator-ldoa14 {
status = "okay";
pm8941_l14: regulator-l14 {
- parent-supply = <&pm8941_s2>;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
qcom,init-voltage = <1800000>;
@@ -323,7 +295,6 @@
rpm-regulator-ldoa15 {
status = "okay";
pm8941_l15: regulator-l15 {
- parent-supply = <&pm8941_s2>;
regulator-min-microvolt = <2050000>;
regulator-max-microvolt = <2050000>;
qcom,init-voltage = <2050000>;
@@ -424,7 +395,6 @@
rpm-regulator-vsa1 {
status = "okay";
pm8941_lvs1: regulator-lvs1 {
- parent-supply = <&pm8941_s3>;
status = "okay";
};
};
@@ -432,7 +402,6 @@
rpm-regulator-vsa2 {
status = "okay";
pm8941_lvs2: regulator-lvs2 {
- parent-supply = <&pm8941_s3>;
status = "okay";
};
};
@@ -440,7 +409,6 @@
rpm-regulator-vsa3 {
status = "okay";
pm8941_lvs3: regulator-lvs3 {
- parent-supply = <&pm8941_s3>;
status = "okay";
};
};
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index eeb05d4..6a7e81e 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -648,16 +648,11 @@
krait1_dig-supply = <&pm8841_s2_corner_ao>;
krait2_dig-supply = <&pm8841_s2_corner_ao>;
krait3_dig-supply = <&pm8841_s2_corner_ao>;
- krait0_hfpll_a-supply = <&pm8941_s2_ao>;
- krait1_hfpll_a-supply = <&pm8941_s2_ao>;
- krait2_hfpll_a-supply = <&pm8941_s2_ao>;
- krait3_hfpll_a-supply = <&pm8941_s2_ao>;
- l2_hfpll_a-supply = <&pm8941_s2_ao>;
- krait0_hfpll_b-supply = <&pm8941_l12_ao>;
- krait1_hfpll_b-supply = <&pm8941_l12_ao>;
- krait2_hfpll_b-supply = <&pm8941_l12_ao>;
- krait3_hfpll_b-supply = <&pm8941_l12_ao>;
- l2_hfpll_b-supply = <&pm8941_l12_ao>;
+ krait0_hfpll-supply = <&pm8941_l12_ao>;
+ krait1_hfpll-supply = <&pm8941_l12_ao>;
+ krait2_hfpll-supply = <&pm8941_l12_ao>;
+ krait3_hfpll-supply = <&pm8941_l12_ao>;
+ l2_hfpll-supply = <&pm8941_l12_ao>;
};
usb3: qcom,ssusb@f9200000 {
diff --git a/arch/arm/boot/dts/msm9625-pm.dtsi b/arch/arm/boot/dts/msm9625-pm.dtsi
index 2839864..dbdddb6 100644
--- a/arch/arm/boot/dts/msm9625-pm.dtsi
+++ b/arch/arm/boot/dts/msm9625-pm.dtsi
@@ -42,6 +42,7 @@
qcom,type = <0x616F646C>; /* "ldoa" */
qcom,id = <0x0A>;
qcom,key = <0x6e726f63>; /* "corn" */
+ qcom,init-value = <5>; /* Super Turbo */
};
qcom,lpm-resources@1 {
@@ -51,6 +52,7 @@
qcom,type = <0x616F646C>; /* "ldoa" */
qcom,id = <0x0C>;
qcom,key = <0x7675>; /* "uv" */
+ qcom,init-value = <1050000>; /* Super Turbo */
};
qcom,lpm-resources@2 {
@@ -60,6 +62,7 @@
qcom,type = <0x306b6c63>; /* "clk0" */
qcom,id = <0x00>;
qcom,key = <0x62616e45>; /* "Enab" */
+ qcom,init-value = <1>; /* On */
};
};
diff --git a/arch/arm/boot/dts/msm9625.dtsi b/arch/arm/boot/dts/msm9625.dtsi
index 7462911..cbd93df 100644
--- a/arch/arm/boot/dts/msm9625.dtsi
+++ b/arch/arm/boot/dts/msm9625.dtsi
@@ -523,6 +523,57 @@
compatible = "qcom,pil-q6v5-mss";
interrupts = <0 24 1>;
};
+
+ qcom,smem@fa00000 {
+ compatible = "qcom,smem";
+ reg = <0xfa00000 0x200000>,
+ <0xfa006000 0x1000>,
+ <0xfc428000 0x4000>;
+ reg-names = "smem", "irq-reg-base", "aux-mem1";
+
+ qcom,smd-modem {
+ compatible = "qcom,smd";
+ qcom,smd-edge = <0>;
+ qcom,smd-irq-offset = <0x8>;
+ qcom,smd-irq-bitmask = <0x1000>;
+ qcom,pil-string = "modem";
+ interrupts = <0 25 1>;
+ };
+
+ qcom,smsm-modem {
+ compatible = "qcom,smsm";
+ qcom,smsm-edge = <0>;
+ qcom,smsm-irq-offset = <0x8>;
+ qcom,smsm-irq-bitmask = <0x2000>;
+ interrupts = <0 26 1>;
+ };
+
+ qcom,smd-adsp {
+ compatible = "qcom,smd";
+ qcom,smd-edge = <1>;
+ qcom,smd-irq-offset = <0x8>;
+ qcom,smd-irq-bitmask = <0x100>;
+ qcom,pil-string = "adsp";
+ interrupts = <0 156 1>;
+ };
+
+ qcom,smsm-adsp {
+ compatible = "qcom,smsm";
+ qcom,smsm-edge = <1>;
+ qcom,smsm-irq-offset = <0x8>;
+ qcom,smsm-irq-bitmask = <0x200>;
+ interrupts = <0 157 1>;
+ };
+
+ qcom,smd-rpm {
+ compatible = "qcom,smd";
+ qcom,smd-edge = <15>;
+ qcom,smd-irq-offset = <0x8>;
+ qcom,smd-irq-bitmask = <0x1>;
+ interrupts = <0 168 1>;
+ qcom,irq-no-suspend;
+ };
+ };
};
/include/ "msm-pm8019-rpm-regulator.dtsi"
diff --git a/arch/arm/configs/fsm9xxx-perf_defconfig b/arch/arm/configs/fsm9xxx-perf_defconfig
index 1dc853b..8a7928b 100644
--- a/arch/arm/configs/fsm9xxx-perf_defconfig
+++ b/arch/arm/configs/fsm9xxx-perf_defconfig
@@ -37,6 +37,8 @@
# CONFIG_MSM_HW3D is not set
# CONFIG_QSD_AUDIO is not set
# CONFIG_SURF_FFA_GPIO_KEYPAD is not set
+CONFIG_MSM_SMCMOD=m
+CONFIG_MSM_SCM=y
CONFIG_MSM_WATCHDOG=y
CONFIG_MSM_RPC_PMIC=y
CONFIG_MSM_RPC_USB=y
@@ -142,6 +144,8 @@
# CONFIG_MFD_PM8XXX_MISC is not set
CONFIG_REGULATOR=y
CONFIG_REGULATOR_PM8058_XO=y
+CONFIG_ION=y
+CONFIG_ION_MSM=y
# CONFIG_USB_SUPPORT is not set
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
diff --git a/arch/arm/configs/fsm9xxx_defconfig b/arch/arm/configs/fsm9xxx_defconfig
index 203d3b7..db2f25d 100644
--- a/arch/arm/configs/fsm9xxx_defconfig
+++ b/arch/arm/configs/fsm9xxx_defconfig
@@ -36,6 +36,8 @@
# CONFIG_MSM_HW3D is not set
# CONFIG_QSD_AUDIO is not set
# CONFIG_SURF_FFA_GPIO_KEYPAD is not set
+CONFIG_MSM_SMCMOD=m
+CONFIG_MSM_SCM=y
CONFIG_MSM_WATCHDOG=y
CONFIG_MSM_RPC_PMIC=y
CONFIG_MSM_RPC_USB=y
@@ -141,6 +143,8 @@
# CONFIG_MFD_PM8XXX_MISC is not set
CONFIG_REGULATOR=y
CONFIG_REGULATOR_PM8058_XO=y
+CONFIG_ION=y
+CONFIG_ION_MSM=y
# CONFIG_USB_SUPPORT is not set
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
diff --git a/arch/arm/configs/msm8910_defconfig b/arch/arm/configs/msm8910_defconfig
index de78559..b49a83f 100644
--- a/arch/arm/configs/msm8910_defconfig
+++ b/arch/arm/configs/msm8910_defconfig
@@ -47,6 +47,7 @@
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_SMP=y
+CONFIG_SCHED_MC=y
CONFIG_ARM_ARCH_TIMER=y
CONFIG_HOTPLUG_CPU=y
CONFIG_PREEMPT=y
diff --git a/arch/arm/configs/msm8960-perf_defconfig b/arch/arm/configs/msm8960-perf_defconfig
index d5e15f1..e5fd0d5 100644
--- a/arch/arm/configs/msm8960-perf_defconfig
+++ b/arch/arm/configs/msm8960-perf_defconfig
@@ -100,6 +100,7 @@
CONFIG_HIGH_RES_TIMERS=y
CONFIG_SMP=y
# CONFIG_SMP_ON_UP is not set
+CONFIG_SCHED_MC=y
CONFIG_PREEMPT=y
CONFIG_AEABI=y
CONFIG_HIGHMEM=y
@@ -242,6 +243,7 @@
CONFIG_RFKILL=y
CONFIG_GENLOCK=y
CONFIG_GENLOCK_MISCDEVICE=y
+CONFIG_CMA=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_TSPP=m
@@ -466,6 +468,7 @@
CONFIG_MSM_SSBI=y
CONFIG_SPS=y
CONFIG_SPS_SUPPORT_BAMDMA=y
+CONFIG_MSM_AVTIMER=y
CONFIG_MSM_IOMMU=y
CONFIG_MOBICORE_SUPPORT=m
CONFIG_MOBICORE_API=m
diff --git a/arch/arm/configs/msm8960_defconfig b/arch/arm/configs/msm8960_defconfig
index 386f311..02756e9 100644
--- a/arch/arm/configs/msm8960_defconfig
+++ b/arch/arm/configs/msm8960_defconfig
@@ -105,6 +105,7 @@
CONFIG_HIGH_RES_TIMERS=y
CONFIG_SMP=y
# CONFIG_SMP_ON_UP is not set
+CONFIG_SCHED_MC=y
CONFIG_PREEMPT=y
CONFIG_AEABI=y
CONFIG_HIGHMEM=y
@@ -247,6 +248,7 @@
CONFIG_RFKILL=y
CONFIG_GENLOCK=y
CONFIG_GENLOCK_MISCDEVICE=y
+CONFIG_CMA=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_TSPP=m
diff --git a/arch/arm/configs/msm8974-perf_defconfig b/arch/arm/configs/msm8974-perf_defconfig
index bf44665..94e2f36 100644
--- a/arch/arm/configs/msm8974-perf_defconfig
+++ b/arch/arm/configs/msm8974-perf_defconfig
@@ -77,6 +77,7 @@
CONFIG_HIGH_RES_TIMERS=y
CONFIG_SMP=y
# CONFIG_SMP_ON_UP is not set
+CONFIG_SCHED_MC=y
CONFIG_ARM_ARCH_TIMER=y
CONFIG_PREEMPT=y
CONFIG_AEABI=y
@@ -248,6 +249,7 @@
CONFIG_INPUT_EVBUG=m
CONFIG_KEYBOARD_GPIO=y
CONFIG_INPUT_JOYSTICK=y
+CONFIG_JOYSTICK_XPAD=y
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_ATMEL_MXT=y
CONFIG_INPUT_MISC=y
@@ -282,6 +284,7 @@
CONFIG_SENSORS_QPNP_ADC_CURRENT=y
CONFIG_THERMAL=y
CONFIG_THERMAL_TSENS8974=y
+CONFIG_THERMAL_MONITOR=y
CONFIG_THERMAL_QPNP=y
CONFIG_WCD9320_CODEC=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
diff --git a/arch/arm/configs/msm8974_defconfig b/arch/arm/configs/msm8974_defconfig
index f9dbc85..0e9bf1d 100644
--- a/arch/arm/configs/msm8974_defconfig
+++ b/arch/arm/configs/msm8974_defconfig
@@ -79,6 +79,7 @@
CONFIG_HIGH_RES_TIMERS=y
CONFIG_SMP=y
# CONFIG_SMP_ON_UP is not set
+CONFIG_SCHED_MC=y
CONFIG_ARM_ARCH_TIMER=y
CONFIG_PREEMPT=y
CONFIG_AEABI=y
@@ -250,6 +251,7 @@
CONFIG_INPUT_EVBUG=m
CONFIG_KEYBOARD_GPIO=y
CONFIG_INPUT_JOYSTICK=y
+CONFIG_JOYSTICK_XPAD=y
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_ATMEL_MXT=y
CONFIG_INPUT_MISC=y
@@ -284,6 +286,7 @@
CONFIG_SENSORS_QPNP_ADC_CURRENT=y
CONFIG_THERMAL=y
CONFIG_THERMAL_TSENS8974=y
+CONFIG_THERMAL_MONITOR=y
CONFIG_THERMAL_QPNP=y
CONFIG_WCD9320_CODEC=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
diff --git a/arch/arm/include/asm/smcmod.h b/arch/arm/include/asm/smcmod.h
new file mode 100644
index 0000000..06918c4
--- /dev/null
+++ b/arch/arm/include/asm/smcmod.h
@@ -0,0 +1,123 @@
+/* Qualcomm SMC Module API */
+
+#ifndef __SMCMOD_H_
+#define __SMCMOD_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define SMCMOD_DEV "smcmod"
+
+#define SMCMOD_REG_REQ_MAX_ARGS 2
+
+/**
+ * struct smcmod_reg_req - for SMC register ioctl request
+ *
+ * @service_id - requested service.
+ * @command_id - requested command.
+ * @num_args - number of arguments.
+ * @args - argument(s) to be passed to the secure world.
+ * @return_val - return value from secure world operation.
+ */
+struct smcmod_reg_req {
+ uint32_t service_id; /* in */
+ uint32_t command_id; /* in */
+ uint8_t num_args; /* in */
+ uint32_t args[SMCMOD_REG_REQ_MAX_ARGS]; /* in */
+ uint32_t return_val; /* out */
+};
+
+/**
+ * struct smcmod_buf_req - for SMC buffer ioctl request
+ *
+ * @service_id - requested service.
+ * @command_id - requested command.
+ * @ion_cmd_fd - fd obtained from ION_IOC_MAP or ION_IOC_SHARE.
+ * @cmd_len - length of command data buffer in bytes.
+ * @ion_resp_fd - fd obtained from ION_IOC_MAP or ION_IOC_SHARE.
+ * @resp_len - length of response data buffer in bytes.
+ * @return_val - return value from secure world operation.
+ */
+struct smcmod_buf_req {
+ uint32_t service_id;/* in */
+ uint32_t command_id; /* in */
+ int32_t ion_cmd_fd; /* in */
+ uint32_t cmd_len; /* in */
+ int32_t ion_resp_fd; /* in */
+ uint32_t resp_len; /* in */
+ uint32_t return_val; /* out */
+};
+
+/**
+ * struct smcmod_cipher_req - for SMC cipher command ioctl
+ *
+ * @algorithm - specifies the cipher algorithm.
+ * @operation - specifies encryption or decryption.
+ * @mode - specifies cipher mode.
+ * @ion_key_fd - fd obtained from ION_IOC_MAP or ION_IOC_SHARE.
+ * @key_size - key size in bytes.
+ * @ion_plain_text_fd - fd obtained from ION_IOC_MAP or ION_IOC_SHARE.
+ * @plain_text_size - size of plain text in bytes.
+ * @ion_cipher_text_fd - fd obtained from ION_IOC_MAP or ION_IOC_SHARE.
+ * @cipher_text_size - cipher text size in bytes.
+ * @ion_init_vector_fd - fd obtained from ION_IOC_MAP or ION_IOC_SHARE.
+ * @init_vector_size - size of initialization vector in bytes.
+ * @key_is_null - indicates that the key is null.
+ * @return_val - return value from secure world operation.
+ */
+struct smcmod_cipher_req {
+ uint32_t algorithm; /* in */
+ uint32_t operation; /* in */
+ uint32_t mode; /* in */
+ int32_t ion_key_fd; /* in */
+ uint32_t key_size; /* in */
+ int32_t ion_plain_text_fd; /* in (encrypt)/out (decrypt) */
+ uint32_t plain_text_size; /* in */
+ int32_t ion_cipher_text_fd; /* out (encrypt)/in (decrypt) */
+ uint32_t cipher_text_size; /* in */
+ int32_t ion_init_vector_fd; /* in */
+ uint32_t init_vector_size; /* in */
+ uint32_t key_is_null; /* in */
+ uint32_t return_val; /* out */
+};
+
+/**
+ * struct smcmod_msg_digest_req - for message digest command ioctl
+ *
+ * @algorithm - specifies the cipher algorithm.
+ * @ion_key_fd - fd obtained from ION_IOC_MAP or ION_IOC_SHARE.
+ * @key_size - hash key size in bytes.
+ * @ion_input_fd - fd obtained from ION_IOC_MAP or ION_IOC_SHARE.
+ * @input_size - input data size in bytes.
+ * @ion_output_fd - fd obtained from ION_IOC_MAP or ION_IOC_SHARE.
+ * @output_size - size of output buffer in bytes.
+ * @fixed_block - indicates whether this is a fixed block digest.
+ * @key_is_null - indicates that the key is null.
+ * @return_val - return value from secure world operation.
+ */
+struct smcmod_msg_digest_req {
+ uint32_t algorithm; /* in */
+ int32_t ion_key_fd; /* in */
+ uint32_t key_size; /* in */
+ int32_t ion_input_fd; /* in */
+ uint32_t input_size; /* in */
+ int32_t ion_output_fd; /* in/out */
+ uint32_t output_size; /* in */
+ uint32_t fixed_block; /* in */
+ uint32_t key_is_null; /* in */
+ uint32_t return_val; /* out */
+} __packed;
+
+#define SMCMOD_IOC_MAGIC 0x97
+
+/* Number chosen to avoid any conflicts */
+#define SMCMOD_IOCTL_SEND_REG_CMD \
+ _IOWR(SMCMOD_IOC_MAGIC, 32, struct smcmod_reg_req)
+#define SMCMOD_IOCTL_SEND_BUF_CMD \
+ _IOWR(SMCMOD_IOC_MAGIC, 33, struct smcmod_buf_req)
+#define SMCMOD_IOCTL_SEND_CIPHER_CMD \
+ _IOWR(SMCMOD_IOC_MAGIC, 34, struct smcmod_cipher_req)
+#define SMCMOD_IOCTL_SEND_MSG_DIGEST_CMD \
+ _IOWR(SMCMOD_IOC_MAGIC, 35, struct smcmod_msg_digest_req)
+#define SMCMOD_IOCTL_GET_VERSION _IOWR(SMCMOD_IOC_MAGIC, 36, uint32_t)
+#endif /* __SMCMOD_H_ */
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 3a52ddc..d2e2e44 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -444,7 +444,7 @@
if (plat && plat->free_pmu_irq)
armpmu->free_pmu_irq = plat->free_pmu_irq;
- else if (!armpmu->request_pmu_irq)
+ else if (!armpmu->free_pmu_irq)
armpmu->free_pmu_irq = armpmu_generic_free_irq;
irqs = min(pmu_device->num_resources, num_possible_cpus());
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index 473a665..e6255f2 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -1948,6 +1948,16 @@
be used on systems which contain an RPM which communicates with the
application processor over SMD.
+config MSM_SMCMOD
+ tristate "Secure Monitor Call (SMC) Module"
+ default n
+ depends on (ARCH_FSM9XXX && ION && ION_MSM && MSM_SCM)
+ help
+ Enable support for smcmod driver. This driver provides a mechanism
+ to execute the Secure Monitor Call (SMC) to switch from non-secure
+ to secure execution in the fsm9xxx targets. This module utilizes Ion
+ for buffer management.
+
config MSM_SUBSYSTEM_RESTART
bool "MSM Subsystem Restart"
help
@@ -2102,7 +2112,7 @@
config MSM_BUSPM_DEV
tristate "MSM Bus Performance Monitor Kernel Module"
- depends on (ARCH_MSM8X60 || ARCH_MSM8960)
+ depends on (ARCH_MSM8X60 || ARCH_MSM8960 || ARCH_MSM8974)
default m
help
This kernel module is used to mmap() hardware registers for the
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 7555e51..af77726 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -28,7 +28,7 @@
ifdef CONFIG_ARCH_MSM_KRAIT
obj-$(CONFIG_DEBUG_FS) += acpuclock-krait-debug.o
endif
-obj-$(CONFIG_ARCH_MSM7X27) += acpuclock-7627.o clock-pll.o
+obj-$(CONFIG_ARCH_MSM7X27) += acpuclock-7627.o acpuclock-8625q.o clock-pll.o
obj-$(CONFIG_ARCH_MSM_SCORPION) += pmu.o
obj-$(CONFIG_ARCH_MSM_SCORPIONMP) += perf_event_msm_l2.o
obj-$(CONFIG_ARCH_MSM_KRAIT) += msm-krait-l2-accessors.o pmu.o perf_event_msm_krait_l2.o
@@ -410,3 +410,5 @@
obj-$(CONFIG_MSM_FIQ) += msm7k_fiq_handler.o
obj-$(CONFIG_MEMORY_HOLE_CARVEOUT) += msm_mem_hole.o
+
+obj-$(CONFIG_MSM_SMCMOD) += smcmod.o
diff --git a/arch/arm/mach-msm/acpuclock-8625q.c b/arch/arm/mach-msm/acpuclock-8625q.c
new file mode 100644
index 0000000..00022ff
--- /dev/null
+++ b/arch/arm/mach-msm/acpuclock-8625q.c
@@ -0,0 +1,762 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007-2012, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/mutex.h>
+#include <linux/io.h>
+#include <linux/sort.h>
+#include <linux/regulator/consumer.h>
+#include <linux/smp.h>
+
+#include <mach/board.h>
+#include <mach/msm_iomap.h>
+#include <mach/clk-provider.h>
+
+#include <asm/cpu.h>
+
+#include "acpuclock.h"
+#include "acpuclock-8625q.h"
+
+#define A11S_CLK_CNTL_ADDR (MSM_CSR_BASE + 0x100)
+#define A11S_CLK_SEL_ADDR (MSM_CSR_BASE + 0x104)
+
+#define PLL4_L_VAL_ADDR (MSM_CLK_CTL_BASE + 0x378)
+#define PLL4_M_VAL_ADDR (MSM_CLK_CTL_BASE + 0x37C)
+#define PLL4_N_VAL_ADDR (MSM_CLK_CTL_BASE + 0x380)
+
+#define POWER_COLLAPSE_KHZ 19200
+
+/* Max CPU frequency allowed by hardware while in standby waiting for an irq. */
+#define MAX_WAIT_FOR_IRQ_KHZ 128000
+
+/**
+ * enum - For acpuclock PLL IDs
+ */
+enum {
+ ACPU_PLL_0 = 0,
+ ACPU_PLL_1,
+ ACPU_PLL_2,
+ ACPU_PLL_3,
+ ACPU_PLL_4,
+ ACPU_PLL_TCXO,
+ ACPU_PLL_END,
+};
+
+struct acpu_clk_src {
+ struct clk *clk;
+ const char *name;
+};
+
+struct pll_config {
+ unsigned int l;
+ unsigned int m;
+ unsigned int n;
+};
+
+static struct acpu_clk_src pll_clk[ACPU_PLL_END] = {
+ [ACPU_PLL_0] = { .name = "pll0_clk" },
+ [ACPU_PLL_1] = { .name = "pll1_clk" },
+ [ACPU_PLL_2] = { .name = "pll2_clk" },
+ [ACPU_PLL_4] = { .name = "pll4_clk" },
+};
+
+static struct pll_config pll4_cfg_tbl[] = {
+ [0] = { 36, 1, 2 }, /* 700.8 MHz */
+ [1] = { 52, 1, 2 }, /* 1008 MHz */
+ [2] = { 63, 0, 1 }, /* 1209.6 MHz */
+ [3] = { 73, 0, 1 }, /* 1401.6 MHz */
+};
+
+struct clock_state {
+ struct clkctl_acpu_speed *current_speed;
+ struct mutex lock;
+ uint32_t max_speed_delta_khz;
+ struct clk *ebi1_clk;
+ struct regulator *vreg_cpu;
+};
+
+struct clkctl_acpu_speed {
+ unsigned int use_for_scaling;
+ unsigned int a11clk_khz;
+ int pll;
+ unsigned int a11clk_src_sel;
+ unsigned int a11clk_src_div;
+ unsigned int ahbclk_khz;
+ unsigned int ahbclk_div;
+ int vdd;
+ unsigned int axiclk_khz;
+ struct pll_config *pll_rate;
+ unsigned long lpj;
+};
+
+static struct clock_state drv_state = { 0 };
+
+/* PVS MAX Voltage in uV as per frequencies*/
+
+# define MAX_14GHZ_VOLTAGE 1350000
+# define MAX_12GHZ_VOLTAGE 1275000
+# define MAX_1GHZ_VOLTAGE 1175000
+# define MAX_NOMINAL_VOLTAGE 1150000
+
+/* PVS deltas as per formula*/
+# define DELTA_LEVEL_1_UV 0
+# define DELTA_LEVEL_2_UV 75000
+# define DELTA_LEVEL_3_UV 150000
+
+
+static struct clkctl_acpu_speed acpu_freq_tbl_cmn[] = {
+ { 0, 19200, ACPU_PLL_TCXO, 0, 0, 2400, 3, 0, 30720 },
+ { 1, 245760, ACPU_PLL_1, 1, 0, 30720, 3, MAX_NOMINAL_VOLTAGE, 61440 },
+ { 1, 320000, ACPU_PLL_0, 4, 2, 40000, 3, MAX_NOMINAL_VOLTAGE, 122880 },
+ { 1, 480000, ACPU_PLL_0, 4, 1, 60000, 3, MAX_NOMINAL_VOLTAGE, 122880 },
+ { 0, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 0, 160000 },
+ { 1, 700800, ACPU_PLL_4, 6, 0, 87500, 3, MAX_NOMINAL_VOLTAGE, 160000,
+ &pll4_cfg_tbl[0]},
+ { 1, 1008000, ACPU_PLL_4, 6, 0, 126000, 3, MAX_1GHZ_VOLTAGE, 200000,
+ &pll4_cfg_tbl[1]},
+};
+
+static struct clkctl_acpu_speed acpu_freq_tbl_1209[] = {
+ { 1, 1209600, ACPU_PLL_4, 6, 0, 151200, 3, MAX_12GHZ_VOLTAGE, 200000,
+ &pll4_cfg_tbl[2]},
+};
+
+static struct clkctl_acpu_speed acpu_freq_tbl_1401[] = {
+ { 1, 1401600, ACPU_PLL_4, 6, 0, 175000, 3, MAX_14GHZ_VOLTAGE, 200000,
+ &pll4_cfg_tbl[3]},
+};
+
+/* Entry corresponding to CDMA build*/
+static struct clkctl_acpu_speed acpu_freq_tbl_196608[] = {
+ { 1, 196608, ACPU_PLL_1, 1, 0, 65536, 2, MAX_NOMINAL_VOLTAGE, 98304 },
+};
+
+static struct clkctl_acpu_speed acpu_freq_tbl_null[] = {
+ { 0 },
+};
+
+static struct clkctl_acpu_speed acpu_freq_tbl[ARRAY_SIZE(acpu_freq_tbl_cmn)
+ + ARRAY_SIZE(acpu_freq_tbl_1209)
+ + ARRAY_SIZE(acpu_freq_tbl_1401)
+ + ARRAY_SIZE(acpu_freq_tbl_null)];
+
+/* Switch to this when reprogramming PLL4 */
+static struct clkctl_acpu_speed *backup_s;
+
+#ifdef CONFIG_CPU_FREQ_MSM
+static struct cpufreq_frequency_table freq_table[NR_CPUS][20];
+
+static void __devinit cpufreq_table_init(void)
+{
+ int cpu;
+ for_each_possible_cpu(cpu) {
+ unsigned int i, freq_cnt = 0;
+
+ /* Construct the freq_table table from acpu_freq_tbl since
+ * the freq_table values need to match frequencies specified
+ * in acpu_freq_tbl and acpu_freq_tbl needs to be fixed up
+ * during init.
+ */
+ for (i = 0; acpu_freq_tbl[i].a11clk_khz != 0
+ && freq_cnt < ARRAY_SIZE(*freq_table)-1; i++) {
+ if (acpu_freq_tbl[i].use_for_scaling) {
+ freq_table[cpu][freq_cnt].index = freq_cnt;
+ freq_table[cpu][freq_cnt].frequency
+ = acpu_freq_tbl[i].a11clk_khz;
+ freq_cnt++;
+ }
+ }
+
+ /* freq_table not big enough to store all usable freqs. */
+ BUG_ON(acpu_freq_tbl[i].a11clk_khz != 0);
+
+ freq_table[cpu][freq_cnt].index = freq_cnt;
+ freq_table[cpu][freq_cnt].frequency = CPUFREQ_TABLE_END;
+ /* Register table with CPUFreq. */
+ cpufreq_frequency_table_get_attr(freq_table[cpu], cpu);
+ pr_info("CPU%d: %d scaling frequencies supported.\n",
+ cpu, freq_cnt);
+ }
+}
+#else
+static void __devinit cpufreq_table_init(void) { }
+#endif
+
+static void update_jiffies(int cpu, unsigned long loops)
+{
+#ifdef CONFIG_SMP
+ for_each_possible_cpu(cpu) {
+ per_cpu(cpu_data, cpu).loops_per_jiffy =
+ loops;
+ }
+#endif
+ /* Adjust the global one */
+ loops_per_jiffy = loops;
+}
+
+/* Assumes PLL4 is off and the acpuclock isn't sourced from PLL4 */
+static void acpuclk_config_pll4(struct pll_config *pll)
+{
+ /*
+ * Make sure write to disable PLL_4 has completed
+ * before reconfiguring that PLL.
+ */
+ mb();
+ writel_relaxed(pll->l, PLL4_L_VAL_ADDR);
+ writel_relaxed(pll->m, PLL4_M_VAL_ADDR);
+ writel_relaxed(pll->n, PLL4_N_VAL_ADDR);
+ /* Make sure PLL is programmed before returning. */
+ mb();
+}
+
+/* Set proper dividers for the given clock speed. */
+static void acpuclk_set_div(const struct clkctl_acpu_speed *hunt_s)
+{
+ uint32_t reg_clkctl, reg_clksel, clk_div, src_sel;
+
+ reg_clksel = readl_relaxed(A11S_CLK_SEL_ADDR);
+
+ /* AHB_CLK_DIV */
+ clk_div = (reg_clksel >> 1) & 0x03;
+ /* CLK_SEL_SRC1NO */
+ src_sel = reg_clksel & 1;
+
+ /*
+ * If the new clock divider is higher than the previous, then
+ * program the divider before switching the clock
+ */
+ if (hunt_s->ahbclk_div > clk_div) {
+ reg_clksel &= ~(0x3 << 1);
+ reg_clksel |= (hunt_s->ahbclk_div << 1);
+ writel_relaxed(reg_clksel, A11S_CLK_SEL_ADDR);
+ }
+
+ /* Program clock source and divider */
+ reg_clkctl = readl_relaxed(A11S_CLK_CNTL_ADDR);
+ reg_clkctl &= ~(0xFF << (8 * src_sel));
+ reg_clkctl |= hunt_s->a11clk_src_sel << (4 + 8 * src_sel);
+ reg_clkctl |= hunt_s->a11clk_src_div << (0 + 8 * src_sel);
+ writel_relaxed(reg_clkctl, A11S_CLK_CNTL_ADDR);
+
+ /* Program clock source selection */
+ reg_clksel ^= 1;
+ writel_relaxed(reg_clksel, A11S_CLK_SEL_ADDR);
+
+ /* Wait for the clock switch to complete */
+ mb();
+ udelay(50);
+
+ /*
+ * If the new clock divider is lower than the previous, then
+ * program the divider after switching the clock
+ */
+ if (hunt_s->ahbclk_div < clk_div) {
+ reg_clksel &= ~(0x3 << 1);
+ reg_clksel |= (hunt_s->ahbclk_div << 1);
+ writel_relaxed(reg_clksel, A11S_CLK_SEL_ADDR);
+ }
+}
+
+static int acpuclk_set_vdd_level(int vdd)
+{
+ int rc;
+
+ rc = regulator_set_voltage(drv_state.vreg_cpu, vdd, vdd);
+ if (rc) {
+ pr_err("failed to set vdd=%d uV\n", vdd);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int acpuclk_8625q_set_rate(int cpu, unsigned long rate,
+ enum setrate_reason reason)
+{
+ uint32_t reg_clkctl;
+ struct clkctl_acpu_speed *cur_s, *tgt_s, *strt_s;
+ int res, rc = 0;
+ unsigned int plls_enabled = 0, pll;
+ int delta;
+
+
+ if (reason == SETRATE_CPUFREQ)
+ mutex_lock(&drv_state.lock);
+
+ strt_s = cur_s = drv_state.current_speed;
+
+ WARN_ONCE(cur_s == NULL, "%s: not initialized\n", __func__);
+ if (cur_s == NULL) {
+ rc = -ENOENT;
+ goto out;
+ }
+
+ cur_s->vdd = regulator_get_voltage(drv_state.vreg_cpu);
+ if (cur_s->vdd <= 0)
+ goto out;
+
+ pr_debug("current freq=%dKhz vdd=%duV\n",
+ cur_s->a11clk_khz, cur_s->vdd);
+
+ if (rate == cur_s->a11clk_khz)
+ goto out;
+
+ for (tgt_s = acpu_freq_tbl; tgt_s->a11clk_khz != 0; tgt_s++) {
+ if (tgt_s->a11clk_khz == rate)
+ break;
+ }
+
+ if (tgt_s->a11clk_khz == 0) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* Choose the highest speed at or below 'rate' with same PLL. */
+ if (reason != SETRATE_CPUFREQ
+ && tgt_s->a11clk_khz < cur_s->a11clk_khz) {
+ while (tgt_s->pll != ACPU_PLL_TCXO &&
+ tgt_s->pll != cur_s->pll) {
+ pr_debug("Intermediate frequency changes: %u\n",
+ tgt_s->a11clk_khz);
+ tgt_s--;
+ }
+ }
+
+ if (strt_s->pll != ACPU_PLL_TCXO)
+ plls_enabled |= 1 << strt_s->pll;
+
+ /* Need to do this when coming out of power collapse since some modem
+ * firmwares reset the VDD when the application processor enters power
+ * collapse.
+ */
+ if (reason == SETRATE_CPUFREQ || reason == SETRATE_PC) {
+ /* Increase VDD if needed. */
+ if (tgt_s->vdd > cur_s->vdd) {
+ rc = acpuclk_set_vdd_level(tgt_s->vdd);
+ if (rc < 0) {
+ pr_err("Unable to switch ACPU vdd (%d)\n", rc);
+ goto out;
+ }
+ pr_debug("Increased Vdd to %duV\n", tgt_s->vdd);
+ }
+ }
+
+ /* Set wait states for CPU inbetween frequency changes */
+ reg_clkctl = readl_relaxed(A11S_CLK_CNTL_ADDR);
+ reg_clkctl |= (100 << 16); /* set WT_ST_CNT */
+ writel_relaxed(reg_clkctl, A11S_CLK_CNTL_ADDR);
+
+ pr_debug("Switching from ACPU rate %u KHz -> %u KHz\n",
+ strt_s->a11clk_khz, tgt_s->a11clk_khz);
+
+ delta = abs((int)(strt_s->a11clk_khz - tgt_s->a11clk_khz));
+
+ if (tgt_s->pll == ACPU_PLL_4) {
+ if (strt_s->pll == ACPU_PLL_4 ||
+ delta > drv_state.max_speed_delta_khz) {
+ /*
+ * Enable the backup PLL if required
+ * and switch to it.
+ */
+ clk_enable(pll_clk[backup_s->pll].clk);
+ acpuclk_set_div(backup_s);
+ update_jiffies(cpu, backup_s->lpj);
+ }
+ /* Make sure PLL4 is off before reprogramming */
+ if ((plls_enabled & (1 << tgt_s->pll))) {
+ clk_disable(pll_clk[tgt_s->pll].clk);
+ plls_enabled &= ~(1 << tgt_s->pll);
+ }
+ acpuclk_config_pll4(tgt_s->pll_rate);
+ pll_clk[tgt_s->pll].clk->rate = tgt_s->a11clk_khz*1000;
+
+ } else if (strt_s->pll == ACPU_PLL_4) {
+ if (delta > drv_state.max_speed_delta_khz) {
+ /*
+ * Enable the backup PLL if required
+ * and switch to it.
+ */
+ clk_enable(pll_clk[backup_s->pll].clk);
+ acpuclk_set_div(backup_s);
+ update_jiffies(cpu, backup_s->lpj);
+ }
+ }
+
+ if ((tgt_s->pll != ACPU_PLL_TCXO) &&
+ !(plls_enabled & (1 << tgt_s->pll))) {
+ rc = clk_enable(pll_clk[tgt_s->pll].clk);
+ if (rc < 0) {
+ pr_err("PLL%d enable failed (%d)\n",
+ tgt_s->pll, rc);
+ goto out;
+ }
+ plls_enabled |= 1 << tgt_s->pll;
+ }
+ acpuclk_set_div(tgt_s);
+ drv_state.current_speed = tgt_s;
+ pr_debug("The new clock speed is %u\n", tgt_s->a11clk_khz);
+ /* Re-adjust lpj for the new clock speed. */
+ update_jiffies(cpu, tgt_s->lpj);
+
+ /* Disable the backup PLL */
+ if ((delta > drv_state.max_speed_delta_khz)
+ || (strt_s->pll == ACPU_PLL_4 &&
+ tgt_s->pll == ACPU_PLL_4))
+ clk_disable(pll_clk[backup_s->pll].clk);
+
+ /* Nothing else to do for SWFI. */
+ if (reason == SETRATE_SWFI)
+ goto out;
+
+ /* Change the AXI bus frequency if we can. */
+ if (reason != SETRATE_PC &&
+ strt_s->axiclk_khz != tgt_s->axiclk_khz) {
+ res = clk_set_rate(drv_state.ebi1_clk,
+ tgt_s->axiclk_khz * 1000);
+ pr_debug("AXI bus set freq %d\n",
+ tgt_s->axiclk_khz * 1000);
+ if (res < 0)
+ pr_warning("Setting AXI min rate failed (%d)\n", res);
+ }
+
+ /* Disable PLLs we are not using anymore. */
+ if (tgt_s->pll != ACPU_PLL_TCXO)
+ plls_enabled &= ~(1 << tgt_s->pll);
+ for (pll = ACPU_PLL_0; pll < ACPU_PLL_END; pll++)
+ if (plls_enabled & (1 << pll))
+ clk_disable(pll_clk[pll].clk);
+
+ /* Nothing else to do for power collapse. */
+ if (reason == SETRATE_PC)
+ goto out;
+
+ /* Drop VDD level if we can. */
+ if (tgt_s->vdd < strt_s->vdd) {
+ res = acpuclk_set_vdd_level(tgt_s->vdd);
+ if (res < 0)
+ pr_warning("Unable to drop ACPU vdd (%d)\n", res);
+ pr_debug("Decreased Vdd to %duV\n", tgt_s->vdd);
+ }
+
+ pr_debug("ACPU speed change complete\n");
+out:
+ if (reason == SETRATE_CPUFREQ)
+ mutex_unlock(&drv_state.lock);
+
+ return rc;
+}
+
+static int __devinit acpuclk_hw_init(void) /* probe-time clock/regulator bring-up; returns 0 or -errno */
+{
+	struct clkctl_acpu_speed *speed;
+	uint32_t div, sel, reg_clksel;
+	int res;
+
+	/*
+	 * Prepare all the PLLs because we enable/disable them
+	 * from atomic context and can't always ensure they're
+	 * all prepared in non-atomic context. Same goes for
+	 * ebi1_acpu_clk.
+	 */
+	BUG_ON(clk_prepare(pll_clk[ACPU_PLL_0].clk));
+	BUG_ON(clk_prepare(pll_clk[ACPU_PLL_1].clk));
+	BUG_ON(clk_prepare(pll_clk[ACPU_PLL_2].clk));
+	BUG_ON(clk_prepare(pll_clk[ACPU_PLL_4].clk));
+	BUG_ON(clk_prepare(drv_state.ebi1_clk));
+
+	/*
+	 * Determine the rate of ACPU clock
+	 */
+
+	if (!(readl_relaxed(A11S_CLK_SEL_ADDR) & 0x01)) { /* CLK_SEL_SRC1N0 */
+		/* CLK_SRC0_SEL */
+		sel = (readl_relaxed(A11S_CLK_CNTL_ADDR) >> 12) & 0x7;
+		/* CLK_SRC0_DIV */
+		div = (readl_relaxed(A11S_CLK_CNTL_ADDR) >> 8) & 0x0f;
+	} else {
+		/* CLK_SRC1_SEL */
+		sel = (readl_relaxed(A11S_CLK_CNTL_ADDR) >> 4) & 0x07;
+		/* CLK_SRC1_DIV */
+		div = readl_relaxed(A11S_CLK_CNTL_ADDR) & 0x0f;
+	}
+
+	for (speed = acpu_freq_tbl; speed->a11clk_khz != 0; speed++) { /* map hw sel/div back to a table entry */
+		if (speed->a11clk_src_sel == sel
+			&& (speed->a11clk_src_div == div))
+			break;
+	}
+	if (speed->a11clk_khz == 0) {
+		pr_err("Error - ACPU clock reports invalid speed\n");
+		return -EINVAL;
+	}
+
+	drv_state.current_speed = speed;
+	if (speed->pll != ACPU_PLL_TCXO) { /* vote for the PLL we booted on so it stays up */
+		if (clk_enable(pll_clk[speed->pll].clk)) {
+			pr_warning("Failed to vote for boot PLL\n");
+			return -ENODEV;
+		}
+	}
+
+	reg_clksel = readl_relaxed(A11S_CLK_SEL_ADDR);
+	reg_clksel &= ~(0x3 << 14);
+	reg_clksel |= (0x1 << 14); /* set field at bits 15:14 to 1 -- TODO confirm meaning per A11S_CLK_SEL spec */
+	writel_relaxed(reg_clksel, A11S_CLK_SEL_ADDR);
+
+	res = clk_set_rate(drv_state.ebi1_clk, speed->axiclk_khz * 1000);
+	if (res < 0) {
+		pr_warning("Setting AXI min rate failed (%d)\n", res);
+		return -ENODEV;
+	}
+	res = clk_enable(drv_state.ebi1_clk);
+	if (res < 0) {
+		pr_warning("Enabling AXI clock failed (%d)\n", res);
+		return -ENODEV;
+	}
+
+	drv_state.vreg_cpu = regulator_get(NULL, "vddx_cx");
+	if (IS_ERR(drv_state.vreg_cpu)) {
+		res = PTR_ERR(drv_state.vreg_cpu);
+		pr_err("could not get regulator: %d\n", res);
+	} /* NOTE(review): on failure vreg_cpu stays an ERR_PTR -- confirm all users check IS_ERR */
+
+	pr_info("ACPU running at %d KHz\n", speed->a11clk_khz);
+	return 0;
+}
+
+static unsigned long acpuclk_8625q_get_rate(int cpu) /* current ACPU rate in KHz; 0 (with one-shot warning) if not initialized */
+{
+	WARN_ONCE(drv_state.current_speed == NULL,
+		"%s: not initialized\n", __func__);
+	if (drv_state.current_speed)
+		return drv_state.current_speed->a11clk_khz;
+	else
+		return 0;
+}
+
+#define MHZ 1000000
+
+static void __devinit select_freq_plan(unsigned int pvs_voltage) /* assemble acpu_freq_tbl for this part and apply PVS voltages */
+{
+	unsigned long pll_mhz[ACPU_PLL_END] = {0}; /* zero-init: PLLs with no name would otherwise be read uninitialized below */
+	int i;
+	int size;
+	int delta[3] = {DELTA_LEVEL_1_UV, DELTA_LEVEL_2_UV, DELTA_LEVEL_3_UV};
+	struct clkctl_acpu_speed *tbl;
+
+	/* Get PLL clocks */
+	for (i = 0; i < ACPU_PLL_END; i++) {
+		if (pll_clk[i].name) {
+			pll_clk[i].clk = clk_get_sys("acpu", pll_clk[i].name);
+			if (IS_ERR(pll_clk[i].clk)) {
+				pll_mhz[i] = 0;
+				continue;
+			}
+			/* Get PLL's Rate */
+			pll_mhz[i] = clk_get_rate(pll_clk[i].clk)/MHZ;
+		}
+	}
+
+	memcpy(acpu_freq_tbl, acpu_freq_tbl_cmn, sizeof(acpu_freq_tbl_cmn));
+	size = ARRAY_SIZE(acpu_freq_tbl_cmn);
+
+	i = 0; /* needed if we have a 1Ghz part */
+	/* select if it is a 1.2Ghz part */
+	if (pll_mhz[ACPU_PLL_4] == 1209) {
+		memcpy(acpu_freq_tbl + size, acpu_freq_tbl_1209,
+				sizeof(acpu_freq_tbl_1209));
+		size += ARRAY_SIZE(acpu_freq_tbl_1209); /* size is an element offset, not bytes */
+		i = 1; /* set the delta index */
+	}
+	/* select if it is a 1.4Ghz part */
+	if (pll_mhz[ACPU_PLL_4] == 1401) {
+		memcpy(acpu_freq_tbl + size, acpu_freq_tbl_1209,
+				sizeof(acpu_freq_tbl_1209));
+		size += ARRAY_SIZE(acpu_freq_tbl_1209);
+		memcpy(acpu_freq_tbl + size, acpu_freq_tbl_1401,
+				sizeof(acpu_freq_tbl_1401));
+		size += ARRAY_SIZE(acpu_freq_tbl_1401);
+		i = 2; /* set the delta index */
+	}
+
+	memcpy(acpu_freq_tbl + size, acpu_freq_tbl_null,
+			sizeof(acpu_freq_tbl_null));
+	size += ARRAY_SIZE(acpu_freq_tbl_null); /* size is an element offset, not bytes */
+
+	/* Alter the freq value in freq_tbl if it is a CDMA build*/
+	if (pll_mhz[ACPU_PLL_1] == 196) {
+
+		for (tbl = acpu_freq_tbl; tbl->a11clk_khz; tbl++) {
+			if (tbl->a11clk_khz == 245760 &&
+					tbl->pll == ACPU_PLL_1) {
+				pr_debug("Upgrading pll1 freq to 196 Mhz\n");
+				memcpy(tbl, acpu_freq_tbl_196608,
+					sizeof(acpu_freq_tbl_196608));
+				break;
+			}
+		}
+	}
+
+	/*
+	 *PVS Voltage calculation formula
+	 *1.4 Ghz device
+	 *1.4 Ghz: Max(PVS_voltage,1.35V)
+	 *1.2 Ghz: Max(PVS_voltage - 75mV,1.275V)
+	 *1.0 Ghz: Max(PVS_voltage - 150mV, 1.175V)
+	 *1.2 Ghz device
+	 *1.2 Ghz: Max(PVS_voltage,1.275V)
+	 *1.0 Ghz: Max(PVS_voltage - 75mV,1.175V)
+	 *Nominal Mode: 1.15V
+	 */
+	for (tbl = acpu_freq_tbl; tbl->a11clk_khz; tbl++) {
+		if (tbl->a11clk_khz >= 1008000) {
+			/*
+			 * Change voltage as per PVS formula,
+			 * i is initialized above with 2 or 1
+			 * depending upon whether it is a 1.4Ghz
+			 * or 1.2Ghz, so, we get the proper value
+			 * from delta[i] which is to be deducted
+			 * from PVS voltage.
+			 */
+
+			tbl->vdd = max((int)(pvs_voltage - delta[i]), tbl->vdd);
+			i--; /* assumes at most i+1 entries >= 1008000, else i goes negative -- TODO confirm table shape */
+		}
+	}
+
+
+	/* find the backup PLL entry from the table */
+	for (tbl = acpu_freq_tbl; tbl->a11clk_khz; tbl++) {
+		if (tbl->pll == ACPU_PLL_2 &&
+				tbl->a11clk_src_div == 1) {
+			backup_s = tbl;
+			break;
+		}
+	}
+
+	BUG_ON(!backup_s);
+
+}
+
+/*
+ * Hardware requires the CPU to be dropped to less than MAX_WAIT_FOR_IRQ_KHZ
+ * before entering a wait for irq low-power mode. Find a suitable rate.
+ */
+static unsigned long __devinit find_wait_for_irq_khz(void) /* pick the highest table rate <= MAX_WAIT_FOR_IRQ_KHZ */
+{
+	unsigned long found_khz = 0;
+	int i;
+
+	for (i = 0; acpu_freq_tbl[i].a11clk_khz &&
+		acpu_freq_tbl[i].a11clk_khz <= MAX_WAIT_FOR_IRQ_KHZ; i++)
+		found_khz = acpu_freq_tbl[i].a11clk_khz; /* keeps the last qualifying entry; assumes table is ascending -- TODO confirm */
+
+	return found_khz; /* 0 if no table entry is under the cap */
+}
+
+static void __devinit lpj_init(void) /* precompute loops-per-jiffy for every table rate, scaled from the current speed */
+{
+	int i = 0, cpu;
+	const struct clkctl_acpu_speed *base_clk = drv_state.current_speed;
+	unsigned long loops;
+
+	for_each_possible_cpu(cpu) {
+#ifdef CONFIG_SMP
+		loops = per_cpu(cpu_data, cpu).loops_per_jiffy;
+#else
+		loops = loops_per_jiffy;
+#endif
+		for (i = 0; acpu_freq_tbl[i].a11clk_khz; i++) {
+			acpu_freq_tbl[i].lpj = cpufreq_scale(
+					loops,
+					base_clk->a11clk_khz,
+					acpu_freq_tbl[i].a11clk_khz); /* NOTE(review): table is global, so each CPU pass overwrites the last -- confirm intended */
+		}
+
+	}
+
+}
+
+static struct acpuclk_data acpuclk_8625q_data = { /* ops/parameters handed to acpuclk_register(); wait_for_irq_khz filled in at probe */
+	.set_rate = acpuclk_8625q_set_rate,
+	.get_rate = acpuclk_8625q_get_rate,
+	.power_collapse_khz = POWER_COLLAPSE_KHZ,
+	.switch_time_us = 50,
+};
+
+static void __devinit print_acpu_freq_tbl(void) /* dump the selected frequency plan to the kernel log */
+{
+	struct clkctl_acpu_speed *t;
+	int i;
+
+	pr_info("Id CPU-KHz PLL DIV AHB-KHz ADIV AXI-KHz Vdd\n");
+
+	t = &acpu_freq_tbl[0];
+	for (i = 0; t->a11clk_khz != 0; i++) { /* table is terminated by a zero-rate entry */
+		pr_info("%2d %7d %3d %3d %7d %4d %7d %3d\n",
+			i, t->a11clk_khz, t->pll, t->a11clk_src_div + 1,
+			t->ahbclk_khz, t->ahbclk_div + 1, t->axiclk_khz,
+			t->vdd);
+		t++;
+	}
+}
+
+static int __devinit acpuclk_8625q_probe(struct platform_device *pdev) /* driver entry: build freq plan, init hw, register with acpuclk core */
+{
+	const struct acpuclk_pdata_8625q *pdata = pdev->dev.platform_data; /* NOTE(review): dereferenced below with no NULL check */
+	unsigned int pvs_voltage = pdata->pvs_voltage_uv;
+
+	drv_state.max_speed_delta_khz = pdata->acpu_clk_data->
+						max_speed_delta_khz;
+
+	drv_state.ebi1_clk = clk_get(NULL, "ebi1_acpu_clk");
+	BUG_ON(IS_ERR(drv_state.ebi1_clk));
+
+	mutex_init(&drv_state.lock);
+	select_freq_plan(pvs_voltage);
+	acpuclk_8625q_data.wait_for_irq_khz = find_wait_for_irq_khz();
+
+	if (acpuclk_hw_init() < 0)
+		pr_err("acpuclk_hw_init not successful.\n"); /* NOTE(review): failure is ignored and probe still returns 0 -- confirm intended */
+
+	print_acpu_freq_tbl();
+	lpj_init();
+	acpuclk_register(&acpuclk_8625q_data);
+
+	cpufreq_table_init();
+
+	return 0;
+}
+
+static struct platform_driver acpuclk_8625q_driver = { /* bound by name to the board's "acpuclock-8625q" platform device */
+	.probe = acpuclk_8625q_probe,
+	.driver = {
+		.name = "acpuclock-8625q",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init acpuclk_8625q_init(void) /* module init: just register the platform driver */
+{
+
+	return platform_driver_register(&acpuclk_8625q_driver);
+}
+postcore_initcall(acpuclk_8625q_init); /* postcore level: registered before ordinary device initcalls */
diff --git a/arch/arm/mach-msm/acpuclock-8625q.h b/arch/arm/mach-msm/acpuclock-8625q.h
new file mode 100644
index 0000000..ca2058f
--- /dev/null
+++ b/arch/arm/mach-msm/acpuclock-8625q.h
@@ -0,0 +1,28 @@
+/*
+ * MSM architecture CPU clock driver header
+ *
+ * Copyright (c) 2012, Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ARCH_ARM_MACH_MSM_ACPUCLOCK_8625Q_H
+#define __ARCH_ARM_MACH_MSM_ACPUCLOCK_8625Q_H
+
+#include "acpuclock.h"
+/**
+ * struct acpuclk_pdata_8625q - Platform data for the 8625Q acpuclk driver
+ */
+struct acpuclk_pdata_8625q {
+	struct acpuclk_pdata *acpu_clk_data;	/* common acpuclk platform data (max_speed_delta_khz, ...) */
+	unsigned int pvs_voltage_uv;		/* PVS voltage (uV) used to derive per-rate vdd in select_freq_plan() */
+};
+
+#endif /* __ARCH_ARM_MACH_MSM_ACPUCLOCK_8625Q_H */
diff --git a/arch/arm/mach-msm/acpuclock-8974.c b/arch/arm/mach-msm/acpuclock-8974.c
index 0fbd6dc..b98fcdd 100644
--- a/arch/arm/mach-msm/acpuclock-8974.c
+++ b/arch/arm/mach-msm/acpuclock-8974.c
@@ -56,8 +56,7 @@
.vreg[VREG_CORE] = { "krait0", 1050000 },
.vreg[VREG_MEM] = { "krait0_mem", 1050000 },
.vreg[VREG_DIG] = { "krait0_dig", LVL_HIGH },
- .vreg[VREG_HFPLL_A] = { "krait0_hfpll_a", 2150000 },
- .vreg[VREG_HFPLL_B] = { "krait0_hfpll_b", 1800000 },
+ .vreg[VREG_HFPLL_A] = { "krait0_hfpll", 1800000 },
},
[CPU1] = {
.hfpll_phys_base = 0xF909A000,
@@ -66,8 +65,7 @@
.vreg[VREG_CORE] = { "krait1", 1050000 },
.vreg[VREG_MEM] = { "krait1_mem", 1050000 },
.vreg[VREG_DIG] = { "krait1_dig", LVL_HIGH },
- .vreg[VREG_HFPLL_A] = { "krait1_hfpll_a", 2150000 },
- .vreg[VREG_HFPLL_B] = { "krait1_hfpll_b", 1800000 },
+ .vreg[VREG_HFPLL_A] = { "krait1_hfpll", 1800000 },
},
[CPU2] = {
.hfpll_phys_base = 0xF90AA000,
@@ -76,8 +74,7 @@
.vreg[VREG_CORE] = { "krait2", 1050000 },
.vreg[VREG_MEM] = { "krait2_mem", 1050000 },
.vreg[VREG_DIG] = { "krait2_dig", LVL_HIGH },
- .vreg[VREG_HFPLL_A] = { "krait2_hfpll_a", 2150000 },
- .vreg[VREG_HFPLL_B] = { "krait2_hfpll_b", 1800000 },
+ .vreg[VREG_HFPLL_A] = { "krait2_hfpll", 1800000 },
},
[CPU3] = {
.hfpll_phys_base = 0xF90BA000,
@@ -86,15 +83,13 @@
.vreg[VREG_CORE] = { "krait3", 1050000 },
.vreg[VREG_MEM] = { "krait3_mem", 1050000 },
.vreg[VREG_DIG] = { "krait3_dig", LVL_HIGH },
- .vreg[VREG_HFPLL_A] = { "krait3_hfpll_a", 2150000 },
- .vreg[VREG_HFPLL_B] = { "krait3_hfpll_b", 1800000 },
+ .vreg[VREG_HFPLL_A] = { "krait3_hfpll", 1800000 },
},
[L2] = {
.hfpll_phys_base = 0xF9016000,
.l2cpmr_iaddr = 0x0500,
.sec_clk_sel = 2,
- .vreg[VREG_HFPLL_A] = { "l2_hfpll_a", 2150000 },
- .vreg[VREG_HFPLL_B] = { "l2_hfpll_b", 1800000 },
+ .vreg[VREG_HFPLL_A] = { "l2_hfpll", 1800000 },
},
};
diff --git a/arch/arm/mach-msm/board-8064.c b/arch/arm/mach-msm/board-8064.c
index e5212a4..f70e41a 100644
--- a/arch/arm/mach-msm/board-8064.c
+++ b/arch/arm/mach-msm/board-8064.c
@@ -25,6 +25,7 @@
#include <linux/mfd/pm8xxx/misc.h>
#include <linux/msm_ssbi.h>
#include <linux/spi/spi.h>
+#include <linux/dma-contiguous.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/qcom_crypto_device.h>
#include <linux/msm_ion.h>
@@ -296,6 +297,7 @@
.reusable = FMEM_ENABLED,
.mem_is_fmem = FMEM_ENABLED,
.fixed_position = FIXED_MIDDLE,
+ .is_cma = 1,
};
static struct ion_cp_heap_pdata cp_mfc_apq8064_ion_pdata = {
@@ -320,6 +322,17 @@
};
#endif
+static u64 msm_dmamask = DMA_BIT_MASK(32);
+
+static struct platform_device ion_mm_heap_device = {
+ .name = "ion-mm-heap-device",
+ .id = -1,
+ .dev = {
+ .dma_mask = &msm_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ }
+};
+
/**
* These heaps are listed in the order they will be allocated. Due to
* video hardware restrictions and content protection the FW heap has to
@@ -345,6 +358,7 @@
.size = MSM_ION_MM_SIZE,
.memory_type = ION_EBI_TYPE,
.extra_data = (void *) &cp_mm_apq8064_ion_pdata,
+ .priv = &ion_mm_heap_device.dev
},
{
.id = ION_MM_FIRMWARE_HEAP_ID,
@@ -454,26 +468,45 @@
{
#if defined(CONFIG_ION_MSM) && defined(CONFIG_MSM_MULTIMEDIA_USE_ION)
unsigned int i;
+ unsigned int ret;
unsigned int fixed_size = 0;
unsigned int fixed_low_size, fixed_middle_size, fixed_high_size;
unsigned long fixed_low_start, fixed_middle_start, fixed_high_start;
+ unsigned long cma_alignment;
+ unsigned int low_use_cma = 0;
+ unsigned int middle_use_cma = 0;
+ unsigned int high_use_cma = 0;
+
fixed_low_size = 0;
fixed_middle_size = 0;
fixed_high_size = 0;
+ cma_alignment = PAGE_SIZE << max(MAX_ORDER, pageblock_order);
+
for (i = 0; i < apq8064_ion_pdata.nr; ++i) {
- const struct ion_platform_heap *heap =
+ struct ion_platform_heap *heap =
&(apq8064_ion_pdata.heaps[i]);
+ int use_cma = 0;
+
if (heap->extra_data) {
int fixed_position = NOT_FIXED;
switch ((int)heap->type) {
case ION_HEAP_TYPE_CP:
+ if (((struct ion_cp_heap_pdata *)
+ heap->extra_data)->is_cma) {
+ heap->size = ALIGN(heap->size,
+ cma_alignment);
+ use_cma = 1;
+ }
fixed_position = ((struct ion_cp_heap_pdata *)
heap->extra_data)->fixed_position;
break;
+ case ION_HEAP_TYPE_DMA:
+ use_cma = 1;
+ /* Purposely fall through here */
case ION_HEAP_TYPE_CARVEOUT:
fixed_position = ((struct ion_co_heap_pdata *)
heap->extra_data)->fixed_position;
@@ -487,28 +520,70 @@
else
reserve_mem_for_ion(MEMTYPE_EBI1, heap->size);
- if (fixed_position == FIXED_LOW)
+ if (fixed_position == FIXED_LOW) {
fixed_low_size += heap->size;
- else if (fixed_position == FIXED_MIDDLE)
+ low_use_cma = use_cma;
+ } else if (fixed_position == FIXED_MIDDLE) {
fixed_middle_size += heap->size;
- else if (fixed_position == FIXED_HIGH)
+ middle_use_cma = use_cma;
+ } else if (fixed_position == FIXED_HIGH) {
fixed_high_size += heap->size;
+ high_use_cma = use_cma;
+ } else if (use_cma) {
+ /*
+ * Heaps that use CMA but are not part of the
+ * fixed set. Create wherever.
+ */
+ dma_declare_contiguous(
+ heap->priv,
+ heap->size,
+ 0,
+ 0xb0000000);
+
+ }
}
}
if (!fixed_size)
return;
- /* Since the fixed area may be carved out of lowmem,
- * make sure the length is a multiple of 1M.
+ /*
+ * Given the setup for the fixed area, we can't round up all sizes.
+ * Some sizes must be set up exactly and aligned correctly. Incorrect
+ * alignments are considered a configuration issue
*/
- fixed_size = (fixed_size + HOLE_SIZE + SECTION_SIZE - 1)
- & SECTION_MASK;
- apq8064_reserve_fixed_area(fixed_size);
fixed_low_start = APQ8064_FIXED_AREA_START;
+ if (low_use_cma) {
+ BUG_ON(!IS_ALIGNED(fixed_low_size + HOLE_SIZE, cma_alignment));
+ BUG_ON(!IS_ALIGNED(fixed_low_start, cma_alignment));
+ } else {
+ BUG_ON(!IS_ALIGNED(fixed_low_size + HOLE_SIZE, SECTION_SIZE));
+ ret = memblock_remove(fixed_low_start,
+ fixed_low_size + HOLE_SIZE);
+ BUG_ON(ret);
+ }
+
fixed_middle_start = fixed_low_start + fixed_low_size + HOLE_SIZE;
+ if (middle_use_cma) {
+ BUG_ON(!IS_ALIGNED(fixed_middle_start, cma_alignment));
+ BUG_ON(!IS_ALIGNED(fixed_middle_size, cma_alignment));
+ } else {
+ BUG_ON(!IS_ALIGNED(fixed_middle_size, SECTION_SIZE));
+ ret = memblock_remove(fixed_middle_start, fixed_middle_size);
+ BUG_ON(ret);
+ }
+
fixed_high_start = fixed_middle_start + fixed_middle_size;
+ if (high_use_cma) {
+ fixed_high_size = ALIGN(fixed_high_size, cma_alignment);
+ BUG_ON(!IS_ALIGNED(fixed_high_start, cma_alignment));
+ } else {
+ /* This is the end of the fixed area so it's okay to round up */
+ fixed_high_size = ALIGN(fixed_high_size, SECTION_SIZE);
+ ret = memblock_remove(fixed_high_start, fixed_high_size);
+ BUG_ON(ret);
+ }
for (i = 0; i < apq8064_ion_pdata.nr; ++i) {
struct ion_platform_heap *heap = &(apq8064_ion_pdata.heaps[i]);
@@ -524,6 +599,7 @@
fixed_position = pdata->fixed_position;
break;
case ION_HEAP_TYPE_CARVEOUT:
+ case ION_HEAP_TYPE_DMA:
fixed_position = ((struct ion_co_heap_pdata *)
heap->extra_data)->fixed_position;
break;
@@ -537,6 +613,14 @@
break;
case FIXED_MIDDLE:
heap->base = fixed_middle_start;
+ if (middle_use_cma) {
+ ret = dma_declare_contiguous(
+ heap->priv,
+ heap->size,
+ fixed_middle_start,
+ 0xa0000000);
+ WARN_ON(ret);
+ }
pdata->secure_base = fixed_middle_start
- HOLE_SIZE;
pdata->secure_size = HOLE_SIZE + heap->size;
@@ -3162,6 +3246,11 @@
},
};
+static struct platform_device msm_dev_avtimer_device = {
+ .name = "dev_avtimer",
+ .dev = { .platform_data = &dev_avtimer_pdata },
+};
+
/* Sensors DSPS platform data */
#define DSPS_PIL_GENERIC_NAME "dsps"
static void __init apq8064_init_dsps(void)
@@ -3601,6 +3690,9 @@
platform_device_register(&mpq_keypad_device);
} else if (machine_is_mpq8064_hrd())
platform_device_register(&mpq_hrd_keys_pdev);
+ if (machine_is_mpq8064_cdp() || machine_is_mpq8064_hrd() ||
+ machine_is_mpq8064_dtv())
+ platform_device_register(&msm_dev_avtimer_device);
}
MACHINE_START(APQ8064_CDP, "QCT APQ8064 CDP")
diff --git a/arch/arm/mach-msm/board-8226.c b/arch/arm/mach-msm/board-8226.c
index 3e90489..fde82f4 100644
--- a/arch/arm/mach-msm/board-8226.c
+++ b/arch/arm/mach-msm/board-8226.c
@@ -64,6 +64,8 @@
CLK_DUMMY("iface_clk", BLSP1_UART_CLK, "f991f000.serial", OFF),
CLK_DUMMY("iface_clk", HSUSB_IFACE_CLK, "f9a55000.usb", OFF),
CLK_DUMMY("core_clk", HSUSB_CORE_CLK, "f9a55000.usb", OFF),
+ CLK_DUMMY("dfab_clk", DFAB_CLK, "msm_sps", OFF),
+ CLK_DUMMY("dma_bam_pclk", DMA_BAM_P_CLK, "msm_sps", OFF),
CLK_DUMMY("iface_clk", NULL, "msm_sdcc.1", OFF),
CLK_DUMMY("core_clk", NULL, "msm_sdcc.1", OFF),
CLK_DUMMY("bus_clk", NULL, "msm_sdcc.1", OFF),
diff --git a/arch/arm/mach-msm/board-8930-camera.c b/arch/arm/mach-msm/board-8930-camera.c
index be55031..e35b3c1 100644
--- a/arch/arm/mach-msm/board-8930-camera.c
+++ b/arch/arm/mach-msm/board-8930-camera.c
@@ -264,7 +264,7 @@
{
.src = MSM_BUS_MASTER_VFE,
.dst = MSM_BUS_SLAVE_EBI_CH0,
- .ab = 274406400,
+ .ab = 600000000,
.ib = 2656000000UL,
},
{
diff --git a/arch/arm/mach-msm/board-8930.c b/arch/arm/mach-msm/board-8930.c
index 790afe1..13b16f2 100644
--- a/arch/arm/mach-msm/board-8930.c
+++ b/arch/arm/mach-msm/board-8930.c
@@ -29,6 +29,7 @@
#ifdef CONFIG_ANDROID_PMEM
#include <linux/android_pmem.h>
#endif
+#include <linux/dma-contiguous.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/qcom_crypto_device.h>
#include <linux/platform_data/qcom_wcnss_device.h>
@@ -341,6 +342,7 @@
.reusable = FMEM_ENABLED,
.mem_is_fmem = FMEM_ENABLED,
.fixed_position = FIXED_MIDDLE,
+ .is_cma = 1,
};
static struct ion_cp_heap_pdata cp_mfc_msm8930_ion_pdata = {
@@ -365,6 +367,18 @@
};
#endif
+
+static u64 msm_dmamask = DMA_BIT_MASK(32);
+
+static struct platform_device ion_mm_heap_device = {
+ .name = "ion-mm-heap-device",
+ .id = -1,
+ .dev = {
+ .dma_mask = &msm_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ }
+};
+
/**
* These heaps are listed in the order they will be allocated. Due to
* video hardware restrictions and content protection the FW heap has to
@@ -390,6 +404,7 @@
.size = MSM_ION_MM_SIZE,
.memory_type = ION_EBI_TYPE,
.extra_data = (void *) &cp_mm_msm8930_ion_pdata,
+ .priv = &ion_mm_heap_device.dev
},
{
.id = ION_MM_FIRMWARE_HEAP_ID,
@@ -500,26 +515,44 @@
{
#if defined(CONFIG_ION_MSM) && defined(CONFIG_MSM_MULTIMEDIA_USE_ION)
unsigned int i;
+ int ret;
unsigned int fixed_size = 0;
unsigned int fixed_low_size, fixed_middle_size, fixed_high_size;
unsigned long fixed_low_start, fixed_middle_start, fixed_high_start;
+ unsigned long cma_alignment;
+ unsigned int low_use_cma = 0;
+ unsigned int middle_use_cma = 0;
+ unsigned int high_use_cma = 0;
fixed_low_size = 0;
fixed_middle_size = 0;
fixed_high_size = 0;
+ cma_alignment = PAGE_SIZE << max(MAX_ORDER, pageblock_order);
+
for (i = 0; i < msm8930_ion_pdata.nr; ++i) {
- const struct ion_platform_heap *heap =
+ struct ion_platform_heap *heap =
&(msm8930_ion_pdata.heaps[i]);
+ int use_cma = 0;
+
if (heap->extra_data) {
int fixed_position = NOT_FIXED;
switch ((int) heap->type) {
case ION_HEAP_TYPE_CP:
+ if (((struct ion_cp_heap_pdata *)
+ heap->extra_data)->is_cma) {
+ heap->size = ALIGN(heap->size,
+ cma_alignment);
+ use_cma = 1;
+ }
fixed_position = ((struct ion_cp_heap_pdata *)
heap->extra_data)->fixed_position;
break;
+ case ION_HEAP_TYPE_DMA:
+ use_cma = 1;
+ /* Purposely fall through here */
case ION_HEAP_TYPE_CARVEOUT:
fixed_position = ((struct ion_co_heap_pdata *)
heap->extra_data)->fixed_position;
@@ -533,29 +566,68 @@
else
reserve_mem_for_ion(MEMTYPE_EBI1, heap->size);
- if (fixed_position == FIXED_LOW)
+ if (fixed_position == FIXED_LOW) {
fixed_low_size += heap->size;
- else if (fixed_position == FIXED_MIDDLE)
+ low_use_cma = use_cma;
+ } else if (fixed_position == FIXED_MIDDLE) {
fixed_middle_size += heap->size;
- else if (fixed_position == FIXED_HIGH)
+ middle_use_cma = use_cma;
+ } else if (fixed_position == FIXED_HIGH) {
fixed_high_size += heap->size;
-
+ high_use_cma = use_cma;
+ } else if (use_cma) {
+ /*
+ * Heaps that use CMA but are not part of the
+ * fixed set. Create wherever.
+ */
+ dma_declare_contiguous(
+ heap->priv,
+ heap->size,
+ 0,
+ 0xb0000000);
+ }
}
}
if (!fixed_size)
return;
-
- /* Since the fixed area may be carved out of lowmem,
- * make sure the length is a multiple of 1M.
+ /*
+ * Given the setup for the fixed area, we can't round up all sizes.
+ * Some sizes must be set up exactly and aligned correctly. Incorrect
+ * alignments are considered a configuration issue
*/
- fixed_size = (fixed_size + MSM_MM_FW_SIZE + SECTION_SIZE - 1)
- & SECTION_MASK;
- msm8930_reserve_fixed_area(fixed_size);
fixed_low_start = MSM8930_FIXED_AREA_START;
+ if (low_use_cma) {
+ BUG_ON(!IS_ALIGNED(fixed_low_size + HOLE_SIZE, cma_alignment));
+ BUG_ON(!IS_ALIGNED(fixed_low_start, cma_alignment));
+ } else {
+ BUG_ON(!IS_ALIGNED(fixed_low_size + HOLE_SIZE, SECTION_SIZE));
+ ret = memblock_remove(fixed_low_start,
+ fixed_low_size + HOLE_SIZE);
+ BUG_ON(ret);
+ }
+
fixed_middle_start = fixed_low_start + fixed_low_size + HOLE_SIZE;
+ if (middle_use_cma) {
+ BUG_ON(!IS_ALIGNED(fixed_middle_start, cma_alignment));
+ BUG_ON(!IS_ALIGNED(fixed_middle_size, cma_alignment));
+ } else {
+ BUG_ON(!IS_ALIGNED(fixed_middle_size, SECTION_SIZE));
+ ret = memblock_remove(fixed_middle_start, fixed_middle_size);
+ BUG_ON(ret);
+ }
+
fixed_high_start = fixed_middle_start + fixed_middle_size;
+ if (high_use_cma) {
+ fixed_high_size = ALIGN(fixed_high_size, cma_alignment);
+ BUG_ON(!IS_ALIGNED(fixed_high_start, cma_alignment));
+ } else {
+ /* This is the end of the fixed area so it's okay to round up */
+ fixed_high_size = ALIGN(fixed_high_size, SECTION_SIZE);
+ ret = memblock_remove(fixed_high_start, fixed_high_size);
+ BUG_ON(ret);
+ }
for (i = 0; i < msm8930_ion_pdata.nr; ++i) {
struct ion_platform_heap *heap = &(msm8930_ion_pdata.heaps[i]);
@@ -570,6 +642,7 @@
(struct ion_cp_heap_pdata *)heap->extra_data;
fixed_position = pdata->fixed_position;
break;
+ case ION_HEAP_TYPE_DMA:
case ION_HEAP_TYPE_CARVEOUT:
fixed_position = ((struct ion_co_heap_pdata *)
heap->extra_data)->fixed_position;
@@ -584,6 +657,12 @@
break;
case FIXED_MIDDLE:
heap->base = fixed_middle_start;
+ if (middle_use_cma)
+ dma_declare_contiguous(
+ &ion_mm_heap_device.dev,
+ heap->size,
+ fixed_middle_start,
+ 0xa0000000);
pdata->secure_base = fixed_middle_start
- HOLE_SIZE;
pdata->secure_size = HOLE_SIZE + heap->size;
diff --git a/arch/arm/mach-msm/board-8960-camera.c b/arch/arm/mach-msm/board-8960-camera.c
index 7a2e9e1..3853e4c 100644
--- a/arch/arm/mach-msm/board-8960-camera.c
+++ b/arch/arm/mach-msm/board-8960-camera.c
@@ -15,6 +15,7 @@
#include <linux/gpio.h>
#include <mach/camera.h>
#include <mach/msm_bus_board.h>
+#include <mach/socinfo.h>
#include <mach/gpiomux.h>
#include "devices.h"
#include "board-8960.h"
@@ -182,6 +183,23 @@
},
};
+static struct msm_gpiomux_config msm8960_cam_2d_configs_sglte[] = {
+ {
+ .gpio = 20,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &cam_settings[3],
+ [GPIOMUX_SUSPENDED] = &cam_settings[8],
+ },
+ },
+ {
+ .gpio = 21,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &cam_settings[3],
+ [GPIOMUX_SUSPENDED] = &cam_settings[8],
+ },
+ },
+};
+
#define VFE_CAMIF_TIMER1_GPIO 2
#define VFE_CAMIF_TIMER2_GPIO 3
#define VFE_CAMIF_TIMER3_GPIO_INT 4
@@ -828,6 +846,16 @@
void __init msm8960_init_cam(void)
{
+ if (socinfo_get_platform_subtype() == PLATFORM_SUBTYPE_SGLTE) {
+ msm_8960_front_cam_gpio_conf.cam_gpiomux_conf_tbl =
+ msm8960_cam_2d_configs_sglte;
+ msm_8960_front_cam_gpio_conf.cam_gpiomux_conf_tbl_size =
+ ARRAY_SIZE(msm8960_cam_2d_configs_sglte);
+ msm_8960_back_cam_gpio_conf.cam_gpiomux_conf_tbl =
+ msm8960_cam_2d_configs_sglte;
+ msm_8960_back_cam_gpio_conf.cam_gpiomux_conf_tbl_size =
+ ARRAY_SIZE(msm8960_cam_2d_configs_sglte);
+ }
msm_gpiomux_install(msm8960_cam_common_configs,
ARRAY_SIZE(msm8960_cam_common_configs));
diff --git a/arch/arm/mach-msm/board-8960.c b/arch/arm/mach-msm/board-8960.c
index adf4ac0..97639d6 100644
--- a/arch/arm/mach-msm/board-8960.c
+++ b/arch/arm/mach-msm/board-8960.c
@@ -30,6 +30,7 @@
#include <linux/android_pmem.h>
#endif
#include <linux/cyttsp-qc.h>
+#include <linux/dma-contiguous.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/qcom_crypto_device.h>
#include <linux/platform_data/qcom_wcnss_device.h>
@@ -364,6 +365,7 @@
.fixed_position = FIXED_MIDDLE,
.iommu_map_all = 1,
.iommu_2x_map_domain = VIDEO_DOMAIN,
+ .is_cma = 1,
};
static struct ion_cp_heap_pdata cp_mfc_msm8960_ion_pdata = {
@@ -388,6 +390,17 @@
};
#endif
+static u64 msm_dmamask = DMA_BIT_MASK(32);
+
+static struct platform_device ion_mm_heap_device = {
+ .name = "ion-mm-heap-device",
+ .id = -1,
+ .dev = {
+ .dma_mask = &msm_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ }
+};
+
/**
* These heaps are listed in the order they will be allocated. Due to
* video hardware restrictions and content protection the FW heap has to
@@ -413,6 +426,7 @@
.size = MSM_ION_MM_SIZE,
.memory_type = ION_EBI_TYPE,
.extra_data = (void *) &cp_mm_msm8960_ion_pdata,
+ .priv = &ion_mm_heap_device.dev,
},
{
.id = ION_MM_FIRMWARE_HEAP_ID,
@@ -549,21 +563,29 @@
{
#if defined(CONFIG_ION_MSM) && defined(CONFIG_MSM_MULTIMEDIA_USE_ION)
unsigned int i;
+ int ret;
unsigned int fixed_size = 0;
unsigned int fixed_low_size, fixed_middle_size, fixed_high_size;
unsigned long fixed_low_start, fixed_middle_start, fixed_high_start;
+ unsigned long cma_alignment;
+ unsigned int low_use_cma = 0;
+ unsigned int middle_use_cma = 0;
+ unsigned int high_use_cma = 0;
adjust_mem_for_liquid();
fixed_low_size = 0;
fixed_middle_size = 0;
fixed_high_size = 0;
+ cma_alignment = PAGE_SIZE << max(MAX_ORDER, pageblock_order);
+
for (i = 0; i < msm8960_ion_pdata.nr; ++i) {
struct ion_platform_heap *heap =
&(msm8960_ion_pdata.heaps[i]);
int align = SZ_4K;
int iommu_map_all = 0;
int adjacent_mem_id = INVALID_HEAP_ID;
+ int use_cma = 0;
if (heap->extra_data) {
int fixed_position = NOT_FIXED;
@@ -577,7 +599,16 @@
iommu_map_all =
((struct ion_cp_heap_pdata *)
heap->extra_data)->iommu_map_all;
+ if (((struct ion_cp_heap_pdata *)
+ heap->extra_data)->is_cma) {
+ heap->size = ALIGN(heap->size,
+ cma_alignment);
+ use_cma = 1;
+ }
break;
+ case ION_HEAP_TYPE_DMA:
+ use_cma = 1;
+ /* Purposely fall through here */
case ION_HEAP_TYPE_CARVEOUT:
fixed_position = ((struct ion_co_heap_pdata *)
heap->extra_data)->fixed_position;
@@ -601,28 +632,71 @@
else
reserve_mem_for_ion(MEMTYPE_EBI1, heap->size);
- if (fixed_position == FIXED_LOW)
+ if (fixed_position == FIXED_LOW) {
fixed_low_size += heap->size;
- else if (fixed_position == FIXED_MIDDLE)
+ low_use_cma = use_cma;
+ } else if (fixed_position == FIXED_MIDDLE) {
fixed_middle_size += heap->size;
- else if (fixed_position == FIXED_HIGH)
+ middle_use_cma = use_cma;
+ } else if (fixed_position == FIXED_HIGH) {
fixed_high_size += heap->size;
+ high_use_cma = use_cma;
+ } else if (use_cma) {
+ /*
+ * Heaps that use CMA but are not part of the
+ * fixed set. Create wherever.
+ */
+ dma_declare_contiguous(
+ heap->priv,
+ heap->size,
+ 0,
+ 0xb0000000);
+ }
}
}
if (!fixed_size)
return;
- /* Since the fixed area may be carved out of lowmem,
- * make sure the length is a multiple of 1M.
+ /*
+ * Given the setup for the fixed area, we can't round up all sizes.
+ * Some sizes must be set up exactly and aligned correctly. Incorrect
+ * alignments are considered a configuration issue
*/
- fixed_size = (fixed_size + MSM_MM_FW_SIZE + SECTION_SIZE - 1)
- & SECTION_MASK;
- msm8960_reserve_fixed_area(fixed_size);
fixed_low_start = MSM8960_FIXED_AREA_START;
+ if (low_use_cma) {
+ BUG_ON(!IS_ALIGNED(fixed_low_start, cma_alignment));
+ BUG_ON(!IS_ALIGNED(fixed_low_size + HOLE_SIZE, cma_alignment));
+ } else {
+ BUG_ON(!IS_ALIGNED(fixed_low_size + HOLE_SIZE, SECTION_SIZE));
+ ret = memblock_remove(fixed_low_start,
+ fixed_low_size + HOLE_SIZE);
+ BUG_ON(ret);
+ }
+
fixed_middle_start = fixed_low_start + fixed_low_size + HOLE_SIZE;
+ if (middle_use_cma) {
+ BUG_ON(!IS_ALIGNED(fixed_middle_start, cma_alignment));
+ BUG_ON(!IS_ALIGNED(fixed_middle_size, cma_alignment));
+ } else {
+ BUG_ON(!IS_ALIGNED(fixed_middle_size, SECTION_SIZE));
+ ret = memblock_remove(fixed_middle_start, fixed_middle_size);
+ BUG_ON(ret);
+ }
+
fixed_high_start = fixed_middle_start + fixed_middle_size;
+ if (high_use_cma) {
+ fixed_high_size = ALIGN(fixed_high_size, cma_alignment);
+ BUG_ON(!IS_ALIGNED(fixed_high_start, cma_alignment));
+ } else {
+ /* This is the end of the fixed area so it's okay to round up */
+ fixed_high_size = ALIGN(fixed_high_size, SECTION_SIZE);
+ ret = memblock_remove(fixed_high_start, fixed_high_size);
+ BUG_ON(ret);
+ }
+
+
for (i = 0; i < msm8960_ion_pdata.nr; ++i) {
struct ion_platform_heap *heap = &(msm8960_ion_pdata.heaps[i]);
@@ -638,6 +712,7 @@
fixed_position = pdata->fixed_position;
break;
case ION_HEAP_TYPE_CARVEOUT:
+ case ION_HEAP_TYPE_DMA:
fixed_position = ((struct ion_co_heap_pdata *)
heap->extra_data)->fixed_position;
break;
@@ -651,6 +726,14 @@
break;
case FIXED_MIDDLE:
heap->base = fixed_middle_start;
+ if (middle_use_cma) {
+ ret = dma_declare_contiguous(
+ &ion_mm_heap_device.dev,
+ heap->size,
+ fixed_middle_start,
+ 0xa0000000);
+ WARN_ON(ret);
+ }
pdata->secure_base = fixed_middle_start
- HOLE_SIZE;
pdata->secure_size = HOLE_SIZE + heap->size;
diff --git a/arch/arm/mach-msm/board-9625.c b/arch/arm/mach-msm/board-9625.c
index 42f3f41..f6a354f 100644
--- a/arch/arm/mach-msm/board-9625.c
+++ b/arch/arm/mach-msm/board-9625.c
@@ -125,135 +125,6 @@
msm_reserve();
}
-static struct resource smd_resource[] = {
- {
- .name = "modem_smd_in",
- .start = 32 + 25, /* mss_sw_to_kpss_ipc_irq0 */
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "modem_smsm_in",
- .start = 32 + 26, /* mss_sw_to_kpss_ipc_irq1 */
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "adsp_smd_in",
- .start = 32 + 156, /* lpass_to_kpss_ipc_irq0 */
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "adsp_smsm_in",
- .start = 32 + 157, /* lpass_to_kpss_ipc_irq1 */
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "rpm_smd_in",
- .start = 32 + 168, /* rpm_to_kpss_ipc_irq4 */
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct smd_subsystem_config smd_config_list[] = {
- {
- .irq_config_id = SMD_MODEM,
- .subsys_name = "modem",
- .edge = SMD_APPS_MODEM,
-
- .smd_int.irq_name = "modem_smd_in",
- .smd_int.flags = IRQF_TRIGGER_RISING,
- .smd_int.irq_id = -1,
- .smd_int.device_name = "smd_dev",
- .smd_int.dev_id = 0,
- .smd_int.out_bit_pos = 1 << 12,
- .smd_int.out_base = (void __iomem *)MSM_APCS_GCC_BASE,
- .smd_int.out_offset = 0x8,
-
- .smsm_int.irq_name = "modem_smsm_in",
- .smsm_int.flags = IRQF_TRIGGER_RISING,
- .smsm_int.irq_id = -1,
- .smsm_int.device_name = "smsm_dev",
- .smsm_int.dev_id = 0,
- .smsm_int.out_bit_pos = 1 << 13,
- .smsm_int.out_base = (void __iomem *)MSM_APCS_GCC_BASE,
- .smsm_int.out_offset = 0x8,
- },
- {
- .irq_config_id = SMD_Q6,
- .subsys_name = "adsp",
- .edge = SMD_APPS_QDSP,
-
- .smd_int.irq_name = "adsp_smd_in",
- .smd_int.flags = IRQF_TRIGGER_RISING,
- .smd_int.irq_id = -1,
- .smd_int.device_name = "smd_dev",
- .smd_int.dev_id = 0,
- .smd_int.out_bit_pos = 1 << 8,
- .smd_int.out_base = (void __iomem *)MSM_APCS_GCC_BASE,
- .smd_int.out_offset = 0x8,
-
- .smsm_int.irq_name = "adsp_smsm_in",
- .smsm_int.flags = IRQF_TRIGGER_RISING,
- .smsm_int.irq_id = -1,
- .smsm_int.device_name = "smsm_dev",
- .smsm_int.dev_id = 0,
- .smsm_int.out_bit_pos = 1 << 9,
- .smsm_int.out_base = (void __iomem *)MSM_APCS_GCC_BASE,
- .smsm_int.out_offset = 0x8,
- },
- {
- .irq_config_id = SMD_RPM,
- .subsys_name = NULL, /* do not use PIL to load RPM */
- .edge = SMD_APPS_RPM,
-
- .smd_int.irq_name = "rpm_smd_in",
- .smd_int.flags = IRQF_TRIGGER_RISING,
- .smd_int.irq_id = -1,
- .smd_int.device_name = "smd_dev",
- .smd_int.dev_id = 0,
- .smd_int.out_bit_pos = 1 << 0,
- .smd_int.out_base = (void __iomem *)MSM_APCS_GCC_BASE,
- .smd_int.out_offset = 0x8,
-
- .smsm_int.irq_name = NULL, /* RPM does not support SMSM */
- .smsm_int.flags = 0,
- .smsm_int.irq_id = 0,
- .smsm_int.device_name = NULL,
- .smsm_int.dev_id = 0,
- .smsm_int.out_bit_pos = 0,
- .smsm_int.out_base = NULL,
- .smsm_int.out_offset = 0,
- },
-};
-
-static struct smd_smem_regions aux_smem_areas[] = {
- {
- .phys_addr = (void *)(0xfc428000),
- .size = 0x4000,
- },
-};
-
-static struct smd_subsystem_restart_config smd_ssr_cfg = {
- .disable_smsm_reset_handshake = 1,
-};
-
-static struct smd_platform smd_platform_data = {
- .num_ss_configs = ARRAY_SIZE(smd_config_list),
- .smd_ss_configs = smd_config_list,
- .smd_ssr_config = &smd_ssr_cfg,
- .num_smem_areas = ARRAY_SIZE(aux_smem_areas),
- .smd_smem_areas = aux_smem_areas,
-};
-
-struct platform_device msm_device_smd_9625 = {
- .name = "msm_smd",
- .id = -1,
- .resource = smd_resource,
- .num_resources = ARRAY_SIZE(smd_resource),
- .dev = {
- .platform_data = &smd_platform_data,
- }
-};
-
#define BIMC_BASE 0xfc380000
#define BIMC_SIZE 0x0006A000
#define SYS_NOC_BASE 0xfc460000
@@ -345,11 +216,6 @@
ARRAY_SIZE(msm_bus_9625_devices));
}
-void __init msm9625_add_devices(void)
-{
- platform_device_register(&msm_device_smd_9625);
-}
-
/*
* Used to satisfy dependencies for devices that need to be
* run early or in a particular order. Most likely your device doesn't fall
@@ -376,7 +242,6 @@
msm9625_init_gpiomux();
of_platform_populate(NULL, of_default_bus_match_table,
msm9625_auxdata_lookup, NULL);
- msm9625_add_devices();
msm9625_add_drivers();
}
diff --git a/arch/arm/mach-msm/board-fsm9xxx.c b/arch/arm/mach-msm/board-fsm9xxx.c
index 1d6eb01..274b338 100644
--- a/arch/arm/mach-msm/board-fsm9xxx.c
+++ b/arch/arm/mach-msm/board-fsm9xxx.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -46,6 +46,7 @@
#include <linux/msm_adc.h>
#include <linux/m_adcproc.h>
#include <linux/platform_data/qcom_crypto_device.h>
+#include <linux/msm_ion.h>
#define PMIC_GPIO_INT 144
#define PMIC_VREG_WLAN_LEVEL 2900
@@ -723,32 +724,32 @@
static struct resource qcrypto_resources[] = {
[0] = {
- .start = QCE_0_BASE,
- .end = QCE_0_BASE + QCE_SIZE - 1,
+ .start = QCE_1_BASE,
+ .end = QCE_1_BASE + QCE_SIZE - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.name = "crypto_channels",
- .start = DMOV_CE1_IN_CHAN,
- .end = DMOV_CE1_OUT_CHAN,
+ .start = DMOV_CE2_IN_CHAN,
+ .end = DMOV_CE2_OUT_CHAN,
.flags = IORESOURCE_DMA,
},
[2] = {
.name = "crypto_crci_in",
- .start = DMOV_CE1_IN_CRCI,
- .end = DMOV_CE1_IN_CRCI,
+ .start = DMOV_CE2_IN_CRCI,
+ .end = DMOV_CE2_IN_CRCI,
.flags = IORESOURCE_DMA,
},
[3] = {
.name = "crypto_crci_out",
- .start = DMOV_CE1_OUT_CRCI,
- .end = DMOV_CE1_OUT_CRCI,
+ .start = DMOV_CE2_OUT_CRCI,
+ .end = DMOV_CE2_OUT_CRCI,
.flags = IORESOURCE_DMA,
},
[4] = {
.name = "crypto_crci_hash",
- .start = DMOV_CE1_HASH_CRCI,
- .end = DMOV_CE1_HASH_CRCI,
+ .start = DMOV_CE2_HASH_CRCI,
+ .end = DMOV_CE2_HASH_CRCI,
.flags = IORESOURCE_DMA,
},
};
@@ -774,57 +775,6 @@
static struct resource qcedev_resources[] = {
[0] = {
- .start = QCE_0_BASE,
- .end = QCE_0_BASE + QCE_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .name = "crypto_channels",
- .start = DMOV_CE1_IN_CHAN,
- .end = DMOV_CE1_OUT_CHAN,
- .flags = IORESOURCE_DMA,
- },
- [2] = {
- .name = "crypto_crci_in",
- .start = DMOV_CE1_IN_CRCI,
- .end = DMOV_CE1_IN_CRCI,
- .flags = IORESOURCE_DMA,
- },
- [3] = {
- .name = "crypto_crci_out",
- .start = DMOV_CE1_OUT_CRCI,
- .end = DMOV_CE1_OUT_CRCI,
- .flags = IORESOURCE_DMA,
- },
- [4] = {
- .name = "crypto_crci_hash",
- .start = DMOV_CE1_HASH_CRCI,
- .end = DMOV_CE1_HASH_CRCI,
- .flags = IORESOURCE_DMA,
- },
-};
-
-static struct msm_ce_hw_support qcedev_ce_hw_suppport = {
- .ce_shared = QCE_NO_CE_SHARED,
- .shared_ce_resource = QCE_NO_SHARE_CE_RESOURCE,
- .hw_key_support = QCE_NO_HW_KEY_SUPPORT,
- .sha_hmac = QCE_NO_SHA_HMAC_SUPPORT,
- .bus_scale_table = NULL,
-};
-
-static struct platform_device qcedev_device = {
- .name = "qce",
- .id = 0,
- .num_resources = ARRAY_SIZE(qcedev_resources),
- .resource = qcedev_resources,
- .dev = {
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &qcedev_ce_hw_suppport,
- },
-};
-
-static struct resource ota_qcrypto_resources[] = {
- [0] = {
.start = QCE_1_BASE,
.end = QCE_1_BASE + QCE_SIZE - 1,
.flags = IORESOURCE_MEM,
@@ -855,6 +805,57 @@
},
};
+static struct msm_ce_hw_support qcedev_ce_hw_suppport = {
+ .ce_shared = QCE_NO_CE_SHARED,
+ .shared_ce_resource = QCE_NO_SHARE_CE_RESOURCE,
+ .hw_key_support = QCE_NO_HW_KEY_SUPPORT,
+ .sha_hmac = QCE_NO_SHA_HMAC_SUPPORT,
+ .bus_scale_table = NULL,
+};
+
+static struct platform_device qcedev_device = {
+ .name = "qce",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(qcedev_resources),
+ .resource = qcedev_resources,
+ .dev = {
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &qcedev_ce_hw_suppport,
+ },
+};
+
+static struct resource ota_qcrypto_resources[] = {
+ [0] = {
+ .start = QCE_2_BASE,
+ .end = QCE_2_BASE + QCE_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .name = "crypto_channels",
+ .start = DMOV_CE3_IN_CHAN,
+ .end = DMOV_CE3_OUT_CHAN,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .name = "crypto_crci_in",
+ .start = DMOV_CE3_IN_CRCI,
+ .end = DMOV_CE3_IN_CRCI,
+ .flags = IORESOURCE_DMA,
+ },
+ [3] = {
+ .name = "crypto_crci_out",
+ .start = DMOV_CE3_OUT_CRCI,
+ .end = DMOV_CE3_OUT_CRCI,
+ .flags = IORESOURCE_DMA,
+ },
+ [4] = {
+ .name = "crypto_crci_hash",
+ .start = DMOV_CE3_HASH_DONE_CRCI,
+ .end = DMOV_CE3_HASH_DONE_CRCI,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
struct platform_device ota_qcrypto_device = {
.name = "qcota",
.id = 0,
@@ -870,6 +871,27 @@
.id = -1,
};
+struct ion_platform_heap msm_ion_heaps[] = {
+ {
+ .id = ION_SYSTEM_HEAP_ID,
+ .type = ION_HEAP_TYPE_SYSTEM_CONTIG,
+ .name = "kmalloc",
+ },
+};
+
+static struct ion_platform_data msm_ion_pdata = {
+ .nr = 1,
+ .heaps = msm_ion_heaps,
+};
+
+static struct platform_device msm_ion_device = {
+ .name = "ion-msm",
+ .id = 1,
+ .dev = {
+ .platform_data = &msm_ion_pdata,
+ },
+};
+
/*
* Devices
*/
@@ -905,6 +927,7 @@
&ota_qcrypto_device,
&fsm_xo_device,
&fsm9xxx_device_watchdog,
+ &msm_ion_device,
};
static void __init fsm9xxx_init_irq(void)
diff --git a/arch/arm/mach-msm/board-msm7627a-storage.c b/arch/arm/mach-msm/board-msm7627a-storage.c
index 07ff389..5351d41 100644
--- a/arch/arm/mach-msm/board-msm7627a-storage.c
+++ b/arch/arm/mach-msm/board-msm7627a-storage.c
@@ -369,6 +369,14 @@
if (!(machine_is_msm7627a_qrd3() || machine_is_msm8625_qrd7())) {
if (mmc_regulator_init(3, "emmc", 3000000))
return;
+ /*
+ * On 7x25A FFA data CRC errors are seen, which are
+ * probably due to the proximity of SIM card and eMMC.
+ * Hence, reducing the clock to 24.7Mhz from 49Mhz.
+ */
+ if (machine_is_msm7625a_ffa())
+ sdc3_plat_data.msmsdcc_fmax =
+ sdc3_plat_data.msmsdcc_fmid;
msm_add_sdcc(3, &sdc3_plat_data);
}
#endif
diff --git a/arch/arm/mach-msm/board-msm7x27a.c b/arch/arm/mach-msm/board-msm7x27a.c
index 9fd5218..5cabe64 100644
--- a/arch/arm/mach-msm/board-msm7x27a.c
+++ b/arch/arm/mach-msm/board-msm7x27a.c
@@ -954,7 +954,7 @@
}
rpc_adsp_pdev->prog = ADSP_RPC_PROG;
- if (cpu_is_msm8625())
+ if (cpu_is_msm8625() || cpu_is_msm8625q())
rpc_adsp_pdev->pdev = msm8625_device_adsp;
else
rpc_adsp_pdev->pdev = msm_adsp_device;
@@ -1031,7 +1031,7 @@
{
msm7x27a_cfg_uart2dm_serial();
msm_uart_dm1_pdata.wakeup_irq = gpio_to_irq(UART1DM_RX_GPIO);
- if (cpu_is_msm8625())
+ if (cpu_is_msm8625() || cpu_is_msm8625q())
msm8625_device_uart_dm1.dev.platform_data =
&msm_uart_dm1_pdata;
else
@@ -1040,7 +1040,7 @@
static void __init msm7x27a_otg_gadget(void)
{
- if (cpu_is_msm8625()) {
+ if (cpu_is_msm8625() || cpu_is_msm8625q()) {
msm_otg_pdata.swfi_latency =
msm8625_pm_data[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT].latency;
msm8625_device_otg.dev.platform_data = &msm_otg_pdata;
@@ -1080,7 +1080,7 @@
/* Initialize regulators first so that other devices can use them */
msm7x27a_init_regulators();
msm_adsp_add_pdev();
- if (cpu_is_msm8625())
+ if (cpu_is_msm8625() || cpu_is_msm8625q())
msm8625_device_i2c_init();
else
msm7x27a_device_i2c_init();
diff --git a/arch/arm/mach-msm/board-qrd7627a.c b/arch/arm/mach-msm/board-qrd7627a.c
index 47a3120..d15b67d 100644
--- a/arch/arm/mach-msm/board-qrd7627a.c
+++ b/arch/arm/mach-msm/board-qrd7627a.c
@@ -933,7 +933,7 @@
}
rpc_adsp_pdev->prog = ADSP_RPC_PROG;
- if (cpu_is_msm8625())
+ if (cpu_is_msm8625() || cpu_is_msm8625q())
rpc_adsp_pdev->pdev = msm8625_device_adsp;
else
rpc_adsp_pdev->pdev = msm_adsp_device;
@@ -1041,7 +1041,7 @@
static void __init qrd7627a_uart1dm_config(void)
{
msm_uart_dm1_pdata.wakeup_irq = gpio_to_irq(UART1DM_RX_GPIO);
- if (cpu_is_msm8625())
+ if (cpu_is_msm8625() || cpu_is_msm8625q())
msm8625_device_uart_dm1.dev.platform_data =
&msm_uart_dm1_pdata;
else
@@ -1050,7 +1050,7 @@
static void __init qrd7627a_otg_gadget(void)
{
- if (cpu_is_msm8625()) {
+ if (cpu_is_msm8625() || cpu_is_msm8625q()) {
msm_otg_pdata.swfi_latency = msm8625_pm_data
[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT].latency;
msm8625_device_otg.dev.platform_data = &msm_otg_pdata;
@@ -1069,7 +1069,7 @@
static void __init msm_pm_init(void)
{
- if (!cpu_is_msm8625()) {
+ if (!cpu_is_msm8625() && !cpu_is_msm8625q()) {
msm_pm_set_platform_data(msm7627a_pm_data,
ARRAY_SIZE(msm7627a_pm_data));
BUG_ON(msm_pm_boot_init(&msm_pm_boot_pdata));
@@ -1088,7 +1088,7 @@
msm7627a_init_regulators();
msmqrd_adsp_add_pdev();
- if (cpu_is_msm8625())
+ if (cpu_is_msm8625() || cpu_is_msm8625q())
msm8625_device_i2c_init();
else
msm7627a_device_i2c_init();
diff --git a/arch/arm/mach-msm/cpufreq.c b/arch/arm/mach-msm/cpufreq.c
index e0d98b7..d862d6d 100644
--- a/arch/arm/mach-msm/cpufreq.c
+++ b/arch/arm/mach-msm/cpufreq.c
@@ -227,7 +227,7 @@
* be changed independently. Each cpu is bound to
* same frequency. Hence set the cpumask to all cpu.
*/
- if (cpu_is_msm8625())
+ if (cpu_is_msm8625() || cpu_is_msm8625q())
cpumask_setall(policy->cpus);
if (cpufreq_frequency_table_cpuinfo(policy, table)) {
diff --git a/arch/arm/mach-msm/devices-8064.c b/arch/arm/mach-msm/devices-8064.c
index 8d10d6a..0bfaa71 100644
--- a/arch/arm/mach-msm/devices-8064.c
+++ b/arch/arm/mach-msm/devices-8064.c
@@ -19,6 +19,7 @@
#include <linux/clkdev.h>
#include <linux/dma-mapping.h>
#include <linux/coresight.h>
+#include <linux/avtimer.h>
#include <mach/irqs-8064.h>
#include <mach/board.h>
#include <mach/msm_iomap.h>
@@ -103,6 +104,9 @@
#define MSM8064_PC_CNTR_PHYS (APQ8064_IMEM_PHYS + 0x664)
#define MSM8064_PC_CNTR_SIZE 0x40
#define MSM8064_RPM_MASTER_STATS_BASE 0x10BB00
+/* avtimer */
+#define AVTIMER_MSW_PHYSICAL_ADDRESS 0x2800900C
+#define AVTIMER_LSW_PHYSICAL_ADDRESS 0x28009008
static struct resource msm8064_resources_pccntr[] = {
{
@@ -3292,3 +3296,8 @@
.platform_data = &apq8064_cache_dump_pdata,
},
};
+
+struct dev_avtimer_data dev_avtimer_pdata = {
+ .avtimer_msw_phy_addr = AVTIMER_MSW_PHYSICAL_ADDRESS,
+ .avtimer_lsw_phy_addr = AVTIMER_LSW_PHYSICAL_ADDRESS,
+};
diff --git a/arch/arm/mach-msm/devices-8960.c b/arch/arm/mach-msm/devices-8960.c
index c59461a..2421646 100644
--- a/arch/arm/mach-msm/devices-8960.c
+++ b/arch/arm/mach-msm/devices-8960.c
@@ -2485,6 +2485,17 @@
.bus_port0 = MSM_BUS_MASTER_GRAPHICS_3D,
};
+static struct fs_driver_data gfx3d_fs_data_8960ab = {
+ .clks = (struct fs_clk_data[]){
+ { .name = "core_clk", .reset_rate = 27000000 },
+ { .name = "iface_clk" },
+ { .name = "bus_clk" },
+ { 0 }
+ },
+ .bus_port0 = MSM_BUS_MASTER_GRAPHICS_3D,
+ .bus_port1 = MSM_BUS_MASTER_GRAPHICS_3D_PORT1,
+};
+
static struct fs_driver_data ijpeg_fs_data = {
.clks = (struct fs_clk_data[]){
{ .name = "core_clk" },
@@ -2583,7 +2594,7 @@
FS_8X60(FS_IJPEG, "vdd", "msm_gemini.0", &ijpeg_fs_data),
FS_8X60(FS_VFE, "vdd", "msm_vfe.0", &vfe_fs_data),
FS_8X60(FS_VPE, "vdd", "msm_vpe.0", &vpe_fs_data),
- FS_8X60(FS_GFX3D, "vdd", "kgsl-3d0.0", &gfx3d_fs_data),
+ FS_8X60(FS_GFX3D, "vdd", "kgsl-3d0.0", &gfx3d_fs_data_8960ab),
FS_8X60(FS_VED, "vdd", "msm_vidc.0", &ved_fs_data_8960ab),
};
unsigned msm8960ab_num_footswitch __initdata = ARRAY_SIZE(msm8960ab_footswitch);
diff --git a/arch/arm/mach-msm/devices-msm7x27a.c b/arch/arm/mach-msm/devices-msm7x27a.c
index 8fc5020..b3d887e 100644
--- a/arch/arm/mach-msm/devices-msm7x27a.c
+++ b/arch/arm/mach-msm/devices-msm7x27a.c
@@ -515,7 +515,7 @@
void __init msm_pm_register_irqs(void)
{
- if (cpu_is_msm8625())
+ if (cpu_is_msm8625() || cpu_is_msm8625q())
msm_pm_set_irq_extns(&msm8625_pm_irq_calls);
else
msm_pm_set_irq_extns(&msm7x27a_pm_irq_calls);
@@ -530,8 +530,9 @@
void __init msm_pm_register_cpr_ops(void)
{
/* CPR presents on revision >= v2.0 chipsets */
- if (cpu_is_msm8625() &&
+ if ((cpu_is_msm8625() &&
SOCINFO_VERSION_MAJOR(socinfo_get_version()) >= 2)
+ || cpu_is_msm8625q())
msm_pm_set_cpr_ops(&msm8625_pm_cpr_ops);
}
@@ -952,7 +953,7 @@
void __init msm8x25_kgsl_3d0_init(void)
{
- if (cpu_is_msm8625()) {
+ if (cpu_is_msm8625() || cpu_is_msm8625q()) {
kgsl_3d0_pdata.idle_timeout = HZ/5;
kgsl_3d0_pdata.strtstp_sleepwake = false;
@@ -1406,7 +1407,7 @@
if (controller < 1 || controller > 4)
return -EINVAL;
- if (cpu_is_msm8625())
+ if (cpu_is_msm8625() || cpu_is_msm8625q())
pdev = msm8625_sdcc_devices[controller-1];
else
pdev = msm_sdcc_devices[controller-1];
@@ -1495,7 +1496,7 @@
{
struct platform_device *pdev;
- if (cpu_is_msm8625())
+ if (cpu_is_msm8625() || cpu_is_msm8625q())
pdev = msm8625_host_devices[host];
else
pdev = msm_host_devices[host];
@@ -1598,12 +1599,12 @@
void __init msm_fb_register_device(char *name, void *data)
{
if (!strncmp(name, "mdp", 3)) {
- if (cpu_is_msm8625())
+ if (cpu_is_msm8625() || cpu_is_msm8625q())
msm_register_device(&msm8625_mdp_device, data);
else
msm_register_device(&msm_mdp_device, data);
} else if (!strncmp(name, "mipi_dsi", 8)) {
- if (cpu_is_msm8625()) {
+ if (cpu_is_msm8625() || cpu_is_msm8625q()) {
msm_register_device(&msm8625_mipi_dsi_device, data);
mipi_dsi_device = msm8625_mipi_dsi_device;
} else {
@@ -2046,7 +2047,7 @@
msm_clock_init(&msm7x27a_clock_init_data);
if (cpu_is_msm7x27aa() || cpu_is_msm7x25ab())
platform_device_register(&msm7x27aa_device_acpuclk);
- else if (cpu_is_msm8625()) {
+ else if (cpu_is_msm8625() || cpu_is_msm8625q()) {
if (msm8625_cpu_id() == MSM8625)
platform_device_register(&msm7x27aa_device_acpuclk);
else if (msm8625_cpu_id() == MSM8625A)
@@ -2057,11 +2058,11 @@
platform_device_register(&msm7x27a_device_acpuclk);
}
- if (cpu_is_msm8625() &&
- (SOCINFO_VERSION_MAJOR(socinfo_get_version()) >= 2))
+ if (cpu_is_msm8625() || (cpu_is_msm8625q() &&
+ SOCINFO_VERSION_MAJOR(socinfo_get_version()) >= 2))
msm_cpr_init();
- if (!cpu_is_msm8625())
+ if (!cpu_is_msm8625() && !cpu_is_msm8625q())
pl310_resources[1].start = INT_L2CC_INTR;
platform_device_register(&pl310_erp_device);
@@ -2083,7 +2084,7 @@
(0x2 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT) | \
(0x1 << L2X0_AUX_CTRL_EVNT_MON_BUS_EN_SHIFT);
- if (cpu_is_msm8625()) {
+ if (cpu_is_msm8625() || cpu_is_msm8625q()) {
/* Way Size 011(0x3) 64KB */
aux_ctrl |= (0x3 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT) | \
(0x1 << L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT) | \
@@ -2099,7 +2100,7 @@
}
l2x0_init(MSM_L2CC_BASE, aux_ctrl, L2X0_AUX_CTRL_MASK);
- if (cpu_is_msm8625()) {
+ if (cpu_is_msm8625() || cpu_is_msm8625q()) {
pctrl = readl_relaxed(MSM_L2CC_BASE + L2X0_PREFETCH_CTRL);
pr_info("Prfetch Ctrl: 0x%08x\n", pctrl);
}
@@ -2136,7 +2137,7 @@
static int msm7627a_init_gpio(void)
{
- if (cpu_is_msm8625())
+ if (cpu_is_msm8625() || cpu_is_msm8625q())
platform_device_register(&msm8625_device_gpio);
else
platform_device_register(&msm_device_gpio);
diff --git a/arch/arm/mach-msm/devices.h b/arch/arm/mach-msm/devices.h
index c0d73c2..b676518 100644
--- a/arch/arm/mach-msm/devices.h
+++ b/arch/arm/mach-msm/devices.h
@@ -471,4 +471,5 @@
extern struct platform_device apq_cpudai_mi2s;
extern struct platform_device apq_cpudai_i2s_rx;
extern struct platform_device apq_cpudai_i2s_tx;
+extern struct dev_avtimer_data dev_avtimer_pdata;
diff --git a/arch/arm/mach-msm/include/mach/ipa.h b/arch/arm/mach-msm/include/mach/ipa.h
index dae6d3b..c37b518 100644
--- a/arch/arm/mach-msm/include/mach/ipa.h
+++ b/arch/arm/mach-msm/include/mach/ipa.h
@@ -457,7 +457,7 @@
int ipa_teardown_sys_pipe(u32 clnt_hdl);
-#else
+#else /* CONFIG_IPA */
/*
* Connect / Disconnect
diff --git a/arch/arm/mach-msm/lpm_resources.c b/arch/arm/mach-msm/lpm_resources.c
index c21ea33..5d7fc94 100644
--- a/arch/arm/mach-msm/lpm_resources.c
+++ b/arch/arm/mach-msm/lpm_resources.c
@@ -47,8 +47,6 @@
static bool msm_lpm_get_rpm_notif = true;
/*Macros*/
-#define VDD_DIG_ACTIVE (5)
-#define VDD_MEM_ACTIVE (1050000)
#define MAX_RS_NAME (16)
#define MAX_RS_SIZE (4)
#define IS_RPM_CTL(rs) \
@@ -133,10 +131,6 @@
.flush = msm_lpm_flush_l2,
.notify = NULL,
.valid = false,
- .rs_data = {
- .value = MSM_LPM_L2_CACHE_ACTIVE,
- .default_value = MSM_LPM_L2_CACHE_ACTIVE,
- },
.ko_attr = RPMRS_ATTR(l2),
};
@@ -147,10 +141,6 @@
.flush = msm_lpm_flush_vdd_dig,
.notify = msm_lpm_notify_vdd_dig,
.valid = false,
- .rs_data = {
- .value = VDD_DIG_ACTIVE,
- .default_value = VDD_DIG_ACTIVE,
- },
.ko_attr = RPMRS_ATTR(vdd_dig),
};
@@ -161,10 +151,6 @@
.flush = msm_lpm_flush_vdd_mem,
.notify = msm_lpm_notify_vdd_mem,
.valid = false,
- .rs_data = {
- .value = VDD_MEM_ACTIVE,
- .default_value = VDD_MEM_ACTIVE,
- },
.ko_attr = RPMRS_ATTR(vdd_mem),
};
@@ -175,10 +161,6 @@
.flush = msm_lpm_flush_pxo,
.notify = msm_lpm_notify_pxo,
.valid = false,
- .rs_data = {
- .value = MSM_LPM_PXO_ON,
- .default_value = MSM_LPM_PXO_ON,
- },
.ko_attr = RPMRS_ATTR(pxo),
};
@@ -421,13 +403,13 @@
trace_lpm_resources(rs->sleep_value, rs->name);
}
-static void msm_lpm_flush_l2(int notify_rpm)
+static void msm_lpm_set_l2_mode(int sleep_mode, int notify_rpm)
{
- struct msm_lpm_resource *rs = &msm_lpm_l2;
- int lpm;
- int rc;
+ int lpm, rc;
- switch (rs->sleep_value) {
+ msm_pm_set_l2_flush_flag(0);
+
+ switch (sleep_mode) {
case MSM_LPM_L2_CACHE_HSFS_OPEN:
lpm = MSM_SPM_L2_MODE_POWER_COLLAPSE;
msm_pm_set_l2_flush_flag(1);
@@ -455,6 +437,13 @@
__func__, lpm);
}
+static void msm_lpm_flush_l2(int notify_rpm)
+{
+ struct msm_lpm_resource *rs = &msm_lpm_l2;
+
+ msm_lpm_set_l2_mode(rs->sleep_value, notify_rpm);
+}
+
/* RPM CTL */
static void msm_lpm_flush_rpm_ctl(int notify_rpm)
{
@@ -679,8 +668,7 @@
}
msm_lpm_get_rpm_notif = true;
- if (msm_lpm_use_mpm(limits))
- msm_mpm_enter_sleep(sclk_count, from_idle);
+ msm_mpm_enter_sleep(sclk_count, from_idle);
return ret;
}
@@ -688,11 +676,11 @@
void msm_lpmrs_exit_sleep(struct msm_rpmrs_limits *limits,
bool from_idle, bool notify_rpm, bool collapsed)
{
- /* MPM exit sleep
if (msm_lpm_use_mpm(limits))
- msm_mpm_exit_sleep(from_idle);*/
+ msm_mpm_exit_sleep(from_idle);
- msm_spm_l2_set_low_power_mode(MSM_SPM_MODE_DISABLED, notify_rpm);
+ if (msm_lpm_l2.valid)
+ msm_lpm_set_l2_mode(msm_lpm_l2.rs_data.default_value, false);
}
static int msm_lpm_cpu_callback(struct notifier_block *cpu_nb,
@@ -702,12 +690,12 @@
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
- rs->rs_data.value = MSM_LPM_L2_CACHE_ACTIVE;
+ rs->rs_data.value = rs->rs_data.default_value;
break;
case CPU_ONLINE_FROZEN:
case CPU_ONLINE:
if (num_online_cpus() > 1)
- rs->rs_data.value = MSM_LPM_L2_CACHE_ACTIVE;
+ rs->rs_data.value = rs->rs_data.default_value;
break;
case CPU_DEAD_FROZEN:
case CPU_DEAD:
@@ -825,6 +813,16 @@
continue;
}
+ key = "qcom,init-value";
+ ret = of_property_read_u32(node, key,
+ &rs->rs_data.default_value);
+ if (ret) {
+ pr_err("%s():Failed to read %s\n", __func__, key);
+ goto fail;
+ }
+
+ rs->rs_data.value = rs->rs_data.default_value;
+
key = "qcom,resource-type";
ret = of_property_read_u32(node, key, &resource_type);
if (ret) {
diff --git a/arch/arm/mach-msm/msm-buspm-dev.c b/arch/arm/mach-msm/msm-buspm-dev.c
index a818eed..ec0f1bd 100644
--- a/arch/arm/mach-msm/msm-buspm-dev.c
+++ b/arch/arm/mach-msm/msm-buspm-dev.c
@@ -22,10 +22,17 @@
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
#include <linux/memory_alloc.h>
+#include <mach/rpm-smd.h>
#include "msm-buspm-dev.h"
#define MSM_BUSPM_DRV_NAME "msm-buspm-dev"
+enum msm_buspm_spdm_res {
+ SPDM_RES_ID = 0,
+ SPDM_RES_TYPE = 0x63707362,
+ SPDM_KEY = 0x00006e65,
+ SPDM_SIZE = 4,
+};
/*
* Allocate kernel buffer.
* Currently limited to one buffer per file descriptor. If alloc() is
@@ -113,6 +120,61 @@
return 0;
}
+static int msm_bus_rpm_req(u32 rsc_type, u32 key, u32 hwid,
+ int ctx, u32 val)
+{
+ struct msm_rpm_request *rpm_req;
+ int ret, msg_id;
+
+ rpm_req = msm_rpm_create_request(ctx, rsc_type, SPDM_RES_ID, 1);
+ if (rpm_req == NULL) {
+ pr_err("RPM: Couldn't create RPM Request\n");
+ return -ENXIO;
+ }
+
+ ret = msm_rpm_add_kvp_data(rpm_req, key, (const uint8_t *)&val,
+ (int)(sizeof(uint32_t)));
+ if (ret) {
+ pr_err("RPM: Add KVP failed for RPM Req:%u\n",
+ rsc_type);
+ goto err;
+ }
+
+ pr_debug("Added Key: %d, Val: %u, size: %d\n", key,
+ (uint32_t)val, sizeof(uint32_t));
+ msg_id = msm_rpm_send_request(rpm_req);
+ if (!msg_id) {
+ pr_err("RPM: No message ID for req\n");
+ ret = -ENXIO;
+ goto err;
+ }
+
+ ret = msm_rpm_wait_for_ack(msg_id);
+ if (ret) {
+ pr_err("RPM: Ack failed\n");
+ goto err;
+ }
+
+err:
+ msm_rpm_free_request(rpm_req);
+ return ret;
+}
+
+static int msm_buspm_ioc_cmds(uint32_t arg)
+{
+ switch (arg) {
+ case MSM_BUSPM_SPDM_CLK_DIS:
+ case MSM_BUSPM_SPDM_CLK_EN:
+ return msm_bus_rpm_req(SPDM_RES_TYPE, SPDM_KEY, 0,
+ MSM_RPM_CTX_ACTIVE_SET, arg);
+ default:
+ pr_warn("Unsupported ioctl command: %d\n", arg);
+ return -EINVAL;
+ }
+}
+
+
+
static long
msm_buspm_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
@@ -124,6 +186,11 @@
unsigned int buflen = msm_buspm_dev_get_buflen(filp);
unsigned char *dbgbuf = buf;
+ if (_IOC_TYPE(cmd) != MSM_BUSPM_IOC_MAGIC) {
+ pr_err("Wrong IOC_MAGIC.Exiting\n");
+ return -ENOTTY;
+ }
+
switch (cmd) {
case MSM_BUSPM_IOC_FREE:
pr_debug("cmd = 0x%x (FREE)\n", cmd);
@@ -193,6 +260,11 @@
}
break;
+ case MSM_BUSPM_IOC_CMD:
+ pr_debug("IOCTL command: cmd: %d arg: %lu\n", cmd, arg);
+ retval = msm_buspm_ioc_cmds(arg);
+ break;
+
default:
pr_debug("Unknown command 0x%x\n", cmd);
retval = -EINVAL;
diff --git a/arch/arm/mach-msm/msm-buspm-dev.h b/arch/arm/mach-msm/msm-buspm-dev.h
index 5839087..854626d 100644
--- a/arch/arm/mach-msm/msm-buspm-dev.h
+++ b/arch/arm/mach-msm/msm-buspm-dev.h
@@ -31,6 +31,11 @@
int size;
};
+enum msm_buspm_ioc_cmds {
+ MSM_BUSPM_SPDM_CLK_DIS = 0,
+ MSM_BUSPM_SPDM_CLK_EN,
+};
+
#define MSM_BUSPM_IOC_MAGIC 'p'
#define MSM_BUSPM_IOC_FREE \
@@ -47,4 +52,7 @@
#define MSM_BUSPM_IOC_RD_PHYS_ADDR \
_IOR(MSM_BUSPM_IOC_MAGIC, 4, unsigned long)
+
+#define MSM_BUSPM_IOC_CMD \
+ _IOR(MSM_BUSPM_IOC_MAGIC, 5, uint32_t)
#endif
diff --git a/arch/arm/mach-msm/msm7k_fiq.c b/arch/arm/mach-msm/msm7k_fiq.c
index 421b4f9..d644121 100644
--- a/arch/arm/mach-msm/msm7k_fiq.c
+++ b/arch/arm/mach-msm/msm7k_fiq.c
@@ -75,7 +75,7 @@
static int __init init7k_fiq(void)
{
- if (!cpu_is_msm8625())
+ if (!cpu_is_msm8625() && !cpu_is_msm8625q())
return 0;
if (msm_setup_fiq_handler())
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_arb.c b/arch/arm/mach-msm/msm_bus/msm_bus_arb.c
index 65539c6..e61eb6d 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_arb.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_arb.c
@@ -614,6 +614,15 @@
MSM_BUS_DBG("ab: %llu ib: %llu\n", curr_bw, curr_clk);
}
+ if (index == 0) {
+ /* This check protects the bus driver from clients
+ * that can leave non-zero requests after
+ * unregistering.
+ * */
+ req_clk = 0;
+ req_bw = 0;
+ }
+
if (!pdata->active_only) {
ret = update_path(src, pnode, req_clk, req_bw,
curr_clk, curr_bw, 0, pdata->active_only);
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_board_8960.c b/arch/arm/mach-msm/msm_bus/msm_bus_board_8960.c
index 70bb406..d079e77 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_board_8960.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_board_8960.c
@@ -447,10 +447,8 @@
static int mport_mdp1[] = {MSM_BUS_MASTER_PORT_MDP_PORT1,};
static int mport_rotator[] = {MSM_BUS_MASTER_PORT_ROTATOR,};
static int mport_graphics_3d[] = {MSM_BUS_MASTER_PORT_GRAPHICS_3D,};
-static int pro_mport_graphics_3d[] = {
- MSM_BUS_MASTER_PORT_GRAPHICS_3D_PORT0,
- MSM_BUS_MASTER_PORT_GRAPHICS_3D,
-};
+static int pro_mport_graphics_3d[] = {MSM_BUS_MASTER_PORT_GRAPHICS_3D_PORT0,};
+static int pro_mport_graphics_3d_p1[] = {MSM_BUS_MASTER_PORT_GRAPHICS_3D,};
static int mport_jpeg_dec[] = {MSM_BUS_MASTER_PORT_JPEG_DEC,};
static int mport_graphics_2d_core0[] = {MSM_BUS_MASTER_PORT_GRAPHICS_2D_CORE0,};
static int mport_vfe[] = {MSM_BUS_MASTER_PORT_VFE,};
@@ -627,6 +625,13 @@
.num_tiers = ARRAY_SIZE(tier2),
},
{
+ .id = MSM_BUS_MASTER_GRAPHICS_3D_PORT1,
+ .masterp = pro_mport_graphics_3d_p1,
+ .num_mports = ARRAY_SIZE(pro_mport_graphics_3d_p1),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ },
+ {
.id = MSM_BUS_MASTER_JPEG_DEC,
.masterp = mport_jpeg_dec,
.num_mports = ARRAY_SIZE(mport_jpeg_dec),
diff --git a/arch/arm/mach-msm/perf_event_msm_krait_l2.c b/arch/arm/mach-msm/perf_event_msm_krait_l2.c
index 103eef0..26c5e58 100644
--- a/arch/arm/mach-msm/perf_event_msm_krait_l2.c
+++ b/arch/arm/mach-msm/perf_event_msm_krait_l2.c
@@ -72,9 +72,11 @@
*/
struct pmu_constraints {
u64 pmu_bitmap;
+ u8 codes[64];
raw_spinlock_t lock;
} l2_pmu_constraints = {
.pmu_bitmap = 0,
+ .codes = {-1},
.lock = __RAW_SPIN_LOCK_UNLOCKED(l2_pmu_constraints.lock),
};
@@ -335,8 +337,10 @@
int ctr = 0;
if (hwc->config_base == L2CYCLE_CTR_RAW_CODE) {
- if (!test_and_set_bit(l2_cycle_ctr_idx, cpuc->used_mask))
- return l2_cycle_ctr_idx;
+ if (test_and_set_bit(l2_cycle_ctr_idx, cpuc->used_mask))
+ return -EAGAIN;
+
+ return l2_cycle_ctr_idx;
}
for (ctr = 0; ctr < total_l2_ctrs - 1; ctr++) {
@@ -453,22 +457,48 @@
{
u32 evt_type = event->attr.config & L2_EVT_MASK;
u8 reg = (evt_type & 0x0F000) >> 12;
- u8 group = evt_type & 0x0000F;
+ u8 group = evt_type & 0x0000F;
+ u8 code = (evt_type & 0x00FF0) >> 4;
unsigned long flags;
u32 err = 0;
u64 bitmap_t;
+ u32 shift_idx;
+
+ /*
+ * Cycle counter collision is detected in
+ * get_event_idx().
+ */
+ if (evt_type == L2CYCLE_CTR_RAW_CODE)
+ return err;
raw_spin_lock_irqsave(&l2_pmu_constraints.lock, flags);
- bitmap_t = 1 << ((reg * 4) + group);
+ shift_idx = ((reg * 4) + group);
+
+ bitmap_t = 1 << shift_idx;
if (!(l2_pmu_constraints.pmu_bitmap & bitmap_t)) {
l2_pmu_constraints.pmu_bitmap |= bitmap_t;
+ l2_pmu_constraints.codes[shift_idx] = code;
goto out;
+ } else {
+ /*
+ * If NRCCG's are identical,
+ * its not column exclusion.
+ */
+ if (l2_pmu_constraints.codes[shift_idx] != code)
+ err = -EPERM;
+ else
+ /*
+ * If the event is counted in syswide mode
+ * then we want to count only on one CPU
+ * and set its filter to count from all.
+ * This sets the event OFF on all but one
+ * CPU.
+ */
+ if (!(event->cpu < 0))
+ event->state = PERF_EVENT_STATE_OFF;
}
-
- /* Bit is already set. Constraint failed. */
- err = -EPERM;
out:
raw_spin_unlock_irqrestore(&l2_pmu_constraints.lock, flags);
return err;
@@ -481,14 +511,20 @@
u8 group = evt_type & 0x0000F;
unsigned long flags;
u64 bitmap_t;
+ u32 shift_idx;
raw_spin_lock_irqsave(&l2_pmu_constraints.lock, flags);
- bitmap_t = 1 << ((reg * 4) + group);
+ shift_idx = ((reg * 4) + group);
+
+ bitmap_t = 1 << shift_idx;
/* Clear constraint bit. */
l2_pmu_constraints.pmu_bitmap &= ~bitmap_t;
+ /* Clear code. */
+ l2_pmu_constraints.codes[shift_idx] = -1;
+
raw_spin_unlock_irqrestore(&l2_pmu_constraints.lock, flags);
return 1;
}
diff --git a/arch/arm/mach-msm/perf_event_msm_l2.c b/arch/arm/mach-msm/perf_event_msm_l2.c
index 2ad36df..c1b7d23 100644
--- a/arch/arm/mach-msm/perf_event_msm_l2.c
+++ b/arch/arm/mach-msm/perf_event_msm_l2.c
@@ -38,9 +38,11 @@
*/
struct pmu_constraints {
u64 pmu_bitmap;
+ u8 codes[64];
raw_spinlock_t lock;
} l2_pmu_constraints = {
.pmu_bitmap = 0,
+ .codes = {-1},
.lock = __RAW_SPIN_LOCK_UNLOCKED(l2_pmu_constraints.lock),
};
@@ -667,9 +669,11 @@
int ctr = 0;
if (hwc->config_base == SCORPION_L2CYCLE_CTR_RAW_CODE) {
- if (!test_and_set_bit(l2_cycle_ctr_idx,
+ if (test_and_set_bit(l2_cycle_ctr_idx,
cpuc->used_mask))
- return l2_cycle_ctr_idx;
+ return -EAGAIN;
+
+ return l2_cycle_ctr_idx;
}
for (ctr = 0; ctr < total_l2_ctrs - 1; ctr++) {
@@ -792,25 +796,50 @@
u8 prefix = (evt_type & 0xF0000) >> 16;
u8 reg = (evt_type & 0x0F000) >> 12;
u8 group = evt_type & 0x0000F;
+ u8 code = (evt_type & 0x00FF0) >> 4;
unsigned long flags;
u32 err = 0;
u64 bitmap_t;
+ u32 shift_idx;
if (!prefix)
return 0;
+ /*
+ * Cycle counter collision is detected in
+ * get_event_idx().
+ */
+ if (evt_type == SCORPION_L2CYCLE_CTR_RAW_CODE)
+ return err;
raw_spin_lock_irqsave(&l2_pmu_constraints.lock, flags);
- bitmap_t = 1 << ((reg * 4) + group);
+ shift_idx = ((reg * 4) + group);
+
+ bitmap_t = 1 << shift_idx;
if (!(l2_pmu_constraints.pmu_bitmap & bitmap_t)) {
l2_pmu_constraints.pmu_bitmap |= bitmap_t;
+ l2_pmu_constraints.codes[shift_idx] = code;
goto out;
+ } else {
+ /*
+ * If NRCCG's are identical,
+ * its not column exclusion.
+ */
+ if (l2_pmu_constraints.codes[shift_idx] != code)
+ err = -EPERM;
+ else
+ /*
+ * If the event is counted in syswide mode
+ * then we want to count only on one CPU
+ * and set its filter to count from all.
+ * This sets the event OFF on all but one
+ * CPU.
+ */
+ if (!(event->cpu < 0))
+ event->state = PERF_EVENT_STATE_OFF;
}
- /* Bit is already set. Constraint failed. */
- err = -EPERM;
-
out:
raw_spin_unlock_irqrestore(&l2_pmu_constraints.lock, flags);
return err;
@@ -824,13 +853,16 @@
u8 group = evt_type & 0x0000F;
unsigned long flags;
u64 bitmap_t;
+ u32 shift_idx;
if (!prefix)
return 0;
raw_spin_lock_irqsave(&l2_pmu_constraints.lock, flags);
- bitmap_t = 1 << ((reg * 4) + group);
+ shift_idx = ((reg * 4) + group);
+
+ bitmap_t = 1 << shift_idx;
/* Clear constraint bit. */
l2_pmu_constraints.pmu_bitmap &= ~bitmap_t;
diff --git a/arch/arm/mach-msm/peripheral-loader.c b/arch/arm/mach-msm/peripheral-loader.c
index e3a3563..cb8d756 100644
--- a/arch/arm/mach-msm/peripheral-loader.c
+++ b/arch/arm/mach-msm/peripheral-loader.c
@@ -33,6 +33,7 @@
#include <asm/setup.h>
#include "peripheral-loader.h"
+#include "ramdump.h"
#define pil_err(desc, fmt, ...) \
dev_err(desc->dev, "%s: " fmt, desc->name, ##__VA_ARGS__)
@@ -112,6 +113,42 @@
struct ion_handle *region;
};
+/**
+ * pil_do_ramdump() - Ramdump an image
+ * @desc: descriptor from pil_desc_init()
+ * @ramdump_dev: ramdump device returned from create_ramdump_device()
+ *
+ * Calls the ramdump API with a list of segments generated from the addresses
+ * that the descriptor corresponds to.
+ */
+int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev)
+{
+ struct pil_priv *priv = desc->priv;
+ struct pil_seg *seg;
+ int count = 0, ret;
+ struct ramdump_segment *ramdump_segs, *s;
+
+ list_for_each_entry(seg, &priv->segs, list)
+ count++;
+
+ ramdump_segs = kmalloc_array(count, sizeof(*ramdump_segs), GFP_KERNEL);
+ if (!ramdump_segs)
+ return -ENOMEM;
+
+ s = ramdump_segs;
+ list_for_each_entry(seg, &priv->segs, list) {
+ s->address = seg->paddr;
+ s->size = seg->sz;
+ s++;
+ }
+
+ ret = do_elf_ramdump(ramdump_dev, ramdump_segs, count);
+ kfree(ramdump_segs);
+
+ return ret;
+}
+EXPORT_SYMBOL(pil_do_ramdump);
+
static struct ion_client *ion;
/**
diff --git a/arch/arm/mach-msm/peripheral-loader.h b/arch/arm/mach-msm/peripheral-loader.h
index 1c2faf7..8442289 100644
--- a/arch/arm/mach-msm/peripheral-loader.h
+++ b/arch/arm/mach-msm/peripheral-loader.h
@@ -65,6 +65,7 @@
extern void pil_shutdown(struct pil_desc *desc);
extern void pil_desc_release(struct pil_desc *desc);
extern phys_addr_t pil_get_entry_addr(struct pil_desc *desc);
+extern int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev);
#else
static inline int pil_desc_init(struct pil_desc *desc) { return 0; }
static inline int pil_boot(struct pil_desc *desc) { return 0; }
@@ -74,6 +75,10 @@
{
return 0;
}
+static inline int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev)
+{
+ return 0;
+}
#endif
#endif
diff --git a/arch/arm/mach-msm/pil-dsps.c b/arch/arm/mach-msm/pil-dsps.c
index 519e1c9..d315d82 100644
--- a/arch/arm/mach-msm/pil-dsps.c
+++ b/arch/arm/mach-msm/pil-dsps.c
@@ -48,7 +48,6 @@
void __iomem *ppss_base;
void *ramdump_dev;
- struct ramdump_segment fw_ramdump_segments[4];
void *smem_ramdump_dev;
struct ramdump_segment smem_ramdump_segments[1];
@@ -212,16 +211,13 @@
if (!enable)
return 0;
- ret = do_ramdump(drv->ramdump_dev,
- drv->fw_ramdump_segments,
- ARRAY_SIZE(drv->fw_ramdump_segments));
+ ret = pil_do_ramdump(&drv->desc, drv->ramdump_dev);
if (ret < 0) {
pr_err("%s: Unable to dump DSPS memory (rc = %d).\n",
__func__, ret);
return ret;
}
- ret = do_ramdump(drv->smem_ramdump_dev,
- drv->smem_ramdump_segments,
+ ret = do_elf_ramdump(drv->smem_ramdump_dev, drv->smem_ramdump_segments,
ARRAY_SIZE(drv->smem_ramdump_segments));
if (ret < 0) {
pr_err("%s: Unable to dump smem memory (rc = %d).\n",
@@ -293,14 +289,6 @@
if (ret)
return ret;
- drv->fw_ramdump_segments[0].address = 0x12000000;
- drv->fw_ramdump_segments[0].size = 0x28000;
- drv->fw_ramdump_segments[1].address = 0x12040000;
- drv->fw_ramdump_segments[1].size = 0x4000;
- drv->fw_ramdump_segments[2].address = 0x12800000;
- drv->fw_ramdump_segments[2].size = 0x4000;
- drv->fw_ramdump_segments[3].address = 0x8fe00000;
- drv->fw_ramdump_segments[3].size = 0x100000;
drv->ramdump_dev = create_ramdump_device("dsps", &pdev->dev);
if (!drv->ramdump_dev) {
ret = -ENOMEM;
diff --git a/arch/arm/mach-msm/pil-gss.c b/arch/arm/mach-msm/pil-gss.c
index a6d13d0..f4d4449 100644
--- a/arch/arm/mach-msm/pil-gss.c
+++ b/arch/arm/mach-msm/pil-gss.c
@@ -404,11 +404,6 @@
smsm_reset_modem(SMSM_RESET);
}
-/* FIXME: Get address, size from PIL */
-static struct ramdump_segment gss_segments[] = {
- {0x89000000, 0x00D00000}
-};
-
static struct ramdump_segment smem_segments[] = {
{0x80000000, 0x00200000},
};
@@ -418,20 +413,20 @@
int ret;
struct gss_data *drv = container_of(desc, struct gss_data, subsys_desc);
- if (enable) {
- ret = do_ramdump(drv->ramdump_dev, gss_segments,
- ARRAY_SIZE(gss_segments));
- if (ret < 0) {
- pr_err("Unable to dump gss memory\n");
- return ret;
- }
+ if (!enable)
+ return 0;
- ret = do_ramdump(drv->smem_ramdump_dev, smem_segments,
- ARRAY_SIZE(smem_segments));
- if (ret < 0) {
- pr_err("Unable to dump smem memory (rc = %d).\n", ret);
- return ret;
- }
+ ret = pil_do_ramdump(&drv->pil_desc, drv->ramdump_dev);
+ if (ret < 0) {
+ pr_err("Unable to dump gss memory\n");
+ return ret;
+ }
+
+ ret = do_elf_ramdump(drv->smem_ramdump_dev, smem_segments,
+ ARRAY_SIZE(smem_segments));
+ if (ret < 0) {
+ pr_err("Unable to dump smem memory (rc = %d).\n", ret);
+ return ret;
}
return 0;
diff --git a/arch/arm/mach-msm/pil-modem.c b/arch/arm/mach-msm/pil-modem.c
index d3c832b..3546705 100644
--- a/arch/arm/mach-msm/pil-modem.c
+++ b/arch/arm/mach-msm/pil-modem.c
@@ -393,21 +393,15 @@
return ret;
}
-/* FIXME: Get address, size from PIL */
-static struct ramdump_segment modem_segments[] = {
- { 0x42F00000, 0x46000000 - 0x42F00000 },
-};
-
static int modem_ramdump(int enable, const struct subsys_desc *subsys)
{
struct modem_data *drv;
drv = container_of(subsys, struct modem_data, subsys_desc);
- if (enable)
- return do_ramdump(drv->ramdump_dev, modem_segments,
- ARRAY_SIZE(modem_segments));
- else
+ if (!enable)
return 0;
+
+ return pil_do_ramdump(&drv->pil_desc, drv->ramdump_dev);
}
static int __devinit pil_modem_driver_probe(struct platform_device *pdev)
diff --git a/arch/arm/mach-msm/pil-pronto.c b/arch/arm/mach-msm/pil-pronto.c
index 162a7f7..b457599 100644
--- a/arch/arm/mach-msm/pil-pronto.c
+++ b/arch/arm/mach-msm/pil-pronto.c
@@ -385,19 +385,14 @@
smsm_change_state(SMSM_APPS_STATE, SMSM_RESET, SMSM_RESET);
}
-static struct ramdump_segment pronto_segments[] = {
- { 0x0D200000, 0x0D980000 - 0x0D200000 }
-};
-
static int wcnss_ramdump(int enable, const struct subsys_desc *subsys)
{
struct pronto_data *drv = subsys_to_drv(subsys);
- if (enable)
- return do_ramdump(drv->ramdump_dev, pronto_segments,
- ARRAY_SIZE(pronto_segments));
- else
+ if (!enable)
return 0;
+
+ return pil_do_ramdump(&drv->desc, drv->ramdump_dev);
}
static int __devinit pil_pronto_probe(struct platform_device *pdev)
diff --git a/arch/arm/mach-msm/pil-q6v3.c b/arch/arm/mach-msm/pil-q6v3.c
index d7e712c..1f53f17 100644
--- a/arch/arm/mach-msm/pil-q6v3.c
+++ b/arch/arm/mach-msm/pil-q6v3.c
@@ -279,22 +279,15 @@
return ret;
}
-/* FIXME: Get address, size from PIL */
-static struct ramdump_segment q6_segments[] = {
- { 0x46700000, 0x47f00000 - 0x46700000 },
- { 0x28400000, 0x12800 }
-};
-
static int lpass_q6_ramdump(int enable, const struct subsys_desc *subsys)
{
struct q6v3_data *drv;
drv = container_of(subsys, struct q6v3_data, subsys_desc);
- if (enable)
- return do_ramdump(drv->ramdump_dev, q6_segments,
- ARRAY_SIZE(q6_segments));
- else
+ if (!enable)
return 0;
+
+ return pil_do_ramdump(&drv->pil_desc, drv->ramdump_dev);
}
static void lpass_q6_crash_shutdown(const struct subsys_desc *subsys)
diff --git a/arch/arm/mach-msm/pil-q6v4-lpass.c b/arch/arm/mach-msm/pil-q6v4-lpass.c
index 1e6c1f6..1387433 100644
--- a/arch/arm/mach-msm/pil-q6v4-lpass.c
+++ b/arch/arm/mach-msm/pil-q6v4-lpass.c
@@ -231,18 +231,14 @@
return ret;
}
-static struct ramdump_segment segments[] = {
- {0x8da00000, 0x8f200000 - 0x8da00000},
- {0x28400000, 0x20000}
-};
-
static int lpass_ramdump(int enable, const struct subsys_desc *subsys)
{
struct lpass_q6v4 *drv = subsys_to_lpass(subsys);
if (!enable)
return 0;
- return do_ramdump(drv->ramdump_dev, segments, ARRAY_SIZE(segments));
+
+ return pil_do_ramdump(&drv->q6.desc, drv->ramdump_dev);
}
static void lpass_crash_shutdown(const struct subsys_desc *subsys)
diff --git a/arch/arm/mach-msm/pil-q6v4-mss.c b/arch/arm/mach-msm/pil-q6v4-mss.c
index ee01f04..f2b090f 100644
--- a/arch/arm/mach-msm/pil-q6v4-mss.c
+++ b/arch/arm/mach-msm/pil-q6v4-mss.c
@@ -243,14 +243,6 @@
smsm_reset_modem(SMSM_RESET);
}
-static struct ramdump_segment sw_segments[] = {
- {0x89000000, 0x8D400000 - 0x89000000},
-};
-
-static struct ramdump_segment fw_segments[] = {
- {0x8D400000, 0x8DA00000 - 0x8D400000},
-};
-
static struct ramdump_segment smem_segments[] = {
{0x80000000, 0x00200000},
};
@@ -263,17 +255,15 @@
if (!enable)
return 0;
- ret = do_ramdump(drv->sw_ramdump_dev, sw_segments,
- ARRAY_SIZE(sw_segments));
+ ret = pil_do_ramdump(&drv->q6_sw.desc, drv->sw_ramdump_dev);
if (ret < 0)
return ret;
- ret = do_ramdump(drv->fw_ramdump_dev, fw_segments,
- ARRAY_SIZE(fw_segments));
+ ret = pil_do_ramdump(&drv->q6_fw.desc, drv->fw_ramdump_dev);
if (ret < 0)
return ret;
- ret = do_ramdump(drv->smem_ramdump_dev, smem_segments,
+ ret = do_elf_ramdump(drv->smem_ramdump_dev, smem_segments,
ARRAY_SIZE(smem_segments));
if (ret < 0)
return ret;
diff --git a/arch/arm/mach-msm/pil-q6v5-lpass.c b/arch/arm/mach-msm/pil-q6v5-lpass.c
index 662377d..94632da 100644
--- a/arch/arm/mach-msm/pil-q6v5-lpass.c
+++ b/arch/arm/mach-msm/pil-q6v5-lpass.c
@@ -307,15 +307,14 @@
return ret;
}
-static struct ramdump_segment segments = { 0xdc00000, 0x1800000 };
-
static int adsp_ramdump(int enable, const struct subsys_desc *subsys)
{
struct lpass_data *drv = subsys_to_lpass(subsys);
if (!enable)
return 0;
- return do_ramdump(drv->ramdump_dev, &segments, 1);
+
+ return pil_do_ramdump(&drv->q6->desc, drv->ramdump_dev);
}
static void adsp_crash_shutdown(const struct subsys_desc *subsys)
diff --git a/arch/arm/mach-msm/pil-q6v5-mss.c b/arch/arm/mach-msm/pil-q6v5-mss.c
index b8309e5..ed85c95 100644
--- a/arch/arm/mach-msm/pil-q6v5-mss.c
+++ b/arch/arm/mach-msm/pil-q6v5-mss.c
@@ -496,10 +496,6 @@
smsm_reset_modem(SMSM_RESET);
}
-static struct ramdump_segment modem_segments[] = {
- {0x08400000, 0x0D100000 - 0x08400000},
-};
-
static struct ramdump_segment smem_segments[] = {
{0x0FA00000, 0x0FC00000 - 0x0FA00000},
};
@@ -516,14 +512,13 @@
if (ret)
return ret;
- ret = do_ramdump(drv->ramdump_dev, modem_segments,
- ARRAY_SIZE(modem_segments));
+ ret = pil_do_ramdump(&drv->q6->desc, drv->ramdump_dev);
if (ret < 0) {
pr_err("Unable to dump modem fw memory (rc = %d).\n", ret);
goto out;
}
- ret = do_ramdump(drv->smem_ramdump_dev, smem_segments,
+ ret = do_elf_ramdump(drv->smem_ramdump_dev, smem_segments,
ARRAY_SIZE(smem_segments));
if (ret < 0) {
pr_err("Unable to dump smem memory (rc = %d).\n", ret);
diff --git a/arch/arm/mach-msm/pil-riva.c b/arch/arm/mach-msm/pil-riva.c
index 74fae98..96b9882 100644
--- a/arch/arm/mach-msm/pil-riva.c
+++ b/arch/arm/mach-msm/pil-riva.c
@@ -412,26 +412,16 @@
return ret;
}
-/*
- * 7MB RAM segments for Riva SS;
- * Riva 1.1 0x8f000000 - 0x8f700000
- * Riva 1.0 0x8f200000 - 0x8f700000
- */
-static struct ramdump_segment riva_segments[] = {
- {0x8f000000, 0x8f700000 - 0x8f000000}
-};
-
static int riva_ramdump(int enable, const struct subsys_desc *desc)
{
struct riva_data *drv;
drv = container_of(desc, struct riva_data, subsys_desc);
- if (enable)
- return do_ramdump(drv->ramdump_dev, riva_segments,
- ARRAY_SIZE(riva_segments));
- else
+ if (!enable)
return 0;
+
+ return pil_do_ramdump(&drv->pil_desc, drv->ramdump_dev);
}
/* Riva crash handler */
diff --git a/arch/arm/mach-msm/pm-boot.c b/arch/arm/mach-msm/pm-boot.c
index f32e149..53cc0f5 100644
--- a/arch/arm/mach-msm/pm-boot.c
+++ b/arch/arm/mach-msm/pm-boot.c
@@ -137,7 +137,7 @@
= msm_pm_config_rst_vector_after_pc;
break;
case MSM_PM_BOOT_CONFIG_REMAP_BOOT_ADDR:
- if (!cpu_is_msm8625()) {
+ if (!cpu_is_msm8625() && !cpu_is_msm8625q()) {
void *remapped;
/*
@@ -200,7 +200,7 @@
pdata->v_addr + mpa5_cfg_ctl[0]);
/* 8x25Q changes */
- if (SOCINFO_VERSION_MAJOR(socinfo_get_version()) >= 3) {
+ if (cpu_is_msm8625q()) {
/* write 'entry' to boot remapper register */
__raw_writel(entry, (pdata->v_addr +
mpa5_boot_remap_addr[1]));
diff --git a/arch/arm/mach-msm/pm2.c b/arch/arm/mach-msm/pm2.c
index 96c1218..ec9f030 100644
--- a/arch/arm/mach-msm/pm2.c
+++ b/arch/arm/mach-msm/pm2.c
@@ -562,7 +562,7 @@
__raw_writel(0, APPS_PWRDOWN);
mb();
msm_spm_reinit();
- } else if (cpu_is_msm8625()) {
+ } else if (cpu_is_msm8625() || cpu_is_msm8625q()) {
__raw_writel(0, APPS_PWRDOWN);
mb();
@@ -881,7 +881,7 @@
memset(msm_pm_smem_data, 0, sizeof(*msm_pm_smem_data));
- if (cpu_is_msm8625()) {
+ if (cpu_is_msm8625() || cpu_is_msm8625q()) {
/* Program the SPM */
ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_POWER_COLLAPSE,
false);
@@ -971,7 +971,7 @@
#endif
#ifdef CONFIG_CACHE_L2X0
- if (!cpu_is_msm8625())
+ if (!cpu_is_msm8625() && !cpu_is_msm8625q())
l2cc_suspend();
else
apps_power_collapse = 1;
@@ -983,7 +983,7 @@
* TBD: Currently recognise the MODEM early exit
* path by reading the MPA5_GDFS_CNT_VAL register.
*/
- if (cpu_is_msm8625()) {
+ if (cpu_is_msm8625() || cpu_is_msm8625q()) {
int cpu;
/*
* on system reset, default value of MPA5_GDFS_CNT_VAL
@@ -997,7 +997,7 @@
val = __raw_readl(MSM_CFG_CTL_BASE + 0x38);
/* 8x25Q */
- if (SOCINFO_VERSION_MAJOR(socinfo_get_version()) >= 3) {
+ if (cpu_is_msm8625q()) {
if (val != 0x000F0002) {
for_each_possible_cpu(cpu) {
if (!cpu)
@@ -1031,7 +1031,7 @@
}
#ifdef CONFIG_CACHE_L2X0
- if (!cpu_is_msm8625())
+ if (!cpu_is_msm8625() && !cpu_is_msm8625q())
l2cc_resume();
else
apps_power_collapse = 0;
@@ -1153,7 +1153,7 @@
smd_sleep_exit();
- if (cpu_is_msm8625()) {
+ if (cpu_is_msm8625() || cpu_is_msm8625q()) {
ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING,
false);
WARN_ON(ret);
@@ -1220,7 +1220,7 @@
msm_cpr_ops->cpr_resume();
power_collapse_bail:
- if (cpu_is_msm8625()) {
+ if (cpu_is_msm8625() || cpu_is_msm8625q()) {
ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING,
false);
WARN_ON(ret);
@@ -1258,14 +1258,14 @@
#endif
#ifdef CONFIG_CACHE_L2X0
- if (!cpu_is_msm8625())
+ if (!cpu_is_msm8625() && !cpu_is_msm8625q())
l2cc_suspend();
#endif
collapsed = msm_pm_collapse();
#ifdef CONFIG_CACHE_L2X0
- if (!cpu_is_msm8625())
+ if (!cpu_is_msm8625() && !cpu_is_msm8625q())
l2cc_resume();
#endif
@@ -1310,7 +1310,7 @@
return -EIO;
}
- if (!cpu_is_msm8625())
+ if (!cpu_is_msm8625() && !cpu_is_msm8625q())
msm_pm_config_hw_before_swfi();
msm_arch_idle();
@@ -1713,7 +1713,7 @@
return ret;
}
- if (cpu_is_msm8625()) {
+ if (cpu_is_msm8625() || cpu_is_msm8625q()) {
target_type = TARGET_IS_8625;
clean_caches((unsigned long)&target_type, sizeof(target_type),
virt_to_phys(&target_type));
@@ -1725,7 +1725,7 @@
* MPA5_GDFS_CNT_VAL[9:0] = Delay counter for
* GDFS control.
*/
- if (SOCINFO_VERSION_MAJOR(socinfo_get_version()) >= 3)
+ if (cpu_is_msm8625q())
val = 0x000F0002;
else
val = 0x00030002;
diff --git a/arch/arm/mach-msm/qdsp6v2/pcm_in_proxy.c b/arch/arm/mach-msm/qdsp6v2/pcm_in_proxy.c
index 84f136a..5faee21 100644
--- a/arch/arm/mach-msm/qdsp6v2/pcm_in_proxy.c
+++ b/arch/arm/mach-msm/qdsp6v2/pcm_in_proxy.c
@@ -72,31 +72,36 @@
if (pcm->start) {
if (pcm->dsp_idx == pcm->buffer_count)
pcm->dsp_idx = 0;
- rc = wait_event_timeout(pcm->wait,
- (pcm->dma_buf[pcm->dsp_idx].used == 0) ||
- atomic_read(&pcm->in_stopped), 1 * HZ);
- if (!rc) {
- pr_err("%s: wait_event_timeout failed\n", __func__);
- goto fail;
+ if (pcm->dma_buf[pcm->dsp_idx].used == 0) {
+ if (atomic_read(&pcm->in_stopped)) {
+ pr_err("%s: Driver closed - return\n",
+ __func__);
+ return HRTIMER_NORESTART;
+ }
+ rc = afe_rt_proxy_port_read(
+ pcm->dma_buf[pcm->dsp_idx].addr,
+ pcm->buffer_size);
+ if (rc < 0) {
+ pr_err("%s afe_rt_proxy_port_read fail\n",
+ __func__);
+ goto fail;
+ }
+ pcm->dma_buf[pcm->dsp_idx].used = 1;
+ pcm->dsp_idx++;
+ pr_debug("sending frame rec to DSP: poll_time: %d\n",
+ pcm->poll_time);
+ } else {
+ pr_err("Qcom: Used flag not reset retry after %d msec\n",
+ (pcm->poll_time/10));
+ goto fail_timer;
}
- if (atomic_read(&pcm->in_stopped)) {
- pr_err("%s: Driver closed - return\n", __func__);
- return HRTIMER_NORESTART;
- }
- rc = afe_rt_proxy_port_read(
- pcm->dma_buf[pcm->dsp_idx].addr,
- pcm->buffer_size);
- if (rc < 0) {
- pr_err("%s afe_rt_proxy_port_read fail\n", __func__);
- goto fail;
- }
- pcm->dma_buf[pcm->dsp_idx].used = 1;
- pcm->dsp_idx++;
- pr_debug("%s: sending frame rec to DSP: poll_time: %d\n",
- __func__, pcm->poll_time);
fail:
hrtimer_forward_now(hrt, ns_to_ktime(pcm->poll_time
* 1000));
+ return HRTIMER_RESTART;
+fail_timer:
+ hrtimer_forward_now(hrt, ns_to_ktime((pcm->poll_time/10)
+ * 1000));
return HRTIMER_RESTART;
} else {
diff --git a/arch/arm/mach-msm/ramdump.c b/arch/arm/mach-msm/ramdump.c
index e33ec48..689c4bb 100644
--- a/arch/arm/mach-msm/ramdump.c
+++ b/arch/arm/mach-msm/ramdump.c
@@ -26,6 +26,7 @@
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
+#include <linux/elf.h>
#include <asm-generic/poll.h>
@@ -46,6 +47,8 @@
wait_queue_head_t dump_wait_q;
int nsegments;
struct ramdump_segment *segments;
+ size_t elfcore_size;
+ char *elfcore_buf;
};
static int ramdump_open(struct inode *inode, struct file *filep)
@@ -107,13 +110,29 @@
size_t copy_size = 0;
int ret = 0;
- if (rd_dev->data_ready == 0) {
- pr_err("Ramdump(%s): Read when there's no dump available!",
- rd_dev->name);
- return -EPIPE;
+ if ((filep->f_flags & O_NONBLOCK) && !rd_dev->data_ready)
+ return -EAGAIN;
+
+ ret = wait_event_interruptible(rd_dev->dump_wait_q, rd_dev->data_ready);
+ if (ret)
+ return ret;
+
+ if (*pos < rd_dev->elfcore_size) {
+ copy_size = min(rd_dev->elfcore_size, count);
+
+ if (copy_to_user(buf, rd_dev->elfcore_buf, copy_size)) {
+ ret = -EFAULT;
+ goto ramdump_done;
+ }
+ *pos += copy_size;
+ count -= copy_size;
+ buf += copy_size;
+ if (count == 0)
+ return copy_size;
}
- addr = offset_translate(*pos, rd_dev, &data_left);
+ addr = offset_translate(*pos - rd_dev->elfcore_size, rd_dev,
+ &data_left);
/* EOF check */
if (data_left == 0) {
@@ -234,11 +253,14 @@
kfree(rd_dev);
}
-int do_ramdump(void *handle, struct ramdump_segment *segments,
- int nsegments)
+static int _do_ramdump(void *handle, struct ramdump_segment *segments,
+ int nsegments, bool use_elf)
{
int ret, i;
struct ramdump_device *rd_dev = (struct ramdump_device *)handle;
+ Elf32_Phdr *phdr;
+ Elf32_Ehdr *ehdr;
+ unsigned long offset;
if (!rd_dev->consumer_present) {
pr_err("Ramdump(%s): No consumers. Aborting..\n", rd_dev->name);
@@ -251,6 +273,38 @@
rd_dev->segments = segments;
rd_dev->nsegments = nsegments;
+ if (use_elf) {
+ rd_dev->elfcore_size = sizeof(*ehdr) +
+ sizeof(*phdr) * nsegments;
+ ehdr = kzalloc(rd_dev->elfcore_size, GFP_KERNEL);
+ rd_dev->elfcore_buf = (char *)ehdr;
+ if (!rd_dev->elfcore_buf)
+ return -ENOMEM;
+
+ memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
+ ehdr->e_ident[EI_CLASS] = ELFCLASS32;
+ ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
+ ehdr->e_ident[EI_VERSION] = EV_CURRENT;
+ ehdr->e_ident[EI_OSABI] = ELFOSABI_NONE;
+ ehdr->e_type = ET_CORE;
+ ehdr->e_version = EV_CURRENT;
+ ehdr->e_phoff = sizeof(*ehdr);
+ ehdr->e_ehsize = sizeof(*ehdr);
+ ehdr->e_phentsize = sizeof(*phdr);
+ ehdr->e_phnum = nsegments;
+
+ offset = rd_dev->elfcore_size;
+ phdr = (Elf32_Phdr *)(ehdr + 1);
+ for (i = 0; i < nsegments; i++, phdr++) {
+ phdr->p_type = PT_LOAD;
+ phdr->p_offset = offset;
+ phdr->p_vaddr = phdr->p_paddr = segments[i].address;
+ phdr->p_filesz = phdr->p_memsz = segments[i].size;
+ phdr->p_flags = PF_R | PF_W | PF_X;
+ offset += phdr->p_filesz;
+ }
+ }
+
rd_dev->data_ready = 1;
rd_dev->ramdump_status = -1;
@@ -271,5 +325,20 @@
ret = (rd_dev->ramdump_status == 0) ? 0 : -EPIPE;
rd_dev->data_ready = 0;
+ rd_dev->elfcore_size = 0;
+ kfree(rd_dev->elfcore_buf);
+ rd_dev->elfcore_buf = NULL;
return ret;
+
+}
+
+int do_ramdump(void *handle, struct ramdump_segment *segments, int nsegments)
+{
+ return _do_ramdump(handle, segments, nsegments, false);
+}
+
+int
+do_elf_ramdump(void *handle, struct ramdump_segment *segments, int nsegments)
+{
+ return _do_ramdump(handle, segments, nsegments, true);
}
diff --git a/arch/arm/mach-msm/ramdump.h b/arch/arm/mach-msm/ramdump.h
index 3e5bfaf..5fb41ec 100644
--- a/arch/arm/mach-msm/ramdump.h
+++ b/arch/arm/mach-msm/ramdump.h
@@ -24,5 +24,7 @@
void destroy_ramdump_device(void *dev);
int do_ramdump(void *handle, struct ramdump_segment *segments,
int nsegments);
+int do_elf_ramdump(void *handle, struct ramdump_segment *segments,
+ int nsegments);
#endif
diff --git a/arch/arm/mach-msm/smcmod.c b/arch/arm/mach-msm/smcmod.c
new file mode 100644
index 0000000..705bab5
--- /dev/null
+++ b/arch/arm/mach-msm/smcmod.c
@@ -0,0 +1,727 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define KMSG_COMPONENT "SMCMOD"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/miscdevice.h>
+#include <linux/mutex.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/printk.h>
+#include <linux/msm_ion.h>
+#include <asm/smcmod.h>
+#include <mach/scm.h>
+
+static DEFINE_MUTEX(ioctl_lock);
+
+#define SMCMOD_SVC_DEFAULT (0)
+#define SMCMOD_SVC_CRYPTO (1)
+#define SMCMOD_CRYPTO_CMD_CIPHER (1)
+#define SMCMOD_CRYPTO_CMD_MSG_DIGEST_FIXED (2)
+#define SMCMOD_CRYPTO_CMD_MSG_DIGEST (3)
+
+/**
+ * struct smcmod_cipher_scm_req - structure for sending the cipher cmd to
+ * scm_call.
+ *
+ * @algorithm - specifies cipher algorithm
+ * @operation - specifies encryption or decryption.
+ * @mode - specifies cipher mode.
+ * @key_phys_addr - physical address for key buffer.
+ * @key_size - key size in bytes.
+ * @plain_text_phys_addr - physical address for plain text buffer.
+ * @plain_text_size - size of plain text in bytes.
+ * @cipher_text_phys_addr - physical address for cipher text buffer.
+ * @cipher_text_size - cipher text size in bytes.
+ * @init_vector_phys_addr - physical address for init vector buffer.
+ * @init_vector_size - size of initialization vector in bytes.
+ */
+struct smcmod_cipher_scm_req {
+ uint32_t algorithm;
+ uint32_t operation;
+ uint32_t mode;
+ uint32_t key_phys_addr;
+ uint32_t key_size;
+ uint32_t plain_text_phys_addr;
+ uint32_t plain_text_size;
+ uint32_t cipher_text_phys_addr;
+ uint32_t cipher_text_size;
+ uint32_t init_vector_phys_addr;
+ uint32_t init_vector_size;
+};
+
+/**
+ * struct smcmod_msg_digest_scm_req - structure for sending message digest
+ * to scm_call.
+ *
+ * @algorithm - specifies the cipher algorithm.
+ * @key_phys_addr - physical address of key buffer.
+ * @key_size - hash key size in bytes.
+ * @input_phys_addr - physical address of input buffer.
+ * @input_size - input data size in bytes.
+ * @output_phys_addr - physical address of output buffer.
+ * @output_size - size of output buffer in bytes.
+ * @verify - indicates whether to verify the hash value.
+ */
+struct smcmod_msg_digest_scm_req {
+ uint32_t algorithm;
+ uint32_t key_phys_addr;
+ uint32_t key_size;
+ uint32_t input_phys_addr;
+ uint32_t input_size;
+ uint32_t output_phys_addr;
+ uint32_t output_size;
+ uint8_t verify;
+} __packed;
+
+static void smcmod_inv_range(unsigned long start, unsigned long end)
+{
+ uint32_t cacheline_size;
+ uint32_t ctr;
+
+ /* get cache line size */
+ asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
+ cacheline_size = 4 << ((ctr >> 16) & 0xf);
+
+ /* invalidate the range */
+ start = round_down(start, cacheline_size);
+ end = round_up(end, cacheline_size);
+ while (start < end) {
+ asm ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start)
+ : "memory");
+ start += cacheline_size;
+ }
+ mb();
+ isb();
+}
+
+static int smcmod_ion_fd_to_phys(int32_t fd, struct ion_client *ion_clientp,
+ struct ion_handle **ion_handlep, uint32_t *phys_addrp, size_t *sizep)
+{
+ int ret = 0;
+
+ /* sanity check args */
+ if ((fd < 0) || IS_ERR_OR_NULL(ion_clientp) ||
+ IS_ERR_OR_NULL(ion_handlep) || IS_ERR_OR_NULL(phys_addrp) ||
+ IS_ERR_OR_NULL(sizep))
+ return -EINVAL;
+
+ /* import the buffer fd */
+ *ion_handlep = ion_import_dma_buf(ion_clientp, fd);
+
+ /* sanity check the handle */
+ if (IS_ERR_OR_NULL(*ion_handlep))
+ return -EINVAL;
+
+ /* get the physical address */
+ ret = ion_phys(ion_clientp, *ion_handlep, (ion_phys_addr_t *)phys_addrp,
+ sizep);
+
+ return ret;
+}
+
+static int smcmod_send_buf_cmd(struct smcmod_buf_req *reqp)
+{
+ int ret = 0;
+ struct ion_client *ion_clientp = NULL;
+ struct ion_handle *ion_cmd_handlep = NULL;
+ struct ion_handle *ion_resp_handlep = NULL;
+ void *cmd_vaddrp = NULL;
+ void *resp_vaddrp = NULL;
+ unsigned long cmd_buf_size = 0;
+ unsigned long resp_buf_size = 0;
+
+ /* sanity check the argument */
+ if (IS_ERR_OR_NULL(reqp))
+ return -EINVAL;
+
+ /* sanity check the fds */
+ if (reqp->ion_cmd_fd < 0)
+ return -EINVAL;
+
+ /* create an ion client */
+ ion_clientp = msm_ion_client_create(UINT_MAX, "smcmod");
+
+ /* check for errors */
+ if (IS_ERR_OR_NULL(ion_clientp))
+ return -EINVAL;
+
+ /* import the command buffer fd */
+ ion_cmd_handlep = ion_import_dma_buf(ion_clientp, reqp->ion_cmd_fd);
+
+ /* sanity check the handle */
+ if (IS_ERR_OR_NULL(ion_cmd_handlep)) {
+ ret = -EINVAL;
+ goto buf_cleanup;
+ }
+
+ /* retrieve the size of the buffer */
+ if (ion_handle_get_size(ion_clientp, ion_cmd_handlep,
+ &cmd_buf_size) < 0) {
+ ret = -EINVAL;
+ goto buf_cleanup;
+ }
+
+ /* ensure that the command buffer size is not
+ * greater than the size of the buffer.
+ */
+ if (reqp->cmd_len > cmd_buf_size) {
+ ret = -EINVAL;
+ goto buf_cleanup;
+ }
+
+ /* map the area to get a virtual address */
+ cmd_vaddrp = ion_map_kernel(ion_clientp, ion_cmd_handlep);
+
+ /* sanity check the address */
+ if (IS_ERR_OR_NULL(cmd_vaddrp)) {
+ ret = -EINVAL;
+ goto buf_cleanup;
+ }
+
+ /* check if there is a response buffer */
+ if (reqp->ion_resp_fd >= 0) {
+ /* import the handle */
+ ion_resp_handlep =
+ ion_import_dma_buf(ion_clientp, reqp->ion_resp_fd);
+
+ /* sanity check the handle */
+ if (IS_ERR_OR_NULL(ion_resp_handlep)) {
+ ret = -EINVAL;
+ goto buf_cleanup;
+ }
+
+ /* retrieve the size of the buffer */
+ if (ion_handle_get_size(ion_clientp, ion_resp_handlep,
+ &resp_buf_size) < 0) {
+ ret = -EINVAL;
+ goto buf_cleanup;
+ }
+
+ /* ensure that the command buffer size is not
+ * greater than the size of the buffer.
+ */
+ if (reqp->resp_len > resp_buf_size) {
+ ret = -EINVAL;
+ goto buf_cleanup;
+ }
+
+ /* map the area to get a virtual address */
+ resp_vaddrp = ion_map_kernel(ion_clientp, ion_resp_handlep);
+
+ /* sanity check the address */
+ if (IS_ERR_OR_NULL(resp_vaddrp)) {
+ ret = -EINVAL;
+ goto buf_cleanup;
+ }
+ }
+
+ /* call scm function to switch to secure world */
+ reqp->return_val = scm_call(reqp->service_id, reqp->command_id,
+ cmd_vaddrp, reqp->cmd_len, resp_vaddrp, reqp->resp_len);
+
+buf_cleanup:
+ /* if the client and handle(s) are valid, free them */
+ if (!IS_ERR_OR_NULL(ion_clientp)) {
+ if (!IS_ERR_OR_NULL(ion_cmd_handlep)) {
+ if (!IS_ERR_OR_NULL(cmd_vaddrp))
+ ion_unmap_kernel(ion_clientp, ion_cmd_handlep);
+ ion_free(ion_clientp, ion_cmd_handlep);
+ }
+
+ if (!IS_ERR_OR_NULL(ion_resp_handlep)) {
+ if (!IS_ERR_OR_NULL(resp_vaddrp))
+ ion_unmap_kernel(ion_clientp, ion_resp_handlep);
+ ion_free(ion_clientp, ion_resp_handlep);
+ }
+
+ ion_client_destroy(ion_clientp);
+ }
+
+ return ret;
+}
+
+static int smcmod_send_cipher_cmd(struct smcmod_cipher_req *reqp)
+{
+ int ret = 0;
+ struct smcmod_cipher_scm_req scm_req;
+ struct ion_client *ion_clientp = NULL;
+ struct ion_handle *ion_key_handlep = NULL;
+ struct ion_handle *ion_plain_handlep = NULL;
+ struct ion_handle *ion_cipher_handlep = NULL;
+ struct ion_handle *ion_iv_handlep = NULL;
+ size_t size = 0;
+
+ if (IS_ERR_OR_NULL(reqp))
+ return -EINVAL;
+
+ /* sanity check the fds */
+ if ((reqp->ion_plain_text_fd < 0) ||
+ (reqp->ion_cipher_text_fd < 0) ||
+ (reqp->ion_init_vector_fd < 0))
+ return -EINVAL;
+
+ /* create an ion client */
+ ion_clientp = msm_ion_client_create(UINT_MAX, "smcmod");
+
+ /* check for errors */
+ if (IS_ERR_OR_NULL(ion_clientp))
+ return -EINVAL;
+
+ /* fill in the scm request structure */
+ scm_req.algorithm = reqp->algorithm;
+ scm_req.operation = reqp->operation;
+ scm_req.mode = reqp->mode;
+ scm_req.key_phys_addr = 0;
+ scm_req.key_size = reqp->key_size;
+ scm_req.plain_text_size = reqp->plain_text_size;
+ scm_req.cipher_text_size = reqp->cipher_text_size;
+ scm_req.init_vector_size = reqp->init_vector_size;
+
+ if (!reqp->key_is_null) {
+ /* import the key buffer and get the physical address */
+ ret = smcmod_ion_fd_to_phys(reqp->ion_key_fd, ion_clientp,
+ &ion_key_handlep, &scm_req.key_phys_addr, &size);
+ if (ret < 0)
+ goto buf_cleanup;
+
+ /* ensure that the key size is not
+ * greater than the size of the buffer.
+ */
+ if (reqp->key_size > size) {
+ ret = -EINVAL;
+ goto buf_cleanup;
+ }
+ }
+
+ /* import the plain text buffer and get the physical address */
+ ret = smcmod_ion_fd_to_phys(reqp->ion_plain_text_fd, ion_clientp,
+ &ion_plain_handlep, &scm_req.plain_text_phys_addr, &size);
+
+ if (ret < 0)
+ goto buf_cleanup;
+
+ /* ensure that the plain text size is not
+ * greater than the size of the buffer.
+ */
+ if (reqp->plain_text_size > size) {
+ ret = -EINVAL;
+ goto buf_cleanup;
+ }
+
+ /* import the cipher text buffer and get the physical address */
+ ret = smcmod_ion_fd_to_phys(reqp->ion_cipher_text_fd, ion_clientp,
+ &ion_cipher_handlep, &scm_req.cipher_text_phys_addr, &size);
+ if (ret < 0)
+ goto buf_cleanup;
+
+ /* ensure that the cipher text size is not
+ * greater than the size of the buffer.
+ */
+ if (reqp->cipher_text_size > size) {
+ ret = -EINVAL;
+ goto buf_cleanup;
+ }
+
+ /* import the init vector buffer and get the physical address */
+ ret = smcmod_ion_fd_to_phys(reqp->ion_init_vector_fd, ion_clientp,
+ &ion_iv_handlep, &scm_req.init_vector_phys_addr, &size);
+ if (ret < 0)
+ goto buf_cleanup;
+
+ /* ensure that the init vector size is not
+ * greater than the size of the buffer.
+ */
+ if (reqp->init_vector_size > size) {
+ ret = -EINVAL;
+ goto buf_cleanup;
+ }
+
+ /* call scm function to switch to secure world */
+ reqp->return_val = scm_call(SMCMOD_SVC_CRYPTO,
+ SMCMOD_CRYPTO_CMD_CIPHER, &scm_req,
+ sizeof(scm_req), NULL, 0);
+
+ /* for decrypt, plain text is the output, otherwise it's cipher text */
+ if (reqp->operation) {
+ void *vaddrp = NULL;
+
+ /* map the plain text region to get the virtual address */
+ vaddrp = ion_map_kernel(ion_clientp, ion_plain_handlep);
+ if (IS_ERR_OR_NULL(vaddrp)) {
+ ret = -EINVAL;
+ goto buf_cleanup;
+ }
+
+ /* invalidate the range */
+ smcmod_inv_range((unsigned long)vaddrp,
+ (unsigned long)(vaddrp + scm_req.plain_text_size));
+
+ /* unmap the mapped area */
+ ion_unmap_kernel(ion_clientp, ion_plain_handlep);
+ } else {
+ void *vaddrp = NULL;
+
+ /* map the cipher text region to get the virtual address */
+ vaddrp = ion_map_kernel(ion_clientp, ion_cipher_handlep);
+ if (IS_ERR_OR_NULL(vaddrp)) {
+ ret = -EINVAL;
+ goto buf_cleanup;
+ }
+
+ /* invalidate the range */
+ smcmod_inv_range((unsigned long)vaddrp,
+ (unsigned long)(vaddrp + scm_req.cipher_text_size));
+
+ /* unmap the mapped area */
+ ion_unmap_kernel(ion_clientp, ion_cipher_handlep);
+ }
+
+buf_cleanup:
+ /* if the client and handles are valid, free them */
+ if (!IS_ERR_OR_NULL(ion_clientp)) {
+ if (!IS_ERR_OR_NULL(ion_key_handlep))
+ ion_free(ion_clientp, ion_key_handlep);
+
+ if (!IS_ERR_OR_NULL(ion_plain_handlep))
+ ion_free(ion_clientp, ion_plain_handlep);
+
+ if (!IS_ERR_OR_NULL(ion_cipher_handlep))
+ ion_free(ion_clientp, ion_cipher_handlep);
+
+ if (!IS_ERR_OR_NULL(ion_iv_handlep))
+ ion_free(ion_clientp, ion_iv_handlep);
+
+ ion_client_destroy(ion_clientp);
+ }
+
+ return ret;
+}
+static int smcmod_send_msg_digest_cmd(struct smcmod_msg_digest_req *reqp)
+{
+ int ret = 0;
+ struct smcmod_msg_digest_scm_req scm_req;
+ struct ion_client *ion_clientp = NULL;
+ struct ion_handle *ion_key_handlep = NULL;
+ struct ion_handle *ion_input_handlep = NULL;
+ struct ion_handle *ion_output_handlep = NULL;
+ size_t size = 0;
+ void *vaddrp = NULL;
+
+ if (IS_ERR_OR_NULL(reqp))
+ return -EINVAL;
+
+ /* sanity check the fds */
+ if ((reqp->ion_input_fd < 0) || (reqp->ion_output_fd < 0))
+ return -EINVAL;
+
+ /* create an ion client */
+ ion_clientp = msm_ion_client_create(UINT_MAX, "smcmod");
+
+ /* check for errors */
+ if (IS_ERR_OR_NULL(ion_clientp))
+ return -EINVAL;
+
+ /* fill in the scm request structure */
+ scm_req.algorithm = reqp->algorithm;
+ scm_req.key_phys_addr = 0;
+ scm_req.key_size = reqp->key_size;
+ scm_req.input_size = reqp->input_size;
+ scm_req.output_size = reqp->output_size;
+ scm_req.verify = 0;
+
+ if (!reqp->key_is_null) {
+ /* import the key buffer and get the physical address */
+ ret = smcmod_ion_fd_to_phys(reqp->ion_key_fd, ion_clientp,
+ &ion_key_handlep, &scm_req.key_phys_addr, &size);
+ if (ret < 0)
+ goto buf_cleanup;
+
+ /* ensure that the key size is not
+ * greater than the size of the buffer.
+ */
+ if (reqp->key_size > size) {
+ ret = -EINVAL;
+ goto buf_cleanup;
+ }
+ }
+
+ /* import the input buffer and get the physical address */
+ ret = smcmod_ion_fd_to_phys(reqp->ion_input_fd, ion_clientp,
+ &ion_input_handlep, &scm_req.input_phys_addr, &size);
+ if (ret < 0)
+ goto buf_cleanup;
+
+ /* ensure that the input size is not
+ * greater than the size of the buffer.
+ */
+ if (reqp->input_size > size) {
+ ret = -EINVAL;
+ goto buf_cleanup;
+ }
+
+ /* import the output buffer and get the physical address */
+ ret = smcmod_ion_fd_to_phys(reqp->ion_output_fd, ion_clientp,
+ &ion_output_handlep, &scm_req.output_phys_addr, &size);
+ if (ret < 0)
+ goto buf_cleanup;
+
+ /* ensure that the output size is not
+ * greater than the size of the buffer.
+ */
+ if (reqp->output_size > size) {
+ ret = -EINVAL;
+ goto buf_cleanup;
+ }
+
+ /* call scm function to switch to secure world */
+ if (reqp->fixed_block)
+ reqp->return_val = scm_call(SMCMOD_SVC_CRYPTO,
+ SMCMOD_CRYPTO_CMD_MSG_DIGEST_FIXED,
+ &scm_req,
+ sizeof(scm_req),
+ NULL, 0);
+ else
+ reqp->return_val = scm_call(SMCMOD_SVC_CRYPTO,
+ SMCMOD_CRYPTO_CMD_MSG_DIGEST,
+ &scm_req,
+ sizeof(scm_req),
+ NULL, 0);
+
+
+ /* map the output region to get the virtual address */
+ vaddrp = ion_map_kernel(ion_clientp, ion_output_handlep);
+ if (IS_ERR_OR_NULL(vaddrp)) {
+ ret = -EINVAL;
+ goto buf_cleanup;
+ }
+
+ /* invalidate the range */
+ smcmod_inv_range((unsigned long)vaddrp,
+ (unsigned long)(vaddrp + scm_req.output_size));
+
+ /* unmap the mapped area */
+ ion_unmap_kernel(ion_clientp, ion_output_handlep);
+
+buf_cleanup:
+ /* if the client and handles are valid, free them */
+ if (!IS_ERR_OR_NULL(ion_clientp)) {
+ if (!IS_ERR_OR_NULL(ion_key_handlep))
+ ion_free(ion_clientp, ion_key_handlep);
+
+ if (!IS_ERR_OR_NULL(ion_input_handlep))
+ ion_free(ion_clientp, ion_input_handlep);
+
+ if (!IS_ERR_OR_NULL(ion_output_handlep))
+ ion_free(ion_clientp, ion_output_handlep);
+
+ ion_client_destroy(ion_clientp);
+ }
+
+ return ret;
+}
+
+static long smcmod_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ int ret = 0;
+
+ /* sanity check */
+ if (!argp)
+ return -EINVAL;
+
+ /*
+ * The SMC instruction should only be initiated by one process
+ * at a time, hence the critical section here. Note that this
+ * does not prevent user space from modifying the
+ * allocated buffer contents. Extra steps are needed to
+ * prevent that from happening.
+ */
+ mutex_lock(&ioctl_lock);
+
+ switch (cmd) {
+ case SMCMOD_IOCTL_SEND_REG_CMD:
+ {
+ struct smcmod_reg_req req;
+
+ /* copy struct from user */
+ if (copy_from_user((void *)&req, argp, sizeof(req))) {
+ ret = -EFAULT;
+ goto cleanup;
+ }
+
+ /* call the correct scm function to switch to secure
+ * world
+ */
+ if (req.num_args == 1) {
+ req.return_val =
+ scm_call_atomic1(req.service_id,
+ req.command_id, req.args[0]);
+ } else if (req.num_args == 2) {
+ req.return_val =
+ scm_call_atomic2(req.service_id,
+ req.command_id, req.args[0],
+ req.args[1]);
+ } else {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ /* copy result back to user */
+ if (copy_to_user(argp, (void *)&req, sizeof(req))) {
+ ret = -EFAULT;
+ goto cleanup;
+ }
+ }
+ break;
+
+ /* This is an example of how to pass buffers to/from the secure
+ * side using the ion driver.
+ */
+ case SMCMOD_IOCTL_SEND_BUF_CMD:
+ {
+ struct smcmod_buf_req req;
+
+ /* copy struct from user */
+ if (copy_from_user((void *)&req, argp, sizeof(req))) {
+ ret = -EFAULT;
+ goto cleanup;
+ }
+
+ /* send the command */
+ ret = smcmod_send_buf_cmd(&req);
+ if (ret < 0)
+ goto cleanup;
+
+ /* copy result back to user */
+ if (copy_to_user(argp, (void *)&req, sizeof(req))) {
+ ret = -EFAULT;
+ goto cleanup;
+ }
+ }
+ break;
+
+ case SMCMOD_IOCTL_SEND_CIPHER_CMD:
+ {
+ struct smcmod_cipher_req req;
+
+ /* copy struct from user */
+ if (copy_from_user((void *)&req, argp, sizeof(req))) {
+ ret = -EFAULT;
+ goto cleanup;
+ }
+
+ ret = smcmod_send_cipher_cmd(&req);
+ if (ret < 0)
+ goto cleanup;
+
+ /* copy result back to user */
+ if (copy_to_user(argp, (void *)&req, sizeof(req))) {
+ ret = -EFAULT;
+ goto cleanup;
+ }
+ }
+ break;
+
+ case SMCMOD_IOCTL_SEND_MSG_DIGEST_CMD:
+ {
+ struct smcmod_msg_digest_req req;
+
+ /* copy struct from user */
+ if (copy_from_user((void *)&req, argp, sizeof(req))) {
+ ret = -EFAULT;
+ goto cleanup;
+ }
+
+ ret = smcmod_send_msg_digest_cmd(&req);
+ if (ret < 0)
+ goto cleanup;
+
+ /* copy result back to user */
+ if (copy_to_user(argp, (void *)&req, sizeof(req))) {
+ ret = -EFAULT;
+ goto cleanup;
+ }
+ }
+ break;
+
+ case SMCMOD_IOCTL_GET_VERSION:
+ {
+ uint32_t req;
+
+ /* call scm function to switch to secure world */
+ req = scm_get_version();
+
+ /* copy result back to user */
+ if (copy_to_user(argp, (void *)&req, sizeof(req))) {
+ ret = -EFAULT;
+ goto cleanup;
+ }
+ }
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+cleanup:
+ mutex_unlock(&ioctl_lock);
+ return ret;
+}
+
+static int smcmod_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static int smcmod_release(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static const struct file_operations smcmod_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = smcmod_ioctl,
+ .open = smcmod_open,
+ .release = smcmod_release,
+};
+
+static struct miscdevice smcmod_misc_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = SMCMOD_DEV,
+ .fops = &smcmod_fops
+};
+
+static int __init smcmod_init(void)
+{
+ return misc_register(&smcmod_misc_dev);
+}
+
+static void __exit smcmod_exit(void)
+{
+ misc_deregister(&smcmod_misc_dev);
+}
+
+MODULE_DESCRIPTION("Qualcomm SMC Module");
+MODULE_LICENSE("GPL v2");
+
+module_init(smcmod_init);
+module_exit(smcmod_exit);
diff --git a/arch/arm/mach-msm/spm_devices.c b/arch/arm/mach-msm/spm_devices.c
index b378d3b..e77a7ac 100644
--- a/arch/arm/mach-msm/spm_devices.c
+++ b/arch/arm/mach-msm/spm_devices.c
@@ -374,7 +374,7 @@
};
struct mode_of of_l2_modes[] = {
- {"qcom,saw2-spm-cmd-ret", MSM_SPM_L2_MODE_RETENTION, 1},
+ {"qcom,saw2-spm-cmd-ret", MSM_SPM_L2_MODE_RETENTION, 0},
{"qcom,saw2-spm-cmd-gdhs", MSM_SPM_L2_MODE_GDHS, 1},
{"qcom,saw2-spm-cmd-pc", MSM_SPM_L2_MODE_POWER_COLLAPSE, 1},
};
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c
index 212ad77..e360906 100644
--- a/arch/arm/mach-msm/timer.c
+++ b/arch/arm/mach-msm/timer.c
@@ -1035,7 +1035,8 @@
if (cpu_is_msm7x01() || cpu_is_msm7x25() || cpu_is_msm7x27() ||
cpu_is_msm7x25a() || cpu_is_msm7x27a() || cpu_is_msm7x25aa() ||
- cpu_is_msm7x27aa() || cpu_is_msm8625() || cpu_is_msm7x25ab()) {
+ cpu_is_msm7x27aa() || cpu_is_msm8625() || cpu_is_msm7x25ab() ||
+ cpu_is_msm8625q()) {
dgt->shift = MSM_DGT_SHIFT;
dgt->freq = 19200000 >> MSM_DGT_SHIFT;
dgt->clockevent.shift = 32 + MSM_DGT_SHIFT;
@@ -1045,7 +1046,7 @@
gpt->flags |= MSM_CLOCK_FLAGS_UNSTABLE_COUNT
| MSM_CLOCK_FLAGS_ODD_MATCH_WRITE
| MSM_CLOCK_FLAGS_DELAYED_WRITE_POST;
- if (cpu_is_msm8625())
+ if (cpu_is_msm8625() || cpu_is_msm8625q())
fixup_msm8625_timer();
} else if (cpu_is_qsd8x50()) {
dgt->freq = 4800000;
@@ -1134,8 +1135,8 @@
ce->irq = clock->irq;
if (cpu_is_msm8x60() || cpu_is_msm9615() || cpu_is_msm8625() ||
- soc_class_is_msm8960() || soc_class_is_apq8064() ||
- soc_class_is_msm8930()) {
+ cpu_is_msm8625q() || soc_class_is_msm8960() ||
+ soc_class_is_apq8064() || soc_class_is_msm8930()) {
clock->percpu_evt = alloc_percpu(struct clock_event_device *);
if (!clock->percpu_evt) {
pr_err("msm_timer_init: memory allocation "
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 4b03cfd..8f3c107 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -92,6 +92,8 @@
unsigned int freq_lo_jiffies;
unsigned int freq_hi_jiffies;
unsigned int rate_mult;
+ unsigned int prev_load;
+ unsigned int max_load;
int cpu;
unsigned int sample_type:1;
/*
@@ -125,17 +127,27 @@
static struct dbs_tuners {
unsigned int sampling_rate;
unsigned int up_threshold;
+ unsigned int up_threshold_multi_core;
unsigned int down_differential;
+ unsigned int down_differential_multi_core;
+ unsigned int optimal_freq;
+ unsigned int up_threshold_any_cpu_load;
+ unsigned int sync_freq;
unsigned int ignore_nice;
unsigned int sampling_down_factor;
int powersave_bias;
unsigned int io_is_busy;
} dbs_tuners_ins = {
+ .up_threshold_multi_core = DEF_FREQUENCY_UP_THRESHOLD,
.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
.down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
+ .down_differential_multi_core = MICRO_FREQUENCY_DOWN_DIFFERENTIAL,
+ .up_threshold_any_cpu_load = DEF_FREQUENCY_UP_THRESHOLD,
.ignore_nice = 0,
.powersave_bias = 0,
+ .sync_freq = 0,
+ .optimal_freq = 0,
};
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
@@ -293,9 +305,13 @@
show_one(sampling_rate, sampling_rate);
show_one(io_is_busy, io_is_busy);
show_one(up_threshold, up_threshold);
+show_one(up_threshold_multi_core, up_threshold_multi_core);
show_one(down_differential, down_differential);
show_one(sampling_down_factor, sampling_down_factor);
show_one(ignore_nice_load, ignore_nice);
+show_one(optimal_freq, optimal_freq);
+show_one(up_threshold_any_cpu_load, up_threshold_any_cpu_load);
+show_one(sync_freq, sync_freq);
static ssize_t show_powersave_bias
(struct kobject *kobj, struct attribute *attr, char *buf)
@@ -371,6 +387,19 @@
return count;
}
+static ssize_t store_sync_freq(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+
+ ret = sscanf(buf, "%u", &input);
+ if (ret != 1)
+ return -EINVAL;
+ dbs_tuners_ins.sync_freq = input;
+ return count;
+}
+
static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
const char *buf, size_t count)
{
@@ -384,6 +413,19 @@
return count;
}
+static ssize_t store_optimal_freq(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+
+ ret = sscanf(buf, "%u", &input);
+ if (ret != 1)
+ return -EINVAL;
+ dbs_tuners_ins.optimal_freq = input;
+ return count;
+}
+
static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
const char *buf, size_t count)
{
@@ -399,6 +441,36 @@
return count;
}
+static ssize_t store_up_threshold_multi_core(struct kobject *a,
+ struct attribute *b, const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+
+ if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
+ input < MIN_FREQUENCY_UP_THRESHOLD) {
+ return -EINVAL;
+ }
+ dbs_tuners_ins.up_threshold_multi_core = input;
+ return count;
+}
+
+static ssize_t store_up_threshold_any_cpu_load(struct kobject *a,
+ struct attribute *b, const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+
+ if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
+ input < MIN_FREQUENCY_UP_THRESHOLD) {
+ return -EINVAL;
+ }
+ dbs_tuners_ins.up_threshold_any_cpu_load = input;
+ return count;
+}
+
static ssize_t store_down_differential(struct kobject *a, struct attribute *b,
const char *buf, size_t count)
{
@@ -581,6 +653,10 @@
define_one_global_rw(sampling_down_factor);
define_one_global_rw(ignore_nice_load);
define_one_global_rw(powersave_bias);
+define_one_global_rw(up_threshold_multi_core);
+define_one_global_rw(optimal_freq);
+define_one_global_rw(up_threshold_any_cpu_load);
+define_one_global_rw(sync_freq);
static struct attribute *dbs_attributes[] = {
&sampling_rate_min.attr,
@@ -591,6 +667,10 @@
&ignore_nice_load.attr,
&powersave_bias.attr,
&io_is_busy.attr,
+ &up_threshold_multi_core.attr,
+ &optimal_freq.attr,
+ &up_threshold_any_cpu_load.attr,
+ &sync_freq.attr,
NULL
};
@@ -619,7 +699,7 @@
unsigned int max_load_freq;
/* Current load across this CPU */
unsigned int cur_load = 0;
-
+ unsigned int max_load_other_cpu = 0;
struct cpufreq_policy *policy;
unsigned int j;
@@ -696,7 +776,8 @@
continue;
cur_load = 100 * (wall_time - idle_time) / wall_time;
-
+ j_dbs_info->max_load = max(cur_load, j_dbs_info->prev_load);
+ j_dbs_info->prev_load = cur_load;
freq_avg = __cpufreq_driver_getavg(policy, j);
if (freq_avg <= 0)
freq_avg = policy->cur;
@@ -705,11 +786,37 @@
if (load_freq > max_load_freq)
max_load_freq = load_freq;
}
+
+ for_each_online_cpu(j) {
+ struct cpu_dbs_info_s *j_dbs_info;
+ j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
+
+ if (j == policy->cpu)
+ continue;
+
+ if (max_load_other_cpu < j_dbs_info->max_load)
+ max_load_other_cpu = j_dbs_info->max_load;
+ /*
+ * The other cpu could be running at higher frequency
+ * but may not have completed its sampling_down_factor.
+ * For that case, consider the other cpu as loaded so
+ * that frequency imbalance does not occur.
+ */
+
+ if ((j_dbs_info->cur_policy != NULL)
+ && (j_dbs_info->cur_policy->cur ==
+ j_dbs_info->cur_policy->max)) {
+
+ if (policy->cur >= dbs_tuners_ins.optimal_freq)
+ max_load_other_cpu =
+ dbs_tuners_ins.up_threshold_any_cpu_load;
+ }
+ }
+
/* calculate the scaled load across CPU */
load_at_max_freq = (cur_load * policy->cur)/policy->cpuinfo.max_freq;
cpufreq_notify_utilization(policy, load_at_max_freq);
-
/* Check for frequency increase */
if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
/* If switching to max speed, apply sampling_down_factor */
@@ -720,6 +827,25 @@
return;
}
+ if (num_online_cpus() > 1) {
+
+ if (max_load_other_cpu >
+ dbs_tuners_ins.up_threshold_any_cpu_load) {
+ if (policy->cur < dbs_tuners_ins.sync_freq)
+ dbs_freq_increase(policy,
+ dbs_tuners_ins.sync_freq);
+ return;
+ }
+
+ if (max_load_freq > dbs_tuners_ins.up_threshold_multi_core *
+ policy->cur) {
+ if (policy->cur < dbs_tuners_ins.optimal_freq)
+ dbs_freq_increase(policy,
+ dbs_tuners_ins.optimal_freq);
+ return;
+ }
+ }
+
/* Check for frequency decrease */
/* if we cannot reduce the frequency anymore, break out early */
if (policy->cur == policy->min)
@@ -744,6 +870,20 @@
if (freq_next < policy->min)
freq_next = policy->min;
+ if (num_online_cpus() > 1) {
+ if (max_load_other_cpu >
+ (dbs_tuners_ins.up_threshold_multi_core -
+ dbs_tuners_ins.down_differential) &&
+ freq_next < dbs_tuners_ins.sync_freq)
+ freq_next = dbs_tuners_ins.sync_freq;
+
+ if (max_load_freq >
+ (dbs_tuners_ins.up_threshold_multi_core -
+ dbs_tuners_ins.down_differential_multi_core) *
+ policy->cur)
+ freq_next = dbs_tuners_ins.optimal_freq;
+
+ }
if (!dbs_tuners_ins.powersave_bias) {
__cpufreq_driver_target(policy, freq_next,
CPUFREQ_RELATION_L);
@@ -997,6 +1137,12 @@
max(min_sampling_rate,
latency * LATENCY_MULTIPLIER);
dbs_tuners_ins.io_is_busy = should_io_be_busy();
+
+ if (dbs_tuners_ins.optimal_freq == 0)
+ dbs_tuners_ins.optimal_freq = policy->min;
+
+ if (dbs_tuners_ins.sync_freq == 0)
+ dbs_tuners_ins.sync_freq = policy->min;
}
if (!cpu)
rc = input_register_handler(&dbs_input_handler);
diff --git a/drivers/gpu/ion/ion_iommu_heap.c b/drivers/gpu/ion/ion_iommu_heap.c
index 345c07d..0b691f3 100644
--- a/drivers/gpu/ion/ion_iommu_heap.c
+++ b/drivers/gpu/ion/ion_iommu_heap.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -19,6 +19,7 @@
#include <linux/vmalloc.h>
#include <linux/iommu.h>
#include <linux/pfn.h>
+#include <linux/dma-mapping.h>
#include "ion_priv.h"
#include <asm/mach/map.h>
@@ -80,8 +81,13 @@
goto err3;
sg_set_page(sg, data->pages[i], PAGE_SIZE, 0);
+ sg_dma_address(sg) = sg_phys(sg);
}
+ if (!ION_IS_CACHED(flags))
+ dma_sync_sg_for_device(NULL, table->sgl, table->nents,
+ DMA_BIDIRECTIONAL);
+
buffer->priv_virt = data;
return 0;
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c
index ad2ef83..8b63216 100644
--- a/drivers/gpu/ion/ion_system_heap.c
+++ b/drivers/gpu/ion/ion_system_heap.c
@@ -518,14 +518,26 @@
return ret;
}
+void *ion_system_contig_heap_map_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return buffer->priv_virt;
+}
+
+void ion_system_contig_heap_unmap_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return;
+}
+
static struct ion_heap_ops kmalloc_ops = {
.allocate = ion_system_contig_heap_allocate,
.free = ion_system_contig_heap_free,
.phys = ion_system_contig_heap_phys,
.map_dma = ion_system_contig_heap_map_dma,
.unmap_dma = ion_system_heap_unmap_dma,
- .map_kernel = ion_system_heap_map_kernel,
- .unmap_kernel = ion_system_heap_unmap_kernel,
+ .map_kernel = ion_system_contig_heap_map_kernel,
+ .unmap_kernel = ion_system_contig_heap_unmap_kernel,
.map_user = ion_system_contig_heap_map_user,
.cache_op = ion_system_contig_heap_cache_ops,
.print_debug = ion_system_contig_print_debug,
diff --git a/drivers/gpu/ion/msm/msm_ion.c b/drivers/gpu/ion/msm/msm_ion.c
index e422fd26..697587b 100644
--- a/drivers/gpu/ion/msm/msm_ion.c
+++ b/drivers/gpu/ion/msm/msm_ion.c
@@ -777,19 +777,6 @@
if (pdata_needs_to_be_freed)
free_pdata(pdata);
- /* Check if each heap has been removed from the memblock */
- for (i = 0; i < num_heaps; i++) {
- struct ion_platform_heap *heap_data = &pdata->heaps[i];
- if (!heap_data->base)
- continue;
- err = memblock_overlaps_memory(heap_data->base,
- heap_data->size);
- if (err) {
- panic("ION heap %s not removed from memblock\n",
- heap_data->name);
- }
- }
-
check_for_heap_overlap(pdata->heaps, num_heaps);
platform_set_drvdata(pdev, idev);
return 0;
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 060e89a..373c517 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -29,6 +29,7 @@
#include "kgsl_cffdump.h"
#include "kgsl_sharedmem.h"
#include "kgsl_iommu.h"
+#include "kgsl_trace.h"
#include "adreno.h"
#include "adreno_pm4types.h"
@@ -573,7 +574,7 @@
/* 8x25 returns 0 for minor id, but it should be 1 */
if (cpu_is_qsd8x50())
patchid = 1;
- else if (cpu_is_msm8625() && minorid == 0)
+ else if ((cpu_is_msm8625() || cpu_is_msm8625q()) && minorid == 0)
minorid = 1;
chipid |= (minorid << 8) | patchid;
@@ -2067,6 +2068,8 @@
if (!in_interrupt())
kgsl_pre_hwaccess(device);
+ trace_kgsl_regwrite(device, offsetwords, value);
+
kgsl_cffdump_regwrite(device->id, offsetwords << 2, value);
reg = (unsigned int *)(device->reg_virt + (offsetwords << 2));
@@ -2157,7 +2160,6 @@
int status;
unsigned int ref_ts, enableflag;
unsigned int context_id;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
mutex_lock(&device->mutex);
context_id = _get_context_id(context);
@@ -2204,13 +2206,9 @@
cmds[0] = cp_type3_packet(CP_NOP, 1);
cmds[1] = 0;
- if (adreno_dev->drawctxt_active)
+ if (context)
adreno_ringbuffer_issuecmds_intr(device,
context, &cmds[0], 2);
- else
- /* We would never call this function if there
- * was no active contexts running */
- BUG();
}
}
unlock:
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 1bccd4d..31491d5 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -897,8 +897,6 @@
kgsl_regwrite(mmu->device, MH_MMU_MPU_END,
mh->mpu_base + mh->mpu_range);
- } else {
- kgsl_regwrite(mmu->device, MH_MMU_CONFIG, 0x00000000);
}
mmu->hwpagetable = mmu->defaultpagetable;
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index f04fae1..d33df60 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -93,11 +93,8 @@
static inline int _adjust_pwrlevel(struct kgsl_pwrctrl *pwr, int level)
{
- unsigned int max_pwrlevel = max_t(int, pwr->thermal_pwrlevel,
- pwr->max_pwrlevel);
-
- unsigned int min_pwrlevel = max_t(int, pwr->thermal_pwrlevel,
- pwr->min_pwrlevel);
+ int max_pwrlevel = max_t(int, pwr->thermal_pwrlevel, pwr->max_pwrlevel);
+ int min_pwrlevel = max_t(int, pwr->thermal_pwrlevel, pwr->min_pwrlevel);
if (level < max_pwrlevel)
return max_pwrlevel;
@@ -688,6 +685,9 @@
clkstats->on_time_old = on_time;
clkstats->elapsed_old = clkstats->elapsed;
clkstats->elapsed = 0;
+
+ trace_kgsl_gpubusy(device, clkstats->on_time_old,
+ clkstats->elapsed_old);
}
/* Track the amount of time the gpu is on vs the total system time. *
diff --git a/drivers/gpu/msm/kgsl_trace.h b/drivers/gpu/msm/kgsl_trace.h
index 9662fce..bbef139 100644
--- a/drivers/gpu/msm/kgsl_trace.h
+++ b/drivers/gpu/msm/kgsl_trace.h
@@ -274,6 +274,32 @@
)
);
+TRACE_EVENT(kgsl_gpubusy,
+ TP_PROTO(struct kgsl_device *device, unsigned int busy,
+ unsigned int elapsed),
+
+ TP_ARGS(device, busy, elapsed),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(unsigned int, busy)
+ __field(unsigned int, elapsed)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->busy = busy;
+ __entry->elapsed = elapsed;
+ ),
+
+ TP_printk(
+ "d_name=%s busy=%d elapsed=%d",
+ __get_str(device_name),
+ __entry->busy,
+ __entry->elapsed
+ )
+);
+
DECLARE_EVENT_CLASS(kgsl_pwrstate_template,
TP_PROTO(struct kgsl_device *device, unsigned int state),
@@ -608,6 +634,31 @@
)
);
+TRACE_EVENT(kgsl_regwrite,
+
+ TP_PROTO(struct kgsl_device *device, unsigned int offset,
+ unsigned int value),
+
+ TP_ARGS(device, offset, value),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(unsigned int, offset)
+ __field(unsigned int, value)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->offset = offset;
+ __entry->value = value;
+ ),
+
+ TP_printk(
+ "d_name=%s reg=%x value=%x",
+ __get_str(device_name), __entry->offset, __entry->value
+ )
+);
+
#endif /* _KGSL_TRACE_H */
/* This part must be outside protection */
diff --git a/drivers/leds/leds-pm8xxx.c b/drivers/leds/leds-pm8xxx.c
index a641ce9..f493129 100644
--- a/drivers/leds/leds-pm8xxx.c
+++ b/drivers/leds/leds-pm8xxx.c
@@ -70,6 +70,7 @@
#define WLED_CTL_DLY_BIT_SHFT 0x05
#define WLED_MAX_CURR 25
#define WLED_MAX_CURR_MASK 0x1F
+#define WLED_BRIGHTNESS_MSB_MASK 0x0F
#define WLED_OP_FDBCK_MASK 0x1C
#define WLED_OP_FDBCK_BIT_SHFT 0x02
@@ -283,7 +284,8 @@
return rc;
}
- val = (val & ~WLED_MAX_CURR_MASK) | (duty >> WLED_8_BIT_SHFT);
+ val = (val & ~WLED_BRIGHTNESS_MSB_MASK) |
+ (duty >> WLED_8_BIT_SHFT);
rc = pm8xxx_writeb(led->dev->parent,
WLED_BRIGHTNESS_CNTL_REG1(i), val);
if (rc) {
diff --git a/drivers/media/video/msm_vidc/msm_vdec.c b/drivers/media/video/msm_vidc/msm_vdec.c
index e0a341a..711b3007 100644
--- a/drivers/media/video/msm_vidc/msm_vdec.c
+++ b/drivers/media/video/msm_vidc/msm_vdec.c
@@ -430,7 +430,15 @@
core);
goto exit;
}
-
+ if (!inst->in_reconfig) {
+ rc = msm_comm_try_state(inst, MSM_VIDC_RELEASE_RESOURCES_DONE);
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "Failed to move inst: %p to relase res done\n",
+ inst);
+ goto exit;
+ }
+ }
switch (b->type) {
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
break;
diff --git a/drivers/media/video/msm_vidc/msm_venc.c b/drivers/media/video/msm_vidc/msm_venc.c
index f436cf3..41518d7 100644
--- a/drivers/media/video/msm_vidc/msm_venc.c
+++ b/drivers/media/video/msm_vidc/msm_venc.c
@@ -1552,7 +1552,13 @@
int rc = 0;
int i;
struct vidc_buffer_addr_info buffer_info;
-
+ rc = msm_comm_try_state(inst, MSM_VIDC_RELEASE_RESOURCES_DONE);
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "Failed to move inst: %p to release res done state\n",
+ inst);
+ goto exit;
+ }
switch (b->type) {
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
break;
@@ -1581,6 +1587,7 @@
dprintk(VIDC_ERR, "Buffer type not recognized: %d\n", b->type);
break;
}
+exit:
return rc;
}
diff --git a/drivers/media/video/msm_vidc/msm_vidc.c b/drivers/media/video/msm_vidc/msm_vidc.c
index 64897c7..6ecea30 100644
--- a/drivers/media/video/msm_vidc/msm_vidc.c
+++ b/drivers/media/video/msm_vidc/msm_vidc.c
@@ -559,6 +559,8 @@
if (inst->state != MSM_VIDC_CORE_INVALID &&
core->state != VIDC_CORE_INVALID)
rc = msm_comm_try_state(inst, MSM_VIDC_CORE_UNINIT);
+ else
+ rc = msm_comm_force_cleanup(inst);
if (rc)
dprintk(VIDC_ERR,
"Failed to move video instance to uninit state\n");
diff --git a/drivers/media/video/msm_vidc/msm_vidc_common.c b/drivers/media/video/msm_vidc/msm_vidc_common.c
index 46a88c2..d797ba7 100644
--- a/drivers/media/video/msm_vidc/msm_vidc_common.c
+++ b/drivers/media/video/msm_vidc/msm_vidc_common.c
@@ -1125,6 +1125,11 @@
dprintk(VIDC_ERR, "Invalid params, core:%p\n", core);
return -EINVAL;
}
+ if (core->state == VIDC_CORE_INVALID) {
+ dprintk(VIDC_ERR,
+ "Core is in bad state. Cannot unset ocmem\n");
+ return -EIO;
+ }
rhdr.resource_id = VIDC_RESOURCE_OCMEM;
rhdr.resource_handle = (u32) &core->resources.ocmem;
init_completion(
@@ -1345,6 +1350,11 @@
return rc;
}
+int msm_comm_force_cleanup(struct msm_vidc_inst *inst)
+{
+ return msm_vidc_deinit_core(inst);
+}
+
static enum hal_domain get_hal_domain(int session_type)
{
enum hal_domain domain;
diff --git a/drivers/media/video/msm_vidc/msm_vidc_common.h b/drivers/media/video/msm_vidc/msm_vidc_common.h
index 916a3ca..d225a51 100644
--- a/drivers/media/video/msm_vidc/msm_vidc_common.h
+++ b/drivers/media/video/msm_vidc/msm_vidc_common.h
@@ -41,6 +41,7 @@
enum instance_state state);
int msm_comm_unset_ocmem(struct msm_vidc_core *core);
int msm_comm_free_ocmem(struct msm_vidc_core *core);
+int msm_comm_force_cleanup(struct msm_vidc_inst *inst);
enum hal_extradata_id msm_comm_get_hal_extradata_index(
enum v4l2_mpeg_vidc_extradata index);
#define IS_PRIV_CTRL(idx) (\
diff --git a/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c b/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c
index 3bedb92..333bad9 100644
--- a/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c
+++ b/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c
@@ -149,7 +149,6 @@
static void hal_process_sys_error(struct hal_device *device)
{
struct msm_vidc_cb_cmd_done cmd_done;
- disable_irq_nosync(device->hal_data->irq);
memset(&cmd_done, 0, sizeof(struct msm_vidc_cb_cmd_done));
cmd_done.device_id = device->device_id;
device->callback(SYS_ERROR, &cmd_done);
@@ -177,11 +176,12 @@
switch (pkt->event_id) {
case HFI_EVENT_SYS_ERROR:
- dprintk(VIDC_INFO, "HFI_EVENT_SYS_ERROR");
+ dprintk(VIDC_ERR, "HFI_EVENT_SYS_ERROR: %d\n",
+ pkt->event_data1);
hal_process_sys_error(device);
break;
case HFI_EVENT_SESSION_ERROR:
- dprintk(VIDC_INFO, "HFI_EVENT_SESSION_ERROR");
+ dprintk(VIDC_ERR, "HFI_EVENT_SESSION_ERROR");
hal_process_session_error(device, pkt);
break;
case HFI_EVENT_SESSION_SEQUENCE_CHANGED:
@@ -192,7 +192,7 @@
dprintk(VIDC_INFO, "HFI_EVENT_SESSION_PROPERTY_CHANGED");
break;
default:
- dprintk(VIDC_INFO, "hal_process_event_notify:unkown_event_id");
+ dprintk(VIDC_WARN, "hal_process_event_notify:unkown_event_id");
break;
}
}
diff --git a/drivers/media/video/msm_wfd/enc-mfc-subdev.c b/drivers/media/video/msm_wfd/enc-mfc-subdev.c
index 21fc719..aadf5ed 100644
--- a/drivers/media/video/msm_wfd/enc-mfc-subdev.c
+++ b/drivers/media/video/msm_wfd/enc-mfc-subdev.c
@@ -903,25 +903,13 @@
struct vcd_property_req_i_frame vcd_property_req_i_frame;
struct vcd_property_hdr vcd_property_hdr;
- int rc = 0;
- switch (type) {
- case V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_DISABLED:
- /*So...nothing to do?*/
- break;
- case V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_I_FRAME:
- vcd_property_hdr.prop_id = VCD_I_REQ_IFRAME;
- vcd_property_hdr.sz = sizeof(struct vcd_property_req_i_frame);
- vcd_property_req_i_frame.req_i_frame = 1;
+ vcd_property_hdr.prop_id = VCD_I_REQ_IFRAME;
+ vcd_property_hdr.sz = sizeof(struct vcd_property_req_i_frame);
+ vcd_property_req_i_frame.req_i_frame = 1;
- rc = vcd_set_property(client_ctx->vcd_handle,
- &vcd_property_hdr, &vcd_property_req_i_frame);
- break;
- case V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_NOT_CODED:
- default:
- rc = -ENOTSUPP;
- }
+ return vcd_set_property(client_ctx->vcd_handle,
+ &vcd_property_hdr, &vcd_property_req_i_frame);
- return rc;
}
static long venc_set_bitrate(struct video_client_ctx *client_ctx,
@@ -1348,10 +1336,10 @@
int level = 0;
switch (value) {
- case V4L2_CID_MPEG_QCOM_PERF_LEVEL_PERFORMANCE:
+ case V4L2_CID_MPEG_VIDC_PERF_LEVEL_PERFORMANCE:
level = VCD_PERF_LEVEL2;
break;
- case V4L2_CID_MPEG_QCOM_PERF_LEVEL_TURBO:
+ case V4L2_CID_MPEG_VIDC_PERF_LEVEL_TURBO:
level = VCD_PERF_LEVEL_TURBO;
break;
default:
@@ -2304,7 +2292,7 @@
case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
rc = venc_set_codec_profile(client_ctx, ctrl->id, ctrl->value);
break;
- case V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE:
+ case V4L2_CID_MPEG_VIDC_VIDEO_REQUEST_IFRAME:
rc = venc_request_frame(client_ctx, ctrl->value);
break;
case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP:
@@ -2335,7 +2323,7 @@
rc = venc_set_multislicing_mode(client_ctx, ctrl->id,
ctrl->value);
break;
- case V4L2_CID_MPEG_QCOM_SET_PERF_LEVEL:
+ case V4L2_CID_MPEG_VIDC_SET_PERF_LEVEL:
rc = venc_set_max_perf_level(client_ctx, ctrl->value);
break;
case V4L2_CID_MPEG_VIDC_VIDEO_H264_AU_DELIMITER:
diff --git a/drivers/media/video/msm_wfd/enc-venus-subdev.c b/drivers/media/video/msm_wfd/enc-venus-subdev.c
index 64c18da..480fe35 100644
--- a/drivers/media/video/msm_wfd/enc-venus-subdev.c
+++ b/drivers/media/video/msm_wfd/enc-venus-subdev.c
@@ -136,7 +136,7 @@
msm_vidc_dqevent(inst->vidc_context, &event);
if (event.type == V4L2_EVENT_MSM_VIDC_CLOSE_DONE) {
- WFD_MSG_ERR("enc callback thread shutting " \
+ WFD_MSG_DBG("enc callback thread shutting " \
"down normally\n");
bail_out = true;
} else {
diff --git a/drivers/media/video/msm_wfd/wfd-ioctl.c b/drivers/media/video/msm_wfd/wfd-ioctl.c
index bb2e606..74194ff 100644
--- a/drivers/media/video/msm_wfd/wfd-ioctl.c
+++ b/drivers/media/video/msm_wfd/wfd-ioctl.c
@@ -536,7 +536,7 @@
struct mem_region mregion;
if (minfo == NULL) {
- WFD_MSG_ERR("not freeing buffers since allocation failed");
+ WFD_MSG_DBG("not freeing buffers since allocation failed");
return;
}
diff --git a/drivers/media/video/msm_wfd/wfd-util.c b/drivers/media/video/msm_wfd/wfd-util.c
index 5c00e5c..28a6084 100644
--- a/drivers/media/video/msm_wfd/wfd-util.c
+++ b/drivers/media/video/msm_wfd/wfd-util.c
@@ -198,7 +198,7 @@
int wfd_stats_deinit(struct wfd_stats *stats)
{
- WFD_MSG_ERR("Latencies: avg enc. latency %d",
+ WFD_MSG_DBG("Latencies: avg enc. latency %d",
stats->enc_avg_latency);
/* Delete all debugfs files in one shot :) */
if (stats->d_parent)
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 3715417..c415952 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -235,6 +235,8 @@
uint32_t user_virt_sb_base;
size_t sb_length;
struct ion_handle *ihandle; /* Retrieve phy addr */
+ bool perf_enabled;
+ bool fast_load_enabled;
};
struct qseecom_listener_handle {
@@ -266,8 +268,8 @@
};
/* Function proto types */
-static int qsee_vote_for_clock(int32_t);
-static void qsee_disable_clock_vote(int32_t);
+static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t);
+static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t);
static int __qseecom_is_svc_unique(struct qseecom_dev_handle *data,
struct qseecom_register_listener_req *svc)
@@ -701,7 +703,7 @@
return -EFAULT;
}
/* Vote for the SFPB clock */
- ret = qsee_vote_for_clock(CLK_SFPB);
+ ret = qsee_vote_for_clock(data, CLK_SFPB);
if (ret)
pr_warning("Unable to vote for SFPB clock");
req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
@@ -734,7 +736,7 @@
load_img_req.ifd_data_fd);
if (IS_ERR_OR_NULL(ihandle)) {
pr_err("Ion client could not retrieve the handle\n");
- qsee_disable_clock_vote(CLK_SFPB);
+ qsee_disable_clock_vote(data, CLK_SFPB);
return -ENOMEM;
}
@@ -762,7 +764,7 @@
pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
if (!IS_ERR_OR_NULL(ihandle))
ion_free(qseecom.ion_clnt, ihandle);
- qsee_disable_clock_vote(CLK_SFPB);
+ qsee_disable_clock_vote(data, CLK_SFPB);
return -EFAULT;
}
@@ -773,7 +775,7 @@
ret);
if (!IS_ERR_OR_NULL(ihandle))
ion_free(qseecom.ion_clnt, ihandle);
- qsee_disable_clock_vote(CLK_SFPB);
+ qsee_disable_clock_vote(data, CLK_SFPB);
return ret;
}
}
@@ -783,7 +785,7 @@
resp.result);
if (!IS_ERR_OR_NULL(ihandle))
ion_free(qseecom.ion_clnt, ihandle);
- qsee_disable_clock_vote(CLK_SFPB);
+ qsee_disable_clock_vote(data, CLK_SFPB);
return -EFAULT;
}
@@ -792,7 +794,7 @@
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) {
pr_err("kmalloc failed\n");
- qsee_disable_clock_vote(CLK_SFPB);
+ qsee_disable_clock_vote(data, CLK_SFPB);
return -ENOMEM;
}
entry->app_id = app_id;
@@ -815,10 +817,10 @@
if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
pr_err("copy_to_user failed\n");
kzfree(entry);
- qsee_disable_clock_vote(CLK_SFPB);
+ qsee_disable_clock_vote(data, CLK_SFPB);
return -EFAULT;
}
- qsee_disable_clock_vote(CLK_SFPB);
+ qsee_disable_clock_vote(data, CLK_SFPB);
return 0;
}
@@ -1411,7 +1413,7 @@
/* Populate the remaining parameters */
load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
memcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);
- ret = qsee_vote_for_clock(CLK_SFPB);
+ ret = qsee_vote_for_clock(data, CLK_SFPB);
if (ret) {
kzfree(img_data);
pr_warning("Unable to vote for SFPB clock");
@@ -1425,7 +1427,7 @@
kzfree(img_data);
if (ret) {
pr_err("scm_call to load failed : ret %d\n", ret);
- qsee_disable_clock_vote(CLK_SFPB);
+ qsee_disable_clock_vote(data, CLK_SFPB);
return -EIO;
}
@@ -1448,7 +1450,7 @@
ret = -EINVAL;
break;
}
- qsee_disable_clock_vote(CLK_SFPB);
+ qsee_disable_clock_vote(data, CLK_SFPB);
return ret;
}
@@ -1713,6 +1715,10 @@
pr_err("Unable to find the handle, exiting\n");
else
ret = qseecom_unload_app(data);
+ if (data->client.fast_load_enabled == true)
+ qsee_disable_clock_vote(data, CLK_SFPB);
+ if (data->client.perf_enabled == true)
+ qsee_disable_clock_vote(data, CLK_DFAB);
if (ret == 0) {
kzfree(data);
kzfree(*handle);
@@ -1770,9 +1776,9 @@
return -EINVAL;
}
if (high)
- return qsee_vote_for_clock(CLK_DFAB);
+ return qsee_vote_for_clock(handle->dev, CLK_DFAB);
else {
- qsee_disable_clock_vote(CLK_DFAB);
+ qsee_disable_clock_vote(handle->dev, CLK_DFAB);
return 0;
}
}
@@ -1802,7 +1808,8 @@
return 0;
}
-static int qsee_vote_for_clock(int32_t clk_type)
+static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
+ int32_t clk_type)
{
int ret = 0;
@@ -1826,10 +1833,13 @@
if (ret)
pr_err("DFAB Bandwidth req failed (%d)\n",
ret);
- else
+ else {
qsee_bw_count++;
+ data->client.perf_enabled = true;
+ }
} else {
qsee_bw_count++;
+ data->client.perf_enabled = true;
}
mutex_unlock(&qsee_bw_mutex);
break;
@@ -1850,10 +1860,13 @@
if (ret)
pr_err("SFPB Bandwidth req failed (%d)\n",
ret);
- else
+ else {
qsee_sfpb_bw_count++;
+ data->client.fast_load_enabled = true;
+ }
} else {
qsee_sfpb_bw_count++;
+ data->client.fast_load_enabled = true;
}
mutex_unlock(&qsee_bw_mutex);
break;
@@ -1864,7 +1877,8 @@
return ret;
}
-static void qsee_disable_clock_vote(int32_t clk_type)
+static void qsee_disable_clock_vote(struct qseecom_dev_handle *data,
+ int32_t clk_type)
{
int32_t ret = 0;
@@ -1880,7 +1894,7 @@
return;
}
- if ((qsee_bw_count > 0) && (qsee_bw_count-- == 1)) {
+ if (qsee_bw_count == 1) {
if (qsee_sfpb_bw_count > 0)
ret = msm_bus_scale_client_update_request(
qsee_perf_client, 2);
@@ -1894,6 +1908,13 @@
if (ret)
pr_err("SFPB Bandwidth req fail (%d)\n",
ret);
+ else {
+ qsee_bw_count--;
+ data->client.perf_enabled = false;
+ }
+ } else {
+ qsee_bw_count--;
+ data->client.perf_enabled = false;
}
mutex_unlock(&qsee_bw_mutex);
break;
@@ -1904,7 +1925,7 @@
mutex_unlock(&qsee_bw_mutex);
return;
}
- if ((qsee_sfpb_bw_count > 0) && (qsee_sfpb_bw_count-- == 1)) {
+ if (qsee_sfpb_bw_count == 1) {
if (qsee_bw_count > 0)
ret = msm_bus_scale_client_update_request(
qsee_perf_client, 1);
@@ -1918,6 +1939,13 @@
if (ret)
pr_err("SFPB Bandwidth req fail (%d)\n",
ret);
+ else {
+ qsee_sfpb_bw_count--;
+ data->client.fast_load_enabled = false;
+ }
+ } else {
+ qsee_sfpb_bw_count--;
+ data->client.fast_load_enabled = false;
}
mutex_unlock(&qsee_bw_mutex);
break;
@@ -2246,7 +2274,7 @@
}
case QSEECOM_IOCTL_PERF_ENABLE_REQ:{
atomic_inc(&data->ioctl_count);
- ret = qsee_vote_for_clock(CLK_DFAB);
+ ret = qsee_vote_for_clock(data, CLK_DFAB);
if (ret)
pr_err("Failed to vote for DFAB clock%d\n", ret);
atomic_dec(&data->ioctl_count);
@@ -2254,7 +2282,7 @@
}
case QSEECOM_IOCTL_PERF_DISABLE_REQ:{
atomic_inc(&data->ioctl_count);
- qsee_disable_clock_vote(CLK_DFAB);
+ qsee_disable_clock_vote(data, CLK_DFAB);
atomic_dec(&data->ioctl_count);
break;
}
@@ -2356,6 +2384,11 @@
return ret;
}
}
+ if (data->client.fast_load_enabled == true)
+ qsee_disable_clock_vote(data, CLK_SFPB);
+ if (data->client.perf_enabled == true)
+ qsee_disable_clock_vote(data, CLK_DFAB);
+
if (qseecom.qseos_version == QSEOS_VERSION_13) {
mutex_lock(&pil_access_lock);
if (pil_ref_cnt == 1)
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 76e9b9c..0b5449e 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -136,6 +136,10 @@
module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
+static inline int mmc_blk_part_switch(struct mmc_card *card,
+ struct mmc_blk_data *md);
+static int get_card_status(struct mmc_card *card, u32 *status, int retries);
+
static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
{
mqrq->packed_cmd = MMC_PACKED_NONE;
@@ -463,6 +467,38 @@
return ERR_PTR(err);
}
+static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
+ u32 retries_max)
+{
+ int err;
+ u32 retry_count = 0;
+
+ if (!status || !retries_max)
+ return -EINVAL;
+
+ do {
+ err = get_card_status(card, status, 5);
+ if (err)
+ break;
+
+ if (!R1_STATUS(*status) &&
+ (R1_CURRENT_STATE(*status) != R1_STATE_PRG))
+ break; /* RPMB programming operation complete */
+
+ /*
+ * Reschedule to give the MMC device a chance to continue
+ * processing the previous command without being polled too
+ * frequently.
+ */
+ usleep_range(1000, 5000);
+ } while (++retry_count < retries_max);
+
+ if (retry_count == retries_max)
+ err = -EPERM;
+
+ return err;
+}
+
static int mmc_blk_ioctl_cmd(struct block_device *bdev,
struct mmc_ioc_cmd __user *ic_ptr)
{
@@ -474,6 +510,8 @@
struct mmc_request mrq = {NULL};
struct scatterlist sg;
int err;
+ int is_rpmb = false;
+ u32 status = 0;
/*
* The caller must have CAP_SYS_RAWIO, and must be calling this on the
@@ -493,6 +531,9 @@
goto cmd_done;
}
+ if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
+ is_rpmb = true;
+
card = md->queue.card;
if (IS_ERR(card)) {
err = PTR_ERR(card);
@@ -543,12 +584,23 @@
mmc_claim_host(card->host);
+ err = mmc_blk_part_switch(card, md);
+ if (err)
+ goto cmd_rel_host;
+
if (idata->ic.is_acmd) {
err = mmc_app_cmd(card->host, card);
if (err)
goto cmd_rel_host;
}
+ if (is_rpmb) {
+ err = mmc_set_blockcount(card, data.blocks,
+ idata->ic.write_flag & (1 << 31));
+ if (err)
+ goto cmd_rel_host;
+ }
+
mmc_wait_for_req(card->host, &mrq);
if (cmd.error) {
@@ -584,6 +636,18 @@
}
}
+ if (is_rpmb) {
+ /*
+ * Ensure RPMB command has completed by polling CMD13
+ * "Send Status".
+ */
+ err = ioctl_rpmb_card_status_poll(card, &status, 5);
+ if (err)
+ dev_err(mmc_dev(card->host),
+ "%s: Card Status=0x%08X, error %d\n",
+ __func__, status, err);
+ }
+
cmd_rel_host:
mmc_release_host(card->host);
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index f91ba89..89f834a 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -2214,6 +2214,20 @@
}
EXPORT_SYMBOL(mmc_set_blocklen);
+int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
+ bool is_rel_write)
+{
+ struct mmc_command cmd = {0};
+
+ cmd.opcode = MMC_SET_BLOCK_COUNT;
+ cmd.arg = blockcount & 0x0000FFFF;
+ if (is_rel_write)
+ cmd.arg |= 1 << 31;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+ return mmc_wait_for_cmd(card->host, &cmd, 5);
+}
+EXPORT_SYMBOL(mmc_set_blockcount);
+
static void mmc_hw_reset_for_init(struct mmc_host *host)
{
if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index c8b47b9..00dc5bf 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -6536,19 +6536,19 @@
#if CONFIG_DEBUG_FS
static void msmsdcc_print_pm_stats(struct msmsdcc_host *host, ktime_t start,
- const char *func)
+ const char *func, int err)
{
ktime_t diff;
- if (host->print_pm_stats) {
+ if (host->print_pm_stats && !err) {
diff = ktime_sub(ktime_get(), start);
- pr_info("%s: %s: Completed in %llu usec\n", func,
- mmc_hostname(host->mmc), (u64)ktime_to_us(diff));
+ pr_info("%s: %s: Completed in %llu usec\n",
+ mmc_hostname(host->mmc), func, (u64)ktime_to_us(diff));
}
}
#else
static void msmsdcc_print_pm_stats(struct msmsdcc_host *host, ktime_t start,
- const char *func) {}
+ const char *func, int err) {}
#endif
static int
@@ -6615,7 +6615,7 @@
out:
/* set bus bandwidth to 0 immediately */
msmsdcc_msm_bus_cancel_work_and_set_vote(host, NULL);
- msmsdcc_print_pm_stats(host, start, __func__);
+ msmsdcc_print_pm_stats(host, start, __func__, rc);
return rc;
}
@@ -6664,7 +6664,7 @@
host->pending_resume = false;
pr_debug("%s: %s: end\n", mmc_hostname(mmc), __func__);
out:
- msmsdcc_print_pm_stats(host, start, __func__);
+ msmsdcc_print_pm_stats(host, start, __func__, 0);
return 0;
}
@@ -6701,7 +6701,7 @@
if (!pm_runtime_suspended(dev))
rc = msmsdcc_runtime_suspend(dev);
out:
- msmsdcc_print_pm_stats(host, start, __func__);
+ msmsdcc_print_pm_stats(host, start, __func__, rc);
return rc;
}
@@ -6757,7 +6757,7 @@
enable_irq(host->plat->status_irq);
}
out:
- msmsdcc_print_pm_stats(host, start, __func__);
+ msmsdcc_print_pm_stats(host, start, __func__, rc);
return rc;
}
diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig
index 872a9b5..9394986 100644
--- a/drivers/platform/msm/Kconfig
+++ b/drivers/platform/msm/Kconfig
@@ -85,5 +85,11 @@
for the IPA core.
Kernel and user-space processes can call the IPA driver
to configure IPA core.
+config MSM_AVTIMER
+ tristate "Avtimer Driver"
+ depends on ARCH_MSM8960
+ help
+ This driver gets the Q6 out of power collapsed state and
+ exposes ioctl control to read avtimer tick.
endmenu
diff --git a/drivers/platform/msm/Makefile b/drivers/platform/msm/Makefile
index 0a755d3..919c07f 100644
--- a/drivers/platform/msm/Makefile
+++ b/drivers/platform/msm/Makefile
@@ -8,3 +8,4 @@
obj-$(CONFIG_QPNP_PWM) += qpnp-pwm.o
obj-$(CONFIG_QPNP_POWER_ON) += qpnp-power-on.o
obj-$(CONFIG_QPNP_CLKDIV) += qpnp-clkdiv.o
+obj-$(CONFIG_MSM_AVTIMER) += avtimer.o
diff --git a/drivers/platform/msm/avtimer.c b/drivers/platform/msm/avtimer.c
new file mode 100644
index 0000000..f513ceb
--- /dev/null
+++ b/drivers/platform/msm/avtimer.c
@@ -0,0 +1,369 @@
+
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 and
+* only version 2 as published by the Free Software Foundation.
+
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/avtimer.h>
+#include <mach/qdsp6v2/apr.h>
+
+#define DEVICE_NAME "avtimer"
+
+
+#define ADSP_CMD_SET_POWER_COLLAPSE_STATE 0x0001115C
+
+static int major; /* Major number assigned to our device driver */
+struct avtimer_t {
+ struct cdev myc;
+ struct class *avtimer_class;
+ struct mutex avtimer_lock;
+ int avtimer_open_cnt;
+ struct dev_avtimer_data *avtimer_pdata;
+};
+static struct avtimer_t avtimer;
+
+static struct apr_svc *core_handle;
+
+struct adsp_power_collapse {
+ struct apr_hdr hdr;
+ uint32_t power_collapse;
+};
+
+static int32_t avcs_core_callback(struct apr_client_data *data, void *priv)
+{
+ uint32_t *payload;
+
+ pr_debug("core msg: payload len = %u, apr resp opcode = 0x%X\n",
+ data->payload_size, data->opcode);
+
+ switch (data->opcode) {
+
+ case APR_BASIC_RSP_RESULT:{
+
+ if (data->payload_size == 0) {
+ pr_err("%s: APR_BASIC_RSP_RESULT No Payload ",
+ __func__);
+ return 0;
+ }
+
+ payload = data->payload;
+
+ switch (payload[0]) {
+
+ case ADSP_CMD_SET_POWER_COLLAPSE_STATE:
+ pr_debug("CMD_SET_POWER_COLLAPSE_STATE status[0x%x]\n",
+ payload[1]);
+ break;
+ default:
+ pr_err("Invalid cmd rsp[0x%x][0x%x]\n",
+ payload[0], payload[1]);
+ break;
+ }
+ break;
+ }
+ case RESET_EVENTS:{
+ pr_debug("Reset event received in Core service");
+ apr_reset(core_handle);
+ core_handle = NULL;
+ break;
+ }
+
+ default:
+ pr_err("Message id from adsp core svc: %d\n", data->opcode);
+ break;
+ }
+
+ return 0;
+}
+
+int avcs_core_open(void)
+{
+ if (core_handle == NULL)
+ core_handle = apr_register("ADSP", "CORE",
+ avcs_core_callback, 0xFFFFFFFF, NULL);
+
+ pr_debug("Open_q %p\n", core_handle);
+ if (core_handle == NULL) {
+ pr_err("%s: Unable to register CORE\n", __func__);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+int avcs_core_disable_power_collapse(int disable)
+{
+ struct adsp_power_collapse pc;
+ int rc = 0;
+
+ if (core_handle) {
+ pc.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_EVENT,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ pc.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(uint32_t));
+ pc.hdr.src_port = 0;
+ pc.hdr.dest_port = 0;
+ pc.hdr.token = 0;
+ pc.hdr.opcode = ADSP_CMD_SET_POWER_COLLAPSE_STATE;
+ /*
+ * When power_collapse set to 1 -- If the aDSP is in the power
+ * collapsed state when this command is received, it is awakened
+ * from this state. The aDSP does not power collapse again until
+ * the client revokes this command
+ * When power_collapse set to 0 -- This indicates to the aDSP
+ * that the remote client does not need it to be out of power
+ * collapse any longer. This may not always put the aDSP into
+ * power collapse; the aDSP must honor an internal client's
+ * power requirements as well.
+ */
+ pc.power_collapse = disable;
+ rc = apr_send_pkt(core_handle, (uint32_t *)&pc);
+ if (rc < 0) {
+ pr_debug("disable power collapse = %d failed\n",
+ disable);
+ return rc;
+ }
+ pr_debug("disable power collapse = %d\n", disable);
+ }
+ return 0;
+}
+
+static int avtimer_open(struct inode *inode, struct file *file)
+{
+ int rc = 0;
+ struct avtimer_t *pavtimer = &avtimer;
+
+ pr_debug("avtimer_open\n");
+ mutex_lock(&pavtimer->avtimer_lock);
+
+ if (pavtimer->avtimer_open_cnt != 0) {
+ pavtimer->avtimer_open_cnt++;
+ pr_debug("%s: opened avtimer open count=%d\n",
+ __func__, pavtimer->avtimer_open_cnt);
+ mutex_unlock(&pavtimer->avtimer_lock);
+ return 0;
+ }
+ try_module_get(THIS_MODULE);
+
+ rc = avcs_core_open();
+ if (core_handle)
+ rc = avcs_core_disable_power_collapse(1);
+
+ pavtimer->avtimer_open_cnt++;
+ pr_debug("%s: opened avtimer open count=%d\n",
+ __func__, pavtimer->avtimer_open_cnt);
+ mutex_unlock(&pavtimer->avtimer_lock);
+ pr_debug("avtimer_open leave rc=%d\n", rc);
+
+ return rc;
+}
+
+static int avtimer_release(struct inode *inode, struct file *file)
+{
+ int rc = 0;
+ struct avtimer_t *pavtimer = &avtimer;
+
+ mutex_lock(&pavtimer->avtimer_lock);
+ pavtimer->avtimer_open_cnt--;
+
+ if (core_handle && pavtimer->avtimer_open_cnt == 0)
+ rc = avcs_core_disable_power_collapse(0);
+
+ pr_debug("device_release(%p,%p) open count=%d\n",
+ inode, file, pavtimer->avtimer_open_cnt);
+
+ module_put(THIS_MODULE);
+
+ mutex_unlock(&pavtimer->avtimer_lock);
+
+ return rc;
+}
+
+/*
+ * ioctl call provides GET_AVTIMER
+ */
+static long avtimer_ioctl(struct file *file, unsigned int ioctl_num,
+ unsigned long ioctl_param)
+{
+ struct avtimer_t *pavtimer = &avtimer;
+ pr_debug("avtimer_ioctl: ioctlnum=%d,param=%lx\n",
+ ioctl_num, ioctl_param);
+
+ switch (ioctl_num) {
+ case IOCTL_GET_AVTIMER_TICK:
+ {
+ void __iomem *p_avtimer_msw = NULL, *p_avtimer_lsw = NULL;
+ uint32_t avtimer_msw_1st = 0, avtimer_lsw = 0;
+ uint32_t avtimer_msw_2nd = 0;
+ uint64_t avtimer_tick;
+
+ if (pavtimer->avtimer_pdata) {
+ p_avtimer_lsw = ioremap(
+ pavtimer->avtimer_pdata->avtimer_lsw_phy_addr, 4);
+ p_avtimer_msw = ioremap(
+ pavtimer->avtimer_pdata->avtimer_msw_phy_addr, 4);
+ }
+ if (!p_avtimer_lsw || !p_avtimer_msw) {
+ pr_err("ioremap failed\n");
+ return -EIO;
+ }
+ do {
+ avtimer_msw_1st = ioread32(p_avtimer_msw);
+ avtimer_lsw = ioread32(p_avtimer_lsw);
+ avtimer_msw_2nd = ioread32(p_avtimer_msw);
+ } while (avtimer_msw_1st != avtimer_msw_2nd);
+
+ avtimer_tick =
+ ((uint64_t) avtimer_msw_1st << 32) | avtimer_lsw;
+
+ pr_debug("AV Timer tick: msw: %d, lsw: %d\n", avtimer_msw_1st,
+ avtimer_lsw);
+ if (copy_to_user((void *) ioctl_param, &avtimer_tick,
+ sizeof(avtimer_tick))) {
+ pr_err("copy_to_user failed\n");
+ iounmap(p_avtimer_lsw);
+ iounmap(p_avtimer_msw);
+ return -EFAULT;
+ }
+ iounmap(p_avtimer_lsw);
+ iounmap(p_avtimer_msw);
+ }
+ break;
+
+ default:
+ pr_err("invalid cmd\n");
+ break;
+ }
+
+ return 0;
+}
+
+static const struct file_operations avtimer_fops = {
+ .unlocked_ioctl = avtimer_ioctl,
+ .open = avtimer_open,
+ .release = avtimer_release
+};
+
+static int dev_avtimer_probe(struct platform_device *pdev)
+{
+ int result;
+ dev_t dev = MKDEV(major, 0);
+ struct device *device_handle;
+ struct avtimer_t *pavtimer = &avtimer;
+
+ /* get the device number */
+ if (major)
+ result = register_chrdev_region(dev, 1, DEVICE_NAME);
+ else {
+ result = alloc_chrdev_region(&dev, 0, 1, DEVICE_NAME);
+ major = MAJOR(dev);
+ }
+
+ if (result < 0) {
+ pr_err("Registering avtimer device failed\n");
+ return result;
+ }
+
+ pavtimer->avtimer_class = class_create(THIS_MODULE, "avtimer");
+ if (IS_ERR(pavtimer->avtimer_class)) {
+ result = PTR_ERR(pavtimer->avtimer_class);
+ pr_err("Error creating avtimer class: %d\n", result);
+ goto unregister_chrdev_region;
+ }
+ pavtimer->avtimer_pdata = pdev->dev.platform_data;
+
+ cdev_init(&pavtimer->myc, &avtimer_fops);
+ result = cdev_add(&pavtimer->myc, dev, 1);
+
+ if (result < 0) {
+ pr_err("Registering file operations failed\n");
+ goto class_destroy;
+ }
+
+ device_handle = device_create(pavtimer->avtimer_class,
+ NULL, pavtimer->myc.dev, NULL, "avtimer");
+ if (IS_ERR(device_handle)) {
+ result = PTR_ERR(device_handle);
+ pr_err("device_create failed: %d\n", result);
+ goto class_destroy;
+ }
+
+ mutex_init(&pavtimer->avtimer_lock);
+ core_handle = NULL;
+ pavtimer->avtimer_open_cnt = 0;
+
+ pr_debug("Device create done for avtimer major=%d\n", major);
+
+ return 0;
+
+class_destroy:
+ class_destroy(pavtimer->avtimer_class);
+unregister_chrdev_region:
+ unregister_chrdev_region(MKDEV(major, 0), 1);
+ return result;
+
+}
+
+static int __devexit dev_avtimer_remove(struct platform_device *pdev)
+{
+ struct avtimer_t *pavtimer = &avtimer;
+
+ pr_debug("dev_avtimer_remove\n");
+
+ device_destroy(pavtimer->avtimer_class, pavtimer->myc.dev);
+ cdev_del(&pavtimer->myc);
+ class_destroy(pavtimer->avtimer_class);
+ unregister_chrdev_region(MKDEV(major, 0), 1);
+
+ return 0;
+}
+
+static struct platform_driver dev_avtimer_driver = {
+ .probe = dev_avtimer_probe,
+ .remove = __exit_p(dev_avtimer_remove),
+ .driver = {.name = "dev_avtimer"}
+};
+
+static int __init avtimer_init(void)
+{
+ s32 rc;
+ rc = platform_driver_register(&dev_avtimer_driver);
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("platform_driver_register failed.\n");
+ goto error_platform_driver;
+ }
+ pr_debug("dev_avtimer_init : done\n");
+
+ return 0;
+error_platform_driver:
+
+ pr_err("encounterd error\n");
+ return -ENODEV;
+}
+
+static void __exit avtimer_exit(void)
+{
+ pr_debug("avtimer_exit\n");
+ platform_driver_unregister(&dev_avtimer_driver);
+}
+
+module_init(avtimer_init);
+module_exit(avtimer_exit);
+
+MODULE_DESCRIPTION("avtimer driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/msm/ipa/ipa.c b/drivers/platform/msm/ipa/ipa.c
index 8f68ef5..7973cfe 100644
--- a/drivers/platform/msm/ipa/ipa.c
+++ b/drivers/platform/msm/ipa/ipa.c
@@ -637,7 +637,7 @@
switch (tx_pkt->cnt) {
case 1:
- ipa_write_done(&tx_pkt->work);
+ ipa_wq_write_done(&tx_pkt->work);
break;
case 0xFFFF:
/* reached end of set */
@@ -651,7 +651,7 @@
list_first_entry(&sys->head_desc_list,
struct ipa_tx_pkt_wrapper, link);
spin_unlock_irqrestore(&sys->spinlock, irq_flags);
- ipa_write_done(&tx_pkt->work);
+ ipa_wq_write_done(&tx_pkt->work);
break;
default:
/* keep looping till reach the end of the set */
diff --git a/drivers/platform/msm/ipa/ipa_bridge.c b/drivers/platform/msm/ipa/ipa_bridge.c
index cf51ab6..a6221b8 100644
--- a/drivers/platform/msm/ipa/ipa_bridge.c
+++ b/drivers/platform/msm/ipa/ipa_bridge.c
@@ -24,7 +24,7 @@
static int polling_min_sleep[IPA_DIR_MAX] = { 950, 950 };
static int polling_max_sleep[IPA_DIR_MAX] = { 1050, 1050 };
-static int polling_inactivity[IPA_DIR_MAX] = { 20, 20 };
+static int polling_inactivity[IPA_DIR_MAX] = { 4, 4 };
struct ipa_pkt_info {
void *buffer;
@@ -167,6 +167,34 @@
return -ENOMEM;
}
+static int ipa_reclaim_tx(struct ipa_bridge_pipe_context *sys_tx, bool all)
+{
+ struct sps_iovec iov;
+ struct ipa_pkt_info *tx_pkt;
+ int cnt = 0;
+ int ret;
+
+ do {
+ iov.addr = 0;
+ ret = sps_get_iovec(sys_tx->pipe, &iov);
+ if (ret || iov.addr == 0) {
+ break;
+ } else {
+ tx_pkt = list_first_entry(&sys_tx->head_desc_list,
+ struct ipa_pkt_info,
+ list_node);
+ list_move_tail(&tx_pkt->list_node,
+ &sys_tx->free_desc_list);
+ sys_tx->len--;
+ sys_tx->free_len++;
+ tx_pkt->len = ~0;
+ cnt++;
+ }
+ } while (all);
+
+ return cnt;
+}
+
static void ipa_do_bridge_work(enum ipa_bridge_dir dir)
{
struct ipa_bridge_pipe_context *sys_rx = &bridge[2 * dir];
@@ -180,22 +208,9 @@
while (1) {
++inactive_cycles;
- iov.addr = 0;
- ret = sps_get_iovec(sys_tx->pipe, &iov);
- if (ret || iov.addr == 0) {
- /* no-op */
- } else {
- inactive_cycles = 0;
- tx_pkt = list_first_entry(&sys_tx->head_desc_list,
- struct ipa_pkt_info,
- list_node);
- list_move_tail(&tx_pkt->list_node,
- &sys_tx->free_desc_list);
- sys_tx->len--;
- sys_tx->free_len++;
- tx_pkt->len = ~0;
- }
+ if (ipa_reclaim_tx(sys_tx, false))
+ inactive_cycles = 0;
iov.addr = 0;
ret = sps_get_iovec(sys_rx->pipe, &iov);
@@ -216,7 +231,7 @@
tmp_pkt = kmalloc(sizeof(struct ipa_pkt_info),
GFP_KERNEL);
if (!tmp_pkt) {
- pr_err_ratelimited("%s: unable to alloc tx_pkt_info\n",
+ pr_debug_ratelimited("%s: unable to alloc tx_pkt_info\n",
__func__);
usleep_range(polling_min_sleep[dir],
polling_max_sleep[dir]);
@@ -226,7 +241,7 @@
tmp_pkt->buffer = kmalloc(IPA_RX_SKB_SIZE,
GFP_KERNEL | GFP_DMA);
if (!tmp_pkt->buffer) {
- pr_err_ratelimited("%s: unable to alloc tx_pkt_buffer\n",
+ pr_debug_ratelimited("%s: unable to alloc tx_pkt_buffer\n",
__func__);
kfree(tmp_pkt);
usleep_range(polling_min_sleep[dir],
@@ -240,7 +255,7 @@
DMA_BIDIRECTIONAL);
if (tmp_pkt->dma_address == 0 ||
tmp_pkt->dma_address == ~0) {
- pr_err_ratelimited("%s: dma_map_single failure %p for %p\n",
+ pr_debug_ratelimited("%s: dma_map_single failure %p for %p\n",
__func__,
(void *)tmp_pkt->dma_address,
tmp_pkt->buffer);
@@ -271,7 +286,7 @@
SPS_IOVEC_FLAG_EOT);
if (ret) {
list_del(&tx_pkt->list_node);
- pr_err_ratelimited("%s: sps_transfer_one failed %d\n",
+ pr_debug_ratelimited("%s: sps_transfer_one failed %d\n",
__func__, ret);
usleep_range(polling_min_sleep[dir],
polling_max_sleep[dir]);
@@ -289,9 +304,10 @@
SPS_IOVEC_FLAG_INT |
SPS_IOVEC_FLAG_EOT);
if (ret) {
- pr_err_ratelimited("%s: fail to add to TX dir=%d\n",
+ pr_debug_ratelimited("%s: fail to add to TX dir=%d\n",
__func__, dir);
list_del(&rx_pkt->list_node);
+ ipa_reclaim_tx(sys_tx, true);
usleep_range(polling_min_sleep[dir],
polling_max_sleep[dir]);
goto retry_add_tx;
@@ -306,7 +322,7 @@
}
}
-static void ipa_rx_notify(struct sps_event_notify *notify)
+static void ipa_sps_irq_rx_notify(struct sps_event_notify *notify)
{
switch (notify->event_id) {
case SPS_EVENT_EOT:
@@ -457,7 +473,7 @@
sys->register_event.options = SPS_O_EOT;
sys->register_event.mode = SPS_TRIGGER_CALLBACK;
sys->register_event.xfer_done = NULL;
- sys->register_event.callback = ipa_rx_notify;
+ sys->register_event.callback = ipa_sps_irq_rx_notify;
sys->register_event.user = NULL;
ret = sps_register_event(sys->pipe, &sys->register_event);
if (ret < 0) {
diff --git a/drivers/platform/msm/ipa/ipa_client.c b/drivers/platform/msm/ipa/ipa_client.c
index 823b17d..dc9da7d 100644
--- a/drivers/platform/msm/ipa/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_client.c
@@ -157,7 +157,7 @@
ep->valid = 1;
ep->client = in->client;
- ep->notify = in->notify;
+ ep->client_notify = in->notify;
ep->priv = in->priv;
if (ipa_cfg_ep(ipa_ep_idx, &in->ipa_ep_cfg)) {
diff --git a/drivers/platform/msm/ipa/ipa_dp.c b/drivers/platform/msm/ipa/ipa_dp.c
index c677a6e..4de19d2 100644
--- a/drivers/platform/msm/ipa/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_dp.c
@@ -18,12 +18,21 @@
#define list_next_entry(pos, member) \
list_entry(pos->member.next, typeof(*pos), member)
+#define IPA_LAST_DESC_COOKIE 0xFFFF
/**
- * ipa_write_done - this function will be (enevtually) called when a Tx
+ * ipa_wq_write_done() - this function will be (eventually) called when a Tx
* operation is complete
- * @work: work_struct used by the work queue
+ * @work: work_struct used by the work queue
+ *
+ * Will be called in deferred context.
+ * - invoke the callback supplied by the client who sent this command
+ * - iterate over all packets and validate that
+ * the order for sent packet is the same as expected
+ * - delete all the tx packet descriptors from the system
+ * pipe context (not needed anymore)
+ * - return the tx buffer back to one_kb_no_straddle_pool
*/
-void ipa_write_done(struct work_struct *work)
+void ipa_wq_write_done(struct work_struct *work)
{
struct ipa_tx_pkt_wrapper *tx_pkt;
struct ipa_tx_pkt_wrapper *next_pkt;
@@ -40,7 +49,7 @@
if (unlikely(cnt == 0))
WARN_ON(1);
- if (cnt > 1 && cnt != 0xFFFF)
+ if (cnt > 1 && cnt != IPA_LAST_DESC_COOKIE)
mult = tx_pkt->mult;
for (i = 0; i < cnt; i++) {
@@ -77,6 +86,14 @@
* @sys: system pipe context
* @desc: descriptor to send
*
+ * - Allocate tx_packet wrapper
+ * - Allocate a bounce buffer due to HW constrains
+ * (This buffer will be used for the DMA command)
+ * - Copy the data (desc->pyld) to the bounce buffer
+ * - transfer data to the IPA
+ * - after the transfer was done the SPS will
+ * notify the sending user via ipa_sps_irq_comp_tx()
+ *
* Return codes: 0: success, -EFAULT: failure
*/
int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc)
@@ -117,7 +134,7 @@
}
INIT_LIST_HEAD(&tx_pkt->link);
- INIT_WORK(&tx_pkt->work, ipa_write_done);
+ INIT_WORK(&tx_pkt->work, ipa_wq_write_done);
tx_pkt->type = desc->type;
tx_pkt->cnt = 1; /* only 1 desc in this "set" */
@@ -175,7 +192,20 @@
* ipa_send() - Send multiple descriptors in one HW transaction
* @sys: system pipe context
* @num_desc: number of packets
- * @desc: packets to send
+ * @desc: packets to send (may be immediate command or data)
+ *
+ * This function is used for system-to-bam connection.
+ * - SPS driver expect struct sps_transfer which will contain all the data
+ * for a transaction
+ * - The sps_transfer struct will be pointing to bounce buffers for
+ * its DMA command (immediate command and data)
+ * - ipa_tx_pkt_wrapper will be used for each ipa
+ * descriptor (allocated from wrappers cache)
+ * - The wrapper struct will be configured for each ipa-desc payload and will
+ * contain information which will be later used by the user callbacks
+ * - each transfer will be made by calling to sps_transfer()
+ * - Each packet (command or data) that will be sent will also be saved in
+ * ipa_sys_context for later check that all data was sent
*
* Return codes: 0: success, -EFAULT: failure
*/
@@ -187,12 +217,20 @@
struct sps_iovec *iovec;
unsigned long irq_flags;
dma_addr_t dma_addr;
- int i;
+ int i = 0;
int j;
int result;
- int fail_dma_wrap;
+ int fail_dma_wrap = 0;
uint size = num_desc * sizeof(struct sps_iovec);
+ transfer.iovec = dma_alloc_coherent(NULL, size, &dma_addr, 0);
+ transfer.iovec_phys = dma_addr;
+ transfer.iovec_count = num_desc;
+ if (!transfer.iovec) {
+ IPAERR("fail to alloc DMA mem for sps xfr buff\n");
+ goto failure;
+ }
+
for (i = 0; i < num_desc; i++) {
fail_dma_wrap = 0;
tx_pkt = kmem_cache_zalloc(ipa_ctx->tx_pkt_wrapper_cache,
@@ -207,14 +245,6 @@
*/
if (i == 0) {
transfer.user = tx_pkt;
- transfer.iovec =
- dma_alloc_coherent(NULL, size, &dma_addr, 0);
- transfer.iovec_phys = dma_addr;
- transfer.iovec_count = num_desc;
- if (!transfer.iovec) {
- IPAERR("fail alloc DMA mem for sps xfr buff\n");
- goto failure;
- }
tx_pkt->mult.phys_base = dma_addr;
tx_pkt->mult.base = transfer.iovec;
@@ -226,7 +256,7 @@
iovec->flags = 0;
INIT_LIST_HEAD(&tx_pkt->link);
- INIT_WORK(&tx_pkt->work, ipa_write_done);
+ INIT_WORK(&tx_pkt->work, ipa_wq_write_done);
tx_pkt->type = desc[i].type;
tx_pkt->mem.base = desc[i].pyld;
@@ -263,6 +293,10 @@
tx_pkt->user1 = desc[i].user1;
tx_pkt->user2 = desc[i].user2;
+ /*
+ * Point the iovec to the bounce buffer and
+ * add this packet to system pipe context.
+ */
iovec->addr = tx_pkt->mem.phys_base;
spin_lock_irqsave(&sys->spinlock, irq_flags);
list_add_tail(&tx_pkt->link, &sys->head_desc_list);
@@ -284,7 +318,7 @@
iovec->flags |= (SPS_IOVEC_FLAG_EOT |
SPS_IOVEC_FLAG_INT);
/* "mark" the last desc */
- tx_pkt->cnt = 0xFFFF;
+ tx_pkt->cnt = IPA_LAST_DESC_COOKIE;
}
}
@@ -320,7 +354,7 @@
}
/**
- * ipa_cmd_ack - callback function which will be called by SPS driver after an
+ * ipa_sps_irq_cmd_ack - callback function which will be called by SPS driver after an
* immediate command is complete.
* @user1: pointer to the descriptor of the transfer
* @user2:
@@ -328,7 +362,7 @@
* Complete the immediate commands completion object, this will release the
* thread which waits on this completion object (ipa_send_cmd())
*/
-static void ipa_cmd_ack(void *user1, void *user2)
+static void ipa_sps_irq_cmd_ack(void *user1, void *user2)
{
struct ipa_desc *desc = (struct ipa_desc *)user1;
@@ -340,11 +374,13 @@
/**
* ipa_send_cmd - send immediate commands
- * @num_desc: number of descriptors within the descr struct
+ * @num_desc: number of descriptors within the desc struct
* @descr: descriptor structure
*
* Function will block till command gets ACK from IPA HW, caller needs
* to free any resources it allocated after function returns
+ * The callback in ipa_desc should not be set by the caller
+ * for this function.
*/
int ipa_send_cmd(u16 num_desc, struct ipa_desc *descr)
{
@@ -353,11 +389,10 @@
if (num_desc == 1) {
init_completion(&descr->xfer_done);
- /* client should not set these */
if (descr->callback || descr->user1)
WARN_ON(1);
- descr->callback = ipa_cmd_ack;
+ descr->callback = ipa_sps_irq_cmd_ack;
descr->user1 = descr;
if (ipa_send_one(&ipa_ctx->sys[IPA_A5_CMD], descr)) {
IPAERR("fail to send immediate command\n");
@@ -368,11 +403,10 @@
desc = &descr[num_desc - 1];
init_completion(&desc->xfer_done);
- /* client should not set these */
if (desc->callback || desc->user1)
WARN_ON(1);
- desc->callback = ipa_cmd_ack;
+ desc->callback = ipa_sps_irq_cmd_ack;
desc->user1 = desc;
if (ipa_send(&ipa_ctx->sys[IPA_A5_CMD], num_desc, descr)) {
IPAERR("fail to send multiple immediate command set\n");
@@ -385,11 +419,15 @@
}
/**
- * ipa_tx_notify() - Callback function which will be called by the SPS driver
- * after a Tx operation is complete. Called in an interrupt context.
+ * ipa_sps_irq_tx_notify() - Callback function which will be called by
+ * the SPS driver after a Tx operation is complete.
+ * Called in an interrupt context.
* @notify: SPS driver supplied notification struct
+ *
+ * This function defers the work for this event to the tx workqueue.
+ * This event will be later handled by ipa_wq_write_done.
*/
-static void ipa_tx_notify(struct sps_event_notify *notify)
+static void ipa_sps_irq_tx_notify(struct sps_event_notify *notify)
{
struct ipa_tx_pkt_wrapper *tx_pkt;
@@ -473,19 +511,19 @@
mux_hdr = (struct ipa_a5_mux_hdr *)rx_skb->data;
IPADBG("RX pkt len=%d IID=0x%x src=%d, flags=0x%x, meta=0x%x\n",
- rx_skb->len, ntohs(mux_hdr->interface_id),
- mux_hdr->src_pipe_index,
- mux_hdr->flags, ntohl(mux_hdr->metadata));
+ rx_skb->len, ntohs(mux_hdr->interface_id),
+ mux_hdr->src_pipe_index,
+ mux_hdr->flags, ntohl(mux_hdr->metadata));
IPA_DUMP_BUFF(rx_skb->data, 0, rx_skb->len);
if (mux_hdr->src_pipe_index >= IPA_NUM_PIPES ||
- !ipa_ctx->ep[mux_hdr->src_pipe_index].valid ||
- !ipa_ctx->ep[mux_hdr->src_pipe_index].notify) {
- IPAERR("drop pipe=%d ep_valid=%d notify=%p\n",
- mux_hdr->src_pipe_index,
- ipa_ctx->ep[mux_hdr->src_pipe_index].valid,
- ipa_ctx->ep[mux_hdr->src_pipe_index].notify);
+ !ipa_ctx->ep[mux_hdr->src_pipe_index].valid ||
+ !ipa_ctx->ep[mux_hdr->src_pipe_index].client_notify) {
+ IPAERR("drop pipe=%d ep_valid=%d client_notify=%p\n",
+ mux_hdr->src_pipe_index,
+ ipa_ctx->ep[mux_hdr->src_pipe_index].valid,
+ ipa_ctx->ep[mux_hdr->src_pipe_index].client_notify);
dev_kfree_skb_any(rx_skb);
ipa_replenish_rx_cache();
continue;
@@ -505,7 +543,8 @@
IPADBG("pulling %d bytes from skb\n", pull_len);
skb_pull(rx_skb, pull_len);
- ep->notify(ep->priv, IPA_RECEIVE, (unsigned long)(rx_skb));
+ ep->client_notify(ep->priv, IPA_RECEIVE,
+ (unsigned long)(rx_skb));
ipa_replenish_rx_cache();
} while (1);
}
@@ -587,7 +626,7 @@
* This comes to prevent the CPU from handling too many interrupts when the
* throughput is high.
*/
-static void ipa_rx_notify(struct sps_event_notify *notify)
+static void ipa_sps_irq_rx_notify(struct sps_event_notify *notify)
{
struct ipa_rx_pkt_wrapper *rx_pkt;
@@ -609,9 +648,17 @@
/**
* ipa_setup_sys_pipe() - Setup an IPA end-point in system-BAM mode and perform
* IPA EP configuration
- * @sys_in: [in] input needed to setup BAM pipe and config EP
+ * @sys_in: [in] input needed to setup BAM pipe and configure EP
* @clnt_hdl: [out] client handle
*
+ * - configure the end-point registers with the supplied
+ * parameters from the user.
+ * - call SPS APIs to create a system-to-bam connection with IPA.
+ * - allocate descriptor FIFO
+ * - register callback function(ipa_sps_irq_rx_notify or
+ * ipa_sps_irq_tx_notify - depends on client type) in case the driver is
+ * not configured to polling mode
+ *
* Returns: 0 on success, negative on failure
*/
int ipa_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
@@ -729,34 +776,21 @@
}
if (!ipa_ctx->polling_mode) {
- if (IPA_CLIENT_IS_CONS(sys_in->client)) {
- ipa_ctx->sys[sys_idx].event.options = SPS_O_EOT;
- ipa_ctx->sys[sys_idx].event.mode = SPS_TRIGGER_CALLBACK;
- ipa_ctx->sys[sys_idx].event.xfer_done = NULL;
- ipa_ctx->sys[sys_idx].event.callback = ipa_rx_notify;
- ipa_ctx->sys[sys_idx].event.user =
- &ipa_ctx->sys[sys_idx];
- result =
- sps_register_event(ipa_ctx->ep[ipa_ep_idx].ep_hdl,
- &ipa_ctx->sys[sys_idx].event);
- if (result < 0) {
- IPAERR("rx register event error %d\n", result);
- goto fail_register_event;
- }
- } else {
- ipa_ctx->sys[sys_idx].event.options = SPS_O_EOT;
- ipa_ctx->sys[sys_idx].event.mode = SPS_TRIGGER_CALLBACK;
- ipa_ctx->sys[sys_idx].event.xfer_done = NULL;
- ipa_ctx->sys[sys_idx].event.callback = ipa_tx_notify;
- ipa_ctx->sys[sys_idx].event.user =
- &ipa_ctx->sys[sys_idx];
- result =
- sps_register_event(ipa_ctx->ep[ipa_ep_idx].ep_hdl,
- &ipa_ctx->sys[sys_idx].event);
- if (result < 0) {
- IPAERR("tx register event error %d\n", result);
- goto fail_register_event;
- }
+
+ ipa_ctx->sys[sys_idx].event.options = SPS_O_EOT;
+ ipa_ctx->sys[sys_idx].event.mode = SPS_TRIGGER_CALLBACK;
+ ipa_ctx->sys[sys_idx].event.xfer_done = NULL;
+ ipa_ctx->sys[sys_idx].event.user =
+ &ipa_ctx->sys[sys_idx];
+ ipa_ctx->sys[sys_idx].event.callback =
+ IPA_CLIENT_IS_CONS(sys_in->client) ?
+ ipa_sps_irq_rx_notify :
+ ipa_sps_irq_tx_notify;
+ result = sps_register_event(ipa_ctx->ep[ipa_ep_idx].ep_hdl,
+ &ipa_ctx->sys[sys_idx].event);
+ if (result < 0) {
+ IPAERR("register event error %d\n", result);
+ goto fail_register_event;
}
}
@@ -801,21 +835,25 @@
EXPORT_SYMBOL(ipa_teardown_sys_pipe);
/**
- * ipa_tx_comp() - Callback function which will call the user supplied callback
- * function to release the skb, or release it on its own if no callback function
- * was supplied.
+ * ipa_tx_comp_usr_notify_release() - Callback function which will call the
+ * user supplied callback function to release the skb, or release it on
+ * its own if no callback function was supplied.
* @user1
* @user2
+ *
+ * This notified callback (client_notify) is for
+ * the destination client.
+ * This function is supplied in ipa_connect.
*/
-static void ipa_tx_comp(void *user1, void *user2)
+static void ipa_tx_comp_usr_notify_release(void *user1, void *user2)
{
struct sk_buff *skb = (struct sk_buff *)user1;
u32 ep_idx = (u32)user2;
IPADBG("skb=%p ep=%d\n", skb, ep_idx);
- if (ipa_ctx->ep[ep_idx].notify)
- ipa_ctx->ep[ep_idx].notify(ipa_ctx->ep[ep_idx].priv,
+ if (ipa_ctx->ep[ep_idx].client_notify)
+ ipa_ctx->ep[ep_idx].client_notify(ipa_ctx->ep[ep_idx].priv,
IPA_WRITE_DONE, (unsigned long)skb);
else
dev_kfree_skb_any(skb);
@@ -832,10 +870,20 @@
* dst is a "valid" CONS type, then SW data-path is used. If dst is the
* WLAN_AMPDU PROD type, then HW data-path for WLAN AMPDU is used. Anything else
* is an error. For errors, client needs to free the skb as needed. For success,
- * IPA driver will later invoke client calback if one was supplied. That
+ * IPA driver will later invoke client callback if one was supplied. That
* callback should free the skb. If no callback supplied, IPA driver will free
* the skb internally
*
+ * The function will use two descriptors for this send command
+ * (for A5_WLAN_AMPDU_PROD only one descriptor will be sent),
+ * the first descriptor will be used to inform the IPA hardware that
+ * apps need to push data into the IPA (IP_PACKET_INIT immediate command).
+ * Once this send was done from SPS point-of-view the IPA driver will
+ * get notified by the supplied callback - ipa_sps_irq_tx_notify()
+ *
+ * ipa_tx_comp_usr_notify_release() will call to the user supplied
+ * callback (supplied in ipa_connect())
+ *
* Returns: 0 on success, negative on failure
*/
int ipa_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
@@ -864,6 +912,7 @@
IPAERR("failed to alloc immediate command object\n");
goto fail_mem_alloc;
}
+ memset(cmd, 0x00, sizeof(*cmd));
cmd->destination_pipe_index = ipa_ep_idx;
if (meta && meta->mbim_stream_id_valid)
@@ -875,7 +924,7 @@
desc[1].pyld = skb->data;
desc[1].len = skb->len;
desc[1].type = IPA_DATA_DESC_SKB;
- desc[1].callback = ipa_tx_comp;
+ desc[1].callback = ipa_tx_comp_usr_notify_release;
desc[1].user1 = skb;
desc[1].user2 = (void *)ipa_ep_idx;
@@ -887,7 +936,7 @@
desc[0].pyld = skb->data;
desc[0].len = skb->len;
desc[0].type = IPA_DATA_DESC_SKB;
- desc[0].callback = ipa_tx_comp;
+ desc[0].callback = ipa_tx_comp_usr_notify_release;
desc[0].user1 = skb;
desc[0].user2 = (void *)ipa_ep_idx;
@@ -919,7 +968,7 @@
* ipa_handle_rx_core() is run in polling mode. After all packets has been
* received, the driver switches back to interrupt mode.
*/
-void ipa_handle_rx(struct work_struct *work)
+void ipa_wq_handle_rx(struct work_struct *work)
{
ipa_handle_rx_core();
ipa_rx_switch_to_intr_mode();
@@ -962,7 +1011,7 @@
}
INIT_LIST_HEAD(&rx_pkt->link);
- INIT_WORK(&rx_pkt->work, ipa_handle_rx);
+ INIT_WORK(&rx_pkt->work, ipa_wq_handle_rx);
rx_pkt->skb = __dev_alloc_skb(IPA_RX_SKB_SIZE, GFP_KERNEL);
if (rx_pkt->skb == NULL) {
diff --git a/drivers/platform/msm/ipa/ipa_i.h b/drivers/platform/msm/ipa/ipa_i.h
index 63ef5fb..3be2369 100644
--- a/drivers/platform/msm/ipa/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_i.h
@@ -296,8 +296,9 @@
* @dst_pipe_index: destination pipe index
* @rt_tbl_idx: routing table index
* @connect: SPS connect
- * @priv: user provided information
- * @notify: user provided CB for EP events notification
+ * @priv: user provided information which will be forwarded once the user is
+ * notified for new data avail
+ * @client_notify: user provided CB for EP events notification
* @desc_fifo_in_pipe_mem: flag indicating if descriptors FIFO uses pipe memory
* @data_fifo_in_pipe_mem: flag indicating if data FIFO uses pipe memory
* @desc_fifo_pipe_mem_ofst: descriptors FIFO pipe memory offset
@@ -314,7 +315,7 @@
u32 rt_tbl_idx;
struct sps_connect connect;
void *priv;
- void (*notify)(void *priv, enum ipa_dp_evt_type evt,
+ void (*client_notify)(void *priv, enum ipa_dp_evt_type evt,
unsigned long data);
bool desc_fifo_in_pipe_mem;
bool data_fifo_in_pipe_mem;
@@ -357,7 +358,8 @@
/**
* struct ipa_tx_pkt_wrapper - IPA Tx packet wrapper
- * @type: info for the skb or immediate command param
+ * @type: specify if this packet is a data packet (skb) or
+ * an immediate command
* @mem: memory buffer used by this Tx packet
* @work: work struct for current Tx packet
* @link: linked to the wrappers on that pipe
@@ -371,6 +373,8 @@
* >1 and <0xFFFF for first of a "multiple" tranfer,
* 0xFFFF for last desc, 0 for rest of "multiple' transfer
* @bounce: va of bounce buffer
+ *
+ * This struct can wrap both data packet and immediate command packet.
*/
struct ipa_tx_pkt_wrapper {
enum ipa_desc_type type;
@@ -693,8 +697,8 @@
void ipa_replenish_rx_cache(void);
void ipa_cleanup_rx(void);
int ipa_cfg_filter(u32 disable);
-void ipa_write_done(struct work_struct *work);
-void ipa_handle_rx(struct work_struct *work);
+void ipa_wq_write_done(struct work_struct *work);
+void ipa_wq_handle_rx(struct work_struct *work);
void ipa_handle_rx_core(void);
int ipa_pipe_mem_init(u32 start_ofst, u32 size);
int ipa_pipe_mem_alloc(u32 *ofst, u32 size);
diff --git a/drivers/power/pm8921-charger.c b/drivers/power/pm8921-charger.c
index c6dd57b..7beb24c 100644
--- a/drivers/power/pm8921-charger.c
+++ b/drivers/power/pm8921-charger.c
@@ -280,6 +280,7 @@
struct delayed_work eoc_work;
struct delayed_work unplug_check_work;
struct delayed_work vin_collapse_check_work;
+ struct delayed_work btc_override_work;
struct wake_lock eoc_wake_lock;
enum pm8921_chg_cold_thr cold_thr;
enum pm8921_chg_hot_thr hot_thr;
@@ -291,6 +292,11 @@
int recent_reported_soc;
int battery_less_hardware;
int ibatmax_max_adj_ma;
+ int btc_override;
+ int btc_override_cold_decidegc;
+ int btc_override_hot_decidegc;
+ int btc_delay_ms;
+ bool btc_panic_if_cant_stop_chg;
};
/* user space parameter to limit usb current */
@@ -2300,6 +2306,10 @@
*/
schedule_delayed_work(&chip->eoc_work, delay);
wake_lock(&chip->eoc_wake_lock);
+ if (chip->btc_override)
+ schedule_delayed_work(&chip->btc_override_work,
+ round_jiffies_relative(msecs_to_jiffies
+ (chip->btc_delay_ms)));
/* Update battery charging LEDs and user space battery info */
power_supply_changed(&chip->batt_psy);
}
@@ -2912,6 +2922,13 @@
round_jiffies_relative(msecs_to_jiffies
(EOC_CHECK_PERIOD_MS)));
}
+ if (high_transition
+ && chip->btc_override
+ && !delayed_work_pending(&chip->btc_override_work)) {
+ schedule_delayed_work(&chip->btc_override_work,
+ round_jiffies_relative(msecs_to_jiffies
+ (chip->btc_delay_ms)));
+ }
power_supply_changed(&chip->batt_psy);
bms_notify_check(chip);
return IRQ_HANDLED;
@@ -3404,6 +3421,155 @@
return CHG_FINISHED;
}
+#define COMP_OVERRIDE_HOT_BANK 6
+#define COMP_OVERRIDE_COLD_BANK 7
+#define COMP_OVERRIDE_BIT BIT(1)
+static int pm_chg_override_cold(struct pm8921_chg_chip *chip, int flag)
+{
+ u8 val;
+ int rc = 0;
+
+ val = 0x80 | COMP_OVERRIDE_COLD_BANK << 2 | COMP_OVERRIDE_BIT;
+
+ if (flag)
+ val |= 0x01;
+
+ rc = pm8xxx_writeb(chip->dev->parent, COMPARATOR_OVERRIDE, val);
+ if (rc < 0)
+ pr_err("Could not write 0x%x to override rc = %d\n", val, rc);
+
+ pr_debug("btc cold = %d val = 0x%x\n", flag, val);
+ return rc;
+}
+
+static int pm_chg_override_hot(struct pm8921_chg_chip *chip, int flag)
+{
+ u8 val;
+ int rc = 0;
+
+ val = 0x80 | COMP_OVERRIDE_HOT_BANK << 2 | COMP_OVERRIDE_BIT;
+
+ if (flag)
+ val |= 0x01;
+
+ rc = pm8xxx_writeb(chip->dev->parent, COMPARATOR_OVERRIDE, val);
+ if (rc < 0)
+ pr_err("Could not write 0x%x to override rc = %d\n", val, rc);
+
+ pr_debug("btc hot = %d val = 0x%x\n", flag, val);
+ return rc;
+}
+
+static void __devinit pm8921_chg_btc_override_init(struct pm8921_chg_chip *chip)
+{
+ int rc = 0;
+ u8 reg;
+ u8 val;
+
+ val = COMP_OVERRIDE_HOT_BANK << 2;
+ rc = pm8xxx_writeb(chip->dev->parent, COMPARATOR_OVERRIDE, val);
+ if (rc < 0) {
+ pr_err("Could not write 0x%x to override rc = %d\n", val, rc);
+ goto cold_init;
+ }
+ rc = pm8xxx_readb(chip->dev->parent, COMPARATOR_OVERRIDE, &reg);
+ if (rc < 0) {
+ pr_err("Could not read bank %d of override rc = %d\n",
+ COMP_OVERRIDE_HOT_BANK, rc);
+ goto cold_init;
+ }
+ if ((reg & COMP_OVERRIDE_BIT) != COMP_OVERRIDE_BIT) {
+ /* for now override it as not hot */
+ rc = pm_chg_override_hot(chip, 0);
+ if (rc < 0)
+ pr_err("Could not override hot rc = %d\n", rc);
+ }
+
+cold_init:
+ val = COMP_OVERRIDE_COLD_BANK << 2;
+ rc = pm8xxx_writeb(chip->dev->parent, COMPARATOR_OVERRIDE, val);
+ if (rc < 0) {
+ pr_err("Could not write 0x%x to override rc = %d\n", val, rc);
+ return;
+ }
+ rc = pm8xxx_readb(chip->dev->parent, COMPARATOR_OVERRIDE, &reg);
+ if (rc < 0) {
+ pr_err("Could not read bank %d of override rc = %d\n",
+ COMP_OVERRIDE_COLD_BANK, rc);
+ return;
+ }
+ if ((reg & COMP_OVERRIDE_BIT) != COMP_OVERRIDE_BIT) {
+ /* for now override it as not cold */
+ rc = pm_chg_override_cold(chip, 0);
+ if (rc < 0)
+ pr_err("Could not override cold rc = %d\n", rc);
+ }
+}
+
+static void btc_override_worker(struct work_struct *work)
+{
+ int decidegc;
+ int temp;
+ int rc = 0;
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct pm8921_chg_chip *chip = container_of(dwork,
+ struct pm8921_chg_chip, btc_override_work);
+
+ if (!chip->btc_override) {
+ pr_err("called when not enabled\n");
+ return;
+ }
+
+ decidegc = get_prop_batt_temp(chip);
+
+ pr_debug("temp=%d\n", decidegc);
+
+ temp = pm_chg_get_rt_status(chip, BATTTEMP_HOT_IRQ);
+ if (temp) {
+ if (decidegc < chip->btc_override_hot_decidegc)
+ /* stop forcing batt hot */
+ rc = pm_chg_override_hot(chip, 0);
+ if (rc)
+ pr_err("Couldnt write 0 to hot comp\n");
+ } else {
+ if (decidegc >= chip->btc_override_hot_decidegc)
+ /* start forcing batt hot */
+ rc = pm_chg_override_hot(chip, 1);
+ if (rc && chip->btc_panic_if_cant_stop_chg)
+ panic("Couldnt override comps to stop chg\n");
+ }
+
+ temp = pm_chg_get_rt_status(chip, BATTTEMP_COLD_IRQ);
+ if (temp) {
+ if (decidegc > chip->btc_override_cold_decidegc)
+ /* stop forcing batt cold */
+ rc = pm_chg_override_cold(chip, 0);
+ if (rc)
+ pr_err("Couldnt write 0 to cold comp\n");
+ } else {
+ if (decidegc <= chip->btc_override_cold_decidegc)
+ /* start forcing batt cold */
+ rc = pm_chg_override_cold(chip, 1);
+ if (rc && chip->btc_panic_if_cant_stop_chg)
+ panic("Couldnt override comps to stop chg\n");
+ }
+
+ if ((is_dc_chg_plugged_in(the_chip) || is_usb_chg_plugged_in(the_chip))
+ && get_prop_batt_status(chip) != POWER_SUPPLY_STATUS_FULL) {
+ schedule_delayed_work(&chip->btc_override_work,
+ round_jiffies_relative(msecs_to_jiffies
+ (chip->btc_delay_ms)));
+ return;
+ }
+
+ rc = pm_chg_override_hot(chip, 0);
+ if (rc)
+ pr_err("Couldnt write 0 to hot comp\n");
+ rc = pm_chg_override_cold(chip, 0);
+ if (rc)
+ pr_err("Couldnt write 0 to cold comp\n");
+}
+
/**
* eoc_worker - internal function to check if battery EOC
* has happened
@@ -4494,6 +4660,16 @@
chip->led_src_config = pdata->led_src_config;
chip->has_dc_supply = pdata->has_dc_supply;
chip->battery_less_hardware = pdata->battery_less_hardware;
+ chip->btc_override = pdata->btc_override;
+ if (chip->btc_override) {
+ chip->btc_delay_ms = pdata->btc_delay_ms;
+ chip->btc_override_cold_decidegc
+ = pdata->btc_override_cold_degc * 10;
+ chip->btc_override_hot_decidegc
+ = pdata->btc_override_hot_degc * 10;
+ chip->btc_panic_if_cant_stop_chg
+ = pdata->btc_panic_if_cant_stop_chg;
+ }
if (chip->battery_less_hardware)
charging_disabled = 1;
@@ -4507,6 +4683,9 @@
goto free_chip;
}
+ if (chip->btc_override)
+ pm8921_chg_btc_override_init(chip);
+
chip->usb_psy.name = "usb",
chip->usb_psy.type = POWER_SUPPLY_TYPE_USB,
chip->usb_psy.supplied_to = pm_power_supplied_to,
@@ -4561,6 +4740,7 @@
INIT_WORK(&chip->battery_id_valid_work, battery_id_valid);
INIT_DELAYED_WORK(&chip->update_heartbeat_work, update_heartbeat);
+ INIT_DELAYED_WORK(&chip->btc_override_work, btc_override_worker);
rc = request_irqs(chip, pdev);
if (rc) {
@@ -4598,6 +4778,7 @@
free_irq:
free_irqs(chip);
unregister_batt:
+ wake_lock_destroy(&chip->eoc_wake_lock);
power_supply_unregister(&chip->batt_psy);
unregister_dc:
power_supply_unregister(&chip->dc_psy);
diff --git a/drivers/power/qpnp-bms.c b/drivers/power/qpnp-bms.c
index 6623d81..0a072b1 100644
--- a/drivers/power/qpnp-bms.c
+++ b/drivers/power/qpnp-bms.c
@@ -33,7 +33,7 @@
#define BMS1_MODE_CTL 0X40
/* Coulomb counter clear registers */
#define BMS1_CC_DATA_CTL 0x42
-#define BMS1_CC_CLEAR_CTRL 0x43
+#define BMS1_CC_CLEAR_CTL 0x43
/* OCV limit registers */
#define BMS1_OCV_USE_LOW_LIMIT_THR0 0x48
#define BMS1_OCV_USE_LOW_LIMIT_THR1 0x49
@@ -484,12 +484,49 @@
pr_debug("last_good_ocv_uv = %d\n", raw->last_good_ocv_uv);
}
+#define CLEAR_CC BIT(7)
+#define CLEAR_SW_CC BIT(6)
+/**
+ * reset both cc and sw-cc.
+ * note: this should only ever be called from one thread
+ * or there may be a race condition where CC is never enabled
+ * again
+ */
+static void reset_cc(struct qpnp_bms_chip *chip)
+{
+ int rc;
+
+ pr_debug("resetting cc manually\n");
+ rc = qpnp_masked_write(chip, BMS1_CC_CLEAR_CTL,
+ CLEAR_CC | CLEAR_SW_CC,
+ CLEAR_CC | CLEAR_SW_CC);
+ if (rc)
+ pr_err("cc reset failed: %d\n", rc);
+
+ /* wait for 100us for cc to reset */
+ udelay(100);
+
+ rc = qpnp_masked_write(chip, BMS1_CC_CLEAR_CTL,
+ CLEAR_CC | CLEAR_SW_CC, 0);
+ if (rc)
+ pr_err("cc reenable failed: %d\n", rc);
+}
+
static int read_soc_params_raw(struct qpnp_bms_chip *chip,
struct raw_soc_params *raw)
{
int rc;
mutex_lock(&chip->bms_output_lock);
+
+ if (chip->prev_last_good_ocv_raw == 0) {
+ /* software workaround for BMS 1.0
+ * The coulomb counter does not reset upon PON, so reset it
+ * manually upon probe. */
+ if (chip->revision1 == 0 && chip->revision2 == 0)
+ reset_cc(chip);
+ }
+
lock_output_data(chip);
rc = qpnp_read_wrapper(chip, (u8 *)&raw->last_good_ocv_raw,
@@ -1254,6 +1291,34 @@
return soc;
}
+static int clamp_soc_based_on_voltage(struct qpnp_bms_chip *chip, int soc)
+{
+ int rc, vbat_uv;
+ struct qpnp_vadc_result result;
+
+ rc = qpnp_vadc_read(VBAT_SNS, &result);
+ if (rc) {
+ pr_err("error reading vbat_sns adc channel = %d, rc = %d\n",
+ VBAT_SNS, rc);
+ return rc;
+ }
+
+ vbat_uv = (int)result.physical;
+ if (soc == 0 && vbat_uv > chip->v_cutoff_uv) {
+ pr_debug("clamping soc to 1, vbat (%d) > cutoff (%d)\n",
+ vbat_uv, chip->v_cutoff_uv);
+ return 1;
+ } else if (soc > 0 && vbat_uv < chip->v_cutoff_uv) {
+ pr_debug("forcing soc to 0, vbat (%d) < cutoff (%d)\n",
+ vbat_uv, chip->v_cutoff_uv);
+ return 0;
+ } else {
+ pr_debug("not clamping, using soc = %d, vbat = %d and cutoff = %d\n",
+ soc, vbat_uv, chip->v_cutoff_uv);
+ return soc;
+ }
+}
+
static int calculate_state_of_charge(struct qpnp_bms_chip *chip,
struct raw_soc_params *raw,
int batt_temp)
@@ -1348,6 +1413,11 @@
pr_debug("SOC before adjustment = %d\n", soc);
new_calculated_soc = adjust_soc(chip, &params, soc, batt_temp);
+ /* clamp soc due to BMS HW inaccuracies in pm8941v2.0 */
+ if (chip->revision1 == 0 && chip->revision2 == 0)
+ new_calculated_soc = clamp_soc_based_on_voltage(chip,
+ new_calculated_soc);
+
if (new_calculated_soc != chip->calculated_soc
&& chip->bms_psy.name != NULL) {
power_supply_changed(&chip->bms_psy);
@@ -1947,6 +2017,7 @@
pr_err("Error reading version register %d\n", rc);
goto error_read;
}
+ pr_debug("BMS version: %hhu.%hhu\n", chip->revision2, chip->revision1);
rc = qpnp_vadc_is_ready();
if (rc) {
diff --git a/drivers/power/qpnp-charger.c b/drivers/power/qpnp-charger.c
index b5559db..a0d84df 100644
--- a/drivers/power/qpnp-charger.c
+++ b/drivers/power/qpnp-charger.c
@@ -71,6 +71,7 @@
#define CHGR_CHG_WDOG_EN 0x65
#define CHGR_USB_IUSB_MAX 0x44
#define CHGR_USB_USB_SUSP 0x47
+#define CHGR_USB_USB_OTG_CTL 0x48
#define CHGR_USB_ENUM_T_STOP 0x4E
#define CHGR_CHG_TEMP_THRESH 0x66
#define CHGR_BAT_IF_PRES_STATUS 0x08
@@ -271,6 +272,27 @@
return 0;
}
+#define USB_OTG_EN_BIT BIT(0)
+static int
+qpnp_chg_is_otg_en_set(struct qpnp_chg_chip *chip)
+{
+ u8 usb_otg_en;
+ int rc;
+
+ rc = qpnp_chg_read(chip, &usb_otg_en,
+ chip->usb_chgpth_base + CHGR_USB_USB_OTG_CTL,
+ 1);
+
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ chip->usb_chgpth_base + CHGR_STATUS, rc);
+ return rc;
+ }
+ pr_debug("usb otg en 0x%x\n", usb_otg_en);
+
+ return (usb_otg_en & USB_OTG_EN_BIT) ? 1 : 0;
+}
+
#define USB_VALID_BIT BIT(7)
static int
qpnp_chg_is_usb_chg_plugged_in(struct qpnp_chg_chip *chip)
@@ -380,10 +402,16 @@
qpnp_chg_usb_usbin_valid_irq_handler(int irq, void *_chip)
{
struct qpnp_chg_chip *chip = _chip;
- int usb_present;
+ int usb_present, host_mode;
usb_present = qpnp_chg_is_usb_chg_plugged_in(chip);
- pr_debug("usbin-valid triggered: %d\n", usb_present);
+ host_mode = qpnp_chg_is_otg_en_set(chip);
+ pr_debug("usbin-valid triggered: %d host_mode: %d\n",
+ usb_present, host_mode);
+
+ /* In host mode notifications come from USB supply */
+ if (host_mode)
+ return IRQ_HANDLED;
if (chip->usb_present ^ usb_present) {
chip->usb_present = usb_present;
@@ -436,6 +464,80 @@
return 0;
}
+static int
+qpnp_chg_charge_en(struct qpnp_chg_chip *chip, int enable)
+{
+ return qpnp_chg_masked_write(chip, chip->chgr_base + CHGR_CHG_CTRL,
+ CHGR_CHG_EN,
+ enable ? CHGR_CHG_EN : 0, 1);
+}
+
+static int
+qpnp_chg_force_run_on_batt(struct qpnp_chg_chip *chip, int disable)
+{
+ /* This bit forces the charger to run off of the battery rather
+ * than a connected charger */
+ return qpnp_chg_masked_write(chip, chip->chgr_base + CHGR_CHG_CTRL,
+ CHGR_ON_BAT_FORCE_BIT,
+ disable ? CHGR_ON_BAT_FORCE_BIT : 0, 1);
+}
+
+static
+int switch_usb_to_charge_mode(struct qpnp_chg_chip *chip)
+{
+ int rc;
+
+ pr_debug("switch to charge mode\n");
+ if (!qpnp_chg_is_otg_en_set(chip))
+ return 0;
+
+ /* enable usb ovp fet */
+ rc = qpnp_chg_masked_write(chip,
+ chip->usb_chgpth_base + CHGR_USB_USB_OTG_CTL,
+ USB_OTG_EN_BIT,
+ 0, 1);
+ if (rc) {
+ pr_err("Failed to turn on usb ovp rc = %d\n", rc);
+ return rc;
+ }
+
+ rc = qpnp_chg_force_run_on_batt(chip, chip->charging_disabled);
+ if (rc) {
+ pr_err("Failed re-enable charging rc = %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static
+int switch_usb_to_host_mode(struct qpnp_chg_chip *chip)
+{
+ int rc;
+
+ pr_debug("switch to host mode\n");
+ if (qpnp_chg_is_otg_en_set(chip))
+ return 0;
+
+ rc = qpnp_chg_force_run_on_batt(chip, 1);
+ if (rc) {
+ pr_err("Failed to disable charging rc = %d\n", rc);
+ return rc;
+ }
+
+ /* force usb ovp fet off */
+ rc = qpnp_chg_masked_write(chip,
+ chip->usb_chgpth_base + CHGR_USB_USB_OTG_CTL,
+ USB_OTG_EN_BIT,
+ USB_OTG_EN_BIT, 1);
+ if (rc) {
+ pr_err("Failed to turn off usb ovp rc = %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
static enum power_supply_property pm_power_props_mains[] = {
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_ONLINE,
@@ -682,6 +784,21 @@
chip->bms_psy = power_supply_get_by_name("bms");
chip->usb_psy->get_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_SCOPE, &ret);
+ if (ret.intval) {
+ if ((ret.intval == POWER_SUPPLY_SCOPE_SYSTEM)
+ && !qpnp_chg_is_otg_en_set(chip)) {
+ switch_usb_to_host_mode(chip);
+ return;
+ }
+ if ((ret.intval == POWER_SUPPLY_SCOPE_DEVICE)
+ && qpnp_chg_is_otg_en_set(chip)) {
+ switch_usb_to_charge_mode(chip);
+ return;
+ }
+ }
+
+ chip->usb_psy->get_property(chip->usb_psy,
POWER_SUPPLY_PROP_ONLINE, &ret);
if (ret.intval && qpnp_chg_is_usb_chg_plugged_in(chip)) {
@@ -702,24 +819,6 @@
}
static int
-qpnp_chg_force_run_on_batt(struct qpnp_chg_chip *chip, int disable)
-{
- /* This bit forces the charger to run off of the battery rather
- * than a connected charger */
- return qpnp_chg_masked_write(chip, chip->chgr_base + CHGR_CHG_CTRL,
- CHGR_ON_BAT_FORCE_BIT,
- disable ? CHGR_ON_BAT_FORCE_BIT : 0, 1);
-}
-
-static int
-qpnp_chg_charge_en(struct qpnp_chg_chip *chip, int enable)
-{
- return qpnp_chg_masked_write(chip, chip->chgr_base + CHGR_CHG_CTRL,
- CHGR_CHG_EN,
- enable ? CHGR_CHG_EN : 0, 1);
-}
-
-static int
qpnp_batt_power_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
diff --git a/drivers/slimbus/slim-msm-ngd.c b/drivers/slimbus/slim-msm-ngd.c
index 02e1952..78e8a6f 100644
--- a/drivers/slimbus/slim-msm-ngd.c
+++ b/drivers/slimbus/slim-msm-ngd.c
@@ -172,13 +172,16 @@
u8 *tid, struct completion *done)
{
struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
+ mutex_lock(&ctrl->m_ctrl);
if (ctrl->last_tid <= 255) {
ctrl->txnt = krealloc(ctrl->txnt,
(ctrl->last_tid + 1) *
sizeof(struct slim_msg_txn *),
GFP_KERNEL);
- if (!ctrl->txnt)
+ if (!ctrl->txnt) {
+ mutex_unlock(&ctrl->m_ctrl);
return -ENOMEM;
+ }
dev->msg_cnt = ctrl->last_tid;
ctrl->last_tid++;
} else {
@@ -190,6 +193,7 @@
}
if (i >= 256) {
dev_err(&ctrl->dev, "out of TID");
+ mutex_unlock(&ctrl->m_ctrl);
return -ENOMEM;
}
}
@@ -197,6 +201,7 @@
txn->tid = dev->msg_cnt;
txn->comp = done;
*tid = dev->msg_cnt;
+ mutex_unlock(&ctrl->m_ctrl);
return 0;
}
static int ngd_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
@@ -369,6 +374,9 @@
pr_err("connect/disc :0x%x, tid:%d timed out", txn->mc,
txn->tid);
ret = -ETIMEDOUT;
+ mutex_lock(&ctrl->m_ctrl);
+ ctrl->txnt[txn->tid] = NULL;
+ mutex_unlock(&ctrl->m_ctrl);
} else {
ret = txn->ec;
}
@@ -394,6 +402,9 @@
pr_err("master req:0x%x, tid:%d timed out", txn->mc,
txn->tid);
ret = -ETIMEDOUT;
+ mutex_lock(&ctrl->m_ctrl);
+ ctrl->txnt[txn->tid] = NULL;
+ mutex_unlock(&ctrl->m_ctrl);
} else {
ret = txn->ec;
}
@@ -526,10 +537,8 @@
txn.dt = SLIM_MSG_DEST_LOGICALADDR;
txn.la = SLIM_LA_MGR;
txn.ec = 0;
- mutex_lock(&ctrl->m_ctrl);
ret = ngd_get_tid(ctrl, &txn, &wbuf[0], &done);
if (ret) {
- mutex_unlock(&ctrl->m_ctrl);
return ret;
}
memcpy(&wbuf[1], ea, elen);
@@ -543,7 +552,6 @@
ret = -ENXIO;
else if (!ret)
*laddr = txn.la;
- mutex_unlock(&ctrl->m_ctrl);
return ret;
}
@@ -606,20 +614,33 @@
}
if (mc == SLIM_USR_MC_ADDR_REPLY &&
mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
- struct slim_msg_txn *txn = dev->ctrl.txnt[buf[3]];
+ struct slim_msg_txn *txn;
u8 failed_ea[6] = {0, 0, 0, 0, 0, 0};
- if (!txn)
+ mutex_lock(&dev->ctrl.m_ctrl);
+ txn = dev->ctrl.txnt[buf[3]];
+ if (!txn) {
+ pr_err("LADDR response after timeout, tid:0x%x",
+ buf[3]);
+ mutex_unlock(&dev->ctrl.m_ctrl);
return;
+ }
if (memcmp(&buf[4], failed_ea, 6))
txn->la = buf[10];
dev->ctrl.txnt[buf[3]] = NULL;
+ mutex_unlock(&dev->ctrl.m_ctrl);
complete(txn->comp);
}
if (mc == SLIM_USR_MC_GENERIC_ACK &&
mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
- struct slim_msg_txn *txn = dev->ctrl.txnt[buf[3]];
- if (!txn)
+ struct slim_msg_txn *txn;
+ mutex_lock(&dev->ctrl.m_ctrl);
+ txn = dev->ctrl.txnt[buf[3]];
+ if (!txn) {
+ pr_err("ACK received after timeout, tid:0x%x",
+ buf[3]);
+ mutex_unlock(&dev->ctrl.m_ctrl);
return;
+ }
dev_dbg(dev->dev, "got response:tid:%d, response:0x%x",
(int)buf[3], buf[4]);
if (!(buf[4] & MSM_SAT_SUCCSS)) {
@@ -628,6 +649,7 @@
txn->ec = -EIO;
}
dev->ctrl.txnt[buf[3]] = NULL;
+ mutex_unlock(&dev->ctrl.m_ctrl);
complete(txn->comp);
}
}
diff --git a/drivers/slimbus/slimbus.c b/drivers/slimbus/slimbus.c
index d5d6e0c..c320e46 100644
--- a/drivers/slimbus/slimbus.c
+++ b/drivers/slimbus/slimbus.c
@@ -1216,7 +1216,7 @@
if (flow != SLIM_SRC)
return -EINVAL;
- mutex_lock(&ctrl->m_ctrl);
+ mutex_lock(&ctrl->sched.m_reconf);
if (slc->state == SLIM_CH_FREE) {
ret = -ENOTCONN;
@@ -1238,7 +1238,7 @@
slc->srch = srch;
connect_src_err:
- mutex_unlock(&ctrl->m_ctrl);
+ mutex_unlock(&ctrl->sched.m_reconf);
return ret;
}
EXPORT_SYMBOL_GPL(slim_connect_src);
@@ -1265,7 +1265,7 @@
if (!sinkh || !nsink)
return -EINVAL;
- mutex_lock(&ctrl->m_ctrl);
+ mutex_lock(&ctrl->sched.m_reconf);
/*
* Once channel is removed, its ports can be considered disconnected
@@ -1303,7 +1303,7 @@
slc->nsink += nsink;
connect_sink_err:
- mutex_unlock(&ctrl->m_ctrl);
+ mutex_unlock(&ctrl->sched.m_reconf);
return ret;
}
EXPORT_SYMBOL_GPL(slim_connect_sink);
@@ -1320,11 +1320,11 @@
struct slim_controller *ctrl = sb->ctrl;
int i;
- mutex_lock(&ctrl->m_ctrl);
+ mutex_lock(&ctrl->sched.m_reconf);
for (i = 0; i < nph; i++)
disconnect_port_ch(ctrl, ph[i]);
- mutex_unlock(&ctrl->m_ctrl);
+ mutex_unlock(&ctrl->sched.m_reconf);
return 0;
}
EXPORT_SYMBOL_GPL(slim_disconnect_ports);
@@ -1660,13 +1660,13 @@
if (!ctrl)
return -EINVAL;
- mutex_lock(&ctrl->m_ctrl);
+ mutex_lock(&ctrl->sched.m_reconf);
for (i = 0; i < ctrl->nchans; i++) {
if (ctrl->chans[i].state == SLIM_CH_FREE)
break;
}
if (i >= ctrl->nchans) {
- mutex_unlock(&ctrl->m_ctrl);
+ mutex_unlock(&ctrl->sched.m_reconf);
return -EXFULL;
}
*chanh = i;
@@ -1674,7 +1674,7 @@
ctrl->chans[i].state = SLIM_CH_ALLOCATED;
ctrl->chans[i].chan = (u8)(ctrl->reserved + i);
- mutex_unlock(&ctrl->m_ctrl);
+ mutex_unlock(&ctrl->sched.m_reconf);
return 0;
}
EXPORT_SYMBOL_GPL(slim_alloc_ch);
@@ -1698,7 +1698,7 @@
int ret = 0;
if (!ctrl || !chanh)
return -EINVAL;
- mutex_lock(&ctrl->m_ctrl);
+ mutex_lock(&ctrl->sched.m_reconf);
/* start with modulo number */
i = ch % ctrl->nchans;
@@ -1729,7 +1729,7 @@
i = (i + 1) % ctrl->nchans;
}
query_out:
- mutex_unlock(&ctrl->m_ctrl);
+ mutex_unlock(&ctrl->sched.m_reconf);
dev_dbg(&ctrl->dev, "query ch:%d,hdl:%d,ref:%d,ret:%d",
ch, i, ctrl->chans[i].ref, ret);
return ret;
@@ -1751,26 +1751,26 @@
if (!ctrl)
return -EINVAL;
- mutex_lock(&ctrl->m_ctrl);
+ mutex_lock(&ctrl->sched.m_reconf);
if (slc->state == SLIM_CH_FREE) {
- mutex_unlock(&ctrl->m_ctrl);
+ mutex_unlock(&ctrl->sched.m_reconf);
return -ENOTCONN;
}
if (slc->ref > 1) {
slc->ref--;
- mutex_unlock(&ctrl->m_ctrl);
+ mutex_unlock(&ctrl->sched.m_reconf);
dev_dbg(&ctrl->dev, "remove chan:%d,hdl:%d,ref:%d",
slc->chan, chanh, slc->ref);
return 0;
}
if (slc->state >= SLIM_CH_PENDING_ACTIVE) {
dev_err(&ctrl->dev, "Channel:%d should be removed first", chan);
- mutex_unlock(&ctrl->m_ctrl);
+ mutex_unlock(&ctrl->sched.m_reconf);
return -EISCONN;
}
slc->ref--;
slc->state = SLIM_CH_FREE;
- mutex_unlock(&ctrl->m_ctrl);
+ mutex_unlock(&ctrl->sched.m_reconf);
dev_dbg(&ctrl->dev, "remove chan:%d,hdl:%d,ref:%d",
slc->chan, chanh, slc->ref);
return 0;
@@ -1812,7 +1812,7 @@
if (!ctrl || !chanh || !prop || !nchan)
return -EINVAL;
- mutex_lock(&ctrl->m_ctrl);
+ mutex_lock(&ctrl->sched.m_reconf);
for (i = 0; i < nchan; i++) {
u8 chan = SLIM_HDL_TO_CHIDX(chanh[i]);
struct slim_ich *slc = &ctrl->chans[chan];
@@ -1856,7 +1856,7 @@
}
err_define_ch:
dev_dbg(&ctrl->dev, "define_ch: ch:%d, ret:%d", *chanh, ret);
- mutex_unlock(&ctrl->m_ctrl);
+ mutex_unlock(&ctrl->sched.m_reconf);
return ret;
}
EXPORT_SYMBOL_GPL(slim_define_ch);
@@ -2607,7 +2607,6 @@
struct slim_pending_ch *pch;
mutex_lock(&ctrl->sched.m_reconf);
- mutex_lock(&ctrl->m_ctrl);
/*
* If there are no pending changes from this client, avoid sending
* the reconfiguration sequence
@@ -2631,7 +2630,6 @@
}
}
if (list_empty(&sb->mark_removal)) {
- mutex_unlock(&ctrl->m_ctrl);
mutex_unlock(&ctrl->sched.m_reconf);
pr_info("SLIM_CL: skip reconfig sequence");
return 0;
@@ -2820,7 +2818,6 @@
ctrl->sched.msgsl = ctrl->sched.pending_msgsl;
sb->cur_msgsl = sb->pending_msgsl;
slim_chan_changes(sb, false);
- mutex_unlock(&ctrl->m_ctrl);
mutex_unlock(&ctrl->sched.m_reconf);
return 0;
}
@@ -2828,7 +2825,6 @@
revert_reconfig:
/* Revert channel changes */
slim_chan_changes(sb, true);
- mutex_unlock(&ctrl->m_ctrl);
mutex_unlock(&ctrl->sched.m_reconf);
return ret;
}
@@ -2876,7 +2872,6 @@
return -EINVAL;
mutex_lock(&sb->sldev_reconf);
- mutex_lock(&ctrl->m_ctrl);
do {
struct slim_pending_ch *pch;
u8 add_mark_removal = true;
@@ -2935,7 +2930,6 @@
if (nchan < SLIM_GRP_TO_NCHAN(chanh))
chan = SLIM_HDL_TO_CHIDX(slc->nextgrp);
} while (nchan < SLIM_GRP_TO_NCHAN(chanh));
- mutex_unlock(&ctrl->m_ctrl);
if (!ret && commit == true)
ret = slim_reconfigure_now(sb);
mutex_unlock(&sb->sldev_reconf);
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index c67b75b..7b8788d 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -62,65 +62,6 @@
printk(x); \
} while (0)
-static int nr_free_zone_mtype_pages(struct zone *zone, int mtype)
-{
- int order;
- int sum = 0;
-
- for (order = 0; order < MAX_ORDER; ++order) {
- unsigned long freecount = 0;
- struct free_area *area;
- struct list_head *curr;
-
- area = &(zone->free_area[order]);
-
- list_for_each(curr, &area->free_list[mtype])
- freecount++;
-
- sum += freecount << order;
- }
- return sum;
-}
-
-static int nr_free_zone_pages(struct zone *zone, gfp_t gfp_mask)
-{
- int sum = 0;
- int mtype = allocflags_to_migratetype(gfp_mask);
- int i = 0;
- int *mtype_fallbacks = get_migratetype_fallbacks(mtype);
-
- sum = nr_free_zone_mtype_pages(zone, mtype);
-
- /*
- * Also count the fallback pages
- */
- for (i = 0;; i++) {
- int fallbacktype = mtype_fallbacks[i];
- sum += nr_free_zone_mtype_pages(zone, fallbacktype);
-
- if (fallbacktype == MIGRATE_RESERVE)
- break;
- }
-
- return sum;
-}
-
-static int nr_free_pages(gfp_t gfp_mask)
-{
- struct zoneref *z;
- struct zone *zone;
- int sum = 0;
-
- struct zonelist *zonelist = node_zonelist(numa_node_id(), gfp_mask);
-
- for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
- sum += nr_free_zone_pages(zone, gfp_mask);
- }
-
- return sum;
-}
-
-
static int test_task_flag(struct task_struct *p, int flag)
{
struct task_struct *t = p;
@@ -152,15 +93,6 @@
int other_file = global_page_state(NR_FILE_PAGES) -
global_page_state(NR_SHMEM);
- if (sc->nr_to_scan > 0 && other_free > other_file) {
- /*
- * If the number of free pages is going to affect the decision
- * of which process is selected then ensure only free pages
- * which can satisfy the request are considered.
- */
- other_free = nr_free_pages(sc->gfp_mask);
- }
-
if (lowmem_adj_size < array_size)
array_size = lowmem_adj_size;
if (lowmem_minfree_size < array_size)
diff --git a/drivers/tty/serial/msm_serial_hs_lite.c b/drivers/tty/serial/msm_serial_hs_lite.c
index 3de990c..0189a07 100644
--- a/drivers/tty/serial/msm_serial_hs_lite.c
+++ b/drivers/tty/serial/msm_serial_hs_lite.c
@@ -692,9 +692,9 @@
* Configure Rx Watermark as 3/4 size of Rx FIFO.
* RFWR register takes value in Words for UARTDM Core
* whereas it is consider to be in Bytes for UART Core.
- * Hence configuring Rx Watermark as 12 Words.
+ * Hence configuring Rx Watermark as 48 Words.
*/
- watermark = (port->fifosize * 3) / (4*4);
+ watermark = (port->fifosize * 3) / 4;
msm_hsl_write(port, watermark, regmap[vid][UARTDM_RFWR]);
/* set TX watermark */
@@ -775,18 +775,15 @@
#endif
pm_runtime_get_sync(port->dev);
- /* Set RFR Level as 3/4 of UARTDM FIFO Size */
+ /*
+ * Set RFR Level as 3/4 of UARTDM FIFO Size
+ * i.e. 48 Words = 192 bytes as Rx FIFO is 64 words ( 256 bytes).
+ */
if (likely(port->fifosize > 48))
rfr_level = port->fifosize - 16;
else
rfr_level = port->fifosize;
- /*
- * Use rfr_level value in Words to program
- * MR1 register for UARTDM Core.
- */
- rfr_level = (rfr_level / 4);
-
spin_lock_irqsave(&port->lock, flags);
vid = msm_hsl_port->ver_id;
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 7430e5a..d416904 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -114,6 +114,12 @@
#define ALT_INTERRUPT_EN_REG (QSCRATCH_REG_OFFSET + 0x20)
#define HS_PHY_IRQ_STAT_REG (QSCRATCH_REG_OFFSET + 0x24)
#define SS_PHY_CTRL_REG (QSCRATCH_REG_OFFSET + 0x30)
+#define SS_CR_PROTOCOL_DATA_IN_REG (QSCRATCH_REG_OFFSET + 0x3C)
+#define SS_CR_PROTOCOL_DATA_OUT_REG (QSCRATCH_REG_OFFSET + 0x40)
+#define SS_CR_PROTOCOL_CAP_ADDR_REG (QSCRATCH_REG_OFFSET + 0x44)
+#define SS_CR_PROTOCOL_CAP_DATA_REG (QSCRATCH_REG_OFFSET + 0x48)
+#define SS_CR_PROTOCOL_READ_REG (QSCRATCH_REG_OFFSET + 0x4C)
+#define SS_CR_PROTOCOL_WRITE_REG (QSCRATCH_REG_OFFSET + 0x50)
struct dwc3_msm_req_complete {
struct list_head list_item;
@@ -280,6 +286,54 @@
}
/**
+ *
+ * Write SSPHY register with debug info.
+ *
+ * @base - DWC3 base virtual address.
+ * @addr - SSPHY address to write.
+ * @val - value to write.
+ *
+ */
+static void dwc3_msm_ssusb_write_phycreg(void *base, u32 addr, u32 val)
+{
+ iowrite32(addr, base + SS_CR_PROTOCOL_DATA_IN_REG);
+ iowrite32(0x1, base + SS_CR_PROTOCOL_CAP_ADDR_REG);
+ while (ioread32(base + SS_CR_PROTOCOL_CAP_ADDR_REG))
+ cpu_relax();
+
+ iowrite32(val, base + SS_CR_PROTOCOL_DATA_IN_REG);
+ iowrite32(0x1, base + SS_CR_PROTOCOL_CAP_DATA_REG);
+ while (ioread32(base + SS_CR_PROTOCOL_CAP_DATA_REG))
+ cpu_relax();
+
+ iowrite32(0x1, base + SS_CR_PROTOCOL_WRITE_REG);
+ while (ioread32(base + SS_CR_PROTOCOL_WRITE_REG))
+ cpu_relax();
+}
+
+/**
+ *
+ * Read SSPHY register with debug info.
+ *
+ * @base - DWC3 base virtual address.
+ * @addr - SSPHY address to read.
+ *
+ */
+static u32 dwc3_msm_ssusb_read_phycreg(void *base, u32 addr)
+{
+ iowrite32(addr, base + SS_CR_PROTOCOL_DATA_IN_REG);
+ iowrite32(0x1, base + SS_CR_PROTOCOL_CAP_ADDR_REG);
+ while (ioread32(base + SS_CR_PROTOCOL_CAP_ADDR_REG))
+ cpu_relax();
+
+ iowrite32(0x1, base + SS_CR_PROTOCOL_READ_REG);
+ while (ioread32(base + SS_CR_PROTOCOL_READ_REG))
+ cpu_relax();
+
+ return ioread32(base + SS_CR_PROTOCOL_DATA_OUT_REG);
+}
+
+/**
* Return DBM EP number according to usb endpoint number.
*
*/
@@ -1608,6 +1662,7 @@
int ret = 0;
int len = 0;
u32 tmp[3];
+ u32 data = 0;
msm = devm_kzalloc(&pdev->dev, sizeof(*msm), GFP_KERNEL);
if (!msm) {
@@ -1851,6 +1906,20 @@
/* Disable (bypass) VBUS and ID filters */
dwc3_msm_write_reg(msm->base, QSCRATCH_GENERAL_CFG, 0x78);
+ /*
+ * WORKAROUND: There is SSPHY suspend bug due to which USB enumerates
+ * in HS mode instead of SS mode. Workaround it by asserting
+ * LANE0.TX_ALT_BLOCK.EN_ALT_BUS to enable TX to use alt bus mode
+ */
+ data = dwc3_msm_ssusb_read_phycreg(msm->base, 0x102D);
+ data |= (1 << 7);
+ dwc3_msm_ssusb_write_phycreg(msm->base, 0x102D, data);
+
+ data = dwc3_msm_ssusb_read_phycreg(msm->base, 0x1010);
+ data &= ~0xFF0;
+ data |= 0x40;
+ dwc3_msm_ssusb_write_phycreg(msm->base, 0x1010, data);
+
pm_runtime_set_active(msm->dev);
pm_runtime_enable(msm->dev);
diff --git a/drivers/video/msm/mdss/mdss_fb.c b/drivers/video/msm/mdss/mdss_fb.c
index f363206..581ec17 100644
--- a/drivers/video/msm/mdss/mdss_fb.c
+++ b/drivers/video/msm/mdss/mdss_fb.c
@@ -317,7 +317,8 @@
return 0;
}
-static inline int mdss_fb_send_panel_event(struct msm_fb_data_type *mfd, int e)
+static inline int mdss_fb_send_panel_event(
+ struct msm_fb_data_type *mfd, int e, void *arg)
{
struct mdss_panel_data *pdata;
@@ -330,7 +331,7 @@
pr_debug("sending event=%d for fb%d\n", e, mfd->index);
if (pdata->event_handler)
- return pdata->event_handler(pdata, e, NULL);
+ return pdata->event_handler(pdata, e, arg);
return 0;
}
@@ -344,7 +345,7 @@
pr_debug("mdss_fb suspend index=%d\n", mfd->index);
- ret = mdss_fb_send_panel_event(mfd, MDSS_EVENT_SUSPEND);
+ ret = mdss_fb_send_panel_event(mfd, MDSS_EVENT_SUSPEND, NULL);
if (ret) {
pr_warn("unable to suspend fb%d (%d)\n", mfd->index, ret);
return ret;
@@ -375,7 +376,7 @@
pr_debug("mdss_fb resume index=%d\n", mfd->index);
- ret = mdss_fb_send_panel_event(mfd, MDSS_EVENT_RESUME);
+ ret = mdss_fb_send_panel_event(mfd, MDSS_EVENT_RESUME, NULL);
if (ret) {
pr_warn("unable to resume fb%d (%d)\n", mfd->index, ret);
return ret;
@@ -854,10 +855,6 @@
var->hsync_len = panel_info->lcdc.h_pulse_width;
var->pixclock = panel_info->clk_rate / 1000;
- mfd->var_xres = var->xres;
- mfd->var_yres = var->yres;
- mfd->var_pixclock = var->pixclock;
-
if (panel_info->type == MIPI_VIDEO_PANEL) {
var->reserved[4] = panel_info->mipi.frame_rate;
} else {
@@ -1004,6 +1001,22 @@
return 0;
}
+static void mdss_fb_var_to_panelinfo(struct fb_var_screeninfo *var,
+ struct mdss_panel_info *pinfo)
+{
+ pinfo->xres = var->xres;
+ pinfo->yres = var->yres;
+ pinfo->lcdc.v_front_porch = var->upper_margin;
+ pinfo->lcdc.v_back_porch = var->lower_margin;
+ pinfo->lcdc.v_pulse_width = var->vsync_len;
+ pinfo->lcdc.h_front_porch = var->left_margin;
+ pinfo->lcdc.h_back_porch = var->right_margin;
+ pinfo->lcdc.h_pulse_width = var->hsync_len;
+ pinfo->clk_rate = var->pixclock;
+ /* todo: find how to pass CEA vic through framebuffer APIs */
+ pinfo->vic = var->reserved[3];
+}
+
static int mdss_fb_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
@@ -1096,16 +1109,25 @@
if ((var->xres == 0) || (var->yres == 0))
return -EINVAL;
- if ((var->xres > mfd->panel_info->xres) ||
- (var->yres > mfd->panel_info->yres))
- return -EINVAL;
-
if (var->xoffset > (var->xres_virtual - var->xres))
return -EINVAL;
if (var->yoffset > (var->yres_virtual - var->yres))
return -EINVAL;
+ if (mfd->panel_info) {
+ struct mdss_panel_info panel_info;
+ int rc;
+
+ memcpy(&panel_info, mfd->panel_info, sizeof(panel_info));
+ mdss_fb_var_to_panelinfo(var, &panel_info);
+ rc = mdss_fb_send_panel_event(mfd, MDSS_EVENT_CHECK_PARAMS,
+ &panel_info);
+ if (IS_ERR_VALUE(rc))
+ return rc;
+ mfd->panel_reconfig = rc;
+ }
+
return 0;
}
@@ -1114,7 +1136,6 @@
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
struct fb_var_screeninfo *var = &info->var;
int old_imgType;
- int blank = 0;
old_imgType = mfd->fb_imgType;
switch (var->bits_per_pixel) {
@@ -1146,22 +1167,14 @@
return -EINVAL;
}
- if ((mfd->var_pixclock != var->pixclock) ||
- (mfd->hw_refresh && ((mfd->fb_imgType != old_imgType) ||
- (mfd->var_pixclock != var->pixclock) ||
- (mfd->var_xres != var->xres) ||
- (mfd->var_yres != var->yres)))) {
- mfd->var_xres = var->xres;
- mfd->var_yres = var->yres;
- mfd->var_pixclock = var->pixclock;
- blank = 1;
- }
mfd->fbi->fix.line_length = mdss_fb_line_length(mfd->index, var->xres,
var->bits_per_pixel / 8);
- if (blank) {
+ if (mfd->panel_reconfig || (mfd->fb_imgType != old_imgType)) {
mdss_fb_blank_sub(FB_BLANK_POWERDOWN, info, mfd->op_enable);
+ mdss_fb_var_to_panelinfo(var, mfd->panel_info);
mdss_fb_blank_sub(FB_BLANK_UNBLANK, info, mfd->op_enable);
+ mfd->panel_reconfig = false;
}
return 0;
diff --git a/drivers/video/msm/mdss/mdss_fb.h b/drivers/video/msm/mdss/mdss_fb.h
index b7bd4e6..11bb859 100644
--- a/drivers/video/msm/mdss/mdss_fb.h
+++ b/drivers/video/msm/mdss/mdss_fb.h
@@ -62,6 +62,8 @@
int op_enable;
u32 fb_imgType;
+ int panel_reconfig;
+
u32 dst_format;
int vsync_pending;
ktime_t vsync_time;
@@ -100,10 +102,6 @@
struct platform_device *pdev;
- u32 var_xres;
- u32 var_yres;
- u32 var_pixclock;
-
u32 mdp_fb_page_protection;
struct mdss_mdp_ctl *ctl;
diff --git a/drivers/video/msm/mdss/mdss_hdmi_tx.c b/drivers/video/msm/mdss/mdss_hdmi_tx.c
index 539cd49..37bbbdf 100644
--- a/drivers/video/msm/mdss/mdss_hdmi_tx.c
+++ b/drivers/video/msm/mdss/mdss_hdmi_tx.c
@@ -163,6 +163,66 @@
}
} /* hdmi_tx_io_name */
+static int hdmi_tx_get_vic_from_panel_info(struct hdmi_tx_ctrl *hdmi_ctrl,
+ struct mdss_panel_info *pinfo)
+{
+ int new_vic = -1;
+ u32 h_total, v_total;
+ struct hdmi_disp_mode_timing_type timing;
+
+ if (!hdmi_ctrl || !pinfo) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ if (pinfo->vic) {
+ if (hdmi_get_supported_mode(pinfo->vic - 1)) {
+ new_vic = pinfo->vic - 1;
+ DEV_DBG("%s: %s is supported\n", __func__,
+ hdmi_get_video_fmt_2string(new_vic));
+ } else {
+ DEV_ERR("%s: invalid or not supported vic\n",
+ __func__);
+ return -EPERM;
+ }
+ } else {
+ timing.active_h = pinfo->xres;
+ timing.back_porch_h = pinfo->lcdc.h_back_porch;
+ timing.front_porch_h = pinfo->lcdc.h_front_porch;
+ timing.pulse_width_h = pinfo->lcdc.h_pulse_width;
+ h_total = timing.active_h + timing.back_porch_h +
+ timing.front_porch_h + timing.pulse_width_h;
+ DEV_DBG("%s: ah=%d bph=%d fph=%d pwh=%d ht=%d\n", __func__,
+ timing.active_h, timing.back_porch_h,
+ timing.front_porch_h, timing.pulse_width_h, h_total);
+
+ timing.active_v = pinfo->yres;
+ timing.back_porch_v = pinfo->lcdc.v_back_porch;
+ timing.front_porch_v = pinfo->lcdc.v_front_porch;
+ timing.pulse_width_v = pinfo->lcdc.v_pulse_width;
+ v_total = timing.active_v + timing.back_porch_v +
+ timing.front_porch_v + timing.pulse_width_v;
+ DEV_DBG("%s: av=%d bpv=%d fpv=%d pwv=%d vt=%d\n", __func__,
+ timing.active_v, timing.back_porch_v,
+ timing.front_porch_v, timing.pulse_width_v, v_total);
+
+ timing.pixel_freq = pinfo->clk_rate / 1000;
+ if (h_total && v_total) {
+ timing.refresh_rate = ((timing.pixel_freq * 1000) /
+ (h_total * v_total)) * 1000;
+ } else {
+ DEV_ERR("%s: cannot cal refresh rate\n", __func__);
+ return -EPERM;
+ }
+ DEV_DBG("%s: pixel_freq=%d refresh_rate=%d\n", __func__,
+ timing.pixel_freq, timing.refresh_rate);
+
+ new_vic = hdmi_get_video_id_code(&timing);
+ }
+
+ return new_vic;
+} /* hdmi_tx_get_vic_from_panel_info */
+
static struct hdmi_tx_ctrl *hdmi_tx_get_drvdata_from_panel_data(
struct mdss_panel_data *mpd)
{
@@ -170,14 +230,8 @@
if (mpd) {
hdmi_ctrl = container_of(mpd, struct hdmi_tx_ctrl, panel_data);
- if (hdmi_ctrl) {
- hdmi_ctrl->pixel_clk =
- mpd->panel_info.fbi->var.pixclock;
- hdmi_ctrl->xres = mpd->panel_info.fbi->var.xres;
- hdmi_ctrl->yres = mpd->panel_info.fbi->var.yres;
- } else {
+ if (!hdmi_ctrl)
DEV_ERR("%s: hdmi_ctrl = NULL\n", __func__);
- }
} else {
DEV_ERR("%s: mdss_panel_data = NULL\n", __func__);
}
@@ -546,90 +600,40 @@
return 0;
} /* hdmi_tx_check_capability */
-static int hdmi_tx_set_video_fmt(struct hdmi_tx_ctrl *hdmi_ctrl)
+static int hdmi_tx_set_video_fmt(struct hdmi_tx_ctrl *hdmi_ctrl,
+ struct mdss_panel_info *pinfo)
{
- int rc = 0;
+ int new_vic = -1;
const struct hdmi_disp_mode_timing_type *timing = NULL;
- struct hdmi_tx_platform_data *pdata = NULL;
- u32 format = DEFAULT_VIDEO_RESOLUTION;
- if (!hdmi_ctrl) {
+ if (!hdmi_ctrl || !pinfo) {
DEV_ERR("%s: invalid input\n", __func__);
- rc = -EINVAL;
- goto end;
+ return -EINVAL;
}
- pdata = &hdmi_ctrl->pdata;
-
- DEV_DBG("%s: Resolution wanted=%dx%d\n", __func__, hdmi_ctrl->xres,
- hdmi_ctrl->yres);
- switch (hdmi_ctrl->xres) {
- default:
- case 640:
- format = HDMI_VFRMT_640x480p60_4_3;
- break;
- case 720:
- format = (hdmi_ctrl->yres == 480)
- ? HDMI_VFRMT_720x480p60_16_9
- : HDMI_VFRMT_720x576p50_16_9;
- break;
- case 1280:
- if (hdmi_ctrl->frame_rate == 50000)
- format = HDMI_VFRMT_1280x720p50_16_9;
- else
- format = HDMI_VFRMT_1280x720p60_16_9;
- break;
- case 1440:
- /* interlaced has half of y res. */
- format = (hdmi_ctrl->yres == 240)
- ? HDMI_VFRMT_1440x480i60_16_9
- : HDMI_VFRMT_1440x576i50_16_9;
- break;
- case 1920:
- if (hdmi_ctrl->yres == 540) {/* interlaced */
- format = HDMI_VFRMT_1920x1080i60_16_9;
- } else if (hdmi_ctrl->yres == 1080) {
- if (hdmi_ctrl->frame_rate == 50000)
- format = HDMI_VFRMT_1920x1080p50_16_9;
- else if (hdmi_ctrl->frame_rate == 24000)
- format = HDMI_VFRMT_1920x1080p24_16_9;
- else if (hdmi_ctrl->frame_rate == 25000)
- format = HDMI_VFRMT_1920x1080p25_16_9;
- else if (hdmi_ctrl->frame_rate == 30000)
- format = HDMI_VFRMT_1920x1080p30_16_9;
- else
- format = HDMI_VFRMT_1920x1080p60_16_9;
- }
- break;
+ new_vic = hdmi_tx_get_vic_from_panel_info(hdmi_ctrl, pinfo);
+ if ((new_vic < 0) || (new_vic > HDMI_VFRMT_MAX)) {
+ DEV_ERR("%s: invalid or not supported vic\n", __func__);
+ return -EPERM;
}
- if (hdmi_ctrl->video_resolution != format)
- DEV_DBG("%s: switching %s => %s", __func__,
- hdmi_get_video_fmt_2string(
- hdmi_ctrl->video_resolution),
- hdmi_get_video_fmt_2string(format));
- else
- DEV_DBG("resolution %s", hdmi_get_video_fmt_2string(
- hdmi_ctrl->video_resolution));
+ DEV_DBG("%s: switching from %s => %s", __func__,
+ hdmi_get_video_fmt_2string(hdmi_ctrl->video_resolution),
+ hdmi_get_video_fmt_2string(new_vic));
- timing = hdmi_get_supported_mode(format);
- if (!timing) {
- DEV_ERR("%s: invalid video fmt=%d\n", __func__,
- hdmi_ctrl->video_resolution);
- rc = -EPERM;
- goto end;
- }
+ hdmi_ctrl->video_resolution = (u32)new_vic;
+
+ timing = hdmi_get_supported_mode(hdmi_ctrl->video_resolution);
/* todo: find a better way */
hdmi_ctrl->pdata.power_data[HDMI_TX_CORE_PM].clk_config[0].rate =
timing->pixel_freq * 1000;
- hdmi_ctrl->video_resolution = format;
hdmi_edid_set_video_resolution(
- hdmi_ctrl->feature_data[HDMI_TX_FEAT_EDID], format);
+ hdmi_ctrl->feature_data[HDMI_TX_FEAT_EDID],
+ hdmi_ctrl->video_resolution);
-end:
- return rc;
+ return 0;
} /* hdmi_tx_set_video_fmt */
static void hdmi_tx_video_setup(struct hdmi_tx_ctrl *hdmi_ctrl,
@@ -1710,8 +1714,6 @@
if (hdmi_ctrl->hpd_off_pending) {
hdmi_tx_hpd_off(hdmi_ctrl);
hdmi_ctrl->hpd_off_pending = false;
- } else {
- hdmi_tx_hpd_polarity_setup(hdmi_ctrl, HPD_CONNECT_POLARITY);
}
DEV_INFO("%s: HDMI Core: OFF\n", __func__);
@@ -1764,15 +1766,15 @@
/* If a power down is already underway, wait for it to finish */
flush_work_sync(&hdmi_ctrl->power_off_work);
- DEV_INFO("power: ON (%dx%d %ld)\n", hdmi_ctrl->xres, hdmi_ctrl->yres,
- hdmi_ctrl->pixel_clk);
-
- rc = hdmi_tx_set_video_fmt(hdmi_ctrl);
+ rc = hdmi_tx_set_video_fmt(hdmi_ctrl, &panel_data->panel_info);
if (rc) {
DEV_ERR("%s: cannot set video_fmt.rc=%d\n", __func__, rc);
return rc;
}
+ DEV_INFO("power: ON (%s)\n", hdmi_get_video_fmt_2string(
+ hdmi_ctrl->video_resolution));
+
rc = hdmi_tx_core_on(hdmi_ctrl);
if (rc) {
DEV_ERR("%s: hdmi_msm_core_on failed\n", __func__);
@@ -2044,7 +2046,7 @@
static int hdmi_tx_panel_event_handler(struct mdss_panel_data *panel_data,
int event, void *arg)
{
- int rc = 0;
+ int rc = 0, new_vic = -1;
struct hdmi_tx_ctrl *hdmi_ctrl =
hdmi_tx_get_drvdata_from_panel_data(panel_data);
@@ -2057,6 +2059,25 @@
event, hdmi_ctrl->panel_suspend, hdmi_ctrl->hpd_feature_on);
switch (event) {
+ case MDSS_EVENT_CHECK_PARAMS:
+ new_vic = hdmi_tx_get_vic_from_panel_info(hdmi_ctrl,
+ (struct mdss_panel_info *)arg);
+ if ((new_vic < 0) || (new_vic > HDMI_VFRMT_MAX)) {
+ DEV_ERR("%s: invalid or not supported vic\n", __func__);
+ return -EPERM;
+ }
+
+ /*
+ * return value of 1 lets mdss know that panel
+ * needs a reconfig due to new resolution and
+ * it will issue close and open subsequently.
+ */
+ if (new_vic != hdmi_ctrl->video_resolution)
+ rc = 1;
+ else
+ DEV_DBG("%s: no res change.\n", __func__);
+ break;
+
case MDSS_EVENT_RESUME:
if (hdmi_ctrl->hpd_feature_on) {
INIT_COMPLETION(hdmi_ctrl->hpd_done);
@@ -2104,8 +2125,6 @@
if (!hdmi_ctrl->panel_power_on) {
if (hdmi_ctrl->hpd_feature_on)
hdmi_tx_hpd_off(hdmi_ctrl);
- else
- DEV_ERR("%s: invalid state\n", __func__);
hdmi_ctrl->panel_suspend = false;
} else {
@@ -2131,6 +2150,12 @@
if (hdmi_ctrl->panel_suspend)
flush_work_sync(&hdmi_ctrl->power_off_work);
break;
+
+ case MDSS_EVENT_CLOSE:
+ if (hdmi_ctrl->hpd_feature_on)
+ hdmi_tx_hpd_polarity_setup(hdmi_ctrl,
+ HPD_CONNECT_POLARITY);
+ break;
}
return rc;
diff --git a/drivers/video/msm/mdss/mdss_hdmi_tx.h b/drivers/video/msm/mdss/mdss_hdmi_tx.h
index 2d431b7..5f8094f 100644
--- a/drivers/video/msm/mdss/mdss_hdmi_tx.h
+++ b/drivers/video/msm/mdss/mdss_hdmi_tx.h
@@ -50,6 +50,7 @@
struct workqueue_struct *workq;
uint32_t video_resolution;
+
u32 panel_power_on;
u32 panel_suspend;
@@ -62,11 +63,6 @@
struct work_struct power_off_work;
- unsigned long pixel_clk;
- u32 xres;
- u32 yres;
- u32 frame_rate;
-
u32 present_hdcp;
u8 spd_vendor_name[8];
diff --git a/drivers/video/msm/mdss/mdss_hdmi_util.c b/drivers/video/msm/mdss/mdss_hdmi_util.c
index e7ea8c9..a3d76be 100644
--- a/drivers/video/msm/mdss/mdss_hdmi_util.c
+++ b/drivers/video/msm/mdss/mdss_hdmi_util.c
@@ -98,6 +98,57 @@
return ret;
} /* hdmi_get_supported_mode */
+int hdmi_get_video_id_code(struct hdmi_disp_mode_timing_type *timing_in)
+{
+ int i, vic = -1;
+
+ if (!timing_in) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ goto exit;
+ }
+
+ /* active_low_h, active_low_v and interlaced are not checked against */
+ for (i = 0; i < HDMI_VFRMT_MAX; i++) {
+ struct hdmi_disp_mode_timing_type *supported_timing =
+ &hdmi_supported_video_mode_lut[i];
+
+ if (!supported_timing->supported)
+ continue;
+ if (timing_in->active_h != supported_timing->active_h)
+ continue;
+ if (timing_in->front_porch_h != supported_timing->front_porch_h)
+ continue;
+ if (timing_in->pulse_width_h != supported_timing->pulse_width_h)
+ continue;
+ if (timing_in->back_porch_h != supported_timing->back_porch_h)
+ continue;
+ if (timing_in->active_v != supported_timing->active_v)
+ continue;
+ if (timing_in->front_porch_v != supported_timing->front_porch_v)
+ continue;
+ if (timing_in->pulse_width_v != supported_timing->pulse_width_v)
+ continue;
+ if (timing_in->back_porch_v != supported_timing->back_porch_v)
+ continue;
+ if (timing_in->pixel_freq != supported_timing->pixel_freq)
+ continue;
+ if (timing_in->refresh_rate != supported_timing->refresh_rate)
+ continue;
+
+ vic = (int)supported_timing->video_format;
+ break;
+ }
+
+ if (vic < 0)
+ DEV_ERR("%s: timing asked is not yet supported\n", __func__);
+
+exit:
+ DEV_DBG("%s: vic = %d timing = %s\n", __func__, vic,
+ hdmi_get_video_fmt_2string((u32)vic));
+
+ return vic;
+} /* hdmi_get_video_id_code */
+
void hdmi_set_supported_mode(u32 mode)
{
switch (mode) {
diff --git a/drivers/video/msm/mdss/mdss_hdmi_util.h b/drivers/video/msm/mdss/mdss_hdmi_util.h
index 852a93c..c970ebe 100644
--- a/drivers/video/msm/mdss/mdss_hdmi_util.h
+++ b/drivers/video/msm/mdss/mdss_hdmi_util.h
@@ -397,6 +397,7 @@
int retry;
};
+int hdmi_get_video_id_code(struct hdmi_disp_mode_timing_type *timing_in);
const struct hdmi_disp_mode_timing_type *hdmi_get_supported_mode(u32 mode);
void hdmi_set_supported_mode(u32 mode);
const char *hdmi_get_video_fmt_2string(u32 format);
diff --git a/drivers/video/msm/mdss/mdss_mdp.h b/drivers/video/msm/mdss/mdss_mdp.h
index 157a7eb..d273201 100644
--- a/drivers/video/msm/mdss/mdss_mdp.h
+++ b/drivers/video/msm/mdss/mdss_mdp.h
@@ -243,7 +243,9 @@
unsigned long smp[MAX_PLANES];
- struct mdss_mdp_data buffers[2];
+ struct mdss_mdp_data back_buf;
+ struct mdss_mdp_data front_buf;
+
struct list_head used_list;
struct list_head cleanup_list;
diff --git a/drivers/video/msm/mdss/mdss_mdp_ctl.c b/drivers/video/msm/mdss/mdss_mdp_ctl.c
index bd7371b..31cc527 100644
--- a/drivers/video/msm/mdss/mdss_mdp_ctl.c
+++ b/drivers/video/msm/mdss/mdss_mdp_ctl.c
@@ -391,8 +391,8 @@
return -ENODEV;
}
- width = mfd->fbi->var.xres;
- height = mfd->fbi->var.yres;
+ width = pdata->panel_info.xres;
+ height = pdata->panel_info.yres;
if (width > (2 * MAX_MIXER_WIDTH)) {
pr_err("unsupported resolution\n");
diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c
index 61c1e9f..5b6d009 100644
--- a/drivers/video/msm/mdss/mdss_mdp_overlay.c
+++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c
@@ -430,35 +430,24 @@
{
struct mdss_mdp_pipe *pipe, *tmp;
LIST_HEAD(destroy_pipes);
- int i;
- mutex_lock(&mfd->ov_lock);
mutex_lock(&mfd->lock);
list_for_each_entry_safe(pipe, tmp, &mfd->pipes_cleanup, cleanup_list) {
list_move(&pipe->cleanup_list, &destroy_pipes);
- for (i = 0; i < ARRAY_SIZE(pipe->buffers); i++)
- mdss_mdp_overlay_free_buf(&pipe->buffers[i]);
+ mdss_mdp_overlay_free_buf(&pipe->back_buf);
+ mdss_mdp_overlay_free_buf(&pipe->front_buf);
}
- if (!list_empty(&mfd->pipes_used)) {
- struct mdss_mdp_data *data;
- int buf_ndx;
-
- list_for_each_entry(pipe, &mfd->pipes_used, used_list) {
- buf_ndx = (pipe->play_cnt - 1) & 1; /* prev buffer */
- data = &pipe->buffers[buf_ndx];
-
- if (data->num_planes) {
- pr_debug("free buffer ndx=%d pnum=%d\n",
- buf_ndx, pipe->num);
- mdss_mdp_overlay_free_buf(data);
- }
+ list_for_each_entry(pipe, &mfd->pipes_used, used_list) {
+ if (pipe->back_buf.num_planes) {
+ /* make back buffer active */
+ mdss_mdp_overlay_free_buf(&pipe->front_buf);
+ swap(pipe->back_buf, pipe->front_buf);
}
}
mutex_unlock(&mfd->lock);
list_for_each_entry_safe(pipe, tmp, &destroy_pipes, cleanup_list)
mdss_mdp_pipe_destroy(pipe);
- mutex_unlock(&mfd->ov_lock);
return 0;
}
@@ -468,12 +457,16 @@
struct msm_fb_data_type *mfd = ctl->mfd;
int ret;
+ mutex_lock(&mfd->ov_lock);
+
if (mfd->kickoff_fnc)
ret = mfd->kickoff_fnc(ctl);
else
ret = mdss_mdp_display_commit(ctl, NULL);
- if (IS_ERR_VALUE(ret))
+ if (IS_ERR_VALUE(ret)) {
+ mutex_unlock(&mfd->ov_lock);
return ret;
+ }
complete(&mfd->update.comp);
mutex_lock(&mfd->no_update.lock);
@@ -486,6 +479,8 @@
ret = mdss_mdp_overlay_cleanup(mfd);
+ mutex_unlock(&mfd->ov_lock);
+
return ret;
}
@@ -630,7 +625,7 @@
struct mdss_mdp_ctl *ctl;
struct mdss_mdp_pipe *pipe;
struct mdss_mdp_data *src_data;
- int ret, buf_ndx;
+ int ret;
u32 flags;
pipe = mdss_mdp_pipe_get_locked(req->id);
@@ -643,9 +638,12 @@
flags = (pipe->flags & MDP_SECURE_OVERLAY_SESSION);
- buf_ndx = (pipe->play_cnt + 1) & 1; /* next buffer */
- src_data = &pipe->buffers[buf_ndx];
- mdss_mdp_overlay_free_buf(src_data);
+ src_data = &pipe->back_buf;
+ if (src_data->num_planes) {
+ pr_warn("dropped buffer pnum=%d play=%d addr=0x%x\n",
+ pipe->num, pipe->play_cnt, src_data->p[0].addr);
+ mdss_mdp_overlay_free_buf(src_data);
+ }
ret = mdss_mdp_overlay_get_buf(mfd, src_data, &req->data, 1, flags);
if (IS_ERR_VALUE(ret)) {
diff --git a/drivers/video/msm/mdss/mdss_panel.h b/drivers/video/msm/mdss/mdss_panel.h
index 28d7051..d807493 100644
--- a/drivers/video/msm/mdss/mdss_panel.h
+++ b/drivers/video/msm/mdss/mdss_panel.h
@@ -64,6 +64,7 @@
MDSS_EVENT_CLOSE,
MDSS_EVENT_SUSPEND,
MDSS_EVENT_RESUME,
+ MDSS_EVENT_CHECK_PARAMS,
};
/* panel info type */
@@ -179,6 +180,7 @@
u32 frame_count;
u32 is_3d_panel;
u32 out_format;
+ u32 vic; /* video identification code */
struct lcd_panel_info lcd;
struct lcdc_panel_info lcdc;
diff --git a/drivers/video/msm/mdss/mdss_wb.c b/drivers/video/msm/mdss/mdss_wb.c
index c3dc06b..47dc2c8 100644
--- a/drivers/video/msm/mdss/mdss_wb.c
+++ b/drivers/video/msm/mdss/mdss_wb.c
@@ -25,11 +25,49 @@
#include "mdss_panel.h"
+/**
+ * mdss_wb_check_params - check new panel info params
+ * @pdata: current panel information
+ * @new: updates to panel info
+ *
+ * Checks if there are any changes that require panel reconfiguration
+ * in order to be reflected on writeback buffer.
+ *
+ * Return negative errno if invalid input, zero if there is no panel reconfig
+ * needed and non-zero if reconfiguration is needed.
+ */
+static int mdss_wb_check_params(struct mdss_panel_data *pdata,
+ struct mdss_panel_info *new)
+{
+ struct mdss_panel_info *old;
+
+ if (!pdata || !new) {
+ pr_err("%s: Invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ old = &pdata->panel_info;
+
+ if ((old->xres != new->xres) || (old->yres != new->yres))
+ return 1;
+
+ return 0;
+}
+
static int mdss_wb_event_handler(struct mdss_panel_data *pdata,
int event, void *arg)
{
- pr_debug("%s: event=%d\n", __func__, event);
- return 0;
+ int rc = 0;
+
+ switch (event) {
+ case MDSS_EVENT_CHECK_PARAMS:
+ rc = mdss_wb_check_params(pdata, (struct mdss_panel_info *)arg);
+ break;
+ default:
+ pr_debug("%s: panel event (%d) not handled\n", __func__, event);
+ break;
+ }
+ return rc;
}
static int mdss_wb_parse_dt(struct platform_device *pdev,
diff --git a/drivers/video/msm/mipi_dsi.h b/drivers/video/msm/mipi_dsi.h
index 2711c1a..d4d7288 100644
--- a/drivers/video/msm/mipi_dsi.h
+++ b/drivers/video/msm/mipi_dsi.h
@@ -264,7 +264,8 @@
typedef void (*fxn)(u32 data);
#define CMD_REQ_RX 0x0001
-#define CMD_REQ_COMMIT 0x0002
+#define CMD_REQ_COMMIT 0x0002
+#define CMD_CLK_CTRL 0x0004
#define CMD_REQ_NO_MAX_PKT_SIZE 0x0008
struct dcs_cmd_req {
diff --git a/drivers/video/msm/mipi_dsi_host.c b/drivers/video/msm/mipi_dsi_host.c
index bea6b4e..ee4a578 100644
--- a/drivers/video/msm/mipi_dsi_host.c
+++ b/drivers/video/msm/mipi_dsi_host.c
@@ -1249,7 +1249,6 @@
/* transmit read comamnd to client */
mipi_dsi_cmd_dma_tx(tp);
- mipi_dsi_disable_irq(DSI_CMD_TERM);
/*
* once cmd_dma_done interrupt received,
* return data from client is ready and stored
@@ -1359,7 +1358,6 @@
/* transmit read comamnd to client */
mipi_dsi_cmd_dma_tx(tp);
- mipi_dsi_disable_irq(DSI_CMD_TERM);
/*
* once cmd_dma_done interrupt received,
* return data from client is ready and stored
@@ -1580,7 +1578,6 @@
void mipi_dsi_cmdlist_commit(int from_mdp)
{
struct dcs_cmd_req *req;
- int video;
u32 dsi_ctrl;
mutex_lock(&cmd_mutex);
@@ -1592,12 +1589,6 @@
if (req == NULL)
goto need_lock;
- video = MIPI_INP(MIPI_DSI_BASE + 0x0000);
- video &= 0x02; /* VIDEO_MODE */
-
- if (!video)
- mipi_dsi_clk_cfg(1);
-
pr_debug("%s: from_mdp=%d pid=%d\n", __func__, from_mdp, current->pid);
dsi_ctrl = MIPI_INP(MIPI_DSI_BASE + 0x0000);
@@ -1619,9 +1610,6 @@
else
mipi_dsi_cmdlist_tx(req);
- if (!video)
- mipi_dsi_clk_cfg(0);
-
need_lock:
if (from_mdp) /* from pipe_commit */
@@ -1655,9 +1643,15 @@
pr_debug("%s: tot=%d put=%d get=%d\n", __func__,
cmdlist.tot, cmdlist.put, cmdlist.get);
+ if (req->flags & CMD_CLK_CTRL)
+ mipi_dsi_clk_cfg(1);
+
if (req->flags & CMD_REQ_COMMIT)
mipi_dsi_cmdlist_commit(0);
+ if (req->flags & CMD_CLK_CTRL)
+ mipi_dsi_clk_cfg(0);
+
return ret;
}
diff --git a/drivers/video/msm/mipi_novatek.c b/drivers/video/msm/mipi_novatek.c
index ecac82d..68bc65e 100644
--- a/drivers/video/msm/mipi_novatek.c
+++ b/drivers/video/msm/mipi_novatek.c
@@ -476,7 +476,7 @@
cmdreq.cmds = &backlight_cmd;
cmdreq.cmds_cnt = 1;
- cmdreq.flags = CMD_REQ_COMMIT;
+ cmdreq.flags = CMD_REQ_COMMIT | CMD_CLK_CTRL;
cmdreq.rlen = 0;
cmdreq.cb = NULL;
diff --git a/fs/buffer.c b/fs/buffer.c
index ad5938c..35ac651 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1399,12 +1399,49 @@
return 0;
}
+static void __evict_bh_lru(void *arg)
+{
+ struct bh_lru *b = &get_cpu_var(bh_lrus);
+ struct buffer_head *bh = arg;
+ int i;
+
+ for (i = 0; i < BH_LRU_SIZE; i++) {
+ if (b->bhs[i] == bh) {
+ brelse(b->bhs[i]);
+ b->bhs[i] = NULL;
+ goto out;
+ }
+ }
+out:
+ put_cpu_var(bh_lrus);
+}
+
+static bool bh_exists_in_lru(int cpu, void *arg)
+{
+ struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
+ struct buffer_head *bh = arg;
+ int i;
+
+ for (i = 0; i < BH_LRU_SIZE; i++) {
+ if (b->bhs[i] == bh)
+ return 1;
+ }
+
+ return 0;
+
+}
void invalidate_bh_lrus(void)
{
on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
+void evict_bh_lrus(struct buffer_head *bh)
+{
+ on_each_cpu_cond(bh_exists_in_lru, __evict_bh_lru, bh, 1, GFP_ATOMIC);
+}
+EXPORT_SYMBOL_GPL(evict_bh_lrus);
+
void set_bh_page(struct buffer_head *bh,
struct page *page, unsigned long offset)
{
@@ -3054,8 +3091,15 @@
do {
if (buffer_write_io_error(bh) && page->mapping)
set_bit(AS_EIO, &page->mapping->flags);
- if (buffer_busy(bh))
- goto failed;
+ if (buffer_busy(bh)) {
+ /*
+ * Check if the busy failure was due to an
+ * outstanding LRU reference
+ */
+ evict_bh_lrus(bh);
+ if (buffer_busy(bh))
+ goto failed;
+ }
bh = bh->b_this_page;
} while (bh != head);
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 29546b7..46ae59f 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -447,3 +447,4 @@
header-y += coresight-stm.h
header-y += ci-bridge-spi.h
header-y += msm_audio_amrwbplus.h
+header-y += avtimer.h
diff --git a/include/linux/avtimer.h b/include/linux/avtimer.h
new file mode 100644
index 0000000..e68da6a
--- /dev/null
+++ b/include/linux/avtimer.h
@@ -0,0 +1,21 @@
+#ifndef AVTIMER_H
+#define AVTIMER_H
+
+#include <linux/ioctl.h>
+
+#define MAJOR_NUM 100
+
+#define IOCTL_GET_AVTIMER_TICK _IOR(MAJOR_NUM, 0, char *)
+/*
+ * This IOCTL is used to read the avtimer tick value.
+ * Avtimer is a 64 bit timer tick, hence the expected
+ * argument is of type uint64_t
+ */
+struct dev_avtimer_data {
+ uint32_t avtimer_msw_phy_addr;
+ uint32_t avtimer_lsw_phy_addr;
+};
+int avcs_core_open(void);
+int avcs_core_disable_power_collapse(int disable);/* true or false */
+
+#endif
diff --git a/include/linux/mfd/pm8xxx/pm8921-charger.h b/include/linux/mfd/pm8xxx/pm8921-charger.h
index ae2c3d8..44f8538 100644
--- a/include/linux/mfd/pm8xxx/pm8921-charger.h
+++ b/include/linux/mfd/pm8xxx/pm8921-charger.h
@@ -123,6 +123,21 @@
* resistance of the pads, connectors, battery terminals
* and rsense.
* @led_src_config: Power source for anode of charger indicator LED.
+ * @btc_override: disable the comparators for configurations where
+ * suitable voltages don't appear on the vbatt therm line
+ * for the charger to detect the battery is either cold / hot.
+ * @btc_override_cold_degc: Temperature in degCelsius when the battery is
+ * deemed cold and charging never happens. Used
+ * only if btc_override = 1
+ * @btc_override_hot_degc: Temperature in degCelsius when the battery is
+ * deemed hot and charging never happens. Used
+ * only if btc_override = 1
+ * @btc_delay_ms: Delay in milliseconds to monitor the battery temperature
+ * while charging when btc_override = 1
+ * @btc_panic_if_cant_stop_chg: flag to instruct the driver to panic if the
+ * driver couldn't stop charging when battery
+ * temperature is out of bounds. Used only if
+ * btc_override = 1
*/
struct pm8921_charger_platform_data {
struct pm8xxx_charger_core_data charger_cdata;
@@ -163,6 +178,11 @@
int rconn_mohm;
enum pm8921_chg_led_src_config led_src_config;
int battery_less_hardware;
+ int btc_override;
+ int btc_override_cold_degc;
+ int btc_override_hot_degc;
+ int btc_delay_ms;
+ int btc_panic_if_cant_stop_chg;
};
enum pm8921_charger_source {
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 7247696..24b9790 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -178,6 +178,8 @@
extern unsigned int mmc_calc_max_discard(struct mmc_card *card);
extern int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen);
+extern int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
+ bool is_rel_write);
extern int mmc_hw_reset(struct mmc_host *host);
extern int mmc_hw_reset_check(struct mmc_host *host);
extern int mmc_can_reset(struct mmc_card *card);
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 08f74e6..f8a3a10 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -61,14 +61,6 @@
MIGRATE_TYPES
};
-/*
- * Returns a list which contains the migrate types on to which
- * an allocation falls back when the free list for the migrate
- * type mtype is depleted.
- * The end of the list is delimited by the type MIGRATE_RESERVE.
- */
-extern int *get_migratetype_fallbacks(int mtype);
-
#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
# define cma_wmark_pages(zone) zone->min_cma_pages
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index e5e0bb4..3858022 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -1840,6 +1840,12 @@
V4L2_MPEG_VIDC_VIDEO_H264_VUI_TIMING_INFO_ENABLED = 1
};
+#define V4L2_CID_MPEG_VIDC_SET_PERF_LEVEL (V4L2_CID_MPEG_MSM_VIDC_BASE + 26)
+enum v4l2_mpeg_vidc_perf_level {
+ V4L2_CID_MPEG_VIDC_PERF_LEVEL_PERFORMANCE = 0,
+ V4L2_CID_MPEG_VIDC_PERF_LEVEL_TURBO = 1,
+};
+
/* Camera class control IDs */
#define V4L2_CID_CAMERA_CLASS_BASE (V4L2_CTRL_CLASS_CAMERA | 0x900)
#define V4L2_CID_CAMERA_CLASS (V4L2_CTRL_CLASS_CAMERA | 1)
diff --git a/include/media/msm_camera.h b/include/media/msm_camera.h
index a932011..971c9b3 100644
--- a/include/media/msm_camera.h
+++ b/include/media/msm_camera.h
@@ -1032,7 +1032,13 @@
#define CAMERA_EFFECT_EMBOSS 9
#define CAMERA_EFFECT_SKETCH 10
#define CAMERA_EFFECT_NEON 11
-#define CAMERA_EFFECT_MAX 12
+#define CAMERA_EFFECT_FADED 12
+#define CAMERA_EFFECT_VINTAGECOOL 13
+#define CAMERA_EFFECT_VINTAGEWARM 14
+#define CAMERA_EFFECT_ACCENT_BLUE 15
+#define CAMERA_EFFECT_ACCENT_GREEN 16
+#define CAMERA_EFFECT_ACCENT_ORANGE 17
+#define CAMERA_EFFECT_MAX 18
/* QRD */
#define CAMERA_EFFECT_BW 10
diff --git a/include/sound/apr_audio-v2.h b/include/sound/apr_audio-v2.h
index 3dd0ccd..9201a0a 100644
--- a/include/sound/apr_audio-v2.h
+++ b/include/sound/apr_audio-v2.h
@@ -1893,6 +1893,11 @@
struct afe_param_id_internal_bt_fm_cfg int_bt_fm;
} __packed;
+struct afe_audioif_config_command_no_payload {
+ struct apr_hdr hdr;
+ struct afe_port_cmd_set_param_v2 param;
+} __packed;
+
struct afe_audioif_config_command {
struct apr_hdr hdr;
struct afe_port_cmd_set_param_v2 param;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 45a8d86..a2bad88 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -401,9 +401,16 @@
static void __hrtick_start(void *arg)
{
struct rq *rq = arg;
+ struct hrtimer *timer = &rq->hrtick_timer;
+ ktime_t soft, hard;
+ unsigned long delta;
+
+ soft = hrtimer_get_softexpires(timer);
+ hard = hrtimer_get_expires(timer);
+ delta = ktime_to_ns(ktime_sub(hard, soft));
raw_spin_lock(&rq->lock);
- hrtimer_restart(&rq->hrtick_timer);
+ __hrtimer_start_range_ns(timer, soft, delta, HRTIMER_MODE_ABS, 0);
rq->hrtick_csd_pending = 0;
raw_spin_unlock(&rq->lock);
}
@@ -421,7 +428,8 @@
hrtimer_set_expires(timer, time);
if (rq == this_rq()) {
- hrtimer_restart(timer);
+ __hrtimer_start_range_ns(timer, ns_to_ktime(delay), 0,
+ HRTIMER_MODE_REL_PINNED, 0);
} else if (!rq->hrtick_csd_pending) {
__smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
rq->hrtick_csd_pending = 1;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c3142e8..92dd060 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -913,11 +913,6 @@
[MIGRATE_ISOLATE] = { MIGRATE_RESERVE }, /* Never used */
};
-int *get_migratetype_fallbacks(int mtype)
-{
- return fallbacks[mtype];
-}
-
/*
* Move the free pages in a range to the free lists of the requested type.
* Note that start_page and end_pages are not aligned on a pageblock
@@ -1400,15 +1395,18 @@
unsigned int order;
unsigned long watermark;
struct zone *zone;
+ int mt;
BUG_ON(!PageBuddy(page));
zone = page_zone(page);
order = page_order(page);
+ mt = get_pageblock_migratetype(page);
/* Obey watermarks as if the page was being allocated */
watermark = low_wmark_pages(zone) + (1 << order);
- if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
+ if (!is_migrate_cma(mt) && mt != MIGRATE_ISOLATE &&
+ !zone_watermark_ok(zone, 0, watermark, 0, 0))
return 0;
/* Remove page from free list */
@@ -1424,7 +1422,7 @@
if (order >= pageblock_order - 1) {
struct page *endpage = page + (1 << order) - 1;
for (; page < endpage; page += pageblock_nr_pages) {
- int mt = get_pageblock_migratetype(page);
+ mt = get_pageblock_migratetype(page);
if (mt != MIGRATE_ISOLATE && !is_migrate_cma(mt))
set_pageblock_migratetype(page,
MIGRATE_MOVABLE);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index e5f43ec..ea116e9 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1840,15 +1840,6 @@
struct hci_cp_auth_requested cp;
hci_remove_link_key(hdev, &conn->dst);
cp.handle = cpu_to_le16(conn->handle);
- /*Initiates dedicated bonding as pin or key is missing
- on remote device*/
- /*In case if remote device is ssp supported,
- reduce the security level to MEDIUM if it is HIGH*/
- if (conn->ssp_mode && conn->auth_initiator &&
- conn->io_capability != 0x03) {
- conn->pending_sec_level = BT_SECURITY_HIGH;
- conn->auth_type = HCI_AT_DEDICATED_BONDING_MITM;
- }
hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
sizeof(cp), &cp);
hci_dev_unlock(hdev);
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 23526f3..8568dae7 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -3094,6 +3094,12 @@
goto no_auto_confirm;
}
+ /* Show bonding dialog if neither side requires no bonding */
+ if ((conn->auth_type > 0x01) && (conn->remote_auth > 0x01)) {
+ if (!loc_mitm && !rem_mitm)
+ value = 0;
+ goto no_auto_confirm;
+ }
if ((!loc_mitm || rem_cap == 0x03) && (!rem_mitm || loc_cap == 0x03))
ev.auto_confirm = 1;
diff --git a/sound/soc/codecs/wcd9310.c b/sound/soc/codecs/wcd9310.c
index e11b985..e672cdb 100644
--- a/sound/soc/codecs/wcd9310.c
+++ b/sound/soc/codecs/wcd9310.c
@@ -295,6 +295,11 @@
(1 << AIF1_CAP) | (1 << AIF2_CAP), /* AIF2_CAP */
};
+static const u32 vport_i2s_check_table[NUM_CODEC_DAIS] = {
+ 0, /* AIF1_PB */
+ 0, /* AIF1_CAP */
+};
+
struct tabla_priv {
struct snd_soc_codec *codec;
struct tabla_reg_address reg_addr;
@@ -1746,6 +1751,7 @@
u32 dai_id = widget->shift;
u32 port_id = mixer->shift;
u32 enable = ucontrol->value.integer.value[0];
+ u32 vtable = vport_check_table[dai_id];
pr_debug("%s: wname %s cname %s value %u shift %d item %ld\n", __func__,
widget->name, ucontrol->id.name, widget->value, widget->shift,
@@ -1767,8 +1773,13 @@
/* only add to the list if value not set
*/
if (enable && !(widget->value & 1 << port_id)) {
+ if (tabla_p->intf_type ==
+ WCD9XXX_INTERFACE_TYPE_SLIMBUS)
+ vtable = vport_check_table[dai_id];
+ if (tabla_p->intf_type == WCD9XXX_INTERFACE_TYPE_I2C)
+ vtable = vport_i2s_check_table[dai_id];
if (wcd9xxx_tx_vport_validation(
- vport_check_table[dai_id],
+ vtable,
port_id,
tabla_p->dai)) {
pr_info("%s: TX%u is used by other virtual port\n",
diff --git a/sound/soc/codecs/wcd9320.c b/sound/soc/codecs/wcd9320.c
index 5ffb60a..6aa5bbb 100644
--- a/sound/soc/codecs/wcd9320.c
+++ b/sound/soc/codecs/wcd9320.c
@@ -181,6 +181,11 @@
(1 << AIF1_CAP) | (1 << AIF2_CAP), /* AIF2_CAP */
};
+static const u32 vport_i2s_check_table[NUM_CODEC_DAIS] = {
+ 0, /* AIF1_PB */
+ 0, /* AIF1_CAP */
+};
+
struct taiko_priv {
struct snd_soc_codec *codec;
u32 adc_count;
@@ -1523,6 +1528,7 @@
u32 dai_id = widget->shift;
u32 port_id = mixer->shift;
u32 enable = ucontrol->value.integer.value[0];
+ u32 vtable = vport_check_table[dai_id];
pr_debug("%s: wname %s cname %s value %u shift %d item %ld\n", __func__,
@@ -1539,7 +1545,6 @@
return -EINVAL;
}
}
- if (taiko_p->intf_type == WCD9XXX_INTERFACE_TYPE_SLIMBUS) {
switch (dai_id) {
case AIF1_CAP:
case AIF2_CAP:
@@ -1547,8 +1552,16 @@
/* only add to the list if value not set
*/
if (enable && !(widget->value & 1 << port_id)) {
+
+ if (taiko_p->intf_type ==
+ WCD9XXX_INTERFACE_TYPE_SLIMBUS)
+ vtable = vport_check_table[dai_id];
+ if (taiko_p->intf_type ==
+ WCD9XXX_INTERFACE_TYPE_I2C)
+ vtable = vport_i2s_check_table[dai_id];
+
if (wcd9xxx_tx_vport_validation(
- vport_check_table[dai_id],
+ vtable,
port_id,
taiko_p->dai)) {
pr_debug("%s: TX%u is used by other\n"
@@ -1583,7 +1596,6 @@
mutex_unlock(&codec->mutex);
return -EINVAL;
}
- }
pr_debug("%s: name %s sname %s updated value %u shift %d\n", __func__,
widget->name, widget->sname, widget->value, widget->shift);
@@ -4625,6 +4637,8 @@
*/
{TAIKO_A_RX_HPH_OCP_CTL, 0xE1, 0x61},
{TAIKO_A_RX_COM_OCP_COUNT, 0xFF, 0xFF},
+ {TAIKO_A_RX_HPH_L_TEST, 0x01, 0x01},
+ {TAIKO_A_RX_HPH_R_TEST, 0x01, 0x01},
/* Initialize gain registers to use register gain */
{TAIKO_A_RX_HPH_L_GAIN, 0x20, 0x20},
diff --git a/sound/soc/msm/msm8974.c b/sound/soc/msm/msm8974.c
index 37a4234..f8185bb 100644
--- a/sound/soc/msm/msm8974.c
+++ b/sound/soc/msm/msm8974.c
@@ -1076,6 +1076,22 @@
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
},
+ {
+ .name = "VoLTE",
+ .stream_name = "VoLTE",
+ .cpu_dai_name = "VoLTE",
+ .platform_name = "msm-pcm-voice",
+ .dynamic = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ /* this dai link has playback support */
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .be_id = MSM_FRONTEND_DAI_VOLTE,
+ },
/* Backend BT/FM DAI Links */
{
.name = LPASS_BE_INT_BT_SCO_RX,
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c
index f1e0f3a..a1e461d 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c
@@ -33,7 +33,9 @@
static struct snd_pcm_hardware msm_pcm_hardware = {
- .info = SNDRV_PCM_INFO_INTERLEAVED,
+ .info = (SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_RESUME),
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
.rate_min = 8000,
@@ -205,6 +207,55 @@
return 0;
}
+static int msm_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ int ret = 0;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct msm_voice *prtd = runtime->private_data;
+ uint16_t session_id = 0;
+
+ pr_debug("%s: cmd = %d\n", __func__, cmd);
+ if (is_volte(prtd))
+ session_id = voc_get_session_id(VOLTE_SESSION_NAME);
+ else
+ session_id = voc_get_session_id(VOICE_SESSION_NAME);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_STOP:
+ pr_debug("Start & Stop Voice call not handled in Trigger.\n");
+ break;
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ pr_debug("%s: resume call session_id = %d\n", __func__,
+ session_id);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ ret = msm_pcm_playback_prepare(substream);
+ else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ ret = msm_pcm_capture_prepare(substream);
+ if (prtd->playback_start && prtd->capture_start)
+ voc_resume_voice_call(session_id);
+ break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ pr_debug("%s: pause call session_id=%d\n",
+ __func__, session_id);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ if (prtd->playback_start)
+ prtd->playback_start = 0;
+ } else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+ if (prtd->capture_start)
+ prtd->capture_start = 0;
+ }
+ voc_standby_voice_call(session_id);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
static int msm_voice_volume_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -437,6 +488,7 @@
.hw_params = msm_pcm_hw_params,
.close = msm_pcm_close,
.prepare = msm_pcm_prepare,
+ .trigger = msm_pcm_trigger,
};
diff --git a/sound/soc/msm/qdsp6v2/q6adm.c b/sound/soc/msm/qdsp6v2/q6adm.c
index 7267a82..985f76b 100644
--- a/sound/soc/msm/qdsp6v2/q6adm.c
+++ b/sound/soc/msm/qdsp6v2/q6adm.c
@@ -37,17 +37,16 @@
atomic_t copp_id[AFE_MAX_PORTS];
atomic_t copp_cnt[AFE_MAX_PORTS];
atomic_t copp_stat[AFE_MAX_PORTS];
- u32 mem_map_handle[AFE_MAX_PORTS];
wait_queue_head_t wait[AFE_MAX_PORTS];
-};
-static struct acdb_cal_block mem_addr_audproc[MAX_AUDPROC_TYPES];
-static struct acdb_cal_block mem_addr_audvol[MAX_AUDPROC_TYPES];
+ struct acdb_cal_block mem_addr_audproc[MAX_AUDPROC_TYPES];
+ struct acdb_cal_block mem_addr_audvol[MAX_AUDPROC_TYPES];
/* 0 - (MAX_AUDPROC_TYPES -1): audproc handles */
/* (MAX_AUDPROC_TYPES -1) - (2 * MAX_AUDPROC_TYPES -1): audvol handles */
-atomic_t mem_map_handles[(2 * MAX_AUDPROC_TYPES)];
-atomic_t mem_map_index;
+ atomic_t mem_map_cal_handles[(2 * MAX_AUDPROC_TYPES)];
+ atomic_t mem_map_cal_index;
+};
static struct adm_ctl this_adm;
@@ -92,14 +91,14 @@
pr_debug("Resetting calibration blocks");
for (i = 0; i < MAX_AUDPROC_TYPES; i++) {
/* Device calibration */
- mem_addr_audproc[i].cal_size = 0;
- mem_addr_audproc[i].cal_kvaddr = 0;
- mem_addr_audproc[i].cal_paddr = 0;
+ this_adm.mem_addr_audproc[i].cal_size = 0;
+ this_adm.mem_addr_audproc[i].cal_kvaddr = 0;
+ this_adm.mem_addr_audproc[i].cal_paddr = 0;
/* Volume calibration */
- mem_addr_audvol[i].cal_size = 0;
- mem_addr_audvol[i].cal_kvaddr = 0;
- mem_addr_audvol[i].cal_paddr = 0;
+ this_adm.mem_addr_audvol[i].cal_size = 0;
+ this_adm.mem_addr_audvol[i].cal_kvaddr = 0;
+ this_adm.mem_addr_audvol[i].cal_paddr = 0;
}
return 0;
}
@@ -199,8 +198,9 @@
case ADM_CMDRSP_SHARED_MEM_MAP_REGIONS:
pr_debug("%s: ADM_CMDRSP_SHARED_MEM_MAP_REGIONS\n",
__func__);
- atomic_set(&mem_map_handles[
- atomic_read(&mem_map_index)], *payload);
+ atomic_set(&this_adm.mem_map_cal_handles[
+ atomic_read(&this_adm.mem_map_cal_index)],
+ *payload);
atomic_set(&this_adm.copp_stat[0], 1);
wake_up(&this_adm.wait[index]);
break;
@@ -247,8 +247,8 @@
adm_params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
adm_params.payload_addr_lsw = aud_cal->cal_paddr;
adm_params.payload_addr_msw = 0;
- adm_params.mem_map_handle = atomic_read(&mem_map_handles[
- atomic_read(&mem_map_index)]);
+ adm_params.mem_map_handle = atomic_read(&this_adm.mem_map_cal_handles[
+ atomic_read(&this_adm.mem_map_cal_index)]);
adm_params.payload_size = aud_cal->cal_size;
atomic_set(&this_adm.copp_stat[index], 0);
@@ -293,15 +293,16 @@
get_audproc_cal(acdb_path, &aud_cal);
/* map & cache buffers used */
- atomic_set(&mem_map_index, acdb_path);
- if (((mem_addr_audproc[acdb_path].cal_paddr != aud_cal.cal_paddr) &&
- (aud_cal.cal_size > 0)) ||
- (aud_cal.cal_size > mem_addr_audproc[acdb_path].cal_size)) {
+ atomic_set(&this_adm.mem_map_cal_index, acdb_path);
+ if (((this_adm.mem_addr_audproc[acdb_path].cal_paddr !=
+ aud_cal.cal_paddr) && (aud_cal.cal_size > 0)) ||
+ (aud_cal.cal_size >
+ this_adm.mem_addr_audproc[acdb_path].cal_size)) {
- if (mem_addr_audproc[acdb_path].cal_paddr != 0)
+ if (this_adm.mem_addr_audproc[acdb_path].cal_paddr != 0)
adm_memory_unmap_regions(port_id,
- &mem_addr_audproc[acdb_path].cal_paddr,
- &size, 1);
+ &this_adm.mem_addr_audproc[acdb_path].
+ cal_paddr, &size, 1);
result = adm_memory_map_regions(port_id, &aud_cal.cal_paddr,
0, &size, 1);
@@ -310,9 +311,9 @@
acdb_path, aud_cal.cal_paddr,
aud_cal.cal_size);
} else {
- mem_addr_audproc[acdb_path].cal_paddr =
+ this_adm.mem_addr_audproc[acdb_path].cal_paddr =
aud_cal.cal_paddr;
- mem_addr_audproc[acdb_path].cal_size = size;
+ this_adm.mem_addr_audproc[acdb_path].cal_size = size;
}
}
@@ -327,14 +328,16 @@
get_audvol_cal(acdb_path, &aud_cal);
/* map & cache buffers used */
- atomic_set(&mem_map_index, (acdb_path + MAX_AUDPROC_TYPES));
- if (((mem_addr_audvol[acdb_path].cal_paddr != aud_cal.cal_paddr) &&
- (aud_cal.cal_size > 0)) ||
- (aud_cal.cal_size > mem_addr_audvol[acdb_path].cal_size)) {
+ atomic_set(&this_adm.mem_map_cal_index,
+ (acdb_path + MAX_AUDPROC_TYPES));
+ if (((this_adm.mem_addr_audvol[acdb_path].cal_paddr !=
+ aud_cal.cal_paddr) && (aud_cal.cal_size > 0)) ||
+ (aud_cal.cal_size >
+ this_adm.mem_addr_audvol[acdb_path].cal_size)) {
- if (mem_addr_audvol[acdb_path].cal_paddr != 0)
+ if (this_adm.mem_addr_audvol[acdb_path].cal_paddr != 0)
adm_memory_unmap_regions(port_id,
- &mem_addr_audvol[acdb_path].cal_paddr,
+ &this_adm.mem_addr_audvol[acdb_path].cal_paddr,
&size, 1);
result = adm_memory_map_regions(port_id, &aud_cal.cal_paddr,
@@ -344,9 +347,9 @@
acdb_path, aud_cal.cal_paddr,
aud_cal.cal_size);
} else {
- mem_addr_audvol[acdb_path].cal_paddr =
+ this_adm.mem_addr_audvol[acdb_path].cal_paddr =
aud_cal.cal_paddr;
- mem_addr_audvol[acdb_path].cal_size = size;
+ this_adm.mem_addr_audvol[acdb_path].cal_size = size;
}
}
@@ -817,8 +820,8 @@
unmap_regions.hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
unmap_regions.hdr.token = port_id;
unmap_regions.hdr.opcode = ADM_CMD_SHARED_MEM_UNMAP_REGIONS;
- unmap_regions.mem_map_handle = atomic_read(&mem_map_handles[
- atomic_read(&mem_map_index)]);
+ unmap_regions.mem_map_handle = atomic_read(&this_adm.
+ mem_map_cal_handles[atomic_read(&this_adm.mem_map_cal_index)]);
atomic_set(&this_adm.copp_stat[0], 0);
ret = apr_send_pkt(this_adm.apr, (uint32_t *) &unmap_regions);
if (ret < 0) {
diff --git a/sound/soc/msm/qdsp6v2/q6afe.c b/sound/soc/msm/qdsp6v2/q6afe.c
index de9841a..fb6b56e 100644
--- a/sound/soc/msm/qdsp6v2/q6afe.c
+++ b/sound/soc/msm/qdsp6v2/q6afe.c
@@ -39,12 +39,14 @@
void *tx_private_data;
void *rx_private_data;
uint32_t mmap_handle;
+
+ struct acdb_cal_block afe_cal_addr[MAX_AUDPROC_TYPES];
+ atomic_t mem_map_cal_handles[MAX_AUDPROC_TYPES];
+ atomic_t mem_map_cal_index;
};
static struct afe_ctl this_afe;
-static struct acdb_cal_block afe_cal_addr[MAX_AUDPROC_TYPES];
-
#define TIMEOUT_MS 1000
#define Q6AFE_MAX_VOLUME 0x3FFF
@@ -113,7 +115,13 @@
AFE_SERVICE_CMDRSP_SHARED_MEM_MAP_REGIONS) {
pr_debug("%s: mmap_handle: 0x%x\n",
__func__, payload[0]);
- this_afe.mmap_handle = (uint32_t)payload[0];
+ if (atomic_read(&this_afe.mem_map_cal_index) != -1)
+ atomic_set(&this_afe.mem_map_cal_handles[
+ atomic_read(
+ &this_afe.mem_map_cal_index)],
+ (uint32_t)payload[0]);
+ else
+ this_afe.mmap_handle = (uint32_t)payload[0];
atomic_set(&this_afe.state, 0);
wake_up(&this_afe.wait[data->token]);
} else if (data->opcode == AFE_EVENT_RT_PROXY_PORT_STATUS) {
@@ -248,9 +256,77 @@
}
return ret;
}
+
static void afe_send_cal_block(int32_t path, u16 port_id)
{
- /* To come back */
+ int result = 0;
+ int index = 0;
+ int size = 4096;
+ struct acdb_cal_block cal_block;
+ struct afe_audioif_config_command_no_payload afe_cal;
+ pr_debug("%s: path %d\n", __func__, path);
+
+ get_afe_cal(path, &cal_block);
+ if (cal_block.cal_size <= 0) {
+ pr_debug("%s: No AFE cal to send!\n", __func__);
+ goto done;
+ }
+
+ if ((this_afe.afe_cal_addr[path].cal_paddr != cal_block.cal_paddr) ||
+ (cal_block.cal_size > this_afe.afe_cal_addr[path].cal_size)) {
+ atomic_set(&this_afe.mem_map_cal_index, path);
+ if (this_afe.afe_cal_addr[path].cal_paddr != 0)
+ afe_cmd_memory_unmap(
+ this_afe.afe_cal_addr[path].cal_paddr);
+
+ afe_cmd_memory_map(cal_block.cal_paddr, size);
+ atomic_set(&this_afe.mem_map_cal_index, -1);
+ this_afe.afe_cal_addr[path].cal_paddr = cal_block.cal_paddr;
+ this_afe.afe_cal_addr[path].cal_size = size;
+ }
+
+ index = q6audio_get_port_index(port_id);
+ if (index < 0) {
+ pr_debug("%s: AFE port index invalid!\n", __func__);
+ goto done;
+ }
+
+ afe_cal.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ afe_cal.hdr.pkt_size = sizeof(afe_cal);
+ afe_cal.hdr.src_port = 0;
+ afe_cal.hdr.dest_port = 0;
+ afe_cal.hdr.token = index;
+ afe_cal.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+ afe_cal.param.port_id = port_id;
+ afe_cal.param.payload_size = cal_block.cal_size;
+ afe_cal.param.payload_address_lsw = cal_block.cal_paddr;
+ afe_cal.param.payload_address_msw = 0;
+ afe_cal.param.mem_map_handle =
+ atomic_read(&this_afe.mem_map_cal_handles[path]);
+
+ pr_debug("%s: AFE cal sent for device port = %d, path = %d, cal size = %d, cal addr = 0x%x\n",
+ __func__, port_id, path,
+ cal_block.cal_size, cal_block.cal_paddr);
+
+ atomic_set(&this_afe.state, 1);
+ result = apr_send_pkt(this_afe.apr, (uint32_t *) &afe_cal);
+ if (result < 0) {
+ pr_err("%s: AFE cal for port %d failed\n",
+ __func__, port_id);
+ }
+
+ result = wait_event_timeout(this_afe.wait[index],
+ (atomic_read(&this_afe.state) == 0),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!result) {
+ pr_err("%s: wait_event timeout SET AFE CAL\n", __func__);
+ goto done;
+ }
+
+ pr_debug("%s: AFE cal sent for path %d device!\n", __func__, path);
+done:
+ return;
}
void afe_send_cal(u16 port_id)
@@ -1933,6 +2009,7 @@
int i = 0;
atomic_set(&this_afe.state, 0);
atomic_set(&this_afe.status, 0);
+ atomic_set(&this_afe.mem_map_cal_index, -1);
this_afe.apr = NULL;
this_afe.mmap_handle = 0;
for (i = 0; i < AFE_MAX_PORTS; i++)
@@ -1948,9 +2025,9 @@
config_debug_fs_exit();
for (i = 0; i < MAX_AUDPROC_TYPES; i++) {
- if (afe_cal_addr[i].cal_paddr != 0)
+ if (this_afe.afe_cal_addr[i].cal_paddr != 0)
afe_cmd_memory_unmap_nowait(
- afe_cal_addr[i].cal_paddr);
+ this_afe.afe_cal_addr[i].cal_paddr);
}
}
diff --git a/sound/soc/msm/qdsp6v2/q6voice.c b/sound/soc/msm/qdsp6v2/q6voice.c
index b799e59..263f47f 100644
--- a/sound/soc/msm/qdsp6v2/q6voice.c
+++ b/sound/soc/msm/qdsp6v2/q6voice.c
@@ -505,9 +505,9 @@
cvs_session_cmd.hdr.opcode =
VSS_ISTREAM_CMD_CREATE_PASSIVE_CONTROL_SESSION;
if (is_volte_session(v->session_id)) {
- strlcpy(mvm_session_cmd.mvm_session.name,
+ strlcpy(cvs_session_cmd.cvs_session.name,
"default volte voice",
- sizeof(mvm_session_cmd.mvm_session.name));
+ sizeof(cvs_session_cmd.cvs_session.name));
} else {
strlcpy(cvs_session_cmd.cvs_session.name,
"default modem voice",
@@ -3455,7 +3455,9 @@
v->dev_tx.mute = mute;
- if (v->voc_state == VOC_RUN)
+ if ((v->voc_state == VOC_RUN) ||
+ (v->voc_state == VOC_CHANGE) ||
+ (v->voc_state == VOC_STANDBY))
ret = voice_send_mute_cmd(v);
mutex_unlock(&v->lock);
@@ -3661,7 +3663,9 @@
v->dev_rx.volume = vol_idx;
- if (v->voc_state == VOC_RUN)
+ if ((v->voc_state == VOC_RUN) ||
+ (v->voc_state == VOC_CHANGE) ||
+ (v->voc_state == VOC_STANDBY))
ret = voice_send_vol_index_cmd(v);
mutex_unlock(&v->lock);
@@ -3753,7 +3757,9 @@
mutex_lock(&v->lock);
- if (v->voc_state == VOC_RUN || v->voc_state == VOC_ERROR) {
+ if (v->voc_state == VOC_RUN || v->voc_state == VOC_ERROR ||
+ v->voc_state == VOC_STANDBY) {
+
pr_debug("%s: VOC_STATE: %d\n", __func__, v->voc_state);
ret = voice_destroy_vocproc(v);
@@ -3767,6 +3773,69 @@
return ret;
}
+int voc_standby_voice_call(uint16_t session_id)
+{
+ struct voice_data *v = voice_get_session(session_id);
+ struct apr_hdr mvm_standby_voice_cmd;
+ void *apr_mvm;
+ u16 mvm_handle;
+ int ret = 0;
+
+ pr_debug("%s: voc state=%d", __func__, v->voc_state);
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return -EINVAL;
+ }
+ if (v->voc_state == VOC_RUN) {
+ apr_mvm = common.apr_q6_mvm;
+ if (!apr_mvm) {
+ pr_err("%s: apr_mvm is NULL.\n", __func__);
+ ret = -EINVAL;
+ goto fail;
+ }
+ mvm_handle = voice_get_mvm_handle(v);
+ mvm_standby_voice_cmd.hdr_field =
+ APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ mvm_standby_voice_cmd.pkt_size =
+ APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(mvm_standby_voice_cmd) - APR_HDR_SIZE);
+ pr_debug("send mvm_standby_voice_cmd pkt size = %d\n",
+ mvm_standby_voice_cmd.pkt_size);
+ mvm_standby_voice_cmd.src_port = v->session_id;
+ mvm_standby_voice_cmd.dest_port = mvm_handle;
+ mvm_standby_voice_cmd.token = 0;
+ mvm_standby_voice_cmd.opcode = VSS_IMVM_CMD_STANDBY_VOICE;
+ v->mvm_state = CMD_STATUS_FAIL;
+ ret = apr_send_pkt(apr_mvm,
+ (uint32_t *)&mvm_standby_voice_cmd);
+ if (ret < 0) {
+ pr_err("Fail in sending VSS_IMVM_CMD_STANDBY_VOICE\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+ v->voc_state = VOC_STANDBY;
+ }
+fail:
+ return ret;
+}
+
+int voc_resume_voice_call(uint16_t session_id)
+{
+ struct voice_data *v = voice_get_session(session_id);
+ int ret = 0;
+
+ ret = voice_send_start_voice_cmd(v);
+ if (ret < 0) {
+ pr_err("Fail in sending START_VOICE\n");
+ goto fail;
+ }
+ v->voc_state = VOC_RUN;
+ return 0;
+fail:
+ return -EINVAL;
+}
+
int voc_start_voice_call(uint16_t session_id)
{
struct voice_data *v = voice_get_session(session_id);
@@ -3845,6 +3914,10 @@
}
v->voc_state = VOC_RUN;
+ } else if (v->voc_state == VOC_STANDBY) {
+ pr_err("Error: start voice in Standby\n");
+ ret = -EINVAL;
+ goto fail;
}
fail:
mutex_unlock(&v->lock);
@@ -3961,6 +4034,7 @@
case VSS_IMVM_CMD_SET_CAL_NETWORK:
case VSS_IMVM_CMD_SET_CAL_MEDIA_TYPE:
case VSS_IMEMORY_CMD_UNMAP:
+ case VSS_IMVM_CMD_STANDBY_VOICE:
pr_debug("%s: cmd = 0x%x\n", __func__, ptr[0]);
v->mvm_state = CMD_STATUS_SUCCESS;
wake_up(&v->mvm_wait);
diff --git a/sound/soc/msm/qdsp6v2/q6voice.h b/sound/soc/msm/qdsp6v2/q6voice.h
index aef463f..6fb4b04 100644
--- a/sound/soc/msm/qdsp6v2/q6voice.h
+++ b/sound/soc/msm/qdsp6v2/q6voice.h
@@ -68,6 +68,7 @@
VOC_CHANGE,
VOC_RELEASE,
VOC_ERROR,
+ VOC_STANDBY,
};
struct mem_buffer {
@@ -171,6 +172,9 @@
#define VSS_IMVM_CMD_START_VOICE 0x00011190
/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */
+#define VSS_IMVM_CMD_STANDBY_VOICE 0x00011191
+/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */
+
#define VSS_IMVM_CMD_STOP_VOICE 0x00011192
/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */
@@ -1227,6 +1231,8 @@
int voc_set_tty_mode(uint16_t session_id, uint8_t tty_mode);
int voc_start_voice_call(uint16_t session_id);
int voc_end_voice_call(uint16_t session_id);
+int voc_standby_voice_call(uint16_t session_id);
+int voc_resume_voice_call(uint16_t session_id);
int voc_set_rxtx_port(uint16_t session_id,
uint32_t dev_port_id,
uint32_t dev_type);