Merge "selinux: fix double free in selinux_parse_opts_str()"
diff --git a/Android.bp b/Android.bp
new file mode 100644
index 0000000..4341e3a
--- /dev/null
+++ b/Android.bp
@@ -0,0 +1,27 @@
+cc_binary_host {
+ name: "unifdef",
+ srcs: ["scripts/unifdef.c"],
+ sanitize: {
+ never: true,
+ }
+}
+
+gensrcs {
+ name: "qseecom-kernel-includes",
+
+ // move to out/ as root for header generation because of scripts/unifdef
+ // storage - at the expense of extra ../ references
+ cmd: "pushd out && mkdir -p scripts && rm -f scripts/unifdef && ln -s ../../$(location unifdef) scripts/unifdef && ../$(location scripts/headers_install.sh) `dirname ../$(out)` ../ $(in) && popd",
+
+ tools: ["unifdef"],
+ tool_files: ["scripts/headers_install.sh"],
+ export_include_dirs: ["include/uapi"],
+ srcs: ["include/uapi/linux/qseecom.h"],
+ output_extension: "h",
+}
+
+cc_library_headers {
+ name: "qseecom-kernel-headers",
+ generated_headers: ["qseecom-kernel-includes"],
+ export_generated_headers: ["qseecom-kernel-includes"],
+}
diff --git a/Documentation/devicetree/bindings/arm/msm/lpm-workarounds.txt b/Documentation/devicetree/bindings/arm/msm/lpm-workarounds.txt
new file mode 100644
index 0000000..0304035
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/lpm-workarounds.txt
@@ -0,0 +1,55 @@
+* LPM Workarounds
+
+The required properties are:
+
+- compatible: "qcom,lpm-workarounds"
+
+The optional properties are:
+- reg: The physical address and the size of the l1_l2_gcc and l2_pwr_sts
+ registers of performance cluster.
+
+- reg-names: "l2_pwr_sts" - string to identify l2_pwr_sts physical address.
+ "l1_l2_gcc" - string to identify l1_l2_gcc physical address.
+
+- qcom,lpm-wa-cx-turbo-unvote: Indicates the workaround to unvote CX turbo
+ vote when system is coming out of rpm assisted power collapse.
+ lpm-cx-supply is required if this is present.
+
+- lpm-cx-supply: will hold handle for CX regulator supply which is used
+ to unvote.
+
+- qcom,lpm-wa-skip-l2-spm: Due to a hardware bug on 8939 and 8909, secure
+ world needs to disable and enable L2 SPM to get the proper context
+ in secure watchdog bite cases. With this workaround there is a race
+ in programming L2 SPM between HLOS and secure world. This leads to
+ stability issues. To avoid this program L2 SPM only in secure world
+ based on the L2 mode flag passed. Set lpm-wa-skip-l2-spm node if this
+ is required.
+
+- qcom,lpm-wa-dynamic-clock-gating: Due to a hardware bug on 8952, L1/L2 dynamic
+ clock gating needs to be enabled by software for performance cluster
+ cores and L2. Set lpm-wa-dynamic-clock-gating node if this workaround is
+ required.
+
+- qcom,cpu-offline-mask: Dynamic clock gating should be enabled when cluster is
+ in L2 PC. Each bit of cpu-offline-mask lists the cpu no. to hotplug by KTM
+ driver.
+
+- qcom,non-boot-cpu-index: will hold index of non boot cluster cpu.
+
+- qcom,l1-l2-gcc-secure: indicates L1/L2 clock enabling register is secure.
+
+Example:
+
+qcom,lpm-workarounds {
+ compatible = "qcom,lpm-workarounds";
+ reg = <0x0B011018 0x4>,
+ <0x0B011088 0x4>;
+ reg-names = "l2-pwr-sts", "l1-l2-gcc";
+ lpm-cx-supply = <&pm8916_s2_corner>;
+ qcom,lpm-wa-cx-turbo-unvote;
+ qcom,lpm-wa-skip-l2-spm;
+ qcom,lpm-wa-dynamic-clock-gating;
+ qcom,cpu-offline-mask = "0xF";
+ qcom,non-boot-cpu-index = <4>;
+};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
index 4c29cda..fdc3418 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
@@ -57,6 +57,10 @@
occupied by the redistributors. Required if more than one such
region is present.
+- ignored-save-restore-irqs: Array of u32 elements, specifying the interrupts
+ which are ignored while doing gicd save/restore. Maximum of 10 elements
+ is supported at present.
+
Sub-nodes:
PPI affinity can be expressed as a single "ppi-partitions" node,
diff --git a/Documentation/devicetree/bindings/leds/leds-qpnp-haptics.txt b/Documentation/devicetree/bindings/leds/leds-qpnp-haptics.txt
index 1a76d5d..258504e 100644
--- a/Documentation/devicetree/bindings/leds/leds-qpnp-haptics.txt
+++ b/Documentation/devicetree/bindings/leds/leds-qpnp-haptics.txt
@@ -116,6 +116,11 @@
Definition: Short circuit debounce cycles for internal PWM.
Allowed values: 0, 8, 16 or 32.
+- vcc_pon-supply
+ Usage: optional
+ Value type: <phandle>
+ Definition: PON driver regulator required to force MBG_ON
+
Following properties are specific only to LRA vibrators.
- qcom,lra-auto-mode
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg.txt
index f6a7a1b..1e44686 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg.txt
@@ -109,6 +109,10 @@
this. If this property is not specified,
low battery voltage threshold will be
configured to 4200 mV.
+- qcom,fg-rconn-mohm: Battery connector resistance (Rconn) in
+ milliohms. If Rconn is specified, then
+ Rslow values will be updated to account
+ it for an accurate ESR.
- qcom,cycle-counter-en: Boolean property which enables the cycle
counter feature. If this property is
present, then the following properties
@@ -143,6 +147,14 @@
battery voltage shadow and the current
predicted voltage in uV to initiate
capacity learning.
+- qcom,cl-max-limit-deciperc: The maximum percent that the capacity
+ cannot go above during any capacity
+ learning cycle. This property is in the
+ unit of .1% increments.
+- qcom,cl-min-limit-deciperc: The minimum percent that the capacity
+ cannot go below during any capacity
+ learning cycle. This property is in the
+ unit of .1% increments.
- qcom,capacity-estimation-on: A boolean property to have the fuel
gauge driver attempt to estimate the
battery capacity using battery
@@ -178,6 +190,97 @@
settings will be different from default.
Once SOC crosses 5%, ESR pulse timings
will be restored back to default.
+- qcom,fg-control-slope-limiter: A boolean property to specify if SOC
+ slope limiter coefficients needs to
+ be modified based on charging status
+ and battery temperature threshold.
+- qcom,fg-slope-limit-temp-threshold: Temperature threshold in decidegC used
+ for applying the slope coefficient based
+ on charging status and battery
+ temperature. If this property is not
+ specified, a default value of 100 (10C)
+ will be applied by default.
+- qcom,fg-slope-limit-low-temp-chg: When the temperature goes below the
+ specified temperature threshold and
+ battery is charging, slope coefficient
+ specified with this property will be
+ applied. If this property is not
+ specified, a default value of 45 will be
+ applied.
+- qcom,fg-slope-limit-low-temp-dischg: Same as "qcom,fg-slope-limit-low-temp-chg"
+ except this is when the battery is
+ discharging.
+- qcom,fg-slope-limit-high-temp-chg: When the temperature goes above the
+ specified temperature threshold and
+ battery is charging, slope coefficient
+ specified with this property will be
+ applied. If this property is not
+ specified, a default value of 2 will be
+ applied.
+- qcom,fg-slope-limit-high-temp-dischg: Same as "qcom,fg-slope-limit-high-temp-chg"
+ except this is when the battery is
+ discharging.
+- qcom,fg-dischg-voltage-gain-ctrl: A boolean property to specify if the
+ voltage gain needs to be modified
+ during discharging based on monotonic
+ soc.
+- qcom,fg-dischg-voltage-gain-soc: Array of monotonic SOC threshold values
+ to change the voltage gain settings
+ during discharge. This should be defined
+ in the ascending order and in the range
+ of 0-100. Array limit is set to 3.
+ If qcom,fg-dischg-voltage-gain-ctrl is
+ set, then this property should be
+ specified to apply the gain settings.
+- qcom,fg-dischg-med-voltage-gain: Array of voltage gain values that needs
+ to be applied to medC voltage gain when
+ the monotonic SOC goes below the SOC
+ threshold specified under
+ qcom,fg-dischg-voltage-gain-soc. Array
+ limit is set to 3.
+ If qcom,fg-dischg-voltage-gain-ctrl is
+ set, then this property should be
+ specified to apply the gain setting.
+- qcom,fg-dischg-high-voltage-gain: Array of voltage gain values that needs
+ to be applied to highC voltage gain when
+ the monotonic SOC goes below the SOC
+ threshold specified under
+ qcom,fg-dischg-voltage-gain-soc. Array
+ limit is set to 3.
+ If qcom,fg-dischg-voltage-gain-ctrl is
+ set, then this property should be
+ specified to apply the gain setting.
+- qcom,fg-use-vbat-low-empty-soc: A boolean property to specify whether
+ vbatt-low interrupt is used to handle
+ empty battery condition. If this is
+ not specified, empty battery condition
+ is detected by empty-soc interrupt.
+- qcom,fg-batt-temp-low-limit: Battery temperature (in decidegC) low
+ limit which will be used to validate
+ the battery temperature reading from FG.
+ If the battery temperature goes below
+ this limit, last read good temperature
+ will be notified to userspace. If this
+ limit is not specified, then the
+ default limit would be -60C.
+- qcom,fg-batt-temp-high-limit: Battery temperature (in decidegC) high
+ limit which will be used to validate
+ the battery temperature reading from FG.
+ If the battery temperature goes above
+ this limit, last read good temperature
+ will be notified to userspace. If this
+ limit is not specified, then the
+ default limit would be 150C.
+- qcom,fg-cc-soc-limit-pct: Percentage of CC_SOC before resetting
+ FG and restore the full CC_SOC value.
+- qcom,fg-restore-batt-info: A boolean property to specify whether
+ battery parameters needs to be
+ restored. If this feature is enabled,
+ then validating the battery parameters
+ by OCV/battery SOC, validation range
+ in percentage should be specified via
+ appropriate module parameters to make
+ it work properly.
qcom,fg-soc node required properties:
- reg : offset and length of the PMIC peripheral register map.
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-cdp-256.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-cdp-256.dtsi
index dafd0b8..1428f37 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-cdp-256.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-cdp-256.dtsi
@@ -18,7 +18,8 @@
&usb {
status = "okay";
- extcon = <&vbus_detect>;
+ qcom,connector-type-uAB;
+ extcon = <0>, <0>, <0>, <&vbus_detect>;
};
&pcie_ep {
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-cdp.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-cdp.dtsi
index 1428f37..81877b7 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-cdp.dtsi
@@ -16,12 +16,6 @@
status = "okay";
};
-&usb {
- status = "okay";
- qcom,connector-type-uAB;
- extcon = <0>, <0>, <0>, <&vbus_detect>;
-};
-
&pcie_ep {
status = "okay";
};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-mtp-256.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-mtp-256.dtsi
index 6c5f3c3..d7c0d13 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-mtp-256.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-mtp-256.dtsi
@@ -18,7 +18,8 @@
&usb {
status = "okay";
- extcon = <&vbus_detect>;
+ qcom,connector-type-uAB;
+ extcon = <0>, <0>, <0>, <&vbus_detect>;
};
&pcie_ep {
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-mtp.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-mtp.dtsi
index d7c0d13..6ceac6e 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-mtp.dtsi
@@ -16,12 +16,6 @@
status = "okay";
};
-&usb {
- status = "okay";
- qcom,connector-type-uAB;
- extcon = <0>, <0>, <0>, <&vbus_detect>;
-};
-
&pcie_ep {
status = "okay";
};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
index 50bbebf..d2178cb 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
@@ -933,24 +933,24 @@
<1 676 0 0>,
<143 777 0 0>,
/* SVS2 */
- <90 512 3616000 7232000>,
+ <90 512 900000 1800000>,
<90 585 300000 600000>,
- <1 676 90000 180000>, /*gcc_config_noc_clk_src */
+ <1 676 90000 179000>, /*gcc_config_noc_clk_src */
<143 777 0 120>, /* IB defined for IPA2X_clk in MHz*/
/* SVS */
- <90 512 6640000 13280000>,
+ <90 512 1530000 3060000>,
<90 585 400000 800000>,
- <1 676 100000 200000>,
+ <1 676 100000 199000>,
<143 777 0 250>, /* IB defined for IPA2X_clk in MHz*/
/* NOMINAL */
- <90 512 10400000 20800000>,
+ <90 512 2592000 5184000>,
<90 585 800000 1600000>,
- <1 676 200000 400000>,
+ <1 676 200000 399000>,
<143 777 0 440>, /* IB defined for IPA2X_clk in MHz*/
/* TURBO */
- <90 512 10400000 20800000>,
+ <90 512 2592000 5184000>,
<90 585 960000 1920000>,
- <1 676 266000 532000>,
+ <1 676 266000 531000>,
<143 777 0 500>; /* IB defined for IPA clk in MHz*/
qcom,bus-vector-names = "MIN", "SVS2", "SVS", "NOMINAL",
"TURBO";
diff --git a/arch/arm/configs/msm8909w_defconfig b/arch/arm/configs/msm8909w_defconfig
index af47269..17baa04 100644
--- a/arch/arm/configs/msm8909w_defconfig
+++ b/arch/arm/configs/msm8909w_defconfig
@@ -546,8 +546,13 @@
CONFIG_PID_IN_CONTEXTIDR=y
CONFIG_DEBUG_SET_MODULE_RONX=y
CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_SINK_TPIU=y
+CONFIG_CORESIGHT_SOURCE_ETM3X=y
CONFIG_CORESIGHT_REMOTE_ETM=y
CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
+CONFIG_CORESIGHT_QCOM_REPLICATOR=y
+CONFIG_CORESIGHT_DBGUI=y
CONFIG_CORESIGHT_STM=y
CONFIG_CORESIGHT_TPDA=y
CONFIG_CORESIGHT_TPDM=y
diff --git a/arch/arm/configs/msm8937-perf_defconfig b/arch/arm/configs/msm8937-perf_defconfig
index b113ebd..11308ba 100644
--- a/arch/arm/configs/msm8937-perf_defconfig
+++ b/arch/arm/configs/msm8937-perf_defconfig
@@ -306,6 +306,10 @@
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_JOYSTICK=y
CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v26=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v26=y
+CONFIG_SECURE_TOUCH_SYNAPTICS_DSX_V26=y
CONFIG_TOUCHSCREEN_FT5X06=y
CONFIG_TOUCHSCREEN_GEN_VKEYS=y
CONFIG_INPUT_MISC=y
@@ -354,11 +358,19 @@
CONFIG_MSM_APM=y
CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
CONFIG_THERMAL_QPNP=y
CONFIG_THERMAL_QPNP_ADC_TM=y
CONFIG_THERMAL_TSENS=y
-CONFIG_MSM_BCL_PERIPHERAL_CTL=y
-CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_QTI_VIRTUAL_SENSOR=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_REGULATOR_COOLING_DEVICE=y
+CONFIG_QTI_BCL_PMIC5=y
+CONFIG_QTI_BCL_SOC_DRIVER=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
diff --git a/arch/arm/configs/msm8937_defconfig b/arch/arm/configs/msm8937_defconfig
index 1cccfd3..2f7941f 100644
--- a/arch/arm/configs/msm8937_defconfig
+++ b/arch/arm/configs/msm8937_defconfig
@@ -311,6 +311,10 @@
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_JOYSTICK=y
CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v26=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v26=y
+CONFIG_SECURE_TOUCH_SYNAPTICS_DSX_V26=y
CONFIG_TOUCHSCREEN_FT5X06=y
CONFIG_TOUCHSCREEN_GEN_VKEYS=y
CONFIG_INPUT_MISC=y
@@ -361,11 +365,19 @@
CONFIG_MSM_APM=y
CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
CONFIG_THERMAL_QPNP=y
CONFIG_THERMAL_QPNP_ADC_TM=y
CONFIG_THERMAL_TSENS=y
-CONFIG_MSM_BCL_PERIPHERAL_CTL=y
-CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_QTI_VIRTUAL_SENSOR=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_REGULATOR_COOLING_DEVICE=y
+CONFIG_QTI_BCL_PMIC5=y
+CONFIG_QTI_BCL_SOC_DRIVER=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
diff --git a/arch/arm/configs/msm8953-batcam-perf_defconfig b/arch/arm/configs/msm8953-batcam-perf_defconfig
index 1610d29..a006ee1 100644
--- a/arch/arm/configs/msm8953-batcam-perf_defconfig
+++ b/arch/arm/configs/msm8953-batcam-perf_defconfig
@@ -141,11 +141,6 @@
CONFIG_VIDEO_V4L2_SUBDEV_API=y
CONFIG_V4L_PLATFORM_DRIVERS=y
CONFIG_FB=y
-CONFIG_FB_MSM=y
-CONFIG_FB_MSM_MDSS=y
-CONFIG_FB_MSM_MDSS_WRITEBACK=y
-CONFIG_FB_MSM_MDSS_DSI_CTRL_STATUS=y
-CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_DYNAMIC_MINORS=y
@@ -177,6 +172,7 @@
CONFIG_RTC_DRV_QPNP=y
CONFIG_DMADEVICES=y
CONFIG_QCOM_SPS_DMA=y
+CONFIG_SYNC_FILE=y
CONFIG_UIO=y
CONFIG_UIO_MSM_SHAREDMEM=y
CONFIG_STAGING=y
@@ -190,7 +186,6 @@
CONFIG_SPS_SUPPORT_NDP_BAM=y
CONFIG_QPNP_COINCELL=y
CONFIG_QPNP_REVID=y
-CONFIG_MSM_MDSS_PLL=y
CONFIG_REMOTE_SPINLOCK_MSM=y
CONFIG_MAILBOX=y
CONFIG_ARM_SMMU=y
@@ -213,7 +208,6 @@
CONFIG_MSM_SUBSYSTEM_RESTART=y
CONFIG_MSM_PIL=y
CONFIG_MSM_PIL_SSR_GENERIC=y
-CONFIG_MSM_PIL_MSS_QDSP6V5=y
CONFIG_ICNSS=y
CONFIG_MSM_PERFORMANCE=y
CONFIG_MSM_EVENT_TIMER=y
diff --git a/arch/arm/configs/msm8953-batcam_defconfig b/arch/arm/configs/msm8953-batcam_defconfig
index 1ba9d96..4a0c147 100644
--- a/arch/arm/configs/msm8953-batcam_defconfig
+++ b/arch/arm/configs/msm8953-batcam_defconfig
@@ -142,11 +142,6 @@
CONFIG_VIDEO_V4L2_SUBDEV_API=y
CONFIG_V4L_PLATFORM_DRIVERS=y
CONFIG_FB=y
-CONFIG_FB_MSM=y
-CONFIG_FB_MSM_MDSS=y
-CONFIG_FB_MSM_MDSS_WRITEBACK=y
-CONFIG_FB_MSM_MDSS_DSI_CTRL_STATUS=y
-CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_DYNAMIC_MINORS=y
@@ -178,6 +173,7 @@
CONFIG_RTC_DRV_QPNP=y
CONFIG_DMADEVICES=y
CONFIG_QCOM_SPS_DMA=y
+CONFIG_SYNC_FILE=y
CONFIG_UIO=y
CONFIG_UIO_MSM_SHAREDMEM=y
CONFIG_STAGING=y
@@ -191,7 +187,6 @@
CONFIG_SPS_SUPPORT_NDP_BAM=y
CONFIG_QPNP_COINCELL=y
CONFIG_QPNP_REVID=y
-CONFIG_MSM_MDSS_PLL=y
CONFIG_REMOTE_SPINLOCK_MSM=y
CONFIG_MAILBOX=y
CONFIG_ARM_SMMU=y
@@ -214,7 +209,6 @@
CONFIG_MSM_SUBSYSTEM_RESTART=y
CONFIG_MSM_PIL=y
CONFIG_MSM_PIL_SSR_GENERIC=y
-CONFIG_MSM_PIL_MSS_QDSP6V5=y
CONFIG_ICNSS=y
CONFIG_MSM_PERFORMANCE=y
CONFIG_MSM_EVENT_TIMER=y
diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
index a808829..4d1065c 100644
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -242,6 +242,15 @@
writel_relaxed((u32)(val >> 32), addr + 4);
}
+static inline u64 gic_read_irouter(const volatile void __iomem *addr)
+{
+ u64 val;
+
+ val = readl_relaxed(addr);
+ val |= (u64)readl_relaxed(addr + 4) << 32;
+ return val;
+}
+
static inline u64 gic_read_typer(const volatile void __iomem *addr)
{
u64 val;
diff --git a/arch/arm/mach-qcom/board-msm8953.c b/arch/arm/mach-qcom/board-msm8953.c
index de4538f..9a82e3a 100644
--- a/arch/arm/mach-qcom/board-msm8953.c
+++ b/arch/arm/mach-qcom/board-msm8953.c
@@ -37,8 +37,8 @@
/* Explicitly parent the /soc devices to the root node to preserve
* the kernel ABI (sysfs structure, etc) until userspace is updated
*/
- of_platform_populate(of_find_node_by_path("/soc"),
- of_default_bus_match_table, NULL, NULL);
+ return of_platform_populate(of_find_node_by_path("/soc"),
+ of_default_bus_match_table, NULL, NULL);
}
late_initcall(msm8953_dt_populate);
#endif
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index b57aafc..f216025 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2372,6 +2372,7 @@
mapping->nr_bitmaps = 1;
mapping->extensions = extensions;
+ mapping->bits = BITS_PER_BYTE * bitmap_size;
spin_lock_init(&mapping->lock);
mapping->ops = &iommu_ops;
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index 98238d9..9cfb7cb 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -404,6 +404,10 @@
apq8053-iot-mtp.dtb \
apq8053-lite-dragon-v1.0.dtb \
apq8053-lite-dragon-v2.0.dtb \
+ apq8053-lite-dragon-v2.1.dtb \
+ apq8053-lite-dragon-v2.2.dtb \
+ apq8053-lite-dragon-v2.3.dtb \
+ apq8053-lite-dragon-v2.4.dtb \
apq8053-lite-lenovo-v1.0.dtb \
apq8053-lite-lenovo-v1.1.dtb \
apq8053-lite-harman-v1.0.dtb \
diff --git a/arch/arm64/boot/dts/qcom/apq8009-robot-som-refboard.dts b/arch/arm64/boot/dts/qcom/apq8009-robot-som-refboard.dts
index 958f7c8..3e4cbca 100644
--- a/arch/arm64/boot/dts/qcom/apq8009-robot-som-refboard.dts
+++ b/arch/arm64/boot/dts/qcom/apq8009-robot-som-refboard.dts
@@ -186,3 +186,12 @@
&blsp1_uart2_hs {
status = "ok";
};
+
+&i2c_1 {
+ status = "okay";
+ vl53l0x@52 {
+ compatible = "st,stmvl53l0";
+ reg = <0x29>;
+ status = "ok";
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.1.dts b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.1.dts
new file mode 100644
index 0000000..6c9c266
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.1.dts
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "apq8053-lite-dragon-v2.1.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. APQ8053 Lite DragonBoard V2.1";
+ compatible = "qcom,apq8053-lite-dragonboard", "qcom,apq8053",
+ "qcom,dragonboard";
+ qcom,board-id= <0x01010020 0>;
+};
+
+&blsp2_uart0 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.1.dtsi b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.1.dtsi
new file mode 100644
index 0000000..4d9c40c
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.1.dtsi
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "apq8053-lite-dragon.dtsi"
+
+&mdss_dsi0 {
+ qcom,ext_vdd-gpio = <&tlmm 100 0>;
+ qcom,platform-bklight-en-gpio = <&tlmm 95 0>;
+
+ qcom,platform-lane-config = [00 00 ff 0f
+ 00 00 ff 0f
+ 00 00 ff 0f
+ 00 00 ff 0f
+ 00 00 ff 8f];
+};
+
+&eeprom0 {
+ gpios = <&tlmm 26 0>,
+ <&tlmm 40 0>,
+ <&tlmm 118 0>,
+ <&tlmm 119 0>,
+ <&tlmm 39 0>;
+ qcom,gpio-vdig = <3>;
+ qcom,gpio-vana = <4>;
+ qcom,gpio-req-tbl-num = <0 1 2 3 4>;
+ qcom,gpio-req-tbl-flags = <1 0 0 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+ "CAM_RESET0",
+ "CAM_VDIG",
+ "CAM_VANA",
+ "CAM_STANDBY0";
+};
+
+&camera0 {
+ qcom,mount-angle = <270>;
+ gpios = <&tlmm 26 0>,
+ <&tlmm 40 0>,
+ <&tlmm 39 0>,
+ <&tlmm 118 0>,
+ <&tlmm 119 0>;
+ qcom,gpio-vdig = <3>;
+ qcom,gpio-vana = <4>;
+ qcom,gpio-req-tbl-num = <0 1 2 3 4>;
+ qcom,gpio-req-tbl-flags = <1 0 0 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+ "CAM_RESET0",
+ "CAM_STANDBY0",
+ "CAM_VDIG",
+ "CAM_VANA";
+};
+
+&camera1 {
+ qcom,mount-angle = <270>;
+};
+
+&camera2{
+ qcom,mount-angle = <270>;
+};
diff --git a/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.2.dts b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.2.dts
new file mode 100644
index 0000000..ecc4fea
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.2.dts
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "apq8053-lite-dragon-v2.2.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. APQ8053 Lite DragonBoard V2.2";
+ compatible = "qcom,apq8053-lite-dragonboard", "qcom,apq8053",
+ "qcom,dragonboard";
+ qcom,board-id= <0x01010120 0>;
+};
+
+&blsp2_uart0 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.2.dtsi b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.2.dtsi
new file mode 100644
index 0000000..396fd55
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.2.dtsi
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "apq8053-lite-dragon.dtsi"
+
+&i2c_3 {
+ status = "okay";
+ /delete-node/ himax_ts@48;
+};
+
+&eeprom0 {
+ gpios = <&tlmm 26 0>,
+ <&tlmm 40 0>,
+ <&tlmm 118 0>,
+ <&tlmm 119 0>,
+ <&tlmm 39 0>;
+ qcom,gpio-vdig = <3>;
+ qcom,gpio-vana = <4>;
+ qcom,gpio-req-tbl-num = <0 1 2 3 4>;
+ qcom,gpio-req-tbl-flags = <1 0 0 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+ "CAM_RESET0",
+ "CAM_VDIG",
+ "CAM_VANA",
+ "CAM_STANDBY0";
+};
+
+&camera0 {
+ qcom,mount-angle = <270>;
+ gpios = <&tlmm 26 0>,
+ <&tlmm 40 0>,
+ <&tlmm 39 0>,
+ <&tlmm 118 0>,
+ <&tlmm 119 0>;
+ qcom,gpio-vdig = <3>;
+ qcom,gpio-vana = <4>;
+ qcom,gpio-req-tbl-num = <0 1 2 3 4>;
+ qcom,gpio-req-tbl-flags = <1 0 0 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+ "CAM_RESET0",
+ "CAM_STANDBY0",
+ "CAM_VDIG",
+ "CAM_VANA";
+};
+
+&camera1 {
+ qcom,mount-angle = <270>;
+};
+
+&camera2{
+ qcom,mount-angle = <270>;
+};
diff --git a/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.3.dts b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.3.dts
new file mode 100644
index 0000000..e3f80be
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.3.dts
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "apq8053-lite-dragon-v2.3.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. APQ8053 Lite DragonBoard V2.3";
+ compatible = "qcom,apq8053-lite-dragonboard", "qcom,apq8053",
+ "qcom,dragonboard";
+ qcom,board-id= <0x01020020 0>;
+};
+
+&blsp2_uart0 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.3.dtsi b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.3.dtsi
new file mode 100644
index 0000000..5cf8ac0
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.3.dtsi
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "apq8053-lite-dragon.dtsi"
+
+&pm8953_l4 {
+ status = "okay";
+ regulator-always-on;
+};
+
+&pm8953_l10 {
+ status = "okay";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+};
+
+&pm8953_l2 {
+ status = "okay";
+ regulator-always-on;
+};
+
+&pm8953_l17 {
+ status = "okay";
+ regulator-always-on;
+};
+
+&pm8953_l22 {
+ status = "okay";
+ regulator-always-on;
+};
+
+&i2c_3 {
+ status = "okay";
+ /delete-node/ himax_ts@48;
+ focaltech_ts@38 {
+ compatible = "focaltech,fts";
+ reg = <0x38>;
+ interrupt-parent = <&tlmm>;
+ interrupts = <65 0x2>;
+ vdd-supply = <&pm8953_l10>;
+ avdd-supply = <&pm8953_l6>;
+ pinctrl-names = "pmx_ts_active","pmx_ts_suspend",
+ "pmx_ts_release";
+ pinctrl-0 = <&ts_int_active &ts_reset_active>;
+ pinctrl-1 = <&ts_int_suspend &ts_reset_suspend>;
+ pinctrl-2 = <&ts_release>;
+ focaltech,display-coords = <0 0 800 1280>;
+ focaltech,reset-gpio = <&tlmm 64 0x0>;
+ focaltech,irq-gpio = <&tlmm 65 0x2>;
+ focaltech,max-touch-number = <5>;
+ report_type = <1>;
+ };
+};
+
+&wled {
+ qcom,led-strings-list = [00 01 02];
+};
+
+&camera0 {
+ qcom,mount-angle = <90>;
+};
+
+&camera1 {
+ qcom,mount-angle = <90>;
+};
+
+&camera2{
+ qcom,mount-angle = <90>;
+};
diff --git a/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.4.dts b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.4.dts
new file mode 100644
index 0000000..1f40ef8
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.4.dts
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "apq8053-lite-dragon-v2.4.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. APQ8053 Lite DragonBoard V2.4";
+ compatible = "qcom,apq8053-lite-dragonboard", "qcom,apq8053",
+ "qcom,dragonboard";
+ qcom,board-id= <0x01030020 0>;
+};
+
+&blsp2_uart0 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.4.dtsi b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.4.dtsi
new file mode 100644
index 0000000..db0331e
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon-v2.4.dtsi
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "apq8053-lite-dragon.dtsi"
+
+&wled {
+ qcom,fs-curr-ua = <17500>;
+};
+
+&camera0 {
+ qcom,mount-angle = <180>;
+};
+
+&camera1 {
+ qcom,mount-angle = <180>;
+};
+
+&camera2 {
+ qcom,mount-angle = <180>;
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8917-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/msm8917-pinctrl.dtsi
index b9229e1..26fb25c 100644
--- a/arch/arm64/boot/dts/qcom/msm8917-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8917-pinctrl.dtsi
@@ -15,10 +15,11 @@
tlmm: pinctrl@1000000 {
compatible = "qcom,msm8917-pinctrl";
reg = <0x1000000 0x300000>;
- interrupts = <0 208 0>;
+ interrupts-extended = <&wakegic GIC_SPI 208 IRQ_TYPE_NONE>;
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
+ interrupt-parent = <&wakegpio>;
#interrupt-cells = <2>;
diff --git a/arch/arm64/boot/dts/qcom/msm8917-pm.dtsi b/arch/arm64/boot/dts/qcom/msm8917-pm.dtsi
index 575d1b5..a3b4679 100644
--- a/arch/arm64/boot/dts/qcom/msm8917-pm.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8917-pm.dtsi
@@ -31,7 +31,6 @@
qcom,lpm-levels {
compatible = "qcom,lpm-levels";
- qcom,use-psci;
#address-cells = <1>;
#size-cells = <0>;
@@ -40,8 +39,6 @@
#address-cells = <1>;
#size-cells = <0>;
label = "perf";
- qcom,spm-device-names = "l2";
- qcom,default-level=<0>;
qcom,psci-mode-shift = <4>;
qcom,psci-mode-mask = <0xf>;
@@ -98,11 +95,12 @@
#size-cells = <0>;
qcom,psci-mode-shift = <0>;
qcom,psci-mode-mask = <0xf>;
+ qcom,cpu = <&CPU0 &CPU1 &CPU2 &CPU3>;
qcom,pm-cpu-level@0 {
reg = <0>;
- qcom,psci-cpu-mode = <0>;
label = "wfi";
+ qcom,psci-cpu-mode = <1>;
qcom,latency-us = <12>;
qcom,ss-power = <463>;
qcom,energy-overhead = <23520>;
@@ -111,8 +109,8 @@
qcom,pm-cpu-level@1 {
reg = <1>;
- qcom,psci-cpu-mode = <3>;
label = "pc";
+ qcom,psci-cpu-mode = <3>;
qcom,latency-us = <180>;
qcom,ss-power = <429>;
qcom,energy-overhead = <162991>;
@@ -125,204 +123,10 @@
};
};
- qcom,mpm@601d0 {
- compatible = "qcom,mpm-v2";
- reg = <0x601d0 0x1000>, /* MSM_RPM_MPM_BASE 4K */
- <0xb011008 0x4>;
- reg-names = "vmpm", "ipc";
- interrupts = <0 171 1>;
- clocks = <&clock_gcc clk_xo_lpm_clk>;
- clock-names = "xo";
- qcom,ipc-bit-offset = <1>;
- qcom,gic-parent = <&intc>;
- qcom,gic-map = <2 216>, /* tsens_upper_lower_int */
- <49 172>, /* usb1_hs_async_wakeup_irq */
- <58 166>, /* usb_hs_irq */
- <53 104>, /* mdss_irq */
- <62 222>, /* ee0_krait_hlos_spmi_periph_irq */
- <0xff 18>, /* APC_qgicQTmrSecPhysIrptReq */
- <0xff 19>, /* APC_qgicQTmrNonSecPhysIrptReq */
- <0xff 20>, /* qgicQTmrVirtIrptReq */
- <0xff 35>, /* WDT_barkInt */
- <0xff 39>, /* arch_mem_timer */
- <0xff 40>, /* qtmr_phy_irq[0] */
- <0xff 47>, /* rbif_irq[0] */
- <0xff 56>, /* q6_wdog_expired_irq */
- <0xff 57>, /* mss_to_apps_irq(0) */
- <0xff 58>, /* mss_to_apps_irq(1) */
- <0xff 59>, /* mss_to_apps_irq(2) */
- <0xff 60>, /* mss_to_apps_irq(3) */
- <0xff 61>, /* mss_a2_bam_irq */
- <0xff 65>, /* o_gc_sys_irq[0] */
- <0xff 69>, /* vbif_irpt */
- <0xff 73>, /* smmu_intr_bus[1] */
- <0xff 74>, /* smmu_bus_intr[2] */
- <0xff 75>, /* smmu_bus_intr[3] */
- <0xff 76>, /* venus_irq */
- <0xff 78>, /* smmu_bus_intr[5] */
- <0xff 79>, /* smmu_bus_intr[6] */
- <0xff 85>, /* smmu_bus_intr[31] */
- <0xff 86>, /* smmu_bus_intr[32] */
- <0xff 90>, /* smmu_bus_intr[33] */
- <0xff 92>, /* smmu_bus_intr[34] */
- <0xff 93>, /* smmu_bus_intr[35] */
- <0xff 97>, /* smmu_bus_intr[10] */
- <0xff 102>, /* smmu_bus_intr[14] */
- <0xff 108>, /* smmu_bus_intr[36] */
- <0xff 109>, /* smmu_bus_intr[37] */
- <0xff 112>, /* smmu_bus_intr[38] */
- <0xff 114>, /* qdsd_intr_out */
- <0xff 126>, /* smmu_bus_intr[39] */
- <0xff 128>, /* blsp1_peripheral_irq[3] */
- <0xff 129>, /* blsp1_peripheral_irq[4] */
- <0xff 131>, /* qup_irq */
- <0xff 136>, /* smmu_bus_intr[43] */
- <0xff 137>, /* smmu_intr_bus[44] */
- <0xff 138>, /* smmu_intr_bus[45] */
- <0xff 140>, /* uart_dm_intr */
- <0xff 141>, /* smmu_bus_intr[46] */
- <0xff 142>, /* smmu_bus_intr[47] */
- <0xff 143>, /* smmu_bus_intr[48] */
- <0xff 144>, /* smmu_bus_intr[49] */
- <0xff 145>, /* smmu_bus_intr[50] */
- <0xff 146>, /* smmu_bus_intr[51] */
- <0xff 147>, /* smmu_bus_intr[52] */
- <0xff 148>, /* smmu_bus_intr[53] */
- <0xff 149>, /* smmu_bus_intr[54] */
- <0xff 150>, /* smmu_bus_intr[55] */
- <0xff 151>, /* smmu_bus_intr[56] */
- <0xff 152>, /* smmu_bus_intr[57] */
- <0xff 153>, /* smmu_bus_intr[58] */
- <0xff 155>, /* sdc1_irq(0) */
- <0xff 157>, /* sdc2_irq(0) */
- <0xff 167>, /* bam_irq(0) */
- <0xff 170>, /* sdc1_pwr_cmd_irq */
- <0xff 173>, /* o_wcss_apss_smd_hi */
- <0xff 174>, /* o_wcss_apss_smd_med */
- <0xff 175>, /* o_wcss_apss_smd_low */
- <0xff 176>, /* o_wcss_apss_smsm_irq */
- <0xff 177>, /* o_wcss_apss_wlan_data_xfer_done */
- <0xff 178>, /* o_wcss_apss_wlan_rx_data_avail */
- <0xff 179>, /* o_wcss_apss_asic_intr */
- <0xff 181>, /* o_wcss_apss_wdog_bite_and_reset_rdy */
- <0xff 188>, /* lpass_irq_out_apcs(0) */
- <0xff 189>, /* lpass_irq_out_apcs(1) */
- <0xff 190>, /* lpass_irq_out_apcs(2) */
- <0xff 191>, /* lpass_irq_out_apcs(3) */
- <0xff 192>, /* lpass_irq_out_apcs(4) */
- <0xff 193>, /* lpass_irq_out_apcs(5) */
- <0xff 194>, /* lpass_irq_out_apcs(6) */
- <0xff 195>, /* lpass_irq_out_apcs(7) */
- <0xff 196>, /* lpass_irq_out_apcs(8) */
- <0xff 197>, /* lpass_irq_out_apcs(9) */
- <0xff 198>, /* coresight-tmc-etr interrupt */
- <0xff 200>, /* rpm_ipc(4) */
- <0xff 201>, /* rpm_ipc(5) */
- <0xff 202>, /* rpm_ipc(6) */
- <0xff 203>, /* rpm_ipc(7) */
- <0xff 204>, /* rpm_ipc(24) */
- <0xff 205>, /* rpm_ipc(25) */
- <0xff 206>, /* rpm_ipc(26) */
- <0xff 207>, /* rpm_ipc(27) */
- <0xff 215>, /* o_bimc_intr[0] */
- <0xff 224>, /* SPDM interrupt */
- <0xff 239>, /* crypto_bam_irq[1]*/
- <0xff 240>, /* summary_irq_kpss */
- <0xff 253>, /* sdcc_pwr_cmd_irq */
- <0xff 260>, /* ipa_irq[0] */
- <0xff 261>, /* ipa_irq[2] */
- <0xff 262>, /* ipa_bam_irq[0] */
- <0xff 263>, /* ipa_bam_irq[2] */
- <0xff 269>, /* rpm_wdog_expired_irq */
- <0xff 270>, /* blsp1_bam_irq[0] */
- <0xff 272>, /* smmu_intr_bus[17] */
- <0xff 273>, /* smmu_bus_intr[18] */
- <0xff 274>, /* smmu_bus_intr[19] */
- <0xff 275>, /* rpm_ipc(30) */
- <0xff 276>, /* rpm_ipc(31) */
- <0xff 277>, /* smmu_intr_bus[20] */
- <0xff 285>, /* smmu_bus_intr[28] */
- <0xff 286>, /* smmu_bus_intr[29] */
- <0xff 287>, /* smmu_bus_intr[30] */
- <0xff 321>, /* q6ss_irq_out(4) */
- <0xff 322>, /* q6ss_irq_out(5) */
- <0xff 323>, /* q6ss_irq_out(6) */
- <0xff 325>, /* q6ss_wdog_exp_irq */
- <0xff 344>; /* sdcc1ice */
-
- qcom,gpio-parent = <&tlmm>;
- qcom,gpio-map = <3 38 >,
- <4 1 >,
- <5 5 >,
- <6 9 >,
- <8 37>,
- <9 36>,
- <10 13>,
- <11 35>,
- <12 17>,
- <13 21>,
- <14 54>,
- <15 34>,
- <16 31>,
- <17 58>,
- <18 28>,
- <19 42>,
- <20 25>,
- <21 12>,
- <22 43>,
- <23 44>,
- <24 45>,
- <25 46>,
- <26 48>,
- <27 65>,
- <28 93>,
- <29 97>,
- <30 63>,
- <31 70>,
- <32 71>,
- <33 72>,
- <34 81>,
- <35 126>,
- <36 90>,
- <37 128>,
- <38 91>,
- <39 41>,
- <40 127>,
- <41 86>,
- <50 67>,
- <51 73>,
- <52 74>,
- <53 62>,
- <54 124>,
- <55 61>,
- <56 130>,
- <57 59>,
- <59 50>;
- };
-
- qcom,cpu-sleep-status {
- compatible = "qcom,cpu-sleep-status";
- };
-
- qcom,rpm-log@29dc00 {
- compatible = "qcom,rpm-log";
- reg = <0x29dc00 0x4000>;
- qcom,rpm-addr-phys = <0x200000>;
- qcom,offset-version = <4>;
- qcom,offset-page-buffer-addr = <36>;
- qcom,offset-log-len = <40>;
- qcom,offset-log-len-mask = <44>;
- qcom,offset-page-indices = <56>;
- };
-
qcom,rpm-stats@29dba0 {
compatible = "qcom,rpm-stats";
- reg = <0x200000 0x1000>,
- <0x290014 0x4>,
- <0x29001c 0x4>;
- reg-names = "phys_addr_base", "offset_addr",
- "heap_phys_addrbase";
- qcom,sleep-stats-version = <2>;
+ reg = <0x200000 0x1000>, <0x290014 0x4>;
+ reg-names = "phys_addr_base", "offset_addr";
};
qcom,rpm-master-stats@60150 {
diff --git a/arch/arm64/boot/dts/qcom/msm8917.dtsi b/arch/arm64/boot/dts/qcom/msm8917.dtsi
index a285110..d080539 100644
--- a/arch/arm64/boot/dts/qcom/msm8917.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8917.dtsi
@@ -21,7 +21,7 @@
model = "Qualcomm Technologies, Inc. MSM8917";
compatible = "qcom,msm8917";
qcom,msm-id = <303 0x0>, <308 0x0>, <309 0x0>;
- interrupt-parent = <&intc>;
+ interrupt-parent = <&wakegic>;
chosen {
bootargs = "sched_enable_hmp=1";
@@ -197,19 +197,19 @@
};
wakegic: wake-gic {
- compatible = "qcom,mpm-gic", "qcom,mpm-gic-msm8937";
+ compatible = "qcom,mpm-gic-msm8937", "qcom,mpm-gic";
interrupts = <GIC_SPI 171 IRQ_TYPE_EDGE_RISING>;
reg = <0x601d0 0x1000>,
<0xb011008 0x4>; /* MSM_APCS_GCC_BASE 4K */
reg-names = "vmpm", "ipc";
- qcom,num-mpm-irqs = <96>;
+ qcom,num-mpm-irqs = <64>;
interrupt-controller;
interrupt-parent = <&intc>;
#interrupt-cells = <3>;
};
wakegpio: wake-gpio {
- compatible = "qcom,mpm-gpio", "qcom,mpm-gpio-msm8937";
+ compatible = "qcom,mpm-gpio-msm8937", "qcom,mpm-gpio";
interrupt-controller;
interrupt-parent = <&intc>;
#interrupt-cells = <2>;
diff --git a/arch/arm64/boot/dts/qcom/msm8937-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8937-mtp.dtsi
index 57823b8..90685e9 100644
--- a/arch/arm64/boot/dts/qcom/msm8937-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8937-mtp.dtsi
@@ -156,6 +156,39 @@
};
};
+&pm8937_gpios {
+ nfc_clk {
+ nfc_clk_default: nfc_clk_default {
+ pins = "gpio5";
+ function = "normal";
+ input-enable;
+ power-source = <1>;
+ };
+ };
+};
+
+&i2c_5 { /* BLSP2 QUP1 (NFC) */
+ status = "ok";
+ nq@28 {
+ compatible = "qcom,nq-nci";
+ reg = <0x28>;
+ qcom,nq-irq = <&tlmm 17 0x00>;
+ qcom,nq-ven = <&tlmm 16 0x00>;
+ qcom,nq-firm = <&tlmm 130 0x00>;
+ qcom,nq-clkreq = <&pm8937_gpios 5 0x00>;
+ interrupt-parent = <&tlmm>;
+ qcom,clk-src = "BBCLK2";
+ interrupts = <17 0>;
+ interrupt-names = "nfc_irq";
+ pinctrl-names = "nfc_active", "nfc_suspend";
+ pinctrl-0 = <&nfc_int_active &nfc_disable_active
+ &nfc_clk_default>;
+ pinctrl-1 = <&nfc_int_suspend &nfc_disable_suspend>;
+ clocks = <&clock_gcc clk_bb_clk2_pin>;
+ clock-names = "ref_clk";
+ };
+};
+
&thermal_zones {
quiet-therm-step {
status = "disabled";
diff --git a/arch/arm64/boot/dts/qcom/pmi632.dtsi b/arch/arm64/boot/dts/qcom/pmi632.dtsi
index 0b1a50a..65390cb 100644
--- a/arch/arm64/boot/dts/qcom/pmi632.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi632.dtsi
@@ -315,6 +315,7 @@
dpdm-supply = <&qusb_phy>;
qcom,auto-recharge-soc = <98>;
qcom,chg-vadc = <&pmi632_vadc>;
+ qcom,flash-disable-soc = <10>;
qcom,thermal-mitigation
= <3000000 2500000 2000000 1500000
@@ -464,7 +465,6 @@
"ilim1-s1",
"ilim2-s2",
"vreg-ok";
- qcom,flash-disable-soc = <10>;
};
smb5_vbus: qcom,smb5-vbus {
diff --git a/arch/arm64/boot/dts/qcom/sdm439-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm439-cdp.dtsi
index fd66f4b..6ecd0dd 100644
--- a/arch/arm64/boot/dts/qcom/sdm439-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm439-cdp.dtsi
@@ -239,6 +239,19 @@
qcom,mdss-dsi-bl-pmic-pwm-frequency = <100>;
qcom,mdss-dsi-bl-pmic-bank-select = <0>;
qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>;
+ qcom,esd-check-enabled;
+ qcom,mdss-dsi-panel-status-check-mode = "reg_read";
+ qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
+ qcom,mdss-dsi-panel-status-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-panel-status-value = <0x9d 0x9d 0x9d 0x9d>;
+ qcom,mdss-dsi-panel-on-check-value = <0x9d 0x9d 0x9d 0x9d>;
+ qcom,mdss-dsi-panel-status-read-length = <4>;
+ qcom,mdss-dsi-panel-max-error-count = <3>;
+ qcom,mdss-dsi-min-refresh-rate = <48>;
+ qcom,mdss-dsi-max-refresh-rate = <60>;
+ qcom,mdss-dsi-pan-enable-dynamic-fps;
+ qcom,mdss-dsi-pan-fps-update =
+ "dfps_immediate_porch_mode_vfp";
};
&dsi_nt35695b_truly_fhd_cmd {
diff --git a/arch/arm64/boot/dts/qcom/sdm439-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm439-mtp.dtsi
index 9aa6d58..9a9603d 100644
--- a/arch/arm64/boot/dts/qcom/sdm439-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm439-mtp.dtsi
@@ -214,6 +214,19 @@
qcom,mdss-dsi-bl-pmic-pwm-frequency = <100>;
qcom,mdss-dsi-bl-pmic-bank-select = <0>;
qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>;
+ qcom,esd-check-enabled;
+ qcom,mdss-dsi-panel-status-check-mode = "reg_read";
+ qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
+ qcom,mdss-dsi-panel-status-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-panel-status-value = <0x9d 0x9d 0x9d 0x9d>;
+ qcom,mdss-dsi-panel-on-check-value = <0x9d 0x9d 0x9d 0x9d>;
+ qcom,mdss-dsi-panel-status-read-length = <4>;
+ qcom,mdss-dsi-panel-max-error-count = <3>;
+ qcom,mdss-dsi-min-refresh-rate = <48>;
+ qcom,mdss-dsi-max-refresh-rate = <60>;
+ qcom,mdss-dsi-pan-enable-dynamic-fps;
+ qcom,mdss-dsi-pan-fps-update =
+ "dfps_immediate_porch_mode_vfp";
};
&i2c_2 {
diff --git a/arch/arm64/boot/dts/qcom/sdm439-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm439-qrd.dtsi
index df85d95..5662e17 100644
--- a/arch/arm64/boot/dts/qcom/sdm439-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm439-qrd.dtsi
@@ -311,6 +311,19 @@
qcom,mdss-dsi-bl-pmic-pwm-frequency = <100>;
qcom,mdss-dsi-bl-pmic-bank-select = <0>;
qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>;
+ qcom,esd-check-enabled;
+ qcom,mdss-dsi-panel-status-check-mode = "reg_read";
+ qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
+ qcom,mdss-dsi-panel-status-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-panel-status-value = <0x9d 0x9d 0x9d 0x9d>;
+ qcom,mdss-dsi-panel-on-check-value = <0x9d 0x9d 0x9d 0x9d>;
+ qcom,mdss-dsi-panel-status-read-length = <4>;
+ qcom,mdss-dsi-panel-max-error-count = <3>;
+ qcom,mdss-dsi-min-refresh-rate = <48>;
+ qcom,mdss-dsi-max-refresh-rate = <60>;
+ qcom,mdss-dsi-pan-enable-dynamic-fps;
+ qcom,mdss-dsi-pan-fps-update =
+ "dfps_immediate_porch_mode_vfp";
};
&i2c_2 {
diff --git a/arch/arm64/boot/dts/qcom/sdm439-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm439-regulator.dtsi
index e2f2dea..4c4c4bd 100644
--- a/arch/arm64/boot/dts/qcom/sdm439-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm439-regulator.dtsi
@@ -479,7 +479,7 @@
<0 0 0>;
qcom,cpr-voltage-ceiling-override =
- <(-1) (-1) 795000 795000 835000 910000 910000>;
+ <(-1) (-1) 810000 845000 885000 960000 960000>;
qcom,cpr-enable;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm439.dtsi b/arch/arm64/boot/dts/qcom/sdm439.dtsi
index 0e4f666..0b01035 100644
--- a/arch/arm64/boot/dts/qcom/sdm439.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm439.dtsi
@@ -341,10 +341,10 @@
reg = <0x001a94400 0x400>,
<0x0184d074 0x8>;
reg-names = "pll_base", "gdsc_base";
- /delete-property/ qcom,dsi-pll-ssc-en;
- /delete-property/ qcom,dsi-pll-ssc-mode;
- /delete-property/ qcom,ssc-frequency-hz;
- /delete-property/ qcom,ssc-ppm;
+ qcom,dsi-pll-ssc-en;
+ qcom,dsi-pll-ssc-mode = "down-spread";
+ qcom,ssc-frequency-hz = <31500>;
+ qcom,ssc-ppm = <5000>;
};
&mdss_dsi1_pll {
@@ -352,10 +352,10 @@
reg = <0x001a96400 0x400>,
<0x0184d074 0x8>;
reg-names = "pll_base", "gdsc_base";
- /delete-property/ qcom,dsi-pll-ssc-en;
- /delete-property/ qcom,dsi-pll-ssc-mode;
- /delete-property/ qcom,ssc-frequency-hz;
- /delete-property/ qcom,ssc-ppm;
+ qcom,dsi-pll-ssc-en;
+ qcom,dsi-pll-ssc-mode = "down-spread";
+ qcom,ssc-frequency-hz = <31500>;
+ qcom,ssc-ppm = <5000>;
};
&mdss_dsi {
diff --git a/arch/arm64/boot/dts/qcom/sdm450-pmi632.dtsi b/arch/arm64/boot/dts/qcom/sdm450-pmi632.dtsi
index e09d637..6e39327 100644
--- a/arch/arm64/boot/dts/qcom/sdm450-pmi632.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm450-pmi632.dtsi
@@ -372,3 +372,24 @@
};
};
};
+
+&tlmm {
+ pmx_mdss {
+ mdss_dsi_active: mdss_dsi_active {
+ mux {
+ pins = "gpio61";
+ };
+ config {
+ pins = "gpio61";
+ };
+ };
+ mdss_dsi_suspend: mdss_dsi_suspend {
+ mux {
+ pins = "gpio61";
+ };
+ config {
+ pins = "gpio61";
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm632-cdp-s2.dts b/arch/arm64/boot/dts/qcom/sdm632-cdp-s2.dts
index 2669d1f..9d6543f 100644
--- a/arch/arm64/boot/dts/qcom/sdm632-cdp-s2.dts
+++ b/arch/arm64/boot/dts/qcom/sdm632-cdp-s2.dts
@@ -24,33 +24,3 @@
qcom,pmic-id = <0x010016 0x25 0x0 0x0>;
};
-
-&soc {
- gpio_keys {
- /delete-node/home;
- };
-};
-
-&tlmm {
- tlmm_gpio_key {
- gpio_key_active: gpio_key_active {
- mux {
- pins = "gpio85", "gpio86", "gpio87";
- };
-
- config {
- pins = "gpio85", "gpio86", "gpio87";
- };
- };
-
- gpio_key_suspend: gpio_key_suspend {
- mux {
- pins = "gpio85", "gpio86", "gpio87";
- };
-
- config {
- pins = "gpio85", "gpio86", "gpio87";
- };
- };
- };
-};
diff --git a/arch/arm64/boot/dts/qcom/sdm632-ext-codec-cdp-s3.dts b/arch/arm64/boot/dts/qcom/sdm632-ext-codec-cdp-s3.dts
index 17ae9d1..60b149d 100644
--- a/arch/arm64/boot/dts/qcom/sdm632-ext-codec-cdp-s3.dts
+++ b/arch/arm64/boot/dts/qcom/sdm632-ext-codec-cdp-s3.dts
@@ -24,34 +24,3 @@
qcom,pmic-id = <0x010016 0x25 0x0 0x0>;
};
-
-&soc {
- gpio_keys {
- /delete-node/home;
- };
-};
-
-&tlmm {
- tlmm_gpio_key {
- gpio_key_active: gpio_key_active {
- mux {
- pins = "gpio85", "gpio86", "gpio87";
- };
-
- config {
- pins = "gpio85", "gpio86", "gpio87";
- };
- };
-
- gpio_key_suspend: gpio_key_suspend {
- mux {
- pins = "gpio85", "gpio86", "gpio87";
- };
-
- config {
- pins = "gpio85", "gpio86", "gpio87";
- };
- };
- };
-};
-
diff --git a/arch/arm64/boot/dts/qcom/sdm632-pm8004-cdp-s2.dts b/arch/arm64/boot/dts/qcom/sdm632-pm8004-cdp-s2.dts
index 4d68901..e0e6b4b 100644
--- a/arch/arm64/boot/dts/qcom/sdm632-pm8004-cdp-s2.dts
+++ b/arch/arm64/boot/dts/qcom/sdm632-pm8004-cdp-s2.dts
@@ -25,34 +25,3 @@
qcom,pmic-id = <0x010016 0x25 0xC 0x0>;
};
-
-&soc {
- gpio_keys {
- /delete-node/home;
- };
-};
-
-&tlmm {
- tlmm_gpio_key {
- gpio_key_active: gpio_key_active {
- mux {
- pins = "gpio85", "gpio86", "gpio87";
- };
-
- config {
- pins = "gpio85", "gpio86", "gpio87";
- };
- };
-
- gpio_key_suspend: gpio_key_suspend {
- mux {
- pins = "gpio85", "gpio86", "gpio87";
- };
-
- config {
- pins = "gpio85", "gpio86", "gpio87";
- };
- };
- };
-};
-
diff --git a/arch/arm64/boot/dts/qcom/sdm632-pm8004-ext-codec-cdp-s3.dts b/arch/arm64/boot/dts/qcom/sdm632-pm8004-ext-codec-cdp-s3.dts
index 6ca2940..413e85f 100644
--- a/arch/arm64/boot/dts/qcom/sdm632-pm8004-ext-codec-cdp-s3.dts
+++ b/arch/arm64/boot/dts/qcom/sdm632-pm8004-ext-codec-cdp-s3.dts
@@ -25,34 +25,3 @@
qcom,pmic-id = <0x010016 0x25 0xC 0x0>;
};
-
-&soc {
- gpio_keys {
- /delete-node/home;
- };
-};
-
-&tlmm {
- tlmm_gpio_key {
- gpio_key_active: gpio_key_active {
- mux {
- pins = "gpio85", "gpio86", "gpio87";
- };
-
- config {
- pins = "gpio85", "gpio86", "gpio87";
- };
- };
-
- gpio_key_suspend: gpio_key_suspend {
- mux {
- pins = "gpio85", "gpio86", "gpio87";
- };
-
- config {
- pins = "gpio85", "gpio86", "gpio87";
- };
- };
- };
-};
-
diff --git a/arch/arm64/boot/dts/qcom/sdm632-rcm.dts b/arch/arm64/boot/dts/qcom/sdm632-rcm.dts
index fe7ab38..68f0ea0 100644
--- a/arch/arm64/boot/dts/qcom/sdm632-rcm.dts
+++ b/arch/arm64/boot/dts/qcom/sdm632-rcm.dts
@@ -24,32 +24,3 @@
qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
};
-&soc {
- gpio_keys {
- /delete-node/home;
- };
-};
-
-&tlmm {
- tlmm_gpio_key {
- gpio_key_active: gpio_key_active {
- mux {
- pins = "gpio85", "gpio86", "gpio87";
- };
-
- config {
- pins = "gpio85", "gpio86", "gpio87";
- };
- };
-
- gpio_key_suspend: gpio_key_suspend {
- mux {
- pins = "gpio85", "gpio86", "gpio87";
- };
-
- config {
- pins = "gpio85", "gpio86", "gpio87";
- };
- };
- };
-};
diff --git a/arch/arm64/boot/dts/qcom/sdm632-rcm.dtsi b/arch/arm64/boot/dts/qcom/sdm632-rcm.dtsi
index 14ba3b4..aa20680 100644
--- a/arch/arm64/boot/dts/qcom/sdm632-rcm.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm632-rcm.dtsi
@@ -13,3 +13,34 @@
#include "sdm450-pmi632-cdp-s2.dtsi"
+&soc {
+ gpio_keys {
+ home {
+ status = "disabled";
+ };
+ };
+};
+
+&tlmm {
+ tlmm_gpio_key {
+ gpio_key_active {
+ mux {
+ pins = "gpio85", "gpio86", "gpio87";
+ };
+
+ config {
+ pins = "gpio85", "gpio86", "gpio87";
+ };
+ };
+
+ gpio_key_suspend {
+ mux {
+ pins = "gpio85", "gpio86", "gpio87";
+ };
+
+ config {
+ pins = "gpio85", "gpio86", "gpio87";
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index e9a913f..6c71212 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -776,6 +776,7 @@
<0x17a60000 0x100000>; /* GICR * 8 */
interrupts = <1 9 4>;
interrupt-parent = <&intc>;
+ ignored-save-restore-irqs = <38>;
};
pdc: interrupt-controller@b220000{
diff --git a/arch/arm64/configs/msm8937-perf_defconfig b/arch/arm64/configs/msm8937-perf_defconfig
index 11eb070..48d2bec 100644
--- a/arch/arm64/configs/msm8937-perf_defconfig
+++ b/arch/arm64/configs/msm8937-perf_defconfig
@@ -307,6 +307,10 @@
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_JOYSTICK=y
CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v26=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v26=y
+CONFIG_SECURE_TOUCH_SYNAPTICS_DSX_V26=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_HBTP_INPUT=y
CONFIG_INPUT_QPNP_POWER_ON=y
diff --git a/arch/arm64/configs/msm8937_defconfig b/arch/arm64/configs/msm8937_defconfig
index 3b73b06c..acb4a25 100644
--- a/arch/arm64/configs/msm8937_defconfig
+++ b/arch/arm64/configs/msm8937_defconfig
@@ -313,6 +313,10 @@
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_JOYSTICK=y
CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v26=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v26=y
+CONFIG_SECURE_TOUCH_SYNAPTICS_DSX_V26=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_HBTP_INPUT=y
CONFIG_INPUT_QPNP_POWER_ON=y
diff --git a/arch/arm64/configs/msm8953-perf_defconfig b/arch/arm64/configs/msm8953-perf_defconfig
index fe7fdc7..cfbfdca 100644
--- a/arch/arm64/configs/msm8953-perf_defconfig
+++ b/arch/arm64/configs/msm8953-perf_defconfig
@@ -582,6 +582,7 @@
CONFIG_WCNSS_CORE=y
CONFIG_WCNSS_CORE_PRONTO=y
CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y
+CONFIG_BIG_CLUSTER_MIN_FREQ_ADJUST=y
CONFIG_QCOM_BIMC_BWMON=y
CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
CONFIG_DEVFREQ_SIMPLE_DEV=y
diff --git a/arch/arm64/configs/msm8953_defconfig b/arch/arm64/configs/msm8953_defconfig
index 5ace927..d006f30 100644
--- a/arch/arm64/configs/msm8953_defconfig
+++ b/arch/arm64/configs/msm8953_defconfig
@@ -602,6 +602,7 @@
CONFIG_WCNSS_CORE=y
CONFIG_WCNSS_CORE_PRONTO=y
CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y
+CONFIG_BIG_CLUSTER_MIN_FREQ_ADJUST=y
CONFIG_QCOM_BIMC_BWMON=y
CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
CONFIG_DEVFREQ_SIMPLE_DEV=y
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index c4cc771..f8b5c48 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -228,6 +228,7 @@
}
#define gic_read_typer(c) readq_relaxed_no_log(c)
+#define gic_read_irouter(c) readq_relaxed_no_log(c)
#define gic_write_irouter(v, c) writeq_relaxed_no_log(v, c)
#endif /* __ASSEMBLY__ */
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index c9a2ab4..f035ff6 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -32,11 +32,16 @@
void *module_alloc(unsigned long size)
{
+ gfp_t gfp_mask = GFP_KERNEL;
void *p;
+ /* Silence the initial allocation */
+ if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
+ gfp_mask |= __GFP_NOWARN;
+
p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
module_alloc_base + MODULES_VSIZE,
- GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
+ gfp_mask, PAGE_KERNEL_EXEC, 0,
NUMA_NO_NODE, __builtin_return_address(0));
if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
diff --git a/drivers/base/core.c b/drivers/base/core.c
index ac43d6f..b20d016 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -877,7 +877,7 @@
int error;
if (of_node) {
- error = sysfs_create_link(&dev->kobj, &of_node->kobj,"of_node");
+ error = sysfs_create_link(&dev->kobj, of_node_kobj(of_node), "of_node");
if (error)
dev_warn(dev, "Error %d creating of_node link\n",error);
/* An error here doesn't warrant bringing down the device */
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index 286418f..a089e7c 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -2713,7 +2713,7 @@
}
}
-static int diag_dci_init_remote(void)
+int diag_dci_init_remote(void)
{
int i;
struct dci_ops_tbl_t *temp = NULL;
@@ -2740,11 +2740,6 @@
return 0;
}
-#else
-static int diag_dci_init_remote(void)
-{
- return 0;
-}
#endif
static int diag_dci_init_ops_tbl(void)
@@ -2754,10 +2749,6 @@
err = diag_dci_init_local();
if (err)
goto err;
- err = diag_dci_init_remote();
- if (err)
- goto err;
-
return 0;
err:
diff --git a/drivers/char/diag/diag_dci.h b/drivers/char/diag/diag_dci.h
index 61eb3f5..2fb0e3f 100644
--- a/drivers/char/diag/diag_dci.h
+++ b/drivers/char/diag/diag_dci.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, 2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -323,6 +323,7 @@
int diag_dci_write_bridge(int token, unsigned char *buf, int len);
int diag_dci_write_done_bridge(int index, unsigned char *buf, int len);
int diag_dci_send_handshake_pkt(int index);
+int diag_dci_init_remote(void);
#endif
#endif
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 54a4d98..a169230 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -970,6 +970,8 @@
diagmem_setsize(POOL_TYPE_QSC_MUX, itemsize_qsc_usb,
poolsize_qsc_usb);
diag_md_mdm_init();
+ if (diag_dci_init_remote())
+ return -ENOMEM;
driver->hdlc_encode_buf = kzalloc(DIAG_MAX_HDLC_BUF_SIZE, GFP_KERNEL);
if (!driver->hdlc_encode_buf)
return -ENOMEM;
diff --git a/drivers/clk/msm/mdss/mdss-dsi-pll-12nm-util.c b/drivers/clk/msm/mdss/mdss-dsi-pll-12nm-util.c
index f2ed36c..0bdfeeb 100644
--- a/drivers/clk/msm/mdss/mdss-dsi-pll-12nm-util.c
+++ b/drivers/clk/msm/mdss/mdss-dsi-pll-12nm-util.c
@@ -165,7 +165,8 @@
return sel;
}
-static bool pll_is_pll_locked_12nm(struct mdss_pll_resources *pll)
+static bool pll_is_pll_locked_12nm(struct mdss_pll_resources *pll,
+ bool is_handoff)
{
u32 status;
bool pll_locked;
@@ -177,8 +178,9 @@
((status & BIT(1)) > 0),
DSI_PLL_POLL_MAX_READS,
DSI_PLL_POLL_TIMEOUT_US)) {
- pr_err("DSI PLL ndx=%d status=%x failed to Lock\n",
- pll->index, status);
+ if (!is_handoff)
+ pr_err("DSI PLL ndx=%d status=%x failed to Lock\n",
+ pll->index, status);
pll_locked = false;
} else {
pll_locked = true;
@@ -213,7 +215,7 @@
wmb(); /* make sure register committed before enabling branch clocks */
udelay(50); /* h/w recommended delay */
- if (!pll_is_pll_locked_12nm(pll)) {
+ if (!pll_is_pll_locked_12nm(pll, false)) {
pr_err("DSI PLL ndx=%d lock failed!\n",
pll->index);
rc = -EINVAL;
@@ -261,7 +263,7 @@
MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_POWERUP_CTRL, data);
ndelay(500); /* h/w recommended delay */
- if (!pll_is_pll_locked_12nm(pll)) {
+ if (!pll_is_pll_locked_12nm(pll, false)) {
pr_err("DSI PLL ndx=%d lock failed!\n",
pll->index);
rc = -EINVAL;
@@ -556,6 +558,142 @@
param->gmp_cntrl = 0x1;
}
+static u32 __mdss_dsi_get_multi_intX100(u64 vco_rate, u32 *rem)
+{
+ u32 reminder = 0;
+ u64 temp = 0;
+ const u32 ref_clk_rate = 19200000, quarterX100 = 25;
+
+ temp = div_u64_rem(vco_rate, ref_clk_rate, &reminder);
+ temp *= 100;
+
+ /*
+ * Multiplication integer needs to be floored in steps of 0.25
+ * Hence multi_intX100 needs to be rounded off in steps of 25
+ */
+ if (reminder < (ref_clk_rate / 4)) {
+ *rem = reminder;
+ return temp;
+ } else if ((reminder >= (ref_clk_rate / 4)) &&
+ reminder < (ref_clk_rate / 2)) {
+ *rem = (reminder - (ref_clk_rate / 4));
+ return (temp + quarterX100);
+ } else if ((reminder >= (ref_clk_rate / 2)) &&
+ (reminder < ((3 * ref_clk_rate) / 4))) {
+ *rem = (reminder - (ref_clk_rate / 2));
+ return (temp + (quarterX100 * 2));
+ }
+
+ *rem = (reminder - ((3 * ref_clk_rate) / 4));
+ return (temp + (quarterX100 * 3));
+}
+
+static u32 __calc_gcd(u32 num1, u32 num2)
+{
+ if (num2 != 0)
+ return __calc_gcd(num2, (num1 % num2));
+ else
+ return num1;
+}
+
+static void mdss_dsi_pll_12nm_calc_ssc(struct mdss_pll_resources *pll,
+ struct dsi_pll_db *pdb)
+{
+ struct dsi_pll_param *param = &pdb->param;
+ u64 multi_intX100 = 0, temp = 0;
+ u32 temp_rem1 = 0, temp_rem2 = 0;
+ const u64 power_2_17 = 131072, power_2_10 = 1024;
+ const u32 ref_clk_rate = 19200000;
+
+ multi_intX100 = __mdss_dsi_get_multi_intX100(pll->vco_current_rate,
+ &temp_rem1);
+
+ /* Calculation for mpll_ssc_peak_i */
+ temp = (multi_intX100 * pll->ssc_ppm * power_2_17);
+ temp = div_u64(temp, 100); /* 100 div for multi_intX100 */
+ param->mpll_ssc_peak_i =
+ (u32) div_u64(temp, 1000000); /*10^6 for SSC PPM */
+
+ /* Calculation for mpll_stepsize_i */
+ param->mpll_stepsize_i = (u32) div_u64((param->mpll_ssc_peak_i *
+ pll->ssc_freq * power_2_10), ref_clk_rate);
+
+ /* Calculation for mpll_mint_i */
+ param->mpll_mint_i = (u32) (div_u64((multi_intX100 * 4), 100) - 32);
+
+ /* Calculation for mpll_frac_den */
+ param->mpll_frac_den = (u32) div_u64(ref_clk_rate,
+ __calc_gcd((u32)pll->vco_current_rate, ref_clk_rate));
+
+ /* Calculation for mpll_frac_quot_i */
+ temp = (temp_rem1 * power_2_17);
+ param->mpll_frac_quot_i =
+ (u32) div_u64_rem(temp, ref_clk_rate, &temp_rem2);
+
+ /* Calculation for mpll_frac_rem */
+ param->mpll_frac_rem = (u32) div_u64(((u64)temp_rem2 *
+ param->mpll_frac_den), ref_clk_rate);
+
+ pr_debug("mpll_ssc_peak_i=%d mpll_stepsize_i=%d mpll_mint_i=%d\n",
+ param->mpll_ssc_peak_i, param->mpll_stepsize_i,
+ param->mpll_mint_i);
+ pr_debug("mpll_frac_den=%d mpll_frac_quot_i=%d mpll_frac_rem=%d",
+ param->mpll_frac_den, param->mpll_frac_quot_i,
+ param->mpll_frac_rem);
+}
+
+static void pll_db_commit_12nm_ssc(struct mdss_pll_resources *pll,
+ struct dsi_pll_db *pdb)
+{
+ void __iomem *pll_base = pll->pll_base;
+ struct dsi_pll_param *param = &pdb->param;
+ char data = 0;
+
+ MDSS_PLL_REG_W(pll_base, DSIPHY_SSC0, 0x27);
+
+ data = (param->mpll_mint_i & 0xff);
+ MDSS_PLL_REG_W(pll_base, DSIPHY_SSC7, data);
+
+ data = ((param->mpll_mint_i & 0xff00) >> 8);
+ MDSS_PLL_REG_W(pll_base, DSIPHY_SSC8, data);
+
+ data = (param->mpll_ssc_peak_i & 0xff);
+ MDSS_PLL_REG_W(pll_base, DSIPHY_SSC1, data);
+
+ data = ((param->mpll_ssc_peak_i & 0xff00) >> 8);
+ MDSS_PLL_REG_W(pll_base, DSIPHY_SSC2, data);
+
+ data = ((param->mpll_ssc_peak_i & 0xf0000) >> 16);
+ MDSS_PLL_REG_W(pll_base, DSIPHY_SSC3, data);
+
+ data = (param->mpll_stepsize_i & 0xff);
+ MDSS_PLL_REG_W(pll_base, DSIPHY_SSC4, data);
+
+ data = ((param->mpll_stepsize_i & 0xff00) >> 8);
+ MDSS_PLL_REG_W(pll_base, DSIPHY_SSC5, data);
+
+ data = ((param->mpll_stepsize_i & 0x1f0000) >> 16);
+ MDSS_PLL_REG_W(pll_base, DSIPHY_SSC6, data);
+
+ data = (param->mpll_frac_quot_i & 0xff);
+ MDSS_PLL_REG_W(pll_base, DSIPHY_SSC10, data);
+
+ data = ((param->mpll_frac_quot_i & 0xff00) >> 8);
+ MDSS_PLL_REG_W(pll_base, DSIPHY_SSC11, data);
+
+ data = (param->mpll_frac_rem & 0xff);
+ MDSS_PLL_REG_W(pll_base, DSIPHY_SSC12, data);
+
+ data = ((param->mpll_frac_rem & 0xff00) >> 8);
+ MDSS_PLL_REG_W(pll_base, DSIPHY_SSC13, data);
+
+ data = (param->mpll_frac_den & 0xff);
+ MDSS_PLL_REG_W(pll_base, DSIPHY_SSC14, data);
+
+ data = ((param->mpll_frac_den & 0xff00) >> 8);
+ MDSS_PLL_REG_W(pll_base, DSIPHY_SSC15, data);
+}
+
static void pll_db_commit_12nm(struct mdss_pll_resources *pll,
struct dsi_pll_db *pdb)
{
@@ -616,6 +754,9 @@
MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PRO_DLY_RELOCK, 0x0c);
MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_LOCK_DET_MODE_SEL, 0x02);
+ if (pll->ssc_en)
+ pll_db_commit_12nm_ssc(pll, pdb);
+
pr_debug("pll:%d\n", pll->index);
wmb(); /* make sure register committed before preparing the clocks */
}
@@ -710,7 +851,7 @@
return ret;
}
- if (pll_is_pll_locked_12nm(pll)) {
+ if (pll_is_pll_locked_12nm(pll, true)) {
pll->handoff_resources = true;
pll->pll_on = true;
c->rate = pll_vco_get_rate_12nm(c);
@@ -756,18 +897,27 @@
rc, pll->index);
goto error;
}
+ }
- data = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_SYS_CTRL);
- if (data & BIT(7)) { /* DSI PHY in LP-11 or ULPS */
- rc = dsi_pll_relock(pll);
- if (rc)
- goto error;
- else
- goto end;
- }
+ /*
+ * For cases where DSI PHY is already enabled like:
+ * 1.) LP-11 during static screen
+ * 2.) ULPS during static screen
+ * 3.) Boot up with cont splash enabled where PHY is programmed in LK
+ * Execute the Re-lock sequence to enable the DSI PLL.
+ */
+ data = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_SYS_CTRL);
+ if (data & BIT(7)) {
+ rc = dsi_pll_relock(pll);
+ if (rc)
+ goto error;
+ else
+ goto end;
}
mdss_dsi_pll_12nm_calc_reg(pll, pdb);
+ if (pll->ssc_en)
+ mdss_dsi_pll_12nm_calc_ssc(pll, pdb);
/* commit DSI vco */
pll_db_commit_12nm(pll, pdb);
@@ -802,6 +952,7 @@
{
struct dsi_pll_vco_clk *vco = to_vco_clk(c);
struct mdss_pll_resources *pll = vco->priv;
+ u32 data = 0;
if (!pll) {
pr_err("Dsi pll resources are not available\n");
@@ -813,7 +964,9 @@
return -EINVAL;
}
- MDSS_PLL_REG_W(pll->pll_base, DSIPHY_SSC0, 0x40);
+ data = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_SSC0);
+ data |= BIT(6); /* enable GP_CLK_EN */
+ MDSS_PLL_REG_W(pll->pll_base, DSIPHY_SSC0, data);
wmb(); /* make sure register committed before enabling branch clocks */
return 0;
diff --git a/drivers/clk/msm/mdss/mdss-dsi-pll-12nm.c b/drivers/clk/msm/mdss/mdss-dsi-pll-12nm.c
index 210742b..5d2fa9a 100644
--- a/drivers/clk/msm/mdss/mdss-dsi-pll-12nm.c
+++ b/drivers/clk/msm/mdss/mdss-dsi-pll-12nm.c
@@ -636,11 +636,13 @@
};
static struct clk_lookup mdss_dsi_pllcc_12nm[] = {
+ CLK_LIST(dsi0pll_vco_clk),
CLK_LIST(dsi0pll_byte_clk_src),
CLK_LIST(dsi0pll_pixel_clk_src),
};
static struct clk_lookup mdss_dsi_pllcc_12nm_1[] = {
+ CLK_LIST(dsi1pll_vco_clk),
CLK_LIST(dsi1pll_byte_clk_src),
CLK_LIST(dsi1pll_pixel_clk_src),
};
@@ -650,6 +652,9 @@
{
int rc = 0, ndx;
struct dsi_pll_db *pdb;
+ int const ssc_freq_min = 30000; /* min. recommended freq. value */
+ int const ssc_freq_max = 33000; /* max. recommended freq. value */
+ int const ssc_ppm_max = 5000; /* max. recommended ppm */
if (!pdev || !pdev->dev.of_node) {
pr_err("Invalid input parameters\n");
@@ -680,6 +685,21 @@
pixel_div_clk_src_ops = clk_ops_div;
pixel_div_clk_src_ops.prepare = dsi_pll_div_prepare;
+ if (pll_res->ssc_en) {
+ if (!pll_res->ssc_freq || (pll_res->ssc_freq < ssc_freq_min) ||
+ (pll_res->ssc_freq > ssc_freq_max)) {
+ pll_res->ssc_freq = ssc_freq_min;
+ pr_debug("SSC frequency out of recommended range. Set to default=%d\n",
+ pll_res->ssc_freq);
+ }
+
+ if (!pll_res->ssc_ppm || (pll_res->ssc_ppm > ssc_ppm_max)) {
+ pll_res->ssc_ppm = ssc_ppm_max;
+ pr_debug("SSC PPM out of recommended range. Set to default=%d\n",
+ pll_res->ssc_ppm);
+ }
+ }
+
/* Set client data to mux, div and vco clocks. */
if (pll_res->index == DSI_PLL_1) {
dsi1pll_byte_clk_src.priv = pll_res;
diff --git a/drivers/clk/msm/mdss/mdss-dsi-pll-12nm.h b/drivers/clk/msm/mdss/mdss-dsi-pll-12nm.h
index 6912ff4..0974717 100644
--- a/drivers/clk/msm/mdss/mdss-dsi-pll-12nm.h
+++ b/drivers/clk/msm/mdss/mdss-dsi-pll-12nm.h
@@ -37,6 +37,20 @@
#define DSIPHY_PLL_LOOP_DIV_RATIO_1 0x2e8
#define DSIPHY_SLEWRATE_DDL_CYC_FRQ_ADJ_1 0x328
#define DSIPHY_SSC0 0x394
+#define DSIPHY_SSC1 0x398
+#define DSIPHY_SSC2 0x39c
+#define DSIPHY_SSC3 0x3a0
+#define DSIPHY_SSC4 0x3a4
+#define DSIPHY_SSC5 0x3a8
+#define DSIPHY_SSC6 0x3ac
+#define DSIPHY_SSC10 0x360
+#define DSIPHY_SSC11 0x364
+#define DSIPHY_SSC12 0x368
+#define DSIPHY_SSC13 0x36c
+#define DSIPHY_SSC14 0x370
+#define DSIPHY_SSC15 0x374
+#define DSIPHY_SSC7 0x3b0
+#define DSIPHY_SSC8 0x3b4
#define DSIPHY_SSC9 0x3b8
#define DSIPHY_STAT0 0x3e0
#define DSIPHY_CTRL0 0x3e8
@@ -58,6 +72,14 @@
u32 post_div_mux;
u32 pixel_divhf;
u32 fsm_ovr_ctrl;
+
+ /* ssc_params */
+ u32 mpll_ssc_peak_i;
+ u32 mpll_stepsize_i;
+ u32 mpll_mint_i;
+ u32 mpll_frac_den;
+ u32 mpll_frac_quot_i;
+ u32 mpll_frac_rem;
};
enum {
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index cb3c48a..99ad22e 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -27,4 +27,8 @@
# POWERPC drivers
obj-$(CONFIG_PSERIES_CPUIDLE) += cpuidle-pseries.o
obj-$(CONFIG_POWERNV_CPUIDLE) += cpuidle-powernv.o
+ifeq ($(CONFIG_MSM_PM_LEGACY), y)
+obj-y += lpm-levels-legacy.o lpm-levels-of-legacy.o lpm-workarounds.o
+else
obj-$(CONFIG_MSM_PM) += lpm-levels.o lpm-levels-of.o
+endif
diff --git a/drivers/cpuidle/lpm-levels-legacy.c b/drivers/cpuidle/lpm-levels-legacy.c
new file mode 100644
index 0000000..17e4286
--- /dev/null
+++ b/drivers/cpuidle/lpm-levels-legacy.c
@@ -0,0 +1,1523 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include <linux/tick.h>
+#include <linux/suspend.h>
+#include <linux/pm_qos.h>
+#include <linux/of_platform.h>
+#include <linux/smp.h>
+#include <linux/remote_spinlock.h>
+#include <linux/msm_remote_spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/coresight-cti.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/cpu_pm.h>
+#include <soc/qcom/spm.h>
+#include <soc/qcom/pm-legacy.h>
+#include <soc/qcom/rpm-notifier.h>
+#include <soc/qcom/event_timer.h>
+#include <soc/qcom/lpm-stats.h>
+#include <soc/qcom/lpm_levels.h>
+#include <soc/qcom/jtag.h>
+#include <asm/cputype.h>
+#include <asm/arch_timer.h>
+#include <asm/cacheflush.h>
+#include <asm/suspend.h>
+#include "lpm-levels-legacy.h"
+#include "lpm-workarounds.h"
+#include <trace/events/power.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/trace_msm_low_power.h>
+#if defined(CONFIG_COMMON_CLK)
+#include "../clk/clk.h"
+#elif defined(CONFIG_COMMON_CLK_MSM)
+#include "../../drivers/clk/msm/clock.h"
+#endif /* CONFIG_COMMON_CLK */
+#include <soc/qcom/minidump.h>
+
+#define SCLK_HZ (32768)
+#define SCM_HANDOFF_LOCK_ID "S:7"
+#define PSCI_POWER_STATE(reset) (reset << 30)
+#define PSCI_AFFINITY_LEVEL(lvl) ((lvl & 0x3) << 24)
+static remote_spinlock_t scm_handoff_lock;
+
+enum {
+ MSM_LPM_LVL_DBG_SUSPEND_LIMITS = BIT(0),
+ MSM_LPM_LVL_DBG_IDLE_LIMITS = BIT(1),
+};
+
+enum debug_event {
+ CPU_ENTER,
+ CPU_EXIT,
+ CLUSTER_ENTER,
+ CLUSTER_EXIT,
+ PRE_PC_CB,
+ CPU_HP_STARTING,
+ CPU_HP_DYING,
+};
+
+struct lpm_debug {
+ cycle_t time;
+ enum debug_event evt;
+ int cpu;
+ uint32_t arg1;
+ uint32_t arg2;
+ uint32_t arg3;
+ uint32_t arg4;
+};
+
+static struct system_pm_ops *sys_pm_ops;
+struct lpm_cluster *lpm_root_node;
+
+static DEFINE_PER_CPU(struct lpm_cluster*, cpu_cluster);
+static bool suspend_in_progress;
+static struct hrtimer lpm_hrtimer;
+static struct lpm_debug *lpm_debug;
+static phys_addr_t lpm_debug_phys;
+
+static const int num_dbg_elements = 0x100;
+
+static void cluster_unprepare(struct lpm_cluster *cluster,
+ const struct cpumask *cpu, int child_idx, bool from_idle,
+ int64_t time);
+static void cluster_prepare(struct lpm_cluster *cluster,
+ const struct cpumask *cpu, int child_idx, bool from_idle,
+ int64_t time);
+
+static bool menu_select;
+module_param_named(
+ menu_select, menu_select, bool, 0664
+);
+
+static bool print_parsed_dt;
+module_param_named(
+ print_parsed_dt, print_parsed_dt, bool, 0664
+);
+
+static bool sleep_disabled;
+module_param_named(sleep_disabled,
+ sleep_disabled, bool, 0664);
+
+s32 msm_cpuidle_get_deep_idle_latency(void)
+{
+ return 10;
+}
+EXPORT_SYMBOL(msm_cpuidle_get_deep_idle_latency);
+
+uint32_t register_system_pm_ops(struct system_pm_ops *pm_ops)
+{
+ if (sys_pm_ops)
+ return -EUSERS;
+
+ sys_pm_ops = pm_ops;
+
+ return 0;
+}
+
+static uint32_t least_cluster_latency(struct lpm_cluster *cluster,
+ struct latency_level *lat_level)
+{
+ struct list_head *list;
+ struct lpm_cluster_level *level;
+ struct lpm_cluster *n;
+ struct power_params *pwr_params;
+ uint32_t latency = 0;
+ int i;
+
+ if (!cluster->list.next) {
+ for (i = 0; i < cluster->nlevels; i++) {
+ level = &cluster->levels[i];
+ pwr_params = &level->pwr;
+ if (lat_level->reset_level == level->reset_level) {
+ if ((latency > pwr_params->latency_us)
+ || (!latency))
+ latency = pwr_params->latency_us;
+ break;
+ }
+ }
+ } else {
+ list_for_each(list, &cluster->parent->child) {
+ n = list_entry(list, typeof(*n), list);
+ if (lat_level->level_name) {
+ if (strcmp(lat_level->level_name,
+ n->cluster_name))
+ continue;
+ }
+ for (i = 0; i < n->nlevels; i++) {
+ level = &n->levels[i];
+ pwr_params = &level->pwr;
+ if (lat_level->reset_level ==
+ level->reset_level) {
+ if ((latency > pwr_params->latency_us)
+ || (!latency))
+ latency =
+ pwr_params->latency_us;
+ break;
+ }
+ }
+ }
+ }
+ return latency;
+}
+
+static uint32_t least_cpu_latency(struct list_head *child,
+ struct latency_level *lat_level)
+{
+ struct list_head *list;
+ struct lpm_cpu_level *level;
+ struct power_params *pwr_params;
+ struct lpm_cpu *cpu;
+ struct lpm_cluster *n;
+ uint32_t latency = 0;
+ int i;
+
+ list_for_each(list, child) {
+ n = list_entry(list, typeof(*n), list);
+ if (lat_level->level_name) {
+ if (strcmp(lat_level->level_name, n->cluster_name))
+ continue;
+ }
+ cpu = n->cpu;
+ for (i = 0; i < cpu->nlevels; i++) {
+ level = &cpu->levels[i];
+ pwr_params = &level->pwr;
+ if (lat_level->reset_level == level->reset_level) {
+ if ((latency > pwr_params->latency_us)
+ || (!latency))
+ latency = pwr_params->latency_us;
+ break;
+ }
+ }
+ }
+ return latency;
+}
+
+static struct lpm_cluster *cluster_aff_match(struct lpm_cluster *cluster,
+ int affinity_level)
+{
+ struct lpm_cluster *n;
+
+ if ((cluster->aff_level == affinity_level)
+ || ((cluster->cpu) && (affinity_level == 0)))
+ return cluster;
+ else if (!cluster->cpu) {
+ n = list_entry(cluster->child.next, typeof(*n), list);
+ return cluster_aff_match(n, affinity_level);
+ } else
+ return NULL;
+}
+
+int lpm_get_latency(struct latency_level *level, uint32_t *latency)
+{
+ struct lpm_cluster *cluster;
+ uint32_t val;
+
+ if (!lpm_root_node) {
+ pr_err("%s: lpm_probe not completed\n", __func__);
+ return -EAGAIN;
+ }
+
+ if ((level->affinity_level < 0)
+ || (level->affinity_level > lpm_root_node->aff_level)
+ || (level->reset_level < LPM_RESET_LVL_RET)
+ || (level->reset_level > LPM_RESET_LVL_PC)
+ || !latency)
+ return -EINVAL;
+
+ cluster = cluster_aff_match(lpm_root_node, level->affinity_level);
+ if (!cluster) {
+ pr_err("%s:No matching cluster found for affinity_level:%d\n",
+ __func__, level->affinity_level);
+ return -EINVAL;
+ }
+
+ if (level->affinity_level == 0)
+ val = least_cpu_latency(&cluster->parent->child, level);
+ else
+ val = least_cluster_latency(cluster, level);
+
+ if (!val) {
+ pr_err("%s:No mode with affinity_level:%d reset_level:%d\n",
+ __func__, level->affinity_level, level->reset_level);
+ return -EINVAL;
+ }
+
+ *latency = val;
+
+ return 0;
+}
+EXPORT_SYMBOL(lpm_get_latency);
+
+static void update_debug_pc_event(enum debug_event event, uint32_t arg1,
+ uint32_t arg2, uint32_t arg3, uint32_t arg4)
+{
+ struct lpm_debug *dbg;
+ int idx;
+ static DEFINE_SPINLOCK(debug_lock);
+ static int pc_event_index;
+
+ if (!lpm_debug)
+ return;
+
+ spin_lock(&debug_lock);
+ idx = pc_event_index++;
+ dbg = &lpm_debug[idx & (num_dbg_elements - 1)];
+
+ dbg->evt = event;
+ dbg->time = arch_counter_get_cntpct();
+ dbg->cpu = raw_smp_processor_id();
+ dbg->arg1 = arg1;
+ dbg->arg2 = arg2;
+ dbg->arg3 = arg3;
+ dbg->arg4 = arg4;
+ spin_unlock(&debug_lock);
+}
+
+static enum hrtimer_restart lpm_hrtimer_cb(struct hrtimer *h)
+{
+ return HRTIMER_NORESTART;
+}
+
+static void msm_pm_set_timer(uint32_t modified_time_us)
+{
+ u64 modified_time_ns = modified_time_us * NSEC_PER_USEC;
+ ktime_t modified_ktime = ns_to_ktime(modified_time_ns);
+
+ lpm_hrtimer.function = lpm_hrtimer_cb;
+ hrtimer_start(&lpm_hrtimer, modified_ktime, HRTIMER_MODE_REL_PINNED);
+}
+
+int set_l2_mode(struct low_power_ops *ops, int mode,
+ struct lpm_cluster_level *level)
+{
+ int lpm = mode;
+ int rc = 0;
+ bool notify_rpm = level->notify_rpm;
+ struct low_power_ops *cpu_ops = per_cpu(cpu_cluster,
+ smp_processor_id())->lpm_dev;
+
+ if (cpu_ops->tz_flag & MSM_SCM_L2_OFF ||
+ cpu_ops->tz_flag & MSM_SCM_L2_GDHS)
+ coresight_cti_ctx_restore();
+
+ switch (mode) {
+ case MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE:
+ case MSM_SPM_MODE_POWER_COLLAPSE:
+ case MSM_SPM_MODE_FASTPC:
+ if (level->no_cache_flush)
+ cpu_ops->tz_flag = MSM_SCM_L2_GDHS;
+ else
+ cpu_ops->tz_flag = MSM_SCM_L2_OFF;
+ coresight_cti_ctx_save();
+ break;
+ case MSM_SPM_MODE_GDHS:
+ cpu_ops->tz_flag = MSM_SCM_L2_GDHS;
+ coresight_cti_ctx_save();
+ break;
+ case MSM_SPM_MODE_CLOCK_GATING:
+ case MSM_SPM_MODE_RETENTION:
+ case MSM_SPM_MODE_DISABLED:
+ cpu_ops->tz_flag = MSM_SCM_L2_ON;
+ break;
+ default:
+ cpu_ops->tz_flag = MSM_SCM_L2_ON;
+ lpm = MSM_SPM_MODE_DISABLED;
+ break;
+ }
+
+ rc = msm_spm_config_low_power_mode(ops->spm, lpm, notify_rpm);
+
+ if (rc)
+ pr_err("%s: Failed to set L2 low power mode %d, ERR %d",
+ __func__, lpm, rc);
+
+ return rc;
+}
+
+int set_l3_mode(struct low_power_ops *ops, int mode,
+ struct lpm_cluster_level *level)
+{
+ bool notify_rpm = level->notify_rpm;
+ struct low_power_ops *cpu_ops = per_cpu(cpu_cluster,
+ smp_processor_id())->lpm_dev;
+
+ switch (mode) {
+ case MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE:
+ case MSM_SPM_MODE_POWER_COLLAPSE:
+ case MSM_SPM_MODE_FASTPC:
+ cpu_ops->tz_flag |= MSM_SCM_L3_PC_OFF;
+ break;
+ default:
+ break;
+ }
+ return msm_spm_config_low_power_mode(ops->spm, mode, notify_rpm);
+}
+
+
+int set_system_mode(struct low_power_ops *ops, int mode,
+ struct lpm_cluster_level *level)
+{
+ bool notify_rpm = level->notify_rpm;
+
+ return msm_spm_config_low_power_mode(ops->spm, mode, notify_rpm);
+}
+
+static int set_device_mode(struct lpm_cluster *cluster, int ndevice,
+ struct lpm_cluster_level *level)
+{
+ struct low_power_ops *ops;
+
+ if (use_psci)
+ return 0;
+
+ ops = &cluster->lpm_dev[ndevice];
+ if (ops && ops->set_mode)
+ return ops->set_mode(ops, level->mode[ndevice],
+ level);
+ else
+ return -EINVAL;
+}
+
+static int cpu_power_select(struct cpuidle_device *dev,
+ struct lpm_cpu *cpu)
+{
+ int best_level = 0;
+ uint32_t latency_us = pm_qos_request_for_cpu(PM_QOS_CPU_DMA_LATENCY,
+ dev->cpu);
+ s64 sleep_us = ktime_to_us(tick_nohz_get_sleep_length());
+ uint32_t modified_time_us = 0;
+ uint32_t next_event_us = 0;
+ int i;
+ uint32_t lvl_latency_us = 0;
+ uint32_t *residency = get_per_cpu_max_residency(dev->cpu);
+
+ if (!cpu)
+ return best_level;
+
+ if ((sleep_disabled && !cpu_isolated(dev->cpu)) || sleep_us < 0)
+ return 0;
+
+ next_event_us = (uint32_t)(ktime_to_us(get_next_event_time(dev->cpu)));
+
+ for (i = 0; i < cpu->nlevels; i++) {
+ struct lpm_cpu_level *level = &cpu->levels[i];
+ struct power_params *pwr_params = &level->pwr;
+ uint32_t next_wakeup_us = (uint32_t)sleep_us;
+ enum msm_pm_sleep_mode mode = level->mode;
+ bool allow;
+
+ allow = lpm_cpu_mode_allow(dev->cpu, i, true);
+
+ if (!allow)
+ continue;
+
+ lvl_latency_us = pwr_params->latency_us;
+
+ if (latency_us < lvl_latency_us)
+ break;
+
+ if (next_event_us) {
+ if (next_event_us < lvl_latency_us)
+ break;
+
+ if (((next_event_us - lvl_latency_us) < sleep_us) ||
+ (next_event_us < sleep_us))
+ next_wakeup_us = next_event_us - lvl_latency_us;
+ }
+
+ best_level = i;
+
+ if (next_event_us && next_event_us < sleep_us &&
+ (mode != MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT))
+ modified_time_us
+ = next_event_us - lvl_latency_us;
+ else
+ modified_time_us = 0;
+
+ if (next_wakeup_us <= residency[i])
+ break;
+ }
+
+ if (modified_time_us)
+ msm_pm_set_timer(modified_time_us);
+
+ trace_cpu_power_select(best_level, sleep_us, latency_us, next_event_us);
+
+ return best_level;
+}
+
+static uint64_t get_cluster_sleep_time(struct lpm_cluster *cluster,
+ struct cpumask *mask, bool from_idle)
+{
+ int cpu;
+ int next_cpu = raw_smp_processor_id();
+ ktime_t next_event;
+ struct cpumask online_cpus_in_cluster;
+
+ next_event.tv64 = KTIME_MAX;
+ if (!from_idle) {
+ if (mask)
+ cpumask_copy(mask, cpumask_of(raw_smp_processor_id()));
+ return ~0ULL;
+ }
+
+ cpumask_and(&online_cpus_in_cluster,
+ &cluster->num_children_in_sync, cpu_online_mask);
+
+ for_each_cpu(cpu, &online_cpus_in_cluster) {
+ ktime_t *next_event_c;
+
+ next_event_c = get_next_event_cpu(cpu);
+ if (next_event_c->tv64 < next_event.tv64) {
+ next_event.tv64 = next_event_c->tv64;
+ next_cpu = cpu;
+ }
+ }
+
+ if (mask)
+ cpumask_copy(mask, cpumask_of(next_cpu));
+
+
+ if (ktime_to_us(next_event) > ktime_to_us(ktime_get()))
+ return ktime_to_us(ktime_sub(next_event, ktime_get()));
+ else
+ return 0;
+}
+
+static int cluster_select(struct lpm_cluster *cluster, bool from_idle)
+{
+ int best_level = -1;
+ int i;
+ struct cpumask mask;
+ uint32_t latency_us = ~0U;
+ uint32_t sleep_us;
+
+ if (!cluster)
+ return -EINVAL;
+
+ sleep_us = (uint32_t)get_cluster_sleep_time(cluster, NULL, from_idle);
+
+ if (cpumask_and(&mask, cpu_online_mask, &cluster->child_cpus))
+ latency_us = pm_qos_request_for_cpumask(PM_QOS_CPU_DMA_LATENCY,
+ &mask);
+
+ /*
+ * If at least one of the cores in the cluster is online, the cluster
+ * low power modes should be determined by the idle characteristics
+ * even if the last core enters the low power mode as a part of
+ * hotplug.
+ */
+
+ if (!from_idle && num_online_cpus() > 1 &&
+ cpumask_intersects(&cluster->child_cpus, cpu_online_mask))
+ from_idle = true;
+
+ for (i = 0; i < cluster->nlevels; i++) {
+ struct lpm_cluster_level *level = &cluster->levels[i];
+ struct power_params *pwr_params = &level->pwr;
+
+ if (!lpm_cluster_mode_allow(cluster, i, from_idle))
+ continue;
+
+ if (level->last_core_only &&
+ cpumask_weight(cpu_online_mask) > 1)
+ continue;
+
+ if (!cpumask_equal(&cluster->num_children_in_sync,
+ &level->num_cpu_votes))
+ continue;
+
+ if (from_idle && latency_us < pwr_params->latency_us)
+ break;
+
+ if (sleep_us < pwr_params->time_overhead_us)
+ break;
+
+ if (suspend_in_progress && from_idle && level->notify_rpm)
+ continue;
+
+ if (level->notify_rpm) {
+ if (!(sys_pm_ops && sys_pm_ops->sleep_allowed))
+ continue;
+ if (!sys_pm_ops->sleep_allowed())
+ continue;
+ }
+
+ best_level = i;
+
+ if (from_idle && sleep_us <= pwr_params->max_residency)
+ break;
+ }
+
+ return best_level;
+}
+
+static void cluster_notify(struct lpm_cluster *cluster,
+ struct lpm_cluster_level *level, bool enter)
+{
+ if (level->is_reset && enter)
+ cpu_cluster_pm_enter(cluster->aff_level);
+ else if (level->is_reset && !enter)
+ cpu_cluster_pm_exit(cluster->aff_level);
+}
+
+static unsigned int get_next_online_cpu(bool from_idle)
+{
+ unsigned int cpu;
+ ktime_t next_event;
+ unsigned int next_cpu = raw_smp_processor_id();
+
+ if (!from_idle)
+ return next_cpu;
+ next_event.tv64 = KTIME_MAX;
+ for_each_online_cpu(cpu) {
+ ktime_t *next_event_c;
+
+ next_event_c = get_next_event_cpu(cpu);
+ if (next_event_c->tv64 < next_event.tv64) {
+ next_event.tv64 = next_event_c->tv64;
+ next_cpu = cpu;
+ }
+ }
+ return next_cpu;
+}
+
+static int cluster_configure(struct lpm_cluster *cluster, int idx,
+ bool from_idle)
+{
+ struct lpm_cluster_level *level = &cluster->levels[idx];
+ struct cpumask cpumask;
+ unsigned int cpu;
+ int ret, i;
+
+ if (!cpumask_equal(&cluster->num_children_in_sync, &cluster->child_cpus)
+ || is_IPI_pending(&cluster->num_children_in_sync)) {
+ return -EPERM;
+ }
+
+ if (idx != cluster->default_level) {
+ update_debug_pc_event(CLUSTER_ENTER, idx,
+ cluster->num_children_in_sync.bits[0],
+ cluster->child_cpus.bits[0], from_idle);
+ trace_cluster_enter(cluster->cluster_name, idx,
+ cluster->num_children_in_sync.bits[0],
+ cluster->child_cpus.bits[0], from_idle);
+ lpm_stats_cluster_enter(cluster->stats, idx);
+ }
+
+ for (i = 0; i < cluster->ndevices; i++) {
+ ret = set_device_mode(cluster, i, level);
+ if (ret)
+ goto failed_set_mode;
+ }
+
+ if (level->notify_rpm) {
+ struct cpumask *nextcpu;
+
+ cpu = get_next_online_cpu(from_idle);
+ cpumask_copy(&cpumask, cpumask_of(cpu));
+ nextcpu = level->disable_dynamic_routing ? NULL : &cpumask;
+
+ if (sys_pm_ops && sys_pm_ops->enter)
+ if ((sys_pm_ops->enter(nextcpu)))
+ return -EBUSY;
+ }
+
+ /* Notify cluster enter event after successfully config completion */
+ cluster_notify(cluster, level, true);
+
+ cluster->last_level = idx;
+ return 0;
+
+failed_set_mode:
+
+ for (i = 0; i < cluster->ndevices; i++) {
+ int rc = 0;
+
+ level = &cluster->levels[cluster->default_level];
+ rc = set_device_mode(cluster, i, level);
+ WARN_ON(rc);
+ }
+ return ret;
+}
+
+static void cluster_prepare(struct lpm_cluster *cluster,
+ const struct cpumask *cpu, int child_idx, bool from_idle,
+ int64_t start_time)
+{
+ int i;
+
+ if (!cluster)
+ return;
+
+ if (cluster->min_child_level > child_idx)
+ return;
+
+ spin_lock(&cluster->sync_lock);
+ cpumask_or(&cluster->num_children_in_sync, cpu,
+ &cluster->num_children_in_sync);
+
+ for (i = 0; i < cluster->nlevels; i++) {
+ struct lpm_cluster_level *lvl = &cluster->levels[i];
+
+ if (child_idx >= lvl->min_child_level)
+ cpumask_or(&lvl->num_cpu_votes, cpu,
+ &lvl->num_cpu_votes);
+ }
+
+ /*
+ * cluster_select() does not make any configuration changes, so it's OK
+ * to release the lock here. If a core wakes up for a rude request,
+ * it need not wait for another to finish its cluster selection and
+ * configuration process
+ */
+
+ if (!cpumask_equal(&cluster->num_children_in_sync,
+ &cluster->child_cpus))
+ goto failed;
+
+ i = cluster_select(cluster, from_idle);
+
+ if (i < 0)
+ goto failed;
+
+ if (cluster_configure(cluster, i, from_idle))
+ goto failed;
+
+ cluster->stats->sleep_time = start_time;
+ cluster_prepare(cluster->parent, &cluster->num_children_in_sync, i,
+ from_idle, start_time);
+
+ spin_unlock(&cluster->sync_lock);
+
+ if (!use_psci) {
+ struct lpm_cluster_level *level = &cluster->levels[i];
+
+ if (level->notify_rpm)
+ if (sys_pm_ops && sys_pm_ops->update_wakeup)
+ sys_pm_ops->update_wakeup(from_idle);
+ }
+
+ return;
+failed:
+ spin_unlock(&cluster->sync_lock);
+ cluster->stats->sleep_time = 0;
+}
+
+static void cluster_unprepare(struct lpm_cluster *cluster,
+ const struct cpumask *cpu, int child_idx, bool from_idle,
+ int64_t end_time)
+{
+ struct lpm_cluster_level *level;
+ bool first_cpu;
+ int last_level, i, ret;
+
+ if (!cluster)
+ return;
+
+ if (cluster->min_child_level > child_idx)
+ return;
+
+ spin_lock(&cluster->sync_lock);
+ last_level = cluster->default_level;
+ first_cpu = cpumask_equal(&cluster->num_children_in_sync,
+ &cluster->child_cpus);
+ cpumask_andnot(&cluster->num_children_in_sync,
+ &cluster->num_children_in_sync, cpu);
+
+ for (i = 0; i < cluster->nlevels; i++) {
+ struct lpm_cluster_level *lvl = &cluster->levels[i];
+
+ if (child_idx >= lvl->min_child_level)
+ cpumask_andnot(&lvl->num_cpu_votes,
+ &lvl->num_cpu_votes, cpu);
+ }
+
+ if (!first_cpu || cluster->last_level == cluster->default_level)
+ goto unlock_return;
+
+ if (cluster->stats->sleep_time)
+ cluster->stats->sleep_time = end_time -
+ cluster->stats->sleep_time;
+ lpm_stats_cluster_exit(cluster->stats, cluster->last_level, true);
+
+ level = &cluster->levels[cluster->last_level];
+ if (level->notify_rpm) {
+ if (sys_pm_ops && sys_pm_ops->exit)
+ sys_pm_ops->exit();
+
+ /* If RPM bumps up CX to turbo, unvote CX turbo vote
+ * during exit of rpm assisted power collapse to
+ * reduce the power impact
+ */
+ lpm_wa_cx_unvote_send();
+
+ }
+
+ update_debug_pc_event(CLUSTER_EXIT, cluster->last_level,
+ cluster->num_children_in_sync.bits[0],
+ cluster->child_cpus.bits[0], from_idle);
+ trace_cluster_exit(cluster->cluster_name, cluster->last_level,
+ cluster->num_children_in_sync.bits[0],
+ cluster->child_cpus.bits[0], from_idle);
+
+ last_level = cluster->last_level;
+ cluster->last_level = cluster->default_level;
+
+ for (i = 0; i < cluster->ndevices; i++) {
+ level = &cluster->levels[cluster->default_level];
+ ret = set_device_mode(cluster, i, level);
+
+ WARN_ON(ret);
+
+ }
+
+ cluster_notify(cluster, &cluster->levels[last_level], false);
+ cluster_unprepare(cluster->parent, &cluster->child_cpus,
+ last_level, from_idle, end_time);
+unlock_return:
+ spin_unlock(&cluster->sync_lock);
+}
+
+static inline void cpu_prepare(struct lpm_cluster *cluster, int cpu_index,
+ bool from_idle)
+{
+ struct lpm_cpu_level *cpu_level = &cluster->cpu->levels[cpu_index];
+ bool jtag_save_restore =
+ cluster->cpu->levels[cpu_index].jtag_save_restore;
+
+ /* Use broadcast timer for aggregating sleep mode within a cluster.
+ * A broadcast timer could be used in the following scenarios
+ * 1) The architected timer HW gets reset during certain low power
+ * modes and the core relies on a external(broadcast) timer to wake up
+ * from sleep. This information is passed through device tree.
+ * 2) The CPU low power mode could trigger a system low power mode.
+ * The low power module relies on Broadcast timer to aggregate the
+ * next wakeup within a cluster, in which case, CPU switches over to
+ * use broadcast timer.
+ */
+ if (from_idle && (cpu_level->use_bc_timer ||
+ (cpu_index >= cluster->min_child_level)))
+ tick_broadcast_enter();
+
+ if (from_idle && ((cpu_level->mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
+ || (cpu_level->mode ==
+ MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)
+ || (cpu_level->is_reset)))
+ cpu_pm_enter();
+
+ /*
+ * Save JTAG registers for 8996v1.0 & 8996v2.x in C4 LPM
+ */
+ if (jtag_save_restore)
+ msm_jtag_save_state();
+}
+
+static inline void cpu_unprepare(struct lpm_cluster *cluster, int cpu_index,
+ bool from_idle)
+{
+ struct lpm_cpu_level *cpu_level = &cluster->cpu->levels[cpu_index];
+ bool jtag_save_restore =
+ cluster->cpu->levels[cpu_index].jtag_save_restore;
+
+ if (from_idle && (cpu_level->use_bc_timer ||
+ (cpu_index >= cluster->min_child_level)))
+ tick_broadcast_exit();
+
+ if (from_idle && ((cpu_level->mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
+ || (cpu_level->mode ==
+ MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)
+ || cpu_level->is_reset))
+ cpu_pm_exit();
+
+ /*
+ * Restore JTAG registers for 8996v1.0 & 8996v2.x in C4 LPM
+ */
+ if (jtag_save_restore)
+ msm_jtag_restore_state();
+}
+
+#if defined(CONFIG_ARM_PSCI) || !defined(CONFIG_CPU_V7)
+static int get_cluster_id(struct lpm_cluster *cluster, int *aff_lvl)
+{
+ int state_id = 0;
+
+ if (!cluster)
+ return 0;
+
+ spin_lock(&cluster->sync_lock);
+
+ if (!cpumask_equal(&cluster->num_children_in_sync,
+ &cluster->child_cpus))
+ goto unlock_and_return;
+
+ state_id |= get_cluster_id(cluster->parent, aff_lvl);
+
+ if (cluster->last_level != cluster->default_level) {
+ struct lpm_cluster_level *level
+ = &cluster->levels[cluster->last_level];
+
+ state_id |= (level->psci_id & cluster->psci_mode_mask)
+ << cluster->psci_mode_shift;
+ (*aff_lvl)++;
+ }
+unlock_and_return:
+ spin_unlock(&cluster->sync_lock);
+ return state_id;
+}
+#endif
+
+#if !defined(CONFIG_CPU_V7)
+asmlinkage int __invoke_psci_fn_smc(u64, u64, u64, u64);
+static bool psci_enter_sleep(struct lpm_cluster *cluster,
+ int idx, bool from_idle)
+
+{
+ bool ret;
+ /*
+ * idx = 0 is the default LPM state
+ */
+ if (!idx) {
+ stop_critical_timings();
+ wfi();
+ start_critical_timings();
+ ret = true;
+ } else {
+ int affinity_level = 0;
+ int state_id = get_cluster_id(cluster, &affinity_level);
+ int power_state =
+ PSCI_POWER_STATE(cluster->cpu->levels[idx].is_reset);
+ bool success = false;
+
+ if (cluster->cpu->levels[idx].hyp_psci) {
+ stop_critical_timings();
+ __invoke_psci_fn_smc(0xC4000021, 0, 0, 0);
+ start_critical_timings();
+ return 1;
+ }
+
+ affinity_level = PSCI_AFFINITY_LEVEL(affinity_level);
+ state_id |= (power_state | affinity_level
+ | cluster->cpu->levels[idx].psci_id);
+
+ update_debug_pc_event(CPU_ENTER, state_id,
+ 0xdeaffeed, 0xdeaffeed, true);
+ stop_critical_timings();
+ success = !arm_cpuidle_suspend(state_id);
+ start_critical_timings();
+ update_debug_pc_event(CPU_EXIT, state_id,
+ success, 0xdeaffeed, true);
+ ret = success;
+ }
+ return ret;
+}
+#elif defined(CONFIG_ARM_PSCI)
+static bool psci_enter_sleep(struct lpm_cluster *cluster,
+ int idx, bool from_idle)
+{
+ bool ret;
+
+ if (!idx) {
+ stop_critical_timings();
+ wfi();
+ start_critical_timings();
+ ret = true;
+ } else {
+ int affinity_level = 0;
+ int state_id = get_cluster_id(cluster, &affinity_level);
+ int power_state =
+ PSCI_POWER_STATE(cluster->cpu->levels[idx].is_reset);
+ bool success = false;
+
+ affinity_level = PSCI_AFFINITY_LEVEL(affinity_level);
+ state_id |= (power_state | affinity_level
+ | cluster->cpu->levels[idx].psci_id);
+
+ update_debug_pc_event(CPU_ENTER, state_id,
+ 0xdeaffeed, 0xdeaffeed, true);
+ stop_critical_timings();
+ success = !arm_cpuidle_suspend(state_id);
+ start_critical_timings();
+ update_debug_pc_event(CPU_EXIT, state_id,
+ success, 0xdeaffeed, true);
+ ret = success;
+ }
+ return ret;
+}
+#else
+static bool psci_enter_sleep(struct lpm_cluster *cluster,
+ int idx, bool from_idle)
+{
+ WARN_ONCE(true, "PSCI cpu_suspend ops not supported\n");
+ return false;
+}
+#endif
+
+static int lpm_cpuidle_select(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev)
+{
+ struct lpm_cluster *cluster = per_cpu(cpu_cluster, dev->cpu);
+ int idx;
+
+ if (!cluster)
+ return 0;
+
+ idx = cpu_power_select(dev, cluster->cpu);
+
+ return idx;
+}
+
+static int lpm_cpuidle_enter(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx)
+{
+ struct lpm_cluster *cluster = per_cpu(cpu_cluster, dev->cpu);
+ bool success = true;
+ const struct cpumask *cpumask = get_cpu_mask(dev->cpu);
+ ktime_t start = ktime_get();
+ int64_t start_time = ktime_to_ns(ktime_get()), end_time;
+
+ if (idx < 0)
+ return -EINVAL;
+
+ cpu_prepare(cluster, idx, true);
+ cluster_prepare(cluster, cpumask, idx, true, ktime_to_ns(ktime_get()));
+
+ trace_cpu_idle_enter(idx);
+ lpm_stats_cpu_enter(idx, start_time);
+
+ if (need_resched())
+ goto exit;
+
+ if (!use_psci) {
+ if (idx > 0)
+ update_debug_pc_event(CPU_ENTER, idx, 0xdeaffeed,
+ 0xdeaffeed, true);
+ success = msm_cpu_pm_enter_sleep(cluster->cpu->levels[idx].mode,
+ true);
+
+ if (idx > 0)
+ update_debug_pc_event(CPU_EXIT, idx, success,
+ 0xdeaffeed, true);
+ } else {
+ success = psci_enter_sleep(cluster, idx, true);
+ }
+
+exit:
+ end_time = ktime_to_ns(ktime_get());
+ lpm_stats_cpu_exit(idx, end_time, success);
+
+ cluster_unprepare(cluster, cpumask, idx, true, end_time);
+ cpu_unprepare(cluster, idx, true);
+
+ trace_cpu_idle_exit(idx, success);
+ dev->last_residency = ktime_us_delta(ktime_get(), start);
+ local_irq_enable();
+
+ return idx;
+}
+
+#ifdef CONFIG_CPU_IDLE_MULTIPLE_DRIVERS
+static int cpuidle_register_cpu(struct cpuidle_driver *drv,
+ struct cpumask *mask)
+{
+ struct cpuidle_device *device;
+ int cpu, ret;
+
+
+ if (!mask || !drv)
+ return -EINVAL;
+
+ drv->cpumask = mask;
+ ret = cpuidle_register_driver(drv);
+ if (ret) {
+ pr_err("Failed to register cpuidle driver %d\n", ret);
+ goto failed_driver_register;
+ }
+
+ for_each_cpu(cpu, mask) {
+ device = &per_cpu(cpuidle_dev, cpu);
+ device->cpu = cpu;
+
+ ret = cpuidle_register_device(device);
+ if (ret) {
+ pr_err("Failed to register cpuidle driver for cpu:%u\n",
+ cpu);
+ goto failed_driver_register;
+ }
+ }
+ return ret;
+failed_driver_register:
+ for_each_cpu(cpu, mask)
+ cpuidle_unregister_driver(drv);
+ return ret;
+}
+#else
+static int cpuidle_register_cpu(struct cpuidle_driver *drv,
+ struct cpumask *mask)
+{
+ return cpuidle_register(drv, NULL);
+}
+#endif
+
+static struct cpuidle_governor lpm_governor = {
+ .name = "qcom",
+ .rating = 30,
+ .select = lpm_cpuidle_select,
+ .owner = THIS_MODULE,
+};
+
+/*
+ * cluster_cpuidle_register() - build and register a cpuidle driver for
+ * each cpu-owning cluster under @cl (recurses through child clusters).
+ *
+ * Also pre-marks offline CPUs as "in sync" in every ancestor cluster so
+ * cluster vote state is consistent before those CPUs ever run.
+ * Returns 0 or a negative errno.
+ */
+static int cluster_cpuidle_register(struct lpm_cluster *cl)
+{
+	int i = 0, ret = 0;
+	unsigned int cpu;
+	struct lpm_cluster *p = NULL;
+
+	if (!cl->cpu) {
+		struct lpm_cluster *n;
+
+		list_for_each_entry(n, &cl->child, list) {
+			ret = cluster_cpuidle_register(n);
+			if (ret)
+				break;
+		}
+		return ret;
+	}
+
+	cl->drv = kzalloc(sizeof(*cl->drv), GFP_KERNEL);
+	if (!cl->drv)
+		return -ENOMEM;
+
+	cl->drv->name = "msm_idle";
+
+	for (i = 0; i < cl->cpu->nlevels; i++) {
+		struct cpuidle_state *st = &cl->drv->states[i];
+		struct lpm_cpu_level *cpu_level = &cl->cpu->levels[i];
+
+		/* No trailing newline: state names are exposed via sysfs. */
+		snprintf(st->name, CPUIDLE_NAME_LEN, "C%u", i);
+		snprintf(st->desc, CPUIDLE_DESC_LEN, "%s", cpu_level->name);
+		st->flags = 0;
+		st->exit_latency = cpu_level->pwr.latency_us;
+		st->power_usage = cpu_level->pwr.ss_power;
+		st->target_residency = 0;
+		st->enter = lpm_cpuidle_enter;
+	}
+
+	cl->drv->state_count = cl->cpu->nlevels;
+	cl->drv->safe_state_index = 0;
+	for_each_cpu(cpu, &cl->child_cpus)
+		per_cpu(cpu_cluster, cpu) = cl;
+
+	/* Offline CPUs cannot vote, so seed their votes at every level. */
+	for_each_possible_cpu(cpu) {
+		if (cpu_online(cpu))
+			continue;
+		p = per_cpu(cpu_cluster, cpu);
+		while (p) {
+			int j;
+
+			spin_lock(&p->sync_lock);
+			cpumask_set_cpu(cpu, &p->num_children_in_sync);
+			for (j = 0; j < p->nlevels; j++)
+				cpumask_copy(&p->levels[j].num_cpu_votes,
+						&p->num_children_in_sync);
+			spin_unlock(&p->sync_lock);
+			p = p->parent;
+		}
+	}
+
+	ret = cpuidle_register_cpu(cl->drv, &cl->child_cpus);
+	if (ret) {
+		/* Propagate the real error instead of a blanket -ENOMEM,
+		 * and don't leave a dangling driver pointer behind.
+		 */
+		kfree(cl->drv);
+		cl->drv = NULL;
+		return ret;
+	}
+	return 0;
+}
+
+/**
+ * init_lpm - initializes the governor
+ *
+ * Registered at postcore_initcall so the "qcom" governor exists well
+ * before the lpm-levels platform driver probes (late_initcall).
+ */
+static int __init init_lpm(void)
+{
+	return cpuidle_register_governor(&lpm_governor);
+}
+
+postcore_initcall(init_lpm);
+
+/*
+ * register_cpu_lpm_stats() - hand the per-cpu level names of @cpu to
+ * the lpm_stats layer under @parent's stats node.  Best effort: on
+ * allocation failure the stats entry is simply skipped.
+ */
+static void register_cpu_lpm_stats(struct lpm_cpu *cpu,
+		struct lpm_cluster *parent)
+{
+	const char **names;
+	int lvl;
+
+	/* Temporary array of level-name pointers for the stats API. */
+	names = kcalloc(cpu->nlevels, sizeof(*names), GFP_KERNEL);
+	if (!names)
+		return;
+
+	for (lvl = 0; lvl < cpu->nlevels; lvl++)
+		names[lvl] = cpu->levels[lvl].name;
+
+	lpm_stats_config_level("cpu", names, cpu->nlevels,
+			parent->stats, &parent->child_cpus);
+
+	kfree(names);
+}
+
+/*
+ * register_cluster_lpm_stats() - recursively create lpm_stats nodes for
+ * @cl and its children; leaf (cpu-owning) clusters also register their
+ * per-cpu stats.  Best effort: allocation failure skips the subtree.
+ */
+static void register_cluster_lpm_stats(struct lpm_cluster *cl,
+		struct lpm_cluster *parent)
+{
+	const char **level_name;
+	int i;
+	struct lpm_cluster *child;
+
+	if (!cl)
+		return;
+
+	level_name = kcalloc(cl->nlevels, sizeof(*level_name), GFP_KERNEL);
+
+	if (!level_name)
+		return;
+
+	for (i = 0; i < cl->nlevels; i++)
+		level_name[i] = cl->levels[i].level_name;
+
+	cl->stats = lpm_stats_config_level(cl->cluster_name, level_name,
+			cl->nlevels, parent ? parent->stats : NULL, NULL);
+
+	kfree(level_name);
+
+	/* Leaf cluster: register its CPUs and stop recursing. */
+	if (cl->cpu) {
+		register_cpu_lpm_stats(cl->cpu, cl);
+		return;
+	}
+
+	list_for_each_entry(child, &cl->child, list)
+		register_cluster_lpm_stats(child, cl);
+}
+
+/* suspend_ops.prepare_late: flag suspend-in-progress for the idle path
+ * and notify the stats layer.  Always succeeds.
+ */
+static int lpm_suspend_prepare(void)
+{
+	suspend_in_progress = true;
+	lpm_stats_suspend_enter();
+
+	return 0;
+}
+
+/* suspend_ops.wake: mirror of lpm_suspend_prepare() on resume. */
+static void lpm_suspend_wake(void)
+{
+	suspend_in_progress = false;
+	lpm_stats_suspend_exit();
+}
+
+/*
+ * lpm_suspend_enter() - platform_suspend_ops.enter callback.
+ *
+ * Scans from the deepest cpu level down for one allowed in suspend,
+ * prepares this cpu and its cluster hierarchy, enters sleep via SPM or
+ * PSCI, then unwinds in reverse order.  Always returns 0 so the
+ * suspend core proceeds even when no level was permitted.
+ */
+static int lpm_suspend_enter(suspend_state_t state)
+{
+	int cpu = raw_smp_processor_id();
+	struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
+	struct lpm_cpu *lpm_cpu = cluster->cpu;
+	const struct cpumask *cpumask = get_cpu_mask(cpu);
+	int idx;
+
+	/* Deepest-first search for a level enabled for suspend. */
+	for (idx = lpm_cpu->nlevels - 1; idx >= 0; idx--) {
+
+		if (lpm_cpu_mode_allow(cpu, idx, false))
+			break;
+	}
+	if (idx < 0) {
+		pr_err("Failed suspend\n");
+		return 0;
+	}
+	cpu_prepare(cluster, idx, false);
+	cluster_prepare(cluster, cpumask, idx, false, 0);
+	if (idx > 0)
+		update_debug_pc_event(CPU_ENTER, idx, 0xdeaffeed,
+					0xdeaffeed, false);
+
+	/*
+	 * Print the clocks which are enabled during system suspend
+	 * This debug information is useful to know which are the
+	 * clocks that are enabled and preventing the system level
+	 * LPMs(XO and Vmin).
+	 */
+	clock_debug_print_enabled(true);
+
+	if (!use_psci)
+		msm_cpu_pm_enter_sleep(cluster->cpu->levels[idx].mode, false);
+	else
+		psci_enter_sleep(cluster, idx, true);
+
+	if (idx > 0)
+		update_debug_pc_event(CPU_EXIT, idx, true, 0xdeaffeed,
+					false);
+
+	/* Unwind in reverse order of the prepare calls above. */
+	cluster_unprepare(cluster, cpumask, idx, false, 0);
+	cpu_unprepare(cluster, idx, false);
+	return 0;
+}
+
+/* cpuhp teardown callback: a dying CPU stops voting, so mark its whole
+ * cluster path prepared with NR_LPM_LEVELS (i.e. "CPU absent").
+ */
+static int lpm_dying_cpu(unsigned int cpu)
+{
+	struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
+
+	update_debug_pc_event(CPU_HP_DYING, cpu,
+				cluster->num_children_in_sync.bits[0],
+				cluster->child_cpus.bits[0], false);
+	cluster_prepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false, 0);
+	return 0;
+}
+
+/* cpuhp startup callback: mirror of lpm_dying_cpu() when a CPU comes
+ * back online — remove its "absent" vote from the cluster path.
+ */
+static int lpm_starting_cpu(unsigned int cpu)
+{
+	struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
+
+	update_debug_pc_event(CPU_HP_STARTING, cpu,
+				cluster->num_children_in_sync.bits[0],
+				cluster->child_cpus.bits[0], false);
+	cluster_unprepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false, 0);
+	return 0;
+}
+
+/* System suspend hooks installed by lpm_probe(). */
+static const struct platform_suspend_ops lpm_suspend_ops = {
+	.enter = lpm_suspend_enter,
+	.valid = suspend_valid_only_mem,
+	.prepare_late = lpm_suspend_prepare,
+	.wake = lpm_suspend_wake,
+};
+
+/*
+ * lpm_probe() - parse the qcom,lpm-levels devicetree hierarchy and wire
+ * up stats, cpuidle drivers, hotplug callbacks, sysfs control nodes and
+ * the minidump debug region.
+ *
+ * NOTE(review): the dma_alloc_coherent() result is not checked, so
+ * lpm_debug may be NULL and is still handed to the minidump entry —
+ * confirm update_debug_pc_event() tolerates a NULL buffer.
+ * NOTE(review): failures after cpuhp_setup_state() free the cluster
+ * tree but do not remove the hotplug state — verify this is safe.
+ */
+static int lpm_probe(struct platform_device *pdev)
+{
+	int ret;
+	int size;
+	struct kobject *module_kobj = NULL;
+	struct md_region md_entry;
+
+	/* Hold off hotplug while per-cpu cluster pointers are built. */
+	get_online_cpus();
+	lpm_root_node = lpm_of_parse_cluster(pdev);
+
+	if (IS_ERR_OR_NULL(lpm_root_node)) {
+		pr_err("%s(): Failed to probe low power modes\n", __func__);
+		put_online_cpus();
+		return PTR_ERR(lpm_root_node);
+	}
+
+	if (print_parsed_dt)
+		cluster_dt_walkthrough(lpm_root_node);
+
+	/*
+	 * Register hotplug notifier before broadcast time to ensure there
+	 * to prevent race where a broadcast timer might not be setup on for a
+	 * core. BUG in existing code but no known issues possibly because of
+	 * how late lpm_levels gets initialized.
+	 */
+	suspend_set_ops(&lpm_suspend_ops);
+	hrtimer_init(&lpm_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+
+	ret = remote_spin_lock_init(&scm_handoff_lock, SCM_HANDOFF_LOCK_ID);
+	if (ret) {
+		pr_err("%s: Failed initializing scm_handoff_lock (%d)\n",
+			__func__, ret);
+		put_online_cpus();
+		return ret;
+	}
+	size = num_dbg_elements * sizeof(struct lpm_debug);
+	lpm_debug = dma_alloc_coherent(&pdev->dev, size,
+			&lpm_debug_phys, GFP_KERNEL);
+	register_cluster_lpm_stats(lpm_root_node, NULL);
+
+	ret = cluster_cpuidle_register(lpm_root_node);
+	put_online_cpus();
+	if (ret) {
+		pr_err("%s()Failed to register with cpuidle framework\n",
+				__func__);
+		goto failed;
+	}
+	ret = cpuhp_setup_state(CPUHP_AP_QCOM_SLEEP_STARTING,
+			"AP_QCOM_SLEEP_STARTING",
+			lpm_starting_cpu, lpm_dying_cpu);
+	if (ret)
+		goto failed;
+
+	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+	if (!module_kobj) {
+		pr_err("%s: cannot find kobject for module %s\n",
+			__func__, KBUILD_MODNAME);
+		ret = -ENOENT;
+		goto failed;
+	}
+
+	ret = create_cluster_lvl_nodes(lpm_root_node, module_kobj);
+	if (ret) {
+		pr_err("%s(): Failed to create cluster level nodes\n",
+				__func__);
+		goto failed;
+	}
+
+	/* Add lpm_debug to Minidump*/
+	strlcpy(md_entry.name, "KLPMDEBUG", sizeof(md_entry.name));
+	md_entry.virt_addr = (uintptr_t)lpm_debug;
+	md_entry.phys_addr = lpm_debug_phys;
+	md_entry.size = size;
+	if (msm_minidump_add_region(&md_entry))
+		pr_info("Failed to add lpm_debug in Minidump\n");
+
+	return 0;
+failed:
+	free_cluster_node(lpm_root_node);
+	lpm_root_node = NULL;
+	return ret;
+}
+
+/* Devicetree match: binds to the "qcom,lpm-levels" node. */
+static const struct of_device_id lpm_mtch_tbl[] = {
+	{.compatible = "qcom,lpm-levels"},
+	{},
+};
+
+/* Platform driver registered by lpm_levels_module_init(). */
+static struct platform_driver lpm_driver = {
+	.probe = lpm_probe,
+	.driver = {
+		.name = "lpm-levels",
+		.owner = THIS_MODULE,
+		.of_match_table = lpm_mtch_tbl,
+	},
+};
+
+/*
+ * lpm_levels_module_init() - register the lpm-levels platform driver.
+ * late_initcall so SPM/clock dependencies have already probed.
+ * (The original `goto fail` jumped to the immediately-following label
+ * and has been dropped.)
+ */
+static int __init lpm_levels_module_init(void)
+{
+	int rc;
+
+	rc = platform_driver_register(&lpm_driver);
+	if (rc)
+		pr_info("Error registering %s\n", lpm_driver.driver.name);
+
+	return rc;
+}
+late_initcall(lpm_levels_module_init);
+
+/*
+ * lpm_cpu_pre_pc_cb() - tell msm-pm whether L2 stays on for this
+ * power collapse.  Returns the cluster's tz_flag when every CPU in the
+ * cluster is in sync, MSM_SCM_L2_ON otherwise (L2_OFF before probe).
+ *
+ * NOTE(review): the `if (cluster->lpm_dev)` re-check is redundant —
+ * the !lpm_dev case already jumped to unlock_and_return above.
+ */
+enum msm_pm_l2_scm_flag lpm_cpu_pre_pc_cb(unsigned int cpu)
+{
+	struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
+	enum msm_pm_l2_scm_flag retflag = MSM_SCM_L2_ON;
+
+	/*
+	 * No need to acquire the lock if probe isn't completed yet
+	 * In the event of the hotplug happening before lpm probe, we want to
+	 * flush the cache to make sure that L2 is flushed. In particular, this
+	 * could cause incoherencies for a cluster architecture. This wouldn't
+	 * affect the idle case as the idle driver wouldn't be registered
+	 * before the probe function
+	 */
+	if (!cluster)
+		return MSM_SCM_L2_OFF;
+
+	/*
+	 * Assumes L2 only. What/How parameters gets passed into TZ will
+	 * determine how this function reports this info back in msm-pm.c
+	 */
+	spin_lock(&cluster->sync_lock);
+
+	if (!cluster->lpm_dev) {
+		retflag = MSM_SCM_L2_OFF;
+		goto unlock_and_return;
+	}
+
+	if (!cpumask_equal(&cluster->num_children_in_sync,
+				&cluster->child_cpus))
+		goto unlock_and_return;
+
+	if (cluster->lpm_dev)
+		retflag = cluster->lpm_dev->tz_flag;
+	/*
+	 * The scm_handoff_lock will be release by the secure monitor.
+	 * It is used to serialize power-collapses from this point on,
+	 * so that both Linux and the secure context have a consistent
+	 * view regarding the number of running cpus (cpu_count).
+	 *
+	 * It must be acquired before releasing the cluster lock.
+	 */
+unlock_and_return:
+	update_debug_pc_event(PRE_PC_CB, retflag, 0xdeadbeef, 0xdeadbeef,
+			0xdeadbeef);
+	trace_pre_pc_cb(retflag);
+	remote_spin_lock_rlock_id(&scm_handoff_lock,
+				  REMOTE_SPINLOCK_TID_START + cpu);
+	spin_unlock(&cluster->sync_lock);
+	return retflag;
+}
+
+/**
+ * lpm_cpu_hotplug_enter(): Called by dying CPU to terminate in low power mode
+ *
+ * @cpu: cpuid of the dying CPU
+ *
+ * Called from platform_cpu_kill() to terminate hotplug in a low power mode
+ *
+ * Before probe: falls back to the deepest SPM mode available on this
+ * core.  After probe: picks the level with the lowest steady-state
+ * power and prepares the cluster hierarchy before entering it.
+ */
+void lpm_cpu_hotplug_enter(unsigned int cpu)
+{
+	enum msm_pm_sleep_mode mode = MSM_PM_SLEEP_MODE_NR;
+	struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
+	int i;
+	int idx = -1;
+
+	/*
+	 * If lpm isn't probed yet, try to put cpu into the one of the modes
+	 * available
+	 */
+	if (!cluster) {
+		if (msm_spm_is_mode_avail(
+					MSM_SPM_MODE_POWER_COLLAPSE)){
+			mode = MSM_PM_SLEEP_MODE_POWER_COLLAPSE;
+		} else if (msm_spm_is_mode_avail(
+					MSM_SPM_MODE_FASTPC)) {
+			mode = MSM_PM_SLEEP_MODE_FASTPC;
+		} else if (msm_spm_is_mode_avail(
+					MSM_SPM_MODE_RETENTION)) {
+			mode = MSM_PM_SLEEP_MODE_RETENTION;
+		} else {
+			pr_err("No mode avail for cpu%d hotplug\n", cpu);
+			WARN_ON(1);
+			return;
+		}
+	} else {
+		struct lpm_cpu *lpm_cpu;
+		uint32_t ss_pwr = ~0U;
+
+		/* Select the level with minimum steady-state power. */
+		lpm_cpu = cluster->cpu;
+		for (i = 0; i < lpm_cpu->nlevels; i++) {
+			if (ss_pwr < lpm_cpu->levels[i].pwr.ss_power)
+				continue;
+			ss_pwr = lpm_cpu->levels[i].pwr.ss_power;
+			idx = i;
+			mode = lpm_cpu->levels[i].mode;
+		}
+
+		if (mode == MSM_PM_SLEEP_MODE_NR)
+			return;
+
+		WARN_ON(idx < 0);
+		cluster_prepare(cluster, get_cpu_mask(cpu), idx, false, 0);
+	}
+
+	msm_cpu_pm_enter_sleep(mode, false);
+}
diff --git a/drivers/cpuidle/lpm-levels-legacy.h b/drivers/cpuidle/lpm-levels-legacy.h
new file mode 100644
index 0000000..4a07355
--- /dev/null
+++ b/drivers/cpuidle/lpm-levels-legacy.h
@@ -0,0 +1,152 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <soc/qcom/pm-legacy.h>
+#include <soc/qcom/spm.h>
+
+#define NR_LPM_LEVELS 8
+
+extern bool use_psci;
+
+/* Pairs an SPM/PM mode constant with its devicetree string. */
+struct lpm_lookup_table {
+	uint32_t modes;
+	const char *mode_name;
+};
+
+/* Power/latency figures for one low-power level, parsed from DT. */
+struct power_params {
+	uint32_t latency_us;		/* Enter + Exit latency */
+	uint32_t ss_power;		/* Steady state power */
+	uint32_t energy_overhead;	/* Enter + exit over head */
+	uint32_t time_overhead_us;	/* Enter + exit overhead */
+	uint32_t residencies[NR_LPM_LEVELS];
+	uint32_t max_residency;
+};
+
+/* One idle level of a CPU (e.g. wfi, retention, pc). */
+struct lpm_cpu_level {
+	const char *name;
+	enum msm_pm_sleep_mode mode;
+	bool use_bc_timer;
+	struct power_params pwr;
+	unsigned int psci_id;
+	bool is_reset;
+	bool jtag_save_restore;
+	bool hyp_psci;
+	int reset_level;
+};
+
+/* Per-cpu idle-level table owned by a leaf cluster. */
+struct lpm_cpu {
+	struct lpm_cpu_level levels[NR_LPM_LEVELS];
+	int nlevels;
+	unsigned int psci_mode_shift;
+	unsigned int psci_mode_mask;
+	struct lpm_cluster *parent;
+};
+
+/* sysfs enable/disable switches for one level (idle and suspend). */
+struct lpm_level_avail {
+	bool idle_enabled;
+	bool suspend_enabled;
+	struct kobject *kobj;
+	struct kobj_attribute idle_enabled_attr;
+	struct kobj_attribute suspend_enabled_attr;
+	void *data;		/* lpm_cpu or lpm_cluster this belongs to */
+	int idx;
+	bool cpu_node;		/* true when data is a struct lpm_cpu */
+};
+
+/* One low-power level of a cluster. */
+struct lpm_cluster_level {
+	const char *level_name;
+	int *mode;			/* SPM mode to enter */
+	int min_child_level;
+	struct cpumask num_cpu_votes;
+	struct power_params pwr;
+	bool notify_rpm;
+	bool disable_dynamic_routing;
+	bool sync_level;
+	bool last_core_only;
+	struct lpm_level_avail available;
+	unsigned int psci_id;
+	bool is_reset;
+	int reset_level;
+	bool no_cache_flush;
+};
+
+/* Binding of one SPM device to its mode-setting callback. */
+struct low_power_ops {
+	struct msm_spm_device *spm;
+	int (*set_mode)(struct low_power_ops *ops, int mode,
+			struct lpm_cluster_level *level);
+	enum msm_pm_l2_scm_flag tz_flag;
+};
+
+/* A node in the cluster topology tree parsed from devicetree. */
+struct lpm_cluster {
+	struct list_head list;
+	struct list_head child;
+	const char *cluster_name;
+	const char **name;
+	unsigned long aff_level; /* Affinity level of the node */
+	struct low_power_ops *lpm_dev;
+	int ndevices;
+	struct lpm_cluster_level levels[NR_LPM_LEVELS];
+	int nlevels;
+	enum msm_pm_l2_scm_flag l2_flag;
+	int min_child_level;
+	int default_level;
+	int last_level;
+	struct lpm_cpu *cpu;		/* non-NULL only for leaf clusters */
+	struct cpuidle_driver *drv;
+	spinlock_t sync_lock;		/* guards the two masks below */
+	struct cpumask child_cpus;
+	struct cpumask num_children_in_sync;
+	struct lpm_cluster *parent;
+	struct lpm_stats *stats;
+	unsigned int psci_mode_shift;
+	unsigned int psci_mode_mask;
+	bool no_saw_devices;
+};
+
+int set_l2_mode(struct low_power_ops *ops, int mode,
+ struct lpm_cluster_level *level);
+int set_system_mode(struct low_power_ops *ops, int mode,
+ struct lpm_cluster_level *level);
+int set_l3_mode(struct low_power_ops *ops, int mode,
+ struct lpm_cluster_level *level);
+void lpm_suspend_wake_time(uint64_t wakeup_time);
+
+struct lpm_cluster *lpm_of_parse_cluster(struct platform_device *pdev);
+void free_cluster_node(struct lpm_cluster *cluster);
+void cluster_dt_walkthrough(struct lpm_cluster *cluster);
+
+int create_cluster_lvl_nodes(struct lpm_cluster *p, struct kobject *kobj);
+bool lpm_cpu_mode_allow(unsigned int cpu,
+ unsigned int mode, bool from_idle);
+bool lpm_cluster_mode_allow(struct lpm_cluster *cluster,
+ unsigned int mode, bool from_idle);
+uint32_t *get_per_cpu_max_residency(int cpu);
+extern struct lpm_cluster *lpm_root_node;
+
+#ifdef CONFIG_SMP
+extern DEFINE_PER_CPU(bool, pending_ipi);
+/*
+ * True when any CPU in @mask has an IPI pending.  The original
+ * `if per_cpu(...)` only compiled because the macro expansion happens
+ * to be parenthesized; write the conventional form explicitly.
+ * NOTE(review): the extern above would normally use DECLARE_PER_CPU —
+ * confirm against the definition site before changing it.
+ */
+static inline bool is_IPI_pending(const struct cpumask *mask)
+{
+	unsigned int cpu;
+
+	for_each_cpu(cpu, mask) {
+		if (per_cpu(pending_ipi, cpu))
+			return true;
+	}
+	return false;
+}
+#else
+static inline bool is_IPI_pending(const struct cpumask *mask)
+{
+	return false;
+}
+#endif
diff --git a/drivers/cpuidle/lpm-levels-of-legacy.c b/drivers/cpuidle/lpm-levels-of-legacy.c
new file mode 100644
index 0000000..bf74124
--- /dev/null
+++ b/drivers/cpuidle/lpm-levels-of-legacy.c
@@ -0,0 +1,1006 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/sysfs.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/moduleparam.h>
+#include "lpm-levels-legacy.h"
+
+/* Selected at parse time: PSCI firmware path vs legacy SPM path. */
+bool use_psci;
+/* Index into lpm_types[] below; also counts the sysfs attributes. */
+enum lpm_type {
+	IDLE = 0,
+	SUSPEND,
+	LPM_TYPE_NR
+};
+
+struct lpm_type_str {
+	enum lpm_type type;
+	char *str;
+};
+
+/* Attribute names matched by get_enabled_ptr()/get_avail_ptr(). */
+static const struct lpm_type_str lpm_types[] = {
+	{IDLE, "idle_enabled"},
+	{SUSPEND, "suspend_enabled"},
+};
+
+/* Per-cpu table of effective max residencies, sized cpu->nlevels. */
+static DEFINE_PER_CPU(uint32_t *, max_residency);
+static struct lpm_level_avail *cpu_level_available[NR_CPUS];
+static struct platform_device *lpm_pdev;
+
+/*
+ * get_enabled_ptr() - map a sysfs attribute to the matching enable
+ * flag inside @avail ("idle_enabled" or "suspend_enabled").
+ * Returns NULL for an unrecognised attribute name.
+ */
+static void *get_enabled_ptr(struct kobj_attribute *attr,
+					struct lpm_level_avail *avail)
+{
+	if (!strcmp(attr->attr.name, lpm_types[IDLE].str))
+		return &avail->idle_enabled;
+
+	if (!strcmp(attr->attr.name, lpm_types[SUSPEND].str))
+		return &avail->suspend_enabled;
+
+	return NULL;
+}
+
+/*
+ * get_avail_ptr() - recover the enclosing lpm_level_avail from one of
+ * its two embedded kobj_attributes, keyed by the attribute name.
+ * Returns NULL for an unrecognised attribute name.
+ */
+static struct lpm_level_avail *get_avail_ptr(struct kobject *kobj,
+					struct kobj_attribute *attr)
+{
+	if (!strcmp(attr->attr.name, lpm_types[IDLE].str))
+		return container_of(attr, struct lpm_level_avail,
+					idle_enabled_attr);
+
+	if (!strcmp(attr->attr.name, lpm_types[SUSPEND].str))
+		return container_of(attr, struct lpm_level_avail,
+					suspend_enabled_attr);
+
+	return NULL;
+}
+
+/*
+ * set_optimum_cpu_residency() - recompute, for each cpu level i, the
+ * smallest non-zero residency of any deeper enabled level j > i.
+ * A disabled level gets residency 0.  @probe_time treats every level
+ * as enabled (sysfs switches do not exist yet during probe).
+ */
+static void set_optimum_cpu_residency(struct lpm_cpu *cpu, int cpu_id,
+		bool probe_time)
+{
+	int i, j;
+	bool mode_avail;
+	uint32_t *residency = per_cpu(max_residency, cpu_id);
+
+	for (i = 0; i < cpu->nlevels; i++) {
+		struct power_params *pwr = &cpu->levels[i].pwr;
+
+		mode_avail = probe_time ||
+			lpm_cpu_mode_allow(cpu_id, i, true);
+
+		if (!mode_avail) {
+			residency[i] = 0;
+			continue;
+		}
+
+		/* Start at "infinite" and take the minimum below. */
+		residency[i] = ~0;
+		for (j = i + 1; j < cpu->nlevels; j++) {
+			mode_avail = probe_time ||
+					lpm_cpu_mode_allow(cpu_id, j, true);
+
+			if (mode_avail &&
+				(residency[i] > pwr->residencies[j]) &&
+				(pwr->residencies[j] != 0))
+				residency[i] = pwr->residencies[j];
+		}
+	}
+}
+
+/*
+ * set_optimum_cluster_residency() - cluster-level twin of
+ * set_optimum_cpu_residency(): for each level i, max_residency becomes
+ * the smallest non-zero residency among deeper enabled levels.
+ */
+static void set_optimum_cluster_residency(struct lpm_cluster *cluster,
+		bool probe_time)
+{
+	int i, j;
+	bool mode_avail;
+
+	for (i = 0; i < cluster->nlevels; i++) {
+		struct power_params *pwr = &cluster->levels[i].pwr;
+
+		mode_avail = probe_time ||
+			lpm_cluster_mode_allow(cluster, i,
+					true);
+
+		if (!mode_avail) {
+			pwr->max_residency = 0;
+			continue;
+		}
+
+		/* Start at "infinite" and take the minimum below. */
+		pwr->max_residency = ~0;
+		for (j = i+1; j < cluster->nlevels; j++) {
+			mode_avail = probe_time ||
+					lpm_cluster_mode_allow(cluster, j,
+							true);
+			if (mode_avail &&
+				(pwr->max_residency > pwr->residencies[j]) &&
+				(pwr->residencies[j] != 0))
+				pwr->max_residency = pwr->residencies[j];
+		}
+	}
+}
+
+/* Accessor for the per-cpu residency table built above. */
+uint32_t *get_per_cpu_max_residency(int cpu)
+{
+	return per_cpu(max_residency, cpu);
+}
+
+/*
+ * lpm_enable_show() - sysfs read of an idle/suspend enable flag.
+ * Guard against a NULL avail pointer exactly like lpm_enable_store()
+ * does (the original derived kp.arg from an unchecked pointer).
+ */
+static ssize_t lpm_enable_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	int ret = 0;
+	struct kernel_param kp;
+	struct lpm_level_avail *avail;
+
+	avail = get_avail_ptr(kobj, attr);
+	if (WARN_ON(!avail))
+		return -EINVAL;
+
+	kp.arg = get_enabled_ptr(attr, avail);
+	ret = param_get_bool(buf, &kp);
+	if (ret > 0) {
+		strlcat(buf, "\n", PAGE_SIZE);
+		ret++;
+	}
+
+	return ret;
+}
+
+/*
+ * lpm_enable_store() - sysfs write of an idle/suspend enable flag;
+ * recomputes optimum residencies after the change.
+ * NOTE(review): residencies are recomputed even when param_set_bool()
+ * failed — harmless but presumably unintended; confirm.
+ */
+static ssize_t lpm_enable_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t len)
+{
+	int ret = 0;
+	struct kernel_param kp;
+	struct lpm_level_avail *avail;
+
+	avail = get_avail_ptr(kobj, attr);
+	if (WARN_ON(!avail))
+		return -EINVAL;
+
+	kp.arg = get_enabled_ptr(attr, avail);
+	ret = param_set_bool(buf, &kp);
+
+	if (avail->cpu_node)
+		set_optimum_cpu_residency(avail->data, avail->idx, false);
+	else
+		set_optimum_cluster_residency(avail->data, false);
+
+	return ret ? ret : len;
+}
+
+/*
+ * create_lvl_avail_nodes() - create a sysfs directory @name under
+ * @parent holding idle_enabled/suspend_enabled switches for one level,
+ * and initialise @avail to back them (both default to enabled).
+ * Memory is devm-managed; only the kobject needs explicit unwinding.
+ * Returns 0 or a negative errno (sysfs errors are now propagated
+ * instead of being masked as -ENOMEM).
+ */
+static int create_lvl_avail_nodes(const char *name,
+			struct kobject *parent, struct lpm_level_avail *avail,
+			void *data, int index, bool cpu_node)
+{
+	struct attribute_group *attr_group = NULL;
+	struct attribute **attr = NULL;
+	struct kobject *kobj = NULL;
+	int ret = 0;
+
+	kobj = kobject_create_and_add(name, parent);
+	if (!kobj)
+		return -ENOMEM;
+
+	attr_group = devm_kzalloc(&lpm_pdev->dev, sizeof(*attr_group),
+					GFP_KERNEL);
+	if (!attr_group) {
+		ret = -ENOMEM;
+		goto failed;
+	}
+
+	attr = devm_kzalloc(&lpm_pdev->dev,
+		sizeof(*attr) * (LPM_TYPE_NR + 1), GFP_KERNEL);
+	if (!attr) {
+		ret = -ENOMEM;
+		goto failed;
+	}
+
+	sysfs_attr_init(&avail->idle_enabled_attr.attr);
+	avail->idle_enabled_attr.attr.name = lpm_types[IDLE].str;
+	avail->idle_enabled_attr.attr.mode = 0644;
+	avail->idle_enabled_attr.show = lpm_enable_show;
+	avail->idle_enabled_attr.store = lpm_enable_store;
+
+	sysfs_attr_init(&avail->suspend_enabled_attr.attr);
+	avail->suspend_enabled_attr.attr.name = lpm_types[SUSPEND].str;
+	avail->suspend_enabled_attr.attr.mode = 0644;
+	avail->suspend_enabled_attr.show = lpm_enable_show;
+	avail->suspend_enabled_attr.store = lpm_enable_store;
+
+	attr[0] = &avail->idle_enabled_attr.attr;
+	attr[1] = &avail->suspend_enabled_attr.attr;
+	attr[2] = NULL;
+	attr_group->attrs = attr;
+
+	ret = sysfs_create_group(kobj, attr_group);
+	if (ret)
+		goto failed;	/* propagate the real sysfs error */
+
+	avail->idle_enabled = true;
+	avail->suspend_enabled = true;
+	avail->kobj = kobj;
+	avail->data = data;
+	avail->idx = index;
+	avail->cpu_node = cpu_node;
+
+	return ret;
+
+failed:
+	kobject_put(kobj);
+	return ret;
+}
+
+/*
+ * create_cpu_lvl_nodes() - create a "cpuN" sysfs directory per CPU in
+ * the leaf cluster @p, each holding per-level enable switches, and
+ * publish the level list via cpu_level_available[].
+ * On failure all created kobjects are dropped (kobject_put(NULL) is a
+ * no-op, so iterating the full weight is safe).
+ */
+static int create_cpu_lvl_nodes(struct lpm_cluster *p, struct kobject *parent)
+{
+	int cpu;
+	int i, cpu_idx;
+	struct kobject **cpu_kobj = NULL;
+	struct lpm_level_avail *level_list = NULL;
+	char cpu_name[20] = {0};
+	int ret = 0;
+
+	cpu_kobj = devm_kzalloc(&lpm_pdev->dev, sizeof(*cpu_kobj) *
+			cpumask_weight(&p->child_cpus), GFP_KERNEL);
+	if (!cpu_kobj)
+		return -ENOMEM;
+
+	cpu_idx = 0;
+	for_each_cpu(cpu, &p->child_cpus) {
+		snprintf(cpu_name, sizeof(cpu_name), "cpu%d", cpu);
+		cpu_kobj[cpu_idx] = kobject_create_and_add(cpu_name, parent);
+		if (!cpu_kobj[cpu_idx]) {
+			ret = -ENOMEM;
+			goto release_kobj;
+		}
+
+		level_list = devm_kzalloc(&lpm_pdev->dev,
+				p->cpu->nlevels * sizeof(*level_list),
+				GFP_KERNEL);
+		if (!level_list) {
+			ret = -ENOMEM;
+			goto release_kobj;
+		}
+
+		for (i = 0; i < p->cpu->nlevels; i++) {
+
+			ret = create_lvl_avail_nodes(p->cpu->levels[i].name,
+					cpu_kobj[cpu_idx], &level_list[i],
+					(void *)p->cpu, cpu, true);
+			if (ret)
+				goto release_kobj;
+		}
+
+		cpu_level_available[cpu] = level_list;
+		cpu_idx++;
+	}
+
+	return ret;
+
+release_kobj:
+	for (i = 0; i < cpumask_weight(&p->child_cpus); i++)
+		kobject_put(cpu_kobj[i]);
+
+	return ret;
+}
+
+/*
+ * create_cluster_lvl_nodes() - recursively build the sysfs tree for
+ * cluster @p under @kobj: one directory per cluster level, then child
+ * clusters, then per-cpu nodes for leaf clusters.
+ * Returns 0 or the first error; no unwinding of earlier siblings here
+ * (the caller tears the whole tree down on failure).
+ */
+int create_cluster_lvl_nodes(struct lpm_cluster *p, struct kobject *kobj)
+{
+	int ret = 0;
+	struct lpm_cluster *child = NULL;
+	int i;
+	struct kobject *cluster_kobj = NULL;
+
+	if (!p)
+		return -ENODEV;
+
+	cluster_kobj = kobject_create_and_add(p->cluster_name, kobj);
+	if (!cluster_kobj)
+		return -ENOMEM;
+
+	for (i = 0; i < p->nlevels; i++) {
+		ret = create_lvl_avail_nodes(p->levels[i].level_name,
+				cluster_kobj, &p->levels[i].available,
+				(void *)p, 0, false);
+		if (ret)
+			return ret;
+	}
+
+	list_for_each_entry(child, &p->child, list) {
+		ret = create_cluster_lvl_nodes(child, cluster_kobj);
+		if (ret)
+			return ret;
+	}
+
+	if (p->cpu) {
+		ret = create_cpu_lvl_nodes(p, cluster_kobj);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * lpm_cpu_mode_allow() - is cpu level @index enabled via sysfs for
+ * idle (@from_idle) or suspend?  Before probe/sysfs setup, idle levels
+ * are denied and suspend levels allowed (!from_idle).
+ */
+bool lpm_cpu_mode_allow(unsigned int cpu,
+		unsigned int index, bool from_idle)
+{
+	struct lpm_level_avail *avail = cpu_level_available[cpu];
+
+	if (!lpm_pdev || !avail)
+		return !from_idle;
+
+	return !!(from_idle ? avail[index].idle_enabled :
+				avail[index].suspend_enabled);
+}
+
+/*
+ * lpm_cluster_mode_allow() - cluster-level counterpart; denies
+ * everything before probe.
+ * NOTE(review): `!avail` can never be true — it is the address of an
+ * embedded array member, so that half of the check is vacuous.
+ */
+bool lpm_cluster_mode_allow(struct lpm_cluster *cluster,
+		unsigned int mode, bool from_idle)
+{
+	struct lpm_level_avail *avail = &cluster->levels[mode].available;
+
+	if (!lpm_pdev || !avail)
+		return false;
+
+	return !!(from_idle ? avail->idle_enabled :
+				avail->suspend_enabled);
+}
+
+/*
+ * parse_legacy_cluster_params() - resolve the cluster's SPM devices
+ * ("qcom,spm-device-names") and bind each to its set_mode callback via
+ * match_tbl.  A cluster with no SPM devices is legal: no_saw_devices
+ * is set and parsing succeeds with ndevices == 0.
+ */
+static int parse_legacy_cluster_params(struct device_node *node,
+		struct lpm_cluster *c)
+{
+	int i;
+	char *key;
+	int ret;
+	struct lpm_match {
+		char *devname;
+		int (*set_mode)(struct low_power_ops *, int,
+					struct lpm_cluster_level *);
+	};
+	struct lpm_match match_tbl[] = {
+		{"l2", set_l2_mode},
+		{"cci", set_system_mode},
+		{"l3", set_l3_mode},
+		{"cbf", set_system_mode},
+	};
+
+
+	key = "qcom,spm-device-names";
+	c->ndevices = of_property_count_strings(node, key);
+
+	if (c->ndevices < 0) {
+		pr_info("%s(): Ignoring cluster params\n", __func__);
+		c->no_saw_devices = true;
+		c->ndevices = 0;
+		return 0;
+	}
+
+	c->name = devm_kzalloc(&lpm_pdev->dev, c->ndevices * sizeof(*c->name),
+				GFP_KERNEL);
+	c->lpm_dev = devm_kzalloc(&lpm_pdev->dev,
+				c->ndevices * sizeof(*c->lpm_dev),
+				GFP_KERNEL);
+	if (!c->name || !c->lpm_dev) {
+		ret = -ENOMEM;
+		goto failed;
+	}
+
+	for (i = 0; i < c->ndevices; i++) {
+		char device_name[20];
+		int j;
+
+		ret = of_property_read_string_index(node, key, i, &c->name[i]);
+		if (ret)
+			goto failed;
+		/* SPM devices are registered as "<cluster>-<dev>". */
+		snprintf(device_name, sizeof(device_name), "%s-%s",
+				c->cluster_name, c->name[i]);
+
+		c->lpm_dev[i].spm = msm_spm_get_device_by_name(device_name);
+
+		if (IS_ERR_OR_NULL(c->lpm_dev[i].spm)) {
+			pr_err("Failed to get spm device by name:%s\n",
+					device_name);
+			ret = PTR_ERR(c->lpm_dev[i].spm);
+			goto failed;
+		}
+		for (j = 0; j < ARRAY_SIZE(match_tbl); j++) {
+			if (!strcmp(c->name[i], match_tbl[j].devname))
+				c->lpm_dev[i].set_mode = match_tbl[j].set_mode;
+		}
+
+		if (!c->lpm_dev[i].set_mode) {
+			ret = -ENODEV;
+			goto failed;
+		}
+	}
+
+	key = "qcom,default-level";
+	if (of_property_read_u32(node, key, &c->default_level))
+		c->default_level = 0;
+	return 0;
+failed:
+	pr_err("%s(): Failed reading %s\n", __func__, key);
+	return ret;
+}
+
+/*
+ * parse_cluster_params() - read per-cluster DT properties.  The PSCI
+ * path needs only the mode shift/mask; the legacy path delegates to
+ * parse_legacy_cluster_params() for SPM device wiring.
+ */
+static int parse_cluster_params(struct device_node *node,
+		struct lpm_cluster *c)
+{
+	char *key;
+	int ret;
+
+	key = "label";
+	ret = of_property_read_string(node, key, &c->cluster_name);
+	if (ret) {
+		pr_err("%s(): Cannot read required param %s\n", __func__, key);
+		return ret;
+	}
+
+	if (use_psci) {
+		key = "qcom,psci-mode-shift";
+		ret = of_property_read_u32(node, key,
+				&c->psci_mode_shift);
+		if (ret) {
+			pr_err("%s(): Failed to read param: %s\n",
+							__func__, key);
+			return ret;
+		}
+
+		key = "qcom,psci-mode-mask";
+		ret = of_property_read_u32(node, key,
+				&c->psci_mode_mask);
+		if (ret) {
+			pr_err("%s(): Failed to read param: %s\n",
+							__func__, key);
+			return ret;
+		}
+
+		/* Set ndevice to 1 as default */
+		c->ndevices = 1;
+
+		return 0;
+	} else
+		return parse_legacy_cluster_params(node, c);
+}
+
+/*
+ * parse_lpm_mode() - translate a devicetree SPM mode string into its
+ * MSM_SPM_MODE_* value; -EINVAL when the string is unknown.
+ */
+static int parse_lpm_mode(const char *str)
+{
+	size_t idx;
+	const struct lpm_lookup_table mode_lookup[] = {
+		{MSM_SPM_MODE_POWER_COLLAPSE, "pc"},
+		{MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE, "spc"},
+		{MSM_SPM_MODE_FASTPC, "fpc"},
+		{MSM_SPM_MODE_GDHS, "gdhs"},
+		{MSM_SPM_MODE_RETENTION, "retention"},
+		{MSM_SPM_MODE_CLOCK_GATING, "wfi"},
+		{MSM_SPM_MODE_DISABLED, "active"}
+	};
+
+	for (idx = 0; idx < ARRAY_SIZE(mode_lookup); idx++) {
+		if (strcmp(str, mode_lookup[idx].mode_name) == 0)
+			return mode_lookup[idx].modes;
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * parse_power_params() - read the four mandatory qcom power figures of
+ * a level node into @pwr.  Any missing property fails the whole parse
+ * (the key left in @key names the offender in the error message).
+ */
+static int parse_power_params(struct device_node *node,
+					struct power_params *pwr)
+{
+	char *key;
+	int ret;
+
+	key = "qcom,latency-us";
+	ret = of_property_read_u32(node, key, &pwr->latency_us);
+	if (ret)
+		goto fail;
+
+	key = "qcom,ss-power";
+	ret = of_property_read_u32(node, key, &pwr->ss_power);
+	if (ret)
+		goto fail;
+
+	key = "qcom,energy-overhead";
+	ret = of_property_read_u32(node, key, &pwr->energy_overhead);
+	if (ret)
+		goto fail;
+
+	key = "qcom,time-overhead";
+	ret = of_property_read_u32(node, key, &pwr->time_overhead_us);
+	if (ret)
+		goto fail;
+
+fail:
+	if (ret)
+		pr_err("%s(): %s Error reading %s\n", __func__, node->name,
+				key);
+	return ret;
+}
+
+/*
+ * parse_cluster_level() - parse one cluster-level DT node into the
+ * next free slot of @cluster->levels and bump nlevels on success.
+ * PSCI builds read a psci id; legacy builds read one SPM mode string
+ * per SPM device.  (The original read the "label" property twice; the
+ * redundant second read has been removed.)
+ */
+static int parse_cluster_level(struct device_node *node,
+		struct lpm_cluster *cluster)
+{
+	int i = 0;
+	struct lpm_cluster_level *level = &cluster->levels[cluster->nlevels];
+	int ret = -ENOMEM;
+	char *key;
+
+	key = "label";
+	ret = of_property_read_string(node, key, &level->level_name);
+	if (ret)
+		goto failed;
+
+	if (use_psci) {
+		char *k = "qcom,psci-mode";
+
+		ret = of_property_read_u32(node, k, &level->psci_id);
+		if (ret)
+			goto failed;
+
+		level->is_reset = of_property_read_bool(node, "qcom,is-reset");
+	} else if (!cluster->no_saw_devices) {
+		key = "no saw-devices";
+
+		level->mode = devm_kzalloc(&lpm_pdev->dev,
+				cluster->ndevices * sizeof(*level->mode),
+				GFP_KERNEL);
+		if (!level->mode) {
+			pr_err("Memory allocation failed\n");
+			goto failed;
+		}
+
+		for (i = 0; i < cluster->ndevices; i++) {
+			const char *spm_mode;
+			char key[25] = {0};
+
+			snprintf(key, 25, "qcom,spm-%s-mode", cluster->name[i]);
+			ret = of_property_read_string(node, key, &spm_mode);
+			if (ret)
+				goto failed;
+
+			level->mode[i] = parse_lpm_mode(spm_mode);
+
+			if (level->mode[i] < 0)
+				goto failed;
+
+			/* Any (stand-alone) power collapse resets state. */
+			if (level->mode[i] == MSM_SPM_MODE_POWER_COLLAPSE
+				|| level->mode[i] ==
+					MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE)
+				level->is_reset |= true;
+		}
+	}
+
+	if (cluster->nlevels != cluster->default_level) {
+		key = "min child idx";
+		ret = of_property_read_u32(node, "qcom,min-child-idx",
+				&level->min_child_level);
+		if (ret)
+			goto failed;
+
+		if (cluster->min_child_level > level->min_child_level)
+			cluster->min_child_level = level->min_child_level;
+	}
+
+	level->notify_rpm = of_property_read_bool(node, "qcom,notify-rpm");
+	level->disable_dynamic_routing = of_property_read_bool(node,
+					"qcom,disable-dynamic-int-routing");
+	level->last_core_only = of_property_read_bool(node,
+					"qcom,last-core-only");
+	level->no_cache_flush = of_property_read_bool(node,
+					"qcom,no-cache-flush");
+
+	key = "parse_power_params";
+	ret = parse_power_params(node, &level->pwr);
+	if (ret)
+		goto failed;
+
+	key = "qcom,reset-level";
+	ret = of_property_read_u32(node, key, &level->reset_level);
+	if (ret == -EINVAL)
+		level->reset_level = LPM_RESET_LVL_NONE;
+	else if (ret)
+		goto failed;
+
+	cluster->nlevels++;
+	return 0;
+failed:
+	pr_err("Failed %s() key = %s ret = %d\n", __func__, key, ret);
+	return ret;
+}
+
+/*
+ * parse_cpu_spm_mode() - translate a cpu-level DT mode string into its
+ * MSM_PM_SLEEP_MODE_* value; -EINVAL when the string is unknown.
+ */
+static int parse_cpu_spm_mode(const char *mode_name)
+{
+	size_t idx;
+	const struct lpm_lookup_table pm_sm_lookup[] = {
+		{MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT,
+			"wfi"},
+		{MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE,
+			"standalone_pc"},
+		{MSM_PM_SLEEP_MODE_POWER_COLLAPSE,
+			"pc"},
+		{MSM_PM_SLEEP_MODE_RETENTION,
+			"retention"},
+		{MSM_PM_SLEEP_MODE_FASTPC,
+			"fpc"},
+	};
+
+	for (idx = 0; idx < ARRAY_SIZE(pm_sm_lookup); idx++) {
+		if (strcmp(mode_name, pm_sm_lookup[idx].mode_name) == 0)
+			return pm_sm_lookup[idx].modes;
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * parse_cpu_mode() - fill one lpm_cpu_level from its DT node: the mode
+ * name always, plus the psci id (PSCI builds) or the resolved SPM
+ * sleep mode (legacy builds).
+ */
+static int parse_cpu_mode(struct device_node *n, struct lpm_cpu_level *l)
+{
+	char *key;
+	int ret;
+
+	key = "qcom,spm-cpu-mode";
+	ret = of_property_read_string(n, key, &l->name);
+	if (ret) {
+		pr_err("Failed %s %d\n", n->name, __LINE__);
+		return ret;
+	}
+
+	if (use_psci) {
+		key = "qcom,psci-cpu-mode";
+
+		ret = of_property_read_u32(n, key, &l->psci_id);
+		if (ret) {
+			pr_err("Failed reading %s on device %s\n", key,
+					n->name);
+			return ret;
+		}
+		key = "qcom,hyp-psci";
+
+		l->hyp_psci = of_property_read_bool(n, key);
+	} else {
+		l->mode = parse_cpu_spm_mode(l->name);
+
+		if (l->mode < 0)
+			return l->mode;
+	}
+	return 0;
+
+}
+
+/*
+ * get_cpumask_for_node() - build @mask from the node's "qcom,cpu"
+ * phandles by matching them against the kernel's CPU nodes.  With no
+ * phandles at all, every possible CPU is assumed to belong here.
+ */
+static int get_cpumask_for_node(struct device_node *node, struct cpumask *mask)
+{
+	struct device_node *cpu_node;
+	int cpu;
+	int idx = 0;
+
+	cpu_node = of_parse_phandle(node, "qcom,cpu", idx++);
+	if (!cpu_node) {
+		pr_info("%s: No CPU phandle, assuming single cluster\n",
+				node->full_name);
+		/*
+		 * Not all targets have the cpu node populated in the device
+		 * tree. If cpu node is not populated assume all possible
+		 * nodes belong to this cluster
+		 */
+		cpumask_copy(mask, cpu_possible_mask);
+		return 0;
+	}
+
+	while (cpu_node) {
+		for_each_possible_cpu(cpu) {
+			if (of_get_cpu_node(cpu, NULL) == cpu_node) {
+				cpumask_set_cpu(cpu, mask);
+				break;
+			}
+		}
+		of_node_put(cpu_node);
+		cpu_node = of_parse_phandle(node, "qcom,cpu", idx++);
+	}
+
+	return 0;
+}
+
+/*
+ * calculate_residency() - break-even residency (us) between two
+ * levels: extra entry/exit energy of the deeper level divided by its
+ * steady-state power saving, clamped below by its time overhead.
+ *
+ * NOTE(review): divides by (base ss_power - next ss_power); two levels
+ * with equal ss_power would divide by zero — confirm DT data rules
+ * this out.
+ */
+static int calculate_residency(struct power_params *base_pwr,
+					struct power_params *next_pwr)
+{
+	int32_t residency = (int32_t)(next_pwr->energy_overhead -
+			base_pwr->energy_overhead) -
+		((int32_t)(next_pwr->ss_power * next_pwr->time_overhead_us)
+		- (int32_t)(base_pwr->ss_power * base_pwr->time_overhead_us));
+
+	residency /= (int32_t)(base_pwr->ss_power  - next_pwr->ss_power);
+
+	if (residency < 0) {
+		pr_err("%s: residency < 0 for LPM\n",
+				__func__);
+		return next_pwr->time_overhead_us;
+	}
+
+	return residency < next_pwr->time_overhead_us ?
+				next_pwr->time_overhead_us : residency;
+}
+
+/*
+ * parse_cpu_levels() - allocate @c->cpu and parse every child node of
+ * @node as one cpu idle level, then precompute pairwise break-even
+ * residencies for all level pairs (0 when j <= i).
+ *
+ * NOTE(review): the of_node_put(n) at the end of the loop body looks
+ * like a double put — for_each_child_of_node() drops the previous
+ * child's reference itself.  Confirm before relying on refcounts here.
+ */
+static int parse_cpu_levels(struct device_node *node, struct lpm_cluster *c)
+{
+	struct device_node *n;
+	int ret = -ENOMEM;
+	int i, j;
+	char *key;
+
+	c->cpu = devm_kzalloc(&lpm_pdev->dev, sizeof(*c->cpu), GFP_KERNEL);
+	if (!c->cpu)
+		return ret;
+
+	c->cpu->parent = c;
+	if (use_psci) {
+
+		key = "qcom,psci-mode-shift";
+
+		ret = of_property_read_u32(node, key, &c->cpu->psci_mode_shift);
+		if (ret) {
+			pr_err("Failed reading %s on device %s\n", key,
+					node->name);
+			return ret;
+		}
+		key = "qcom,psci-mode-mask";
+
+		ret = of_property_read_u32(node, key, &c->cpu->psci_mode_mask);
+		if (ret) {
+			pr_err("Failed reading %s on device %s\n", key,
+					node->name);
+			return ret;
+		}
+	}
+	for_each_child_of_node(node, n) {
+		struct lpm_cpu_level *l = &c->cpu->levels[c->cpu->nlevels];
+
+		c->cpu->nlevels++;
+
+		ret = parse_cpu_mode(n, l);
+		if (ret < 0) {
+			pr_info("Failed %s\n", l->name);
+			goto failed;
+		}
+
+		ret = parse_power_params(n, &l->pwr);
+		if (ret)
+			goto failed;
+
+		key = "qcom,use-broadcast-timer";
+		l->use_bc_timer = of_property_read_bool(n, key);
+
+		l->is_reset = of_property_read_bool(n, "qcom,is-reset");
+
+		key = "qcom,jtag-save-restore";
+		l->jtag_save_restore = of_property_read_bool(n, key);
+
+		key = "qcom,reset-level";
+		ret = of_property_read_u32(n, key, &l->reset_level);
+		if (ret == -EINVAL)
+			l->reset_level = LPM_RESET_LVL_NONE;
+		else if (ret)
+			goto failed;
+		of_node_put(n);
+	}
+	/* Pairwise break-even residencies; only j > i is meaningful. */
+	for (i = 0; i < c->cpu->nlevels; i++) {
+		for (j = 0; j < c->cpu->nlevels; j++) {
+			if (i >= j) {
+				c->cpu->levels[i].pwr.residencies[j] = 0;
+				continue;
+			}
+
+			c->cpu->levels[i].pwr.residencies[j] =
+				calculate_residency(&c->cpu->levels[i].pwr,
+					&c->cpu->levels[j].pwr);
+
+			pr_err("%s: idx %d %u\n", __func__, j,
+					c->cpu->levels[i].pwr.residencies[j]);
+		}
+	}
+
+	return 0;
+failed:
+	of_node_put(n);
+	pr_err("%s(): Failed with error code:%d\n", __func__, ret);
+	return ret;
+}
+
+void free_cluster_node(struct lpm_cluster *cluster)
+{
+ struct lpm_cluster *cl, *m;
+
+ list_for_each_entry_safe(cl, m, &cluster->child, list) {
+ list_del(&cl->list);
+ free_cluster_node(cl);
+ };
+
+ cluster->ndevices = 0;
+}
+
+/*
+ * TODO:
+ * Expects a CPU or a cluster only. This ensures that affinity
+ * level of a cluster is consistent with reference to its
+ * child nodes.
+ */
+static struct lpm_cluster *parse_cluster(struct device_node *node,
+ struct lpm_cluster *parent)
+{
+ struct lpm_cluster *c;
+ struct device_node *n;
+ char *key;
+ int ret = 0;
+ int i, j;
+
+ c = devm_kzalloc(&lpm_pdev->dev, sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ ret = parse_cluster_params(node, c);
+
+ if (ret)
+ goto failed_parse_params;
+
+ INIT_LIST_HEAD(&c->child);
+ c->parent = parent;
+ spin_lock_init(&c->sync_lock);
+ c->min_child_level = NR_LPM_LEVELS;
+
+ for_each_child_of_node(node, n) {
+
+ if (!n->name)
+ continue;
+ key = "qcom,pm-cluster-level";
+ if (!of_node_cmp(n->name, key)) {
+ if (parse_cluster_level(n, c))
+ goto failed_parse_cluster;
+ continue;
+ }
+
+ key = "qcom,pm-cluster";
+ if (!of_node_cmp(n->name, key)) {
+ struct lpm_cluster *child;
+
+ if (c->no_saw_devices)
+ pr_info("%s: SAW device not provided.\n",
+ __func__);
+
+ child = parse_cluster(n, c);
+ if (!child)
+ goto failed_parse_cluster;
+
+ of_node_put(n);
+ list_add(&child->list, &c->child);
+ cpumask_or(&c->child_cpus, &c->child_cpus,
+ &child->child_cpus);
+ c->aff_level = child->aff_level + 1;
+ continue;
+ }
+
+ key = "qcom,pm-cpu";
+ if (!of_node_cmp(n->name, key)) {
+ /*
+ * Parse the cpu node only if a pm-cpu node
+ * is available, though the mask is defined at the
+ * cluster level
+ */
+ if (get_cpumask_for_node(node, &c->child_cpus))
+ goto failed_parse_cluster;
+
+ if (parse_cpu_levels(n, c))
+ goto failed_parse_cluster;
+
+ c->aff_level = 1;
+
+ for_each_cpu(i, &c->child_cpus) {
+ per_cpu(max_residency, i) = devm_kzalloc(
+ &lpm_pdev->dev,
+ sizeof(uint32_t) * c->cpu->nlevels,
+ GFP_KERNEL);
+ if (!per_cpu(max_residency, i))
+ return ERR_PTR(-ENOMEM);
+ set_optimum_cpu_residency(c->cpu, i, true);
+ }
+ }
+ }
+
+ if (cpumask_intersects(&c->child_cpus, cpu_online_mask))
+ c->last_level = c->default_level;
+ else
+ c->last_level = c->nlevels-1;
+
+ for (i = 0; i < c->nlevels; i++) {
+ for (j = 0; j < c->nlevels; j++) {
+ if (i >= j) {
+ c->levels[i].pwr.residencies[j] = 0;
+ continue;
+ }
+ c->levels[i].pwr.residencies[j] = calculate_residency(
+ &c->levels[i].pwr, &c->levels[j].pwr);
+ }
+ }
+ set_optimum_cluster_residency(c, true);
+ return c;
+
+failed_parse_cluster:
+ pr_err("Failed parse cluster:%s\n", key);
+ if (parent)
+ list_del(&c->list);
+ free_cluster_node(c);
+failed_parse_params:
+ pr_err("Failed parse params\n");
+ return NULL;
+}
+struct lpm_cluster *lpm_of_parse_cluster(struct platform_device *pdev)
+{
+ struct device_node *top = NULL;
+ struct lpm_cluster *c;
+
+ use_psci = of_property_read_bool(pdev->dev.of_node, "qcom,use-psci");
+
+ top = of_find_node_by_name(pdev->dev.of_node, "qcom,pm-cluster");
+ if (!top) {
+ pr_err("Failed to find root node\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ lpm_pdev = pdev;
+ c = parse_cluster(top, NULL);
+ of_node_put(top);
+ return c;
+}
+
+void cluster_dt_walkthrough(struct lpm_cluster *cluster)
+{
+ struct list_head *list;
+ int i, j;
+ static int id;
+ char str[10] = {0};
+
+ if (!cluster)
+ return;
+
+ for (i = 0; i < id; i++)
+ snprintf(str+i, 10 - i, "\t");
+ pr_info("%d\n", __LINE__);
+
+ for (i = 0; i < cluster->nlevels; i++) {
+ struct lpm_cluster_level *l = &cluster->levels[i];
+
+ pr_info("%d ndevices:%d\n", __LINE__, cluster->ndevices);
+ for (j = 0; j < cluster->ndevices; j++)
+ pr_info("%sDevice: %pk id:%pk\n", str,
+ &cluster->name[j], &l->mode[i]);
+ }
+
+ if (cluster->cpu) {
+ pr_info("%d\n", __LINE__);
+ for (j = 0; j < cluster->cpu->nlevels; j++)
+ pr_info("%s\tCPU mode: %s id:%d\n", str,
+ cluster->cpu->levels[j].name,
+ cluster->cpu->levels[j].mode);
+ }
+
+ id++;
+
+
+ list_for_each(list, &cluster->child) {
+ struct lpm_cluster *n;
+
+ pr_info("%d\n", __LINE__);
+ n = list_entry(list, typeof(*n), list);
+ cluster_dt_walkthrough(n);
+ }
+ id--;
+}
diff --git a/drivers/cpuidle/lpm-workarounds.c b/drivers/cpuidle/lpm-workarounds.c
new file mode 100644
index 0000000..a0ce80e
--- /dev/null
+++ b/drivers/cpuidle/lpm-workarounds.c
@@ -0,0 +1,137 @@
+/* Copyright (c) 2014-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/rpm-smd-regulator.h>
+#include <lpm-workarounds.h>
+
+static struct regulator *lpm_cx_reg;
+static struct work_struct dummy_vote_work;
+static struct workqueue_struct *lpm_wa_wq;
+static bool lpm_wa_cx_turbo_unvote;
+
+/* While exiting from RPM assisted power collapse on some targets like MSM8939
+ * the CX is bumped to turbo mode by RPM. To reduce the power impact, APSS
+ * low power driver needs to remove the CX turbo vote.
+ */
+static void send_dummy_cx_vote(struct work_struct *w)
+{
+ if (lpm_cx_reg) {
+ regulator_set_voltage(lpm_cx_reg,
+ RPM_REGULATOR_CORNER_SUPER_TURBO,
+ RPM_REGULATOR_CORNER_SUPER_TURBO);
+
+ regulator_set_voltage(lpm_cx_reg,
+ RPM_REGULATOR_CORNER_NONE,
+ RPM_REGULATOR_CORNER_SUPER_TURBO);
+ }
+}
+
+/*
+ * lpm_wa_cx_unvote_send(): Unvote for CX turbo mode
+ */
+void lpm_wa_cx_unvote_send(void)
+{
+ if (lpm_wa_cx_turbo_unvote)
+ queue_work(lpm_wa_wq, &dummy_vote_work);
+}
+EXPORT_SYMBOL(lpm_wa_cx_unvote_send);
+
+static int lpm_wa_cx_unvote_init(struct platform_device *pdev)
+{
+ int ret = 0;
+
+ lpm_cx_reg = devm_regulator_get(&pdev->dev, "lpm-cx");
+ if (IS_ERR(lpm_cx_reg)) {
+ ret = PTR_ERR(lpm_cx_reg);
+ if (ret != -EPROBE_DEFER)
+ pr_err("Unable to get the CX regulator\n");
+ return ret;
+ }
+
+ INIT_WORK(&dummy_vote_work, send_dummy_cx_vote);
+
+ lpm_wa_wq = alloc_workqueue("lpm-wa",
+ WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
+
+ return ret;
+}
+
+static int lpm_wa_cx_unvote_exit(void)
+{
+ if (lpm_wa_wq)
+ destroy_workqueue(lpm_wa_wq);
+
+ return 0;
+}
+
+static int lpm_wa_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+
+ lpm_wa_cx_turbo_unvote = of_property_read_bool(pdev->dev.of_node,
+ "qcom,lpm-wa-cx-turbo-unvote");
+ if (lpm_wa_cx_turbo_unvote) {
+ ret = lpm_wa_cx_unvote_init(pdev);
+ if (ret) {
+ pr_err("%s: Failed to initialize lpm_wa_cx_unvote (%d)\n",
+ __func__, ret);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int lpm_wa_remove(struct platform_device *pdev)
+{
+ int ret = 0;
+
+ if (lpm_wa_cx_turbo_unvote)
+ ret = lpm_wa_cx_unvote_exit();
+
+ return ret;
+}
+
+static const struct of_device_id lpm_wa_mtch_tbl[] = {
+ {.compatible = "qcom,lpm-workarounds"},
+ {},
+};
+
+static struct platform_driver lpm_wa_driver = {
+ .probe = lpm_wa_probe,
+ .remove = lpm_wa_remove,
+ .driver = {
+ .name = "lpm-workarounds",
+ .owner = THIS_MODULE,
+ .of_match_table = lpm_wa_mtch_tbl,
+ },
+};
+
+static int __init lpm_wa_module_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&lpm_wa_driver);
+ if (ret)
+ pr_info("Error registering %s\n", lpm_wa_driver.driver.name);
+
+ return ret;
+}
+late_initcall(lpm_wa_module_init);
diff --git a/drivers/cpuidle/lpm-workarounds.h b/drivers/cpuidle/lpm-workarounds.h
new file mode 100644
index 0000000..d9a6db7
--- /dev/null
+++ b/drivers/cpuidle/lpm-workarounds.h
@@ -0,0 +1,19 @@
+/* Copyright (c) 2014-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LPM_WA_H
+#define __LPM_WA_H
+
+void lpm_wa_cx_unvote_send(void);
+
+#endif /* __LPM_WA_H */
diff --git a/drivers/iio/imu/inv_icm20602/inv_icm20602_bsp.c b/drivers/iio/imu/inv_icm20602/inv_icm20602_bsp.c
index bfff285..9968e44 100644
--- a/drivers/iio/imu/inv_icm20602/inv_icm20602_bsp.c
+++ b/drivers/iio/imu/inv_icm20602/inv_icm20602_bsp.c
@@ -95,12 +95,12 @@
#define W_FLG 0
#define R_FLG 1
int icm20602_bulk_read(struct inv_icm20602_state *st,
- int reg, char *buf, int size)
+ int reg, u8 *buf, int size)
{
int result = MPU_SUCCESS;
char tx_buf[2] = {0x0, 0x0};
int tmp_size = size;
- int tmp_buf = buf;
+ u8 *tmp_buf = buf;
struct i2c_msg msg[2];
if (!st || !buf)
@@ -109,38 +109,31 @@
if (st->interface == ICM20602_SPI) {
tx_buf[0] = ICM20602_READ_REG(reg);
result = spi_write_then_read(st->spi, &tx_buf[0],
- 1, tmp_buf, size);
+ 1, tmp_buf, size);
if (result) {
pr_err("mpu read reg %u failed, rc %d\n",
- reg, result);
+ reg, result);
result = -MPU_READ_FAIL;
}
} else {
result = size;
- while (tmp_size > 0) {
#ifdef ICM20602_I2C_SMBUS
- result += i2c_smbus_read_i2c_block_data(st->client,
- reg, (tmp_size < 32)?tmp_size:32, tmp_buf);
- tmp_size -= 32;
- tmp_buf += tmp_size;
+ result += i2c_smbus_read_i2c_block_data(st->client,
+ reg, size, tmp_buf);
#else
- tx_buf[0] = reg;
- msg[0].addr = st->client->addr;
- msg[0].flags = W_FLG;
- msg[0].len = 1;
- msg[0].buf = tx_buf;
+ tx_buf[0] = reg;
+ msg[0].addr = st->client->addr;
+ msg[0].flags = W_FLG;
+ msg[0].len = 1;
+ msg[0].buf = tx_buf;
- msg[1].addr = st->client->addr;
- msg[1].flags = I2C_M_RD;
- msg[1].len = (tmp_size < 32)?tmp_size:32;
- msg[1].buf = tmp_buf;
- i2c_transfer(st->client->adapter, msg, ARRAY_SIZE(msg));
- tmp_size -= 32;
- tmp_buf += tmp_size;
+ msg[1].addr = st->client->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = size;
+ msg[1].buf = tmp_buf;
+ i2c_transfer(st->client->adapter, msg, ARRAY_SIZE(msg));
#endif
- }
}
-
return result;
}
@@ -355,6 +348,33 @@
return MPU_SUCCESS;
}
+int icm20602_int_status(struct inv_icm20602_state *st,
+ u8 *int_status)
+{
+ return icm20602_read_reg(st,
+ reg_set_20602.INT_STATUS.address, int_status);
+}
+
+int icm20602_int_wm_status(struct inv_icm20602_state *st,
+ u8 *int_status)
+{
+ return icm20602_read_reg(st,
+ reg_set_20602.FIFO_WM_INT_STATUS.address, int_status);
+}
+
+int icm20602_fifo_count(struct inv_icm20602_state *st,
+ u16 *fifo_count)
+{
+ u8 count_h, count_l;
+
+ *fifo_count = 0;
+ icm20602_read_reg(st, reg_set_20602.FIFO_COUNTH.address, &count_h);
+ icm20602_read_reg(st, reg_set_20602.FIFO_COUNTL.address, &count_l);
+ *fifo_count |= (count_h << 8);
+ *fifo_count |= count_l;
+ return MPU_SUCCESS;
+}
+
static int icm20602_config_waterlevel(struct inv_icm20602_state *st)
{
struct icm20602_user_config *config = NULL;
@@ -809,6 +829,7 @@
int result = MPU_SUCCESS;
struct icm20602_user_config *config = NULL;
int package_count;
+ int i;
config = st->config;
if (st == NULL || st->config == NULL) {
@@ -860,7 +881,7 @@
/* buffer malloc */
package_count = config->fifo_waterlevel / ICM20602_PACKAGE_SIZE;
- st->buf = kzalloc(sizeof(config->fifo_waterlevel * 2), GFP_ATOMIC);
+ st->buf = kzalloc(config->fifo_waterlevel * 2, GFP_ATOMIC);
if (!st->buf)
return -ENOMEM;
@@ -869,9 +890,26 @@
if (!st->data_push)
return -ENOMEM;
+ for (i = 0; i < package_count; i++) {
+ st->data_push[i].raw_data =
+ kzalloc(ICM20602_PACKAGE_SIZE, GFP_ATOMIC);
+ }
+
return result;
}
+int icm20602_reset_fifo(struct inv_icm20602_state *st)
+{
+ reg_set_20602.USER_CTRL.reg_u.REG.FIFO_RST = 0x1;
+ if (icm20602_write_reg_simple(st, reg_set_20602.USER_CTRL)) {
+ reg_set_20602.USER_CTRL.reg_u.REG.FIFO_RST = 0x0;
+ return -MPU_FAIL;
+ }
+ reg_set_20602.USER_CTRL.reg_u.REG.FIFO_RST = 0x0;
+ return MPU_SUCCESS;
+}
+
+
void icm20602_rw_test(struct inv_icm20602_state *st)
{
uint8_t val = 0;
diff --git a/drivers/iio/imu/inv_icm20602/inv_icm20602_core.c b/drivers/iio/imu/inv_icm20602/inv_icm20602_core.c
index 0f7fc92..15df447 100644
--- a/drivers/iio/imu/inv_icm20602/inv_icm20602_core.c
+++ b/drivers/iio/imu/inv_icm20602/inv_icm20602_core.c
@@ -41,12 +41,12 @@
{
struct icm20602_user_config *config = st->config;
- config->user_fps_in_ms = 10;
+ config->user_fps_in_ms = 20;
config->gyro_lpf = INV_ICM20602_GYRO_LFP_92HZ;
config->gyro_fsr = ICM20602_GYRO_FSR_1000DPS;
config->acc_lpf = ICM20602_ACCLFP_99;
config->acc_fsr = ICM20602_ACC_FSR_4G;
- config->gyro_accel_sample_rate = ICM20602_SAMPLE_RATE_100HZ;
+ config->gyro_accel_sample_rate = ICM20602_SAMPLE_RATE_200HZ;
config->fifo_enabled = true;
}
diff --git a/drivers/iio/imu/inv_icm20602/inv_icm20602_iio.h b/drivers/iio/imu/inv_icm20602/inv_icm20602_iio.h
index 943fc1e..9ea5ae5 100644
--- a/drivers/iio/imu/inv_icm20602/inv_icm20602_iio.h
+++ b/drivers/iio/imu/inv_icm20602/inv_icm20602_iio.h
@@ -180,7 +180,7 @@
uint32_t user_fps_in_ms;
bool fifo_enabled;
- uint32_t fifo_waterlevel;
+ uint16_t fifo_waterlevel;
struct X_Y_Z wake_on_motion;
};
@@ -257,19 +257,21 @@
struct struct_icm20602_data {
s64 timestamps;
- struct struct_icm20602_raw_data raw_data;
- struct struct_icm20602_real_data real_data;
+ u8 *raw_data;
};
extern struct iio_trigger *inv_trig;
irqreturn_t inv_icm20602_irq_handler(int irq, void *p);
irqreturn_t inv_icm20602_read_fifo_fn(int irq, void *p);
-int inv_icm20602_reset_fifo(struct iio_dev *indio_dev);
int inv_icm20602_probe_trigger(struct iio_dev *indio_dev);
void inv_icm20602_remove_trigger(struct inv_icm20602_state *st);
int inv_icm20602_validate_trigger(struct iio_dev *indio_dev,
struct iio_trigger *trig);
+int icm20602_int_status(struct inv_icm20602_state *st, u8 *int_status);
+int icm20602_int_wm_status(struct inv_icm20602_state *st, u8 *int_status);
+int icm20602_reset_fifo(struct inv_icm20602_state *st);
+int icm20602_fifo_count(struct inv_icm20602_state *st, u16 *fifo_count);
int icm20602_read_raw(struct inv_icm20602_state *st,
struct struct_icm20602_real_data *real_data, uint32_t type);
diff --git a/drivers/iio/imu/inv_icm20602/inv_icm20602_ring.c b/drivers/iio/imu/inv_icm20602/inv_icm20602_ring.c
index b0f93be..de02656 100644
--- a/drivers/iio/imu/inv_icm20602/inv_icm20602_ring.c
+++ b/drivers/iio/imu/inv_icm20602/inv_icm20602_ring.c
@@ -40,15 +40,6 @@
spin_unlock_irqrestore(&st->time_stamp_lock, flags);
}
-int inv_icm20602_reset_fifo(struct iio_dev *indio_dev)
-{
- int result;
- //u8 d;
- //struct inv_icm20602_state *st = iio_priv(indio_dev);
-
- return result;
-}
-
/*
* inv_icm20602_irq_handler() - Cache a timestamp at each data ready interrupt.
*/
@@ -67,44 +58,50 @@
return IRQ_WAKE_THREAD;
}
+#define BIT_FIFO_OFLOW_INT 0x10
+#define BIT_FIFO_WM_INT 0x40
static int inv_icm20602_read_data(struct iio_dev *indio_dev)
{
int result = MPU_SUCCESS;
struct inv_icm20602_state *st = iio_priv(indio_dev);
struct icm20602_user_config *config = st->config;
int package_count;
- //char *buf = st->buf;
- //struct struct_icm20602_data *data_push = st->data_push;
+ char *buf = st->buf;
+ struct struct_icm20602_data *data_push = st->data_push;
s64 timestamp;
+ u8 int_status, int_wm_status;
+ u16 fifo_count;
int i;
if (!st)
return -MPU_FAIL;
package_count = config->fifo_waterlevel / ICM20602_PACKAGE_SIZE;
mutex_lock(&indio_dev->mlock);
- if (config->fifo_enabled) {
- result = icm20602_read_fifo(st,
- st->buf, config->fifo_waterlevel);
- if (result != config->fifo_waterlevel) {
- pr_err("icm20602 read fifo failed, result = %d\n",
- result);
- goto flush_fifo;
- }
-
- for (i = 0; i < package_count; i++) {
- memcpy((char *)(&st->data_push[i].raw_data),
- st->buf, ICM20602_PACKAGE_SIZE);
- result = kfifo_out(&st->timestamps,
- ×tamp, 1);
- /* when there is no timestamp, put it as 0 */
- if (result == 0)
- timestamp = 0;
- st->data_push[i].timestamps = timestamp;
- iio_push_to_buffers(indio_dev, st->data_push+i);
- st->buf += ICM20602_PACKAGE_SIZE;
- }
+ icm20602_int_status(st, &int_status);
+ if (int_status & BIT_FIFO_OFLOW_INT) {
+ icm20602_fifo_count(st, &fifo_count);
+ pr_debug("fifo_count = %d\n", fifo_count);
+ icm20602_reset_fifo(st);
+ goto end_session;
}
-//end_session:
+ if (config->fifo_enabled) {
+ result = kfifo_out(&st->timestamps,
+ ×tamp, 1);
+ /* when there is no timestamp, put it as 0 */
+ if (result == 0)
+ timestamp = 0;
+ for (i = 0; i < package_count; i++) {
+ result = icm20602_read_fifo(st,
+ buf, ICM20602_PACKAGE_SIZE);
+ memcpy(st->data_push[i].raw_data,
+ buf, ICM20602_PACKAGE_SIZE);
+ iio_push_to_buffers_with_timestamp(indio_dev,
+ st->data_push[i].raw_data, timestamp);
+ buf += ICM20602_PACKAGE_SIZE;
+ }
+ memset(st->buf, 0, config->fifo_waterlevel);
+ }
+end_session:
mutex_unlock(&indio_dev->mlock);
iio_trigger_notify_done(indio_dev->trig);
return MPU_SUCCESS;
@@ -112,7 +109,7 @@
flush_fifo:
/* Flush HW and SW FIFOs. */
inv_clear_kfifo(st);
- inv_icm20602_reset_fifo(indio_dev);
+ icm20602_reset_fifo(st);
mutex_unlock(&indio_dev->mlock);
iio_trigger_notify_done(indio_dev->trig);
return MPU_SUCCESS;
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 2f0f448..b0e9fe6 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -44,6 +44,34 @@
#include "irq-gic-common.h"
+#define MAX_IRQ 1020U /* Max number of SGI+PPI+SPI */
+#define SPI_START_IRQ 32 /* SPI start irq number */
+#define GICD_ICFGR_BITS 2 /* 2 bits per irq in GICD_ICFGR */
+#define GICD_ISENABLER_BITS 1 /* 1 bit per irq in GICD_ISENABLER */
+#define GICD_IPRIORITYR_BITS 8 /* 8 bits per irq in GICD_IPRIORITYR */
+
+/* 32 bit mask with lower n bits set */
+#define UMASK_LOW(n) (~0U >> (32 - (n)))
+
+/* Number of 32-bit words required to store all irqs, for
+ * registers where each word stores configuration for each irq
+ * in bits_per_irq bits.
+ */
+#define NUM_IRQ_WORDS(bits_per_irq) (DIV_ROUND_UP(MAX_IRQ, \
+ 32 / (bits_per_irq)))
+#define MAX_IRQS_IGNORE 10
+
+#define IRQ_NR_BOUND(nr) min((nr), MAX_IRQ)
+
+/* Bitmap to irqs, which are restored */
+static DECLARE_BITMAP(irqs_restore, MAX_IRQ);
+
+/* Bitmap of irqs for which restore is ignored.
+ * Presently, only GICD_IROUTER mismatches are
+ * ignored.
+ */
+static DECLARE_BITMAP(irqs_ignore_restore, MAX_IRQ);
+
struct redist_region {
void __iomem *redist_base;
phys_addr_t phys_base;
@@ -60,6 +88,16 @@
u32 nr_redist_regions;
unsigned int irq_nr;
struct partition_desc *ppi_descs[16];
+
+ u64 saved_spi_router[MAX_IRQ];
+ u32 saved_spi_enable[NUM_IRQ_WORDS(GICD_ISENABLER_BITS)];
+ u32 saved_spi_cfg[NUM_IRQ_WORDS(GICD_ICFGR_BITS)];
+ u32 saved_spi_priority[NUM_IRQ_WORDS(GICD_IPRIORITYR_BITS)];
+
+ u64 changed_spi_router[MAX_IRQ];
+ u32 changed_spi_enable[NUM_IRQ_WORDS(GICD_ISENABLER_BITS)];
+ u32 changed_spi_cfg[NUM_IRQ_WORDS(GICD_ICFGR_BITS)];
+ u32 changed_spi_priority[NUM_IRQ_WORDS(GICD_IPRIORITYR_BITS)];
};
static struct gic_chip_data gic_data __read_mostly;
@@ -67,6 +105,58 @@
static struct gic_kvm_info gic_v3_kvm_info;
+enum gicd_save_restore_reg {
+ SAVED_ICFGR,
+ SAVED_IS_ENABLER,
+ SAVED_IPRIORITYR,
+ NUM_SAVED_GICD_REGS,
+};
+
+/* Stores start address of spi config for saved gicd regs */
+static u32 *saved_spi_regs_start[NUM_SAVED_GICD_REGS] = {
+ [SAVED_ICFGR] = gic_data.saved_spi_cfg,
+ [SAVED_IS_ENABLER] = gic_data.saved_spi_enable,
+ [SAVED_IPRIORITYR] = gic_data.saved_spi_priority,
+};
+
+/* Stores start address of spi config for changed gicd regs */
+static u32 *changed_spi_regs_start[NUM_SAVED_GICD_REGS] = {
+ [SAVED_ICFGR] = gic_data.changed_spi_cfg,
+ [SAVED_IS_ENABLER] = gic_data.changed_spi_enable,
+ [SAVED_IPRIORITYR] = gic_data.changed_spi_priority,
+};
+
+/* GICD offset for saved registers */
+static u32 gicd_offset[NUM_SAVED_GICD_REGS] = {
+ [SAVED_ICFGR] = GICD_ICFGR,
+ [SAVED_IS_ENABLER] = GICD_ISENABLER,
+ [SAVED_IPRIORITYR] = GICD_IPRIORITYR,
+};
+
+/* Bits per irq word, for gicd saved registers */
+static u32 gicd_reg_bits_per_irq[NUM_SAVED_GICD_REGS] = {
+ [SAVED_ICFGR] = GICD_ICFGR_BITS,
+ [SAVED_IS_ENABLER] = GICD_ISENABLER_BITS,
+ [SAVED_IPRIORITYR] = GICD_IPRIORITYR_BITS,
+};
+
+#define for_each_spi_irq_word(i, reg) \
+ for (i = 0; \
+ i < DIV_ROUND_UP(IRQ_NR_BOUND(gic_data.irq_nr) - SPI_START_IRQ, \
+ 32 / gicd_reg_bits_per_irq[reg]); \
+ i++)
+
+#define read_spi_word_offset(base, reg, i) \
+ readl_relaxed_no_log( \
+ base + gicd_offset[reg] + i * 4 + \
+ SPI_START_IRQ * gicd_reg_bits_per_irq[reg] / 8)
+
+#define restore_spi_word_offset(base, reg, i) \
+ writel_relaxed_no_log( \
+ saved_spi_regs_start[reg][i],\
+ base + gicd_offset[reg] + i * 4 + \
+ SPI_START_IRQ * gicd_reg_bits_per_irq[reg] / 8)
+
#define gic_data_rdist() (this_cpu_ptr(gic_data.rdists.rdist))
#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
#define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K)
@@ -134,6 +224,229 @@
}
#endif
+void gic_v3_dist_save(void)
+{
+ void __iomem *base = gic_data.dist_base;
+ int reg, i;
+
+ for (reg = SAVED_ICFGR; reg < NUM_SAVED_GICD_REGS; reg++) {
+ for_each_spi_irq_word(i, reg) {
+ saved_spi_regs_start[reg][i] =
+ read_spi_word_offset(base, reg, i);
+ }
+ }
+
+ for (i = 32; i < IRQ_NR_BOUND(gic_data.irq_nr); i++)
+ gic_data.saved_spi_router[i] =
+ gic_read_irouter(base + GICD_IROUTER + i * 8);
+}
+
+static void _gicd_check_reg(enum gicd_save_restore_reg reg)
+{
+ void __iomem *base = gic_data.dist_base;
+ u32 *saved_spi_cfg = saved_spi_regs_start[reg];
+ u32 *changed_spi_cfg = changed_spi_regs_start[reg];
+ u32 bits_per_irq = gicd_reg_bits_per_irq[reg];
+ u32 current_cfg = 0;
+ int i, j = SPI_START_IRQ, l;
+ u32 k;
+
+ for_each_spi_irq_word(i, reg) {
+ current_cfg = read_spi_word_offset(base, reg, i);
+ if (current_cfg != saved_spi_cfg[i]) {
+ for (k = current_cfg ^ saved_spi_cfg[i],
+ l = 0; k ; k >>= bits_per_irq, l++) {
+ if (k & UMASK_LOW(bits_per_irq))
+ set_bit(j+l, irqs_restore);
+ }
+ changed_spi_cfg[i] = current_cfg ^ saved_spi_cfg[i];
+ }
+ j += 32 / bits_per_irq;
+ }
+}
+
+#define _gic_v3_dist_check_icfgr() \
+ _gicd_check_reg(SAVED_ICFGR)
+#define _gic_v3_dist_check_ipriorityr() \
+ _gicd_check_reg(SAVED_IPRIORITYR)
+#define _gic_v3_dist_check_isenabler() \
+ _gicd_check_reg(SAVED_IS_ENABLER)
+
+static void _gic_v3_dist_check_irouter(void)
+{
+ void __iomem *base = gic_data.dist_base;
+ u64 current_irouter_cfg = 0;
+ int i;
+
+ for (i = 32; i < IRQ_NR_BOUND(gic_data.irq_nr); i++) {
+ if (test_bit(i, irqs_ignore_restore))
+ continue;
+ current_irouter_cfg = gic_read_irouter(
+ base + GICD_IROUTER + i * 8);
+ if (current_irouter_cfg != gic_data.saved_spi_router[i]) {
+ set_bit(i, irqs_restore);
+ gic_data.changed_spi_router[i] =
+ current_irouter_cfg ^ gic_data.saved_spi_router[i];
+ }
+ }
+}
+
+static void _gic_v3_dist_restore_reg(enum gicd_save_restore_reg reg)
+{
+ void __iomem *base = gic_data.dist_base;
+ int i;
+
+ for_each_spi_irq_word(i, reg) {
+ if (changed_spi_regs_start[reg][i])
+ restore_spi_word_offset(base, reg, i);
+ }
+
+ /* Commit all restored configurations before subsequent writes */
+ wmb();
+}
+
+#define _gic_v3_dist_restore_icfgr() _gic_v3_dist_restore_reg(SAVED_ICFGR)
+#define _gic_v3_dist_restore_ipriorityr() \
+ _gic_v3_dist_restore_reg(SAVED_IPRIORITYR)
+
+static void _gic_v3_dist_restore_set_reg(u32 offset)
+{
+ void __iomem *base = gic_data.dist_base;
+ int i, j = SPI_START_IRQ, l;
+ int irq_nr = IRQ_NR_BOUND(gic_data.irq_nr) - SPI_START_IRQ;
+
+ for (i = 0; i < DIV_ROUND_UP(irq_nr, 32); i++, j += 32) {
+ u32 reg_val = readl_relaxed_no_log(base + offset + i * 4 + 4);
+ bool irqs_restore_updated = 0;
+
+ for (l = 0; l < 32; l++) {
+ if (test_bit(j+l, irqs_restore)) {
+ reg_val |= BIT(l);
+ irqs_restore_updated = 1;
+ }
+ }
+
+ if (irqs_restore_updated) {
+ writel_relaxed_no_log(
+ reg_val, base + offset + i * 4 + 4);
+ }
+ }
+
+ /* Commit restored configuration updates before subsequent writes */
+ wmb();
+}
+
+#define _gic_v3_dist_restore_isenabler() \
+ _gic_v3_dist_restore_set_reg(GICD_ISENABLER)
+
+#define _gic_v3_dist_restore_ispending() \
+ _gic_v3_dist_restore_set_reg(GICD_ISPENDR)
+
+static void _gic_v3_dist_restore_irouter(void)
+{
+ void __iomem *base = gic_data.dist_base;
+ int i;
+
+ for (i = 32; i < IRQ_NR_BOUND(gic_data.irq_nr); i++) {
+ if (test_bit(i, irqs_ignore_restore))
+ continue;
+ if (gic_data.changed_spi_router[i]) {
+ gic_write_irouter(gic_data.saved_spi_router[i],
+ base + GICD_IROUTER + i * 8);
+ }
+ }
+
+ /* Commit GICD_IROUTER writes before subsequent writes */
+ wmb();
+}
+
+static void _gic_v3_dist_clear_reg(u32 offset)
+{
+ void __iomem *base = gic_data.dist_base;
+ int i, j = SPI_START_IRQ, l;
+ int irq_nr = IRQ_NR_BOUND(gic_data.irq_nr) - SPI_START_IRQ;
+
+ for (i = 0; i < DIV_ROUND_UP(irq_nr, 32); i++, j += 32) {
+ u32 clear = 0;
+ bool irqs_restore_updated = 0;
+
+ for (l = 0; l < 32; l++) {
+ if (test_bit(j+l, irqs_restore)) {
+ clear |= BIT(l);
+ irqs_restore_updated = 1;
+ }
+ }
+
+ if (irqs_restore_updated) {
+ writel_relaxed_no_log(
+ clear, base + offset + i * 4 + 4);
+ }
+ }
+
+ /* Commit clearing of irq config before subsequent writes */
+ wmb();
+}
+
+#define _gic_v3_dist_set_icenabler() \
+ _gic_v3_dist_clear_reg(GICD_ICENABLER)
+
+#define _gic_v3_dist_set_icpending() \
+ _gic_v3_dist_clear_reg(GICD_ICPENDR)
+
+#define _gic_v3_dist_set_icactive() \
+ _gic_v3_dist_clear_reg(GICD_ICACTIVER)
+
+/* Restore GICD state for SPIs. SPI configuration is restored
+ * for GICD_ICFGR, GICD_ISENABLER, GICD_IPRIORITYR, GICD_IROUTER
+ * registers. Following is the sequence for restore:
+ *
+ * 1. For SPIs, check whether any of GICD_ICFGR, GICD_ISENABLER,
+ * GICD_IPRIORITYR, GICD_IROUTER, current configuration is
+ * different from saved configuration.
+ *
+ * For all irqs, with mismatched configurations,
+ *
+ * 2. Set GICD_ICENABLER and wait for its completion.
+ *
+ * 3. Restore any changed GICD_ICFGR, GICD_IPRIORITYR, GICD_IROUTER
+ * configurations.
+ *
+ * 4. Set GICD_ICACTIVER.
+ *
+ * 5. Set pending for the interrupt.
+ *
+ * 6. Enable interrupt and wait for its completion.
+ *
+ */
+void gic_v3_dist_restore(void)
+{
+ _gic_v3_dist_check_icfgr();
+ _gic_v3_dist_check_ipriorityr();
+ _gic_v3_dist_check_isenabler();
+ _gic_v3_dist_check_irouter();
+
+ if (bitmap_empty(irqs_restore, IRQ_NR_BOUND(gic_data.irq_nr)))
+ return;
+
+ _gic_v3_dist_set_icenabler();
+ gic_dist_wait_for_rwp();
+
+ _gic_v3_dist_restore_icfgr();
+ _gic_v3_dist_restore_ipriorityr();
+ _gic_v3_dist_restore_irouter();
+
+ _gic_v3_dist_set_icactive();
+
+ _gic_v3_dist_set_icpending();
+ _gic_v3_dist_restore_ispending();
+
+ _gic_v3_dist_restore_isenabler();
+ gic_dist_wait_for_rwp();
+
+ /* Commit all writes before proceeding */
+ wmb();
+}
+
/*
* gic_show_pending_irq - Shows the pending interrupts
* Note: Interrupts should be disabled on the cpu from which
@@ -1244,7 +1557,8 @@
struct redist_region *rdist_regs;
u64 redist_stride;
u32 nr_redist_regions;
- int err, i;
+ int err, i, ignore_irqs_len;
+ u32 ignore_restore_irqs[MAX_IRQS_IGNORE] = {0};
dist_base = of_iomap(node, 0);
if (!dist_base) {
@@ -1294,6 +1608,14 @@
gic_populate_ppi_partitions(node);
gic_of_setup_kvm_info(node);
+
+ ignore_irqs_len = of_property_read_variable_u32_array(node,
+ "ignored-save-restore-irqs",
+ ignore_restore_irqs,
+ 0, MAX_IRQS_IGNORE);
+ for (i = 0; i < ignore_irqs_len; i++)
+ set_bit(ignore_restore_irqs[i], irqs_ignore_restore);
+
return 0;
out_unmap_rdist:
diff --git a/drivers/leds/leds-qpnp-haptics.c b/drivers/leds/leds-qpnp-haptics.c
index 764657a..8a850c5 100644
--- a/drivers/leds/leds-qpnp-haptics.c
+++ b/drivers/leds/leds-qpnp-haptics.c
@@ -24,6 +24,7 @@
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/qpnp/qpnp-misc.h>
#include <linux/qpnp/qpnp-revid.h>
@@ -321,6 +322,7 @@
int sc_irq;
struct pwm_param pwm_data;
struct hap_lra_ares_param ares_cfg;
+ struct regulator *vcc_pon;
u32 play_time_ms;
u32 max_play_time_ms;
u32 vmax_mv;
@@ -355,6 +357,7 @@
bool lra_auto_mode;
bool play_irq_en;
bool auto_res_err_recovery_hw;
+ bool vcc_pon_enabled;
};
static int qpnp_haptics_parse_buffer_dt(struct hap_chip *chip);
@@ -801,10 +804,29 @@
enable = atomic_read(&chip->state);
pr_debug("state: %d\n", enable);
+
+ if (chip->vcc_pon && enable && !chip->vcc_pon_enabled) {
+ rc = regulator_enable(chip->vcc_pon);
+ if (rc < 0)
+ pr_err("%s: could not enable vcc_pon regulator rc=%d\n",
+ __func__, rc);
+ else
+ chip->vcc_pon_enabled = true;
+ }
+
rc = qpnp_haptics_play(chip, enable);
if (rc < 0)
pr_err("Error in %sing haptics, rc=%d\n",
enable ? "play" : "stopp", rc);
+
+ if (chip->vcc_pon && !enable && chip->vcc_pon_enabled) {
+ rc = regulator_disable(chip->vcc_pon);
+ if (rc)
+ pr_err("%s: could not disable vcc_pon regulator rc=%d\n",
+ __func__, rc);
+ else
+ chip->vcc_pon_enabled = false;
+ }
}
static enum hrtimer_restart hap_stop_timer(struct hrtimer *timer)
@@ -2054,6 +2076,7 @@
struct device_node *revid_node, *misc_node;
const char *temp_str;
int rc, temp;
+ struct regulator *vcc_pon;
rc = of_property_read_u32(node, "reg", &temp);
if (rc < 0) {
@@ -2381,6 +2404,16 @@
else if (chip->play_mode == HAP_PWM)
rc = qpnp_haptics_parse_pwm_dt(chip);
+ if (of_find_property(node, "vcc_pon-supply", NULL)) {
+ vcc_pon = regulator_get(&chip->pdev->dev, "vcc_pon");
+ if (IS_ERR(vcc_pon)) {
+ rc = PTR_ERR(vcc_pon);
+ dev_err(&chip->pdev->dev,
+ "regulator get failed vcc_pon rc=%d\n", rc);
+ }
+ chip->vcc_pon = vcc_pon;
+ }
+
return rc;
}
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c b/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
index ba32526..dfcb73a 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
@@ -229,7 +229,9 @@
static int msm_csid_reset(struct csid_device *csid_dev)
{
int32_t rc = 0;
+ uint32_t irq = 0, irq_bitshift;
+ irq_bitshift = csid_dev->ctrl_reg->csid_reg.csid_rst_done_irq_bitshift;
msm_camera_io_w(csid_dev->ctrl_reg->csid_reg.csid_rst_stb_all,
csid_dev->base +
csid_dev->ctrl_reg->csid_reg.csid_rst_cmd_addr);
@@ -238,8 +240,23 @@
if (rc < 0) {
pr_err("wait_for_completion in msm_csid_reset fail rc = %d\n",
rc);
+ } else if (rc == 0) {
+ irq = msm_camera_io_r(csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_irq_status_addr);
+ pr_err_ratelimited("%s CSID%d_IRQ_STATUS_ADDR = 0x%x\n",
+ __func__, csid_dev->pdev->id, irq);
+ if (irq & (0x1 << irq_bitshift)) {
+ rc = 1;
+ CDBG("%s succeeded", __func__);
+ } else {
+ rc = 0;
+ pr_err("%s reset csid_irq_status failed = 0x%x\n",
+ __func__, irq);
+ }
if (rc == 0)
rc = -ETIMEDOUT;
+ } else {
+ CDBG("%s succeeded", __func__);
}
return rc;
}
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
index 6ae030f..a665978 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
@@ -609,6 +609,7 @@
struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
u64 rate = 0;
struct clock_data *dcvs = NULL;
+ u32 operating_rate, vsp_factor_num = 10, vsp_factor_den = 7;
core = inst->core;
dcvs = &inst->clk_data;
@@ -631,8 +632,19 @@
vsp_cycles = mbs_per_second * inst->clk_data.entry->vsp_cycles;
- /* 10 / 7 is overhead factor */
- vsp_cycles += (inst->clk_data.bitrate * 10) / 7;
+ operating_rate = inst->clk_data.operating_rate >> 16;
+ if (operating_rate > inst->prop.fps && inst->prop.fps) {
+ vsp_factor_num *= operating_rate;
+ vsp_factor_den *= inst->prop.fps;
+ }
+ // Adjust the factor for the 2-core case: the workload is not
+ // equally distributed across the 2 cores, so use 0.65 instead of 0.5
+ if (inst->clk_data.core_id == VIDC_CORE_ID_3) {
+ vsp_factor_num = vsp_factor_num * 13 / 10;
+ vsp_factor_den *= 2;
+ }
+ vsp_cycles += ((u64)inst->clk_data.bitrate * vsp_factor_num) /
+ vsp_factor_den;
} else if (inst->session_type == MSM_VIDC_DECODER) {
vpp_cycles = mbs_per_second * inst->clk_data.entry->vpp_cycles;
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index f7c63cf..f70420f 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -21,11 +21,16 @@
#include "ftm.h"
#define WIL_MAX_ROC_DURATION_MS 5000
+#define CTRY_CHINA "CN"
bool disable_ap_sme;
module_param(disable_ap_sme, bool, 0444);
MODULE_PARM_DESC(disable_ap_sme, " let user space handle AP mode SME");
+static bool country_specific_board_file;
+module_param(country_specific_board_file, bool, 0444);
+MODULE_PARM_DESC(country_specific_board_file, " switch board file upon regulatory domain change (Default: false)");
+
static bool ignore_reg_hints = true;
module_param(ignore_reg_hints, bool, 0444);
MODULE_PARM_DESC(ignore_reg_hints, " Ignore OTA regulatory hints (Default: true)");
@@ -1984,6 +1989,64 @@
return 0;
}
+static int wil_switch_board_file(struct wil6210_priv *wil,
+ const u8 *new_regdomain)
+{
+ int rc = 0;
+
+ if (!country_specific_board_file)
+ return 0;
+
+ if (memcmp(wil->regdomain, CTRY_CHINA, 2) == 0) {
+ wil_info(wil, "moving out of China reg domain, use default board file\n");
+ wil->board_file_country[0] = '\0';
+ } else if (memcmp(new_regdomain, CTRY_CHINA, 2) == 0) {
+ wil_info(wil, "moving into China reg domain, use country specific board file\n");
+ strlcpy(wil->board_file_country, CTRY_CHINA,
+ sizeof(wil->board_file_country));
+ } else {
+ return 0;
+ }
+
+ /* need to switch board file - reset the device */
+
+ mutex_lock(&wil->mutex);
+
+ if (!netif_running(wil_to_ndev(wil)) || wil_is_recovery_blocked(wil))
+ /* new board file will be used in next FW load */
+ goto out;
+
+ __wil_down(wil);
+ rc = __wil_up(wil);
+
+out:
+ mutex_unlock(&wil->mutex);
+ return rc;
+}
+
+static void wil_cfg80211_reg_notify(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int rc;
+
+ wil_info(wil, "cfg reg_notify %c%c%s%s initiator %d hint_type %d\n",
+ request->alpha2[0], request->alpha2[1],
+ request->intersect ? " intersect" : "",
+ request->processed ? " processed" : "",
+ request->initiator, request->user_reg_hint_type);
+
+ if (memcmp(wil->regdomain, request->alpha2, 2) == 0)
+ /* reg domain did not change */
+ return;
+
+ rc = wil_switch_board_file(wil, request->alpha2);
+ if (rc)
+ wil_err(wil, "switch board file failed %d\n", rc);
+
+ memcpy(wil->regdomain, request->alpha2, 2);
+}
+
static struct cfg80211_ops wil_cfg80211_ops = {
.add_virtual_intf = wil_cfg80211_add_iface,
.del_virtual_intf = wil_cfg80211_del_iface,
@@ -2055,6 +2118,8 @@
wiphy->mgmt_stypes = wil_mgmt_stypes;
wiphy->features |= NL80211_FEATURE_SK_TX_STATUS;
+ wiphy->reg_notifier = wil_cfg80211_reg_notify;
+
wiphy->n_vendor_commands = ARRAY_SIZE(wil_nl80211_vendor_commands);
wiphy->vendor_commands = wil_nl80211_vendor_commands;
wiphy->vendor_events = wil_nl80211_vendor_events;
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 2baa6cf..f37254d 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -26,6 +26,7 @@
#define WAIT_FOR_HALP_VOTE_MS 100
#define WAIT_FOR_SCAN_ABORT_MS 1000
+#define WIL_BOARD_FILE_MAX_NAMELEN 128
bool debug_fw; /* = false; */
module_param(debug_fw, bool, 0444);
@@ -946,6 +947,30 @@
le32_to_cpus(&r->head);
}
+/* construct actual board file name to use */
+void wil_get_board_file(struct wil6210_priv *wil, char *buf, size_t len)
+{
+ const char *board_file = WIL_BOARD_FILE_NAME;
+ const char *ext;
+ int prefix_len;
+
+ if (wil->board_file_country[0] == '\0') {
+ strlcpy(buf, board_file, len);
+ return;
+ }
+
+ /* use country specific board file */
+ if (len < strlen(board_file) + 4 /* for _XX and terminating null */)
+ return;
+
+ ext = strrchr(board_file, '.');
+ prefix_len = (ext ? ext - board_file : strlen(board_file));
+ snprintf(buf, len, "%.*s_%.2s",
+ prefix_len, board_file, wil->board_file_country);
+ if (ext)
+ strlcat(buf, ext, len);
+}
+
static int wil_get_bl_info(struct wil6210_priv *wil)
{
struct net_device *ndev = wil_to_ndev(wil);
@@ -1260,8 +1285,12 @@
wil_set_oob_mode(wil, oob_mode);
if (load_fw) {
+ char board_file[WIL_BOARD_FILE_MAX_NAMELEN];
+
+ board_file[0] = '\0';
+ wil_get_board_file(wil, board_file, sizeof(board_file));
wil_info(wil, "Use firmware <%s> + board <%s>\n",
- wil->wil_fw_name, WIL_BOARD_FILE_NAME);
+ wil->wil_fw_name, board_file);
if (!no_flash)
wil_bl_prepare_halt(wil);
@@ -1273,11 +1302,9 @@
if (rc)
goto out;
if (wil->brd_file_addr)
- rc = wil_request_board(wil, WIL_BOARD_FILE_NAME);
+ rc = wil_request_board(wil, board_file);
else
- rc = wil_request_firmware(wil,
- WIL_BOARD_FILE_NAME,
- true);
+ rc = wil_request_firmware(wil, board_file, true);
if (rc)
goto out;
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index f4476ee..2b71deb 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -683,6 +683,7 @@
const char *hw_name;
const char *wil_fw_name;
char *board_file;
+ char board_file_country[3]; /* alpha2 */
u32 brd_file_addr;
u32 brd_file_max_size;
DECLARE_BITMAP(hw_capa, hw_capa_last);
@@ -796,6 +797,8 @@
} snr_thresh;
int fw_calib_result;
+ /* current reg domain configured in kernel */
+ char regdomain[3]; /* alpha2 */
#ifdef CONFIG_PM
struct notifier_block pm_notify;
@@ -873,6 +876,8 @@
wil_w(wil, reg, wil_r(wil, reg) & ~val);
}
+void wil_get_board_file(struct wil6210_priv *wil, char *buf, size_t len);
+
#if defined(CONFIG_DYNAMIC_DEBUG)
#define wil_hex_dump_txrx(prefix_str, prefix_type, rowsize, \
groupsize, buf, len, ascii) \
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index 888fdbc..dc91a1b 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -16,6 +16,11 @@
#include "of_private.h"
+static struct device_node *kobj_to_device_node(struct kobject *kobj)
+{
+ return container_of(kobj, struct device_node, kobj);
+}
+
/**
* of_node_get() - Increment refcount of a node
* @node: Node to inc refcount, NULL is supported to simplify writing of
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index c4d7fdc..61acd6b 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -35,12 +35,6 @@
extern struct list_head aliases_lookup;
extern struct kset *of_kset;
-
-static inline struct device_node *kobj_to_device_node(struct kobject *kobj)
-{
- return container_of(kobj, struct device_node, kobj);
-}
-
#if defined(CONFIG_OF_DYNAMIC)
extern int of_property_notify(int action, struct device_node *np,
struct property *prop, struct property *old_prop);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
index c043bad..15be5d6 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
@@ -327,6 +327,11 @@
size_t tmp;
gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
+ if (!ipa_ctx->nat_mem.is_dev_init) {
+ IPAERR_RL("Nat table not initialized\n");
+ return -EPERM;
+ }
+
IPADBG("\n");
if (init->table_entries == 0) {
IPADBG("Table entries is zero\n");
@@ -576,6 +581,11 @@
int ret = 0;
gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
+ if (!ipa_ctx->nat_mem.is_dev_init) {
+ IPAERR_RL("Nat table not initialized\n");
+ return -EPERM;
+ }
+
IPADBG("\n");
if (dma->entries <= 0) {
IPAERR_RL("Invalid number of commands %d\n",
@@ -762,6 +772,16 @@
int result;
gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
+ if (!ipa_ctx->nat_mem.is_dev_init) {
+ IPAERR_RL("Nat table not initialized\n");
+ return -EPERM;
+ }
+
+ if (ipa_ctx->nat_mem.public_ip_addr) {
+ IPAERR_RL("Public IP addr not assigned and trying to delete\n");
+ return -EPERM;
+ }
+
IPADBG("\n");
if (ipa_ctx->nat_mem.is_tmp_mem) {
IPAERR("using temp memory during nat del\n");
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
index b48f2c4..7065e2c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
@@ -1258,6 +1258,11 @@
goto bail;
}
+ if (!ipa3_ctx->nat_mem.dev.is_dev_init) {
+ IPAERR_RL("NAT hasn't been initialized\n");
+ return -EPERM;
+ }
+
for (cnt = 0; cnt < dma->entries; ++cnt) {
result = ipa3_table_validate_table_dma_one(&dma->dma[cnt]);
if (result) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h
index 9bc6d39..47a03f9 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h
@@ -16,7 +16,7 @@
#include <linux/msm_ipa.h>
/* internal to ipa */
-#define IPA_PM_MAX_CLIENTS 14 /* actual max is value -1 since we start from 1*/
+#define IPA_PM_MAX_CLIENTS 32 /* actual max is value -1 since we start from 1*/
#define IPA_PM_MAX_EX_CL 64
#define IPA_PM_THRESHOLD_MAX 5
#define IPA_PM_EXCEPTION_MAX 2
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 5d6d3cd..4b9268f 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -964,12 +964,12 @@
/* IPA_3_5_1 */
[IPA_3_5_1][IPA_CLIENT_WLAN1_PROD] = {
true, IPA_v3_5_GROUP_UL_DL, true,
- IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP,
QMB_MASTER_SELECT_DDR,
{ 7, 1, 8, 16, IPA_EE_UC } },
[IPA_3_5_1][IPA_CLIENT_USB_PROD] = {
true, IPA_v3_5_GROUP_UL_DL, true,
- IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP,
QMB_MASTER_SELECT_DDR,
{ 0, 0, 8, 16, IPA_EE_AP } },
[IPA_3_5_1][IPA_CLIENT_APPS_LAN_PROD] = {
@@ -979,7 +979,7 @@
{ 8, 7, 8, 16, IPA_EE_AP } },
[IPA_3_5_1][IPA_CLIENT_APPS_WAN_PROD] = {
true, IPA_v3_5_GROUP_UL_DL, true,
- IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP,
QMB_MASTER_SELECT_DDR,
{ 2, 3, 16, 32, IPA_EE_AP } },
[IPA_3_5_1][IPA_CLIENT_APPS_CMD_PROD] = {
diff --git a/drivers/platform/msm/mhi_dev/mhi.c b/drivers/platform/msm/mhi_dev/mhi.c
index fb513ec..f06d6f3 100644
--- a/drivers/platform/msm/mhi_dev/mhi.c
+++ b/drivers/platform/msm/mhi_dev/mhi.c
@@ -1021,10 +1021,9 @@
if (rc)
pr_err("Error sending command completion event\n");
+ mhi_update_state_info(ch_id, MHI_STATE_CONNECTED);
/* Trigger callback to clients */
mhi_dev_trigger_cb();
-
- mhi_update_state_info(ch_id, MHI_STATE_CONNECTED);
if (ch_id == MHI_CLIENT_MBIM_OUT)
kobject_uevent_env(&mhi_ctx->dev->kobj,
KOBJ_CHANGE, connected);
@@ -2455,8 +2454,10 @@
* If channel is open during registration, no callback is issued.
* Instead return -EEXIST to notify the client. Clients request
* is added to the list to notify future state change notification.
+ * Channel struct may not be allocated yet if this function is called
+ * early during boot - add an explicit check for non-null "ch".
*/
- if (mhi_ctx->ch[channel].state == MHI_DEV_CH_STARTED) {
+ if (mhi_ctx->ch && (mhi_ctx->ch[channel].state == MHI_DEV_CH_STARTED)) {
mutex_unlock(&mhi_ctx->mhi_lock);
return -EEXIST;
}
@@ -2835,8 +2836,6 @@
INIT_LIST_HEAD(&mhi_ctx->event_ring_list);
INIT_LIST_HEAD(&mhi_ctx->process_ring_list);
- INIT_LIST_HEAD(&mhi_ctx->client_cb_list);
- mutex_init(&mhi_ctx->mhi_lock);
mutex_init(&mhi_ctx->mhi_event_lock);
mutex_init(&mhi_ctx->mhi_write_test);
@@ -2986,6 +2985,14 @@
dev_err(&pdev->dev,
"Failed to create IPC logging context\n");
}
+ /*
+ * The below list and mutex should be initialized
+ * before calling mhi_uci_init to avoid crash in
+ * mhi_register_state_cb when accessing these.
+ */
+ INIT_LIST_HEAD(&mhi_ctx->client_cb_list);
+ mutex_init(&mhi_ctx->mhi_lock);
+
mhi_uci_init();
mhi_update_state_info(MHI_DEV_UEVENT_CTRL,
MHI_STATE_CONFIGURED);
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index 3dd1722..3ab93fe 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -331,6 +331,10 @@
POWER_SUPPLY_ATTR(allow_hvdcp3),
POWER_SUPPLY_ATTR(hvdcp_opti_allowed),
POWER_SUPPLY_ATTR(max_pulse_allowed),
+ POWER_SUPPLY_ATTR(ignore_false_negative_isense),
+ POWER_SUPPLY_ATTR(battery_info),
+ POWER_SUPPLY_ATTR(battery_info_id),
+ POWER_SUPPLY_ATTR(enable_jeita_detection),
/* Local extensions of type int64_t */
POWER_SUPPLY_ATTR(charge_counter_ext),
/* Properties of type `const char *' */
diff --git a/drivers/power/supply/qcom/fg-alg.c b/drivers/power/supply/qcom/fg-alg.c
index 9b9a880..129af7b 100644
--- a/drivers/power/supply/qcom/fg-alg.c
+++ b/drivers/power/supply/qcom/fg-alg.c
@@ -19,6 +19,7 @@
#include "fg-alg.h"
#define FULL_SOC_RAW 255
+#define FULL_BATT_SOC GENMASK(31, 0)
#define CAPACITY_DELTA_DECIPCT 500
/* Cycle counter APIs */
@@ -351,7 +352,7 @@
*/
static int cap_learning_process_full_data(struct cap_learning *cl)
{
- int rc, cc_soc_sw, cc_soc_delta_pct;
+ int rc, cc_soc_sw, cc_soc_delta_centi_pct;
int64_t delta_cap_uah;
rc = cl->get_cc_soc(cl->data, &cc_soc_sw);
@@ -360,20 +361,21 @@
return rc;
}
- cc_soc_delta_pct =
- div64_s64((int64_t)(cc_soc_sw - cl->init_cc_soc_sw) * 100,
+ cc_soc_delta_centi_pct =
+ div64_s64((int64_t)(cc_soc_sw - cl->init_cc_soc_sw) * 10000,
cl->cc_soc_max);
/* If the delta is < 50%, then skip processing full data */
- if (cc_soc_delta_pct < 50) {
- pr_err("cc_soc_delta_pct: %d\n", cc_soc_delta_pct);
+ if (cc_soc_delta_centi_pct < 5000) {
+ pr_err("cc_soc_delta_centi_pct: %d\n", cc_soc_delta_centi_pct);
return -ERANGE;
}
- delta_cap_uah = div64_s64(cl->learned_cap_uah * cc_soc_delta_pct, 100);
+ delta_cap_uah = div64_s64(cl->learned_cap_uah * cc_soc_delta_centi_pct,
+ 10000);
cl->final_cap_uah = cl->init_cap_uah + delta_cap_uah;
- pr_debug("Current cc_soc=%d cc_soc_delta_pct=%d total_cap_uah=%lld\n",
- cc_soc_sw, cc_soc_delta_pct, cl->final_cap_uah);
+ pr_debug("Current cc_soc=%d cc_soc_delta_centi_pct=%d total_cap_uah=%lld\n",
+ cc_soc_sw, cc_soc_delta_centi_pct, cl->final_cap_uah);
return 0;
}
@@ -401,8 +403,8 @@
return -EINVAL;
}
- cl->init_cap_uah = div64_s64(cl->learned_cap_uah * batt_soc_msb,
- FULL_SOC_RAW);
+ cl->init_cap_uah = div64_s64(cl->learned_cap_uah * batt_soc,
+ FULL_BATT_SOC);
if (cl->prime_cc_soc) {
/*
diff --git a/drivers/power/supply/qcom/qpnp-fg.c b/drivers/power/supply/qcom/qpnp-fg.c
index 015da41..deccb20 100644
--- a/drivers/power/supply/qcom/qpnp-fg.c
+++ b/drivers/power/supply/qcom/qpnp-fg.c
@@ -33,6 +33,7 @@
#include <linux/ktime.h>
#include <linux/power_supply.h>
#include <linux/of_batterydata.h>
+#include <linux/spinlock.h>
#include <linux/string_helpers.h>
#include <linux/alarmtimer.h>
#include <linux/qpnp/qpnp-revid.h>
@@ -72,6 +73,7 @@
#define QPNP_FG_DEV_NAME "qcom,qpnp-fg"
#define MEM_IF_TIMEOUT_MS 5000
+#define FG_CYCLE_MS 1500
#define BUCKET_COUNT 8
#define BUCKET_SOC_PCT (256 / BUCKET_COUNT)
@@ -108,6 +110,7 @@
PMI8950 = 17,
PMI8996 = 19,
PMI8937 = 55,
+ PMI8940 = 64,
};
enum wa_flags {
@@ -150,6 +153,8 @@
int min_temp;
int max_temp;
int vbat_est_thr_uv;
+ int max_cap_limit;
+ int min_cap_limit;
};
struct fg_rslow_data {
@@ -275,11 +280,45 @@
DATA(BATT_ID_INFO, 0x594, 3, 1, -EINVAL),
};
+enum fg_mem_backup_index {
+ FG_BACKUP_SOC = 0,
+ FG_BACKUP_CYCLE_COUNT,
+ FG_BACKUP_CC_SOC_COEFF,
+ FG_BACKUP_IGAIN,
+ FG_BACKUP_VCOR,
+ FG_BACKUP_TEMP_COUNTER,
+ FG_BACKUP_AGING_STORAGE,
+ FG_BACKUP_MAH_TO_SOC,
+ FG_BACKUP_MAX,
+};
+
+#define BACKUP(_idx, _address, _offset, _length, _value) \
+ [FG_BACKUP_##_idx] = { \
+ .address = _address, \
+ .offset = _offset, \
+ .len = _length, \
+ .value = _value, \
+ } \
+
+static struct fg_mem_data fg_backup_regs[FG_BACKUP_MAX] = {
+ /* ID Address, Offset, Length, Value*/
+ BACKUP(SOC, 0x564, 0, 24, -EINVAL),
+ BACKUP(CYCLE_COUNT, 0x5E8, 0, 16, -EINVAL),
+ BACKUP(CC_SOC_COEFF, 0x5BC, 0, 8, -EINVAL),
+ BACKUP(IGAIN, 0x424, 0, 4, -EINVAL),
+ BACKUP(VCOR, 0x484, 0, 4, -EINVAL),
+ BACKUP(TEMP_COUNTER, 0x580, 0, 4, -EINVAL),
+ BACKUP(AGING_STORAGE, 0x5E4, 0, 4, -EINVAL),
+ BACKUP(MAH_TO_SOC, 0x4A0, 0, 4, -EINVAL),
+};
+
static int fg_debug_mask;
module_param_named(
debug_mask, fg_debug_mask, int, 00600
);
+static int fg_reset_on_lockup;
+
static int fg_sense_type = -EINVAL;
static int fg_restart;
@@ -298,9 +337,18 @@
sram_update_period_ms, fg_sram_update_period_ms, int, 00600
);
+static bool fg_batt_valid_ocv;
+module_param_named(batt_valid_ocv, fg_batt_valid_ocv, bool, 0600
+);
+
+static int fg_batt_range_pct;
+module_param_named(batt_range_pct, fg_batt_range_pct, int, 0600
+);
+
struct fg_irq {
int irq;
- unsigned long disabled;
+ bool disabled;
+ bool wakeup;
};
enum fg_soc_irq {
@@ -348,6 +396,16 @@
MAX_ADDRESS,
};
+enum batt_info_params {
+ BATT_INFO_NOTIFY = 0,
+ BATT_INFO_SOC,
+ BATT_INFO_RES_ID,
+ BATT_INFO_VOLTAGE,
+ BATT_INFO_TEMP,
+ BATT_INFO_FCC,
+ BATT_INFO_MAX,
+};
+
struct register_offset {
u16 address[MAX_ADDRESS];
};
@@ -395,6 +453,22 @@
}
}
+enum slope_limit_status {
+ LOW_TEMP_CHARGE,
+ HIGH_TEMP_CHARGE,
+ LOW_TEMP_DISCHARGE,
+ HIGH_TEMP_DISCHARGE,
+ SLOPE_LIMIT_MAX,
+};
+
+#define VOLT_GAIN_MAX 3
+struct dischg_gain_soc {
+ bool enable;
+ u32 soc[VOLT_GAIN_MAX];
+ u32 medc_gain[VOLT_GAIN_MAX];
+ u32 highc_gain[VOLT_GAIN_MAX];
+};
+
#define THERMAL_COEFF_N_BYTES 6
struct fg_chip {
struct device *dev;
@@ -420,6 +494,7 @@
struct completion first_soc_done;
struct power_supply *bms_psy;
struct power_supply_desc bms_psy_d;
+ spinlock_t sec_access_lock;
struct mutex rw_lock;
struct mutex sysfs_restart_lock;
struct delayed_work batt_profile_init;
@@ -449,6 +524,7 @@
struct fg_wakeup_source update_sram_wakeup_source;
bool fg_restarting;
bool profile_loaded;
+ bool soc_reporting_ready;
bool use_otp_profile;
bool battery_missing;
bool power_supply_registered;
@@ -459,6 +535,7 @@
bool charge_done;
bool resume_soc_lowered;
bool vbat_low_irq_enabled;
+ bool full_soc_irq_enabled;
bool charge_full;
bool hold_soc_while_full;
bool input_present;
@@ -467,6 +544,10 @@
bool bad_batt_detection_en;
bool bcl_lpm_disabled;
bool charging_disabled;
+ bool use_vbat_low_empty_soc;
+ bool fg_shutdown;
+ bool use_soft_jeita_irq;
+ bool allow_false_negative_isense;
struct delayed_work update_jeita_setting;
struct delayed_work update_sram_data;
struct delayed_work update_temp_work;
@@ -491,6 +572,7 @@
int prev_status;
int health;
enum fg_batt_aging_mode batt_aging_mode;
+ struct alarm hard_jeita_alarm;
/* capacity learning */
struct fg_learning_data learning_data;
struct alarm fg_cap_learning_alarm;
@@ -498,6 +580,7 @@
struct fg_cc_soc_data sw_cc_soc_data;
/* rslow compensation */
struct fg_rslow_data rslow_comp;
+ int rconn_mohm;
/* cycle counter */
struct fg_cyc_ctr_data cyc_ctr;
/* iadc compensation */
@@ -510,6 +593,8 @@
bool jeita_hysteresis_support;
bool batt_hot;
bool batt_cold;
+ bool batt_warm;
+ bool batt_cool;
int cold_hysteresis;
int hot_hysteresis;
/* ESR pulse tuning */
@@ -518,6 +603,47 @@
bool esr_extract_disabled;
bool imptr_pulse_slow_en;
bool esr_pulse_tune_en;
+ /* Slope limiter */
+ struct work_struct slope_limiter_work;
+ struct fg_wakeup_source slope_limit_wakeup_source;
+ bool soc_slope_limiter_en;
+ enum slope_limit_status slope_limit_sts;
+ u32 slope_limit_temp;
+ u32 slope_limit_coeffs[SLOPE_LIMIT_MAX];
+ /* Discharge soc gain */
+ struct work_struct dischg_gain_work;
+ struct fg_wakeup_source dischg_gain_wakeup_source;
+ struct dischg_gain_soc dischg_gain;
+ /* IMA error recovery */
+ struct completion fg_reset_done;
+ struct work_struct ima_error_recovery_work;
+ struct fg_wakeup_source fg_reset_wakeup_source;
+ struct mutex ima_recovery_lock;
+ bool ima_error_handling;
+ bool block_sram_access;
+ bool irqs_enabled;
+ bool use_last_soc;
+ int last_soc;
+ /* Validating temperature */
+ int last_good_temp;
+ int batt_temp_low_limit;
+ int batt_temp_high_limit;
+ /* Validating CC_SOC */
+ struct work_struct cc_soc_store_work;
+ struct fg_wakeup_source cc_soc_wakeup_source;
+ int cc_soc_limit_pct;
+ bool use_last_cc_soc;
+ int64_t last_cc_soc;
+ /* Sanity check */
+ struct delayed_work check_sanity_work;
+ struct fg_wakeup_source sanity_wakeup_source;
+ u8 last_beat_count;
+ /* Batt_info restore */
+ int batt_info[BATT_INFO_MAX];
+ int batt_info_id;
+ bool batt_info_restore;
+ bool *batt_range_ocv;
+ int *batt_range_pct;
};
/* FG_MEMIF DEBUGFS structures */
@@ -661,17 +787,56 @@
return rc;
}
-static int fg_masked_write(struct fg_chip *chip, u16 addr,
+static int fg_masked_write_raw(struct fg_chip *chip, u16 addr,
u8 mask, u8 val, int len)
{
int rc;
rc = regmap_update_bits(chip->regmap, addr, mask, val);
- if (rc) {
+ if (rc)
pr_err("spmi write failed: addr=%03X, rc=%d\n", addr, rc);
- return rc;
+
+ return rc;
+}
+
+static int fg_masked_write(struct fg_chip *chip, u16 addr,
+ u8 mask, u8 val, int len)
+{
+ int rc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chip->sec_access_lock, flags);
+ rc = fg_masked_write_raw(chip, addr, mask, val, len);
+ spin_unlock_irqrestore(&chip->sec_access_lock, flags);
+
+ return rc;
+}
+
+#define SEC_ACCESS_OFFSET 0xD0
+#define SEC_ACCESS_VALUE 0xA5
+#define PERIPHERAL_MASK 0xFF
+static int fg_sec_masked_write(struct fg_chip *chip, u16 addr, u8 mask, u8 val,
+ int len)
+{
+ int rc;
+ unsigned long flags;
+ u8 temp;
+ u16 base = addr & (~PERIPHERAL_MASK);
+
+ spin_lock_irqsave(&chip->sec_access_lock, flags);
+ temp = SEC_ACCESS_VALUE;
+ rc = fg_write(chip, &temp, base + SEC_ACCESS_OFFSET, 1);
+ if (rc) {
+ pr_err("Unable to unlock sec_access: %d\n", rc);
+ goto out;
}
+ rc = fg_masked_write_raw(chip, addr, mask, val, len);
+ if (rc)
+ pr_err("Unable to write securely to address 0x%x: %d", addr,
+ rc);
+out:
+ spin_unlock_irqrestore(&chip->sec_access_lock, flags);
return rc;
}
@@ -952,6 +1117,7 @@
int rc = 0, user_cnt = 0, sublen;
bool access_configured = false;
u8 *wr_data = val, word[4];
+ u16 orig_address = address;
char str[DEBUG_PRINT_BUFFER_SIZE];
if (address < RAM_OFFSET)
@@ -960,8 +1126,8 @@
if (offset > 3)
return -EINVAL;
- address = ((address + offset) / 4) * 4;
- offset = (address + offset) % 4;
+ address = ((orig_address + offset) / 4) * 4;
+ offset = (orig_address + offset) % 4;
user_cnt = atomic_add_return(1, &chip->memif_user_cnt);
if (fg_debug_mask & FG_MEM_DEBUG_WRITES)
@@ -1061,50 +1227,253 @@
#define MEM_INTF_IMA_EXP_STS 0x55
#define MEM_INTF_IMA_HW_STS 0x56
#define MEM_INTF_IMA_BYTE_EN 0x60
-#define IMA_ADDR_STBL_ERR BIT(7)
-#define IMA_WR_ACS_ERR BIT(6)
-#define IMA_RD_ACS_ERR BIT(5)
#define IMA_IACS_CLR BIT(2)
#define IMA_IACS_RDY BIT(1)
-static int fg_check_ima_exception(struct fg_chip *chip)
+static int fg_run_iacs_clear_sequence(struct fg_chip *chip)
+{
+ int rc = 0;
+ u8 temp;
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("Running IACS clear sequence\n");
+
+ /* clear the error */
+ rc = fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG,
+ IMA_IACS_CLR, IMA_IACS_CLR, 1);
+ if (rc) {
+ pr_err("Error writing to IMA_CFG, rc=%d\n", rc);
+ return rc;
+ }
+
+ temp = 0x4;
+ rc = fg_write(chip, &temp, MEM_INTF_ADDR_LSB(chip) + 1, 1);
+ if (rc) {
+ pr_err("Error writing to MEM_INTF_ADDR_MSB, rc=%d\n", rc);
+ return rc;
+ }
+
+ temp = 0x0;
+ rc = fg_write(chip, &temp, MEM_INTF_WR_DATA0(chip) + 3, 1);
+ if (rc) {
+ pr_err("Error writing to WR_DATA3, rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = fg_read(chip, &temp, MEM_INTF_RD_DATA0(chip) + 3, 1);
+ if (rc) {
+ pr_err("Error writing to RD_DATA3, rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG,
+ IMA_IACS_CLR, 0, 1);
+ if (rc) {
+ pr_err("Error writing to IMA_CFG, rc=%d\n", rc);
+ return rc;
+ }
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("IACS clear sequence complete!\n");
+ return rc;
+}
+
+#define IACS_ERR_BIT BIT(0)
+#define XCT_ERR_BIT BIT(1)
+#define DATA_RD_ERR_BIT BIT(3)
+#define DATA_WR_ERR_BIT BIT(4)
+#define ADDR_BURST_WRAP_BIT BIT(5)
+#define ADDR_RNG_ERR_BIT BIT(6)
+#define ADDR_SRC_ERR_BIT BIT(7)
+static int fg_check_ima_exception(struct fg_chip *chip, bool check_hw_sts)
{
int rc = 0, ret = 0;
- u8 err_sts, exp_sts = 0, hw_sts = 0;
+ u8 err_sts = 0, exp_sts = 0, hw_sts = 0;
+ bool run_err_clr_seq = false;
rc = fg_read(chip, &err_sts,
chip->mem_base + MEM_INTF_IMA_ERR_STS, 1);
if (rc) {
- pr_err("failed to read beat count rc=%d\n", rc);
+ pr_err("failed to read IMA_ERR_STS, rc=%d\n", rc);
return rc;
}
- if (err_sts & (IMA_ADDR_STBL_ERR | IMA_WR_ACS_ERR | IMA_RD_ACS_ERR)) {
- u8 temp;
-
- fg_read(chip, &exp_sts,
+ rc = fg_read(chip, &exp_sts,
chip->mem_base + MEM_INTF_IMA_EXP_STS, 1);
- fg_read(chip, &hw_sts,
- chip->mem_base + MEM_INTF_IMA_HW_STS, 1);
- pr_err("IMA access failed ima_err_sts=%x ima_exp_sts=%x ima_hw_sts=%x\n",
- err_sts, exp_sts, hw_sts);
- rc = err_sts;
-
- /* clear the error */
- ret |= fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG,
- IMA_IACS_CLR, IMA_IACS_CLR, 1);
- temp = 0x4;
- ret |= fg_write(chip, &temp, MEM_INTF_ADDR_LSB(chip) + 1, 1);
- temp = 0x0;
- ret |= fg_write(chip, &temp, MEM_INTF_WR_DATA0(chip) + 3, 1);
- ret |= fg_read(chip, &temp, MEM_INTF_RD_DATA0(chip) + 3, 1);
- ret |= fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG,
- IMA_IACS_CLR, 0, 1);
- if (!ret)
- return -EAGAIN;
-
- pr_err("Error clearing IMA exception ret=%d\n", ret);
+ if (rc) {
+ pr_err("Error in reading IMA_EXP_STS, rc=%d\n", rc);
+ return rc;
}
+ rc = fg_read(chip, &hw_sts,
+ chip->mem_base + MEM_INTF_IMA_HW_STS, 1);
+ if (rc) {
+ pr_err("Error in reading IMA_HW_STS, rc=%d\n", rc);
+ return rc;
+ }
+
+ pr_info_once("Initial ima_err_sts=%x ima_exp_sts=%x ima_hw_sts=%x\n",
+ err_sts, exp_sts, hw_sts);
+
+ if (fg_debug_mask & (FG_MEM_DEBUG_READS | FG_MEM_DEBUG_WRITES))
+ pr_info("ima_err_sts=%x ima_exp_sts=%x ima_hw_sts=%x\n",
+ err_sts, exp_sts, hw_sts);
+
+ if (check_hw_sts) {
+ /*
+ * Lower nibble should be equal to upper nibble before SRAM
+ * transactions begins from SW side. If they are unequal, then
+ * the error clear sequence should be run irrespective of IMA
+ * exception errors.
+ */
+ if ((hw_sts & 0x0F) != hw_sts >> 4) {
+ pr_err("IMA HW not in correct state, hw_sts=%x\n",
+ hw_sts);
+ run_err_clr_seq = true;
+ }
+ }
+
+ if (exp_sts & (IACS_ERR_BIT | XCT_ERR_BIT | DATA_RD_ERR_BIT |
+ DATA_WR_ERR_BIT | ADDR_BURST_WRAP_BIT | ADDR_RNG_ERR_BIT |
+ ADDR_SRC_ERR_BIT)) {
+ pr_err("IMA exception bit set, exp_sts=%x\n", exp_sts);
+ run_err_clr_seq = true;
+ }
+
+ if (run_err_clr_seq) {
+ ret = fg_run_iacs_clear_sequence(chip);
+ if (!ret)
+ return -EAGAIN;
+ else
+ pr_err("Error clearing IMA exception ret=%d\n", ret);
+ }
+
+ return rc;
+}
+
+static void fg_enable_irqs(struct fg_chip *chip, bool enable)
+{
+ if (!(enable ^ chip->irqs_enabled))
+ return;
+
+ if (enable) {
+ enable_irq(chip->soc_irq[DELTA_SOC].irq);
+ enable_irq_wake(chip->soc_irq[DELTA_SOC].irq);
+ if (!chip->full_soc_irq_enabled) {
+ enable_irq(chip->soc_irq[FULL_SOC].irq);
+ enable_irq_wake(chip->soc_irq[FULL_SOC].irq);
+ chip->full_soc_irq_enabled = true;
+ }
+ enable_irq(chip->batt_irq[BATT_MISSING].irq);
+ if (!chip->vbat_low_irq_enabled) {
+ enable_irq(chip->batt_irq[VBATT_LOW].irq);
+ enable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
+ chip->vbat_low_irq_enabled = true;
+ }
+ if (!chip->use_vbat_low_empty_soc) {
+ enable_irq(chip->soc_irq[EMPTY_SOC].irq);
+ enable_irq_wake(chip->soc_irq[EMPTY_SOC].irq);
+ }
+ chip->irqs_enabled = true;
+ } else {
+ disable_irq_wake(chip->soc_irq[DELTA_SOC].irq);
+ disable_irq_nosync(chip->soc_irq[DELTA_SOC].irq);
+ if (chip->full_soc_irq_enabled) {
+ disable_irq_wake(chip->soc_irq[FULL_SOC].irq);
+ disable_irq_nosync(chip->soc_irq[FULL_SOC].irq);
+ chip->full_soc_irq_enabled = false;
+ }
+ disable_irq(chip->batt_irq[BATT_MISSING].irq);
+ if (chip->vbat_low_irq_enabled) {
+ disable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
+ disable_irq_nosync(chip->batt_irq[VBATT_LOW].irq);
+ chip->vbat_low_irq_enabled = false;
+ }
+ if (!chip->use_vbat_low_empty_soc) {
+ disable_irq_wake(chip->soc_irq[EMPTY_SOC].irq);
+ disable_irq_nosync(chip->soc_irq[EMPTY_SOC].irq);
+ }
+ chip->irqs_enabled = false;
+ }
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("FG interrupts are %sabled\n", enable ? "en" : "dis");
+}
+
+static void fg_check_ima_error_handling(struct fg_chip *chip)
+{
+ if (chip->ima_error_handling) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("IMA error is handled already!\n");
+ return;
+ }
+ mutex_lock(&chip->ima_recovery_lock);
+ fg_enable_irqs(chip, false);
+ chip->use_last_cc_soc = true;
+ chip->ima_error_handling = true;
+ if (!work_pending(&chip->ima_error_recovery_work))
+ schedule_work(&chip->ima_error_recovery_work);
+ mutex_unlock(&chip->ima_recovery_lock);
+}
+
+#define SOC_ALG_ST 0xCF
+#define FGXCT_PRD BIT(7)
+#define ALG_ST_CHECK_COUNT 20
+static int fg_check_alg_status(struct fg_chip *chip)
+{
+ int rc = 0, timeout = ALG_ST_CHECK_COUNT, count = 0;
+ u8 ima_opr_sts, alg_sts = 0, temp = 0;
+
+ if (!fg_reset_on_lockup) {
+ pr_info("FG lockup detection cannot be run\n");
+ return 0;
+ }
+
+ rc = fg_read(chip, &alg_sts, chip->soc_base + SOC_ALG_ST, 1);
+ if (rc) {
+ pr_err("Error in reading SOC_ALG_ST, rc=%d\n", rc);
+ return rc;
+ }
+
+ while (1) {
+ rc = fg_read(chip, &ima_opr_sts,
+ chip->mem_base + MEM_INTF_IMA_OPR_STS, 1);
+ if (!rc && !(ima_opr_sts & FGXCT_PRD))
+ break;
+
+ if (rc) {
+ pr_err("Error in reading IMA_OPR_STS, rc=%d\n",
+ rc);
+ break;
+ }
+
+ rc = fg_read(chip, &temp, chip->soc_base + SOC_ALG_ST,
+ 1);
+ if (rc) {
+ pr_err("Error in reading SOC_ALG_ST, rc=%d\n",
+ rc);
+ break;
+ }
+
+ if ((ima_opr_sts & FGXCT_PRD) && (temp == alg_sts))
+ count++;
+
+ /* Wait for ~10ms while polling ALG_ST & IMA_OPR_STS */
+ usleep_range(9000, 11000);
+
+ if (!(--timeout))
+ break;
+ }
+
+ if (fg_debug_mask & (FG_MEM_DEBUG_READS | FG_MEM_DEBUG_WRITES))
+ pr_info("ima_opr_sts: %x alg_sts: %x count=%d\n", ima_opr_sts,
+ alg_sts, count);
+
+ if (count == ALG_ST_CHECK_COUNT) {
+ /* If we are here, that means FG ALG is stuck */
+ pr_err("ALG is stuck\n");
+ fg_check_ima_error_handling(chip);
+ rc = -EBUSY;
+ }
return rc;
}
@@ -1122,19 +1491,25 @@
while (1) {
rc = fg_read(chip, &ima_opr_sts,
chip->mem_base + MEM_INTF_IMA_OPR_STS, 1);
- if (!rc && (ima_opr_sts & IMA_IACS_RDY))
+ if (!rc && (ima_opr_sts & IMA_IACS_RDY)) {
break;
+ } else {
+ if (!(--timeout) || rc)
+ break;
- if (!(--timeout) || rc)
- break;
- /* delay for iacs_ready to be asserted */
- usleep_range(5000, 7000);
+ /* delay for iacs_ready to be asserted */
+ usleep_range(5000, 7000);
+ }
}
if (!timeout || rc) {
- pr_err("IACS_RDY not set\n");
+ pr_err("IACS_RDY not set, ima_opr_sts: %x\n", ima_opr_sts);
+ rc = fg_check_alg_status(chip);
+ if (rc && rc != -EBUSY)
+ pr_err("Couldn't check FG ALG status, rc=%d\n",
+ rc);
/* perform IACS_CLR sequence */
- fg_check_ima_exception(chip);
+ fg_check_ima_exception(chip, false);
return -EBUSY;
}
@@ -1154,15 +1529,16 @@
while (len > 0) {
num_bytes = (offset + len) > BUF_LEN ?
- (BUF_LEN - offset) : len;
+ (BUF_LEN - offset) : len;
/* write to byte_enable */
for (i = offset; i < (offset + num_bytes); i++)
byte_enable |= BIT(i);
rc = fg_write(chip, &byte_enable,
- chip->mem_base + MEM_INTF_IMA_BYTE_EN, 1);
+ chip->mem_base + MEM_INTF_IMA_BYTE_EN, 1);
if (rc) {
- pr_err("Unable to write to byte_en_reg rc=%d\n", rc);
+ pr_err("Unable to write to byte_en_reg rc=%d\n",
+ rc);
return rc;
}
/* write data */
@@ -1193,12 +1569,13 @@
rc = fg_check_iacs_ready(chip);
if (rc) {
- pr_debug("IACS_RDY failed rc=%d\n", rc);
+ pr_err("IACS_RDY failed post write to address %x offset %d rc=%d\n",
+ address, offset, rc);
return rc;
}
/* check for error condition */
- rc = fg_check_ima_exception(chip);
+ rc = fg_check_ima_exception(chip, false);
if (rc) {
pr_err("IMA transaction failed rc=%d", rc);
return rc;
@@ -1239,12 +1616,13 @@
rc = fg_check_iacs_ready(chip);
if (rc) {
- pr_debug("IACS_RDY failed rc=%d\n", rc);
+ pr_err("IACS_RDY failed post read for address %x offset %d rc=%d\n",
+ address, offset, rc);
return rc;
}
/* check for error condition */
- rc = fg_check_ima_exception(chip);
+ rc = fg_check_ima_exception(chip, false);
if (rc) {
pr_err("IMA transaction failed rc=%d", rc);
return rc;
@@ -1296,7 +1674,7 @@
* clear, then return an error instead of waiting for it again.
*/
if (time_count > 4) {
- pr_err("Waited for 1.5 seconds polling RIF_MEM_ACCESS_REQ\n");
+ pr_err("Waited for ~16ms polling RIF_MEM_ACCESS_REQ\n");
return -ETIMEDOUT;
}
@@ -1322,7 +1700,8 @@
rc = fg_check_iacs_ready(chip);
if (rc) {
- pr_debug("IACS_RDY failed rc=%d\n", rc);
+ pr_err("IACS_RDY failed before setting address: %x offset: %d rc=%d\n",
+ address, offset, rc);
return rc;
}
@@ -1335,7 +1714,8 @@
rc = fg_check_iacs_ready(chip);
if (rc)
- pr_debug("IACS_RDY failed rc=%d\n", rc);
+ pr_err("IACS_RDY failed after setting address: %x offset: %d rc=%d\n",
+ address, offset, rc);
return rc;
}
@@ -1346,10 +1726,13 @@
static int fg_interleaved_mem_read(struct fg_chip *chip, u8 *val, u16 address,
int len, int offset)
{
- int rc = 0, orig_address = address;
+ int rc = 0, ret, orig_address = address;
u8 start_beat_count, end_beat_count, count = 0;
bool retry = false;
+ if (chip->fg_shutdown)
+ return -EINVAL;
+
if (offset > 3) {
pr_err("offset too large %d\n", offset);
return -EINVAL;
@@ -1372,11 +1755,22 @@
}
mutex_lock(&chip->rw_lock);
+ if (fg_debug_mask & FG_MEM_DEBUG_READS)
+ pr_info("Read for %d bytes is attempted @ 0x%x[%d]\n",
+ len, address, offset);
retry:
+ if (count >= RETRY_COUNT) {
+ pr_err("Retried reading 3 times\n");
+ retry = false;
+ goto out;
+ }
+
rc = fg_interleaved_mem_config(chip, val, address, offset, len, 0);
if (rc) {
pr_err("failed to configure SRAM for IMA rc = %d\n", rc);
+ retry = true;
+ count++;
goto out;
}
@@ -1385,18 +1779,21 @@
chip->mem_base + MEM_INTF_FG_BEAT_COUNT, 1);
if (rc) {
pr_err("failed to read beat count rc=%d\n", rc);
+ retry = true;
+ count++;
goto out;
}
/* read data */
rc = __fg_interleaved_mem_read(chip, val, address, offset, len);
if (rc) {
+ count++;
if ((rc == -EAGAIN) && (count < RETRY_COUNT)) {
- count++;
pr_err("IMA access failed retry_count = %d\n", count);
goto retry;
} else {
pr_err("failed to read SRAM address rc = %d\n", rc);
+ retry = true;
goto out;
}
}
@@ -1406,6 +1803,8 @@
chip->mem_base + MEM_INTF_FG_BEAT_COUNT, 1);
if (rc) {
pr_err("failed to read beat count rc=%d\n", rc);
+ retry = true;
+ count++;
goto out;
}
@@ -1418,12 +1817,13 @@
if (fg_debug_mask & FG_MEM_DEBUG_READS)
pr_info("Beat count do not match - retry transaction\n");
retry = true;
+ count++;
}
out:
/* Release IMA access */
- rc = fg_masked_write(chip, MEM_INTF_CFG(chip), IMA_REQ_ACCESS, 0, 1);
- if (rc)
- pr_err("failed to reset IMA access bit rc = %d\n", rc);
+ ret = fg_masked_write(chip, MEM_INTF_CFG(chip), IMA_REQ_ACCESS, 0, 1);
+ if (ret)
+ pr_err("failed to reset IMA access bit ret = %d\n", ret);
if (retry) {
retry = false;
@@ -1439,8 +1839,12 @@
static int fg_interleaved_mem_write(struct fg_chip *chip, u8 *val, u16 address,
int len, int offset)
{
- int rc = 0, orig_address = address;
+ int rc = 0, ret, orig_address = address;
u8 count = 0;
+ bool retry = false;
+
+ if (chip->fg_shutdown)
+ return -EINVAL;
if (address < RAM_OFFSET)
return -EINVAL;
@@ -1455,32 +1859,49 @@
offset = (orig_address + offset) % 4;
mutex_lock(&chip->rw_lock);
+ if (fg_debug_mask & FG_MEM_DEBUG_WRITES)
+ pr_info("Write for %d bytes is attempted @ 0x%x[%d]\n",
+ len, address, offset);
retry:
+ if (count >= RETRY_COUNT) {
+ pr_err("Retried writing 3 times\n");
+ retry = false;
+ goto out;
+ }
+
rc = fg_interleaved_mem_config(chip, val, address, offset, len, 1);
if (rc) {
- pr_err("failed to xonfigure SRAM for IMA rc = %d\n", rc);
+ pr_err("failed to configure SRAM for IMA rc = %d\n", rc);
+ retry = true;
+ count++;
goto out;
}
/* write data */
rc = __fg_interleaved_mem_write(chip, val, address, offset, len);
if (rc) {
+ count++;
if ((rc == -EAGAIN) && (count < RETRY_COUNT)) {
- count++;
pr_err("IMA access failed retry_count = %d\n", count);
goto retry;
} else {
pr_err("failed to write SRAM address rc = %d\n", rc);
+ retry = true;
goto out;
}
}
out:
/* Release IMA access */
- rc = fg_masked_write(chip, MEM_INTF_CFG(chip), IMA_REQ_ACCESS, 0, 1);
- if (rc)
- pr_err("failed to reset IMA access bit rc = %d\n", rc);
+ ret = fg_masked_write(chip, MEM_INTF_CFG(chip), IMA_REQ_ACCESS, 0, 1);
+ if (ret)
+ pr_err("failed to reset IMA access bit ret = %d\n", ret);
+
+ if (retry) {
+ retry = false;
+ goto retry;
+ }
mutex_unlock(&chip->rw_lock);
fg_relax(&chip->memif_wakeup_source);
@@ -1490,6 +1911,9 @@
static int fg_mem_read(struct fg_chip *chip, u8 *val, u16 address,
int len, int offset, bool keep_access)
{
+ if (chip->block_sram_access)
+ return -EBUSY;
+
if (chip->ima_supported)
return fg_interleaved_mem_read(chip, val, address,
len, offset);
@@ -1501,6 +1925,9 @@
static int fg_mem_write(struct fg_chip *chip, u8 *val, u16 address,
int len, int offset, bool keep_access)
{
+ if (chip->block_sram_access)
+ return -EBUSY;
+
if (chip->ima_supported)
return fg_interleaved_mem_write(chip, val, address,
len, offset);
@@ -1538,6 +1965,62 @@
return rc;
}
+static u8 sram_backup_buffer[100];
+static int fg_backup_sram_registers(struct fg_chip *chip, bool save)
+{
+ int rc, i, len, offset;
+ u16 address;
+ u8 *ptr;
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("%sing SRAM registers\n", save ? "Back" : "Restor");
+
+ ptr = sram_backup_buffer;
+ for (i = 0; i < FG_BACKUP_MAX; i++) {
+ address = fg_backup_regs[i].address;
+ offset = fg_backup_regs[i].offset;
+ len = fg_backup_regs[i].len;
+ if (save)
+ rc = fg_interleaved_mem_read(chip, ptr, address,
+ len, offset);
+ else
+ rc = fg_interleaved_mem_write(chip, ptr, address,
+ len, offset);
+ if (rc) {
+ pr_err("Error in accessing %d bytes from %x[%d], rc=%d\n",
+ len, address, offset, rc);
+ break;
+ }
+ ptr += len;
+ }
+
+ return rc;
+}
+
+#define SOC_FG_RESET 0xF3
+#define RESET_MASK (BIT(7) | BIT(5))
+static int fg_reset(struct fg_chip *chip, bool reset)
+{
+ int rc;
+
+ rc = fg_sec_masked_write(chip, chip->soc_base + SOC_FG_RESET,
+ 0xFF, reset ? RESET_MASK : 0, 1);
+ if (rc)
+ pr_err("Error in writing to 0x%x, rc=%d\n", SOC_FG_RESET, rc);
+
+ return rc;
+}
+
+static void fg_handle_battery_insertion(struct fg_chip *chip)
+{
+ reinit_completion(&chip->batt_id_avail);
+ reinit_completion(&chip->fg_reset_done);
+ schedule_delayed_work(&chip->batt_profile_init, 0);
+ cancel_delayed_work(&chip->update_sram_data);
+ schedule_delayed_work(&chip->update_sram_data, msecs_to_jiffies(0));
+}
+
+
static int soc_to_setpoint(int soc)
{
return DIV_ROUND_CLOSEST(soc * 255, 100);
@@ -1550,6 +2033,7 @@
val = DIV_ROUND_CLOSEST(vbatt_mv * 32768, 5000);
data[0] = val & 0xFF;
data[1] = val >> 8;
+ return;
}
static u8 batt_to_setpoint_8b(int vbatt_mv)
@@ -1678,14 +2162,37 @@
return rc;
}
+#define VBATT_LOW_STS_BIT BIT(2)
+static int fg_get_vbatt_status(struct fg_chip *chip, bool *vbatt_low_sts)
+{
+ int rc = 0;
+ u8 fg_batt_sts;
+
+ rc = fg_read(chip, &fg_batt_sts, INT_RT_STS(chip->batt_base), 1);
+ if (rc)
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->batt_base), rc);
+ else
+ *vbatt_low_sts = !!(fg_batt_sts & VBATT_LOW_STS_BIT);
+
+ return rc;
+}
+
#define SOC_EMPTY BIT(3)
static bool fg_is_batt_empty(struct fg_chip *chip)
{
u8 fg_soc_sts;
int rc;
+ bool vbatt_low_sts;
- rc = fg_read(chip, &fg_soc_sts,
- INT_RT_STS(chip->soc_base), 1);
+ if (chip->use_vbat_low_empty_soc) {
+ if (fg_get_vbatt_status(chip, &vbatt_low_sts))
+ return false;
+
+ return vbatt_low_sts;
+ }
+
+ rc = fg_read(chip, &fg_soc_sts, INT_RT_STS(chip->soc_base), 1);
if (rc) {
pr_err("spmi read failed: addr=%03X, rc=%d\n",
INT_RT_STS(chip->soc_base), rc);
@@ -1732,7 +2239,16 @@
#define FULL_SOC_RAW 0xFF
static int get_prop_capacity(struct fg_chip *chip)
{
- int msoc;
+ int msoc, rc;
+ bool vbatt_low_sts;
+
+ if (chip->use_last_soc && chip->last_soc) {
+ if (chip->last_soc == FULL_SOC_RAW)
+ return FULL_CAPACITY;
+ return DIV_ROUND_CLOSEST((chip->last_soc - 1) *
+ (FULL_CAPACITY - 2),
+ FULL_SOC_RAW - 2) + 1;
+ }
if (chip->battery_missing)
return MISSING_CAPACITY;
@@ -1747,10 +2263,28 @@
return EMPTY_CAPACITY;
}
msoc = get_monotonic_soc_raw(chip);
- if (msoc == 0)
- return EMPTY_CAPACITY;
- else if (msoc == FULL_SOC_RAW)
+ if (msoc == 0) {
+ if (fg_reset_on_lockup && chip->use_vbat_low_empty_soc) {
+ rc = fg_get_vbatt_status(chip, &vbatt_low_sts);
+ if (rc) {
+ pr_err("Error in reading vbatt_status, rc=%d\n",
+ rc);
+ return EMPTY_CAPACITY;
+ }
+
+ if (!vbatt_low_sts)
+ return DIV_ROUND_CLOSEST((chip->last_soc - 1) *
+ (FULL_CAPACITY - 2),
+ FULL_SOC_RAW - 2) + 1;
+ else
+ return EMPTY_CAPACITY;
+ } else {
+ return EMPTY_CAPACITY;
+ }
+ } else if (msoc == FULL_SOC_RAW) {
return FULL_CAPACITY;
+ }
+
return DIV_ROUND_CLOSEST((msoc - 1) * (FULL_CAPACITY - 2),
FULL_SOC_RAW - 2) + 1;
}
@@ -1843,6 +2377,25 @@
return 0;
}
+#define IGNORE_FALSE_NEGATIVE_ISENSE_BIT BIT(3)
+static int set_prop_ignore_false_negative_isense(struct fg_chip *chip,
+ bool ignore)
+{
+ int rc;
+
+ rc = fg_mem_masked_write(chip, EXTERNAL_SENSE_SELECT,
+ IGNORE_FALSE_NEGATIVE_ISENSE_BIT,
+ ignore ? IGNORE_FALSE_NEGATIVE_ISENSE_BIT : 0,
+ EXTERNAL_SENSE_OFFSET);
+ if (rc) {
+ pr_err("failed to %s isense false negative ignore rc=%d\n",
+ ignore ? "enable" : "disable", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
#define EXPONENT_MASK 0xF800
#define MANTISSA_MASK 0x3FF
#define SIGN BIT(10)
@@ -1953,8 +2506,7 @@
return rc;
}
- if (fg_debug_mask & FG_IRQS)
- pr_info("fg batt sts 0x%x\n", fg_batt_sts);
+ pr_debug("fg batt sts 0x%x\n", fg_batt_sts);
return (fg_batt_sts & BATT_IDED) ? 1 : 0;
}
@@ -1984,7 +2536,7 @@
#define DECIKELVIN 2730
#define SRAM_PERIOD_NO_ID_UPDATE_MS 100
#define FULL_PERCENT_28BIT 0xFFFFFFF
-static void update_sram_data(struct fg_chip *chip, int *resched_ms)
+static int update_sram_data(struct fg_chip *chip, int *resched_ms)
{
int i, j, rc = 0;
u8 reg[4];
@@ -2060,6 +2612,31 @@
}
fg_mem_release(chip);
+ /* Backup the registers whenever no error happens during update */
+ if (fg_reset_on_lockup && !chip->ima_error_handling) {
+ if (!rc) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("backing up SRAM registers\n");
+ rc = fg_backup_sram_registers(chip, true);
+ if (rc) {
+ pr_err("Couldn't save sram registers\n");
+ goto out;
+ }
+ if (!chip->use_last_soc) {
+ chip->last_soc = get_monotonic_soc_raw(chip);
+ chip->last_cc_soc = div64_s64(
+ (int64_t)chip->last_soc *
+ FULL_PERCENT_28BIT, FULL_SOC_RAW);
+ }
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("last_soc: %d last_cc_soc: %lld\n",
+ chip->last_soc, chip->last_cc_soc);
+ } else {
+ pr_err("update_sram failed\n");
+ goto out;
+ }
+ }
+
if (!rc)
get_current_time(&chip->last_sram_update_time);
@@ -2070,7 +2647,55 @@
} else {
*resched_ms = SRAM_PERIOD_NO_ID_UPDATE_MS;
}
+out:
fg_relax(&chip->update_sram_wakeup_source);
+ return rc;
+}
+
+#define SANITY_CHECK_PERIOD_MS 5000
+static void check_sanity_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ check_sanity_work.work);
+ int rc = 0;
+ u8 beat_count;
+ bool tried_once = false;
+
+ fg_stay_awake(&chip->sanity_wakeup_source);
+
+try_again:
+ rc = fg_read(chip, &beat_count,
+ chip->mem_base + MEM_INTF_FG_BEAT_COUNT, 1);
+ if (rc) {
+ pr_err("failed to read beat count rc=%d\n", rc);
+ goto resched;
+ }
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("current: %d, prev: %d\n", beat_count,
+ chip->last_beat_count);
+
+ if (chip->last_beat_count == beat_count) {
+ if (!tried_once) {
+ /* Wait for 1 FG cycle and read it once again */
+ msleep(1500);
+ tried_once = true;
+ goto try_again;
+ } else {
+ pr_err("Beat count not updating\n");
+ fg_check_ima_error_handling(chip);
+ goto out;
+ }
+ } else {
+ chip->last_beat_count = beat_count;
+ }
+resched:
+ schedule_delayed_work(
+ &chip->check_sanity_work,
+ msecs_to_jiffies(SANITY_CHECK_PERIOD_MS));
+out:
+ fg_relax(&chip->sanity_wakeup_source);
}
#define SRAM_TIMEOUT_MS 3000
@@ -2079,8 +2704,9 @@
struct fg_chip *chip = container_of(work,
struct fg_chip,
update_sram_data.work);
- int resched_ms = SRAM_PERIOD_NO_ID_UPDATE_MS, ret;
+ int resched_ms, ret;
bool tried_again = false;
+ int rc = 0;
wait:
/* Wait for MEMIF access revoked */
@@ -2094,14 +2720,19 @@
goto wait;
} else if (ret <= 0) {
pr_err("transaction timed out ret=%d\n", ret);
+ if (fg_is_batt_id_valid(chip))
+ resched_ms = fg_sram_update_period_ms;
+ else
+ resched_ms = SRAM_PERIOD_NO_ID_UPDATE_MS;
goto out;
}
- update_sram_data(chip, &resched_ms);
+ rc = update_sram_data(chip, &resched_ms);
out:
- schedule_delayed_work(
- &chip->update_sram_data,
- msecs_to_jiffies(resched_ms));
+ if (!rc)
+ schedule_delayed_work(
+ &chip->update_sram_data,
+ msecs_to_jiffies(resched_ms));
}
#define BATT_TEMP_OFFSET 3
@@ -2115,6 +2746,8 @@
TEMP_SENSE_CHARGE_BIT)
#define TEMP_PERIOD_UPDATE_MS 10000
#define TEMP_PERIOD_TIMEOUT_MS 3000
+#define BATT_TEMP_LOW_LIMIT -600
+#define BATT_TEMP_HIGH_LIMIT 1500
static void update_temp_data(struct work_struct *work)
{
s16 temp;
@@ -2166,14 +2799,44 @@
}
temp = reg[0] | (reg[1] << 8);
- fg_data[0].value = (temp * TEMP_LSB_16B / 1000)
- - DECIKELVIN;
+ temp = (temp * TEMP_LSB_16B / 1000) - DECIKELVIN;
+
+ /*
+ * If temperature is within the specified range (e.g. -60C and 150C),
+ * update it to the userspace. Otherwise, use the last read good
+ * temperature.
+ */
+ if (temp > chip->batt_temp_low_limit &&
+ temp < chip->batt_temp_high_limit) {
+ chip->last_good_temp = temp;
+ fg_data[0].value = temp;
+ } else {
+ fg_data[0].value = chip->last_good_temp;
+
+ /*
+ * If the temperature is read before and seems to be in valid
+ * range, then a bad temperature reading could be because of
+ * FG lockup. Trigger the FG reset sequence in such cases.
+ */
+ if (chip->last_temp_update_time && fg_reset_on_lockup &&
+ (chip->last_good_temp > chip->batt_temp_low_limit &&
+ chip->last_good_temp < chip->batt_temp_high_limit)) {
+ pr_err("Batt_temp is %d !, triggering FG reset\n",
+ temp);
+ fg_check_ima_error_handling(chip);
+ }
+ }
if (fg_debug_mask & FG_MEM_DEBUG_READS)
pr_info("BATT_TEMP %d %d\n", temp, fg_data[0].value);
get_current_time(&chip->last_temp_update_time);
+ if (chip->soc_slope_limiter_en) {
+ fg_stay_awake(&chip->slope_limit_wakeup_source);
+ schedule_work(&chip->slope_limiter_work);
+ }
+
out:
if (chip->sw_rbias_ctrl) {
rc = fg_mem_masked_write(chip, EXTERNAL_SENSE_SELECT,
@@ -2226,18 +2889,6 @@
return rc;
}
-#define VBATT_LOW_STS_BIT BIT(2)
-static int fg_get_vbatt_status(struct fg_chip *chip, bool *vbatt_low_sts)
-{
- int rc = 0;
- u8 fg_batt_sts;
-
- rc = fg_read(chip, &fg_batt_sts, INT_RT_STS(chip->batt_base), 1);
- if (!rc)
- *vbatt_low_sts = !!(fg_batt_sts & VBATT_LOW_STS_BIT);
- return rc;
-}
-
#define BATT_CYCLE_NUMBER_REG 0x5E8
#define BATT_CYCLE_OFFSET 0
static void restore_cycle_counter(struct fg_chip *chip)
@@ -2301,6 +2952,9 @@
bucket, rc);
else
chip->cyc_ctr.count[bucket] = cyc_count;
+
+ if (fg_debug_mask & FG_POWER_SUPPLY)
+ pr_info("Stored bucket %d cyc_count: %d\n", bucket, cyc_count);
return rc;
}
@@ -2416,6 +3070,62 @@
return ((int)val) * 1000;
}
+#define SLOPE_LIMITER_COEFF_REG 0x430
+#define SLOPE_LIMITER_COEFF_OFFSET 3
+#define SLOPE_LIMIT_TEMP_THRESHOLD 100
+#define SLOPE_LIMIT_LOW_TEMP_CHG 45
+#define SLOPE_LIMIT_HIGH_TEMP_CHG 2
+#define SLOPE_LIMIT_LOW_TEMP_DISCHG 45
+#define SLOPE_LIMIT_HIGH_TEMP_DISCHG 2
+static void slope_limiter_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work, struct fg_chip,
+ slope_limiter_work);
+ enum slope_limit_status status;
+ int batt_temp, rc;
+ u8 buf[2];
+ int64_t val;
+
+ batt_temp = get_sram_prop_now(chip, FG_DATA_BATT_TEMP);
+
+ if (chip->status == POWER_SUPPLY_STATUS_CHARGING ||
+ chip->status == POWER_SUPPLY_STATUS_FULL) {
+ if (batt_temp < chip->slope_limit_temp)
+ status = LOW_TEMP_CHARGE;
+ else
+ status = HIGH_TEMP_CHARGE;
+ } else if (chip->status == POWER_SUPPLY_STATUS_DISCHARGING) {
+ if (batt_temp < chip->slope_limit_temp)
+ status = LOW_TEMP_DISCHARGE;
+ else
+ status = HIGH_TEMP_DISCHARGE;
+ } else {
+ goto out;
+ }
+
+ if (status == chip->slope_limit_sts)
+ goto out;
+
+ val = chip->slope_limit_coeffs[status];
+ val *= MICRO_UNIT;
+ half_float_to_buffer(val, buf);
+ rc = fg_mem_write(chip, buf,
+ SLOPE_LIMITER_COEFF_REG, 2,
+ SLOPE_LIMITER_COEFF_OFFSET, 0);
+ if (rc) {
+ pr_err("Couldn't write to slope_limiter_coeff_reg, rc=%d\n",
+ rc);
+ goto out;
+ }
+
+ chip->slope_limit_sts = status;
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("Slope limit sts: %d val: %lld buf[%x %x] written\n",
+ status, val, buf[0], buf[1]);
+out:
+ fg_relax(&chip->slope_limit_wakeup_source);
+}
+
static int lookup_ocv_for_soc(struct fg_chip *chip, int soc)
{
int64_t *coeffs;
@@ -2481,6 +3191,7 @@
#define ESR_ACTUAL_REG 0x554
#define BATTERY_ESR_REG 0x4F4
#define TEMP_RS_TO_RSLOW_REG 0x514
+#define ESR_OFFSET 2
static int estimate_battery_age(struct fg_chip *chip, int *actual_capacity)
{
int64_t ocv_cutoff_new, ocv_cutoff_aged, temp_rs_to_rslow;
@@ -2519,7 +3230,7 @@
rc = fg_mem_read(chip, buffer, ESR_ACTUAL_REG, 2, 2, 0);
esr_actual = half_float(buffer);
- rc |= fg_mem_read(chip, buffer, BATTERY_ESR_REG, 2, 2, 0);
+ rc |= fg_mem_read(chip, buffer, BATTERY_ESR_REG, 2, ESR_OFFSET, 0);
battery_esr = half_float(buffer);
if (rc) {
@@ -2594,124 +3305,6 @@
estimate_battery_age(chip, &chip->actual_cap_uah);
}
-static enum power_supply_property fg_power_props[] = {
- POWER_SUPPLY_PROP_CAPACITY,
- POWER_SUPPLY_PROP_CAPACITY_RAW,
- POWER_SUPPLY_PROP_CURRENT_NOW,
- POWER_SUPPLY_PROP_VOLTAGE_NOW,
- POWER_SUPPLY_PROP_VOLTAGE_OCV,
- POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
- POWER_SUPPLY_PROP_CHARGE_NOW,
- POWER_SUPPLY_PROP_CHARGE_NOW_RAW,
- POWER_SUPPLY_PROP_CHARGE_NOW_ERROR,
- POWER_SUPPLY_PROP_CHARGE_FULL,
- POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
- POWER_SUPPLY_PROP_TEMP,
- POWER_SUPPLY_PROP_COOL_TEMP,
- POWER_SUPPLY_PROP_WARM_TEMP,
- POWER_SUPPLY_PROP_RESISTANCE,
- POWER_SUPPLY_PROP_RESISTANCE_ID,
- POWER_SUPPLY_PROP_BATTERY_TYPE,
- POWER_SUPPLY_PROP_UPDATE_NOW,
- POWER_SUPPLY_PROP_ESR_COUNT,
- POWER_SUPPLY_PROP_VOLTAGE_MIN,
- POWER_SUPPLY_PROP_CYCLE_COUNT,
- POWER_SUPPLY_PROP_CYCLE_COUNT_ID,
- POWER_SUPPLY_PROP_HI_POWER,
-};
-
-static int fg_power_get_property(struct power_supply *psy,
- enum power_supply_property psp,
- union power_supply_propval *val)
-{
- struct fg_chip *chip = power_supply_get_drvdata(psy);
- bool vbatt_low_sts;
-
- switch (psp) {
- case POWER_SUPPLY_PROP_BATTERY_TYPE:
- if (chip->battery_missing)
- val->strval = missing_batt_type;
- else if (chip->fg_restarting)
- val->strval = loading_batt_type;
- else
- val->strval = chip->batt_type;
- break;
- case POWER_SUPPLY_PROP_CAPACITY:
- val->intval = get_prop_capacity(chip);
- break;
- case POWER_SUPPLY_PROP_CAPACITY_RAW:
- val->intval = get_sram_prop_now(chip, FG_DATA_BATT_SOC);
- break;
- case POWER_SUPPLY_PROP_CHARGE_NOW_ERROR:
- val->intval = get_sram_prop_now(chip, FG_DATA_VINT_ERR);
- break;
- case POWER_SUPPLY_PROP_CURRENT_NOW:
- val->intval = get_sram_prop_now(chip, FG_DATA_CURRENT);
- break;
- case POWER_SUPPLY_PROP_VOLTAGE_NOW:
- val->intval = get_sram_prop_now(chip, FG_DATA_VOLTAGE);
- break;
- case POWER_SUPPLY_PROP_VOLTAGE_OCV:
- val->intval = get_sram_prop_now(chip, FG_DATA_OCV);
- break;
- case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
- val->intval = chip->batt_max_voltage_uv;
- break;
- case POWER_SUPPLY_PROP_TEMP:
- val->intval = get_sram_prop_now(chip, FG_DATA_BATT_TEMP);
- break;
- case POWER_SUPPLY_PROP_COOL_TEMP:
- val->intval = get_prop_jeita_temp(chip, FG_MEM_SOFT_COLD);
- break;
- case POWER_SUPPLY_PROP_WARM_TEMP:
- val->intval = get_prop_jeita_temp(chip, FG_MEM_SOFT_HOT);
- break;
- case POWER_SUPPLY_PROP_RESISTANCE:
- val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ESR);
- break;
- case POWER_SUPPLY_PROP_ESR_COUNT:
- val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ESR_COUNT);
- break;
- case POWER_SUPPLY_PROP_CYCLE_COUNT:
- val->intval = fg_get_cycle_count(chip);
- break;
- case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
- val->intval = chip->cyc_ctr.id;
- break;
- case POWER_SUPPLY_PROP_RESISTANCE_ID:
- val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ID);
- break;
- case POWER_SUPPLY_PROP_UPDATE_NOW:
- val->intval = 0;
- break;
- case POWER_SUPPLY_PROP_VOLTAGE_MIN:
- if (!fg_get_vbatt_status(chip, &vbatt_low_sts))
- val->intval = (int)vbatt_low_sts;
- else
- val->intval = 1;
- break;
- case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
- val->intval = chip->nom_cap_uah;
- break;
- case POWER_SUPPLY_PROP_CHARGE_FULL:
- val->intval = chip->learning_data.learned_cc_uah;
- break;
- case POWER_SUPPLY_PROP_CHARGE_NOW:
- val->intval = chip->learning_data.cc_uah;
- break;
- case POWER_SUPPLY_PROP_CHARGE_NOW_RAW:
- val->intval = get_sram_prop_now(chip, FG_DATA_CC_CHARGE);
- break;
- case POWER_SUPPLY_PROP_HI_POWER:
- val->intval = !!chip->bcl_lpm_disabled;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
static int correction_times[] = {
1470,
2940,
@@ -2853,11 +3446,8 @@
goto fail;
}
- if (chip->wa_flag & USE_CC_SOC_REG) {
- mutex_unlock(&chip->learning_data.learning_lock);
- fg_relax(&chip->capacity_learning_wakeup_source);
- return;
- }
+ if (chip->wa_flag & USE_CC_SOC_REG)
+ goto fail;
fg_mem_lock(chip);
@@ -2888,6 +3478,8 @@
pr_info("total_cc_uah = %lld\n", chip->learning_data.cc_uah);
fail:
+ if (chip->wa_flag & USE_CC_SOC_REG)
+ fg_relax(&chip->capacity_learning_wakeup_source);
mutex_unlock(&chip->learning_data.learning_lock);
return;
@@ -2901,7 +3493,7 @@
{
int rc;
u8 reg[4];
- unsigned int temp, magnitude;
+ int temp;
rc = fg_mem_read(chip, reg, CC_SOC_BASE_REG, 4, CC_SOC_OFFSET, 0);
if (rc) {
@@ -2910,20 +3502,61 @@
}
temp = reg[3] << 24 | reg[2] << 16 | reg[1] << 8 | reg[0];
- magnitude = temp & CC_SOC_MAGNITUDE_MASK;
- if (temp & CC_SOC_NEGATIVE_BIT)
- *cc_soc = -1 * (~magnitude + 1);
- else
- *cc_soc = magnitude;
-
+ *cc_soc = sign_extend32(temp, 29);
return 0;
}
+static int fg_get_current_cc(struct fg_chip *chip)
+{
+ int cc_soc, rc;
+ int64_t current_capacity;
+
+ if (!(chip->wa_flag & USE_CC_SOC_REG))
+ return chip->learning_data.cc_uah;
+
+ if (!chip->learning_data.learned_cc_uah)
+ return -EINVAL;
+
+ rc = fg_get_cc_soc(chip, &cc_soc);
+ if (rc < 0) {
+ pr_err("Failed to get cc_soc, rc=%d\n", rc);
+ return rc;
+ }
+
+ current_capacity = cc_soc * chip->learning_data.learned_cc_uah;
+ current_capacity = div64_u64(current_capacity, FULL_PERCENT_28BIT);
+ return current_capacity;
+}
+
+#define BATT_MISSING_STS BIT(6)
+static bool is_battery_missing(struct fg_chip *chip)
+{
+ int rc;
+ u8 fg_batt_sts;
+
+ rc = fg_read(chip, &fg_batt_sts,
+ INT_RT_STS(chip->batt_base), 1);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->batt_base), rc);
+ return false;
+ }
+
+ return (fg_batt_sts & BATT_MISSING_STS) ? true : false;
+}
+
static int fg_cap_learning_process_full_data(struct fg_chip *chip)
{
int cc_pc_val, rc = -EINVAL;
unsigned int cc_soc_delta_pc;
int64_t delta_cc_uah;
+ uint64_t temp;
+ bool batt_missing = is_battery_missing(chip);
+
+ if (batt_missing) {
+ pr_err("Battery is missing!\n");
+ goto fail;
+ }
if (!chip->learning_data.active)
goto fail;
@@ -2940,9 +3573,8 @@
goto fail;
}
- cc_soc_delta_pc = DIV_ROUND_CLOSEST(
- abs(cc_pc_val - chip->learning_data.init_cc_pc_val)
- * 100, FULL_PERCENT_28BIT);
+ temp = abs(cc_pc_val - chip->learning_data.init_cc_pc_val);
+ cc_soc_delta_pc = DIV_ROUND_CLOSEST_ULL(temp * 100, FULL_PERCENT_28BIT);
delta_cc_uah = div64_s64(
chip->learning_data.learned_cc_uah * cc_soc_delta_pc,
@@ -2950,8 +3582,11 @@
chip->learning_data.cc_uah = delta_cc_uah + chip->learning_data.cc_uah;
if (fg_debug_mask & FG_AGING)
- pr_info("current cc_soc=%d cc_soc_pc=%d total_cc_uah = %lld\n",
+ pr_info("current cc_soc=%d cc_soc_pc=%d init_cc_pc_val=%d delta_cc_uah=%lld learned_cc_uah=%lld total_cc_uah = %lld\n",
cc_pc_val, cc_soc_delta_pc,
+ chip->learning_data.init_cc_pc_val,
+ delta_cc_uah,
+ chip->learning_data.learned_cc_uah,
chip->learning_data.cc_uah);
return 0;
@@ -3044,6 +3679,12 @@
{
int16_t cc_mah;
int rc;
+ bool batt_missing = is_battery_missing(chip);
+
+ if (batt_missing) {
+ pr_err("Battery is missing!\n");
+ return;
+ }
cc_mah = div64_s64(chip->learning_data.learned_cc_uah, 1000);
@@ -3065,6 +3706,12 @@
static void fg_cap_learning_post_process(struct fg_chip *chip)
{
int64_t max_inc_val, min_dec_val, old_cap;
+ bool batt_missing = is_battery_missing(chip);
+
+ if (batt_missing) {
+ pr_err("Battery is missing!\n");
+ return;
+ }
max_inc_val = chip->learning_data.learned_cc_uah
* (1000 + chip->learning_data.max_increment);
@@ -3083,6 +3730,32 @@
chip->learning_data.learned_cc_uah =
chip->learning_data.cc_uah;
+ if (chip->learning_data.max_cap_limit) {
+ max_inc_val = (int64_t)chip->nom_cap_uah * (1000 +
+ chip->learning_data.max_cap_limit);
+ max_inc_val = div64_u64(max_inc_val, 1000);
+ if (chip->learning_data.cc_uah > max_inc_val) {
+ if (fg_debug_mask & FG_AGING)
+ pr_info("learning capacity %lld goes above max limit %lld\n",
+ chip->learning_data.cc_uah,
+ max_inc_val);
+ chip->learning_data.learned_cc_uah = max_inc_val;
+ }
+ }
+
+ if (chip->learning_data.min_cap_limit) {
+ min_dec_val = (int64_t)chip->nom_cap_uah * (1000 -
+ chip->learning_data.min_cap_limit);
+ min_dec_val = div64_u64(min_dec_val, 1000);
+ if (chip->learning_data.cc_uah < min_dec_val) {
+ if (fg_debug_mask & FG_AGING)
+ pr_info("learning capacity %lld goes below min limit %lld\n",
+ chip->learning_data.cc_uah,
+ min_dec_val);
+ chip->learning_data.learned_cc_uah = min_dec_val;
+ }
+ }
+
fg_cap_learning_save_data(chip);
if (fg_debug_mask & FG_AGING)
pr_info("final cc_uah = %lld, learned capacity %lld -> %lld uah\n",
@@ -3142,7 +3815,7 @@
if (battery_soc * 100 / FULL_PERCENT_3B
> chip->learning_data.max_start_soc) {
if (fg_debug_mask & FG_AGING)
- pr_info("battery soc too low (%d < %d), aborting\n",
+ pr_info("battery soc too high (%d > %d), aborting\n",
battery_soc * 100 / FULL_PERCENT_3B,
chip->learning_data.max_start_soc);
fg_mem_release(chip);
@@ -3226,6 +3899,17 @@
}
fg_cap_learning_stop(chip);
+ } else if (chip->status == POWER_SUPPLY_STATUS_FULL) {
+ if (chip->wa_flag & USE_CC_SOC_REG) {
+ /* reset SW_CC_SOC register to 100% upon charge_full */
+ rc = fg_mem_write(chip, (u8 *)&cc_pc_100,
+ CC_SOC_BASE_REG, 4, CC_SOC_OFFSET, 0);
+ if (rc)
+ pr_err("Failed to reset CC_SOC_REG rc=%d\n",
+ rc);
+ else if (fg_debug_mask & FG_STATUS)
+ pr_info("Reset SW_CC_SOC to full value\n");
+ }
}
fail:
@@ -3323,7 +4007,14 @@
struct fg_chip,
status_change_work);
unsigned long current_time = 0;
- int cc_soc, rc, capacity = get_prop_capacity(chip);
+ int cc_soc, batt_soc, rc, capacity = get_prop_capacity(chip);
+ bool batt_missing = is_battery_missing(chip);
+
+ if (batt_missing) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("Battery is missing\n");
+ return;
+ }
if (chip->esr_pulse_tune_en) {
fg_stay_awake(&chip->esr_extract_wakeup_source);
@@ -3343,19 +4034,34 @@
}
if (chip->status == POWER_SUPPLY_STATUS_FULL ||
chip->status == POWER_SUPPLY_STATUS_CHARGING) {
- if (!chip->vbat_low_irq_enabled) {
+ if (!chip->vbat_low_irq_enabled &&
+ !chip->use_vbat_low_empty_soc) {
enable_irq(chip->batt_irq[VBATT_LOW].irq);
enable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
chip->vbat_low_irq_enabled = true;
}
+
+ if (!chip->full_soc_irq_enabled) {
+ enable_irq(chip->soc_irq[FULL_SOC].irq);
+ enable_irq_wake(chip->soc_irq[FULL_SOC].irq);
+ chip->full_soc_irq_enabled = true;
+ }
+
if (!!(chip->wa_flag & PULSE_REQUEST_WA) && capacity == 100)
fg_configure_soc(chip);
} else if (chip->status == POWER_SUPPLY_STATUS_DISCHARGING) {
- if (chip->vbat_low_irq_enabled) {
+ if (chip->vbat_low_irq_enabled &&
+ !chip->use_vbat_low_empty_soc) {
disable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
disable_irq_nosync(chip->batt_irq[VBATT_LOW].irq);
chip->vbat_low_irq_enabled = false;
}
+
+ if (chip->full_soc_irq_enabled) {
+ disable_irq_wake(chip->soc_irq[FULL_SOC].irq);
+ disable_irq_nosync(chip->soc_irq[FULL_SOC].irq);
+ chip->full_soc_irq_enabled = false;
+ }
}
fg_cap_learning_check(chip);
schedule_work(&chip->update_esr_work);
@@ -3368,6 +4074,42 @@
}
if (chip->prev_status != chip->status && chip->last_sram_update_time) {
+ /*
+ * Reset SW_CC_SOC to a value based off battery SOC when
+ * the device is discharging.
+ */
+ if (chip->status == POWER_SUPPLY_STATUS_DISCHARGING) {
+ batt_soc = get_battery_soc_raw(chip);
+ if (!batt_soc)
+ return;
+
+ batt_soc = div64_s64((int64_t)batt_soc *
+ FULL_PERCENT_28BIT, FULL_PERCENT_3B);
+ rc = fg_mem_write(chip, (u8 *)&batt_soc,
+ CC_SOC_BASE_REG, 4, CC_SOC_OFFSET, 0);
+ if (rc)
+ pr_err("Failed to reset CC_SOC_REG rc=%d\n",
+ rc);
+ else if (fg_debug_mask & FG_STATUS)
+ pr_info("Reset SW_CC_SOC to %x\n", batt_soc);
+ }
+
+ /*
+ * Schedule the update_temp_work whenever there is a status
+ * change. This is essential for applying the slope limiter
+ * coefficients when that feature is enabled.
+ */
+ if (chip->last_temp_update_time && chip->soc_slope_limiter_en) {
+ cancel_delayed_work_sync(&chip->update_temp_work);
+ schedule_delayed_work(&chip->update_temp_work,
+ msecs_to_jiffies(0));
+ }
+
+ if (chip->dischg_gain.enable) {
+ fg_stay_awake(&chip->dischg_gain_wakeup_source);
+ schedule_work(&chip->dischg_gain_work);
+ }
+
+ get_current_time(&current_time);
/*
* When charging status changes, update SRAM parameters if it
@@ -3393,10 +4135,10 @@
}
if ((chip->wa_flag & USE_CC_SOC_REG) && chip->bad_batt_detection_en
&& chip->safety_timer_expired) {
- chip->sw_cc_soc_data.delta_soc =
- DIV_ROUND_CLOSEST(abs(cc_soc -
- chip->sw_cc_soc_data.init_cc_soc)
- * 100, FULL_PERCENT_28BIT);
+ uint64_t delta_cc_soc = abs(cc_soc -
+ chip->sw_cc_soc_data.init_cc_soc);
+ chip->sw_cc_soc_data.delta_soc = DIV_ROUND_CLOSEST_ULL(
+ delta_cc_soc * 100, FULL_PERCENT_28BIT);
chip->sw_cc_soc_data.full_capacity =
chip->sw_cc_soc_data.delta_soc +
chip->sw_cc_soc_data.init_sys_soc;
@@ -3539,6 +4281,395 @@
return rc;
}
+/*
+ * Restore the cached coulomb-counter SOC (chip->last_cc_soc) into FG SRAM.
+ * No-op (returns 0) unless use_last_cc_soc is set and a non-zero value was
+ * cached. On a successful write, use_last_cc_soc is cleared so the value is
+ * restored at most once. Returns 0 on success or the fg_mem_write error.
+ */
+static int fg_restore_cc_soc(struct fg_chip *chip)
+{
+ int rc;
+
+ if (!chip->use_last_cc_soc || !chip->last_cc_soc)
+ return 0;
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("Restoring cc_soc: %lld\n", chip->last_cc_soc);
+
+ rc = fg_mem_write(chip, (u8 *)&chip->last_cc_soc,
+ fg_data[FG_DATA_CC_CHARGE].address, 4,
+ fg_data[FG_DATA_CC_CHARGE].offset, 0);
+ if (rc)
+ pr_err("failed to update CC_SOC rc=%d\n", rc);
+ else
+ chip->use_last_cc_soc = false;
+
+ return rc;
+}
+
+#define SRAM_MONOTONIC_SOC_REG 0x574
+#define SRAM_MONOTONIC_SOC_OFFSET 2
+/*
+ * Restore the cached monotonic SOC into the FG SRAM monotonic-SOC register.
+ * last_soc (in FULL_SOC_RAW units) is rescaled to the 16-bit 0..0xFFFF range
+ * before writing. No-op (returns 0) unless use_last_soc is set and a
+ * non-zero last_soc was cached. Returns the fg_mem_write result otherwise.
+ */
+static int fg_restore_soc(struct fg_chip *chip)
+{
+ int rc;
+ u16 msoc;
+
+ if (chip->use_last_soc && chip->last_soc)
+ msoc = DIV_ROUND_CLOSEST(chip->last_soc * 0xFFFF,
+ FULL_SOC_RAW);
+ else
+ return 0;
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("Restored soc: %d\n", msoc);
+
+ rc = fg_mem_write(chip, (u8 *)&msoc, SRAM_MONOTONIC_SOC_REG, 2,
+ SRAM_MONOTONIC_SOC_OFFSET, 0);
+ if (rc)
+ pr_err("failed to write M_SOC_REG rc=%d\n", rc);
+
+ return rc;
+}
+
+#define NOM_CAP_REG 0x4F4
+/* Max allowed deviation of learned capacity from nominal, in deci-percent (500 = 50.0%) */
+#define CAPACITY_DELTA_DECIPCT 500
+/*
+ * Read the nominal battery capacity from FG SRAM and seed/validate the
+ * capacity-learning state. If no capacity has been learned yet, the nominal
+ * value is adopted and persisted. With capacity-learning feedback enabled,
+ * a learned value deviating from nominal by more than CAPACITY_DELTA_DECIPCT
+ * is reset to nominal; otherwise the cc_soc coefficient is recomputed from
+ * the learned value (in mAh). Returns 0 on success or a read/compute error.
+ */
+static int load_battery_aging_data(struct fg_chip *chip)
+{
+ int rc = 0;
+ u8 buffer[2];
+ int16_t cc_mah;
+ int64_t delta_cc_uah, pct_nom_cap_uah;
+
+ rc = fg_mem_read(chip, buffer, NOM_CAP_REG, 2, 0, 0);
+ if (rc) {
+ pr_err("Failed to read nominal capacitance: %d\n", rc);
+ goto out;
+ }
+
+ chip->nom_cap_uah = bcap_uah_2b(buffer);
+ chip->actual_cap_uah = chip->nom_cap_uah;
+
+ if (chip->learning_data.learned_cc_uah == 0) {
+ chip->learning_data.learned_cc_uah = chip->nom_cap_uah;
+ fg_cap_learning_save_data(chip);
+ } else if (chip->learning_data.feedback_on) {
+ delta_cc_uah = abs(chip->learning_data.learned_cc_uah -
+ chip->nom_cap_uah);
+ pct_nom_cap_uah = div64_s64((int64_t)chip->nom_cap_uah *
+ CAPACITY_DELTA_DECIPCT, 1000);
+ /*
+ * If the learned capacity is out of range, say by 50%
+ * from the nominal capacity, then overwrite the learned
+ * capacity with the nominal capacity.
+ */
+ if (chip->nom_cap_uah && delta_cc_uah > pct_nom_cap_uah) {
+ if (fg_debug_mask & FG_AGING) {
+ pr_info("learned_cc_uah: %lld is higher than expected\n",
+ chip->learning_data.learned_cc_uah);
+ pr_info("Capping it to nominal:%d\n",
+ chip->nom_cap_uah);
+ }
+ chip->learning_data.learned_cc_uah = chip->nom_cap_uah;
+ fg_cap_learning_save_data(chip);
+ } else {
+ cc_mah = div64_s64(chip->learning_data.learned_cc_uah,
+ 1000);
+ rc = fg_calc_and_store_cc_soc_coeff(chip, cc_mah);
+ if (rc)
+ pr_err("Error in restoring cc_soc_coeff, rc:%d\n",
+ rc);
+ }
+ }
+out:
+ return rc;
+}
+
+/*
+ * Restore battery state from the userspace-provided batt_info[] snapshot:
+ * derive last_soc/last_cc_soc from the stored SOC, write them back to FG
+ * SRAM (via fg_restore_soc/fg_restore_cc_soc), clear the VINT_ERR SRAM
+ * field, adopt the stored FCC as the learned capacity and reload the aging
+ * data, then notify the power-supply framework. Any failure is logged and
+ * aborts the remaining steps.
+ */
+static void fg_restore_battery_info(struct fg_chip *chip)
+{
+ int rc;
+ char buf[4] = {0, 0, 0, 0};
+
+ chip->last_soc = DIV_ROUND_CLOSEST(chip->batt_info[BATT_INFO_SOC] *
+ FULL_SOC_RAW, FULL_CAPACITY);
+ chip->last_cc_soc = div64_s64((int64_t)chip->last_soc *
+ FULL_PERCENT_28BIT, FULL_SOC_RAW);
+ chip->use_last_soc = true;
+ chip->use_last_cc_soc = true;
+ rc = fg_restore_soc(chip);
+ if (rc) {
+ pr_err("Error in restoring soc, rc=%d\n", rc);
+ goto out;
+ }
+
+ rc = fg_restore_cc_soc(chip);
+ if (rc) {
+ pr_err("Error in restoring cc_soc, rc=%d\n", rc);
+ goto out;
+ }
+
+ /* zero out the VINT_ERR SRAM field */
+ rc = fg_mem_write(chip, buf,
+ fg_data[FG_DATA_VINT_ERR].address,
+ fg_data[FG_DATA_VINT_ERR].len,
+ fg_data[FG_DATA_VINT_ERR].offset, 0);
+ if (rc) {
+ pr_err("Failed to write to VINT_ERR, rc=%d\n", rc);
+ goto out;
+ }
+
+ chip->learning_data.learned_cc_uah = chip->batt_info[BATT_INFO_FCC];
+ rc = load_battery_aging_data(chip);
+ if (rc) {
+ pr_err("Failed to load battery aging data, rc:%d\n", rc);
+ goto out;
+ }
+
+ if (chip->power_supply_registered)
+ power_supply_changed(chip->bms_psy);
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("Restored battery info!\n");
+
+out:
+ return;
+}
+
+/* Max allowed difference between stored and live battery temperature */
+#define DELTA_BATT_TEMP 30
+/*
+ * Sanity-check the userspace-provided batt_info[] snapshot against live
+ * readings before allowing a restore. Rejects the snapshot when: any field
+ * (except temperature) is 0 or any field is INT_MAX; the battery ID (kohm)
+ * does not match; the temperature differs by more than DELTA_BATT_TEMP;
+ * FCC is negative; or the voltage/SOC delta exceeds *batt_range_pct
+ * (delta computed by OCV when batt_range_ocv is set, by SOC otherwise).
+ * Returns true when the snapshot is acceptable.
+ */
+static bool fg_validate_battery_info(struct fg_chip *chip)
+{
+ int i, delta_pct, batt_id_kohm, batt_temp, batt_volt_mv, batt_soc;
+
+ for (i = 1; i < BATT_INFO_MAX; i++) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("batt_info[%d]: %d\n", i, chip->batt_info[i]);
+
+ if ((chip->batt_info[i] == 0 && i != BATT_INFO_TEMP) ||
+ chip->batt_info[i] == INT_MAX) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("batt_info[%d]:%d is invalid\n", i,
+ chip->batt_info[i]);
+ return false;
+ }
+ }
+
+ batt_id_kohm = get_sram_prop_now(chip, FG_DATA_BATT_ID) / 1000;
+ if (batt_id_kohm != chip->batt_info[BATT_INFO_RES_ID]) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("batt_id(%dK) does not match the stored batt_id(%dK)\n",
+ batt_id_kohm,
+ chip->batt_info[BATT_INFO_RES_ID]);
+ return false;
+ }
+
+ batt_temp = get_sram_prop_now(chip, FG_DATA_BATT_TEMP);
+ if (abs(chip->batt_info[BATT_INFO_TEMP] - batt_temp) >
+ DELTA_BATT_TEMP) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("batt_temp(%d) is higher/lower than stored batt_temp(%d)\n",
+ batt_temp, chip->batt_info[BATT_INFO_TEMP]);
+ return false;
+ }
+
+ if (chip->batt_info[BATT_INFO_FCC] < 0) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("batt_fcc cannot be %d\n",
+ chip->batt_info[BATT_INFO_FCC]);
+ return false;
+ }
+
+ batt_volt_mv = get_sram_prop_now(chip, FG_DATA_VOLTAGE) / 1000;
+ batt_soc = get_monotonic_soc_raw(chip);
+ /* rescale raw monotonic SOC (excluding endpoints) to 0..100 capacity */
+ if (batt_soc != 0 && batt_soc != FULL_SOC_RAW)
+ batt_soc = DIV_ROUND_CLOSEST((batt_soc - 1) *
+ (FULL_CAPACITY - 2), FULL_SOC_RAW - 2) + 1;
+
+ if (*chip->batt_range_ocv && chip->batt_max_voltage_uv > 1000)
+ delta_pct = DIV_ROUND_CLOSEST(abs(batt_volt_mv -
+ chip->batt_info[BATT_INFO_VOLTAGE]) * 100,
+ chip->batt_max_voltage_uv / 1000);
+ else
+ delta_pct = abs(batt_soc - chip->batt_info[BATT_INFO_SOC]);
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("Validating by %s batt_voltage:%d capacity:%d delta_pct:%d\n",
+ *chip->batt_range_ocv ? "OCV" : "SOC", batt_volt_mv,
+ batt_soc, delta_pct);
+
+ if (*chip->batt_range_pct && delta_pct > *chip->batt_range_pct) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("delta_pct(%d) is higher than batt_range_pct(%d)\n",
+ delta_pct, *chip->batt_range_pct);
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Store @val into the batt_info[] slot selected by chip->batt_info_id.
+ * Writing INT_MAX - 1 to the NOTIFY slot is a userspace trigger: if
+ * batt_info_restore is enabled and no IMA error is being handled, the
+ * snapshot is validated and, on success, restored. Returns -EINVAL (and
+ * resets batt_info_id to 0) on an out-of-range id, 0 otherwise.
+ */
+static int fg_set_battery_info(struct fg_chip *chip, int val)
+{
+ if (chip->batt_info_id < 0 ||
+ chip->batt_info_id >= BATT_INFO_MAX) {
+ pr_err("Invalid batt_info_id %d\n", chip->batt_info_id);
+ chip->batt_info_id = 0;
+ return -EINVAL;
+ }
+
+ if (chip->batt_info_id == BATT_INFO_NOTIFY && val == INT_MAX - 1) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("Notified from userspace\n");
+ if (chip->batt_info_restore && !chip->ima_error_handling) {
+ if (!fg_validate_battery_info(chip)) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("Validating battery info failed\n");
+ } else {
+ fg_restore_battery_info(chip);
+ }
+ }
+ }
+
+ chip->batt_info[chip->batt_info_id] = val;
+ return 0;
+}
+
+/* Properties exposed by the FG ("bms") power supply; served by
+ * fg_power_get_property / fg_power_set_property.
+ */
+static enum power_supply_property fg_power_props[] = {
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_RAW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_OCV,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_COUNTER,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_NOW_RAW,
+ POWER_SUPPLY_PROP_CHARGE_NOW_ERROR,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_COOL_TEMP,
+ POWER_SUPPLY_PROP_WARM_TEMP,
+ POWER_SUPPLY_PROP_RESISTANCE,
+ POWER_SUPPLY_PROP_RESISTANCE_ID,
+ POWER_SUPPLY_PROP_BATTERY_TYPE,
+ POWER_SUPPLY_PROP_UPDATE_NOW,
+ POWER_SUPPLY_PROP_ESR_COUNT,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_CYCLE_COUNT_ID,
+ POWER_SUPPLY_PROP_HI_POWER,
+ POWER_SUPPLY_PROP_SOC_REPORTING_READY,
+ POWER_SUPPLY_PROP_IGNORE_FALSE_NEGATIVE_ISENSE,
+ POWER_SUPPLY_PROP_ENABLE_JEITA_DETECTION,
+ POWER_SUPPLY_PROP_BATTERY_INFO,
+ POWER_SUPPLY_PROP_BATTERY_INFO_ID,
+};
+
+/*
+ * power_supply get_property callback for the FG device. Most values are
+ * served from cached SRAM data (get_sram_prop_now) or chip state; returns
+ * -EINVAL for unsupported properties or an out-of-range batt_info_id.
+ */
+static int fg_power_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct fg_chip *chip = power_supply_get_drvdata(psy);
+ bool vbatt_low_sts;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_BATTERY_TYPE:
+ /* report placeholder type while battery is missing or FG restarts */
+ if (chip->battery_missing)
+ val->strval = missing_batt_type;
+ else if (chip->fg_restarting)
+ val->strval = loading_batt_type;
+ else
+ val->strval = chip->batt_type;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = get_prop_capacity(chip);
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY_RAW:
+ val->intval = get_sram_prop_now(chip, FG_DATA_BATT_SOC);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW_ERROR:
+ val->intval = get_sram_prop_now(chip, FG_DATA_VINT_ERR);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = get_sram_prop_now(chip, FG_DATA_CURRENT);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = get_sram_prop_now(chip, FG_DATA_VOLTAGE);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_OCV:
+ val->intval = get_sram_prop_now(chip, FG_DATA_OCV);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ val->intval = chip->batt_max_voltage_uv;
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval = get_sram_prop_now(chip, FG_DATA_BATT_TEMP);
+ break;
+ case POWER_SUPPLY_PROP_COOL_TEMP:
+ val->intval = get_prop_jeita_temp(chip, FG_MEM_SOFT_COLD);
+ break;
+ case POWER_SUPPLY_PROP_WARM_TEMP:
+ val->intval = get_prop_jeita_temp(chip, FG_MEM_SOFT_HOT);
+ break;
+ case POWER_SUPPLY_PROP_RESISTANCE:
+ val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ESR);
+ break;
+ case POWER_SUPPLY_PROP_ESR_COUNT:
+ val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ESR_COUNT);
+ break;
+ case POWER_SUPPLY_PROP_CYCLE_COUNT:
+ val->intval = fg_get_cycle_count(chip);
+ break;
+ case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
+ val->intval = chip->cyc_ctr.id;
+ break;
+ case POWER_SUPPLY_PROP_RESISTANCE_ID:
+ val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ID);
+ break;
+ case POWER_SUPPLY_PROP_UPDATE_NOW:
+ /* write-only trigger; always reads back 0 */
+ val->intval = 0;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN:
+ /* report vbatt-low as asserted (1) if the status read fails */
+ if (!fg_get_vbatt_status(chip, &vbatt_low_sts))
+ val->intval = (int)vbatt_low_sts;
+ else
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ val->intval = chip->nom_cap_uah;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ val->intval = chip->learning_data.learned_cc_uah;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ val->intval = chip->learning_data.cc_uah;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW_RAW:
+ val->intval = get_sram_prop_now(chip, FG_DATA_CC_CHARGE);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_COUNTER:
+ val->intval = fg_get_current_cc(chip);
+ break;
+ case POWER_SUPPLY_PROP_HI_POWER:
+ val->intval = !!chip->bcl_lpm_disabled;
+ break;
+ case POWER_SUPPLY_PROP_SOC_REPORTING_READY:
+ val->intval = !!chip->soc_reporting_ready;
+ break;
+ case POWER_SUPPLY_PROP_IGNORE_FALSE_NEGATIVE_ISENSE:
+ val->intval = !chip->allow_false_negative_isense;
+ break;
+ case POWER_SUPPLY_PROP_ENABLE_JEITA_DETECTION:
+ val->intval = chip->use_soft_jeita_irq;
+ break;
+ case POWER_SUPPLY_PROP_BATTERY_INFO:
+ if (chip->batt_info_id < 0 ||
+ chip->batt_info_id >= BATT_INFO_MAX)
+ return -EINVAL;
+ val->intval = chip->batt_info[chip->batt_info_id];
+ break;
+ case POWER_SUPPLY_PROP_BATTERY_INFO_ID:
+ val->intval = chip->batt_info_id;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int fg_power_set_property(struct power_supply *psy,
enum power_supply_property psp,
const union power_supply_propval *val)
@@ -3557,6 +4688,67 @@
if (val->intval)
update_sram_data(chip, &unused);
break;
+ case POWER_SUPPLY_PROP_IGNORE_FALSE_NEGATIVE_ISENSE:
+ rc = set_prop_ignore_false_negative_isense(chip, !!val->intval);
+ if (rc)
+ pr_err("set_prop_ignore_false_negative_isense failed, rc=%d\n",
+ rc);
+ else
+ chip->allow_false_negative_isense = !val->intval;
+ break;
+ case POWER_SUPPLY_PROP_ENABLE_JEITA_DETECTION:
+ if (chip->use_soft_jeita_irq == !!val->intval) {
+ pr_debug("JEITA irq %s, ignore!\n",
+ chip->use_soft_jeita_irq ?
+ "enabled" : "disabled");
+ break;
+ }
+ chip->use_soft_jeita_irq = !!val->intval;
+ if (chip->use_soft_jeita_irq) {
+ if (chip->batt_irq[JEITA_SOFT_COLD].disabled) {
+ enable_irq(
+ chip->batt_irq[JEITA_SOFT_COLD].irq);
+ chip->batt_irq[JEITA_SOFT_COLD].disabled =
+ false;
+ }
+ if (!chip->batt_irq[JEITA_SOFT_COLD].wakeup) {
+ enable_irq_wake(
+ chip->batt_irq[JEITA_SOFT_COLD].irq);
+ chip->batt_irq[JEITA_SOFT_COLD].wakeup = true;
+ }
+ if (chip->batt_irq[JEITA_SOFT_HOT].disabled) {
+ enable_irq(
+ chip->batt_irq[JEITA_SOFT_HOT].irq);
+ chip->batt_irq[JEITA_SOFT_HOT].disabled = false;
+ }
+ if (!chip->batt_irq[JEITA_SOFT_HOT].wakeup) {
+ enable_irq_wake(
+ chip->batt_irq[JEITA_SOFT_HOT].irq);
+ chip->batt_irq[JEITA_SOFT_HOT].wakeup = true;
+ }
+ } else {
+ if (chip->batt_irq[JEITA_SOFT_COLD].wakeup) {
+ disable_irq_wake(
+ chip->batt_irq[JEITA_SOFT_COLD].irq);
+ chip->batt_irq[JEITA_SOFT_COLD].wakeup = false;
+ }
+ if (!chip->batt_irq[JEITA_SOFT_COLD].disabled) {
+ disable_irq_nosync(
+ chip->batt_irq[JEITA_SOFT_COLD].irq);
+ chip->batt_irq[JEITA_SOFT_COLD].disabled = true;
+ }
+ if (chip->batt_irq[JEITA_SOFT_HOT].wakeup) {
+ disable_irq_wake(
+ chip->batt_irq[JEITA_SOFT_HOT].irq);
+ chip->batt_irq[JEITA_SOFT_HOT].wakeup = false;
+ }
+ if (!chip->batt_irq[JEITA_SOFT_HOT].disabled) {
+ disable_irq_nosync(
+ chip->batt_irq[JEITA_SOFT_HOT].irq);
+ chip->batt_irq[JEITA_SOFT_HOT].disabled = true;
+ }
+ }
+ break;
case POWER_SUPPLY_PROP_STATUS:
chip->prev_status = chip->status;
chip->status = val->intval;
@@ -3599,6 +4791,12 @@
schedule_work(&chip->bcl_hi_power_work);
}
break;
+ case POWER_SUPPLY_PROP_BATTERY_INFO:
+ rc = fg_set_battery_info(chip, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_BATTERY_INFO_ID:
+ chip->batt_info_id = val->intval;
+ break;
default:
return -EINVAL;
};
@@ -3613,6 +4811,8 @@
case POWER_SUPPLY_PROP_COOL_TEMP:
case POWER_SUPPLY_PROP_WARM_TEMP:
case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
+ case POWER_SUPPLY_PROP_BATTERY_INFO:
+ case POWER_SUPPLY_PROP_BATTERY_INFO_ID:
return 1;
default:
break;
@@ -3807,21 +5007,197 @@
fg_relax(&chip->gain_comp_wakeup_source);
}
-#define BATT_MISSING_STS BIT(6)
-static bool is_battery_missing(struct fg_chip *chip)
+/*
+ * Worker that snapshots the coulomb-counter SOC: recomputes last_cc_soc
+ * from last_soc and, when fg_reset_on_lockup is enabled, triggers IMA
+ * error handling if the CC percentage exceeds cc_soc_limit_pct. Releases
+ * the cc_soc wakeup source on every exit path.
+ */
+static void cc_soc_store_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work, struct fg_chip,
+ cc_soc_store_work);
+ int cc_soc_pct;
+
+ if (!chip->nom_cap_uah) {
+ pr_err("nom_cap_uah zero!\n");
+ fg_relax(&chip->cc_soc_wakeup_source);
+ return;
+ }
+
+ cc_soc_pct = get_sram_prop_now(chip, FG_DATA_CC_CHARGE);
+ /*
+ * NOTE(review): cc_soc_pct * 100 is a 32-bit multiply before the
+ * 64-bit division — verify FG_DATA_CC_CHARGE cannot overflow int here.
+ */
+ cc_soc_pct = div64_s64(cc_soc_pct * 100,
+ chip->nom_cap_uah);
+ chip->last_cc_soc = div64_s64((int64_t)chip->last_soc *
+ FULL_PERCENT_28BIT, FULL_SOC_RAW);
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("cc_soc_pct: %d last_cc_soc: %lld\n", cc_soc_pct,
+ chip->last_cc_soc);
+
+ if (fg_reset_on_lockup && (chip->cc_soc_limit_pct > 0 &&
+ cc_soc_pct >= chip->cc_soc_limit_pct)) {
+ pr_err("CC_SOC out of range\n");
+ fg_check_ima_error_handling(chip);
+ }
+
+ fg_relax(&chip->cc_soc_wakeup_source);
+}
+
+/* Poll interval for hard JEITA status while the alarm is armed: 10 s */
+#define HARD_JEITA_ALARM_CHECK_NS 10000000000
+/*
+ * Alarm callback polling the hard JEITA (hot/cold) status bits while USB
+ * is present. On a cool<->cold or warm<->hot transition it reports the new
+ * health to the battery power supply. Re-arms itself every
+ * HARD_JEITA_ALARM_CHECK_NS; stops only when USB is removed.
+ */
+static enum alarmtimer_restart fg_hard_jeita_alarm_cb(struct alarm *alarm,
+ ktime_t now)
+{
+ struct fg_chip *chip = container_of(alarm,
+ struct fg_chip, hard_jeita_alarm);
+ int rc, health = POWER_SUPPLY_HEALTH_UNKNOWN;
+ u8 regval;
+ bool batt_hot, batt_cold;
+ union power_supply_propval val = {0, };
+
+ if (!is_usb_present(chip)) {
+ pr_debug("USB plugged out, stop the timer!\n");
+ return ALARMTIMER_NORESTART;
+ }
+
+ /* NOTE(review): "®val" below is mojibake for "&regval" — repair before applying this patch */
+ rc = fg_read(chip, ®val, BATT_INFO_STS(chip->batt_base), 1);
+ if (rc) {
+ pr_err("read batt_sts failed, rc=%d\n", rc);
+ goto recheck;
+ }
+
+ batt_hot = !!(regval & JEITA_HARD_HOT_RT_STS);
+ batt_cold = !!(regval & JEITA_HARD_COLD_RT_STS);
+ if (batt_hot && batt_cold) {
+ pr_debug("Hot && cold can't co-exist\n");
+ goto recheck;
+ }
+
+ if ((batt_hot == chip->batt_hot) && (batt_cold == chip->batt_cold)) {
+ pr_debug("battery JEITA state not changed, ignore\n");
+ goto recheck;
+ }
+
+ if (batt_cold != chip->batt_cold) {
+ /* cool --> cold */
+ if (chip->batt_cool) {
+ chip->batt_cool = false;
+ chip->batt_cold = true;
+ health = POWER_SUPPLY_HEALTH_COLD;
+ } else if (chip->batt_cold) { /* cold --> cool */
+ chip->batt_cool = true;
+ chip->batt_cold = false;
+ health = POWER_SUPPLY_HEALTH_COOL;
+ }
+ }
+
+ if (batt_hot != chip->batt_hot) {
+ /* warm --> hot */
+ if (chip->batt_warm) {
+ chip->batt_warm = false;
+ chip->batt_hot = true;
+ health = POWER_SUPPLY_HEALTH_OVERHEAT;
+ } else if (chip->batt_hot) { /* hot --> warm */
+ chip->batt_hot = false;
+ chip->batt_warm = true;
+ health = POWER_SUPPLY_HEALTH_WARM;
+ }
+ }
+
+ if (health != POWER_SUPPLY_HEALTH_UNKNOWN) {
+ pr_debug("FG report battery health: %d\n", health);
+ val.intval = health;
+ rc = power_supply_set_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_HEALTH, &val);
+ if (rc)
+ pr_err("Set batt_psy health: %d failed\n", health);
+ }
+
+recheck:
+ alarm_forward_now(alarm, ns_to_ktime(HARD_JEITA_ALARM_CHECK_NS));
+ return ALARMTIMER_RESTART;
+}
+
+#define BATT_SOFT_COLD_STS BIT(0)
+#define BATT_SOFT_HOT_STS BIT(1)
+static irqreturn_t fg_jeita_soft_hot_irq_handler(int irq, void *_chip)
{
int rc;
- u8 fg_batt_sts;
+ struct fg_chip *chip = _chip;
+ u8 regval;
+ bool batt_warm;
+ union power_supply_propval val = {0, };
- rc = fg_read(chip, &fg_batt_sts,
- INT_RT_STS(chip->batt_base), 1);
+ if (!is_charger_available(chip))
+ return IRQ_HANDLED;
+
+ rc = fg_read(chip, ®val, INT_RT_STS(chip->batt_base), 1);
if (rc) {
pr_err("spmi read failed: addr=%03X, rc=%d\n",
INT_RT_STS(chip->batt_base), rc);
- return false;
+ return IRQ_HANDLED;
}
- return (fg_batt_sts & BATT_MISSING_STS) ? true : false;
+ batt_warm = !!(regval & BATT_SOFT_HOT_STS);
+ if (chip->batt_warm == batt_warm) {
+ pr_debug("warm state not change, ignore!\n");
+ return IRQ_HANDLED;
+ }
+
+ chip->batt_warm = batt_warm;
+ if (batt_warm) {
+ val.intval = POWER_SUPPLY_HEALTH_WARM;
+ power_supply_set_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_HEALTH, &val);
+ /* kick the alarm timer for hard hot polling */
+ alarm_start_relative(&chip->hard_jeita_alarm,
+ ns_to_ktime(HARD_JEITA_ALARM_CHECK_NS));
+ } else {
+ val.intval = POWER_SUPPLY_HEALTH_GOOD;
+ power_supply_set_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_HEALTH, &val);
+ /* cancel the alarm timer */
+ alarm_try_to_cancel(&chip->hard_jeita_alarm);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * IRQ handler for the soft-cold JEITA threshold. On entering the cool
+ * state it reports HEALTH_COOL and arms the hard-JEITA polling alarm;
+ * on leaving it reports HEALTH_GOOD and cancels the alarm. No-op when the
+ * charger is unavailable or the cool state is unchanged.
+ */
+static irqreturn_t fg_jeita_soft_cold_irq_handler(int irq, void *_chip)
+{
+ int rc;
+ struct fg_chip *chip = _chip;
+ u8 regval;
+ bool batt_cool;
+ union power_supply_propval val = {0, };
+
+ if (!is_charger_available(chip))
+ return IRQ_HANDLED;
+
+ /* NOTE(review): "®val" below is mojibake for "&regval" — repair before applying this patch */
+ rc = fg_read(chip, ®val, INT_RT_STS(chip->batt_base), 1);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->batt_base), rc);
+ return IRQ_HANDLED;
+ }
+
+ batt_cool = !!(regval & BATT_SOFT_COLD_STS);
+ if (chip->batt_cool == batt_cool) {
+ pr_debug("cool state not change, ignore\n");
+ return IRQ_HANDLED;
+ }
+
+ chip->batt_cool = batt_cool;
+ if (batt_cool) {
+ val.intval = POWER_SUPPLY_HEALTH_COOL;
+ power_supply_set_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_HEALTH, &val);
+ /* kick the alarm timer for hard cold polling */
+ alarm_start_relative(&chip->hard_jeita_alarm,
+ ns_to_ktime(HARD_JEITA_ALARM_CHECK_NS));
+ } else {
+ val.intval = POWER_SUPPLY_HEALTH_GOOD;
+ power_supply_set_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_HEALTH, &val);
+ /* cancel the alarm timer */
+ alarm_try_to_cancel(&chip->hard_jeita_alarm);
+ }
+
+ return IRQ_HANDLED;
}
#define SOC_FIRST_EST_DONE BIT(5)
@@ -3841,21 +5217,40 @@
return (fg_soc_sts & SOC_FIRST_EST_DONE) ? true : false;
}
+#define FG_EMPTY_DEBOUNCE_MS 1500
static irqreturn_t fg_vbatt_low_handler(int irq, void *_chip)
{
struct fg_chip *chip = _chip;
- int rc;
bool vbatt_low_sts;
if (fg_debug_mask & FG_IRQS)
pr_info("vbatt-low triggered\n");
- if (chip->status == POWER_SUPPLY_STATUS_CHARGING) {
- rc = fg_get_vbatt_status(chip, &vbatt_low_sts);
- if (rc) {
- pr_err("error in reading vbatt_status, rc:%d\n", rc);
+ /* handle empty soc based on vbatt-low interrupt */
+ if (chip->use_vbat_low_empty_soc) {
+ if (fg_get_vbatt_status(chip, &vbatt_low_sts))
goto out;
+
+ if (vbatt_low_sts) {
+ if (fg_debug_mask & FG_IRQS)
+ pr_info("Vbatt is low\n");
+ disable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
+ disable_irq_nosync(chip->batt_irq[VBATT_LOW].irq);
+ chip->vbat_low_irq_enabled = false;
+ fg_stay_awake(&chip->empty_check_wakeup_source);
+ schedule_delayed_work(&chip->check_empty_work,
+ msecs_to_jiffies(FG_EMPTY_DEBOUNCE_MS));
+ } else {
+ if (fg_debug_mask & FG_IRQS)
+ pr_info("Vbatt is high\n");
+ chip->soc_empty = false;
}
+ goto out;
+ }
+
+ if (chip->status == POWER_SUPPLY_STATUS_CHARGING) {
+ if (fg_get_vbatt_status(chip, &vbatt_low_sts))
+ goto out;
if (!vbatt_low_sts && chip->vbat_low_irq_enabled) {
if (fg_debug_mask & FG_IRQS)
pr_info("disabling vbatt_low irq\n");
@@ -3876,8 +5271,10 @@
bool batt_missing = is_battery_missing(chip);
if (batt_missing) {
+ fg_cap_learning_stop(chip);
chip->battery_missing = true;
chip->profile_loaded = false;
+ chip->soc_reporting_ready = false;
chip->batt_type = default_batt_type;
mutex_lock(&chip->cyc_ctr.lock);
if (fg_debug_mask & FG_IRQS)
@@ -3885,17 +5282,10 @@
clear_cycle_counter(chip);
mutex_unlock(&chip->cyc_ctr.lock);
} else {
- if (!chip->use_otp_profile) {
- reinit_completion(&chip->batt_id_avail);
- reinit_completion(&chip->first_soc_done);
- schedule_delayed_work(&chip->batt_profile_init, 0);
- cancel_delayed_work(&chip->update_sram_data);
- schedule_delayed_work(
- &chip->update_sram_data,
- msecs_to_jiffies(0));
- } else {
+ if (!chip->use_otp_profile)
+ fg_handle_battery_insertion(chip);
+ else
chip->battery_missing = false;
- }
}
if (fg_debug_mask & FG_IRQS)
@@ -3943,7 +5333,7 @@
{
struct fg_chip *chip = _chip;
u8 soc_rt_sts;
- int rc;
+ int rc, msoc;
rc = fg_read(chip, &soc_rt_sts, INT_RT_STS(chip->soc_base), 1);
if (rc) {
@@ -3954,6 +5344,37 @@
if (fg_debug_mask & FG_IRQS)
pr_info("triggered 0x%x\n", soc_rt_sts);
+ if (chip->dischg_gain.enable) {
+ fg_stay_awake(&chip->dischg_gain_wakeup_source);
+ schedule_work(&chip->dischg_gain_work);
+ }
+
+ if (chip->soc_slope_limiter_en) {
+ fg_stay_awake(&chip->slope_limit_wakeup_source);
+ schedule_work(&chip->slope_limiter_work);
+ }
+
+ /* Backup last soc every delta soc interrupt */
+ chip->use_last_soc = false;
+ if (fg_reset_on_lockup) {
+ if (!chip->ima_error_handling)
+ chip->last_soc = get_monotonic_soc_raw(chip);
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("last_soc: %d\n", chip->last_soc);
+
+ fg_stay_awake(&chip->cc_soc_wakeup_source);
+ schedule_work(&chip->cc_soc_store_work);
+ }
+
+ if (chip->use_vbat_low_empty_soc) {
+ msoc = get_monotonic_soc_raw(chip);
+ if (msoc == 0 || chip->soc_empty) {
+ fg_stay_awake(&chip->empty_check_wakeup_source);
+ schedule_delayed_work(&chip->check_empty_work,
+ msecs_to_jiffies(FG_EMPTY_DEBOUNCE_MS));
+ }
+ }
+
schedule_work(&chip->battery_age_work);
if (chip->power_supply_registered)
@@ -3988,7 +5409,6 @@
return IRQ_HANDLED;
}
-#define FG_EMPTY_DEBOUNCE_MS 1500
static irqreturn_t fg_empty_soc_irq_handler(int irq, void *_chip)
{
struct fg_chip *chip = _chip;
@@ -4100,16 +5520,15 @@
fg_relax(&chip->resume_soc_wakeup_source);
}
-
#define OCV_COEFFS_START_REG 0x4C0
#define OCV_JUNCTION_REG 0x4D8
-#define NOM_CAP_REG 0x4F4
#define CUTOFF_VOLTAGE_REG 0x40C
#define RSLOW_CFG_REG 0x538
#define RSLOW_CFG_OFFSET 2
#define RSLOW_THRESH_REG 0x52C
#define RSLOW_THRESH_OFFSET 0
-#define TEMP_RS_TO_RSLOW_OFFSET 2
+#define RS_TO_RSLOW_CHG_OFFSET 2
+#define RS_TO_RSLOW_DISCHG_OFFSET 0
#define RSLOW_COMP_REG 0x528
#define RSLOW_COMP_C1_OFFSET 0
#define RSLOW_COMP_C2_OFFSET 2
@@ -4117,7 +5536,6 @@
{
u8 buffer[24];
int rc, i;
- int16_t cc_mah;
fg_mem_lock(chip);
rc = fg_mem_read(chip, buffer, OCV_COEFFS_START_REG, 24, 0, 0);
@@ -4138,30 +5556,21 @@
chip->ocv_coeffs[8], chip->ocv_coeffs[9],
chip->ocv_coeffs[10], chip->ocv_coeffs[11]);
}
- rc = fg_mem_read(chip, buffer, OCV_JUNCTION_REG, 1, 0, 0);
- chip->ocv_junction_p1p2 = buffer[0] * 100 / 255;
- rc |= fg_mem_read(chip, buffer, OCV_JUNCTION_REG, 1, 1, 0);
- chip->ocv_junction_p2p3 = buffer[0] * 100 / 255;
+ rc = fg_mem_read(chip, buffer, OCV_JUNCTION_REG, 2, 0, 0);
if (rc) {
pr_err("Failed to read ocv junctions: %d\n", rc);
goto done;
}
- rc = fg_mem_read(chip, buffer, NOM_CAP_REG, 2, 0, 0);
+
+ chip->ocv_junction_p1p2 = buffer[0] * 100 / 255;
+ chip->ocv_junction_p2p3 = buffer[1] * 100 / 255;
+
+ rc = load_battery_aging_data(chip);
if (rc) {
- pr_err("Failed to read nominal capacitance: %d\n", rc);
+ pr_err("Failed to load battery aging data, rc:%d\n", rc);
goto done;
}
- chip->nom_cap_uah = bcap_uah_2b(buffer);
- chip->actual_cap_uah = chip->nom_cap_uah;
- if (chip->learning_data.learned_cc_uah == 0) {
- chip->learning_data.learned_cc_uah = chip->nom_cap_uah;
- fg_cap_learning_save_data(chip);
- } else if (chip->learning_data.feedback_on) {
- cc_mah = div64_s64(chip->learning_data.learned_cc_uah, 1000);
- rc = fg_calc_and_store_cc_soc_coeff(chip, cc_mah);
- if (rc)
- pr_err("Error in restoring cc_soc_coeff, rc:%d\n", rc);
- }
+
rc = fg_mem_read(chip, buffer, CUTOFF_VOLTAGE_REG, 2, 0, 0);
if (rc) {
pr_err("Failed to read cutoff voltage: %d\n", rc);
@@ -4188,9 +5597,9 @@
}
chip->rslow_comp.rslow_thr = buffer[0];
rc = fg_mem_read(chip, buffer, TEMP_RS_TO_RSLOW_REG, 2,
- RSLOW_THRESH_OFFSET, 0);
+ RS_TO_RSLOW_CHG_OFFSET, 0);
if (rc) {
- pr_err("unable to read rs to rslow: %d\n", rc);
+ pr_err("unable to read rs to rslow_chg: %d\n", rc);
goto done;
}
memcpy(chip->rslow_comp.rs_to_rslow, buffer, 2);
@@ -4207,6 +5616,68 @@
return rc;
}
+/*
+ * Scale the charge/discharge rs_to_rslow values in FG SRAM to compensate
+ * for board connector resistance (rconn_mohm): each value is multiplied by
+ * batt_esr / (batt_esr + rconn_uohm), then written back as a half-float.
+ * Returns 0 on success or the first SRAM read/write error.
+ */
+static int fg_update_batt_rslow_settings(struct fg_chip *chip)
+{
+ int64_t rs_to_rslow_chg, rs_to_rslow_dischg, batt_esr, rconn_uohm;
+ u8 buffer[2];
+ int rc;
+
+ rc = fg_mem_read(chip, buffer, BATTERY_ESR_REG, 2, ESR_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to read battery_esr: %d\n", rc);
+ goto done;
+ }
+ batt_esr = half_float(buffer);
+
+ rc = fg_mem_read(chip, buffer, TEMP_RS_TO_RSLOW_REG, 2,
+ RS_TO_RSLOW_DISCHG_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to read rs to rslow dischg: %d\n", rc);
+ goto done;
+ }
+ rs_to_rslow_dischg = half_float(buffer);
+
+ rc = fg_mem_read(chip, buffer, TEMP_RS_TO_RSLOW_REG, 2,
+ RS_TO_RSLOW_CHG_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to read rs to rslow chg: %d\n", rc);
+ goto done;
+ }
+ rs_to_rslow_chg = half_float(buffer);
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("rs_rslow_chg: %lld, rs_rslow_dischg: %lld, esr: %lld\n",
+ rs_to_rslow_chg, rs_to_rslow_dischg, batt_esr);
+
+ /* rconn is configured in milliohm; ESR math is in microohm */
+ rconn_uohm = chip->rconn_mohm * 1000;
+ rs_to_rslow_dischg = div64_s64(rs_to_rslow_dischg * batt_esr,
+ batt_esr + rconn_uohm);
+ rs_to_rslow_chg = div64_s64(rs_to_rslow_chg * batt_esr,
+ batt_esr + rconn_uohm);
+
+ half_float_to_buffer(rs_to_rslow_chg, buffer);
+ rc = fg_mem_write(chip, buffer, TEMP_RS_TO_RSLOW_REG, 2,
+ RS_TO_RSLOW_CHG_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to write rs_to_rslow_chg: %d\n", rc);
+ goto done;
+ }
+
+ half_float_to_buffer(rs_to_rslow_dischg, buffer);
+ rc = fg_mem_write(chip, buffer, TEMP_RS_TO_RSLOW_REG, 2,
+ RS_TO_RSLOW_DISCHG_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to write rs_to_rslow_dischg: %d\n", rc);
+ goto done;
+ }
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("Modified rs_rslow_chg: %lld, rs_rslow_dischg: %lld\n",
+ rs_to_rslow_chg, rs_to_rslow_dischg);
+done:
+ return rc;
+}
+
#define RSLOW_CFG_MASK (BIT(2) | BIT(3) | BIT(4) | BIT(5))
#define RSLOW_CFG_ON_VAL (BIT(2) | BIT(3))
#define RSLOW_THRESH_FULL_VAL 0xFF
@@ -4233,7 +5704,7 @@
half_float_to_buffer(chip->rslow_comp.chg_rs_to_rslow, buffer);
rc = fg_mem_write(chip, buffer,
- TEMP_RS_TO_RSLOW_REG, 2, TEMP_RS_TO_RSLOW_OFFSET, 0);
+ TEMP_RS_TO_RSLOW_REG, 2, RS_TO_RSLOW_CHG_OFFSET, 0);
if (rc) {
pr_err("unable to write rs to rslow: %d\n", rc);
goto done;
@@ -4286,7 +5757,7 @@
}
rc = fg_mem_write(chip, chip->rslow_comp.rs_to_rslow,
- TEMP_RS_TO_RSLOW_REG, 2, TEMP_RS_TO_RSLOW_OFFSET, 0);
+ TEMP_RS_TO_RSLOW_REG, 2, RS_TO_RSLOW_CHG_OFFSET, 0);
if (rc) {
pr_err("unable to write rs to rslow: %d\n", rc);
goto done;
@@ -4510,6 +5981,58 @@
fg_relax(&chip->esr_extract_wakeup_source);
}
+#define KI_COEFF_MEDC_REG 0x400
+#define KI_COEFF_MEDC_OFFSET 0
+#define KI_COEFF_HIGHC_REG 0x404
+#define KI_COEFF_HIGHC_OFFSET 0
+#define DEFAULT_MEDC_VOLTAGE_GAIN 3
+#define DEFAULT_HIGHC_VOLTAGE_GAIN 2
+/*
+ * Worker that programs the medc/highc voltage-gain Ki coefficients in FG
+ * SRAM. While discharging, it selects the gain pair configured for the
+ * lowest SOC threshold that is >= the current capacity; otherwise the
+ * defaults are used. Values are scaled by MICRO_UNIT and encoded as
+ * half-floats. Releases the dischg_gain wakeup source when done.
+ */
+static void discharge_gain_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work, struct fg_chip,
+ dischg_gain_work);
+ u8 buf[2];
+ int capacity, rc, i;
+ int64_t medc_val = DEFAULT_MEDC_VOLTAGE_GAIN;
+ int64_t highc_val = DEFAULT_HIGHC_VOLTAGE_GAIN;
+
+ capacity = get_prop_capacity(chip);
+ if (chip->status == POWER_SUPPLY_STATUS_DISCHARGING) {
+ for (i = VOLT_GAIN_MAX - 1; i >= 0; i--) {
+ if (capacity <= chip->dischg_gain.soc[i]) {
+ medc_val = chip->dischg_gain.medc_gain[i];
+ highc_val = chip->dischg_gain.highc_gain[i];
+ }
+ }
+ }
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("Capacity: %d, medc_gain: %lld highc_gain: %lld\n",
+ capacity, medc_val, highc_val);
+
+ medc_val *= MICRO_UNIT;
+ half_float_to_buffer(medc_val, buf);
+ rc = fg_mem_write(chip, buf, KI_COEFF_MEDC_REG, 2,
+ KI_COEFF_MEDC_OFFSET, 0);
+ if (rc)
+ pr_err("Couldn't write to ki_coeff_medc_reg, rc=%d\n", rc);
+ else if (fg_debug_mask & FG_STATUS)
+ pr_info("Value [%x %x] written to ki_coeff_medc\n", buf[0],
+ buf[1]);
+
+ highc_val *= MICRO_UNIT;
+ half_float_to_buffer(highc_val, buf);
+ rc = fg_mem_write(chip, buf, KI_COEFF_HIGHC_REG, 2,
+ KI_COEFF_HIGHC_OFFSET, 0);
+ if (rc)
+ pr_err("Couldn't write to ki_coeff_highc_reg, rc=%d\n", rc);
+ else if (fg_debug_mask & FG_STATUS)
+ pr_info("Value [%x %x] written to ki_coeff_highc\n", buf[0],
+ buf[1]);
+
+ fg_relax(&chip->dischg_gain_wakeup_source);
+}
+
#define LOW_LATENCY BIT(6)
#define BATT_PROFILE_OFFSET 0x4C0
#define PROFILE_INTEGRITY_REG 0x53C
@@ -4529,7 +6052,7 @@
pr_info("restarting fuel gauge...\n");
try_again:
- if (write_profile) {
+ if (write_profile && !chip->ima_error_handling) {
if (!chip->charging_disabled) {
pr_err("Charging not yet disabled!\n");
return -EINVAL;
@@ -4770,7 +6293,8 @@
#define BATTERY_PSY_WAIT_MS 2000
static int fg_batt_profile_init(struct fg_chip *chip)
{
- int rc = 0, ret, len, batt_id;
+ int rc = 0, ret;
+ int len, batt_id;
struct device_node *node = chip->pdev->dev.of_node;
struct device_node *batt_node, *profile_node;
const char *data, *batt_type_str;
@@ -4792,6 +6316,19 @@
goto no_profile;
}
+ /* Check whether the charger is ready */
+ if (!is_charger_available(chip))
+ goto reschedule;
+
+ /* Disable charging for a FG cycle before calculating vbat_in_range */
+ if (!chip->charging_disabled) {
+ rc = set_prop_enable_charging(chip, false);
+ if (rc)
+ pr_err("Failed to disable charging, rc=%d\n", rc);
+
+ goto update;
+ }
+
batt_node = of_find_node_by_name(node, "qcom,battery-data");
if (!batt_node) {
pr_warn("No available batterydata, using OTP defaults\n");
@@ -4808,8 +6345,12 @@
fg_batt_type);
if (IS_ERR_OR_NULL(profile_node)) {
rc = PTR_ERR(profile_node);
- pr_err("couldn't find profile handle %d\n", rc);
- goto no_profile;
+ if (rc == -EPROBE_DEFER) {
+ goto reschedule;
+ } else {
+ pr_err("couldn't find profile handle rc=%d\n", rc);
+ goto no_profile;
+ }
}
/* read rslow compensation values if they're available */
@@ -4903,18 +6444,6 @@
goto no_profile;
}
- /* Check whether the charger is ready */
- if (!is_charger_available(chip))
- goto reschedule;
-
- /* Disable charging for a FG cycle before calculating vbat_in_range */
- if (!chip->charging_disabled) {
- rc = set_prop_enable_charging(chip, false);
- if (rc)
- pr_err("Failed to disable charging, rc=%d\n", rc);
-
- goto reschedule;
- }
vbat_in_range = get_vbat_est_diff(chip)
< settings[FG_MEM_VBAT_EST_DIFF].value * 1000;
@@ -4956,11 +6485,7 @@
chip->batt_profile, len, false);
}
- if (chip->power_supply_registered)
- power_supply_changed(chip->bms_psy);
-
memcpy(chip->batt_profile, data, len);
-
chip->batt_profile_len = len;
if (fg_debug_mask & FG_STATUS)
@@ -4995,6 +6520,11 @@
}
}
+ if (chip->rconn_mohm > 0) {
+ rc = fg_update_batt_rslow_settings(chip);
+ if (rc)
+ pr_err("Error in updating ESR, rc=%d\n", rc);
+ }
done:
if (chip->charging_disabled) {
rc = set_prop_enable_charging(chip, true);
@@ -5008,8 +6538,22 @@
chip->batt_type = fg_batt_type;
else
chip->batt_type = batt_type_str;
+
+ if (chip->first_profile_loaded && fg_reset_on_lockup) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("restoring SRAM registers\n");
+ rc = fg_backup_sram_registers(chip, false);
+ if (rc)
+ pr_err("Couldn't restore sram registers\n");
+
+ /* Read the cycle counter back from FG SRAM */
+ if (chip->cyc_ctr.en)
+ restore_cycle_counter(chip);
+ }
+
chip->first_profile_loaded = true;
chip->profile_loaded = true;
+ chip->soc_reporting_ready = true;
chip->battery_missing = is_battery_missing(chip);
update_chg_iterm(chip);
update_cc_cv_setpoint(chip);
@@ -5025,8 +6569,10 @@
fg_relax(&chip->profile_wakeup_source);
pr_info("Battery SOC: %d, V: %duV\n", get_prop_capacity(chip),
fg_data[FG_DATA_VOLTAGE].value);
+ complete_all(&chip->fg_reset_done);
return rc;
no_profile:
+ chip->soc_reporting_ready = true;
if (chip->charging_disabled) {
rc = set_prop_enable_charging(chip, true);
if (rc)
@@ -5039,14 +6585,15 @@
power_supply_changed(chip->bms_psy);
fg_relax(&chip->profile_wakeup_source);
return rc;
-reschedule:
- schedule_delayed_work(
- &chip->batt_profile_init,
- msecs_to_jiffies(BATTERY_PSY_WAIT_MS));
+update:
cancel_delayed_work(&chip->update_sram_data);
schedule_delayed_work(
&chip->update_sram_data,
msecs_to_jiffies(0));
+reschedule:
+ schedule_delayed_work(
+ &chip->batt_profile_init,
+ msecs_to_jiffies(BATTERY_PSY_WAIT_MS));
fg_relax(&chip->profile_wakeup_source);
return 0;
}
@@ -5056,14 +6603,41 @@
struct fg_chip *chip = container_of(work,
struct fg_chip,
check_empty_work.work);
+ bool vbatt_low_sts;
+ int msoc;
- if (fg_is_batt_empty(chip)) {
+ /* handle empty soc based on vbatt-low interrupt */
+ if (chip->use_vbat_low_empty_soc) {
+ if (fg_get_vbatt_status(chip, &vbatt_low_sts))
+ goto out;
+
+ msoc = get_monotonic_soc_raw(chip);
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("Vbatt_low: %d, msoc: %d\n", vbatt_low_sts,
+ msoc);
+ if (vbatt_low_sts || (msoc == 0))
+ chip->soc_empty = true;
+ else
+ chip->soc_empty = false;
+
+ if (chip->power_supply_registered)
+ power_supply_changed(chip->bms_psy);
+
+ if (!chip->vbat_low_irq_enabled) {
+ enable_irq(chip->batt_irq[VBATT_LOW].irq);
+ enable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
+ chip->vbat_low_irq_enabled = true;
+ }
+ } else if (fg_is_batt_empty(chip)) {
if (fg_debug_mask & FG_STATUS)
pr_info("EMPTY SOC high\n");
chip->soc_empty = true;
if (chip->power_supply_registered)
power_supply_changed(chip->bms_psy);
}
+
+out:
fg_relax(&chip->empty_check_wakeup_source);
}
@@ -5103,7 +6677,7 @@
int rc;
u8 buffer[3];
int bsoc;
- int resume_soc_raw = FULL_SOC_RAW - settings[FG_MEM_RESUME_SOC].value;
+ int resume_soc_raw = settings[FG_MEM_RESUME_SOC].value;
bool disable = false;
u8 reg;
@@ -5318,6 +6892,98 @@
} \
} while (0)
+static int fg_dischg_gain_dt_init(struct fg_chip *chip)
+{
+ struct device_node *node = chip->pdev->dev.of_node;
+ struct property *prop;
+ int i, rc = 0;
+ size_t size;
+
+ prop = of_find_property(node, "qcom,fg-dischg-voltage-gain-soc",
+ NULL);
+ if (!prop) {
+ pr_err("qcom-fg-dischg-voltage-gain-soc not specified\n");
+ goto out;
+ }
+
+ size = prop->length / sizeof(u32);
+ if (size != VOLT_GAIN_MAX) {
+ pr_err("Voltage gain SOC specified is of incorrect size\n");
+ goto out;
+ }
+
+ rc = of_property_read_u32_array(node,
+ "qcom,fg-dischg-voltage-gain-soc", chip->dischg_gain.soc, size);
+ if (rc < 0) {
+ pr_err("Reading qcom-fg-dischg-voltage-gain-soc failed, rc=%d\n",
+ rc);
+ goto out;
+ }
+
+ for (i = 0; i < VOLT_GAIN_MAX; i++) {
+ if (chip->dischg_gain.soc[i] > 100) {
+ pr_err("Incorrect dischg-voltage-gain-soc\n");
+ goto out;
+ }
+ }
+
+ prop = of_find_property(node, "qcom,fg-dischg-med-voltage-gain",
+ NULL);
+ if (!prop) {
+ pr_err("qcom-fg-dischg-med-voltage-gain not specified\n");
+ goto out;
+ }
+
+ size = prop->length / sizeof(u32);
+ if (size != VOLT_GAIN_MAX) {
+ pr_err("med-voltage-gain specified is of incorrect size\n");
+ goto out;
+ }
+
+ rc = of_property_read_u32_array(node,
+ "qcom,fg-dischg-med-voltage-gain", chip->dischg_gain.medc_gain,
+ size);
+ if (rc < 0) {
+ pr_err("Reading qcom-fg-dischg-med-voltage-gain failed, rc=%d\n",
+ rc);
+ goto out;
+ }
+
+ prop = of_find_property(node, "qcom,fg-dischg-high-voltage-gain",
+ NULL);
+ if (!prop) {
+ pr_err("qcom-fg-dischg-high-voltage-gain not specified\n");
+ goto out;
+ }
+
+ size = prop->length / sizeof(u32);
+ if (size != VOLT_GAIN_MAX) {
+ pr_err("high-voltage-gain specified is of incorrect size\n");
+ goto out;
+ }
+
+ rc = of_property_read_u32_array(node,
+ "qcom,fg-dischg-high-voltage-gain",
+ chip->dischg_gain.highc_gain, size);
+ if (rc < 0) {
+ pr_err("Reading qcom-fg-dischg-high-voltage-gain failed, rc=%d\n",
+ rc);
+ goto out;
+ }
+
+ if (fg_debug_mask & FG_STATUS) {
+ for (i = 0; i < VOLT_GAIN_MAX; i++)
+ pr_info("SOC:%d MedC_Gain:%d HighC_Gain: %d\n",
+ chip->dischg_gain.soc[i],
+ chip->dischg_gain.medc_gain[i],
+ chip->dischg_gain.highc_gain[i]);
+ }
+ return 0;
+out:
+ chip->dischg_gain.enable = false;
+ return rc;
+}
+
#define DEFAULT_EVALUATION_CURRENT_MA 1000
static int fg_of_init(struct fg_chip *chip)
{
@@ -5395,6 +7061,10 @@
"cl-max-start-capacity", rc, 15);
OF_READ_PROPERTY(chip->learning_data.vbat_est_thr_uv,
"cl-vbat-est-thr-uv", rc, 40000);
+ OF_READ_PROPERTY(chip->learning_data.max_cap_limit,
+ "cl-max-limit-deciperc", rc, 0);
+ OF_READ_PROPERTY(chip->learning_data.min_cap_limit,
+ "cl-min-limit-deciperc", rc, 0);
OF_READ_PROPERTY(chip->evaluation_current,
"aging-eval-current-ma", rc,
DEFAULT_EVALUATION_CURRENT_MA);
@@ -5455,6 +7125,77 @@
chip->esr_pulse_tune_en = of_property_read_bool(node,
"qcom,esr-pulse-tuning-en");
+ chip->soc_slope_limiter_en = of_property_read_bool(node,
+ "qcom,fg-control-slope-limiter");
+ if (chip->soc_slope_limiter_en) {
+ OF_READ_PROPERTY(chip->slope_limit_temp,
+ "fg-slope-limit-temp-threshold", rc,
+ SLOPE_LIMIT_TEMP_THRESHOLD);
+
+ OF_READ_PROPERTY(chip->slope_limit_coeffs[LOW_TEMP_CHARGE],
+ "fg-slope-limit-low-temp-chg", rc,
+ SLOPE_LIMIT_LOW_TEMP_CHG);
+
+ OF_READ_PROPERTY(chip->slope_limit_coeffs[HIGH_TEMP_CHARGE],
+ "fg-slope-limit-high-temp-chg", rc,
+ SLOPE_LIMIT_HIGH_TEMP_CHG);
+
+ OF_READ_PROPERTY(chip->slope_limit_coeffs[LOW_TEMP_DISCHARGE],
+ "fg-slope-limit-low-temp-dischg", rc,
+ SLOPE_LIMIT_LOW_TEMP_DISCHG);
+
+ OF_READ_PROPERTY(chip->slope_limit_coeffs[HIGH_TEMP_DISCHARGE],
+ "fg-slope-limit-high-temp-dischg", rc,
+ SLOPE_LIMIT_HIGH_TEMP_DISCHG);
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("slope-limiter, temp: %d coeffs: [%d %d %d %d]\n",
+ chip->slope_limit_temp,
+ chip->slope_limit_coeffs[LOW_TEMP_CHARGE],
+ chip->slope_limit_coeffs[HIGH_TEMP_CHARGE],
+ chip->slope_limit_coeffs[LOW_TEMP_DISCHARGE],
+ chip->slope_limit_coeffs[HIGH_TEMP_DISCHARGE]);
+ }
+
+ OF_READ_PROPERTY(chip->rconn_mohm, "fg-rconn-mohm", rc, 0);
+
+ chip->dischg_gain.enable = of_property_read_bool(node,
+ "qcom,fg-dischg-voltage-gain-ctrl");
+ if (chip->dischg_gain.enable) {
+ rc = fg_dischg_gain_dt_init(chip);
+ if (rc) {
+ pr_err("Error in reading dischg_gain parameters, rc=%d\n",
+ rc);
+ rc = 0;
+ }
+ }
+
+ chip->use_vbat_low_empty_soc = of_property_read_bool(node,
+ "qcom,fg-use-vbat-low-empty-soc");
+
+ OF_READ_PROPERTY(chip->batt_temp_low_limit,
+ "fg-batt-temp-low-limit", rc, BATT_TEMP_LOW_LIMIT);
+
+ OF_READ_PROPERTY(chip->batt_temp_high_limit,
+ "fg-batt-temp-high-limit", rc, BATT_TEMP_HIGH_LIMIT);
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("batt-temp-low_limit: %d batt-temp-high_limit: %d\n",
+ chip->batt_temp_low_limit, chip->batt_temp_high_limit);
+
+ OF_READ_PROPERTY(chip->cc_soc_limit_pct, "fg-cc-soc-limit-pct", rc, 0);
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("cc-soc-limit-pct: %d\n", chip->cc_soc_limit_pct);
+
+ chip->batt_info_restore = of_property_read_bool(node,
+ "qcom,fg-restore-batt-info");
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("restore: %d validate_by_ocv: %d range_pct: %d\n",
+ chip->batt_info_restore, fg_batt_valid_ocv,
+ fg_batt_range_pct);
+
return rc;
}
@@ -5528,15 +7269,22 @@
chip->soc_irq[FULL_SOC].irq, rc);
return rc;
}
- rc = devm_request_irq(chip->dev,
- chip->soc_irq[EMPTY_SOC].irq,
- fg_empty_soc_irq_handler,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "empty-soc", chip);
- if (rc < 0) {
- pr_err("Can't request %d empty-soc: %d\n",
- chip->soc_irq[EMPTY_SOC].irq, rc);
- return rc;
+ enable_irq_wake(chip->soc_irq[FULL_SOC].irq);
+ chip->full_soc_irq_enabled = true;
+
+ if (!chip->use_vbat_low_empty_soc) {
+ rc = devm_request_irq(chip->dev,
+ chip->soc_irq[EMPTY_SOC].irq,
+ fg_empty_soc_irq_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING,
+ "empty-soc", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d empty-soc: %d\n",
+ chip->soc_irq[EMPTY_SOC].irq,
+ rc);
+ return rc;
+ }
}
rc = devm_request_irq(chip->dev,
chip->soc_irq[DELTA_SOC].irq,
@@ -5558,8 +7306,8 @@
}
enable_irq_wake(chip->soc_irq[DELTA_SOC].irq);
- enable_irq_wake(chip->soc_irq[FULL_SOC].irq);
- enable_irq_wake(chip->soc_irq[EMPTY_SOC].irq);
+ if (!chip->use_vbat_low_empty_soc)
+ enable_irq_wake(chip->soc_irq[EMPTY_SOC].irq);
break;
case FG_MEMIF:
chip->mem_irq[FG_MEM_AVAIL].irq
@@ -5581,8 +7329,53 @@
}
break;
case FG_BATT:
- chip->batt_irq[BATT_MISSING].irq
- = of_irq_get_byname(child, "batt-missing");
+ chip->batt_irq[JEITA_SOFT_COLD].irq =
+ of_irq_get_byname(child, "soft-cold");
+ if (chip->batt_irq[JEITA_SOFT_COLD].irq < 0) {
+ pr_err("Unable to get soft-cold irq\n");
+ rc = -EINVAL;
+ return rc;
+ }
+ rc = devm_request_threaded_irq(chip->dev,
+ chip->batt_irq[JEITA_SOFT_COLD].irq,
+ NULL,
+ fg_jeita_soft_cold_irq_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ "soft-cold", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d soft-cold: %d\n",
+ chip->batt_irq[JEITA_SOFT_COLD].irq,
+ rc);
+ return rc;
+ }
+ disable_irq(chip->batt_irq[JEITA_SOFT_COLD].irq);
+ chip->batt_irq[JEITA_SOFT_COLD].disabled = true;
+ chip->batt_irq[JEITA_SOFT_HOT].irq =
+ of_irq_get_byname(child, "soft-hot");
+ if (chip->batt_irq[JEITA_SOFT_HOT].irq < 0) {
+ pr_err("Unable to get soft-hot irq\n");
+ rc = -EINVAL;
+ return rc;
+ }
+ rc = devm_request_threaded_irq(chip->dev,
+ chip->batt_irq[JEITA_SOFT_HOT].irq,
+ NULL,
+ fg_jeita_soft_hot_irq_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ "soft-hot", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d soft-hot: %d\n",
+ chip->batt_irq[JEITA_SOFT_HOT].irq, rc);
+ return rc;
+ }
+ disable_irq(chip->batt_irq[JEITA_SOFT_HOT].irq);
+ chip->batt_irq[JEITA_SOFT_HOT].disabled = true;
+ chip->batt_irq[BATT_MISSING].irq =
+ of_irq_get_byname(child, "batt-missing");
if (chip->batt_irq[BATT_MISSING].irq < 0) {
pr_err("Unable to get batt-missing irq\n");
rc = -EINVAL;
@@ -5619,8 +7412,14 @@
chip->batt_irq[VBATT_LOW].irq, rc);
return rc;
}
- disable_irq_nosync(chip->batt_irq[VBATT_LOW].irq);
- chip->vbat_low_irq_enabled = false;
+ if (chip->use_vbat_low_empty_soc) {
+ enable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
+ chip->vbat_low_irq_enabled = true;
+ } else {
+ disable_irq_nosync(
+ chip->batt_irq[VBATT_LOW].irq);
+ chip->vbat_low_irq_enabled = false;
+ }
break;
case FG_ADC:
break;
@@ -5630,17 +7429,22 @@
}
}
+ chip->irqs_enabled = true;
return rc;
}
-static void fg_cleanup(struct fg_chip *chip)
+static void fg_cancel_all_works(struct fg_chip *chip)
{
+ cancel_delayed_work_sync(&chip->check_sanity_work);
cancel_delayed_work_sync(&chip->update_sram_data);
cancel_delayed_work_sync(&chip->update_temp_work);
cancel_delayed_work_sync(&chip->update_jeita_setting);
cancel_delayed_work_sync(&chip->check_empty_work);
cancel_delayed_work_sync(&chip->batt_profile_init);
alarm_try_to_cancel(&chip->fg_cap_learning_alarm);
+ alarm_try_to_cancel(&chip->hard_jeita_alarm);
+ if (!chip->ima_error_handling)
+ cancel_work_sync(&chip->ima_error_recovery_work);
cancel_work_sync(&chip->rslow_comp_work);
cancel_work_sync(&chip->set_resume_soc_work);
cancel_work_sync(&chip->fg_cap_learning_work);
@@ -5652,12 +7456,23 @@
cancel_work_sync(&chip->gain_comp_work);
cancel_work_sync(&chip->init_work);
cancel_work_sync(&chip->charge_full_work);
+ cancel_work_sync(&chip->bcl_hi_power_work);
cancel_work_sync(&chip->esr_extract_config_work);
+ cancel_work_sync(&chip->slope_limiter_work);
+ cancel_work_sync(&chip->dischg_gain_work);
+ cancel_work_sync(&chip->cc_soc_store_work);
+}
+
+static void fg_cleanup(struct fg_chip *chip)
+{
+ fg_cancel_all_works(chip);
+ power_supply_unregister(chip->bms_psy);
mutex_destroy(&chip->rslow_comp.lock);
mutex_destroy(&chip->rw_lock);
mutex_destroy(&chip->cyc_ctr.lock);
mutex_destroy(&chip->learning_data.learning_lock);
mutex_destroy(&chip->sysfs_restart_lock);
+ mutex_destroy(&chip->ima_recovery_lock);
wakeup_source_trash(&chip->resume_soc_wakeup_source.source);
wakeup_source_trash(&chip->empty_check_wakeup_source.source);
wakeup_source_trash(&chip->memif_wakeup_source.source);
@@ -5667,6 +7482,11 @@
wakeup_source_trash(&chip->gain_comp_wakeup_source.source);
wakeup_source_trash(&chip->capacity_learning_wakeup_source.source);
wakeup_source_trash(&chip->esr_extract_wakeup_source.source);
+ wakeup_source_trash(&chip->slope_limit_wakeup_source.source);
+ wakeup_source_trash(&chip->dischg_gain_wakeup_source.source);
+ wakeup_source_trash(&chip->fg_reset_wakeup_source.source);
+ wakeup_source_trash(&chip->cc_soc_wakeup_source.source);
+ wakeup_source_trash(&chip->sanity_wakeup_source.source);
}
static int fg_remove(struct platform_device *pdev)
@@ -6155,12 +7975,13 @@
return 0;
}
-#define FG_ALG_SYSCTL_1 0x4B0
-#define SOC_CNFG 0x450
-#define SOC_DELTA_OFFSET 3
-#define DELTA_SOC_PERCENT 1
-#define I_TERM_QUAL_BIT BIT(1)
-#define PATCH_NEG_CURRENT_BIT BIT(3)
+#define FG_ALG_SYSCTL_1 0x4B0
+#define SOC_CNFG 0x450
+#define SOC_DELTA_OFFSET 3
+#define DELTA_SOC_PERCENT 1
+#define ALERT_CFG_OFFSET 3
+#define I_TERM_QUAL_BIT BIT(1)
+#define PATCH_NEG_CURRENT_BIT BIT(3)
#define KI_COEFF_PRED_FULL_ADDR 0x408
#define KI_COEFF_PRED_FULL_4_0_MSB 0x88
#define KI_COEFF_PRED_FULL_4_0_LSB 0x00
@@ -6168,6 +7989,12 @@
#define FG_ADC_CONFIG_REG 0x4B8
#define FG_BCL_CONFIG_OFFSET 0x3
#define BCL_FORCED_HPM_IN_CHARGE BIT(2)
+#define IRQ_USE_VOLTAGE_HYST_BIT BIT(0)
+#define EMPTY_FROM_VOLTAGE_BIT BIT(1)
+#define EMPTY_FROM_SOC_BIT BIT(2)
+#define EMPTY_SOC_IRQ_MASK (IRQ_USE_VOLTAGE_HYST_BIT | \
+ EMPTY_FROM_SOC_BIT | \
+ EMPTY_FROM_VOLTAGE_BIT)
static int fg_common_hw_init(struct fg_chip *chip)
{
int rc;
@@ -6176,8 +8003,9 @@
update_iterm(chip);
update_cutoff_voltage(chip);
- update_irq_volt_empty(chip);
update_bcl_thresholds(chip);
+ if (!chip->use_vbat_low_empty_soc)
+ update_irq_volt_empty(chip);
resume_soc_raw = settings[FG_MEM_RESUME_SOC].value;
if (resume_soc_raw > 0) {
@@ -6207,6 +8035,11 @@
return rc;
}
+ /* Override the voltage threshold for vbatt_low with empty_volt */
+ if (chip->use_vbat_low_empty_soc)
+ settings[FG_MEM_BATT_LOW].value =
+ settings[FG_MEM_IRQ_VOLT_EMPTY].value;
+
rc = fg_mem_masked_write(chip, settings[FG_MEM_BATT_LOW].address, 0xFF,
batt_to_setpoint_8b(settings[FG_MEM_BATT_LOW].value),
settings[FG_MEM_BATT_LOW].offset);
@@ -6274,20 +8107,41 @@
if (fg_debug_mask & FG_STATUS)
pr_info("imptr_pulse_slow is %sabled\n",
chip->imptr_pulse_slow_en ? "en" : "dis");
+ }
- rc = fg_mem_read(chip, &val, RSLOW_CFG_REG, 1, RSLOW_CFG_OFFSET,
- 0);
- if (rc) {
- pr_err("unable to read rslow cfg: %d\n", rc);
- return rc;
- }
+ rc = fg_mem_read(chip, &val, RSLOW_CFG_REG, 1, RSLOW_CFG_OFFSET,
+ 0);
+ if (rc) {
+ pr_err("unable to read rslow cfg: %d\n", rc);
+ return rc;
+ }
- if (val & RSLOW_CFG_ON_VAL)
- chip->rslow_comp.active = true;
+ if (val & RSLOW_CFG_ON_VAL)
+ chip->rslow_comp.active = true;
- if (fg_debug_mask & FG_STATUS)
- pr_info("rslow_comp active is %sabled\n",
- chip->rslow_comp.active ? "en" : "dis");
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("rslow_comp active is %sabled\n",
+ chip->rslow_comp.active ? "en" : "dis");
+
+ /*
+ * Clear bits 0-2 in 0x4B3 and set them again to make empty_soc irq
+ * trigger again.
+ */
+ rc = fg_mem_masked_write(chip, FG_ALG_SYSCTL_1, EMPTY_SOC_IRQ_MASK,
+ 0, ALERT_CFG_OFFSET);
+ if (rc) {
+ pr_err("failed to write to 0x4B3 rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Wait for a FG cycle before enabling empty soc irq configuration */
+ msleep(FG_CYCLE_MS);
+
+ rc = fg_mem_masked_write(chip, FG_ALG_SYSCTL_1, EMPTY_SOC_IRQ_MASK,
+ EMPTY_SOC_IRQ_MASK, ALERT_CFG_OFFSET);
+ if (rc) {
+ pr_err("failed to write to 0x4B3 rc=%d\n", rc);
+ return rc;
}
return 0;
@@ -6414,12 +8268,13 @@
/* Setup workaround flag based on PMIC type */
if (fg_sense_type == INTERNAL_CURRENT_SENSE)
chip->wa_flag |= IADC_GAIN_COMP_WA;
- if (chip->pmic_revision[REVID_DIG_MAJOR] > 1)
+ if (chip->pmic_revision[REVID_DIG_MAJOR] >= 1)
chip->wa_flag |= USE_CC_SOC_REG;
break;
case PMI8950:
case PMI8937:
+ case PMI8940:
rc = fg_8950_hw_init(chip);
/* Setup workaround flag based on PMIC type */
chip->wa_flag |= BCL_HI_POWER_FOR_CHGLED_WA;
@@ -6438,12 +8293,223 @@
return rc;
}
+static int fg_init_iadc_config(struct fg_chip *chip)
+{
+ u8 reg[2];
+ int rc;
+
+ /* read default gain config */
+ rc = fg_mem_read(chip, reg, K_VCOR_REG, 2, DEF_GAIN_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to read default gain rc=%d\n", rc);
+ return rc;
+ }
+
+ if (reg[1] || reg[0]) {
+ /*
+ * Default gain register has valid value:
+ * - write to gain register.
+ */
+ rc = fg_mem_write(chip, reg, GAIN_REG, 2,
+ GAIN_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to write gain rc=%d\n", rc);
+ return rc;
+ }
+ } else {
+ /*
+ * Default gain register is invalid:
+ * - read gain register for default gain value
+ * - write to default gain register.
+ */
+ rc = fg_mem_read(chip, reg, GAIN_REG, 2,
+ GAIN_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to read gain rc=%d\n", rc);
+ return rc;
+ }
+ rc = fg_mem_write(chip, reg, K_VCOR_REG, 2,
+ DEF_GAIN_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to write default gain rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ chip->iadc_comp_data.dfl_gain_reg[0] = reg[0];
+ chip->iadc_comp_data.dfl_gain_reg[1] = reg[1];
+ chip->iadc_comp_data.dfl_gain = half_float(reg);
+
+ pr_debug("IADC gain initial config reg_val 0x%x%x gain %lld\n",
+ reg[1], reg[0], chip->iadc_comp_data.dfl_gain);
+ return 0;
+}
+
+#define EN_WR_FGXCT_PRD BIT(6)
+#define EN_RD_FGXCT_PRD BIT(5)
+#define FG_RESTART_TIMEOUT_MS 12000
+static void ima_error_recovery_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ ima_error_recovery_work);
+ bool tried_again = false;
+ int rc;
+ u8 buf[4] = {0, 0, 0, 0};
+
+ fg_stay_awake(&chip->fg_reset_wakeup_source);
+ mutex_lock(&chip->ima_recovery_lock);
+ if (!chip->ima_error_handling) {
+ pr_err("Scheduled by mistake?\n");
+ mutex_unlock(&chip->ima_recovery_lock);
+ fg_relax(&chip->fg_reset_wakeup_source);
+ return;
+ }
+
+ /*
+ * SOC should be read and used until the error recovery completes.
+ * Without this, there could be a fluctuation in SOC values notified
+ * to the userspace.
+ */
+ chip->use_last_soc = true;
+
+ /* Block SRAM access till FG reset is complete */
+ chip->block_sram_access = true;
+
+ /* Release the mutex to avoid deadlock while cancelling the works */
+ mutex_unlock(&chip->ima_recovery_lock);
+
+ /* Cancel all the works */
+ fg_cancel_all_works(chip);
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("last_soc: %d\n", chip->last_soc);
+
+ mutex_lock(&chip->ima_recovery_lock);
+ /* Acquire IMA access forcibly from FG ALG */
+ rc = fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG,
+ EN_WR_FGXCT_PRD | EN_RD_FGXCT_PRD,
+ EN_WR_FGXCT_PRD | EN_RD_FGXCT_PRD, 1);
+ if (rc) {
+ pr_err("Error in writing to IMA_CFG, rc=%d\n", rc);
+ goto out;
+ }
+
+ /* Release the IMA access now so that FG reset can go through */
+ rc = fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG,
+ EN_WR_FGXCT_PRD | EN_RD_FGXCT_PRD, 0, 1);
+ if (rc) {
+ pr_err("Error in writing to IMA_CFG, rc=%d\n", rc);
+ goto out;
+ }
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("resetting FG\n");
+
+ /* Assert FG reset */
+ rc = fg_reset(chip, true);
+ if (rc) {
+ pr_err("Couldn't reset FG\n");
+ goto out;
+ }
+
+ /* Wait for a small time before deasserting FG reset */
+ msleep(100);
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("clearing FG from reset\n");
+
+ /* Deassert FG reset */
+ rc = fg_reset(chip, false);
+ if (rc) {
+ pr_err("Couldn't clear FG reset\n");
+ goto out;
+ }
+
+ /* Wait for at least a FG cycle before doing SRAM access */
+ msleep(2000);
+
+ chip->block_sram_access = false;
+
+ if (!chip->init_done) {
+ schedule_work(&chip->init_work);
+ goto wait;
+ }
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("Calling hw_init\n");
+
+ /*
+ * Once FG is reset, everything in SRAM will be wiped out. Redo
+ * hw_init, update jeita settings etc., again to make sure all
+ * the settings got restored again.
+ */
+ rc = fg_hw_init(chip);
+ if (rc) {
+ pr_err("Error in hw_init, rc=%d\n", rc);
+ goto out;
+ }
+
+ update_jeita_setting(&chip->update_jeita_setting.work);
+
+ if (chip->wa_flag & IADC_GAIN_COMP_WA) {
+ rc = fg_init_iadc_config(chip);
+ if (rc)
+ goto out;
+ }
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("loading battery profile\n");
+ if (!chip->use_otp_profile) {
+ chip->battery_missing = true;
+ chip->profile_loaded = false;
+ chip->soc_reporting_ready = false;
+ chip->batt_type = default_batt_type;
+ fg_handle_battery_insertion(chip);
+ }
+
+wait:
+ rc = wait_for_completion_interruptible_timeout(&chip->fg_reset_done,
+ msecs_to_jiffies(FG_RESTART_TIMEOUT_MS));
+
+ /* If we were interrupted wait again one more time. */
+ if (rc == -ERESTARTSYS && !tried_again) {
+ tried_again = true;
+ pr_debug("interrupted, waiting again\n");
+ goto wait;
+ } else if (rc <= 0) {
+ pr_err("fg_restart taking long time rc=%d\n", rc);
+ goto out;
+ }
+
+ rc = fg_mem_write(chip, buf, fg_data[FG_DATA_VINT_ERR].address,
+ fg_data[FG_DATA_VINT_ERR].len,
+ fg_data[FG_DATA_VINT_ERR].offset, 0);
+ if (rc < 0)
+ pr_err("Error in clearing VACT_INT_ERR, rc=%d\n", rc);
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("IMA error recovery done...\n");
+out:
+ fg_restore_soc(chip);
+ fg_restore_cc_soc(chip);
+ fg_enable_irqs(chip, true);
+ update_sram_data_work(&chip->update_sram_data.work);
+ update_temp_data(&chip->update_temp_work.work);
+ schedule_delayed_work(&chip->check_sanity_work,
+ msecs_to_jiffies(1000));
+ chip->ima_error_handling = false;
+ mutex_unlock(&chip->ima_recovery_lock);
+ fg_relax(&chip->fg_reset_wakeup_source);
+}
+
#define DIG_MINOR 0x0
#define DIG_MAJOR 0x1
#define ANA_MINOR 0x2
#define ANA_MAJOR 0x3
#define IACS_INTR_SRC_SLCT BIT(3)
-static int fg_setup_memif_offset(struct fg_chip *chip)
+static int fg_memif_init(struct fg_chip *chip)
{
int rc;
@@ -6464,7 +8530,7 @@
break;
default:
pr_err("Digital Major rev=%d not supported\n",
- chip->revision[DIG_MAJOR]);
+ chip->revision[DIG_MAJOR]);
return -EINVAL;
}
@@ -6481,6 +8547,13 @@
pr_err("failed to configure interrupt source %d\n", rc);
return rc;
}
+
+ /* check for error condition */
+ rc = fg_check_ima_exception(chip, true);
+ if (rc) {
+ pr_err("Error in clearing IMA exception rc=%d", rc);
+ return rc;
+ }
}
return 0;
@@ -6515,6 +8588,7 @@
case PMI8950:
case PMI8937:
case PMI8996:
+ case PMI8940:
chip->pmic_subtype = pmic_rev_id->pmic_subtype;
chip->pmic_revision[REVID_RESERVED] = pmic_rev_id->rev1;
chip->pmic_revision[REVID_VARIANT] = pmic_rev_id->rev2;
@@ -6531,10 +8605,8 @@
}
#define INIT_JEITA_DELAY_MS 1000
-
static void delayed_init_work(struct work_struct *work)
{
- u8 reg[2];
int rc;
struct fg_chip *chip = container_of(work,
struct fg_chip,
@@ -6546,6 +8618,14 @@
rc = fg_hw_init(chip);
if (rc) {
pr_err("failed to hw init rc = %d\n", rc);
+ if (!chip->init_done && chip->ima_supported) {
+ rc = fg_check_alg_status(chip);
+ if (rc && rc != -EBUSY)
+ pr_err("Couldn't check FG ALG status, rc=%d\n",
+ rc);
+ fg_mem_release(chip);
+ return;
+ }
fg_mem_release(chip);
fg_cleanup(chip);
return;
@@ -6566,57 +8646,19 @@
if (!chip->use_otp_profile)
schedule_delayed_work(&chip->batt_profile_init, 0);
+ if (chip->ima_supported && fg_reset_on_lockup)
+ schedule_delayed_work(&chip->check_sanity_work,
+ msecs_to_jiffies(1000));
+
if (chip->wa_flag & IADC_GAIN_COMP_WA) {
- /* read default gain config */
- rc = fg_mem_read(chip, reg, K_VCOR_REG, 2, DEF_GAIN_OFFSET, 0);
- if (rc) {
- pr_err("Failed to read default gain rc=%d\n", rc);
+ rc = fg_init_iadc_config(chip);
+ if (rc)
goto done;
- }
-
- if (reg[1] || reg[0]) {
- /*
- * Default gain register has valid value:
- * - write to gain register.
- */
- rc = fg_mem_write(chip, reg, GAIN_REG, 2,
- GAIN_OFFSET, 0);
- if (rc) {
- pr_err("Failed to write gain rc=%d\n", rc);
- goto done;
- }
- } else {
- /*
- * Default gain register is invalid:
- * - read gain register for default gain value
- * - write to default gain register.
- */
- rc = fg_mem_read(chip, reg, GAIN_REG, 2,
- GAIN_OFFSET, 0);
- if (rc) {
- pr_err("Failed to read gain rc=%d\n", rc);
- goto done;
- }
- rc = fg_mem_write(chip, reg, K_VCOR_REG, 2,
- DEF_GAIN_OFFSET, 0);
- if (rc) {
- pr_err("Failed to write default gain rc=%d\n",
- rc);
- goto done;
- }
- }
-
- chip->iadc_comp_data.dfl_gain_reg[0] = reg[0];
- chip->iadc_comp_data.dfl_gain_reg[1] = reg[1];
- chip->iadc_comp_data.dfl_gain = half_float(reg);
- chip->input_present = is_input_present(chip);
- chip->otg_present = is_otg_present(chip);
- chip->init_done = true;
-
- pr_debug("IADC gain initial config reg_val 0x%x%x gain %lld\n",
- reg[1], reg[0], chip->iadc_comp_data.dfl_gain);
}
+ chip->input_present = is_input_present(chip);
+ chip->otg_present = is_otg_present(chip);
+ chip->init_done = true;
pr_debug("FG: HW_init success\n");
return;
@@ -6675,16 +8717,30 @@
"qpnp_fg_cap_learning");
wakeup_source_init(&chip->esr_extract_wakeup_source.source,
"qpnp_fg_esr_extract");
+ wakeup_source_init(&chip->slope_limit_wakeup_source.source,
+ "qpnp_fg_slope_limit");
+ wakeup_source_init(&chip->dischg_gain_wakeup_source.source,
+ "qpnp_fg_dischg_gain");
+ wakeup_source_init(&chip->fg_reset_wakeup_source.source,
+ "qpnp_fg_reset");
+ wakeup_source_init(&chip->cc_soc_wakeup_source.source,
+ "qpnp_fg_cc_soc");
+ wakeup_source_init(&chip->sanity_wakeup_source.source,
+ "qpnp_fg_sanity_check");
+ spin_lock_init(&chip->sec_access_lock);
mutex_init(&chip->rw_lock);
mutex_init(&chip->cyc_ctr.lock);
mutex_init(&chip->learning_data.learning_lock);
mutex_init(&chip->rslow_comp.lock);
mutex_init(&chip->sysfs_restart_lock);
+ mutex_init(&chip->ima_recovery_lock);
INIT_DELAYED_WORK(&chip->update_jeita_setting, update_jeita_setting);
INIT_DELAYED_WORK(&chip->update_sram_data, update_sram_data_work);
INIT_DELAYED_WORK(&chip->update_temp_work, update_temp_data);
INIT_DELAYED_WORK(&chip->check_empty_work, check_empty_work);
INIT_DELAYED_WORK(&chip->batt_profile_init, batt_profile_init);
+ INIT_DELAYED_WORK(&chip->check_sanity_work, check_sanity_work);
+ INIT_WORK(&chip->ima_error_recovery_work, ima_error_recovery_work);
INIT_WORK(&chip->rslow_comp_work, rslow_comp_work);
INIT_WORK(&chip->fg_cap_learning_work, fg_cap_learning_work);
INIT_WORK(&chip->dump_sram, dump_sram);
@@ -6699,13 +8755,19 @@
INIT_WORK(&chip->gain_comp_work, iadc_gain_comp_work);
INIT_WORK(&chip->bcl_hi_power_work, bcl_hi_power_work);
INIT_WORK(&chip->esr_extract_config_work, esr_extract_config_work);
+ INIT_WORK(&chip->slope_limiter_work, slope_limiter_work);
+ INIT_WORK(&chip->dischg_gain_work, discharge_gain_work);
+ INIT_WORK(&chip->cc_soc_store_work, cc_soc_store_work);
alarm_init(&chip->fg_cap_learning_alarm, ALARM_BOOTTIME,
fg_cap_learning_alarm_cb);
+ alarm_init(&chip->hard_jeita_alarm, ALARM_BOOTTIME,
+ fg_hard_jeita_alarm_cb);
init_completion(&chip->sram_access_granted);
init_completion(&chip->sram_access_revoked);
complete_all(&chip->sram_access_revoked);
init_completion(&chip->batt_id_avail);
init_completion(&chip->first_soc_done);
+ init_completion(&chip->fg_reset_done);
dev_set_drvdata(&pdev->dev, chip);
if (of_get_available_child_count(pdev->dev.of_node) == 0) {
@@ -6763,7 +8825,7 @@
return rc;
}
- rc = fg_setup_memif_offset(chip);
+ rc = fg_memif_init(chip);
if (rc) {
pr_err("Unable to setup mem_if offsets rc=%d\n", rc);
goto of_init_fail;
@@ -6834,10 +8896,18 @@
rc = fg_dfs_create(chip);
if (rc < 0) {
pr_err("failed to create debugfs rc = %d\n", rc);
- goto cancel_work;
+ goto power_supply_unregister;
}
}
+ /* Fake temperature till the actual temperature is read */
+ chip->last_good_temp = 250;
+
+ /* Initialize batt_info variables */
+ chip->batt_range_ocv = &fg_batt_valid_ocv;
+ chip->batt_range_pct = &fg_batt_range_pct;
+ memset(chip->batt_info, INT_MAX, sizeof(chip->batt_info));
+
schedule_work(&chip->init_work);
pr_info("FG Probe success - FG Revision DIG:%d.%d ANA:%d.%d PMIC subtype=%d\n",
@@ -6847,32 +8917,17 @@
return rc;
+power_supply_unregister:
+ power_supply_unregister(chip->bms_psy);
cancel_work:
- cancel_delayed_work_sync(&chip->update_jeita_setting);
- cancel_delayed_work_sync(&chip->update_sram_data);
- cancel_delayed_work_sync(&chip->update_temp_work);
- cancel_delayed_work_sync(&chip->check_empty_work);
- cancel_delayed_work_sync(&chip->batt_profile_init);
- alarm_try_to_cancel(&chip->fg_cap_learning_alarm);
- cancel_work_sync(&chip->set_resume_soc_work);
- cancel_work_sync(&chip->fg_cap_learning_work);
- cancel_work_sync(&chip->dump_sram);
- cancel_work_sync(&chip->status_change_work);
- cancel_work_sync(&chip->cycle_count_work);
- cancel_work_sync(&chip->update_esr_work);
- cancel_work_sync(&chip->rslow_comp_work);
- cancel_work_sync(&chip->sysfs_restart_work);
- cancel_work_sync(&chip->gain_comp_work);
- cancel_work_sync(&chip->init_work);
- cancel_work_sync(&chip->charge_full_work);
- cancel_work_sync(&chip->bcl_hi_power_work);
- cancel_work_sync(&chip->esr_extract_config_work);
+ fg_cancel_all_works(chip);
of_init_fail:
mutex_destroy(&chip->rslow_comp.lock);
mutex_destroy(&chip->rw_lock);
mutex_destroy(&chip->cyc_ctr.lock);
mutex_destroy(&chip->learning_data.learning_lock);
mutex_destroy(&chip->sysfs_restart_lock);
+ mutex_destroy(&chip->ima_recovery_lock);
wakeup_source_trash(&chip->resume_soc_wakeup_source.source);
wakeup_source_trash(&chip->empty_check_wakeup_source.source);
wakeup_source_trash(&chip->memif_wakeup_source.source);
@@ -6882,6 +8937,11 @@
wakeup_source_trash(&chip->gain_comp_wakeup_source.source);
wakeup_source_trash(&chip->capacity_learning_wakeup_source.source);
wakeup_source_trash(&chip->esr_extract_wakeup_source.source);
+ wakeup_source_trash(&chip->slope_limit_wakeup_source.source);
+ wakeup_source_trash(&chip->dischg_gain_wakeup_source.source);
+ wakeup_source_trash(&chip->fg_reset_wakeup_source.source);
+ wakeup_source_trash(&chip->cc_soc_wakeup_source.source);
+ wakeup_source_trash(&chip->sanity_wakeup_source.source);
return rc;
}
@@ -6938,11 +8998,103 @@
return 0;
}
+static void fg_check_ima_idle(struct fg_chip *chip)
+{
+ bool rif_mem_sts = true;
+ int rc, time_count = 0;
+
+ mutex_lock(&chip->rw_lock);
+ /* Make sure IMA is idle */
+ while (1) {
+ rc = fg_check_rif_mem_access(chip, &rif_mem_sts);
+ if (rc)
+ break;
+
+ if (!rif_mem_sts)
+ break;
+
+ if (time_count > 4) {
+ pr_err("Waited for ~16ms polling RIF_MEM_ACCESS_REQ\n");
+ fg_run_iacs_clear_sequence(chip);
+ break;
+ }
+
+ /* Wait for 4ms before reading RIF_MEM_ACCESS_REQ again */
+ usleep_range(4000, 4100);
+ time_count++;
+ }
+ mutex_unlock(&chip->rw_lock);
+}
+
+static void fg_shutdown(struct platform_device *pdev)
+{
+ struct fg_chip *chip = dev_get_drvdata(&pdev->dev);
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_emerg("FG shutdown started\n");
+ fg_cancel_all_works(chip);
+ fg_check_ima_idle(chip);
+ chip->fg_shutdown = true;
+ if (fg_debug_mask & FG_STATUS)
+ pr_emerg("FG shutdown complete\n");
+}
+
static const struct dev_pm_ops qpnp_fg_pm_ops = {
.suspend = fg_suspend,
.resume = fg_resume,
};
+static int fg_reset_lockup_set(const char *val, const struct kernel_param *kp)
+{
+ int rc;
+ struct power_supply *bms_psy;
+ struct fg_chip *chip;
+ int old_val = fg_reset_on_lockup;
+
+ rc = param_set_int(val, kp);
+ if (rc) {
+ pr_err("Unable to set fg_reset_on_lockup: %d\n", rc);
+ return rc;
+ }
+
+ if (fg_reset_on_lockup != 0 && fg_reset_on_lockup != 1) {
+ pr_err("Bad value %d\n", fg_reset_on_lockup);
+ fg_reset_on_lockup = old_val;
+ return -EINVAL;
+ }
+
+ bms_psy = power_supply_get_by_name("bms");
+ if (!bms_psy) {
+ pr_err("bms psy not found\n");
+ return 0;
+ }
+
+ chip = power_supply_get_drvdata(bms_psy);
+ if (!chip->ima_supported) {
+ pr_err("Cannot set this for non-IMA supported FG\n");
+ fg_reset_on_lockup = old_val;
+ return -EINVAL;
+ }
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("fg_reset_on_lockup set to %d\n", fg_reset_on_lockup);
+
+ if (fg_reset_on_lockup)
+ schedule_delayed_work(&chip->check_sanity_work,
+ msecs_to_jiffies(1000));
+ else
+ cancel_delayed_work_sync(&chip->check_sanity_work);
+
+ return rc;
+}
+
+static struct kernel_param_ops fg_reset_ops = {
+ .set = fg_reset_lockup_set,
+ .get = param_get_int,
+};
+
+module_param_cb(reset_on_lockup, &fg_reset_ops, &fg_reset_on_lockup, 0644);
+
static int fg_sense_type_set(const char *val, const struct kernel_param *kp)
{
int rc;
@@ -7025,6 +9177,7 @@
},
.probe = fg_probe,
.remove = fg_remove,
+ .shutdown = fg_shutdown,
};
static int __init fg_init(void)
diff --git a/drivers/power/supply/qcom/qpnp-smb5.c b/drivers/power/supply/qcom/qpnp-smb5.c
index 8c00d9c..8ff5330d 100644
--- a/drivers/power/supply/qcom/qpnp-smb5.c
+++ b/drivers/power/supply/qcom/qpnp-smb5.c
@@ -244,7 +244,7 @@
chg->use_extcon = true;
chg->name = "pmi632_charger";
/* PMI632 does not support PD */
- __pd_disabled = 1;
+ chg->pd_not_supported = true;
chg->hw_max_icl_ua =
(chip->dt.usb_icl_ua > 0) ? chip->dt.usb_icl_ua
: PMI632_MAX_ICL_UA;
diff --git a/drivers/power/supply/qcom/smb5-lib.c b/drivers/power/supply/qcom/smb5-lib.c
index a5af817..58466b83 100644
--- a/drivers/power/supply/qcom/smb5-lib.c
+++ b/drivers/power/supply/qcom/smb5-lib.c
@@ -517,7 +517,7 @@
int rc;
u8 mask = HVDCP_EN_BIT | BC1P2_SRC_DETECT_BIT;
- if (chg->pd_disabled)
+ if (chg->pd_not_supported)
return 0;
rc = smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG, mask,
@@ -3078,7 +3078,8 @@
}
if (!chg->pr_swap_in_progress)
- chg->ok_to_pd = !(*chg->pd_disabled) || chg->early_usb_attach;
+ chg->ok_to_pd = (!(*chg->pd_disabled) || chg->early_usb_attach)
+ && !chg->pd_not_supported;
}
static void typec_src_insertion(struct smb_charger *chg)
@@ -3097,8 +3098,8 @@
}
chg->typec_legacy = stat & TYPEC_LEGACY_CABLE_STATUS_BIT;
- chg->ok_to_pd = !(chg->typec_legacy || *chg->pd_disabled)
- || chg->early_usb_attach;
+ chg->ok_to_pd = (!(chg->typec_legacy || *chg->pd_disabled)
+ || chg->early_usb_attach) && !chg->pd_not_supported;
if (!chg->ok_to_pd) {
rc = smblib_configure_hvdcp_apsd(chg, true);
if (rc < 0) {
diff --git a/drivers/power/supply/qcom/smb5-lib.h b/drivers/power/supply/qcom/smb5-lib.h
index 14eba8c..57281cc7 100644
--- a/drivers/power/supply/qcom/smb5-lib.h
+++ b/drivers/power/supply/qcom/smb5-lib.h
@@ -272,6 +272,7 @@
int otg_delay_ms;
int *weak_chg_icl_ua;
struct qpnp_vadc_chip *vadc_dev;
+ bool pd_not_supported;
/* locks */
struct mutex lock;
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 5fafaca..3ecca59 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -684,6 +684,17 @@
determines last CPU to call into PSCI for cluster Low power
modes.
+config MSM_PM_LEGACY
+ depends on PM
+ select MSM_IDLE_STATS if DEBUG_FS
+ select CPU_IDLE_MULTIPLE_DRIVERS
+ bool "Qualcomm platform specific Legacy PM driver"
+ help
+ Platform specific legacy power driver to manage
+ cores and l2 low power modes. It interfaces with
+ various system drivers and puts the cores into
+ low power modes.
+
config MSM_NOPM
default y if !PM
bool
@@ -700,7 +711,7 @@
trusted apps, unloading them and marshalling buffers to the
trusted fingerprint app.
-if MSM_PM
+if (MSM_PM || MSM_PM_LEGACY)
menuconfig MSM_IDLE_STATS
bool "Collect idle statistics"
help
@@ -733,7 +744,7 @@
histogram. This is for collecting statistics on suspend.
endif # MSM_IDLE_STATS
-endif # MSM_PM
+endif # MSM_PM || MSM_PM_LEGACY
config QCOM_DCC_V2
bool "Qualcomm Technologies Data Capture and Compare engine support for V2"
@@ -868,3 +879,11 @@
interrupt event and event data.
source "drivers/soc/qcom/wcnss/Kconfig"
+
+config BIG_CLUSTER_MIN_FREQ_ADJUST
+ bool "Adjust BIG cluster min frequency based on power collapse state"
+ default n
+ help
+ This driver is used to set the floor of the min frequency of big cluster
+ to the user specified value when the cluster is not power collapsed. When
+ the cluster is power collapsed it resets the value to physical limits.
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index b83f554..967f525 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -105,3 +105,4 @@
obj-$(CONFIG_QCOM_QDSS_BRIDGE) += qdss_bridge.o
obj-$(CONFIG_MSM_BAM_DMUX) += bam_dmux.o
obj-$(CONFIG_WCNSS_CORE) += wcnss/
+obj-$(CONFIG_BIG_CLUSTER_MIN_FREQ_ADJUST) += big_cluster_min_freq_adjust.o
diff --git a/drivers/soc/qcom/big_cluster_min_freq_adjust.c b/drivers/soc/qcom/big_cluster_min_freq_adjust.c
new file mode 100644
index 0000000..dbc89e1
--- /dev/null
+++ b/drivers/soc/qcom/big_cluster_min_freq_adjust.c
@@ -0,0 +1,278 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "big_min_freq_adjust: " fmt
+
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/cpu_pm.h>
+#include <linux/types.h>
+#include <linux/smp.h>
+#include <linux/moduleparam.h>
+
+enum min_freq_adjust {
+ ADJUST_MIN_FLOOR, /* Set min floor to user supplied value */
+ RESET_MIN_FLOOR, /* Reset min floor cpuinfo value */
+};
+
+struct big_min_freq_adjust_data {
+ struct cpumask cluster_cpumask;
+ unsigned int min_freq_floor;
+ struct delayed_work min_freq_work;
+ unsigned long min_down_delay_jiffies;
+ enum min_freq_adjust min_freq_state;
+ enum min_freq_adjust min_freq_request;
+ spinlock_t lock;
+ bool big_min_freq_on;
+ bool is_init;
+};
+static struct big_min_freq_adjust_data big_min_freq_adjust_data;
+
+static void cpufreq_min_freq_work(struct work_struct *work)
+{
+ struct big_min_freq_adjust_data *p = &big_min_freq_adjust_data;
+
+ spin_lock(&p->lock);
+ if (p->min_freq_state == p->min_freq_request) {
+ spin_unlock(&p->lock);
+ return;
+ }
+ spin_unlock(&p->lock);
+ cpufreq_update_policy(cpumask_first(&p->cluster_cpumask));
+}
+
+static int cpufreq_callback(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct big_min_freq_adjust_data *p = &big_min_freq_adjust_data;
+ struct cpufreq_policy *policy = data;
+ unsigned int min_freq_floor;
+
+ if (p->big_min_freq_on == false)
+ return NOTIFY_DONE;
+
+ if (val != CPUFREQ_ADJUST)
+ return NOTIFY_DONE;
+
+ if (!cpumask_test_cpu(cpumask_first(&p->cluster_cpumask),
+ policy->related_cpus))
+ return NOTIFY_DONE;
+
+ spin_lock(&p->lock);
+ if (p->min_freq_request == ADJUST_MIN_FLOOR)
+ min_freq_floor = p->min_freq_floor;
+ else
+ min_freq_floor = policy->cpuinfo.min_freq;
+ cpufreq_verify_within_limits(policy, min_freq_floor,
+ policy->cpuinfo.max_freq);
+ p->min_freq_state = p->min_freq_request;
+ spin_unlock(&p->lock);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block cpufreq_nb = {
+ .notifier_call = cpufreq_callback
+};
+
+#define AFFINITY_LEVEL_L2 1
+static int cpu_pm_callback(struct notifier_block *self,
+ unsigned long cmd, void *v)
+{
+ struct big_min_freq_adjust_data *p = &big_min_freq_adjust_data;
+ unsigned long aff_level = (unsigned long) v;
+ unsigned long delay;
+ int cpu;
+
+ if (p->big_min_freq_on == false)
+ return NOTIFY_DONE;
+
+ if (aff_level != AFFINITY_LEVEL_L2)
+ return NOTIFY_DONE;
+
+ cpu = smp_processor_id();
+
+ if (!cpumask_test_cpu(cpu, &p->cluster_cpumask))
+ return NOTIFY_DONE;
+
+ spin_lock(&p->lock);
+ switch (cmd) {
+ case CPU_CLUSTER_PM_ENTER:
+ p->min_freq_request = RESET_MIN_FLOOR;
+ delay = p->min_down_delay_jiffies;
+ break;
+ case CPU_CLUSTER_PM_ENTER_FAILED:
+ case CPU_CLUSTER_PM_EXIT:
+ p->min_freq_request = ADJUST_MIN_FLOOR;
+ /* To avoid unnecessary oscillations between exit and idle */
+ delay = 1;
+ break;
+ default:
+ spin_unlock(&p->lock);
+ return NOTIFY_DONE;
+ }
+
+ cancel_delayed_work(&p->min_freq_work);
+
+ if (p->min_freq_state != p->min_freq_request)
+ schedule_delayed_work(&p->min_freq_work, delay);
+ spin_unlock(&p->lock);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block cpu_pm_nb = {
+ .notifier_call = cpu_pm_callback
+};
+
+static unsigned long __read_mostly big_min_down_delay_ms;
+#define MIN_DOWN_DELAY_MSEC 80 /* Default big_min_down_delay in msec */
+#define POLICY_MIN 1094400 /* Default min_freq_floor in KHz */
+
+static void trigger_state_machine(void *d)
+{
+ struct big_min_freq_adjust_data *p = &big_min_freq_adjust_data;
+ bool *update_policy = d;
+
+ if (p->min_freq_request != ADJUST_MIN_FLOOR) {
+ p->min_freq_request = ADJUST_MIN_FLOOR;
+ *update_policy = true;
+ }
+}
+
+static int enable_big_min_freq_adjust(void)
+{
+ struct big_min_freq_adjust_data *p = &big_min_freq_adjust_data;
+ int ret;
+ bool update_policy = false;
+
+ if (p->big_min_freq_on == true)
+ return 0;
+
+ INIT_DEFERRABLE_WORK(&p->min_freq_work, cpufreq_min_freq_work);
+
+ cpumask_clear(&p->cluster_cpumask);
+ cpumask_set_cpu(4, &p->cluster_cpumask);
+ cpumask_set_cpu(5, &p->cluster_cpumask);
+ cpumask_set_cpu(6, &p->cluster_cpumask);
+ cpumask_set_cpu(7, &p->cluster_cpumask);
+
+ if (!big_min_down_delay_ms) {
+ big_min_down_delay_ms = MIN_DOWN_DELAY_MSEC;
+ p->min_down_delay_jiffies = msecs_to_jiffies(
+ big_min_down_delay_ms);
+ }
+ if (!p->min_freq_floor)
+ p->min_freq_floor = POLICY_MIN;
+
+ ret = cpu_pm_register_notifier(&cpu_pm_nb);
+ if (ret) {
+ pr_err("Failed to register for PM notification\n");
+ return ret;
+ }
+
+ ret = cpufreq_register_notifier(&cpufreq_nb, CPUFREQ_POLICY_NOTIFIER);
+ if (ret) {
+ pr_err("Failed to register for CPUFREQ POLICY notification\n");
+ cpu_pm_unregister_notifier(&cpu_pm_nb);
+ return ret;
+ }
+
+ p->min_freq_state = RESET_MIN_FLOOR;
+ p->min_freq_request = RESET_MIN_FLOOR;
+ spin_lock_init(&p->lock);
+ p->big_min_freq_on = true;
+
+ /* If BIG cluster is active at this time and continue to be active
+ * forever, in that case min frequency of the cluster will never be
+ * set to floor value. This is to trigger the state machine and set
+ * the min freq and min_freq_state to appropriate values.
+ *
+ * Two possibilities here.
+ * 1) If cluster is idle before this, the wakeup is unnecessary but
+ * the state machine is set to proper state.
+ * 2) If cluster is active before this, the wakeup is necessary and
+ * the state machine is set to proper state.
+ */
+ smp_call_function_any(&p->cluster_cpumask,
+ trigger_state_machine, &update_policy, true);
+ if (update_policy)
+ cpufreq_update_policy(cpumask_first(&p->cluster_cpumask));
+
+ pr_info("big min freq ajustment enabled\n");
+
+ return 0;
+}
+
+static bool __read_mostly big_min_freq_adjust_enabled;
+
+static int set_big_min_freq_adjust(const char *buf,
+ const struct kernel_param *kp)
+{
+ int ret;
+
+ ret = param_set_bool_enable_only(buf, kp);
+ if (ret) {
+ pr_err("Unable to set big_min_freq_adjust_enabled: %d\n", ret);
+ return ret;
+ }
+
+ if (!big_min_freq_adjust_data.is_init)
+ return ret;
+
+ return enable_big_min_freq_adjust();
+}
+
+static const struct kernel_param_ops param_ops_big_min_freq_adjust = {
+ .set = set_big_min_freq_adjust,
+ .get = param_get_bool,
+};
+module_param_cb(min_freq_adjust, &param_ops_big_min_freq_adjust,
+ &big_min_freq_adjust_enabled, 0644);
+
+module_param_named(min_freq_floor, big_min_freq_adjust_data.min_freq_floor,
+ uint, 0644);
+
+static int set_min_down_delay_ms(const char *buf, const struct kernel_param *kp)
+{
+ int ret;
+
+ ret = param_set_ulong(buf, kp);
+ if (ret) {
+ pr_err("Unable to set big_min_down_delay_ms: %d\n", ret);
+ return ret;
+ }
+
+ big_min_freq_adjust_data.min_down_delay_jiffies = msecs_to_jiffies(
+ big_min_down_delay_ms);
+
+ return 0;
+}
+
+static const struct kernel_param_ops param_ops_big_min_down_delay_ms = {
+ .set = set_min_down_delay_ms,
+ .get = param_get_ulong,
+};
+module_param_cb(min_down_delay_ms, &param_ops_big_min_down_delay_ms,
+ &big_min_down_delay_ms, 0644);
+
+static int __init big_min_freq_adjust_init(void)
+{
+ big_min_freq_adjust_data.is_init = true;
+ if (!big_min_freq_adjust_enabled)
+ return 0;
+
+ return enable_big_min_freq_adjust();
+}
+late_initcall(big_min_freq_adjust_init);
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index c254299..b8e9268 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -2113,6 +2113,7 @@
set_bit(ICNSS_WLFW_EXISTS, &penv->state);
clear_bit(ICNSS_FW_DOWN, &penv->state);
+ icnss_ignore_qmi_timeout(false);
penv->wlfw_clnt = qmi_handle_create(icnss_qmi_wlfw_clnt_notify, penv);
if (!penv->wlfw_clnt) {
@@ -2450,8 +2451,10 @@
int ret = 0;
struct icnss_event_pd_service_down_data *event_data = data;
- if (!test_bit(ICNSS_WLFW_EXISTS, &priv->state))
+ if (!test_bit(ICNSS_WLFW_EXISTS, &priv->state)) {
+ icnss_ignore_qmi_timeout(false);
goto out;
+ }
if (priv->force_err_fatal)
ICNSS_ASSERT(0);
@@ -2475,8 +2478,6 @@
out:
kfree(data);
- icnss_ignore_qmi_timeout(false);
-
return ret;
}
@@ -2485,15 +2486,16 @@
{
int ret = 0;
- if (!test_bit(ICNSS_WLFW_EXISTS, &priv->state))
+ if (!test_bit(ICNSS_WLFW_EXISTS, &priv->state)) {
+ icnss_ignore_qmi_timeout(false);
goto out;
+ }
priv->early_crash_ind = true;
icnss_fw_crashed(priv, NULL);
out:
kfree(data);
- icnss_ignore_qmi_timeout(false);
return ret;
}
@@ -3178,7 +3180,8 @@
if (!dev)
return -ENODEV;
- if (test_bit(ICNSS_FW_DOWN, &penv->state)) {
+ if (test_bit(ICNSS_FW_DOWN, &penv->state) ||
+ !test_bit(ICNSS_FW_READY, &penv->state)) {
icnss_pr_err("FW down, ignoring fw_log_mode state: 0x%lx\n",
penv->state);
return -EINVAL;
@@ -3277,7 +3280,8 @@
if (!dev)
return -ENODEV;
- if (test_bit(ICNSS_FW_DOWN, &penv->state)) {
+ if (test_bit(ICNSS_FW_DOWN, &penv->state) ||
+ !test_bit(ICNSS_FW_READY, &penv->state)) {
icnss_pr_err("FW down, ignoring wlan_enable state: 0x%lx\n",
penv->state);
return -EINVAL;
diff --git a/drivers/soc/qcom/system_pm.c b/drivers/soc/qcom/system_pm.c
index 9c6edbf..1befdcf 100644
--- a/drivers/soc/qcom/system_pm.c
+++ b/drivers/soc/qcom/system_pm.c
@@ -22,6 +22,13 @@
#define PDC_TIME_VALID_SHIFT 31
#define PDC_TIME_UPPER_MASK 0xFFFFFF
+#ifdef CONFIG_ARM_GIC_V3
+#include <linux/irqchip/arm-gic-v3.h>
+#else
+static inline void gic_v3_dist_restore(void) {}
+static inline void gic_v3_dist_save(void) {}
+#endif
+
static struct rpmh_client *rpmh_client;
static int setup_wakeup(uint32_t lo, uint32_t hi)
@@ -61,6 +68,7 @@
*/
static int system_sleep_enter(struct cpumask *mask)
{
+ gic_v3_dist_save();
return rpmh_flush(rpmh_client);
}
@@ -70,6 +78,7 @@
static void system_sleep_exit(void)
{
msm_rpmh_master_stats_update();
+ gic_v3_dist_restore();
}
static struct system_pm_ops pm_ops = {
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 810546a..2790fea 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -2821,8 +2821,11 @@
int ret;
mdwc->dwc3_gdsc = devm_regulator_get(mdwc->dev, "USB3_GDSC");
- if (IS_ERR(mdwc->dwc3_gdsc))
+ if (IS_ERR(mdwc->dwc3_gdsc)) {
+ if (PTR_ERR(mdwc->dwc3_gdsc) == -EPROBE_DEFER)
+ return PTR_ERR(mdwc->dwc3_gdsc);
mdwc->dwc3_gdsc = NULL;
+ }
mdwc->xo_clk = devm_clk_get(mdwc->dev, "xo");
if (IS_ERR(mdwc->xo_clk)) {
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index f0e4d5e..f878b8d1 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -414,7 +414,7 @@
dwc3_trace(trace_dwc3_gadget, "Command Timed Out");
dev_err(dwc->dev, "%s command timeout for %s\n",
dwc3_gadget_ep_cmd_string(cmd), dep->name);
- if (cmd != DWC3_DEPCMD_ENDTRANSFER) {
+ if (DWC3_DEPCMD_CMD(cmd) != DWC3_DEPCMD_ENDTRANSFER) {
dwc->ep_cmd_timeout_cnt++;
dwc3_notify_event(dwc,
DWC3_CONTROLLER_RESTART_USB_SESSION, 0);
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index d3e0ca5..90cbb61 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -568,6 +568,7 @@
config USB_CONFIGFS_F_GSI
bool "USB GSI function"
select USB_F_GSI
+ select USB_U_ETHER
depends on USB_CONFIGFS
help
Generic function driver to support h/w acceleration to IPA over GSI.
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
index 5ffbf12..94dd64a 100644
--- a/drivers/usb/gadget/function/f_gsi.c
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -19,7 +19,7 @@
MODULE_PARM_DESC(qti_packet_debug, "Print QTI Packet's Raw Data");
static struct workqueue_struct *ipa_usb_wq;
-static struct f_gsi *__gsi[IPA_USB_MAX_TETH_PROT_SIZE];
+static struct f_gsi *__gsi[USB_PROT_MAX];
static void *ipc_log_ctxt;
#define NUM_LOG_PAGES 15
@@ -56,6 +56,15 @@
static struct gsi_ctrl_pkt *gsi_ctrl_pkt_alloc(unsigned int len, gfp_t flags);
static void gsi_ctrl_pkt_free(struct gsi_ctrl_pkt *pkt);
+static inline bool is_ext_prot_ether(int prot_id)
+{
+ if (prot_id == USB_PROT_RMNET_ETHER ||
+ prot_id == USB_PROT_DPL_ETHER)
+ return true;
+
+ return false;
+}
+
static inline bool usb_gsi_remote_wakeup_allowed(struct usb_function *f)
{
bool remote_wakeup_allowed;
@@ -226,7 +235,7 @@
log_event_err("%s: Set net_ready_trigger", __func__);
gsi->d_port.net_ready_trigger = true;
- if (gsi->prot_id == IPA_USB_ECM) {
+ if (gsi->prot_id == USB_PROT_ECM_IPA) {
cpkt_notify_connect = gsi_ctrl_pkt_alloc(0, GFP_ATOMIC);
if (IS_ERR(cpkt_notify_connect)) {
spin_unlock_irqrestore(&gsi->d_port.lock,
@@ -260,7 +269,7 @@
* Do not post EVT_CONNECTED for RNDIS.
* Data path for RNDIS is enabled on EVT_HOST_READY.
*/
- if (gsi->prot_id != IPA_USB_RNDIS) {
+ if (gsi->prot_id != USB_PROT_RNDIS_IPA) {
post_event(&gsi->d_port, EVT_CONNECTED);
queue_work(gsi->d_port.ipa_usb_wq,
&gsi->d_port.usb_ipa_w);
@@ -311,7 +320,7 @@
log_event_dbg("%s: USB GSI IN OPS Completed", __func__);
in_params->client =
- (gsi->prot_id != IPA_USB_DIAG) ? IPA_CLIENT_USB_CONS :
+ (gsi->prot_id != USB_PROT_DIAG_IPA) ? IPA_CLIENT_USB_CONS :
IPA_CLIENT_USB_DPL_CONS;
in_params->ipa_ep_cfg.mode.mode = IPA_BASIC;
in_params->teth_prot = gsi->prot_id;
@@ -395,7 +404,7 @@
conn_params->usb_to_ipa_xferrscidx =
d_port->out_xfer_rsc_index;
conn_params->usb_to_ipa_xferrscidx_valid =
- (gsi->prot_id != IPA_USB_DIAG) ? true : false;
+ (gsi->prot_id != USB_PROT_DIAG_IPA) ? true : false;
conn_params->ipa_to_usb_xferrscidx_valid = true;
conn_params->teth_prot = gsi->prot_id;
conn_params->teth_prot_params.max_xfer_size_bytes_to_dev = 23700;
@@ -440,7 +449,7 @@
d_port->in_request.db_reg_phs_addr_msb =
ipa_in_channel_out_params.db_reg_phs_addr_msb;
- if (gsi->prot_id != IPA_USB_DIAG) {
+ if (gsi->prot_id != USB_PROT_DIAG_IPA) {
d_port->out_channel_handle =
ipa_out_channel_out_params.clnt_hdl;
d_port->out_request.db_reg_phs_addr_lsb =
@@ -1159,7 +1168,8 @@
switch (cmd) {
case QTI_CTRL_MODEM_OFFLINE:
- if (gsi->prot_id == IPA_USB_DIAG) {
+ if (gsi->prot_id == USB_PROT_DIAG_IPA ||
+ gsi->prot_id == USB_PROT_DPL_ETHER) {
log_event_dbg("%s:Modem Offline not handled", __func__);
goto exit_ioctl;
}
@@ -1177,7 +1187,8 @@
gsi_ctrl_send_notification(gsi);
break;
case QTI_CTRL_MODEM_ONLINE:
- if (gsi->prot_id == IPA_USB_DIAG) {
+ if (gsi->prot_id == USB_PROT_DIAG_IPA ||
+ gsi->prot_id == USB_PROT_DPL_ETHER) {
log_event_dbg("%s:Modem Online not handled", __func__);
goto exit_ioctl;
}
@@ -1186,7 +1197,8 @@
break;
case QTI_CTRL_GET_LINE_STATE:
val = atomic_read(&gsi->connected);
- if (gsi->prot_id == IPA_USB_RMNET)
+ if (gsi->prot_id == USB_PROT_RMNET_IPA ||
+ gsi->prot_id == USB_PROT_RMNET_ETHER)
val = gsi->rmnet_dtr_status;
ret = copy_to_user((void __user *)arg, &val, sizeof(val));
@@ -1207,7 +1219,8 @@
break;
}
- if (gsi->prot_id == IPA_USB_DIAG &&
+ if ((gsi->prot_id == USB_PROT_DIAG_IPA ||
+ gsi->prot_id == USB_PROT_DPL_ETHER) &&
(gsi->d_port.in_channel_handle == -EINVAL)) {
ret = -EAGAIN;
break;
@@ -1222,7 +1235,8 @@
info.ph_ep_info.ep_type = GSI_MBIM_DATA_EP_TYPE_HSUSB;
info.ph_ep_info.peripheral_iface_id = gsi->data_id;
info.ipa_ep_pair.cons_pipe_num =
- (gsi->prot_id == IPA_USB_DIAG) ? -1 :
+ (gsi->prot_id == USB_PROT_DIAG_IPA ||
+ gsi->prot_id == USB_PROT_DPL_ETHER) ? -1 :
gsi->d_port.out_channel_handle;
info.ipa_ep_pair.prod_pipe_num = gsi->d_port.in_channel_handle;
@@ -1328,8 +1342,8 @@
static int gsi_function_ctrl_port_init(struct f_gsi *gsi)
{
int ret;
+ char *cdev_name = NULL;
int sz = GSI_CTRL_NAME_LEN;
- bool ctrl_dev_create = true;
INIT_LIST_HEAD(&gsi->c_port.cpkt_req_q);
INIT_LIST_HEAD(&gsi->c_port.cpkt_resp_q);
@@ -1338,17 +1352,30 @@
init_waitqueue_head(&gsi->c_port.read_wq);
- if (gsi->prot_id == IPA_USB_RMNET)
- strlcat(gsi->c_port.name, GSI_RMNET_CTRL_NAME, sz);
- else if (gsi->prot_id == IPA_USB_MBIM)
- strlcat(gsi->c_port.name, GSI_MBIM_CTRL_NAME, sz);
- else if (gsi->prot_id == IPA_USB_DIAG)
- strlcat(gsi->c_port.name, GSI_DPL_CTRL_NAME, sz);
- else
- ctrl_dev_create = false;
+ switch (gsi->prot_id) {
+ case USB_PROT_RMNET_IPA:
+ cdev_name = GSI_RMNET_CTRL_NAME;
+ break;
+ case USB_PROT_RMNET_ETHER:
+ cdev_name = ETHER_RMNET_CTRL_NAME;
+ break;
+ case USB_PROT_MBIM_IPA:
+ cdev_name = GSI_MBIM_CTRL_NAME;
+ break;
+ case USB_PROT_DIAG_IPA:
+ cdev_name = GSI_DPL_CTRL_NAME;
+ break;
+ case USB_PROT_DPL_ETHER:
+ cdev_name = ETHER_DPL_CTRL_NAME;
+ break;
+ default:
+ break;
+ }
- if (!ctrl_dev_create)
+ if (!cdev_name)
return 0;
+ else
+ strlcat(gsi->c_port.name, cdev_name, sz);
gsi->c_port.ctrl_device.name = gsi->c_port.name;
gsi->c_port.ctrl_device.fops = &gsi_ctrl_dev_fops;
@@ -1512,7 +1539,7 @@
event->wValue = cpu_to_le16(0);
event->wLength = cpu_to_le16(0);
- if (gsi->prot_id == IPA_USB_RNDIS) {
+ if (gsi->prot_id == USB_PROT_RNDIS_IPA) {
data = req->buf;
data[0] = cpu_to_le32(1);
data[1] = cpu_to_le32(0);
@@ -1737,7 +1764,7 @@
/* read the request; process it later */
value = w_length;
req->context = gsi;
- if (gsi->prot_id == IPA_USB_RNDIS)
+ if (gsi->prot_id == USB_PROT_RNDIS_IPA)
req->complete = gsi_rndis_command_complete;
else
req->complete = gsi_ctrl_cmd_complete;
@@ -1749,7 +1776,7 @@
if (w_value || w_index != id)
goto invalid;
- if (gsi->prot_id == IPA_USB_RNDIS) {
+ if (gsi->prot_id == USB_PROT_RNDIS_IPA) {
/* return the result */
buf = rndis_get_next_response(gsi->params, &n);
if (buf) {
@@ -1785,7 +1812,8 @@
case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
line_state = (w_value & GSI_CTRL_DTR ? true : false);
- if (gsi->prot_id == IPA_USB_RMNET)
+ if (gsi->prot_id == USB_PROT_RMNET_IPA ||
+ gsi->prot_id == USB_PROT_RMNET_ETHER)
gsi->rmnet_dtr_status = line_state;
log_event_dbg("%s: USB_CDC_REQ_SET_CONTROL_LINE_STATE DTR:%d\n",
__func__, line_state);
@@ -1882,9 +1910,10 @@
struct f_gsi *gsi = func_to_gsi(f);
/* RNDIS, RMNET and DPL only support alt 0*/
- if (intf == gsi->ctrl_id || gsi->prot_id == IPA_USB_RNDIS ||
- gsi->prot_id == IPA_USB_RMNET ||
- gsi->prot_id == IPA_USB_DIAG)
+ if (intf == gsi->ctrl_id || gsi->prot_id == USB_PROT_RNDIS_IPA ||
+ gsi->prot_id == USB_PROT_RMNET_IPA ||
+ gsi->prot_id == USB_PROT_DIAG_IPA ||
+ is_ext_prot_ether(gsi->prot_id))
return 0;
else if (intf == gsi->data_id)
return gsi->data_interface_up;
@@ -2003,7 +2032,8 @@
log_event_dbg("intf=%u, alt=%u", intf, alt);
/* Control interface has only altsetting 0 */
- if (intf == gsi->ctrl_id || gsi->prot_id == IPA_USB_RMNET) {
+ if (intf == gsi->ctrl_id || gsi->prot_id == USB_PROT_RMNET_IPA ||
+ gsi->prot_id == USB_PROT_RMNET_ETHER) {
if (alt != 0)
goto fail;
@@ -2037,10 +2067,11 @@
if (intf == gsi->data_id) {
gsi->d_port.net_ready_trigger = false;
/* for rndis and rmnet alt is always 0 update alt accordingly */
- if (gsi->prot_id == IPA_USB_RNDIS ||
- gsi->prot_id == IPA_USB_RMNET ||
- gsi->prot_id == IPA_USB_DIAG)
- alt = 1;
+ if (gsi->prot_id == USB_PROT_RNDIS_IPA ||
+ gsi->prot_id == USB_PROT_RMNET_IPA ||
+ gsi->prot_id == USB_PROT_DIAG_IPA ||
+ is_ext_prot_ether(gsi->prot_id))
+ alt = 1;
if (alt > 1)
goto notify_ep_disable;
@@ -2067,8 +2098,9 @@
}
/* Configure EPs for GSI */
- if (gsi->d_port.in_ep) {
- if (gsi->prot_id == IPA_USB_DIAG)
+ if (gsi->d_port.in_ep &&
+ gsi->prot_id <= USB_PROT_DIAG_IPA) {
+ if (gsi->prot_id == USB_PROT_DIAG_IPA)
gsi->d_port.in_ep->ep_intr_num = 3;
else
gsi->d_port.in_ep->ep_intr_num = 2;
@@ -2077,7 +2109,8 @@
GSI_EP_OP_CONFIG);
}
- if (gsi->d_port.out_ep) {
+ if (gsi->d_port.out_ep &&
+ gsi->prot_id <= USB_PROT_DIAG_IPA) {
gsi->d_port.out_ep->ep_intr_num = 1;
usb_gsi_ep_op(gsi->d_port.out_ep,
&gsi->d_port.out_request,
@@ -2086,7 +2119,17 @@
gsi->d_port.gadget = cdev->gadget;
- if (gsi->prot_id == IPA_USB_RNDIS) {
+ if (is_ext_prot_ether(gsi->prot_id)) {
+ net = gether_connect(&gsi->d_port.gether_port);
+ if (IS_ERR(net)) {
+ pr_err("%s:gether_connect err:%ld\n",
+ __func__, PTR_ERR(net));
+ goto notify_ep_disable;
+ }
+ gsi->d_port.gether_port.cdc_filter = 0;
+ }
+
+ if (gsi->prot_id == USB_PROT_RNDIS_IPA) {
gsi_rndis_open(gsi);
net = gsi_rndis_get_netdev("rndis0");
if (IS_ERR(net))
@@ -2098,7 +2141,7 @@
&gsi->d_port.cdc_filter);
}
- if (gsi->prot_id == IPA_USB_ECM)
+ if (gsi->prot_id == USB_PROT_ECM_IPA)
gsi->d_port.cdc_filter = DEFAULT_FILTER;
/*
@@ -2106,7 +2149,7 @@
* handler which is invoked when the host sends the
* GEN_CURRENT_PACKET_FILTER message.
*/
- if (gsi->prot_id != IPA_USB_RNDIS)
+ if (gsi->prot_id != USB_PROT_RNDIS_IPA)
post_event(&gsi->d_port,
EVT_CONNECT_IN_PROGRESS);
queue_work(gsi->d_port.ipa_usb_wq,
@@ -2127,7 +2170,8 @@
atomic_set(&gsi->connected, 1);
/* send 0 len pkt to qti to notify state change */
- if (gsi->prot_id == IPA_USB_DIAG)
+ if (gsi->prot_id == USB_PROT_DIAG_IPA ||
+ gsi->prot_id == USB_PROT_DPL_ETHER)
gsi_ctrl_send_cpkt_tomodem(gsi, NULL, 0);
return 0;
@@ -2145,10 +2189,11 @@
atomic_set(&gsi->connected, 0);
- if (gsi->prot_id == IPA_USB_RNDIS)
+ if (gsi->prot_id == USB_PROT_RNDIS_IPA)
rndis_uninit(gsi->params);
- if (gsi->prot_id == IPA_USB_RMNET)
+ if (gsi->prot_id == USB_PROT_RMNET_IPA ||
+ gsi->prot_id == USB_PROT_RMNET_ETHER)
gsi->rmnet_dtr_status = false;
/* Disable Control Path */
@@ -2171,6 +2216,12 @@
gsi->data_interface_up = false;
log_event_dbg("%s deactivated", gsi->function.name);
+
+ if (is_ext_prot_ether(gsi->prot_id)) {
+ gether_disconnect(&gsi->d_port.gether_port);
+ return;
+ }
+
ipa_disconnect_handler(&gsi->d_port);
post_event(&gsi->d_port, EVT_DISCONNECTED);
queue_work(gsi->d_port.ipa_usb_wq, &gsi->d_port.usb_ipa_w);
@@ -2221,7 +2272,7 @@
* RNDIS_MESSAGE_PACKET_FILTER after performing bus resume.
* Trigger state machine explicitly on resume.
*/
- if (gsi->prot_id == IPA_USB_RNDIS &&
+ if (gsi->prot_id == USB_PROT_RNDIS_IPA &&
!usb_gsi_remote_wakeup_allowed(f))
rndis_flow_control(gsi->params, false);
@@ -2327,7 +2378,7 @@
info->data_nop_desc->bInterfaceNumber = gsi->data_id;
/* allocate instance-specific endpoints */
- if (info->fs_in_desc) {
+ if (info->fs_in_desc && gsi->prot_id <= USB_PROT_DIAG_IPA) {
ep = usb_ep_autoconfig_by_name(cdev->gadget,
info->fs_in_desc, info->in_epname);
if (!ep)
@@ -2335,9 +2386,17 @@
gsi->d_port.in_ep = ep;
msm_ep_config(gsi->d_port.in_ep, NULL);
ep->driver_data = cdev; /* claim */
+ } else {
+ if (info->fs_in_desc) {
+ ep = usb_ep_autoconfig(cdev->gadget, info->fs_in_desc);
+ if (!ep)
+ goto fail;
+ gsi->d_port.in_ep = ep;
+ ep->driver_data = cdev; /* claim */
+ }
}
- if (info->fs_out_desc) {
+ if (info->fs_out_desc && gsi->prot_id <= USB_PROT_DIAG_IPA) {
ep = usb_ep_autoconfig_by_name(cdev->gadget,
info->fs_out_desc, info->out_epname);
if (!ep)
@@ -2345,6 +2404,14 @@
gsi->d_port.out_ep = ep;
msm_ep_config(gsi->d_port.out_ep, NULL);
ep->driver_data = cdev; /* claim */
+ } else {
+ if (info->fs_out_desc) {
+ ep = usb_ep_autoconfig(cdev->gadget, info->fs_out_desc);
+ if (!ep)
+ goto fail;
+ gsi->d_port.out_ep = ep;
+ ep->driver_data = cdev; /* claim */
+ }
}
if (info->fs_notify_desc) {
@@ -2475,14 +2542,17 @@
struct gsi_function_bind_info info = {0};
struct f_gsi *gsi = func_to_gsi(f);
struct rndis_params *params;
+ struct net_device *net;
+ char *name = NULL;
int status;
__u8 class;
__u8 subclass;
__u8 proto;
- if (gsi->prot_id == IPA_USB_RMNET ||
- gsi->prot_id == IPA_USB_DIAG)
+ if (gsi->prot_id == USB_PROT_RMNET_IPA ||
+ gsi->prot_id == USB_PROT_DIAG_IPA ||
+ is_ext_prot_ether(gsi->prot_id))
gsi->ctrl_id = -ENODEV;
else {
status = gsi->ctrl_id = usb_interface_id(c, f);
@@ -2495,7 +2565,7 @@
goto fail;
switch (gsi->prot_id) {
- case IPA_USB_RNDIS:
+ case USB_PROT_RNDIS_IPA:
info.string_defs = rndis_gsi_string_defs;
info.ctrl_desc = &rndis_gsi_control_intf;
info.ctrl_str_idx = 0;
@@ -2641,7 +2711,7 @@
info.ctrl_desc->bInterfaceProtocol = proto;
break;
- case IPA_USB_MBIM:
+ case USB_PROT_MBIM_IPA:
info.string_defs = mbim_gsi_string_defs;
info.ctrl_desc = &mbim_gsi_control_intf;
info.ctrl_str_idx = 0;
@@ -2688,7 +2758,8 @@
c->bConfigurationValue + '0';
}
break;
- case IPA_USB_RMNET:
+ case USB_PROT_RMNET_IPA:
+ case USB_PROT_RMNET_ETHER:
info.string_defs = rmnet_gsi_string_defs;
info.data_desc = &rmnet_gsi_interface_desc;
info.data_str_idx = 0;
@@ -2713,8 +2784,9 @@
info.out_req_buf_len = GSI_OUT_RMNET_BUF_LEN;
info.out_req_num_buf = GSI_NUM_OUT_BUFFERS;
info.notify_buf_len = sizeof(struct usb_cdc_notification);
+ name = "usb_rmnet";
break;
- case IPA_USB_ECM:
+ case USB_PROT_ECM_IPA:
info.string_defs = ecm_gsi_string_defs;
info.ctrl_desc = &ecm_gsi_control_intf;
info.ctrl_str_idx = 0;
@@ -2763,7 +2835,8 @@
gsi->d_port.ipa_init_params.host_ethaddr[5]);
info.string_defs[1].s = gsi->ethaddr;
break;
- case IPA_USB_DIAG:
+ case USB_PROT_DIAG_IPA:
+ case USB_PROT_DPL_ETHER:
info.string_defs = qdss_gsi_string_defs;
info.data_desc = &qdss_gsi_data_intf_desc;
info.data_str_idx = 0;
@@ -2778,6 +2851,7 @@
info.in_req_buf_len = 16384;
info.in_req_num_buf = GSI_NUM_IN_BUFFERS;
info.notify_buf_len = sizeof(struct usb_cdc_notification);
+ name = "dpl_usb";
break;
default:
log_event_err("%s: Invalid prot id %d", __func__,
@@ -2789,6 +2863,29 @@
if (status)
goto dereg_rndis;
+ if (is_ext_prot_ether(gsi->prot_id)) {
+ if (!name)
+ return -EINVAL;
+
+ gsi->d_port.gether_port.in_ep = gsi->d_port.in_ep;
+ gsi->d_port.gether_port.out_ep = gsi->d_port.out_ep;
+ net = gether_setup_name_default(name);
+ if (IS_ERR(net)) {
+ pr_err("%s: gether_setup failed\n", __func__);
+ return PTR_ERR(net);
+ }
+ gsi->d_port.gether_port.ioport = netdev_priv(net);
+ gether_set_gadget(net, c->cdev->gadget);
+ status = gether_register_netdev(net);
+ if (status < 0) {
+ pr_err("%s: gether_register_netdev failed\n",
+ __func__);
+ free_netdev(net);
+ return status;
+ }
+ goto skip_ipa_init;
+ }
+
status = ipa_register_ipa_ready_cb(ipa_ready_callback, gsi);
if (!status) {
log_event_info("%s: ipa is not ready", __func__);
@@ -2814,6 +2911,7 @@
gsi->d_port.sm_state = STATE_INITIALIZED;
+skip_ipa_init:
DBG(cdev, "%s: %s speed IN/%s OUT/%s NOTIFY/%s\n",
f->name,
gadget_is_superspeed(c->cdev->gadget) ? "super" :
@@ -2836,6 +2934,12 @@
{
struct f_gsi *gsi = func_to_gsi(f);
+ if (is_ext_prot_ether(gsi->prot_id)) {
+ gether_cleanup(gsi->d_port.gether_port.ioport);
+ gsi->d_port.gether_port.ioport = NULL;
+ goto skip_ipa_dinit;
+ }
+
/*
* Use drain_workqueue to accomplish below conditions:
* 1. Make sure that any running work completed
@@ -2847,12 +2951,13 @@
drain_workqueue(gsi->d_port.ipa_usb_wq);
ipa_usb_deinit_teth_prot(gsi->prot_id);
- if (gsi->prot_id == IPA_USB_RNDIS) {
+skip_ipa_dinit:
+ if (gsi->prot_id == USB_PROT_RNDIS_IPA) {
gsi->d_port.sm_state = STATE_UNINITIALIZED;
rndis_deregister(gsi->params);
}
- if (gsi->prot_id == IPA_USB_MBIM)
+ if (gsi->prot_id == USB_PROT_MBIM_IPA)
mbim_gsi_ext_config_desc.function.subCompatibleID[0] = 0;
if (gadget_is_superspeed(c->cdev->gadget)) {
@@ -2883,33 +2988,34 @@
static int gsi_bind_config(struct f_gsi *gsi)
{
int status = 0;
- enum ipa_usb_teth_prot prot_id = gsi->prot_id;
- log_event_dbg("%s: prot id %d", __func__, prot_id);
+ log_event_dbg("%s: prot id %d", __func__, gsi->prot_id);
- switch (prot_id) {
- case IPA_USB_RNDIS:
+ switch (gsi->prot_id) {
+ case USB_PROT_RNDIS_IPA:
gsi->function.name = "rndis";
gsi->function.strings = rndis_gsi_strings;
break;
- case IPA_USB_ECM:
+ case USB_PROT_ECM_IPA:
gsi->function.name = "cdc_ethernet";
gsi->function.strings = ecm_gsi_strings;
break;
- case IPA_USB_RMNET:
+ case USB_PROT_RMNET_IPA:
+ case USB_PROT_RMNET_ETHER:
gsi->function.name = "rmnet";
gsi->function.strings = rmnet_gsi_strings;
break;
- case IPA_USB_MBIM:
+ case USB_PROT_MBIM_IPA:
gsi->function.name = "mbim";
gsi->function.strings = mbim_gsi_strings;
break;
- case IPA_USB_DIAG:
+ case USB_PROT_DIAG_IPA:
+ case USB_PROT_DPL_ETHER:
gsi->function.name = "dpl";
gsi->function.strings = qdss_gsi_strings;
break;
default:
- log_event_err("%s: invalid prot id %d", __func__, prot_id);
+ log_event_err("%s: invalid prot id %d", __func__, gsi->prot_id);
return -EINVAL;
}
@@ -3164,9 +3270,10 @@
static int gsi_set_inst_name(struct usb_function_instance *fi,
const char *name)
{
- int prot_id, name_len, ret = 0;
+ int name_len, ret = 0;
struct gsi_opts *opts;
struct f_gsi *gsi;
+ enum usb_prot_id prot_id;
opts = container_of(fi, struct gsi_opts, func_inst);
@@ -3181,7 +3288,7 @@
return -EINVAL;
}
- if (prot_id == IPA_USB_RNDIS)
+ if (prot_id == USB_PROT_RNDIS_IPA)
config_group_init_type_name(&opts->func_inst.group, "",
&gsi_func_rndis_type);
@@ -3249,7 +3356,7 @@
return -ENOMEM;
}
- for (i = 0; i < IPA_USB_MAX_TETH_PROT_SIZE; i++) {
+ for (i = 0; i < USB_PROT_MAX; i++) {
__gsi[i] = gsi_function_init();
if (IS_ERR(__gsi[i]))
return PTR_ERR(__gsi[i]);
@@ -3272,7 +3379,7 @@
if (ipc_log_ctxt)
ipc_log_context_destroy(ipc_log_ctxt);
- for (i = 0; i < IPA_USB_MAX_TETH_PROT_SIZE; i++)
+ for (i = 0; i < USB_PROT_MAX; i++)
kfree(__gsi[i]);
usb_function_unregister(&gsiusb_func);
diff --git a/drivers/usb/gadget/function/f_gsi.h b/drivers/usb/gadget/function/f_gsi.h
index c6e64fd..7e998c4 100644
--- a/drivers/usb/gadget/function/f_gsi.h
+++ b/drivers/usb/gadget/function/f_gsi.h
@@ -27,9 +27,14 @@
#include <linux/ipa_usb.h>
#include <linux/ipc_logging.h>
+#include "u_ether.h"
+
#define GSI_RMNET_CTRL_NAME "rmnet_ctrl"
#define GSI_MBIM_CTRL_NAME "android_mbim"
#define GSI_DPL_CTRL_NAME "dpl_ctrl"
+#define ETHER_RMNET_CTRL_NAME "rmnet_ctrl0"
+#define ETHER_DPL_CTRL_NAME "dpl_ctrl0"
+
#define GSI_CTRL_NAME_LEN (sizeof(GSI_MBIM_CTRL_NAME)+2)
#define GSI_MAX_CTRL_PKT_SIZE 4096
#define GSI_CTRL_DTR (1 << 0)
@@ -114,6 +119,21 @@
RNDIS_ID_MAX,
};
+enum usb_prot_id {
+ /* accelerated: redefined from ipa_usb.h, do not change order */
+ USB_PROT_RNDIS_IPA,
+ USB_PROT_ECM_IPA,
+ USB_PROT_RMNET_IPA,
+ USB_PROT_MBIM_IPA,
+ USB_PROT_DIAG_IPA,
+
+ /* non-accelerated */
+ USB_PROT_RMNET_ETHER,
+ USB_PROT_DPL_ETHER,
+
+ USB_PROT_MAX,
+};
+
#define MAXQUEUELEN 128
struct event_queue {
u8 event[MAXQUEUELEN];
@@ -228,6 +248,7 @@
enum connection_state sm_state;
struct event_queue evt_q;
wait_queue_head_t wait_for_ipa_ready;
+ struct gether gether_port;
/* Track these for debugfs */
struct ipa_usb_xdci_chan_params ipa_in_channel_params;
@@ -237,7 +258,7 @@
struct f_gsi {
struct usb_function function;
- enum ipa_usb_teth_prot prot_id;
+ enum usb_prot_id prot_id;
int ctrl_id;
int data_id;
u32 vendorID;
@@ -285,21 +306,25 @@
func_inst.group);
}
-static enum ipa_usb_teth_prot name_to_prot_id(const char *name)
+static enum usb_prot_id name_to_prot_id(const char *name)
{
if (!name)
goto error;
if (!strncasecmp(name, "rndis", MAX_INST_NAME_LEN))
- return IPA_USB_RNDIS;
+ return USB_PROT_RNDIS_IPA;
if (!strncasecmp(name, "ecm", MAX_INST_NAME_LEN))
- return IPA_USB_ECM;
+ return USB_PROT_ECM_IPA;
if (!strncasecmp(name, "rmnet", MAX_INST_NAME_LEN))
- return IPA_USB_RMNET;
+ return USB_PROT_RMNET_IPA;
if (!strncasecmp(name, "mbim", MAX_INST_NAME_LEN))
- return IPA_USB_MBIM;
+ return USB_PROT_MBIM_IPA;
if (!strncasecmp(name, "dpl", MAX_INST_NAME_LEN))
- return IPA_USB_DIAG;
+ return USB_PROT_DIAG_IPA;
+ if (!strncasecmp(name, "rmnet.ether", MAX_INST_NAME_LEN))
+ return USB_PROT_RMNET_ETHER;
+ if (!strncasecmp(name, "dpl.ether", MAX_INST_NAME_LEN))
+ return USB_PROT_DPL_ETHER;
error:
return -EINVAL;
diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c
index 651776d..3f25946 100644
--- a/drivers/usb/gadget/function/f_mtp.c
+++ b/drivers/usb/gadget/function/f_mtp.c
@@ -613,7 +613,17 @@
return -EINVAL;
spin_lock_irq(&dev->lock);
+ if (dev->state == STATE_OFFLINE) {
+ spin_unlock_irq(&dev->lock);
+ return -ENODEV;
+ }
+
if (dev->ep_out->desc) {
+ if (!cdev) {
+ spin_unlock_irq(&dev->lock);
+ return -ENODEV;
+ }
+
len = usb_ep_align_maybe(cdev->gadget, dev->ep_out, count);
if (len > MTP_BULK_BUFFER_SIZE) {
spin_unlock_irq(&dev->lock);
@@ -1498,7 +1508,10 @@
while ((req = mtp_req_get(dev, &dev->intr_idle)))
mtp_request_free(req, dev->ep_intr);
mutex_unlock(&dev->read_mutex);
+ spin_lock_irq(&dev->lock);
dev->state = STATE_OFFLINE;
+ dev->cdev = NULL;
+ spin_unlock_irq(&dev->lock);
kfree(f->os_desc_table);
f->os_desc_n = 0;
fi_mtp->func_inst.f = NULL;
@@ -1554,7 +1567,9 @@
struct usb_composite_dev *cdev = dev->cdev;
DBG(cdev, "mtp_function_disable\n");
+ spin_lock_irq(&dev->lock);
dev->state = STATE_OFFLINE;
+ spin_unlock_irq(&dev->lock);
usb_ep_disable(dev->ep_in);
usb_ep_disable(dev->ep_out);
usb_ep_disable(dev->ep_intr);
diff --git a/drivers/usb/phy/phy-msm-ssusb-qmp.c b/drivers/usb/phy/phy-msm-ssusb-qmp.c
index 174f75f..1795d24 100644
--- a/drivers/usb/phy/phy-msm-ssusb-qmp.c
+++ b/drivers/usb/phy/phy-msm-ssusb-qmp.c
@@ -657,6 +657,9 @@
if (suspend) {
if (phy->cable_connected)
msm_ssusb_qmp_enable_autonomous(phy, 1);
+ else
+ writel_relaxed(0x00,
+ phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
/* Make sure above write completed with PHY */
wmb();
diff --git a/fs/buffer.c b/fs/buffer.c
index 5d8f496..3a8064d 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1455,12 +1455,48 @@
return 0;
}
+static void __evict_bh_lru(void *arg)
+{
+ struct bh_lru *b = &get_cpu_var(bh_lrus);
+ struct buffer_head *bh = arg;
+ int i;
+
+ for (i = 0; i < BH_LRU_SIZE; i++) {
+ if (b->bhs[i] == bh) {
+ brelse(b->bhs[i]);
+ b->bhs[i] = NULL;
+ goto out;
+ }
+ }
+out:
+ put_cpu_var(bh_lrus);
+}
+
+static bool bh_exists_in_lru(int cpu, void *arg)
+{
+ struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
+ struct buffer_head *bh = arg;
+ int i;
+
+ for (i = 0; i < BH_LRU_SIZE; i++) {
+ if (b->bhs[i] == bh)
+ return 1;
+ }
+
+ return 0;
+
+}
void invalidate_bh_lrus(void)
{
on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
+static void evict_bh_lrus(struct buffer_head *bh)
+{
+ on_each_cpu_cond(bh_exists_in_lru, __evict_bh_lru, bh, 1, GFP_ATOMIC);
+}
+
void set_bh_page(struct buffer_head *bh,
struct page *page, unsigned long offset)
{
@@ -3250,8 +3286,15 @@
do {
if (buffer_write_io_error(bh) && page->mapping)
mapping_set_error(page->mapping, -EIO);
- if (buffer_busy(bh))
- goto failed;
+ if (buffer_busy(bh)) {
+ /*
+ * Check if the busy failure was due to an
+ * outstanding LRU reference
+ */
+ evict_bh_lrus(bh);
+ if (buffer_busy(bh))
+ goto failed;
+ }
bh = bh->b_this_page;
} while (bh != head);
diff --git a/include/dt-bindings/clock/msm-clocks-8952.h b/include/dt-bindings/clock/msm-clocks-8952.h
index 3190d4f..e66c5ed 100644
--- a/include/dt-bindings/clock/msm-clocks-8952.h
+++ b/include/dt-bindings/clock/msm-clocks-8952.h
@@ -237,8 +237,10 @@
#define clk_dsi0pll_byte_clk_src 0xbbaa30be
#define clk_dsi0pll_pixel_clk_src 0x45b3260f
+#define clk_dsi0pll_vco_clk 0x15940d40
#define clk_dsi1pll_byte_clk_src 0x63930a8f
#define clk_dsi1pll_pixel_clk_src 0x0e4c9b56
+#define clk_dsi1pll_vco_clk 0x99797b50
#define clk_dsi_pll0_byte_clk_src 0x44539836
#define clk_dsi_pll0_pixel_clk_src 0x5767c287
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index c122409..914eb4a 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -451,6 +451,8 @@
}
void gic_show_pending_irqs(void);
+void gic_v3_dist_save(void);
+void gic_v3_dist_restore(void);
unsigned int get_gic_highpri_irq(void);
#endif
diff --git a/include/linux/of.h b/include/linux/of.h
index 299aeb1..3dcf853 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -106,6 +106,8 @@
node->fwnode.type = FWNODE_OF;
}
+#define of_node_kobj(n) (&(n)->kobj)
+
/* true when node is initialized */
static inline int of_node_is_initialized(struct device_node *node)
{
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 2fb9f03..78c2a9f 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -282,6 +282,10 @@
POWER_SUPPLY_PROP_ALLOW_HVDCP3,
POWER_SUPPLY_PROP_HVDCP_OPTI_ALLOWED,
POWER_SUPPLY_PROP_MAX_PULSE_ALLOWED,
+ POWER_SUPPLY_PROP_IGNORE_FALSE_NEGATIVE_ISENSE,
+ POWER_SUPPLY_PROP_BATTERY_INFO,
+ POWER_SUPPLY_PROP_BATTERY_INFO_ID,
+ POWER_SUPPLY_PROP_ENABLE_JEITA_DETECTION,
/* Local extensions of type int64_t */
POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT,
/* Properties of type `const char *' */
diff --git a/include/soc/qcom/pm-legacy.h b/include/soc/qcom/pm-legacy.h
new file mode 100644
index 0000000..b95175d
--- /dev/null
+++ b/include/soc/qcom/pm-legacy.h
@@ -0,0 +1,219 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009-2016, 2018, The Linux Foundation. All rights reserved.
+ * Author: San Mehat <san@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_PM_H
+#define __ARCH_ARM_MACH_MSM_PM_H
+
+#include <linux/types.h>
+#include <linux/cpuidle.h>
+#include <asm/smp_plat.h>
+#include <asm/barrier.h>
+#include <dt-bindings/msm/pm.h>
+
+#if !defined(CONFIG_SMP)
+#define msm_secondary_startup NULL
+#elif defined(CONFIG_CPU_V7)
+#define msm_secondary_startup secondary_startup
+#else
+#define msm_secondary_startup secondary_holding_pen
+#endif
+
+enum msm_pm_sleep_mode {
+ MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT,
+ MSM_PM_SLEEP_MODE_RETENTION,
+ MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE,
+ MSM_PM_SLEEP_MODE_POWER_COLLAPSE,
+ MSM_PM_SLEEP_MODE_FASTPC,
+ MSM_PM_SLEEP_MODE_POWER_COLLAPSE_SUSPEND,
+ MSM_PM_SLEEP_MODE_NR,
+ MSM_PM_SLEEP_MODE_NOT_SELECTED,
+};
+
+enum msm_pm_l2_scm_flag {
+ MSM_SCM_L2_ON = 0,
+ MSM_SCM_L2_OFF = 1,
+ MSM_SCM_L2_GDHS = 3,
+ MSM_SCM_L3_PC_OFF = 4,
+};
+
+#define MSM_PM_MODE(cpu, mode_nr) ((cpu) *MSM_PM_SLEEP_MODE_NR + (mode_nr))
+
+struct msm_pm_time_params {
+ uint32_t latency_us;
+ uint32_t sleep_us;
+ uint32_t next_event_us;
+ uint32_t modified_time_us;
+};
+
+struct msm_pm_sleep_status_data {
+ void __iomem *base_addr;
+ uint32_t mask;
+};
+
+struct latency_level {
+ int affinity_level;
+ int reset_level;
+ const char *level_name;
+};
+
+/**
+ * lpm_cpu_pre_pc_cb(): API to get the L2 flag to pass to TZ
+ *
+ * @cpu: cpuid of the CPU going down.
+ *
+ * Returns the l2 flush flag enum that is passed down to TZ during power
+ * collapse
+ */
+enum msm_pm_l2_scm_flag lpm_cpu_pre_pc_cb(unsigned int cpu);
+
+/**
+ * msm_pm_sleep_mode_allow() - API to determine if sleep mode is allowed.
+ * @cpu: CPU on which to check for the sleep mode.
+ * @mode: Sleep Mode to check for.
+ * @idle: Idle or Suspend Sleep Mode.
+ *
+ * Helper function to determine if an Idle or Suspend
+ * Sleep mode is allowed for a specific CPU.
+ *
+ * Return: 1 for allowed; 0 if not allowed.
+ */
+int msm_pm_sleep_mode_allow(unsigned int cpu, unsigned int mode, bool idle);
+
+/**
+ * msm_pm_sleep_mode_supported() - API to determine if sleep mode is
+ * supported.
+ * @cpu: CPU on which to check for the sleep mode.
+ * @mode: Sleep Mode to check for.
+ * @idle: Idle or Suspend Sleep Mode.
+ *
+ * Helper function to determine if an Idle or Suspend
+ * Sleep mode is allowed and enabled for a specific CPU.
+ *
+ * Return: 1 for supported; 0 if not supported.
+ */
+int msm_pm_sleep_mode_supported(unsigned int cpu, unsigned int mode, bool idle);
+
+struct msm_pm_cpr_ops {
+ void (*cpr_suspend)(void);
+ void (*cpr_resume)(void);
+};
+
+void __init msm_pm_set_tz_retention_flag(unsigned int flag);
+void msm_pm_enable_retention(bool enable);
+bool msm_pm_retention_enabled(void);
+static inline bool msm_cpu_pm_enter_sleep(enum msm_pm_sleep_mode mode,
+ bool from_idle)
+{
+ return false;
+}
+static inline void msm_arch_idle(void)
+{
+ /* memory barrier */
+ mb();
+ wfi();
+}
+
+#ifdef CONFIG_MSM_PM_LEGACY
+
+void msm_pm_set_rpm_wakeup_irq(unsigned int irq);
+int msm_pm_wait_cpu_shutdown(unsigned int cpu);
+int __init msm_pm_sleep_status_init(void);
+void lpm_cpu_hotplug_enter(unsigned int cpu);
+s32 msm_cpuidle_get_deep_idle_latency(void);
+int msm_pm_collapse(unsigned long unused);
+
+/**
+ * lpm_get_latency() - API to get latency for a low power mode
+ * @latency_level: pointer to structure with below elements
+ * affinity_level: The level (CPU/L2/CCI etc.) for which the
+ * latency is required.
+ * LPM_AFF_LVL_CPU : CPU level
+ * LPM_AFF_LVL_L2 : L2 level
+ * LPM_AFF_LVL_CCI : CCI level
+ * reset_level: Can be passed "LPM_RESET_LVL_GDHS" for
+ * low power mode with control logic power collapse or
+ * "LPM_RESET_LVL_PC" for low power mode with control and
+ * memory logic power collapse or "LPM_RESET_LVL_RET" for
+ * retention mode.
+ * level_name: Pointer to the cluster name for which the latency
+ * is required or NULL if the minimum value out of all the
+ * clusters is to be returned. For CPU level, the name of the
+ * L2 cluster to be passed. For CCI it has no effect.
+ * @latency: address to get the latency value.
+ *
+ * latency value will be for the particular cluster or the minimum
+ * value out of all the clusters at the particular affinity_level
+ * and reset_level.
+ *
+ * Return: 0 for success; Error number for failure.
+ */
+int lpm_get_latency(struct latency_level *level, uint32_t *latency);
+
+#else
+static inline void msm_pm_set_rpm_wakeup_irq(unsigned int irq) {}
+static inline int msm_pm_wait_cpu_shutdown(unsigned int cpu) { return 0; }
+static inline int msm_pm_sleep_status_init(void) { return 0; };
+
+static inline void lpm_cpu_hotplug_enter(unsigned int cpu)
+{
+ msm_arch_idle();
+};
+
+static inline s32 msm_cpuidle_get_deep_idle_latency(void) { return 0; }
+#define msm_pm_collapse NULL
+
+static inline int lpm_get_latency(struct latency_level *level,
+ uint32_t *latency)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_HOTPLUG_CPU
+int msm_platform_secondary_init(unsigned int cpu);
+#else
+static inline int msm_platform_secondary_init(unsigned int cpu) { return 0; }
+#endif
+
+enum msm_pm_time_stats_id {
+ MSM_PM_STAT_REQUESTED_IDLE = 0,
+ MSM_PM_STAT_IDLE_SPIN,
+ MSM_PM_STAT_IDLE_WFI,
+ MSM_PM_STAT_RETENTION,
+ MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE,
+ MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE,
+ MSM_PM_STAT_IDLE_POWER_COLLAPSE,
+ MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE,
+ MSM_PM_STAT_SUSPEND,
+ MSM_PM_STAT_FAILED_SUSPEND,
+ MSM_PM_STAT_NOT_IDLE,
+ MSM_PM_STAT_COUNT
+};
+
+#ifdef CONFIG_MSM_IDLE_STATS
+void msm_pm_add_stats(enum msm_pm_time_stats_id *enable_stats, int size);
+void msm_pm_add_stat(enum msm_pm_time_stats_id id, int64_t t);
+void msm_pm_l2_add_stat(uint32_t id, int64_t t);
+#else
+static inline void msm_pm_add_stats(enum msm_pm_time_stats_id *enable_stats,
+ int size) {}
+static inline void msm_pm_add_stat(enum msm_pm_time_stats_id id, int64_t t) {}
+static inline void msm_pm_l2_add_stat(uint32_t id, int64_t t) {}
+#endif
+
+void msm_pm_set_cpr_ops(struct msm_pm_cpr_ops *ops);
+extern dma_addr_t msm_pc_debug_counters_phys;
+#endif /* __ARCH_ARM_MACH_MSM_PM_H */
diff --git a/include/trace/events/trace_msm_low_power.h b/include/trace/events/trace_msm_low_power.h
index c25da0e..54c1272 100644
--- a/include/trace/events/trace_msm_low_power.h
+++ b/include/trace/events/trace_msm_low_power.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -250,6 +250,25 @@
__entry->sample, __entry->tmr)
);
+TRACE_EVENT(pre_pc_cb,
+
+ TP_PROTO(int tzflag),
+
+ TP_ARGS(tzflag),
+
+ TP_STRUCT__entry(
+ __field(int, tzflag)
+ ),
+
+ TP_fast_assign(
+ __entry->tzflag = tzflag;
+ ),
+
+ TP_printk("tzflag:%d",
+ __entry->tzflag
+ )
+);
+
#endif
#define TRACE_INCLUDE_FILE trace_msm_low_power
#include <trace/define_trace.h>
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 52f3c55..84d9a2e 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -348,26 +348,27 @@
struct avc_xperms_decision_node *xpd_node;
struct extended_perms_decision *xpd;
- xpd_node = kmem_cache_zalloc(avc_xperms_decision_cachep, GFP_NOWAIT);
+ xpd_node = kmem_cache_zalloc(avc_xperms_decision_cachep,
+ GFP_NOWAIT | __GFP_NOWARN);
if (!xpd_node)
return NULL;
xpd = &xpd_node->xpd;
if (which & XPERMS_ALLOWED) {
xpd->allowed = kmem_cache_zalloc(avc_xperms_data_cachep,
- GFP_NOWAIT);
+ GFP_NOWAIT | __GFP_NOWARN);
if (!xpd->allowed)
goto error;
}
if (which & XPERMS_AUDITALLOW) {
xpd->auditallow = kmem_cache_zalloc(avc_xperms_data_cachep,
- GFP_NOWAIT);
+ GFP_NOWAIT | __GFP_NOWARN);
if (!xpd->auditallow)
goto error;
}
if (which & XPERMS_DONTAUDIT) {
xpd->dontaudit = kmem_cache_zalloc(avc_xperms_data_cachep,
- GFP_NOWAIT);
+ GFP_NOWAIT | __GFP_NOWARN);
if (!xpd->dontaudit)
goto error;
}
@@ -395,7 +396,8 @@
{
struct avc_xperms_node *xp_node;
- xp_node = kmem_cache_zalloc(avc_xperms_cachep, GFP_NOWAIT);
+ xp_node = kmem_cache_zalloc(avc_xperms_cachep,
+ GFP_NOWAIT | __GFP_NOWARN);
if (!xp_node)
return xp_node;
INIT_LIST_HEAD(&xp_node->xpd_head);
@@ -548,7 +550,7 @@
{
struct avc_node *node;
- node = kmem_cache_zalloc(avc_node_cachep, GFP_NOWAIT);
+ node = kmem_cache_zalloc(avc_node_cachep, GFP_NOWAIT | __GFP_NOWARN);
if (!node)
goto out;
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index 5143801..180261d 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -976,9 +976,9 @@
struct snd_rawmidi_runtime *runtime = substream->runtime;
unsigned long appl_ptr;
- spin_lock_irqsave(&runtime->lock, flags);
if (userbuf)
mutex_lock(&runtime->realloc_mutex);
+ spin_lock_irqsave(&runtime->lock, flags);
while (count > 0 && runtime->avail) {
count1 = runtime->buffer_size - runtime->appl_ptr;
if (count1 > count)